mirror of
https://github.com/enricoros/big-AGI.git
synced 2026-05-10 21:50:14 -07:00
Compare commits
1429 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| cff3d90613 | |||
| 9f89243d7f | |||
| 784ee9a4da | |||
| 678e6b8ba1 | |||
| 30e301c496 | |||
| b22904f6bb | |||
| 3f0de7ddca | |||
| 9a6f0f9202 | |||
| 4f0bae5657 | |||
| 2101f06195 | |||
| 6d54b5594c | |||
| 36b8e5b1df | |||
| 8252d671c7 | |||
| 30d97c94aa | |||
| 82654a00d4 | |||
| 9595f14ddc | |||
| 8c496074b2 | |||
| 4d097d7136 | |||
| 178619d275 | |||
| 59c8b2538d | |||
| 443b72c52a | |||
| ae13abef45 | |||
| 83ae02ef9b | |||
| 9bb178413b | |||
| d85f0ebfc4 | |||
| 8f84dc2f24 | |||
| c8b4301bcd | |||
| bd8eaf0b9f | |||
| a4148cf694 | |||
| 4cb0b493dc | |||
| e6354e9089 | |||
| 08506abaee | |||
| 078c80d572 | |||
| b1c9f6be45 | |||
| fc497e9beb | |||
| 6ad01fd981 | |||
| 44ed8664c8 | |||
| 4cb16ee715 | |||
| 2dc9b87cda | |||
| 0e587c4889 | |||
| 41d42d82fb | |||
| f703c8a8c9 | |||
| bf753eab55 | |||
| 698b67af06 | |||
| 377d61056a | |||
| 94b32c8fe3 | |||
| 1e70a59ad6 | |||
| 44d05181f4 | |||
| 996998a5cc | |||
| 98474b2721 | |||
| 198dc0e23f | |||
| 079731c573 | |||
| 492c89650a | |||
| 5b5bbb7649 | |||
| 27d1f081ab | |||
| 76183fd840 | |||
| 345165eabf | |||
| c186732b3b | |||
| 04916b700e | |||
| 013dab185c | |||
| 5ab93faccf | |||
| fa301e3675 | |||
| fa6e7dd9c5 | |||
| 01736ad5da | |||
| ce682b1f85 | |||
| 96d801f40a | |||
| 8985868f63 | |||
| 8febdcd0c0 | |||
| 4d21d5134a | |||
| 09d44a4314 | |||
| 40066e975a | |||
| 202382c80a | |||
| 6ffbb32c57 | |||
| 9b8a3ca503 | |||
| cdd7892077 | |||
| 974aa12137 | |||
| d8f8999333 | |||
| 0efd87b522 | |||
| ec76e1c5cf | |||
| 1e04efe748 | |||
| 69c135ae78 | |||
| 205fb1bb5b | |||
| c8e7315de3 | |||
| 725f3b0fd7 | |||
| 7ee3701607 | |||
| 9537ce59e8 | |||
| 6c0a60e0d1 | |||
| 436a858cb0 | |||
| 6ea6c55f65 | |||
| c477fa86ce | |||
| 08cd5ed5b6 | |||
| b5f2cd35f2 | |||
| 4cb0f6d67e | |||
| 5260ec68cc | |||
| 72ce4d2884 | |||
| ed65f989d9 | |||
| 588ebf4993 | |||
| 22969033a7 | |||
| 8b5e00480b | |||
| aaf752fa9c | |||
| 82d3b36048 | |||
| 588c81f9ad | |||
| 4013a3f997 | |||
| 5823e18904 | |||
| 31ea6863aa | |||
| f3f58f26ae | |||
| 67132f285e | |||
| 20a638a8c9 | |||
| c9174e995f | |||
| 656c507c94 | |||
| a1fb744eb1 | |||
| 28367547fd | |||
| 6610211eac | |||
| b66e3e2afa | |||
| 4bf965953a | |||
| 1bd6513d59 | |||
| 6ce457913e | |||
| ef84ca5a04 | |||
| f76524c650 | |||
| 0be676229f | |||
| 40a0ca7235 | |||
| 1563c3a9dc | |||
| 80f32be80d | |||
| eea53714cc | |||
| 148f1ec22c | |||
| b5a2a70e73 | |||
| e7667e4b7d | |||
| 9250eb9aff | |||
| 92883caaab | |||
| 6d57450efc | |||
| 5dd4c600ea | |||
| 392a3b7949 | |||
| e22c40c7e4 | |||
| c7abee6969 | |||
| 4772e63fdb | |||
| f3d7abefec | |||
| ac76b156cf | |||
| 97e65efc31 | |||
| 13dcaa0a57 | |||
| 1f42b0ae66 | |||
| 003a50f181 | |||
| 32c5849a50 | |||
| 44a8ee0593 | |||
| 1ad70c7b1b | |||
| 7413983159 | |||
| 6c3e8c6a8f | |||
| 7e3e9854ac | |||
| 41fc93345c | |||
| b9275177e3 | |||
| 5ea95e4095 | |||
| 0ea041ed5b | |||
| 037e3b62d8 | |||
| 517c18c902 | |||
| 685b5c5130 | |||
| cfdab2f900 | |||
| 1a743ff264 | |||
| 85463fafb1 | |||
| 0641b0df97 | |||
| 98825081a9 | |||
| f549c13465 | |||
| 8bf7fd7106 | |||
| d8d889c706 | |||
| 90665ed84a | |||
| dd3d10a391 | |||
| 19ebd399a8 | |||
| f21a2973e9 | |||
| 04bb8f9c12 | |||
| 5ea63c8734 | |||
| f4f4ad9373 | |||
| ba06d70c05 | |||
| 62ddd17715 | |||
| f76db1d19e | |||
| f0901dbc03 | |||
| c65a2ce387 | |||
| eaee372938 | |||
| d8836534cb | |||
| 7d2e64b458 | |||
| bc942c5581 | |||
| 4ca24f8314 | |||
| b299dec68e | |||
| b9f07d011b | |||
| 9259be8dbb | |||
| 4b0b7c4493 | |||
| 73f0760809 | |||
| db6c2b1620 | |||
| 1233e846db | |||
| 27312537a7 | |||
| 1dfd4d8395 | |||
| ccd9f0980f | |||
| 5cc48d24ec | |||
| 7929d4eb30 | |||
| 14c5c83f91 | |||
| 263412c422 | |||
| d395fa817d | |||
| 9cfc8c513b | |||
| c92a1cfcb1 | |||
| f45e45ca8f | |||
| e44d4b8b01 | |||
| c342f553db | |||
| 2fab208ccf | |||
| eab3eee19f | |||
| fcb3903b5f | |||
| 90ccb64bd0 | |||
| 1772db5e98 | |||
| a04ee4de95 | |||
| 73b6a54f9e | |||
| 52b08b407c | |||
| 269a3a9991 | |||
| 1b2050cd96 | |||
| a71dd5e3aa | |||
| 8d91ea0413 | |||
| 81b39c7f9c | |||
| a3200e1aab | |||
| 4c8fa8e477 | |||
| f64aae10c5 | |||
| bd8f484cd2 | |||
| 4c3151e3be | |||
| 4e3377f1df | |||
| f95b643a5c | |||
| 85083f323d | |||
| b884386143 | |||
| 01a8d858cf | |||
| 08fed36a61 | |||
| f8b110e108 | |||
| b78b0f1323 | |||
| 148c0b1d77 | |||
| fe501831b2 | |||
| 1862b72ba5 | |||
| a609071966 | |||
| dc2d162e6e | |||
| 07f2cd291e | |||
| a6e040e3e5 | |||
| 3e6cfc9775 | |||
| 0e2abd2615 | |||
| 394e79510e | |||
| 848977820e | |||
| c893f1969c | |||
| bb9a8b81d1 | |||
| 188b338bdc | |||
| 463ef406a7 | |||
| a916ff46dc | |||
| db3a5c0b1b | |||
| b760250da1 | |||
| b5829ac541 | |||
| fa4f2b8fcd | |||
| 333c318a62 | |||
| 5f6f7086d0 | |||
| a7495bd4cf | |||
| 76c4919e9c | |||
| 5530a0253e | |||
| 86aaa65d10 | |||
| 65bf147e04 | |||
| f76ad186f0 | |||
| e5e333db70 | |||
| ddee08c2da | |||
| 93b7686f18 | |||
| e61e9626e2 | |||
| 3c6bfe0152 | |||
| e4fc44bc9c | |||
| 51e23ad3a4 | |||
| 5ebbe45a63 | |||
| 6df276d51d | |||
| f811500b60 | |||
| 2b51605c18 | |||
| 513b840b47 | |||
| d94c8c8a3b | |||
| 3dd641a398 | |||
| 8e545f1738 | |||
| 2a12597567 | |||
| e003683040 | |||
| 0338b3d2e9 | |||
| 5d5bc403c4 | |||
| b646149980 | |||
| 1e7e8ac632 | |||
| 309786e01e | |||
| 08e3caf8c2 | |||
| 21b68d7660 | |||
| 4986c61b2a | |||
| 801479cb5c | |||
| 1d18e21018 | |||
| 4c329a8f51 | |||
| 1eb4eeea42 | |||
| 5ca094111c | |||
| 4ce4202750 | |||
| 4873c0c390 | |||
| 351a28f34f | |||
| a2e99ed84f | |||
| 7d2a26ab66 | |||
| 94268187f1 | |||
| 5aafa98f1c | |||
| c42c34acb4 | |||
| f052963da3 | |||
| 07fa93609d | |||
| cbef9e5a57 | |||
| 0b342339d4 | |||
| 9de3d5a26f | |||
| 78878076c2 | |||
| 65cca958a6 | |||
| 19263f8494 | |||
| 5f71cbed47 | |||
| fe93a66d3b | |||
| aa3b451e00 | |||
| ca245bf8b8 | |||
| 9868068cd6 | |||
| 5fd27629d0 | |||
| 4bfc7636c9 | |||
| 305a7784ee | |||
| 87ecc11661 | |||
| 0faf5d5957 | |||
| 55d7ebd804 | |||
| 842b5b96c2 | |||
| b07fc759c2 | |||
| 0afa70aaab | |||
| c2cf93bf1a | |||
| 88639b8b57 | |||
| bfecc63d0d | |||
| 20bea327e4 | |||
| 1e5c26b490 | |||
| d9183c9658 | |||
| 3ecbbc3b70 | |||
| 1c1d21eed7 | |||
| 6129971bb2 | |||
| 8a3d75f077 | |||
| 9c249b513f | |||
| 04d3fe6e99 | |||
| ea7283b96e | |||
| 295fc111c4 | |||
| 58d73d5d81 | |||
| fd8ce2e99a | |||
| c8a33a06fa | |||
| 874be92a56 | |||
| 6bdb01e3c5 | |||
| ba03ab3aa8 | |||
| 3d554e513d | |||
| e516b9dae9 | |||
| 281d5a611e | |||
| 03eec23efe | |||
| e3d01f6615 | |||
| 99e15333cb | |||
| 5efd16c060 | |||
| b4a6c80d8c | |||
| 7991920f08 | |||
| a113b8223b | |||
| 7bb720a903 | |||
| 515de2679e | |||
| 38caacf816 | |||
| 676b0537e6 | |||
| a24341cda6 | |||
| d937bc246a | |||
| 5d2543131a | |||
| ca5d6872b5 | |||
| a97ce26072 | |||
| c698f78f92 | |||
| 77782a63eb | |||
| 41e1e44ef0 | |||
| 7b1fc56320 | |||
| c0ed41a529 | |||
| ba47fe1cfe | |||
| f1356d8fdc | |||
| 7a899c538f | |||
| 3daac973b1 | |||
| b0ec5f7459 | |||
| 71d6868512 | |||
| 605bb83eb3 | |||
| 3092e02ce9 | |||
| 5d82374975 | |||
| ab4d63e596 | |||
| f800bb8dae | |||
| 18862c0ff4 | |||
| 3765e8c69e | |||
| 70d54a9aa3 | |||
| 50c6ee69af | |||
| dd2532e269 | |||
| 16a54b3452 | |||
| 8373c1c785 | |||
| 39beda5519 | |||
| c7d1eae327 | |||
| ec81e2ff5b | |||
| 697090b695 | |||
| 8680fcc3db | |||
| 233037edd2 | |||
| 81c3251c6e | |||
| dc0fe7f4ca | |||
| 2c9c0f2e0b | |||
| 9c3fb9aadb | |||
| de37ac2c51 | |||
| d6b57702bd | |||
| d94642c29f | |||
| 75378ea88f | |||
| d539c1369b | |||
| 555ee6f333 | |||
| ad989d8a0b | |||
| aae7af4713 | |||
| df0a204767 | |||
| 5cdefc7b5e | |||
| c1bdb1fc61 | |||
| dde22a080b | |||
| 7f5ff30f97 | |||
| 38e1708e91 | |||
| fe4e755304 | |||
| 67f1c87d3a | |||
| eef88ffae2 | |||
| 319965c55c | |||
| 1f309b5c81 | |||
| 5273352ae9 | |||
| 5a48256d77 | |||
| 1d41294c1d | |||
| ff76229706 | |||
| b0f4b30ebe | |||
| 7be8f6c6a7 | |||
| b003993961 | |||
| 4878f361b5 | |||
| a82a3899c5 | |||
| ff0685e6e8 | |||
| a597489526 | |||
| 32e8890f62 | |||
| 211a43eab4 | |||
| 8c28df77cc | |||
| 4e82a12899 | |||
| 8d0e0dea89 | |||
| 5703f23b99 | |||
| 196d08b4fd | |||
| 2f9738f6fb | |||
| d4db225d1e | |||
| efff785713 | |||
| 234accad3f | |||
| 588b4b2c64 | |||
| 7de34d8478 | |||
| 741980adfc | |||
| 2690380bfd | |||
| b482b07335 | |||
| 03b4c6f941 | |||
| b7fd1b13de | |||
| 10a6f2d3c7 | |||
| ba149d3b43 | |||
| f175d071c4 | |||
| 874d0bca05 | |||
| 81ad0328b7 | |||
| 5198fa66cf | |||
| a807bdd6b6 | |||
| 2b209bb679 | |||
| 2f018dce9f | |||
| 2eb77f532a | |||
| 69063bb544 | |||
| 7fad2f8790 | |||
| 620275a1f5 | |||
| ba583fc448 | |||
| 0b96870644 | |||
| eb2b682eb5 | |||
| 577b52120a | |||
| b69ae3edae | |||
| 624b177996 | |||
| bbf01b49c0 | |||
| 86b2d8ae71 | |||
| d18af42d43 | |||
| 4f6e110bf9 | |||
| 62cf334e2f | |||
| 8bd6fd40fd | |||
| f21fe41188 | |||
| cfff23164c | |||
| a8d9233dc4 | |||
| 9c973efbbf | |||
| e2c4255920 | |||
| e01b9ff6a9 | |||
| 0084a635f1 | |||
| 0cd20b8d48 | |||
| 7c4094b4c2 | |||
| acd8430d51 | |||
| 6ae2195d10 | |||
| 6bcc0dd177 | |||
| 2de42c2010 | |||
| a231ccb492 | |||
| 35875d5837 | |||
| c36ff1edfa | |||
| ed35d5b541 | |||
| 2b2a2d84a9 | |||
| a645a4066c | |||
| 508a3beff7 | |||
| df0c133056 | |||
| 2da3942ce2 | |||
| 26547dec0d | |||
| aa4804bdd5 | |||
| eafa1f02cb | |||
| 836533a8c2 | |||
| cfeb134c20 | |||
| 35798b5568 | |||
| 7a250f0848 | |||
| 0a4e6d5142 | |||
| f4254a5ffb | |||
| 7b7718e578 | |||
| c261b2b156 | |||
| 237065553e | |||
| 6116af42df | |||
| 08b28cfde8 | |||
| b019655518 | |||
| 1264a2ebaf | |||
| 1960b4f618 | |||
| c75fbd89e6 | |||
| 3e67201665 | |||
| b60e2bae65 | |||
| 19c7fa4285 | |||
| f450dd3eac | |||
| d366cdd542 | |||
| c1ba83fddb | |||
| 617d6038b1 | |||
| 0abee15c30 | |||
| 1aa2e68e4a | |||
| cd692218ce | |||
| a5b7191185 | |||
| 56baba4cae | |||
| b696447be4 | |||
| e1ef2e72d7 | |||
| e85905e63c | |||
| c6208a2900 | |||
| 01299e4f19 | |||
| 1771575641 | |||
| 88a796fd87 | |||
| e403467d6d | |||
| 1914a2a8a3 | |||
| 683892afef | |||
| 470f8aab70 | |||
| 7a561d6b42 | |||
| affff0df4a | |||
| f5a81bdc94 | |||
| 818ed53b53 | |||
| 12c875f4e3 | |||
| 6ff715c0f0 | |||
| c4a89822d8 | |||
| a8a917f786 | |||
| 3aa9a71a4b | |||
| 3758612ed6 | |||
| b71a4265f8 | |||
| 870cdb67cf | |||
| 902c9dc3f4 | |||
| 0d1db0a360 | |||
| ddd784f041 | |||
| 830d45c06d | |||
| 6e27a31013 | |||
| ed87595e17 | |||
| da01b59ae3 | |||
| 79046b808b | |||
| 5a71153390 | |||
| 94056cdf4b | |||
| 41cb35c6b9 | |||
| e133fc81f6 | |||
| 418c2e496c | |||
| 3690202b38 | |||
| f069c2e5ab | |||
| 97bf6ca276 | |||
| a1390b152f | |||
| 4e8c7d46f6 | |||
| 02944d2015 | |||
| 58726f0425 | |||
| 85f796fb1d | |||
| 311a9c2bf2 | |||
| 6768917d44 | |||
| 7beb412738 | |||
| cf724625cc | |||
| f60b2410dd | |||
| bbdc16b06a | |||
| 0fa2d06725 | |||
| 36cdc4b55f | |||
| c2b4a50bfa | |||
| 73f88d4715 | |||
| af919be2ac | |||
| facffbc6c8 | |||
| dd5b7cb8c2 | |||
| 3dc61109d7 | |||
| 9ef84260b0 | |||
| cf2df7d7f9 | |||
| 16a883526b | |||
| 7b66b1a2eb | |||
| a4adce5c79 | |||
| 9e4174df53 | |||
| b5975713a3 | |||
| 0cd04266b7 | |||
| 5cbd162454 | |||
| bea1600358 | |||
| 6a2e201cf5 | |||
| 960551933e | |||
| 8b38b6416d | |||
| fac4c39f48 | |||
| 4c930efbf0 | |||
| 5a2a47cb87 | |||
| 4912a03250 | |||
| 3b13580613 | |||
| 95905113ac | |||
| c6b34bb252 | |||
| e5387c2323 | |||
| d3b4447669 | |||
| d5c5eac9ec | |||
| 49b61495d0 | |||
| e8298e9d30 | |||
| b29681e1f7 | |||
| 1e0b9a2f0c | |||
| 442b8e95b1 | |||
| 27090d9e28 | |||
| c37b4fa076 | |||
| 83161bbe98 | |||
| 4b166120e6 | |||
| 04494ac752 | |||
| 979809ddb1 | |||
| 5d797c3339 | |||
| 2ff74f6b80 | |||
| 06b1195f9a | |||
| c337b70a42 | |||
| 5047354892 | |||
| ce4e405fc6 | |||
| 30c8d66cd1 | |||
| fb5c8aad29 | |||
| 08d221d00f | |||
| af918178f6 | |||
| ed19896e3c | |||
| 47ad135e4b | |||
| 0eff7825c8 | |||
| 5c8baee390 | |||
| 3f71facb49 | |||
| eba42cc8f2 | |||
| 53092cee51 | |||
| 4bf621f128 | |||
| 33505dbb8e | |||
| c81e1f144f | |||
| ee788b967b | |||
| 38ac8733f6 | |||
| 737a20ee06 | |||
| 19f48b8001 | |||
| 3471d6b4f5 | |||
| 2dc7ba72b3 | |||
| e12279dab0 | |||
| 2e0c79cb64 | |||
| aa697edb8c | |||
| c72e3c58dd | |||
| 1de30c8bd5 | |||
| 3a8eea6fb7 | |||
| b7fd0bdba7 | |||
| 58457cac50 | |||
| 0fbacee7dc | |||
| a498f28d14 | |||
| 5b9c6a2d0e | |||
| 4c7f50ab98 | |||
| ef03d33bbf | |||
| 22c9fc56c0 | |||
| c952fd734f | |||
| 310e99af23 | |||
| e78446904a | |||
| 760e9d8279 | |||
| 61a60c5b9f | |||
| 3054e1b88d | |||
| 6f4fabf147 | |||
| b0c791a055 | |||
| 748991249a | |||
| 1aea7122cc | |||
| 9a83b428f1 | |||
| 2cd38bc02b | |||
| e586142190 | |||
| a10d0dcf5d | |||
| 6fdff488a9 | |||
| 8af0d78127 | |||
| 177686a7fc | |||
| 09b6e47036 | |||
| 704187ba3e | |||
| 4ea8a06503 | |||
| 80fcc7d3e3 | |||
| a04c62da6f | |||
| fcb518a050 | |||
| a222626933 | |||
| a3ceade738 | |||
| 51d58223b4 | |||
| d37a603db2 | |||
| ea984f3ddf | |||
| a9d3e3dead | |||
| 5499e57205 | |||
| 6f8ee0247f | |||
| 05ee5cc3d1 | |||
| cb6b569330 | |||
| 53073ff109 | |||
| 26d362d7a6 | |||
| 91d99e1a63 | |||
| a20917c971 | |||
| af9bf9e5b3 | |||
| 46b473b8a0 | |||
| e2b4028223 | |||
| bac2a31782 | |||
| 3d20e6bf91 | |||
| 9337216092 | |||
| cd35d0ca55 | |||
| 6d591b98b8 | |||
| 486381ab9d | |||
| c619b4debb | |||
| 383a3085ec | |||
| 5a3bb3d817 | |||
| d1ba758887 | |||
| 6fef149997 | |||
| aad3b16ff2 | |||
| 819ba14523 | |||
| d3c25ca16a | |||
| 99a65f72ac | |||
| be9080d392 | |||
| f32d991413 | |||
| 94b68ebefa | |||
| 0450eaaceb | |||
| 408c5ce088 | |||
| d936629ead | |||
| 9bd1a66208 | |||
| 1a0c029ee8 | |||
| e7be228703 | |||
| 0ab4dc972f | |||
| 5f1ca8954f | |||
| 3ec1b033ce | |||
| 0caf27af9b | |||
| bd67e14fa4 | |||
| 494c3b542c | |||
| 8e0884eb64 | |||
| 73c4dc4ac8 | |||
| d77274058d | |||
| 0c8460419b | |||
| eabb589390 | |||
| 62f860ae93 | |||
| 605aae873c | |||
| 62e9ee5b05 | |||
| d686f5d143 | |||
| 3922f232ae | |||
| 6735b438d3 | |||
| fb1e30ab32 | |||
| 0ec06edb57 | |||
| 2a52673c56 | |||
| cc20d00d8a | |||
| 3d9201f7dc | |||
| 176732a6c0 | |||
| 39815b3af3 | |||
| bcce517089 | |||
| a4b50d0d97 | |||
| 2a124e7588 | |||
| a85556ab5b | |||
| cef93d6084 | |||
| 207e257778 | |||
| 12203daa22 | |||
| 27f8e9248d | |||
| 51384dc984 | |||
| bc76cbb5ad | |||
| 5a1ca83f6d | |||
| c9f585f808 | |||
| 9f559e1dbf | |||
| e458bca1a7 | |||
| 43d2226019 | |||
| 122bc34701 | |||
| e01358e268 | |||
| 847c84c3e6 | |||
| b11cac4328 | |||
| f617b06109 | |||
| 345ccf3369 | |||
| d111b8af62 | |||
| 8f964c5c49 | |||
| b6f3f4538f | |||
| f6dd30d5d8 | |||
| af8b79f849 | |||
| 0cfccc423b | |||
| f9a5d582d4 | |||
| 684e00d594 | |||
| 3cd2df0b50 | |||
| 02197f4ee6 | |||
| f9049a3fea | |||
| 462bddc271 | |||
| f79000cf39 | |||
| 1d95273f4d | |||
| 6c4579f434 | |||
| 4ef56ade21 | |||
| 7c1369d6e9 | |||
| 533d54b106 | |||
| cce0ca6560 | |||
| e87ce2593c | |||
| 431dc8b667 | |||
| 5caf614bf7 | |||
| ecf9703570 | |||
| e7641393a0 | |||
| 2201f6ff5a | |||
| 557e1ce293 | |||
| cbe9a6b9a5 | |||
| 9bbcb038d4 | |||
| 3602204420 | |||
| 6f485e5589 | |||
| 2f46a3dfaf | |||
| 267845bba3 | |||
| 6f33a8eebf | |||
| b0d2b09a2e | |||
| c699b6b16b | |||
| 1789bac28d | |||
| 60c05f615f | |||
| bd84523671 | |||
| eb21b9c770 | |||
| ff3ac11afb | |||
| 1ef8c3d02b | |||
| 2ebaf6279b | |||
| a5ee40e184 | |||
| b17a97eac7 | |||
| 63908bfaf6 | |||
| 3f9a419a19 | |||
| bae691e33e | |||
| 91539346ee | |||
| 4842ca81b3 | |||
| 9c77a1a4ab | |||
| 4af284be42 | |||
| 6aec68bb3c | |||
| d4e2b0834f | |||
| 24c2702f96 | |||
| 4691fc9bad | |||
| 8c6c60b6f1 | |||
| bc482407fe | |||
| ff05593db8 | |||
| 3d304d9374 | |||
| 1734f0c2f1 | |||
| 1b25e5df85 | |||
| ea8eb32b0b | |||
| 614a1f95de | |||
| d36bc28914 | |||
| deec48d7c1 | |||
| b318ec8d39 | |||
| b4b0e2befc | |||
| 51d3fe13da | |||
| 58220216d3 | |||
| cac75cca42 | |||
| 47f247907f | |||
| 81e04b7322 | |||
| 56a964b700 | |||
| 458341d79f | |||
| d1d212b075 | |||
| 59c9996489 | |||
| bf8221a2f1 | |||
| 787a11a040 | |||
| 05d114be2f | |||
| 3c04a7dbac | |||
| 1673e1148d | |||
| de416b035d | |||
| 08aaf2989d | |||
| a50964060c | |||
| 54b6108719 | |||
| 585e5c254a | |||
| 477808c9bb | |||
| 6c58a2b688 | |||
| c9854bf30f | |||
| cfed4bbd41 | |||
| 2dd6485b0e | |||
| bf1dd5b860 | |||
| 765c373f7d | |||
| 32d752e82b | |||
| 4623e438fa | |||
| 8a44ff396f | |||
| 086d7ecae4 | |||
| d6adebb711 | |||
| 8325fe7b3c | |||
| 7cf83f878b | |||
| 597ba26424 | |||
| 7bccea47f5 | |||
| 5770116779 | |||
| 0679144f69 | |||
| c9fd288b52 | |||
| 9ae449fcfd | |||
| 249f67f796 | |||
| e91c0bb554 | |||
| 5e306d9598 | |||
| 42ebc81cbb | |||
| f624c37db5 | |||
| 22b6f42936 | |||
| 760c66cac8 | |||
| 1d91e9da03 | |||
| 7eac409ec6 | |||
| 128558420c | |||
| ca3e664690 | |||
| 7eb37462d7 | |||
| 31e02c2d39 | |||
| 003a68b9b8 | |||
| f418708389 | |||
| d23a564035 | |||
| 7fe586244c | |||
| f1a597cdc6 | |||
| 9b68c8f58c | |||
| be5b57ea71 | |||
| 425c82f26d | |||
| 942421c1fb | |||
| b1184f6928 | |||
| ffeb6d1b98 | |||
| b2718b56b7 | |||
| 455f834957 | |||
| 8a14c80ff8 | |||
| e268e733c7 | |||
| 8933a8dfb3 | |||
| 9796cc525c | |||
| cdbf9a9190 | |||
| c26792292d | |||
| 4698e0ee03 | |||
| 68afcb2f4b | |||
| e8f61e46e3 | |||
| 317bb2b7c8 | |||
| d1b3c6b468 | |||
| b35eccc984 | |||
| a780c92047 | |||
| 5fc65698ba | |||
| c923b5ec4c | |||
| 609b2b9a7b | |||
| a257278004 | |||
| 273daed634 | |||
| a6862d8c58 | |||
| 323e5b4ea7 | |||
| 89217a5308 | |||
| a45e995d2f | |||
| 8700b4c8ca | |||
| 1f7f5fb488 | |||
| afde8ee864 | |||
| 3884c26b15 | |||
| 24dce7eae9 | |||
| 1db4e9b771 | |||
| b2ed7eae00 | |||
| 3169fd67e8 | |||
| 773ceb1396 | |||
| 8c62ee1720 | |||
| 5fa1f52922 | |||
| d2180c010c | |||
| b73df7b2ce | |||
| 971f737846 | |||
| a393353907 | |||
| 751f609554 | |||
| e8cd5c6552 | |||
| 86e387b270 | |||
| 32f15aa621 | |||
| bfc889a9e5 | |||
| bd907625a8 | |||
| 60004926d7 | |||
| ac751dfd1a | |||
| 6828eee17f | |||
| 19c97f397b | |||
| 0167a8bdd8 | |||
| 93e5044603 | |||
| 024d930677 | |||
| 98873446a8 | |||
| 5318b7a406 | |||
| 4a6c3cbcd2 | |||
| ac0a39c202 | |||
| 88d39345a5 | |||
| 7aa9cb07b2 | |||
| ef30c8d28d | |||
| 2727f690b4 | |||
| 5945c24301 | |||
| 7b6aff1f95 | |||
| cb0fe3aadd | |||
| 4f9d69f9c2 | |||
| c18aeabe06 | |||
| 550742323a | |||
| c71f789a08 | |||
| a9b4b195bf | |||
| 52e8177f42 | |||
| b0743efc48 | |||
| 6dfd652dac | |||
| 3f93cb2e6d | |||
| 8f7b9b7f19 | |||
| abff89ab6b | |||
| d4f03f743a | |||
| c3714f6651 | |||
| 9b4d0ddf2f | |||
| 2c9ac2f549 | |||
| c1292de2a0 | |||
| 21d5e4cd29 | |||
| a9495a3e15 | |||
| bff5b3d765 | |||
| a4ff37eecc | |||
| 460209f486 | |||
| 96c68c86a4 | |||
| 8b152fdff8 | |||
| 25c9a52873 | |||
| 44302d903c | |||
| c7b8668609 | |||
| 7d60df6266 | |||
| b7f898a5e5 | |||
| 04c4dbe4b8 | |||
| 8d04c494df | |||
| a6aadf76f3 | |||
| a685ef97bf | |||
| d46c29689f | |||
| 65ce07395b | |||
| cc1542fe95 | |||
| b70d57d878 | |||
| 5aa857362b | |||
| c92fc34051 | |||
| b01e66f12a | |||
| a88d20784a | |||
| 63486ed6cf | |||
| 3ceec773f2 | |||
| 817fa56ec4 | |||
| 088fb21a90 | |||
| 79c755a469 | |||
| a091d3f011 | |||
| c7c01a5d7c | |||
| cdc0f48973 | |||
| e884f6b962 | |||
| 485a9bea71 | |||
| f3c3b667ca | |||
| 3b0c4f31b6 | |||
| 5e54600766 | |||
| c3e54f69b7 | |||
| c4022d1c9b | |||
| 6e13a78a24 | |||
| c7cacd9727 | |||
| a77110f704 | |||
| 83a6069de5 | |||
| e9a1890e54 | |||
| bf928aa06e | |||
| b2dc50590c | |||
| 229e53ac32 | |||
| 51e8a47615 | |||
| e80b58a412 | |||
| 48ced8b079 | |||
| c07e2aea1e | |||
| f3194aa30e | |||
| cb3e4cd951 | |||
| f5d8d029ea | |||
| 7c946c4126 | |||
| ded4ea0d69 | |||
| c180c549fe | |||
| 1f30f1168f | |||
| 9446f15922 | |||
| e13b2c9cd9 | |||
| e9e14e0292 | |||
| added19656 | |||
| 4fa3c4d479 | |||
| 690738de9a | |||
| cb31d27e68 | |||
| e6658df123 | |||
| 0b7154a14c | |||
| 02c1838de5 | |||
| fc455fceb8 | |||
| 8d40cdd234 | |||
| 40145c669a | |||
| 34d2fc233f | |||
| 670ec0381a | |||
| 2128f255fe | |||
| b717bd9a9a | |||
| 8aab9311f5 | |||
| ff3e16ea67 | |||
| 1de039c315 | |||
| d05e1786d7 | |||
| e34b5a7372 | |||
| a1b3d1b508 | |||
| 1ebccdf420 | |||
| e5f674509c | |||
| 197a4ae5c0 | |||
| 64d2dcf39c | |||
| caf54c736b | |||
| 423c2cce28 | |||
| a1af51efcb | |||
| ffc1bf9c58 | |||
| a54bfdb342 | |||
| 03861d2dbd | |||
| 8c080da6bf | |||
| a8c98056b6 | |||
| 78e663f955 | |||
| 70546a5039 | |||
| 30f78b33cb | |||
| 712e8c1f16 | |||
| 933dfdfb53 | |||
| 9ce86b029f | |||
| 13580cc69d | |||
| a7dee0002d | |||
| c84b2df3fa | |||
| d9471a8684 | |||
| ef630c2272 | |||
| e188c71652 | |||
| 910260c2c8 | |||
| 22752abc38 | |||
| 92bc3a5d64 | |||
| 1383752cc1 | |||
| 66af16fb81 | |||
| fc019d7b46 | |||
| ac4f0fcb12 | |||
| a6c2bc663d | |||
| e62ffa02e9 | |||
| a003600839 | |||
| ea73feb06d | |||
| 3bdf69e1b7 | |||
| 590fe78bd1 | |||
| 76187ba0e7 | |||
| 5eba375f4d | |||
| 8fa6a8251f | |||
| 75fa046f30 | |||
| 08a8cd1430 | |||
| 3afbb78a39 | |||
| fca6ccd816 | |||
| 8d351822c1 | |||
| 7d274a31fe | |||
| e36dde0d25 | |||
| 51cc6e5ae5 | |||
| 28d911c617 | |||
| b1e9fe58fb | |||
| 16ba014ade | |||
| e9d5a20c1a | |||
| 6e0036f9c4 | |||
| d7e189aa1c | |||
| ea2b444fb2 | |||
| cd1efaf26e | |||
| e47f0e5d43 | |||
| 5284d37984 | |||
| 1bf6fa0e4d | |||
| fc294c82f1 | |||
| 7b1dc49dda | |||
| d15ddeea24 | |||
| eaac213859 | |||
| 02c1460351 | |||
| 2fff35b7d9 | |||
| c5b9072bde | |||
| 8a570e912a | |||
| 1dcc40afb8 | |||
| c2092f8035 | |||
| 886c4b411e | |||
| 8888fd40cd | |||
| 31cd01bccf | |||
| c59b221004 | |||
| cb3cc3e74c | |||
| 9e90015fcc | |||
| 95e0517056 | |||
| 2b2f47915f | |||
| 9acd178ce1 | |||
| f381f80184 | |||
| c83be61343 | |||
| f6e49d31ec | |||
| cc0429a362 | |||
| b35901d94c | |||
| c0df1a23f4 | |||
| 495619af2c | |||
| 72dfadf106 | |||
| 5825909e45 | |||
| d3f6d87ee0 | |||
| c4f4c5ddad | |||
| 2921d7ca27 | |||
| 2021cbc988 | |||
| e9e29861b2 | |||
| 8e6da36059 | |||
| 5e1469e12e | |||
| bd7465f8b1 | |||
| 570397a616 | |||
| b3b5f1daef | |||
| 25ec3ae47c | |||
| 5ba5e3da58 | |||
| 9296c14ca0 | |||
| 310b5d3422 | |||
| 1c5967112e | |||
| 49a3d8ee71 | |||
| cf8b61e8d9 | |||
| 967ae5723e | |||
| 03421acf2f | |||
| d43896cc5a | |||
| b283124a2f | |||
| 8c39be01f8 | |||
| fb2bd4ccd8 | |||
| 5b826ffc45 | |||
| 0b2ab365d3 | |||
| 93fc54992c | |||
| 60b7326deb | |||
| d6e6139244 | |||
| 0892911ddc | |||
| 30267ac50c | |||
| ffef0ef31d | |||
| fc047087ce | |||
| 81d4966535 | |||
| 004d63fda1 | |||
| 23e2dbb354 | |||
| 28e9899b97 | |||
| 7441d41550 | |||
| 99e2d5597a | |||
| 74321a44ca | |||
| 7b664affb7 | |||
| c411835f3b | |||
| 7b62c946a5 | |||
| 252e2fcd29 | |||
| aa2731bccc | |||
| 282c439963 | |||
| e99459aba0 | |||
| 4c35cbbe34 | |||
| cab3537ae2 | |||
| c3f211389b | |||
| a4de84a842 | |||
| 2bf1eaaa0f | |||
| 7f5ddd1629 | |||
| ed798fec65 | |||
| 90386f5794 | |||
| 8ada8811bf | |||
| b24badabef | |||
| 4e20cb12cd | |||
| 245da9e6cc | |||
| a800b34aa7 | |||
| 50c3941f42 | |||
| 6e5d5ee36c | |||
| 2c8b713ff3 | |||
| 8162a6706d | |||
| 952f6883fa | |||
| 373f3e3698 | |||
| 17791f631f | |||
| 6987c67cc7 | |||
| 65a59e5d2d | |||
| 05b9a6d412 | |||
| 6608f4f164 | |||
| 93378ad6b0 | |||
| bd4a60203e | |||
| c9e6a62641 | |||
| 68d797fa99 | |||
| 08011d8cf2 | |||
| 2f91bf7f52 | |||
| d5182c05c1 | |||
| 8e0947a833 | |||
| 1d88fc37b0 | |||
| 46bd8e6f4d | |||
| b95b427331 | |||
| 9b574c60eb | |||
| a8b39cc0a4 | |||
| cdbc7dd9b8 | |||
| 08dfec4fcf | |||
| 7f4553225b | |||
| f37e65a91e | |||
| c022f8a68c | |||
| daa7a506a5 | |||
| f3dcf39c15 | |||
| 06cbef16d4 | |||
| ab31bcd3e3 | |||
| 563a99864f | |||
| 39b8abc2c6 | |||
| f3dd837076 | |||
| d6b3a5259d | |||
| 9fea1d5c64 | |||
| 0adb5355c7 | |||
| 01d807b61e | |||
| 285bb812d0 | |||
| d897155d6e | |||
| 7154426279 | |||
| 4526084e4d | |||
| 0c5c786ae3 | |||
| 8a2c4aa356 | |||
| 4cba819edd | |||
| 4db42a2b29 | |||
| fc0ee5b698 | |||
| 2c0c3f1c70 | |||
| 3f3976b73c | |||
| 82d5dcced5 | |||
| f4eaed694a | |||
| 05d9869326 | |||
| 2675934ff8 | |||
| fb6e19d3ea | |||
| f1151d54e1 | |||
| 6a0fa4f9fa | |||
| 20d96fffc8 | |||
| ad6c06308a | |||
| 84ee4171a4 | |||
| 6bc4f8a1e4 | |||
| 8876aa0866 | |||
| 691d2e7228 | |||
| 7a12755de9 | |||
| 8573f56d03 | |||
| 8f3e683321 | |||
| 64867b0b67 | |||
| e42d060e57 | |||
| 2ca9ab8a0c | |||
| fdc0c6b371 | |||
| 8f8779c3cd | |||
| 851877ad8b | |||
| 8df74529ad | |||
| 353f51ebf0 | |||
| 6c5cb08118 | |||
| 54fee92b15 | |||
| 776431c801 | |||
| 9f893ce999 | |||
| 820447670c | |||
| b43c49cd64 | |||
| f9c3558975 | |||
| 1b75250824 | |||
| 3fa3bb5d03 | |||
| ef0ff55f1f | |||
| 66aa8ed177 | |||
| 519286bc69 | |||
| 9882f45fd2 | |||
| 634f6216a0 | |||
| 69574a7d1c | |||
| eddd4b9be8 | |||
| 9a9c31ff53 | |||
| 41ee7a1c85 | |||
| 2f9bbf373c | |||
| d662e10ebb | |||
| cd31092333 | |||
| 1eae7ab6f3 | |||
| ba378f852f | |||
| 5cfd1e557d | |||
| df31d79eaf | |||
| 12d7304325 | |||
| 41424cbdfd | |||
| 05dda519a2 | |||
| 120d39282e | |||
| 8e7d0fd13b | |||
| 3d979fdfbb | |||
| 6ab47ae3cb | |||
| a4977b4924 | |||
| bac9c692b8 | |||
| 6ab15356e1 | |||
| 73cc7121c3 | |||
| 1aeef06f49 | |||
| 3b16bcf01d | |||
| f6351fda41 | |||
| 007e91480d | |||
| 163ef9296e | |||
| fa042f7d68 | |||
| 8a11040dde | |||
| a88971d557 | |||
| 5867e5fcc5 | |||
| 20e587d6d3 | |||
| 6bfa8471cd | |||
| 5c10bce2f4 | |||
| f1663f6668 | |||
| 90c27e0e74 | |||
| b5eac0d907 | |||
| 4eabe2cb3a | |||
| a1c0d30a06 | |||
| 63c9f65040 | |||
| f58a066bff | |||
| 952ea6357a | |||
| 6695973035 | |||
| 3dc28635f4 | |||
| 0bde01a85f | |||
| b9840c2074 | |||
| 8228a76875 | |||
| 46b370a2e3 | |||
| 820e9513ba | |||
| bd71d64db3 | |||
| 9d4baf827c | |||
| d6843d7fcf | |||
| babb1dd962 | |||
| aa32e396a7 | |||
| 1068efcb49 | |||
| 576c7f1458 | |||
| 37c857b055 | |||
| 794dfb44d1 | |||
| 929bb6dc66 | |||
| 28337e31eb | |||
| 09a38c0e4b | |||
| 645b8fb9cd | |||
| 541588948c | |||
| bdd6fcfbbc | |||
| 9e50286c66 | |||
| 418e4649dc | |||
| 4a70f20f4a | |||
| d6eabfcb6d | |||
| d88889d760 | |||
| 85146d8af0 | |||
| 9612572f07 | |||
| 4bb1dddf4d | |||
| b066a86962 | |||
| 6086455782 | |||
| 9020b3cbad | |||
| 5822dea270 | |||
| c445f59664 | |||
| 737e4cb4f9 | |||
| dba7368d01 | |||
| 314c4cd8cc | |||
| 3e46f99e14 | |||
| e0cc552b8d | |||
| 6b5be403af | |||
| 269d5989bc | |||
| edfe3d9b65 | |||
| ffb2c42a26 | |||
| b7de19b020 | |||
| 77cd659b39 | |||
| fbba9d8357 | |||
| f464a9efdf | |||
| 7ec4290582 | |||
| 3f887a1d3a | |||
| ffd76dc587 | |||
| d7f3594a73 | |||
| 32fa5f206b | |||
| 70d2c09e81 | |||
| 17f03806d0 | |||
| b6aba0efa4 | |||
| 65a5e06935 | |||
| f459cb9805 | |||
| f5470aca5d | |||
| c26af97fe7 | |||
| 766ec458a2 | |||
| 48ff78580c | |||
| 396f7524d7 | |||
| da19ef42f5 | |||
| 91abe5aa43 | |||
| 682435321b | |||
| 76f0d60224 | |||
| 628b88ef9f | |||
| 6a792814ce | |||
| 05ce15d677 | |||
| 4a9d0d4f8e | |||
| 16f0552682 | |||
| 9e3819b9c7 | |||
| 233a0d4b35 | |||
| bd95b808ae | |||
| 96132c4585 | |||
| 3edacef572 | |||
| 36889c1695 | |||
| cd2c6c1d8f | |||
| d8c78b1a00 | |||
| 74a22c26cf | |||
| f742eba4c1 | |||
| 36c2812157 | |||
| d353fc4c63 | |||
| 98bd3d6da0 | |||
| cd5ec8d295 | |||
| f91c6456bd | |||
| 67af87968e | |||
| 58ea3e1b35 | |||
| a9435c10e8 | |||
| a86860fe76 | |||
| a3d707f78a | |||
| c502426249 | |||
| 2fb5ffcecf | |||
| 6d995c1253 | |||
| a860c1c490 | |||
| 481d9cc745 | |||
| 7e53a7bc2b | |||
| 4df10e3782 | |||
| 396da65178 | |||
| 87e8faf383 | |||
| 9eb3e6d398 | |||
| 332c4fdf82 | |||
| 4d247344d5 | |||
| 4e4738d4f6 | |||
| dbfa7b0932 | |||
| e90231d58d | |||
| 9bc7d40425 | |||
| d2d5c0621b | |||
| e41d57c914 | |||
| 7c5336cba3 |
@@ -0,0 +1 @@
|
||||
commands/code/apply-issue-main.md
|
||||
@@ -0,0 +1,49 @@
|
||||
---
|
||||
description: Sync OpenRouter API implementation with latest upstream documentation
|
||||
argument-hint: specific feature to check
|
||||
---
|
||||
|
||||
Review the OpenRouter implementation:
|
||||
- Models list: `src/modules/llms/server/openai/openrouter.wiretypes.ts` (list API response schema)
|
||||
- Chat wire types: `src/modules/aix/server/dispatch/wiretypes/openai.wiretypes.ts` (OpenAI-compatible)
|
||||
- Request adapter: `src/modules/aix/server/dispatch/chatGenerate/adapters/openai.chatCompletions.ts` ('openrouter' dialect)
|
||||
- Response parser: `src/modules/aix/server/dispatch/chatGenerate/parsers/openai.parser.ts` (shared OpenAI parser)
|
||||
- Vendor config: `src/modules/llms/vendors/openrouter/openrouter.vendor.ts`
|
||||
|
||||
GOAL: Ensure complete support for OpenRouter's API including advanced features like reasoning/thinking tokens, tool use, search integration, and multi-modal capabilities. OpenRouter is OpenAI-compatible but has important extensions and differences.
|
||||
|
||||
Use Task tool with subagent_type=Explore and thoroughness="very thorough" to discover:
|
||||
1. Map API structure - all endpoints, parameters, capabilities from https://openrouter.ai/docs
|
||||
2. **Advanced features** - How to use: reasoning/thinking tokens (o1, DeepSeek R1), tool use/function calling, search integration, multi-modal (vision/audio)
|
||||
3. Changelog location - How does OpenRouter communicate API updates and breaking changes?
|
||||
4. Model metadata - What capabilities are exposed in the models list API? How to detect feature support?
|
||||
5. OpenAI deviations - Extensions, special headers (HTTP-Referer, X-Title), response fields, streaming differences
|
||||
|
||||
Then check the latest API information. Try these sources (be creative if blocked):
|
||||
|
||||
**Primary Sources:**
|
||||
- API Reference: https://openrouter.ai/docs/api-reference
|
||||
- Chat Completions: https://openrouter.ai/docs/api-reference#chat-completions
|
||||
- Models List: https://openrouter.ai/docs/api-reference#models-list
|
||||
- Parameters Guide: https://openrouter.ai/docs/parameters
|
||||
- Announcements: https://openrouter.ai/announcements (feature launches, API updates, new models)
|
||||
- Models Directory: https://openrouter.ai/models (check metadata for capabilities)
|
||||
|
||||
**Alternative Sources:**
|
||||
- GitHub: https://github.com/OpenRouterTeam (SDKs, examples, issues for recent changes)
|
||||
- Web Search: "openrouter api changelog" or "openrouter reasoning tokens" or "openrouter tool use"
|
||||
|
||||
**If blocked:** Ask the user to provide the relevant documentation.
|
||||
|
||||
$ARGUMENTS
|
||||
Focus on discrepancies and gaps:
|
||||
- **Request/Response structure**: New fields, changed requirements, streaming event types
|
||||
- **Feature support**: Thinking tokens format, tool calling protocol, search parameters
|
||||
- **Model capabilities**: How to detect and enable advanced features per model
|
||||
- **OpenRouter extensions**: Headers, routing, fallbacks, rate limiting (free vs paid)
|
||||
- **Breaking changes**: Protocol updates, deprecated fields, new required parameters
|
||||
|
||||
Report differences in wire types, adapter logic, parser handling, or dialect-specific quirks.
|
||||
Prioritize new capabilities that improve user experience (reasoning visibility, better tool use, etc.).
|
||||
|
||||
When making changes, add comments with date: `// [OpenRouter, 2026-MM-DD]: explanation`
|
||||
@@ -0,0 +1,56 @@
|
||||
---
|
||||
description: Sync xAI Responses API implementation with latest upstream documentation
|
||||
argument-hint: specific feature to check
|
||||
---
|
||||
|
||||
Review the xAI Responses API implementation:
|
||||
- xAI wire types: `src/modules/aix/server/dispatch/wiretypes/xai.wiretypes.ts` (xAI-specific request schema, tools)
|
||||
- Request adapter: `src/modules/aix/server/dispatch/chatGenerate/adapters/xai.responsesCreate.ts` (AIX → xAI Responses API)
|
||||
- Response parser: `src/modules/aix/server/dispatch/chatGenerate/parsers/openai.responses.parser.ts` (shared with OpenAI Responses)
|
||||
- Dispatch routing: `src/modules/aix/server/dispatch/chatGenerate/chatGenerate.dispatch.ts` (dialect='xai' routing)
|
||||
- OpenAI shared types: `src/modules/aix/server/dispatch/wiretypes/openai.wiretypes.ts` (InputItem/OutputItem schemas reused by xAI)
|
||||
|
||||
IMPORTANT context:
|
||||
- We use ONLY the xAI Responses API (`POST /v1/responses`). We do NOT use the Chat Completions API (`/v1/chat/completions`) for xAI anymore.
|
||||
- xAI's Responses API is similar to OpenAI's but has key differences - the skill should find what changed since our last sync.
|
||||
- Response streaming/parsing reuses the OpenAI Responses parser since the format is compatible.
|
||||
- We do NOT implement: Files API, Collections Search, Remote MCP tools, Voice Agent API, Image/Video generation, Batch API, or Deferred Completions.
|
||||
|
||||
Then take a look at the newest API information available. Try these sources, and be creative if some are blocked:
|
||||
|
||||
**Primary Sources (guide pages work well with WebFetch despite being JS-rendered):**
|
||||
- Responses API Guide: https://docs.x.ai/docs/guides/chat
|
||||
- Stateful Responses: https://docs.x.ai/docs/guides/responses-api
|
||||
- Tools Overview: https://docs.x.ai/docs/guides/tools/overview
|
||||
- Search Tools (web_search, x_search): https://docs.x.ai/docs/guides/tools/search-tools
|
||||
- Code Execution Tool: https://docs.x.ai/docs/guides/tools/code-execution-tool
|
||||
- Function Calling: https://docs.x.ai/docs/guides/function-calling
|
||||
- Streaming: https://docs.x.ai/docs/guides/streaming-response
|
||||
- Reasoning: https://docs.x.ai/docs/guides/reasoning
|
||||
- Structured Outputs: https://docs.x.ai/docs/guides/structured-outputs
|
||||
- Models & Pricing: https://docs.x.ai/developers/models
|
||||
- Release Notes: https://docs.x.ai/developers/release-notes
|
||||
- API Reference: https://docs.x.ai/developers/api-reference#create-new-response
|
||||
|
||||
**Alternative Sources if primary blocked:**
|
||||
- xAI Python SDK: https://github.com/xai-org/xai-sdk-python
|
||||
- Web Search for "xai grok api changelog 2026" or "xai responses api new features"
|
||||
|
||||
**If all blocked:** Explain what you attempted and ask user to provide documentation manually.
|
||||
|
||||
$ARGUMENTS
|
||||
Check carefully for discrepancies between our implementation and the current API docs:
|
||||
|
||||
1. **Request fields**: Compare `XAIWire_API_Responses.Request_schema` against current docs - any new, changed, or deprecated parameters?
|
||||
2. **Tool definitions**: Compare `XAIWire_Responses_Tools` - any new parameters on web_search/x_search/code_interpreter? Any new hosted tool types?
|
||||
3. **Input/Output item types**: Any xAI-specific output items not handled by the shared OpenAI parser (e.g., x_search_call, web_search_call, code_interpreter_call)?
|
||||
4. **Streaming events**: Any xAI-specific SSE event types beyond what the OpenAI Responses parser handles?
|
||||
5. **Response shape**: Usage reporting differences, new fields in the response object?
|
||||
6. **Adapter logic**: Message role mapping, content type handling, system message approach - still correct?
|
||||
7. **Include options**: Any new values for the `include` array?
|
||||
8. **Reasoning config**: Which models support it and with what values?
|
||||
|
||||
Prioritize breaking changes and new capabilities that would improve the user experience.
|
||||
When making changes, add comments with date: `// [xAI, 2026-MM-DD]: explanation`
|
||||
|
||||
**Self-update this skill**: After completing the sync, if your research reveals that assumptions in THIS skill file (`.claude/commands/aix/sync-xai-api.md`) are wrong or outdated - e.g., new APIs we now implement, new tool types added, URLs moved, file paths changed - update this skill file to stay accurate for next time.
|
||||
@@ -0,0 +1,63 @@
|
||||
---
|
||||
description: Search git history for commits that introduce or remove an exact string, within a commit range
|
||||
argument-hint: "[search-string] [ancestor-commit]"
|
||||
allowed-tools: Bash(git *)
|
||||
---
|
||||
|
||||
Search git history using `git log -S` (pickaxe) to find commits that add or remove an exact string.
|
||||
This repo has 7000+ commits, so pickaxe searches can take 30-60+ seconds - this is expected.
|
||||
|
||||
## Parameters
|
||||
|
||||
- `$0` - The exact string to search for in file contents (not commit messages). Examples: `getLabsSUDO`, `EXPERIMENT_ON_SUDO`, `myFunctionName`
|
||||
- `$1` - A commit hash or unique commit message substring to identify the start of the range. Examples: `5af80b96a8`, `"Sudo Mode": 10-click`
|
||||
|
||||
## Example
|
||||
|
||||
```
|
||||
/code:grep-history EXPERIMENT_ON_SUDO "Sudo Mode": 10-click
|
||||
```
|
||||
|
||||
This searches all commits between the `"Sudo Mode": 10-click` commit and HEAD for any that add or remove the string `EXPERIMENT_ON_SUDO` in file contents.
|
||||
|
||||
## Procedure
|
||||
|
||||
### Step 1: Resolve the ancestor commit
|
||||
|
||||
If `$1` looks like a commit hash (hex string), use it directly.
|
||||
Otherwise, search for it by message, restricting to ancestors of HEAD:
|
||||
|
||||
```bash
|
||||
git log --oneline --grep='$1' HEAD | head -5
|
||||
```
|
||||
|
||||
This only walks commits reachable from HEAD, so every result is a guaranteed ancestor - no verification loop needed.
|
||||
|
||||
If multiple results, pick the oldest (last listed) since it represents the earliest matching commit.
|
||||
If none, report the error and stop.
|
||||
|
||||
### Step 2: Run pickaxe search
|
||||
|
||||
```bash
|
||||
git log -S "$0" --oneline <resolved_ancestor>..HEAD
|
||||
```
|
||||
|
||||
This finds commits where the count of `$0` in the codebase changes (i.e., it was added or removed).
|
||||
This can be slow on 7000+ commits - wait for it.
|
||||
|
||||
### Step 3: Check endpoints
|
||||
|
||||
Also check whether the string exists at HEAD and at the ancestor commit:
|
||||
|
||||
```bash
|
||||
git grep -l "$0" HEAD 2>/dev/null || echo "(not found at HEAD)"
|
||||
git grep -l "$0" <resolved_ancestor> 2>/dev/null || echo "(not found at ancestor)"
|
||||
```
|
||||
|
||||
### Step 4: Report
|
||||
|
||||
Present results concisely:
|
||||
- Number of commits found (or "none")
|
||||
- List of matching commits (hash + subject line)
|
||||
- Whether the string exists at HEAD and/or at the ancestor
|
||||
- If found, suggest next steps (e.g., `git show <hash>` to inspect specific commits)
|
||||
@@ -0,0 +1,34 @@
|
||||
---
|
||||
description: Review in-flight changes for coherence, completeness, and quality
|
||||
---
|
||||
|
||||
Review the current in-flight changes in the big-agi-private repository (dev branch, continuously rebased ~1800 commits on top of main).
|
||||
|
||||
**Step 1: Scope and read**
|
||||
|
||||
`git diff --stat` + `git status` for breadth. Then full `git diff` (if empty: `git diff --cached`, then `git diff HEAD~1`).
|
||||
For every file in the diff, read surrounding context in the actual source file - the diff alone hides bugs in adjacent untouched code.
|
||||
|
||||
**Step 2: Reverse-engineer the intent**
|
||||
|
||||
From the diff, determine the **what**, **how**, and **why**. Present this concisely so the author can confirm or correct,
|
||||
but don't stop here, continue to the full review in the same response.
|
||||
|
||||
**Step 3: Validate**
|
||||
|
||||
Run `tsc --noEmit --pretty` and `npm run lint` (in parallel). Report any errors with the review.
|
||||
If the diff removes/renames identifiers, grep the codebase for stale references to the OLD names. This catches broken guards, stale imports, and incomplete migrations.
|
||||
|
||||
**Step 4: Deep review**
|
||||
|
||||
Evaluate every file in the diff.
|
||||
Leave no rocks unturned - correctness, coherence, completeness, excess, generalization, maintenance burden,
|
||||
codebase consistency, etc.
|
||||
|
||||
**Step 5: Prioritized next steps**
|
||||
|
||||
Think about what happens when the next developer touches this code.
|
||||
Rank findings by severity (bug > correctness > cleanup > cosmetic). Be specific about what to change and where.
|
||||
|
||||
Remember the design values for this codebase: orthogonal features, features that generalize well, modularized and reusable code,
|
||||
type-discriminated data, optimized code, zero maintenance burden. Minimize future pain, etc.
|
||||
@@ -0,0 +1,57 @@
|
||||
---
|
||||
description: Show a hierarchical progress tree of the current conversation thread
|
||||
---
|
||||
|
||||
Analyze this conversation thread and produce a **hierarchical progress tree** - a vertical breadcrumb of the chat and actions from the very start to now.
|
||||
|
||||
**Format:**
|
||||
|
||||
A tree, where every rabbithole that was taken adds a level.
|
||||
|
||||
```
|
||||
[ ] Brief initial phase/ask/goal description
|
||||
[x] Specific thing done or decided - "user quote if relevant"
|
||||
[x] Another step
|
||||
[ ] Sub-phase/rabbithole/etc
|
||||
[x] Done step (if important)
|
||||
[ ] Sub-sub-phase
|
||||
[ ] Current step doing <-- HERE
|
||||
[ ] Next step since this sub-sub-phase was broken out
|
||||
|
||||
[ ] Remaining step
|
||||
[ ] ...
|
||||
|
||||
[ ] Missing, back to the main goal
|
||||
[ ] ...
|
||||
|
||||
### What do we rewind the rabbithole to (once the current level is complete)?
|
||||
...
|
||||
|
||||
### What's up (towards user value) and down (towards deeper code levels) the rabbithole?
|
||||
...
|
||||
|
||||
### What's a good hyphenated title for this chat?
|
||||
...
|
||||
|
||||
```
|
||||
|
||||
**Rules:**
|
||||
- `[x]` done, `[ ]` not done. Parent is done only when ALL children on the next level are `[x]`
|
||||
- Each node: a few words, specific. Quote the user briefly when it captures the intent
|
||||
- Group by logical phases or rabbitholes (when descending to a deeper level of implementation or going off for a temporary tangent or sub-quest), not by messages
|
||||
- Earlier levels that are fully completed don't need to be expanded in subtasks
|
||||
- Root nodes/completed nodes need to show what was "wanted" from them, not being checked because they are shown as earlier phases (i.e. upper hierarchy contains more)
|
||||
- Some earlier sub-phases or even levels of rabbitholes can be marked as done as indented [x] below each other (do not add non-major bullets on already completed nodes)
|
||||
- Insert newlines in between large groups of items
|
||||
- Decisions: state what was chosen, not the alternatives
|
||||
- If a former phase produced no code change or decision, omit
|
||||
- Very important to insert incomplete `[ ]` items for things that were mentioned and are likely useful but were raised at higher levels of the rabbithole, so they must come after, when unwinding the stack
|
||||
- Keep it short, tight (min 0 max item count below *ONE QUARTER the user messages*). This is a navigation aid, not a transcript
|
||||
|
||||
It's important for this to represent a high-level sequence of important actions, turns, pivots, and rabbitholes, all focused on trying to solve something.
|
||||
|
||||
First think through it looking at all the chat from the back to the front, then front to back, user requests, and understand the main storybeats. This is useful especially to remove already done leaves that don't add much if shown.
|
||||
So think about the full list, so you have it all in front of you when you do the last pass to show it to me.
|
||||
It's important to see the progress of what we were doing (e.g. see that we set out to do something at the beginning, but a few items of those are still incomplete, also because we took 2 detours to fix more things in the meantime...).
|
||||
|
||||
At the end, answer the questions in the Format, with brief bullet points.
|
||||
@@ -0,0 +1,63 @@
|
||||
---
|
||||
description: Sync LLM parameter options between full model dialog and chat side panel
|
||||
---
|
||||
|
||||
Audit and sync LLM parameter configurations between the two UI editors. Goal: identical `value` fields in option arrays + equivalent onChange logic. Labels/descriptions can differ for UI space.
|
||||
|
||||
**Files to Compare:**
|
||||
1. **Full Model Dialog**: `src/modules/llms/models-modal/LLMParametersEditor.tsx` (main branch)
|
||||
2. **Chat Side Panel**: `src/apps/chat/components/layout-panel/ChatPanelModelParameters.tsx` (main derived branches only)
|
||||
|
||||
**Reference Documentation:**
|
||||
- Parameter system: `kb/systems/LLM-parameters-system.md`
|
||||
- Parameter registry: `src/common/stores/llms/llms.parameters.ts`
|
||||
|
||||
**Task: Perform a comprehensive audit**
|
||||
|
||||
1. **Read both files** and extract all option arrays (e.g., `_reasoningEffortOptions`, `_antEffortOptions`, `_geminiThinkingLevelOptions`, etc.)
|
||||
|
||||
2. **Check for missing parameters:**
|
||||
- Parameters handled in `LLMParametersEditor.tsx` but NOT in `ChatPanelModelParameters.tsx`
|
||||
- Parameters in `ChatPanelModelParameters.tsx`'s `_interestingParameters` array but missing UI controls
|
||||
- Note: The side panel intentionally shows only "interesting" parameters - focus on those listed in `_interestingParameters`
|
||||
|
||||
3. **Check for value mismatches** between corresponding option arrays:
|
||||
- Different number of options (e.g., 3 vs 4 options)
|
||||
- Same label but different `value` (this causes the bug in issue #926)
|
||||
- Different labels for the same `value`
|
||||
- Missing `_UNSPECIFIED`/Default option in one but not the other
|
||||
|
||||
4. **Check onChange handler consistency:**
|
||||
- Both should remove parameter on `_UNSPECIFIED` selection
|
||||
- Both should set explicit values the same way
|
||||
- Watch for conditions like `value === 'high'` that may differ
|
||||
|
||||
**Output Format:**
|
||||
|
||||
```
|
||||
## Parameter Sync Audit Report
|
||||
|
||||
### Missing Parameters
|
||||
- [ ] `llmVndXyz` - In full dialog, missing from side panel
|
||||
|
||||
### Value Mismatches
|
||||
- [ ] `_xyzOptions`:
|
||||
- Full dialog: [values...]
|
||||
- Side panel: [values...]
|
||||
- Issue: [description]
|
||||
|
||||
### Handler Inconsistencies
|
||||
- [ ] `llmVndXyz` onChange differs: [explanation]
|
||||
|
||||
### Recommended Fixes
|
||||
1. [Specific fix with code snippet if needed]
|
||||
```
|
||||
|
||||
**Fix Direction:** Full dialog is source of truth. Update side panel to match its values when mismatched.
|
||||
|
||||
**Notes:**
|
||||
- Side panel uses shorter descriptions (space-constrained) - that's fine
|
||||
- Variable names may differ (e.g., `_anthropicEffortOptions` vs `_antEffortOptions`) - that's fine, but same is better
|
||||
- `value` fields must be identical sets
|
||||
- `_UNSPECIFIED` must mean the same thing in both
|
||||
- onChange: remove on `_UNSPECIFIED`, set explicit value otherwise
|
||||
@@ -4,7 +4,7 @@ description: Update Alibaba model definitions with latest pricing and capabiliti
|
||||
|
||||
Update `src/modules/llms/server/openai/models/alibaba.models.ts` with latest model definitions.
|
||||
|
||||
Reference `src/modules/llms/server/llm.server.types.ts` and `src/modules/llms/server/models.data.ts` for context only. Focus on the model file, do not descend into other code.
|
||||
Reference `src/modules/llms/server/llm.server.types.ts` and `src/modules/llms/server/models.mappings.ts` for context only. Focus on the model file, do not descend into other code.
|
||||
|
||||
**Primary Sources:**
|
||||
- Models & Pricing: https://www.alibabacloud.com/help/en/model-studio/models
|
||||
|
||||
@@ -4,17 +4,46 @@ description: Update Anthropic model definitions with latest pricing and capabili
|
||||
|
||||
Update `src/modules/llms/server/anthropic/anthropic.models.ts` with latest model definitions.
|
||||
|
||||
Reference `src/modules/llms/server/llm.server.types.ts` and `src/modules/llms/server/models.data.ts` for context only. Focus on the model file, do not descend into other code.
|
||||
Reference files (for context only, do not modify):
|
||||
- `src/modules/llms/server/llm.server.types.ts`
|
||||
- `src/modules/llms/server/models.mappings.ts`
|
||||
- `src/common/stores/llms/llms.parameters.ts`
|
||||
|
||||
**Primary Sources:**
|
||||
- Models: https://docs.claude.com/en/docs/about-claude/models/overview
|
||||
- Pricing: https://claude.com/pricing#api
|
||||
- Deprecations: https://docs.claude.com/en/docs/about-claude/model-deprecations
|
||||
**Workflow: Start with recent changes, then verify the full model list.**
|
||||
|
||||
**Fallbacks if blocked:** Check Anthropic TypeScript SDK at https://github.com/anthropics/anthropic-sdk-typescript, search "anthropic models latest pricing", "anthropic latest models", or search GitHub for latest model prices and context windows
|
||||
**Primary Sources (append `.md` to any path for clean markdown):**
|
||||
1. Recent changes: https://platform.claude.com/docs/en/release-notes/overview.md
|
||||
2. Models & IDs: https://platform.claude.com/docs/en/about-claude/models/overview.md
|
||||
3. Pricing (base, cache, batch, long context): https://platform.claude.com/docs/en/about-claude/pricing.md
|
||||
4. Deprecations & retirement dates: https://platform.claude.com/docs/en/about-claude/model-deprecations.md
|
||||
|
||||
**Discovering feature docs:** The release notes and models overview markdown
|
||||
contain inline links to feature-specific pages (thinking modes, effort,
|
||||
context windows, what's-new pages, etc.). When a new capability is
|
||||
referenced, follow those links - append `.md` to get markdown. Examples of
|
||||
pages you might discover this way:
|
||||
- `about-claude/models/whats-new-claude-*` - per-generation changes
|
||||
- `build-with-claude/extended-thinking` - thinking budget configuration
|
||||
- `build-with-claude/effort` - effort parameter levels
|
||||
- `build-with-claude/adaptive-thinking` - adaptive thinking mode
|
||||
|
||||
**Fallback web pages** (crawl if `.md` paths break or structure changes):
|
||||
- https://platform.claude.com/docs/en/about-claude/models/overview
|
||||
- https://platform.claude.com/docs/en/about-claude/pricing
|
||||
- https://platform.claude.com/docs/en/release-notes/overview
|
||||
- https://claude.com/pricing
|
||||
|
||||
**Fallbacks if blocked:** Check the Anthropic TypeScript SDK at
|
||||
https://github.com/anthropics/anthropic-sdk-typescript, or web-search
|
||||
for "anthropic models latest pricing" / "anthropic latest models".
|
||||
|
||||
**Important:**
|
||||
- Review the full model list for additions, removals, and price changes
|
||||
- For new models: check which `parameterSpecs` are needed (thinking mode,
|
||||
effort levels, 1M context, skills, web tools) by reading the linked
|
||||
feature docs and comparing with existing model entries
|
||||
- When thinking/effort semantics change between generations
|
||||
(e.g. adaptive vs manual thinking), document in comments
|
||||
- Minimize whitespace/comment changes, focus on content
|
||||
- Preserve comments to make diffs easy to review
|
||||
- Flag broken links or unexpected content
|
||||
@@ -4,7 +4,7 @@ description: Update DeepSeek model definitions with latest pricing and capabilit
|
||||
|
||||
Update `src/modules/llms/server/openai/models/deepseek.models.ts` with latest model definitions.
|
||||
|
||||
Reference `src/modules/llms/server/llm.server.types.ts` and `src/modules/llms/server/models.data.ts` for context only. Focus on the model file, do not descend into other code.
|
||||
Reference `src/modules/llms/server/llm.server.types.ts` and `src/modules/llms/server/models.mappings.ts` for context only. Focus on the model file, do not descend into other code.
|
||||
|
||||
**Primary Sources:**
|
||||
- Pricing: https://api-docs.deepseek.com/quick_start/pricing
|
||||
|
||||
@@ -0,0 +1,91 @@
|
||||
---
|
||||
description: Update/validate dynamic vendor model parsers (OpenRouter, TogetherAI, Alibaba, Azure, Novita, ChutesAI, FireworksAI, TLUS, LM Studio, LocalAI, FastAPI)
|
||||
---
|
||||
|
||||
Validate that the dynamic (API-fetched) vendor model parsers are up to date and not silently broken.
|
||||
|
||||
These vendors do NOT have hardcoded model lists - they fetch models from APIs at runtime. But their parsers, filters, heuristic detection, and capability mapping can break if upstream APIs change. This skill covers all dynamic vendors NOT covered by the other `llms:update-models-{vendor}` skills.
|
||||
|
||||
## Vendors to Validate
|
||||
|
||||
### High Risk
|
||||
|
||||
**OpenRouter** - `src/modules/llms/server/openai/models/openrouter.models.ts`
|
||||
- Most complex parser. Vendor-specific parameter inheritance (Anthropic thinking variants, Gemini thinking/image, OpenAI reasoning effort, xAI/DeepSeek reasoning).
|
||||
- Hardcoded family ordering list (lines ~24-37) - check if new leading vendors are missing.
|
||||
- Hardcoded old/deprecated model hiding list (lines ~39-49) - check if stale.
|
||||
- Cache pricing detection (Anthropic-style vs OpenAI-style) - verify format still valid.
|
||||
- Variant injection for Anthropic thinking/non-thinking - verify still correct.
|
||||
- Reference: https://openrouter.ai/docs/models
|
||||
|
||||
### Medium Risk
|
||||
|
||||
**Novita** - `src/modules/llms/server/openai/models/novita.models.ts`
|
||||
- Features array mapping (`function-calling`, `reasoning`, `structured-outputs`) and input modalities parsing.
|
||||
- Pricing unit conversion (hundredths of cent per million → dollars per 1K).
|
||||
- Hostname heuristic: `novita.ai`.
|
||||
|
||||
**ChutesAI** - `src/modules/llms/server/openai/models/chutesai.models.ts`
|
||||
- Custom `max_model_len` field for context window.
|
||||
- Assumes all models support Vision + Functions (aggressive).
|
||||
- Hostname heuristic: `.chutes.ai`.
|
||||
|
||||
**FireworksAI** - `src/modules/llms/server/openai/models/fireworksai.models.ts`
|
||||
- Relies on provider capability flags: `supports_chat`, `supports_image_input`, `supports_tools`.
|
||||
- Hostname heuristic: `fireworks.ai/`.
|
||||
|
||||
**TogetherAI** - `src/modules/llms/server/openai/models/together.models.ts`
|
||||
- Type allow-list (`type: 'chat'`), vision detection by string match.
|
||||
- Custom wire schema with pricing conversion.
|
||||
|
||||
**TLUS** - `src/modules/llms/server/openai/models/tlusapi.models.ts`
|
||||
- Detected by response structure (`total_models`, `free_models`, `pro_models` fields).
|
||||
- Capability enum mapping (`text`, `vision`, `audio`, `tool-calling`, `reasoning`, `websearch`).
|
||||
- Tier-based pricing (`free` vs paid).
|
||||
|
||||
**Alibaba** - `src/modules/llms/server/openai/models/alibaba.models.ts`
|
||||
- Model list was cleared (dynamic-only). Exclusion patterns for non-chat models.
|
||||
- Assumes 128K context and Vision+Functions for all models (overly permissive).
|
||||
- Check if hardcoded data should be restored now that naming has stabilized.
|
||||
|
||||
### Low Risk (local/generic - validate only if issues reported)
|
||||
|
||||
**Azure** - `src/modules/llms/server/openai/models/azure.models.ts`
|
||||
- Custom deployments API, not `/v1/models`. User-specific. Deployment name fallback logic.
|
||||
|
||||
**LM Studio** - `src/modules/llms/server/openai/models/lmstudio.models.ts`
|
||||
- Local service, native API (`/api/v1/models`). GGUF metadata parsing, capability flags.
|
||||
|
||||
**LocalAI** - `src/modules/llms/server/openai/models/localai.models.ts`
|
||||
- Local service. String-based hide list, vision/reasoning detection by name pattern.
|
||||
|
||||
**FastAPI** - `src/modules/llms/server/openai/models/fastapi.models.ts`
|
||||
- Generic passthrough. Detected by `owned_by === 'fastchat'`. Minimal parsing.
|
||||
|
||||
## Validation Checklist
|
||||
|
||||
For each vendor (prioritize High > Medium > Low):
|
||||
|
||||
1. **Read the parser file** and check for:
|
||||
- Deny/allow lists that may be stale (new model families missing)
|
||||
- Capability assumptions that may be wrong (e.g. "all models support vision")
|
||||
- Field names that may have changed upstream
|
||||
- Pricing conversion math that may use wrong units
|
||||
|
||||
2. **Check upstream docs** (where available) for:
|
||||
- API response schema changes
|
||||
- New model types or capability fields
|
||||
- Deprecated fields
|
||||
|
||||
3. **Cross-reference with OpenRouter** (aggregator):
|
||||
- OpenRouter surfaces models from many of these vendors
|
||||
- If OpenRouter shows capabilities that a vendor's parser misses, the parser is stale
|
||||
|
||||
4. **Fix issues found** - update parsers, filters, deny lists as needed.
|
||||
|
||||
5. Run `tsc --noEmit` after changes.
|
||||
|
||||
**Important:**
|
||||
- Do NOT convert dynamic vendors to hardcoded lists - the dynamic approach is intentional
|
||||
- Focus on parser correctness, not model coverage
|
||||
- Flag any vendor whose API response format seems to have changed substantially
|
||||
@@ -4,7 +4,7 @@ description: Update Gemini model definitions with latest pricing and capabilitie
|
||||
|
||||
Update `src/modules/llms/server/gemini/gemini.models.ts` with latest model definitions.
|
||||
|
||||
Reference `src/modules/llms/server/llm.types.ts`, `src/modules/llms/server/llm.server.types.ts`, and `src/modules/llms/server/models.data.ts` for context only. Focus on the model file, do not descend into other code.
|
||||
Reference `src/modules/llms/server/llm.types.ts`, `src/modules/llms/server/llm.server.types.ts`, and `src/modules/llms/server/models.mappings.ts` for context only. Focus on the model file, do not descend into other code.
|
||||
|
||||
**Primary Sources:**
|
||||
- Models: https://ai.google.dev/gemini-api/docs/models
|
||||
|
||||
@@ -4,13 +4,13 @@ description: Update Groq model definitions with latest pricing and capabilities
|
||||
|
||||
Update `src/modules/llms/server/openai/models/groq.models.ts` with latest model definitions.
|
||||
|
||||
Reference `src/modules/llms/server/llm.server.types.ts` and `src/modules/llms/server/models.data.ts` for context only. Focus on the model file, do not descend into other code.
|
||||
Reference `src/modules/llms/server/llm.server.types.ts` and `src/modules/llms/server/models.mappings.ts` for context only. Focus on the model file, do not descend into other code.
|
||||
|
||||
**Primary Sources:**
|
||||
- Models: https://console.groq.com/docs/models
|
||||
**Primary Source:**
|
||||
- Fetch https://console.groq.com/docs/models.md directly (markdown format, no search needed)
|
||||
- Pricing: https://groq.com/pricing/
|
||||
|
||||
**Fallbacks if blocked:** Search "groq models latest pricing", "groq latest models", "groq api models", or search GitHub for latest model prices and context windows
|
||||
**Do NOT use web search.** The `.md` endpoint provides structured markdown content directly.
|
||||
|
||||
**Important:**
|
||||
- Review the full model list for additions, removals, and price changes
|
||||
|
||||
@@ -0,0 +1,19 @@
|
||||
---
|
||||
description: Update Kimi model definitions with latest pricing and capabilities
|
||||
---
|
||||
|
||||
Update `src/modules/llms/server/openai/models/moonshot.models.ts` with latest model definitions.
|
||||
|
||||
Reference `src/modules/llms/server/llm.server.types.ts` and `src/modules/llms/server/models.mappings.ts` for context only. Focus on the model file, do not descend into other code.
|
||||
|
||||
**Primary Sources (fetch directly, no search needed):**
|
||||
- Pricing: https://platform.moonshot.ai/docs/pricing/chat
|
||||
- API Reference: https://platform.moonshot.ai/docs/api/chat
|
||||
|
||||
**Do NOT use web search.** Fetch the URLs directly, or, if they are inaccessible, ask the user to provide the data.
|
||||
|
||||
**Important:**
|
||||
- Review the full model list for additions, removals, and price changes
|
||||
- Minimize whitespace/comment changes, focus on content
|
||||
- Preserve comments to make diffs easy to review
|
||||
- Flag broken links or unexpected content
|
||||
@@ -0,0 +1,26 @@
|
||||
---
|
||||
description: Update MiniMax model definitions with latest pricing and capabilities
|
||||
---
|
||||
|
||||
Update `src/modules/llms/server/openai/models/minimax.models.ts` with latest model definitions.
|
||||
|
||||
Reference `src/modules/llms/server/llm.server.types.ts` and `src/modules/llms/server/models.mappings.ts` for context only. Focus on the model file, do not descend into other code.
|
||||
|
||||
**Primary Sources:**
|
||||
- Models & Changelog: https://platform.minimax.io/docs/release-notes/models.md
|
||||
- Pricing: https://platform.minimax.io/docs/guides/pricing-paygo.md
|
||||
- Pricing Overview: https://platform.minimax.io/docs/pricing/overview.md
|
||||
- Text Generation API: https://platform.minimax.io/docs/guides/text-generation.md
|
||||
|
||||
**Note:** MiniMax is a hardcoded-only vendor (no `/v1/models` API yet). All model IDs, context windows, and pricing must be manually maintained from the docs. Pay attention to new model releases (M-series), highspeed variants, and deprecated models.
|
||||
|
||||
**Fallbacks if blocked:** Search "minimax api models pricing", "minimax m2 m3 models", "minimax api changelog" or check https://openrouter.ai models list for MiniMax entries.
|
||||
|
||||
**Important:**
|
||||
- Models are `ModelDescriptionSchema[]` objects (not ManualMappings) - match existing pattern in the file
|
||||
- Review the full model list for additions, removals, and price changes
|
||||
- Check for new `-highspeed` variants and new model families
|
||||
- Verify context window sizes and max completion tokens against docs
|
||||
- Minimize whitespace/comment changes, focus on content
|
||||
- Preserve comments to make diffs easy to review
|
||||
- Flag broken links or unexpected content
|
||||
@@ -4,7 +4,7 @@ description: Update Mistral model definitions with latest pricing and capabiliti
|
||||
|
||||
Update `src/modules/llms/server/openai/models/mistral.models.ts` with latest model definitions.
|
||||
|
||||
Reference `src/modules/llms/server/llm.server.types.ts` and `src/modules/llms/server/models.data.ts` for context only. Focus on the model file, do not descend into other code.
|
||||
Reference `src/modules/llms/server/llm.server.types.ts` and `src/modules/llms/server/models.mappings.ts` for context only. Focus on the model file, do not descend into other code.
|
||||
|
||||
**Primary Sources:**
|
||||
- Models: https://docs.mistral.ai/getting-started/models/models_overview/
|
||||
|
||||
@@ -4,32 +4,33 @@ description: Update Ollama model definitions with latest featured models
|
||||
|
||||
Update `src/modules/llms/server/ollama/ollama.models.ts` with latest model definitions.
|
||||
|
||||
Reference `src/modules/llms/server/llm.server.types.ts` and `src/modules/llms/server/models.data.ts` for context only. Focus on the model file, do not descend into other code.
|
||||
Reference `src/modules/llms/server/llm.server.types.ts` and `src/modules/llms/server/models.mappings.ts` for context only. Focus on the model file, do not descend into other code.
|
||||
|
||||
**Automated Workflow:**
|
||||
```bash
|
||||
# 1. Fetch the HTML
|
||||
curl -s "https://ollama.com/library?sort=featured" -o /tmp/ollama-featured.html
|
||||
# 1. Fetch the HTML to a cross-platform temp path (sorted by newest for stable ordering)
|
||||
curl -s "https://ollama.com/library?sort=newest" -o "$(node -p "require('os').tmpdir()")/ollama-newest.html"
|
||||
|
||||
# 2. Parse it with the script
|
||||
node .claude/scripts/parse-ollama-models.js > /tmp/ollama-parsed.txt 2>&1
|
||||
|
||||
# 3. Review the parsed output
|
||||
cat /tmp/ollama-parsed.txt
|
||||
# 2. Parse it with the script (auto-finds the file in os.tmpdir())
|
||||
node .claude/scripts/parse-ollama-models.js 2>&1
|
||||
```
|
||||
|
||||
The parser outputs: `modelName|pulls|capabilities|sizes`
|
||||
- Example: `deepseek-r1|66200000|tools,thinking|1.5b,7b,8b,14b,32b,70b,671b`
|
||||
|
||||
**Primary Sources:**
|
||||
- Model Library: https://ollama.com/library?sort=featured
|
||||
- Model Library: https://ollama.com/library?sort=newest
|
||||
- Parser script: `.claude/scripts/parse-ollama-models.js`
|
||||
|
||||
**Fallbacks if blocked:** Check https://github.com/ollama/ollama, search "ollama featured models", "ollama latest models", or search GitHub for latest model info
|
||||
|
||||
**Important:**
|
||||
- Skip models below 50,000 pulls (parser does this automatically)
|
||||
- Sort them in the EXACT same order as the source (featured models)
|
||||
- Parser filtering rules:
|
||||
- Top 30 newest models are always included (regardless of pull count)
|
||||
- After top 30, only models with 50K+ pulls are included
|
||||
- Models with 'cloud' capability are automatically excluded
|
||||
- Models with 'embedding' capability are automatically excluded
|
||||
- Sort them in the EXACT same order as the source (newest first, for stable ordering)
|
||||
- Extract tags: 'tools' → hasTools, 'vision' → hasVision, 'embedding' → isEmbeddings (note the 's'), 'thinking' → tags only
|
||||
- Extract 'b' tags (1.5b, 7b, 32b) to tags field
|
||||
- Set today's date (YYYYMMDD format) for newly added models only
|
||||
|
||||
@@ -4,7 +4,7 @@ description: Update OpenAI model definitions with latest pricing and capabilitie
|
||||
|
||||
Update `src/modules/llms/server/openai/models/openai.models.ts` with latest model definitions.
|
||||
|
||||
Reference `src/modules/llms/server/llm.server.types.ts` and `src/modules/llms/server/models.data.ts` for context only. Focus on the model file, do not descend into other code.
|
||||
Reference `src/modules/llms/server/llm.server.types.ts` and `src/modules/llms/server/models.mappings.ts` for context only. Focus on the model file, do not descend into other code.
|
||||
|
||||
**Manual hint:** For pricing page, expand all tables before copying content.
|
||||
|
||||
|
||||
@@ -4,7 +4,7 @@ description: Update OpenPipe model definitions with latest pricing and capabilit
|
||||
|
||||
Update `src/modules/llms/server/openai/models/openpipe.models.ts` with latest model definitions.
|
||||
|
||||
Reference `src/modules/llms/server/llm.server.types.ts` and `src/modules/llms/server/models.data.ts` for context only. Focus on the model file, do not descend into other code.
|
||||
Reference `src/modules/llms/server/llm.server.types.ts` and `src/modules/llms/server/models.mappings.ts` for context only. Focus on the model file, do not descend into other code.
|
||||
|
||||
**Primary Sources:**
|
||||
- Base Models: https://docs.openpipe.ai/base-models
|
||||
|
||||
@@ -4,7 +4,7 @@ description: Update Perplexity model definitions with latest pricing and capabil
|
||||
|
||||
Update `src/modules/llms/server/openai/models/perplexity.models.ts` with latest model definitions.
|
||||
|
||||
Reference `src/modules/llms/server/llm.server.types.ts` and `src/modules/llms/server/models.data.ts` for context only. Focus on the model file, do not descend into other code.
|
||||
Reference `src/modules/llms/server/llm.server.types.ts` and `src/modules/llms/server/models.mappings.ts` for context only. Focus on the model file, do not descend into other code.
|
||||
|
||||
**Primary Sources:**
|
||||
- Models: https://docs.perplexity.ai/getting-started/models
|
||||
|
||||
@@ -4,7 +4,7 @@ description: Update xAI model definitions with latest pricing and capabilities
|
||||
|
||||
Update `src/modules/llms/server/openai/models/xai.models.ts` with latest model definitions.
|
||||
|
||||
Reference `src/modules/llms/server/llm.server.types.ts` and `src/modules/llms/server/models.data.ts` for context only. Focus on the model file, do not descend into other code.
|
||||
Reference `src/modules/llms/server/llm.server.types.ts` and `src/modules/llms/server/models.mappings.ts` for context only. Focus on the model file, do not descend into other code.
|
||||
|
||||
**Primary Sources:**
|
||||
- Models & Pricing: https://docs.x.ai/docs/models?cluster=us-east-1#detailed-pricing-for-all-grok-models
|
||||
|
||||
@@ -0,0 +1,66 @@
|
||||
---
|
||||
description: Verify model parameterSpecs match API-validated sweep data
|
||||
argument-hint: openai | anthropic | gemini | xai (or empty for all)
|
||||
---
|
||||
|
||||
# Verify LLM Parameters
|
||||
|
||||
Compare model `parameterSpecs` in definition files against API-validated sweep data.
|
||||
|
||||
If `$ARGUMENTS` provided, verify only that dialect, which includes reading the pair of sweep results and model definitions. Otherwise verify all four, and read the pairs in sequence.
|
||||
|
||||
## Files
|
||||
|
||||
**Sweep results** (source of truth for select parameters):
|
||||
- `tools/develop/llm-parameter-sweep/llm-{dialect}-parameters-sweep.json`
|
||||
By the time you see these files, the repo owner has already updated them via `tools/develop/llm-parameter-sweep/sweep.sh` (very long running, 15 min per vendor).
|
||||
|
||||
**Model definitions (source of truth for model definitions for the user and application, including constants, interfaces, supported parameters and sometimes allowed parameter values)**:
|
||||
- OpenAI: `src/modules/llms/server/openai/models/openai.models.ts`
|
||||
- Anthropic: `src/modules/llms/server/anthropic/anthropic.models.ts`
|
||||
- Gemini: `src/modules/llms/server/gemini/gemini.models.ts`
|
||||
- xAI: `src/modules/llms/server/openai/models/xai.models.ts`
|
||||
|
||||
## Task
|
||||
|
||||
The sweep data is the source of truth for allowed model parameter values or value ranges, and for the `fn` function-calling capability probe.
|
||||
|
||||
For each model in the sweep, verify the model definition exposes exactly those capabilities - no more, no less. This includes:
|
||||
- The parameter is present in parameterSpecs
|
||||
- The paramId variant covers exactly the values from the sweep, if applicable
|
||||
- `LLM_IF_OAI_Fn` in `interfaces` matches `"roundtrip"` in the sweep's `fn` array (see below)
|
||||
- etc.
|
||||
|
||||
Report models where the definition doesn't match the sweep.
|
||||
|
||||
## Parameter Mapping
|
||||
|
||||
Example parameter mapping. Note that new parameters may have been added to both the definition, and the sweep.
|
||||
The objective of the sweep is to hint at model definition values, but the model definitions are what matters for Big-AGI,
|
||||
and need to be carefully updated, otherwise thousands of clients may break.
|
||||
|
||||
| Dialect | Sweep Key | Model paramId |
|
||||
|-----------|--------------------------|------------------------------|
|
||||
| OpenAI | `oai-reasoning-effort` | `llmVndOaiEffort` |
|
||||
| OpenAI | `oai-verbosity` | `llmVndOaiVerbosity` |
|
||||
| OpenAI | `oai-image-generation` | `llmVndOaiImageGeneration` |
|
||||
| OpenAI | `oai-web-search` | `llmVndOaiWebSearchContext` |
|
||||
| Anthropic | `ant-effort` | `llmVndAntEffort` |
|
||||
| Anthropic | `ant-thinking-budget` | `llmVndAntThinkingBudget` |
|
||||
| Gemini | `gemini-thinking-level` | `llmVndGemEffort` |
|
||||
| Gemini | `gemini-thinking-budget` | `llmVndGeminiThinkingBudget` |
|
||||
| xAI | `xai-web-search` | `llmVndXaiWebSearch` |
|
||||
|
||||
## Function-Calling Capability (`fn`)
|
||||
|
||||
The sweep `fn` array is a capability probe (not a paramId). `"roundtrip"` is the authoritative signal - full tool-call -> response -> coherent follow-up. `LLM_IF_OAI_Fn` in the model's `interfaces` must track `"roundtrip"`: the interface flag is present if and only if `"roundtrip"` appears in the sweep's `fn` array.
|
||||
|
||||
Flag:
|
||||
- `"roundtrip"` in sweep but `LLM_IF_OAI_Fn` missing (or vice versa)
|
||||
- `fn` contains `"auto"`/`"required"` without `"roundtrip"` - partial capability, call it out
|
||||
|
||||
## Output
|
||||
|
||||
Report first for every model the expected values from the sweep, then the actual values from the definition, then the mismatches.
|
||||
|
||||
Finally make one table for each dialect listing all models with mismatches and the specific issues.
|
||||
@@ -0,0 +1,56 @@
|
||||
---
|
||||
description: Generate changelog bullets for big-agi.com/changes
|
||||
argument-hint: date like "2026-01-10" or empty for auto-detect
|
||||
---
|
||||
|
||||
Generate changelog bullets for a single entry in https://big-agi.com/changes
|
||||
|
||||
**Step 1: Find the starting date**
|
||||
|
||||
IMPORTANT: This repo rebases frequently, so commits are INTERLEAVED throughout history.
|
||||
New commits can appear at line 10, 500, or 1800. Use AUTHOR DATE (`%ad`) to filter - it's preserved during rebases.
|
||||
|
||||
If `$ARGUMENTS` provided, use it as the cutoff date.
|
||||
|
||||
If NO argument:
|
||||
1. Fetch https://big-agi.com/changes to get the most recent changelog date
|
||||
2. Use that date as the cutoff
|
||||
|
||||
**Step 2: Get commits by author date**
|
||||
|
||||
Filter commits by author date to catch ALL new commits regardless of position in history:
|
||||
|
||||
```bash
|
||||
# For commits after Jan 10, 2026 (adjust date pattern as needed)
|
||||
git log --oneline --no-merges --format="%h %ad %s" --date=short | grep "2026-01-1[1-9]\|2026-01-2\|2026-02"
|
||||
|
||||
# Verify interleaving by checking line numbers
|
||||
git log --oneline --no-merges --format="%h %ad %s" --date=short | grep -n "2026-01-1[1-9]"
|
||||
```
|
||||
|
||||
The line numbers prove commits are scattered (e.g., lines 14, 638, 1156, 1803 = interleaved).
|
||||
|
||||
**Step 3: Write bullets**
|
||||
|
||||
Real examples from big-agi.com/changes:
|
||||
- "Gemini 3 Flash support with 4-level thinking: high, medium, low, minimal"
|
||||
- "Cloud Sync launched! - long awaited and top requested"
|
||||
- "Deepseek V3.2 Speciale comes with almost Gemini 3 Pro performance but 20 times cheaper"
|
||||
- "Anthropic Opus 4.5 with controls for effort (speed tradeoff), thinking budget, search"
|
||||
- "Login with email, via magic link"
|
||||
- "Mobile UX fixes for popups drag/interaction"
|
||||
|
||||
**Rules:**
|
||||
|
||||
1. **Order by importance** - most significant changes first, minor fixes last
|
||||
2. **Feature-first, no verb prefixes** - "Gemini 3 support" not "Add Gemini 3 support"
|
||||
3. **Model names lead** when it's about LLMs
|
||||
4. **Specific details** - "4-level thinking: high, medium, low, minimal" not "multiple thinking levels"
|
||||
5. **One-liners** - short, no fluff
|
||||
6. **Consolidate commits** - 10 persona editor commits = 1 bullet
|
||||
7. **No corporate speak** - no "enhanced", "streamlined", "robust", "leverage"
|
||||
|
||||
**Skip:** WIP, internal refactors, KB docs, automation, review cleanups, trivial fixes, deps bumps, CI changes.
|
||||
|
||||
**Output:** Just bullets, ready to paste. 2-5 bullets but adapt depending on scope, especially
|
||||
in relation to the usual https://big-agi.com/changes entries.
|
||||
@@ -0,0 +1,149 @@
|
||||
---
|
||||
description: Execute the Big-AGI release process
|
||||
argument-hint: version like "2.0.4" or empty to auto-increment patch
|
||||
---
|
||||
|
||||
Execute the release process for Big-AGI. Go step-by-step, waiting for user approval between major steps.
|
||||
|
||||
## Step 1: Determine Version
|
||||
|
||||
If `$ARGUMENTS` provided, use it. Otherwise, read `package.json` and increment patch version.
|
||||
|
||||
## Step 2: Gather Context
|
||||
|
||||
Before drafting, gather what changed:
|
||||
1. `git log --oneline` since last release tag to see all commits
|
||||
2. Fetch https://big-agi.com/changes to see what daily entries already covered
|
||||
3. `gh issue list --state closed --search "closed:>LAST_RELEASE_DATE"` to find closed issues
|
||||
4. Check auto-generated release notes (`gh release create --generate-notes --draft`) for community PRs and new contributors
|
||||
|
||||
## Step 3: Update Files
|
||||
|
||||
1. **package.json** - Update `version` field
|
||||
2. **src/common/app.release.ts** - Increment `Monotonics.NewsVersion` (e.g., 203 → 204)
|
||||
3. **src/apps/news/news.data.tsx** - Add new entry at top of `NewsItems` array
|
||||
|
||||
For the news entry, ask user for release name and key highlights.
|
||||
|
||||
**News entry style** - Draft is a starting point, user will refine:
|
||||
- Models lead when model-heavy, grouped together
|
||||
- Callout features get own bullet with colon explanation
|
||||
- UX items grouped, minimal bold
|
||||
- Fixes last, brief
|
||||
- Release name stays subtle - don't oversell the theme
|
||||
- Apply the draft, then let the user edit manually and re-read after - don't over-iterate
|
||||
|
||||
Use `<B>`, `<B issue={N}>`, `<B href='url'>`. Re-read file after user edits.
|
||||
|
||||
4. User runs `npm i` to update lockfile
|
||||
|
||||
## Step 4: README
|
||||
|
||||
Update `README.md`:
|
||||
- Line ~46: Update model examples if new flagship models
|
||||
- Line ~147: Add release bullet above previous version
|
||||
|
||||
**Style:** `- Open X.Y.Z: **Name** feature1, feature2, feature3`
|
||||
|
||||
## Step 5: Git Operations
|
||||
|
||||
User commits changes, then:
|
||||
```bash
|
||||
git tag vX.Y.Z
|
||||
git push opensource vX.Y.Z
|
||||
```
|
||||
|
||||
## Step 6: GitHub Release
|
||||
|
||||
Create release with `gh release create` using `--notes` (not `--body`).
|
||||
|
||||
**Structure** - discursive intro paragraph, then themed sections, not a generic "What's New" header:
|
||||
|
||||
```
|
||||
# Big-AGI X.Y.Z - Name
|
||||
|
||||
### Theme tagline.
|
||||
|
||||
1-2 sentence discursive paragraph setting the release theme - what it means, not a feature list.
|
||||
|
||||
### Section Name (e.g., Models & Parameters)
|
||||
- Bullet points for specifics
|
||||
- Group by theme, not by commit order
|
||||
|
||||
### Vendor/Platform Section (when enough substance)
|
||||
- Give a vendor its own section if 3+ related changes (e.g., Anthropic, AWS Bedrock)
|
||||
|
||||
### Also New
|
||||
- Remaining features, scannable
|
||||
|
||||
## New Contributors
|
||||
* @user made their first contribution (brief description) in PR_URL
|
||||
|
||||
**Full Changelog**: https://github.com/enricoros/big-AGI/compare/vPREV...vNEW
|
||||
|
||||
## Get Started
|
||||
Available now at [big-agi.com](https://big-agi.com), via Docker, or self-host from source.
|
||||
```
|
||||
|
||||
## Step 7: Changelog (big-agi.com/changes)
|
||||
|
||||
The Open release entry on big-agi.com/changes is lightweight - just 1-2 bullets announcing the stable release, since daily entries already covered the individual features. Use `/rel:changelog` to generate.
|
||||
|
||||
**Style:** `- Open X.Y.Z Name stable release on GitHub and Docker`
|
||||
followed by 1 bullet summarizing what landed in the final days since the last daily entry.
|
||||
|
||||
## Step 8: Announcements
|
||||
|
||||
Draft for user to post:
|
||||
|
||||
**Twitter** - Thematic, not feature dumps. Talk about what it means, not what it lists:
|
||||
```
|
||||
Big-AGI Open X.Y.Z is out!
|
||||
|
||||
[Theme - e.g., "Lots of love to models: native support, latest protocols, total configuration - puts you in control."]
|
||||
|
||||
[One more angle, natural prose]
|
||||
|
||||
[Optional link]
|
||||
```
|
||||
|
||||
**Discord** - Structured with bold headers:
|
||||
```
|
||||
## :partyblob: Big-AGI **Open** X.Y.Z
|
||||
|
||||
**Category:** Items
|
||||
**Category:** Items
|
||||
**More:** Count of commits/fixes
|
||||
```
|
||||
|
||||
## Step 9: Cover Image Prompts
|
||||
|
||||
Offer cover image prompt alternatives for the release. Read past prompts from `news.data.tsx` comments (lines ~24-37) for the pattern.
|
||||
|
||||
**Pattern:** Always a capybara sculpture made of crystal glass, wearing rayban-like oversized black sunglasses. Each release has a unique theme/activity that symbolizes the release.
|
||||
|
||||
**Shared prefix:** `High-key white scene, very clean, hero framing. A close-up photo of a capybara sculpture made of crystal glass. The capybara wears rayban-like oversized black sunglasses.`
|
||||
|
||||
**Also offer future release concepts** tied to vision vectors from `kb/vision-inlined.md` (e.g., agency, inhabitation, sculpting, safe exploration).
|
||||
|
||||
## Tone Guide
|
||||
|
||||
**Good:**
|
||||
- "Lots of love to models: native support, latest protocols, total configuration"
|
||||
- "UX quality of life improvements, from Google Drive to message reorder"
|
||||
- "Gemini 3 Flash support with 4-level thinking: high, medium, low, minimal"
|
||||
|
||||
**Bad:**
|
||||
- "Rolling out the red carpet for top models!" (too salesy)
|
||||
- "Enhanced and streamlined the robust model experience" (corporate speak)
|
||||
- "Added support for Gemini 3 Flash model with multiple thinking levels" (verb prefix, vague)
|
||||
|
||||
## Reference
|
||||
|
||||
Find previous copy at:
|
||||
- **GitHub releases:** https://github.com/enricoros/big-AGI/releases
|
||||
- **News entries:** `src/apps/news/news.data.tsx`
|
||||
- **README:** `README.md` release notes section
|
||||
- **Changelog:** https://big-agi.com/changes
|
||||
|
||||
Match the existing tone - professional but human, specific not generic, features not marketing.
|
||||
@@ -1,23 +1,38 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* Parse Ollama featured models from HTML
|
||||
* Parse Ollama models from HTML (sorted by newest for stable ordering)
|
||||
*
|
||||
* Usage:
|
||||
* 1. Fetch HTML: curl -s "https://ollama.com/library?sort=featured" -o /tmp/ollama-featured.html
|
||||
* 1. Fetch HTML: curl -s "https://ollama.com/library?sort=newest" -o /tmp/ollama-newest.html
|
||||
* 2. Parse: node .claude/scripts/parse-ollama-models.js
|
||||
*
|
||||
* Outputs: pipe-delimited format: modelName|pulls|capabilities|sizes
|
||||
* Example: deepseek-r1|66200000|tools,thinking|1.5b,7b,8b,14b,32b,70b,671b
|
||||
*
|
||||
* Filtering rules:
|
||||
* - Top 30 newest models are always included (regardless of pull count)
|
||||
* - After top 30, only models with 50K+ pulls are included
|
||||
* - Models with 'cloud' capability are always excluded
|
||||
* - Models with 'embedding' capability are always excluded
|
||||
*
|
||||
* Pull counts are rounded to significant figures for stable diffs:
|
||||
* - >=10M: round to 100K (e.g., 109,123,456 -> 109,100,000)
|
||||
* - >=1M: round to 10K (e.g., 5,432,100 -> 5,430,000)
|
||||
* - <1M: round to 1K (e.g., 88,700 -> 89,000)
|
||||
*/
|
||||
|
||||
const fs = require('fs');
|
||||
const os = require('os');
|
||||
const path = require('path');
|
||||
|
||||
const htmlPath = process.argv[2] || '/tmp/ollama-featured.html';
|
||||
const htmlPath = process.argv[2] || path.join(os.tmpdir(), 'ollama-newest.html');
|
||||
const TOP_N_ALWAYS_INCLUDE = 30;
|
||||
const MIN_PULLS_THRESHOLD = 50000;
|
||||
|
||||
if (!fs.existsSync(htmlPath)) {
|
||||
console.error(`Error: HTML file not found at ${htmlPath}`);
|
||||
console.error('Please fetch it first with:');
|
||||
console.error(' curl -s "https://ollama.com/library?sort=featured" -o /tmp/ollama-featured.html');
|
||||
console.error(' curl -s "https://ollama.com/library?sort=newest" -o /tmp/ollama-newest.html');
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
@@ -25,7 +40,7 @@ const html = fs.readFileSync(htmlPath, 'utf8');
|
||||
|
||||
// Split into model sections - each starts with <a href="/library/
|
||||
const modelSections = html.split(/<a href="\/library\//);
|
||||
const models = [];
|
||||
const allParsedModels = [];
|
||||
|
||||
for (let i = 1; i < modelSections.length; i++) {
|
||||
const section = modelSections[i].substring(0, 5000); // Large enough window to capture all data
|
||||
@@ -65,10 +80,27 @@ for (let i = 1; i < modelSections.length; i++) {
|
||||
sizes.push(sizeMatch[1].trim());
|
||||
}
|
||||
|
||||
// Only include models with 50K+ pulls
|
||||
if (pulls >= 50000) {
|
||||
models.push({ name, pulls, capabilities, sizes });
|
||||
// Skip models with 'cloud' or 'embedding' capability
|
||||
if (capabilities.includes('cloud') || capabilities.includes('embedding')) {
|
||||
continue;
|
||||
}
|
||||
|
||||
allParsedModels.push({ name, pulls: roundPulls(pulls), capabilities, sizes });
|
||||
}
|
||||
|
||||
// Apply filtering: top 30 always included, rest need 50K+ pulls
|
||||
const models = allParsedModels.filter((model, index) => {
|
||||
return index < TOP_N_ALWAYS_INCLUDE || model.pulls >= MIN_PULLS_THRESHOLD;
|
||||
});
|
||||
|
||||
/**
|
||||
* Round pulls to significant figures for stable output.
|
||||
* This reduces churn from daily fluctuations while preserving magnitude.
|
||||
*/
|
||||
function roundPulls(pulls) {
|
||||
if (pulls >= 10000000) return Math.round(pulls / 100000) * 100000; // >=10M: round to 100K
|
||||
if (pulls >= 1000000) return Math.round(pulls / 10000) * 10000; // >=1M: round to 10K
|
||||
return Math.round(pulls / 1000) * 1000; // <1M: round to 1K
|
||||
}
|
||||
|
||||
// Output in pipe-delimited format (in the order they appear on the page)
|
||||
@@ -78,4 +110,6 @@ models.forEach(m => {
|
||||
console.log(`${m.name}|${m.pulls}|${caps}|${tags}`);
|
||||
});
|
||||
|
||||
console.error(`\nTotal models with 50K+ pulls: ${models.length}`);
|
||||
const topNCount = Math.min(TOP_N_ALWAYS_INCLUDE, allParsedModels.length);
|
||||
const thresholdCount = models.length - topNCount;
|
||||
console.error(`\nTotal models: ${models.length} (top ${topNCount} newest + ${thresholdCount} with ${MIN_PULLS_THRESHOLD / 1000}K+ pulls)`);
|
||||
|
||||
@@ -3,19 +3,37 @@
|
||||
"allow": [
|
||||
"Bash(cat:*)",
|
||||
"Bash(cp:*)",
|
||||
"Bash(curl:*)",
|
||||
"Bash(eslint:*)",
|
||||
"Bash(find:*)",
|
||||
"Bash(gh issue list:*)",
|
||||
"Bash(gh issue view:*)",
|
||||
"Bash(git branch:*)",
|
||||
"Bash(git cherry-pick:*)",
|
||||
"Bash(git describe:*)",
|
||||
"Bash(git grep:*)",
|
||||
"Bash(git log:*)",
|
||||
"Bash(git ls-tree:*)",
|
||||
"Bash(git mv:*)",
|
||||
"Bash(git show:*)",
|
||||
"Bash(grep:*)",
|
||||
"Bash(head:*)",
|
||||
"Bash(ls:*)",
|
||||
"Bash(mkdir:*)",
|
||||
"Bash(node:*)",
|
||||
"Bash(npm install)",
|
||||
"Bash(npm install:*)",
|
||||
"Bash(npm run:*)",
|
||||
"Bash(npx eslint:*)",
|
||||
"Bash(npx tsc:*)",
|
||||
"Bash(rg:*)",
|
||||
"Bash(rm:*)",
|
||||
"Bash(sed:*)",
|
||||
"Bash(tail:*)",
|
||||
"Bash(tree:*)",
|
||||
"Bash(tsc:*)",
|
||||
"Read(//tmp/**)",
|
||||
"Skill(llms:update-models*)",
|
||||
"WebFetch",
|
||||
"WebFetch(domain:big-agi.com)",
|
||||
"WebSearch",
|
||||
|
||||
+15
-40
@@ -1,43 +1,18 @@
|
||||
# big-AGI non-code files
|
||||
/docs/
|
||||
/dist/
|
||||
README.md
|
||||
*
|
||||
|
||||
# Ignore build and log files
|
||||
Dockerfile
|
||||
/.dockerignore
|
||||
!app/
|
||||
!kb/
|
||||
!pages/
|
||||
!public/
|
||||
!src/
|
||||
!tools/
|
||||
|
||||
# Node build artifacts
|
||||
/node_modules
|
||||
/.pnp
|
||||
.pnp.js
|
||||
!*.mjs
|
||||
!middleware_BASIC_AUTH.ts
|
||||
!middleware.ts
|
||||
!next.config.ts
|
||||
!package*.json
|
||||
!tsconfig.json
|
||||
|
||||
# next.js
|
||||
/.next/
|
||||
/out/
|
||||
|
||||
# production
|
||||
/build
|
||||
|
||||
# versioning
|
||||
.git/
|
||||
.github/
|
||||
|
||||
# IDEs
|
||||
.idea/
|
||||
|
||||
# debug
|
||||
npm-debug.log*
|
||||
yarn-debug.log*
|
||||
yarn-error.log*
|
||||
.pnpm-debug.log*
|
||||
|
||||
# local env files
|
||||
.env*.local
|
||||
|
||||
# vercel
|
||||
.vercel
|
||||
|
||||
# typescript
|
||||
*.tsbuildinfo
|
||||
next-env.d.ts
|
||||
!LICENSE
|
||||
!README.md
|
||||
|
||||
@@ -1,3 +0,0 @@
|
||||
{
|
||||
"extends": "next/core-web-vitals"
|
||||
}
|
||||
@@ -0,0 +1,70 @@
|
||||
name: 🔥 Make AI Fix This
|
||||
description: Bug, question, or feedback - AI analyzes and changes Big-AGI appropriately
|
||||
labels: [ 'claude-triage' ]
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
Thanks for opening an issue! Our AI will analyze it and change Big-AGI appropriately.
|
||||
|
||||
**What happens next:**
|
||||
- AI searches the codebase and documentation
|
||||
- You get a response, typically within 30 minutes
|
||||
- Ticket gets follow-up and community votes
|
||||
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: What's happening?
|
||||
description: Describe the bug, feature request, or question. Be as detailed as you can.
|
||||
placeholder: |
|
||||
Bug example: "In Beam, Anthropic models seem to have search off..."
|
||||
Model request: "Add Claude Opus 4.5 out today, see https://..."
|
||||
Feature example: "Add the option to save frequent prompt templates for reuse..."
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: dropdown
|
||||
attributes:
|
||||
label: Where does this happen?
|
||||
description: If this is a bug or issue, where are you experiencing it?
|
||||
options:
|
||||
- Big-AGI Pro (big-agi.com)
|
||||
- Self-deployed from GitHub
|
||||
- Docker deployment
|
||||
- Local development
|
||||
- Not applicable (question/feedback)
|
||||
- Other
|
||||
validations:
|
||||
required: false
|
||||
|
||||
- type: dropdown
|
||||
attributes:
|
||||
label: Impact on your workflow
|
||||
description: How does this affect your use of Big-AGI?
|
||||
options:
|
||||
- Blocking - Can't use Big-AGI
|
||||
- High - Major feature broken
|
||||
- Medium - Workaround exists
|
||||
- Low - Minor inconvenience
|
||||
- None - Just a question/suggestion
|
||||
validations:
|
||||
required: false
|
||||
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Environment (if applicable)
|
||||
description: Device, OS, browser - only if reporting a bug
|
||||
placeholder: |
|
||||
Device: Macbook Pro M3
|
||||
OS: macOS 15.2
|
||||
Browser: Chrome 131
|
||||
validations:
|
||||
required: false
|
||||
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Additional context
|
||||
description: Screenshots, error messages, or anything else that helps
|
||||
placeholder: Paste screenshots or error messages here
|
||||
validations:
|
||||
required: false
|
||||
@@ -0,0 +1,69 @@
|
||||
version: 2
|
||||
updates:
|
||||
- package-ecosystem: docker
|
||||
directory: /
|
||||
schedule:
|
||||
interval: weekly
|
||||
commit-message:
|
||||
prefix: "chore(deps)"
|
||||
ignore:
|
||||
- dependency-name: "node"
|
||||
versions: [">=25", "<26"] # Node 25 breaks the build because of a dummy localStorage object
|
||||
|
||||
- package-ecosystem: github-actions
|
||||
directory: /
|
||||
schedule:
|
||||
interval: weekly
|
||||
commit-message:
|
||||
prefix: "chore(deps)"
|
||||
|
||||
# Disabled npm updates for now - will need precise package pinning, as some packages changed behavior upstream
|
||||
# - package-ecosystem: npm
|
||||
# directory: /
|
||||
# schedule:
|
||||
# interval: weekly
|
||||
# commit-message:
|
||||
# prefix: "chore(deps)"
|
||||
# cooldown:
|
||||
# semver-patch: 3
|
||||
# semver-minor: 7
|
||||
# semver-major: 14
|
||||
# # Ignore packages intentionally pinned due to upstream issues
|
||||
# ignore:
|
||||
# # Issue #857: v11.6+ breaks streaming; tried 11.4.4/11.6/11.7, only 11.5.1 works
|
||||
# - dependency-name: "@trpc/*"
|
||||
# versions: [">=11.5.1", "<12"]
|
||||
# # Pinned during tRPC #857 debugging - may be safe to unpin, test first
|
||||
# - dependency-name: "@tanstack/react-query"
|
||||
# versions: [">=5.90.10", "<6"]
|
||||
# # Pinned because 5.0.8 changes signatures so return set({ .. }) != void;
|
||||
# - dependency-name: "zustand"
|
||||
# versions: [">=5.0.7", "<6"]
|
||||
# groups:
|
||||
# next:
|
||||
# patterns:
|
||||
# - "@next/*"
|
||||
# - "eslint-config-next"
|
||||
# - "next"
|
||||
# react:
|
||||
# patterns:
|
||||
# - "react"
|
||||
# - "react-dom"
|
||||
# - "@types/react"
|
||||
# - "@types/react-dom"
|
||||
# emotion:
|
||||
# patterns:
|
||||
# - "@emotion/*"
|
||||
# mui:
|
||||
# patterns:
|
||||
# - "@mui/*"
|
||||
# dnd-kit:
|
||||
# patterns:
|
||||
# - "@dnd-kit/*"
|
||||
# prisma:
|
||||
# patterns:
|
||||
# - "@prisma/*"
|
||||
# - "prisma"
|
||||
# vercel:
|
||||
# patterns:
|
||||
# - "@vercel/*"
|
||||
@@ -12,27 +12,30 @@ on:
|
||||
|
||||
jobs:
|
||||
claude-dm:
|
||||
# Only allow repository owner to trigger DMs with @claude (blocks other users and bots)
|
||||
if: |
|
||||
(github.event_name == 'issues' && (contains(github.event.issue.body, '@claude') || contains(github.event.issue.title, '@claude'))) ||
|
||||
github.actor == 'enricoros' &&
|
||||
github.triggering_actor == 'enricoros' &&
|
||||
((github.event_name == 'issues' && (contains(github.event.issue.body, '@claude') || contains(github.event.issue.title, '@claude'))) ||
|
||||
(github.event_name == 'issue_comment' && contains(github.event.comment.body, '@claude')) ||
|
||||
(github.event_name == 'pull_request_review' && contains(github.event.review.body, '@claude')) ||
|
||||
(github.event_name == 'pull_request_review_comment' && contains(github.event.comment.body, '@claude'))
|
||||
(github.event_name == 'pull_request_review_comment' && contains(github.event.comment.body, '@claude')))
|
||||
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 20
|
||||
timeout-minutes: 30
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
pull-requests: write
|
||||
contents: write # Required for code creation and commits
|
||||
issues: write
|
||||
id-token: write
|
||||
pull-requests: write
|
||||
actions: read # Required for Claude to read CI results on PRs
|
||||
id-token: write # required to use OIDC to authenticate to Claude Code API
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
fetch-depth: 1
|
||||
fetch-depth: 0 # 1 -> 0: full history helps with git blame, etc.
|
||||
|
||||
- name: Run Claude Code DM Response
|
||||
id: claude
|
||||
@@ -41,6 +44,7 @@ jobs:
|
||||
claude_code_oauth_token: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }}
|
||||
|
||||
# Security: Only users with write access can trigger (DMs allow code execution)
|
||||
# Note: contents:write permission enables code creation and commits
|
||||
|
||||
# This is an optional setting that allows Claude to read CI results on PRs
|
||||
additional_permissions: |
|
||||
@@ -49,9 +53,7 @@ jobs:
|
||||
# Optional: Add claude_args to customize behavior and configuration
|
||||
# See https://github.com/anthropics/claude-code-action/blob/main/docs/usage.md
|
||||
# or https://docs.claude.com/en/docs/claude-code/cli-reference for available options
|
||||
# claude_args: '--allowed-tools Bash(gh pr:*)'
|
||||
# disabling opus for now claude-opus-4-1-20250805
|
||||
claude_args: |
|
||||
--model claude-sonnet-4-5-20250929
|
||||
--model claude-opus-4-6
|
||||
--max-turns 100
|
||||
--allowedTools "Edit,Read,Write,WebFetch,WebSearch,Bash(cat:*),Bash(cp:*),Bash(find:*),Bash(git branch:*),Bash(grep:*),Bash(ls:*),Bash(mkdir:*),Bash(npm install),Bash(npm install:*),Bash(npm run:*),Bash(gh issue:*),Bash(gh search:*),Bash(gh label:*),Bash(gh pr:*),mcp__chrome-devtools,SlashCommand"
|
||||
--allowedTools "Edit,Read,Write,WebFetch,WebSearch,Bash(cat:*),Bash(cp:*),Bash(find:*),Bash(git branch:*),Bash(grep:*),Bash(ls:*),Bash(mkdir:*),Bash(npm run:*),Bash(gh issue:*),Bash(gh search:*),Bash(gh label:*),Bash(gh pr:*),SlashCommand"
|
||||
|
||||
@@ -2,7 +2,7 @@ name: Claude Code Auto-Triage Issues
|
||||
|
||||
on:
|
||||
issues:
|
||||
types: [ opened, assigned ]
|
||||
types: [ opened ]
|
||||
|
||||
jobs:
|
||||
claude-issue-triage:
|
||||
@@ -12,19 +12,20 @@ jobs:
|
||||
!contains(github.event.issue.body, '@claude')
|
||||
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 20
|
||||
timeout-minutes: 30
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
issues: write
|
||||
pull-requests: read
|
||||
id-token: write
|
||||
pull-requests: read # was write, but we're not altering PRs here
|
||||
actions: read
|
||||
id-token: write # required to use OIDC to authenticate to Claude Code API
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
fetch-depth: 1
|
||||
fetch-depth: 0 # 1 -> 0: full history helps with git blame, etc.
|
||||
|
||||
- name: Analyze issue and provide help
|
||||
uses: anthropics/claude-code-action@v1
|
||||
@@ -34,6 +35,11 @@ jobs:
|
||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
allowed_non_write_users: '*'
|
||||
# track_progress: true # Enables tracking comments
|
||||
show_full_output: ${{ github.event.repository.private }} # security: do not log verbosely in private repo
|
||||
|
||||
# This is an optional setting that allows Claude to read CI results on PRs
|
||||
additional_permissions: |
|
||||
actions: read
|
||||
|
||||
prompt: |
|
||||
REPO: ${{ github.repository }}
|
||||
@@ -49,9 +55,11 @@ jobs:
|
||||
**Use web search**: When potentially outside Big-AGI (e.g. user configuration), search the web for similar errors or related issues
|
||||
**Provide a solution**:
|
||||
- Provide multiple solutions if uncertain, and say so
|
||||
- If you can fix it in code, propose the fix
|
||||
- If possible also suggest fixes or workarounds for immediate relief
|
||||
- Analyze the code and suggest specific fixes with code examples
|
||||
- If possible also suggest fixes or workarounds for immediate relief
|
||||
- Reference specific files and line numbers
|
||||
- Suggest workarounds for immediate relief if applicable
|
||||
- Use web search to find similar issues and solutions
|
||||
- Test selectively and even npm install and run build if needed to verify the solution
|
||||
2. Always add the 'claude-triage' issue label to indicate this issue was triaged by Claude
|
||||
3. Comment with:
|
||||
@@ -60,12 +68,16 @@ jobs:
|
||||
- Next steps or clarification needed
|
||||
- Link duplicates if found
|
||||
|
||||
Remember: design values for this codebase: orthogonal features, features that generalize well, modularized and reusable code,
|
||||
type-discriminated data, optimized code, zero maintenance burden. Minimize future pain, etc.
|
||||
|
||||
IMPORTANT: You are in READ-ONLY triage mode. Analyze and suggest solutions in your comment, but do NOT attempt to push code changes.
|
||||
If you're uncertain, say so and suggest next steps.
|
||||
Be welcoming, helpful, professional, solution-focused and no-BS.
|
||||
|
||||
# See https://github.com/anthropics/claude-code-action/blob/main/docs/usage.md
|
||||
# or https://docs.claude.com/en/docs/claude-code/cli-reference for available options
|
||||
claude_args: |
|
||||
--model claude-sonnet-4-5-20250929
|
||||
--max-turns 60
|
||||
--allowedTools "Edit,Read,Write,WebFetch,WebSearch,Bash(cat:*),Bash(cp:*),Bash(find:*),Bash(git branch:*),Bash(grep:*),Bash(ls:*),Bash(mkdir:*),Bash(npm install),Bash(npm install:*),Bash(npm run:*),Bash(gh issue:*),Bash(gh search:*),Bash(gh label:*),Bash(gh pr:*),mcp__chrome-devtools,SlashCommand"
|
||||
--model claude-opus-4-6
|
||||
--max-turns 75
|
||||
--allowedTools "Edit,Read,Write,WebFetch,WebSearch,Bash(cat:*),Bash(cp:*),Bash(find:*),Bash(git branch:*),Bash(grep:*),Bash(ls:*),Bash(mkdir:*),Bash(npm run:*),Bash(gh issue:*),Bash(gh search:*),Bash(gh label:*),Bash(gh pr:*),SlashCommand"
|
||||
|
||||
@@ -1,77 +0,0 @@
|
||||
name: Claude Code PR Review
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
types: [ opened, synchronize, ready_for_review ]
|
||||
|
||||
# Limit branches
|
||||
branches: [ main, dev, v1 ]
|
||||
|
||||
# Optional: Only run on specific file changes
|
||||
# paths:
|
||||
# - "src/**/*.ts"
|
||||
# - "src/**/*.tsx"
|
||||
|
||||
jobs:
|
||||
claude-pr-review:
|
||||
# Skip draft PRs
|
||||
# Optional: filter authors: github.event.pull_request.user.login != 'enricoros'
|
||||
if: |
|
||||
github.event.pull_request.draft == false
|
||||
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
pull-requests: write
|
||||
issues: read
|
||||
id-token: write
|
||||
actions: read # Required for Claude to read CI results on PRs
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 1
|
||||
|
||||
- name: Run PR Review
|
||||
uses: anthropics/claude-code-action@v1
|
||||
with:
|
||||
claude_code_oauth_token: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }}
|
||||
# Security: Allow any user to trigger reviews (read-only PR analysis is safe)
|
||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
allowed_non_write_users: '*'
|
||||
# track_progress: true # Enables tracking comments
|
||||
|
||||
# This setting allows Claude to read CI results on PRs
|
||||
additional_permissions: |
|
||||
actions: read
|
||||
|
||||
prompt: |
|
||||
REPO: ${{ github.repository }}
|
||||
PR NUMBER: ${{ github.event.pull_request.number }}
|
||||
|
||||
Please review this pull request and provide feedback on:
|
||||
- Potential bugs or issues
|
||||
- Adherence to Big-AGI architecture and design patterns
|
||||
- Code quality and best practices, including TypeScript types, error handling, and edge cases
|
||||
- Performance considerations: bundle size, React patterns, streaming efficiency
|
||||
- Security concerns if applicable
|
||||
|
||||
Use the repository's CLAUDE.md for guidance on style and conventions.
|
||||
|
||||
Use `gh pr comment` with your Bash tool to leave your review as a comment on the PR.
|
||||
Use `gh pr review comment` for inline suggestions on specific lines.
|
||||
|
||||
IMPORTANT: After completing your review, always add the 'claude-review' label to the PR to indicate it was reviewed by Claude:
|
||||
gh pr edit ${{ github.event.pull_request.number }} --add-label "claude-review"
|
||||
|
||||
Be constructive, helpful, no-BS, and specific with file:line references.
|
||||
|
||||
# See https://github.com/anthropics/claude-code-action/blob/main/docs/usage.md
|
||||
# or https://docs.claude.com/en/docs/claude-code/cli-reference for available options
|
||||
claude_args: |
|
||||
--model claude-sonnet-4-5-20250929
|
||||
--max-turns 100
|
||||
--allowedTools "Edit,Read,Write,WebFetch,WebSearch,Bash(cat:*),Bash(cp:*),Bash(find:*),Bash(git branch:*),Bash(grep:*),Bash(ls:*),Bash(mkdir:*),Bash(npm install),Bash(npm install:*),Bash(npm run:*),Bash(gh issue:*),Bash(gh search:*),Bash(gh label:*),Bash(gh pr:*),mcp__chrome-devtools"
|
||||
@@ -20,28 +20,122 @@ env:
|
||||
REGISTRY: ghcr.io
|
||||
IMAGE_NAME: ${{ github.repository }}
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
build-and-push-image:
|
||||
runs-on: ubuntu-latest
|
||||
# Build job: runs on native runners for each platform (no QEMU emulation)
|
||||
build:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- platform: linux/amd64
|
||||
runner: ubuntu-latest
|
||||
- platform: linux/arm64
|
||||
runner: ubuntu-24.04-arm
|
||||
|
||||
runs-on: ${{ matrix.runner }}
|
||||
name: Build ${{ matrix.platform }}
|
||||
timeout-minutes: 30
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
security-events: write
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Prepare
|
||||
run: |
|
||||
platform=${{ matrix.platform }}
|
||||
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
|
||||
echo "IMAGE_NAME_LC=${IMAGE_NAME,,}" >> $GITHUB_ENV
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
with:
|
||||
fetch-depth: 1
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4.0.0
|
||||
|
||||
- name: Log in to the Container registry
|
||||
uses: docker/login-action@v3
|
||||
uses: docker/login-action@4907a6ddec9925e35a0a9e82d7399ccc52663121 # v4.1.0
|
||||
with:
|
||||
registry: ${{ env.REGISTRY }}
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Extract metadata for Docker
|
||||
id: meta
|
||||
uses: docker/metadata-action@030e881283bb7a6894de51c315a6bfe6a94e05cf # v6.0.0
|
||||
with:
|
||||
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
|
||||
labels: |
|
||||
org.opencontainers.image.title=Big-AGI Open
|
||||
org.opencontainers.image.description=Big-AGI Open - Multi-model AI workspace for experts who need to think broader, decide smarter, and build with confidence.
|
||||
org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
|
||||
org.opencontainers.image.documentation=https://big-agi.com
|
||||
|
||||
- name: Build and push by digest
|
||||
id: build
|
||||
uses: docker/build-push-action@bcafcacb16a39f128d818304e6c9c0c18556b85f # v7.1.0
|
||||
with:
|
||||
context: .
|
||||
file: Dockerfile
|
||||
platforms: ${{ matrix.platform }}
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
tags: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME_LC }}
|
||||
build-args: |
|
||||
NEXT_PUBLIC_GA4_MEASUREMENT_ID=${{ secrets.GA4_MEASUREMENT_ID }}
|
||||
NEXT_PUBLIC_BUILD_HASH=${{ github.sha }}
|
||||
NEXT_PUBLIC_BUILD_REF_NAME=${{ github.ref_name }}
|
||||
outputs: type=image,push-by-digest=true,name-canonical=true,push=true,oci-mediatypes=true
|
||||
provenance: false
|
||||
cache-from: type=gha,scope=${{ github.repository }}-${{ matrix.platform }}
|
||||
cache-to: type=gha,scope=${{ github.repository }}-${{ matrix.platform }},mode=max
|
||||
|
||||
- name: Export digest
|
||||
run: |
|
||||
mkdir -p ${{ runner.temp }}/digests
|
||||
digest="${{ steps.build.outputs.digest }}"
|
||||
touch "${{ runner.temp }}/digests/${digest#sha256:}"
|
||||
|
||||
- name: Upload digest
|
||||
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1
|
||||
with:
|
||||
name: digests-${{ env.PLATFORM_PAIR }}
|
||||
path: ${{ runner.temp }}/digests/*
|
||||
if-no-files-found: error
|
||||
retention-days: 1
|
||||
|
||||
# Merge job: combines platform-specific images into a unified multi-arch manifest
|
||||
merge:
|
||||
name: Merge manifests
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 10
|
||||
needs: build
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
|
||||
steps:
|
||||
- name: Prepare
|
||||
run: echo "IMAGE_NAME_LC=${IMAGE_NAME,,}" >> $GITHUB_ENV
|
||||
|
||||
- name: Download digests
|
||||
uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1
|
||||
with:
|
||||
path: ${{ runner.temp }}/digests
|
||||
pattern: digests-*
|
||||
merge-multiple: true
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4.0.0
|
||||
|
||||
- name: Log in to the Container registry
|
||||
uses: docker/login-action@4907a6ddec9925e35a0a9e82d7399ccc52663121 # v4.1.0
|
||||
with:
|
||||
registry: ${{ env.REGISTRY }}
|
||||
username: ${{ github.actor }}
|
||||
@@ -49,7 +143,7 @@ jobs:
|
||||
|
||||
- name: Extract metadata (tags, labels) for Docker
|
||||
id: meta
|
||||
uses: docker/metadata-action@v5
|
||||
uses: docker/metadata-action@030e881283bb7a6894de51c315a6bfe6a94e05cf # v6.0.0
|
||||
with:
|
||||
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
|
||||
tags: |
|
||||
@@ -65,28 +159,18 @@ jobs:
|
||||
# Version tags (v2.0.0, 2.0.0)
|
||||
type=ref,event=tag
|
||||
type=semver,pattern={{version}}
|
||||
labels: |
|
||||
org.opencontainers.image.title=Big-AGI Open
|
||||
org.opencontainers.image.description=Big-AGI Open - Multi-model AI workspace for experts who need to think broader, decide smarter, and build with confidence.
|
||||
org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
|
||||
org.opencontainers.image.documentation=https://big-agi.com
|
||||
|
||||
- name: Build and push Docker image
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: .
|
||||
file: Dockerfile
|
||||
platforms: linux/amd64,linux/arm64
|
||||
push: true
|
||||
tags: ${{ steps.meta.outputs.tags }}
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
build-args: |
|
||||
NEXT_PUBLIC_GA4_MEASUREMENT_ID=${{ secrets.GA4_MEASUREMENT_ID }}
|
||||
NEXT_PUBLIC_BUILD_HASH=${{ github.sha }}
|
||||
NEXT_PUBLIC_BUILD_REF_NAME=${{ github.ref_name }}
|
||||
# Enable build cache (future)
|
||||
#cache-from: type=gha
|
||||
#cache-to: type=gha,mode=max
|
||||
# Enable provenance and SBOM (future)
|
||||
#provenance: true
|
||||
#sbom: true
|
||||
- name: Create manifest list and push
|
||||
working-directory: ${{ runner.temp }}/digests
|
||||
run: |
|
||||
docker buildx imagetools create \
|
||||
$(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
|
||||
--annotation='index:org.opencontainers.image.title=Big-AGI Open' \
|
||||
--annotation='index:org.opencontainers.image.description=Big-AGI Open - Multi-model AI workspace for experts who need to think broader, decide smarter, and build with confidence.' \
|
||||
--annotation='index:org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}' \
|
||||
--annotation='index:org.opencontainers.image.documentation=https://big-agi.com' \
|
||||
$(printf '${{ env.REGISTRY }}/${{ env.IMAGE_NAME_LC }}@sha256:%s ' *)
|
||||
|
||||
- name: Inspect image
|
||||
run: |
|
||||
docker buildx imagetools inspect ${{ env.REGISTRY }}/${{ env.IMAGE_NAME_LC }}:${{ steps.meta.outputs.version }}
|
||||
@@ -53,3 +53,6 @@ next-env.d.ts
|
||||
.env*.local
|
||||
/.run/dev (ENV).run.xml
|
||||
/src/modules/3rdparty/aider/scratch*
|
||||
|
||||
# Ignore temporary CC files
|
||||
/tmpclaude*
|
||||
@@ -1,3 +0,0 @@
|
||||
overrides=@mui/material@^5.0.0:
|
||||
dependencies:
|
||||
@mui/material: replaced-by=@mui/joy
|
||||
@@ -1,22 +1,44 @@
|
||||
# CLAUDE.md
|
||||
|
||||
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
|
||||
Guidance to Claude Code when working with code in this repository.
|
||||
|
||||
## Development Commands
|
||||
|
||||
```bash
|
||||
# Targeted Code Quality (safe while dev server runs)
|
||||
npx tsc --noEmit # Type check without building
|
||||
npx eslint src/path/to/file.ts # Lint specific file
|
||||
npm run lint # Lint entire project
|
||||
```
|
||||
|
||||
## Architecture Overview
|
||||
|
||||
Big-AGI is a Next.js 15 application with a modular architecture built for advanced AI interactions. The codebase follows a three-layer structure with distinct separation of concerns.
|
||||
Big-AGI is a Next.js 15 application with a sophisticated modular architecture built for professional AI interactions.
|
||||
|
||||
### Development Commands
|
||||
|
||||
Dev servers may be already running on ports 3000, 3001, 3002, or 3003 (not always this app - other projects may occupy these ports). Never start or stop dev servers, let the user do it.
|
||||
|
||||
```bash
|
||||
# Validate (~5s, safe while dev server runs, do NOT use `next build` ~45s for same checks)
|
||||
tsc --noEmit --pretty && npm run lint # Type check (~3.5s) + ESLint (~2s)
|
||||
eslint src/path/to/file.ts # Lint specific file
|
||||
|
||||
# Full build (~60s+, only when suspecting runtime/bundle issues)
|
||||
npm run build # next build runs compile+lint+types but stops at first type-error file; tsc shows all at once
|
||||
|
||||
# Database & External Services
|
||||
# npm run supabase:local-update-types # Generate TypeScript types
|
||||
# npm run stripe:listen # Listen for Stripe webhooks
|
||||
```
|
||||
|
||||
### Git/GitHub remotes
|
||||
|
||||
The `gh` command is available to interact with GitHub from the terminal, but **NEVER PUSH TO ANY BRANCH**. The user manages all 'write' git operations.
|
||||
- `opensource` -> `enricoros/big-AGI` (public, default branch: `main`, MIT) - community issues/PRs/releases
|
||||
- `private` -> `big-agi/big-agi-private` (private, default branch: `dev`) - main dev repo with `dev`->`staging`->`prod` pipeline
|
||||
- **Always use `git mv` instead of `mv`** when renaming or moving files - preserves git history tracking
|
||||
- **NEVER run `git stash`** - it causes work loss
|
||||
|
||||
### Core Directory Structure
|
||||
|
||||
You are started from the root of the repository (i.e. where the git folder is or scripts should be run from).
|
||||
**ISSUE ALL COMMANDS FROM THE ROOT, OMITTING 'cd' COMMANDS. DO NOT CHAIN CD AND OTHER COMMANDS**
|
||||
**NEVER RUN COMPOUND `cd` COMMANDS LIKE `cd some-folder && command` - ONLY RUN `command` FROM THE ROOT, ALWAYS.**
|
||||
The directory structure is as follows:
|
||||
|
||||
```
|
||||
/app/api/ # Next.js App Router (API routes only, mostly -> /src/server/)
|
||||
/pages/ # Next.js Pages Router (file-based, mostly -> /src/apps/)
|
||||
@@ -31,11 +53,11 @@ Big-AGI is a Next.js 15 application with a modular architecture built for advanc
|
||||
### Key Technologies
|
||||
|
||||
- **Frontend**: Next.js 15, React 18, Material-UI Joy, Emotion (CSS-in-JS)
|
||||
- **State Management**: Zustand with localStorge/IndexedDB (single cell) persistence
|
||||
- **API Layer**: tRPC with React Query for type-safe communication
|
||||
- **State Management**: Zustand with localStorage/IndexedDB (single cell) persistence
|
||||
- **API Layer**: tRPC with TanStack React Query for type-safe communication
|
||||
- **Runtime**: Edge Runtime for AI operations, Node.js for data processing
|
||||
|
||||
### Apps Architecture Pattern
|
||||
### "Apps" Architecture Pattern
|
||||
|
||||
Each app in `/src/apps/` is a self-contained feature module:
|
||||
- Main component (`App*.tsx`)
|
||||
@@ -51,20 +73,20 @@ Modules in `/src/modules/` provide reusable business logic:
|
||||
- **`aix/`** - AI communication framework for real-time streaming
|
||||
- **`beam/`** - Multi-model AI reasoning system (scatter/gather pattern)
|
||||
- **`blocks/`** - Content rendering (markdown, code, images, etc.)
|
||||
- **`llms/`** - Language model abstraction supporting 16 vendors
|
||||
- **`llms/`** - Language model abstraction supporting 20+ vendors
|
||||
|
||||
### Key Subsystems & Their Patterns
|
||||
|
||||
#### 1. AIX - Real-time AI Communication
|
||||
#### AIX - Real-time AI Communication
|
||||
**Location**: `/src/modules/aix/`
|
||||
**Pattern**: Client-server streaming architecture with provider abstraction
|
||||
|
||||
- **Client** → tRPC → **Server** → **AI Providers**
|
||||
- **Client** -> tRPC -> **Server** -> **AI Providers**
|
||||
- Handles streaming/non-streaming responses with batching and error recovery
|
||||
- Particle-based streaming: `AixWire_Particles` → `ContentReassembler` → `DMessage`
|
||||
- Particle-based streaming: `AixWire_Particles` -> `ContentReassembler` -> `DMessage`
|
||||
- Provider-agnostic through adapter pattern (OpenAI, Anthropic, Gemini protocols)
|
||||
|
||||
#### 3. Beam - Multi-Model Reasoning
|
||||
#### Beam - Multi-Model Reasoning
|
||||
**Location**: `/src/modules/beam/`
|
||||
**Pattern**: Scatter/Gather for parallel AI processing
|
||||
|
||||
@@ -73,15 +95,24 @@ Modules in `/src/modules/` provide reusable business logic:
|
||||
- Real-time UI updates via vanilla Zustand stores
|
||||
- BeamStore per conversation via ConversationHandler
|
||||
|
||||
#### 4. Conversation Management
|
||||
#### Conversation Management
|
||||
**Location**: `/src/common/stores/chat/` and `/src/common/chat-overlay/`
|
||||
**Pattern**: Overlay architecture with handler per conversation
|
||||
|
||||
- `ConversationHandler` orchestrates chat, beam, ephemerals
|
||||
- Per-chat stores: `PerChatOverlayStore` + `BeamStore`
|
||||
- Message structure: `DMessage` → `DMessageFragment[]`
|
||||
- Message structure: `DMessage` -> `DMessageFragment[]`
|
||||
- Supports multi-pane with independent conversation states
|
||||
|
||||
#### Layout System ("Optima")
|
||||
|
||||
The Optima layout system provides:
|
||||
- **Responsive design** adapting desktop/mobile
|
||||
- **Drawer(left)/Toolbar/Panel(right)** composition
|
||||
- **Portal-based rendering** for flexible component placement
|
||||
|
||||
Located in `/src/common/layout/optima/`
|
||||
|
||||
### Storage System
|
||||
|
||||
Big-AGI uses a local-first architecture with Zustand + IndexedDB:
|
||||
@@ -89,7 +120,6 @@ Big-AGI uses a local-first architecture with Zustand + IndexedDB:
|
||||
- **localStorage** for persistent settings/all storage (via Zustand persist middleware)
|
||||
- **IndexedDB** for persistent chat-only storage (via Zustand persist middleware) on a single key-val cell
|
||||
- **Local-first** architecture with offline capability
|
||||
- **Migration system** for upgrading data structures across versions
|
||||
|
||||
Key storage patterns:
|
||||
- Stores use `createIDBPersistStorage()` for IndexedDB persistence
|
||||
@@ -101,26 +131,18 @@ Located in `/src/common/stores/` with stores like:
|
||||
- `chat/store-chats.ts`: Conversations and messages
|
||||
- `llms/store-llms.ts`: Model configurations
|
||||
|
||||
### Layout System ("Optima")
|
||||
|
||||
The Optima layout system provides:
|
||||
- **Responsive design** adapting desktop/mobile
|
||||
- **Drawer/Panel/Toolbar** composition
|
||||
- **Split-pane support** for multi-conversation views
|
||||
- **Portal-based rendering** for flexible component placement
|
||||
|
||||
Located in `/src/common/layout/optima/`
|
||||
|
||||
### State Management Patterns
|
||||
|
||||
1. **Global Stores** (Zustand with IndexedDB persistence)
|
||||
- `store-chats`: Conversations and messages
|
||||
- `store-llms`: Model configurations
|
||||
- `store-ux-labs`: UI preferences and labs features
|
||||
- **Zustand pattern**: Always wrap multi-property selectors with `useShallow` from `zustand/react/shallow` to prevent re-renders on reference changes
|
||||
|
||||
2. **Per-Instance Stores** (Vanilla Zustand)
|
||||
- `store-beam_vanilla`: Beam scatter/gather state
|
||||
- `store-perchat_vanilla`: Chat overlay state
|
||||
- `store-attachment-drafts_vanilla`: Attachment drafts
|
||||
- High-performance, no React integration
|
||||
|
||||
3. **Module Stores**
|
||||
@@ -130,94 +152,60 @@ Located in `/src/common/layout/optima/`
|
||||
### User Flows & Interdependencies
|
||||
|
||||
#### Chat Message Flow
|
||||
1. User input → `Composer` → `DMessage` creation
|
||||
2. `ConversationHandler.messageAppend()` → Store update
|
||||
3. `_handleExecute()` / `ConversationHandler.executeChatMessages()` → AIX client request
|
||||
4. AIX streaming → `ContentReassembler` → UI updates
|
||||
5. Zustand auto-persistence → IndexedDB
|
||||
1. User input -> `Composer` -> `DMessage` creation
|
||||
2. `ConversationHandler.messageAppend()` -> Store update
|
||||
3. `_handleExecute()` / `ConversationHandler.executeChatMessages()` -> AIX client request
|
||||
4. AIX streaming -> `ContentReassembler` -> UI updates
|
||||
5. Zustand auto-persistence -> IndexedDB
|
||||
|
||||
#### Beam Multi-Model Flow
|
||||
1. User triggers Beam → `BeamStore.open()` state update
|
||||
1. User triggers Beam -> `BeamStore.open()` state update
|
||||
2. Scatter: Parallel `aixChatGenerateContent()` to N models
|
||||
3. Real-time ray updates → UI progress
|
||||
4. Gather: User selects fusion → Combined output
|
||||
5. Result → New message in conversation
|
||||
3. Real-time ray updates -> UI progress
|
||||
4. Gather: User selects fusion -> Combined output
|
||||
5. Result -> New message in conversation
|
||||
|
||||
### Development Patterns
|
||||
|
||||
#### TypeScript & Code Quality
|
||||
- Type-safe through strict TypeScript interfaces
|
||||
- Clear interface-first approach for modules and components
|
||||
- Use latest TypeScript 5.9+ features
|
||||
- Use forward-looking patterns to minimize future refactors (e.g., discriminated unions, `satisfies` operator, as const assertions)
|
||||
- Type guards and exhaustiveChecks for robustness
|
||||
- Type inference where possible
|
||||
- Runtime validation with Zod schemas for API inputs/outputs (usually server-side, with the client importing as types the inferred types)
|
||||
|
||||
#### Module Integration
|
||||
- Each module exports its functionality through index files
|
||||
- Modules register with central registries (e.g., `vendors.registry.ts`)
|
||||
- Configuration objects define module behavior
|
||||
- Type-safe integration through strict TypeScript interfaces
|
||||
|
||||
#### Component Patterns
|
||||
- **Controlled components** with clear prop interfaces
|
||||
- **Hook-based logic** extraction for reusability
|
||||
- **Portal rendering** for overlays and modals
|
||||
- **Suspense boundaries** for async operations
|
||||
|
||||
#### API Patterns
|
||||
- **tRPC routers** for type-safe API endpoints
|
||||
- **Zod schemas** for runtime validation
|
||||
- **Middleware** for request/response processing
|
||||
- **Edge functions** for performance-critical AI operations
|
||||
- **tRPC procedures middleware** for authorization and logging (authorization is on a httpOnly cookie)
|
||||
- **Edge functions** for performance-critical operations
|
||||
|
||||
## Security Considerations
|
||||
|
||||
- API keys stored client-side in localStorage (user-provided)
|
||||
- Server-side API keys in environment variables only
|
||||
#### Security Considerations
|
||||
- API keys in environment variables only (server-side); on the client they're in localStorage for now, but we want to move away from this
|
||||
- XSS protection through proper content escaping
|
||||
- No credential transmission to third parties
|
||||
|
||||
## Knowledge Base
|
||||
#### Writing Style
|
||||
- **Never use emdashes (—).** Use normal dashes (-) instead, in all generated text, code comments, and documentation.
|
||||
|
||||
Architecture and system documentation is available in the `/kb/` knowledge base:
|
||||
|
||||
@kb/KB.md
|
||||
|
||||
## Common Development Tasks
|
||||
|
||||
### Testing & Quality
|
||||
- Run `npm run lint` before committing
|
||||
- Type-check with `npx tsc --noEmit`
|
||||
- Type-check with `tsc --noEmit`
|
||||
- Test critical user flows manually
|
||||
|
||||
### Adding a New LLM Vendor
|
||||
1. Create vendor in `/src/modules/llms/vendors/[vendor]/`
|
||||
2. Implement `IModelVendor` interface
|
||||
3. Register in `vendors.registry.ts`
|
||||
4. Add environment variables to `env.ts` (if server-side keys needed)
|
||||
|
||||
### Debugging Storage Issues
|
||||
- Check IndexedDB: DevTools → Application → IndexedDB → `app-chats`
|
||||
- Check IndexedDB: DevTools -> Application -> IndexedDB -> `app-chats`
|
||||
- Monitor Zustand state: Use Zustand DevTools
|
||||
- Check migration logs in console during rehydration
|
||||
|
||||
## Code Examples
|
||||
|
||||
### AIX Streaming Pattern
|
||||
```typescript
|
||||
// Efficient streaming with decimation
|
||||
aixChatGenerateContent_DMessage(
|
||||
llmId,
|
||||
request,
|
||||
{ abortSignal, throttleParallelThreads: 1 },
|
||||
async (update, isDone) => {
|
||||
// Real-time UI updates
|
||||
}
|
||||
);
|
||||
```
|
||||
|
||||
### Model Registry Pattern
|
||||
```typescript
|
||||
// Registry pattern for extensibility
|
||||
const MODEL_VENDOR_REGISTRY: Record<ModelVendorId, IModelVendor> = {
|
||||
openai: ModelVendorOpenAI,
|
||||
anthropic: ModelVendorAnthropic,
|
||||
// ... 14 more vendors
|
||||
};
|
||||
```
|
||||
|
||||
## Server Architecture
|
||||
|
||||
@@ -225,9 +213,13 @@ The server uses a split architecture with two tRPC routers:
|
||||
|
||||
### Edge Network (`trpc.router-edge`)
|
||||
Distributed edge runtime for low-latency AI operations:
|
||||
- **AIX** - AI streaming and communication
|
||||
- **LLM Routers** - Direct vendor integrations (OpenAI, Anthropic, Gemini, Ollama)
|
||||
- **External Services** - ElevenLabs (TTS), Google Search, YouTube transcripts
|
||||
- **AIX** [1] - AI streaming and communication
|
||||
- **LLM Routers** [1] - Vendor-specific operations such as list models (OpenAI, Anthropic, Gemini, Ollama)
|
||||
- **Speex** [1] - Unified TTS router (ElevenLabs, Inworld, and other TTS vendors)
|
||||
- **External Services** - Google Search, YouTube transcripts
|
||||
|
||||
[1]: also supports client-side fetch (CSF) via client-side inclusion (rebundling with stubs),
|
||||
for direct browser-to-API communication when possible (CORS), to reduce latency and network barriers
|
||||
|
||||
Located at `/src/server/trpc/trpc.router-edge.ts`
|
||||
|
||||
@@ -239,3 +231,9 @@ Centralized server for data processing operations:
|
||||
Located at `/src/server/trpc/trpc.router-cloud.ts`
|
||||
|
||||
**Key Pattern**: Edge runtime for AI (fast, distributed), Cloud runtime for data ops (centralized, Node.js)
|
||||
|
||||
@kb/KB.md
|
||||
|
||||
@kb/vision-inlined.md
|
||||
|
||||
As a side note, the product tiers (independent, non-VC-funded) are: **Open** (self-host, MIT) · **Free** (big-agi.com) · **Pro** (paid, includes Sync + backup). All tiers use the user's own API keys.
|
||||
|
||||
+19
-10
@@ -1,5 +1,8 @@
|
||||
# syntax=docker/dockerfile:1
|
||||
# check=skip=CopyIgnoredFile
|
||||
|
||||
# Base
|
||||
FROM node:22-alpine AS base
|
||||
FROM node:24-alpine AS base
|
||||
ENV NEXT_TELEMETRY_DISABLED=1
|
||||
|
||||
# Dependencies
|
||||
@@ -39,19 +42,20 @@ ENV NEXT_PUBLIC_GA4_MEASUREMENT_ID=${NEXT_PUBLIC_GA4_MEASUREMENT_ID}
|
||||
ARG NEXT_PUBLIC_POSTHOG_KEY
|
||||
ENV NEXT_PUBLIC_POSTHOG_KEY=${NEXT_PUBLIC_POSTHOG_KEY}
|
||||
|
||||
# Optional argument to configure Google Drive Picker at build time (can reuse AUTH_GOOGLE_ID value)
|
||||
ARG NEXT_PUBLIC_GOOGLE_DRIVE_CLIENT_ID
|
||||
ENV NEXT_PUBLIC_GOOGLE_DRIVE_CLIENT_ID=${NEXT_PUBLIC_GOOGLE_DRIVE_CLIENT_ID}
|
||||
|
||||
# Copy development deps and source
|
||||
COPY --from=deps /app/node_modules ./node_modules
|
||||
COPY . .
|
||||
|
||||
# link ssl3 for latest Alpine
|
||||
RUN sh -c '[ ! -e /lib/libssl.so.3 ] && ln -s /usr/lib/libssl.so.3 /lib/libssl.so.3 || echo "Link already exists"'
|
||||
|
||||
# Build the application
|
||||
ENV NODE_ENV=production
|
||||
RUN npm run build
|
||||
|
||||
# Reduce installed packages to production-only
|
||||
RUN npm prune --production
|
||||
RUN npm prune --omit=dev
|
||||
|
||||
|
||||
# Runner
|
||||
@@ -59,18 +63,23 @@ FROM base AS runner
|
||||
WORKDIR /app
|
||||
|
||||
# As user
|
||||
RUN addgroup --system --gid 1001 nodejs
|
||||
RUN adduser --system --uid 1001 nextjs
|
||||
RUN addgroup --system --gid 1001 nodejs \
|
||||
&& adduser --system --uid 1001 nextjs \
|
||||
&& apk add --no-cache openssl
|
||||
|
||||
# Copy Built app
|
||||
COPY --from=builder --chown=nextjs:nodejs /app/public ./public
|
||||
COPY --from=builder --chown=nextjs:nodejs /app/.next ./.next
|
||||
COPY --from=builder --chown=nextjs:nodejs /app/node_modules ./node_modules
|
||||
COPY --from=builder --chown=nextjs:nodejs /app/src/server/prisma ./src/server/prisma
|
||||
# Instead of `COPY --from=builder --chown=nextjs:nodejs /app/.next ./.next`, we only extract some parts, excluding .next/cache which is build time only:
|
||||
COPY --from=builder --chown=nextjs:nodejs /app/.next/BUILD_ID ./.next/
|
||||
COPY --from=builder --chown=nextjs:nodejs /app/.next/static ./.next/static
|
||||
COPY --from=builder --chown=nextjs:nodejs /app/.next/server ./.next/server
|
||||
COPY --from=builder --chown=nextjs:nodejs /app/.next/types ./.next/types
|
||||
COPY --from=builder --chown=nextjs:nodejs /app/.next/*.json ./.next/
|
||||
|
||||
# Minimal ENV for production
|
||||
ENV NODE_ENV=production
|
||||
ENV PATH=$PATH:/app/node_modules/.bin
|
||||
|
||||
# Run as non-root user
|
||||
USER nextjs
|
||||
@@ -79,4 +88,4 @@ USER nextjs
|
||||
EXPOSE 3000
|
||||
|
||||
# Start the application
|
||||
CMD ["next", "start"]
|
||||
CMD ["/app/node_modules/.bin/next", "start"]
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2023-2025 Enrico Ros
|
||||
Copyright (c) 2023-2026 Enrico Ros
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
|
||||
@@ -1,37 +1,124 @@
|
||||
<div align="center">
|
||||
|
||||
<img width="256" height="256" alt="Big-AGI Logo" src="https://big-agi.com/assets/logo-bright-github.svg" />
|
||||
|
||||
<h1><a href="https://big-agi.com">Big-AGI</a></h1>
|
||||
|
||||
[](https://big-agi.com)
|
||||
[](https://github.com/enricoros/big-AGI/pkgs/container/big-agi)
|
||||
[](https://vercel.com/new/clone?repository-url=https://github.com/enricoros/big-agi)
|
||||
[](https://discord.gg/MkH4qj2Jp9)
|
||||
<br/>
|
||||
[](https://github.com/enricoros/big-agi/commits)
|
||||
[](https://github.com/enricoros/big-AGI/pkgs/container/big-agi)
|
||||
[](https://github.com/enricoros/big-AGI/graphs/contributors)
|
||||
[](https://opensource.org/licenses/MIT)
|
||||
<br/>
|
||||
|
||||
[](https://github.com/enricoros/big-agi/issues/new?template=ai-triage.yml)
|
||||
|
||||
[//]: # ([](https://stats.uptimerobot.com/59MXcnmjrM))
|
||||
[//]: # ([](https://github.com/enricoros/big-AGI/releases/latest))
|
||||
[//]: # ()
|
||||
[//]: # ([](#))
|
||||
[//]: # ([](https://x.com/enricoros))
|
||||
|
||||
</div>
|
||||
|
||||
<br/>
|
||||
|
||||
# Big-AGI Open 🧠
|
||||
|
||||
This is the open-source foundation for **Big-AGI**.
|
||||
This is the open-source foundation of **Big-AGI**, ___the multi-model AI workspace for experts___.
|
||||
|
||||
Big-AGI is the multi-model AI workspace for experts: Engineers architecting systems. Founders making decisions. Researchers validating hypotheses.
|
||||
You need to think broader, decide faster, and build with confidence, then you need Big-AGI.
|
||||
|
||||
Big-AGI is the multi-model AI workspace for experts who need to think broader, decide smarter, and build with confidence.
|
||||
It comes packed with **world-class features** like Beam, and is praised for its **best-in-class AI chat UX**.
|
||||
**As an independent, non-VC-funded project, Pro subscriptions at $10.99/mo fund development for everyone, including the free and open-source tiers.**
|
||||
|
||||
**What makes Big-AGI different:**
|
||||
**Intelligence**, with [Beam](https://big-agi.com/beam) & Merge for multi-model reasoning and bleeding-edge AI models like Nano Banana, GPT-5 Pro, Sonnet 4.5 -
|
||||
**Control** with personas, data ownership, requests inspection, unlimited usage with API keys, and *no vendor lock-in* -
|
||||
and **Speed** with a local-first, over-powered, zero-latency, madly optimized web app.
|
||||

|
||||
[](https://big-agi.com/beam)
|
||||
[](https://big-agi.com/inspector)
|
||||
|
||||
**Who uses Big-AGI:**
|
||||
### What makes Big-AGI different:
|
||||
|
||||
**Intelligence**: with [Beam & Merge](https://big-agi.com/beam) for multi-model de-hallucination, native search, and bleeding-edge AI models like Opus 4.7, Nano Banana Pro, Kimi K2.6 or GPT 5.4 -
|
||||
**Control**: with personas, data ownership, requests inspection, unlimited usage with API keys, and *no vendor lock-in* -
|
||||
and **Speed**: with a local-first, over-powered, zero-latency, madly optimized web app.
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<td align="center" width="25%">
|
||||
<b>🧠 Intelligence</b><br/>
|
||||
<img src="https://img.shields.io/badge/Multi--Model-Trust-4285F4?style=for-the-badge" alt="Multi-Model"/>
|
||||
</td>
|
||||
<td align="center" width="25%">
|
||||
<b>✨ Experience</b><br/>
|
||||
<img src="https://img.shields.io/badge/Clean-UX-34A853?style=for-the-badge" alt="Clean UX"/>
|
||||
</td>
|
||||
<td align="center" width="25%">
|
||||
<b>⚡ Performance</b><br/>
|
||||
<img src="https://img.shields.io/badge/Zero-Latency-EA4335?style=for-the-badge" alt="Zero Latency"/>
|
||||
</td>
|
||||
<td align="center" width="25%">
|
||||
<b>🔒 Control</b><br/>
|
||||
<img src="https://img.shields.io/badge/No-Lock--in-FBBC04?style=for-the-badge" alt="No Lock-in"/>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" valign="top">
|
||||
Beam & Merge<br/>
|
||||
No context junk<br/>
|
||||
Purest AI outputs
|
||||
</td>
|
||||
<td align="center" valign="top">
|
||||
Flow-state interface<br/>
|
||||
Highly customizable<br/>
|
||||
Best-in-class UX
|
||||
</td>
|
||||
<td align="center" valign="top">
|
||||
Local-first<br/>
|
||||
Highly parallel<br/>
|
||||
Madly optimized
|
||||
</td>
|
||||
<td align="center" valign="top">
|
||||
No vendor lock-in<br/>
|
||||
Your API keys<br/>
|
||||
AI Inspector
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
### Who uses Big-AGI:
|
||||
Loved by engineers, founders, researchers, self-hosters, and IT departments for its power, reliability, and transparency.
|
||||
|
||||
<img width="830" height="370" alt="image" src="https://github.com/user-attachments/assets/513c4f77-0970-4a56-b23b-1416c8246174" />
|
||||
|
||||
Choose Big-AGI because you don't need another clone or slop - you need an AI tool that scales with you.
|
||||
|
||||
### Show me a screenshot:
|
||||
Sure - here is real-world screeengrab as I'm writing this, while running a Beam to extract SVG from an image with Sonnet 4.5, Opus 4.1, GPT 5.1, Gemini 2.5 Pro, Nano Banana, etc.
|
||||
<img alt="Real-world screen capture as of Nov 15 2025, 2am" src="https://github.com/user-attachments/assets/853f4160-27cb-4ac9-826b-402f1e63d4af" />
|
||||
|
||||
|
||||
## Get Started
|
||||
|
||||
**Most users: [big-agi.com](https://big-agi.com)** (fastest, zero setup, support the project)
|
||||
Free tier with all core features and more, Pro tier with Cloud Sync.
|
||||
| Tier | Best For | What You Get | Setup |
|
||||
|------------------------------------------------------|-------------------|---------------------------------------------------------------|-------------|
|
||||
| Big-AGI Open (self-host) | **IT** | First to get new models support. Maximum control and privacy. | 5-30 min |
|
||||
| [big-agi.com](https://big-agi.com) Free | **Everyone** | Full core experience, improved Beam, new Personas, best UX. | **2 min**\* |
|
||||
| **[big-agi.com](https://big-agi.com) Pro** $10.99/mo | **Professionals** | Everything + **Sync** across unlimited devices + 1GB storage | **2 min**\* |
|
||||
|
||||
<a href="https://big-agi.com">
|
||||
<img width="210" height="68" alt="image" src="https://github.com/user-attachments/assets/b2f8a7b8-415f-4c92-b228-4f5a54fe2bdd" />
|
||||
</a>
|
||||
\*: **Configuration requires your API keys**. *Big-AGI does not charge for model usage or limit your access*.
|
||||
**Why Pro?** As an independent project, Pro subscriptions fund all development. Early subscribers shape the roadmap directly.
|
||||
|
||||
[](https://big-agi.com)
|
||||
|
||||
**Self-host and developers** (full control)
|
||||
Develop locally or self-host with Docker on your own infrastructure – [guide](docs/installation.md)
|
||||
|
||||
Or fork & run on Vercel:
|
||||
|
||||
[](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2Fenricoros%2Fbig-AGI&env=OPENAI_API_KEY&envDescription=Backend%20API%20keys%2C%20optional%20and%20may%20be%20overridden%20by%20the%20UI.&envLink=https%3A%2F%2Fgithub.com%2Fenricoros%2Fbig-AGI%2Fblob%2Fmain%2Fdocs%2Fenvironment-variables.md&project-name=big-AGI)
|
||||
- Develop locally or self-host with Docker on your own infrastructure – [guide](docs/installation.md)
|
||||
- Or fork & run on Vercel:
|
||||
[](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2Fenricoros%2Fbig-AGI&env=OPENAI_API_KEY&envDescription=Backend%20API%20keys%2C%20optional%20and%20may%20be%20overridden%20by%20the%20UI.&envLink=https%3A%2F%2Fgithub.com%2Fenricoros%2Fbig-AGI%2Fblob%2Fmain%2Fdocs%2Fenvironment-variables.md&project-name=big-AGI)
|
||||
|
||||
[//]: # (**For the latest Big-AGI:**)
|
||||
|
||||
@@ -41,7 +128,27 @@ Or fork & run on Vercel:
|
||||
|
||||
---
|
||||
|
||||
## What's New in 2.0 · Oct 6, 2025 · Open
|
||||
## Our Philosophy
|
||||
|
||||
We're an independent, non-VC-funded project with a simple belief: **AI should elevate you, not replace you**.
|
||||
|
||||
This is why we built Big-AGI to be **local-first**, madly optimized to 0-latency, launched multi-model first to
|
||||
defeat hallucinations, designed Beam around the **humans in the loop**, re-wrote frameworks and abstractions
|
||||
so you **are not vendor locked-in**, and obsessed over a powerful UI that works, just works.
|
||||
|
||||
NOTE: this is a powerful tool - if you need a toy UI or clone, this ain't it.
|
||||
|
||||
|
||||
---
|
||||
|
||||
## Release Notes
|
||||
|
||||
👉 **[See the Live Release Notes](https://big-agi.com/changes)**
|
||||
- Open 2.0.4: **Hyper Params** **Opus 4.6**, **GPT-5.4**, **Gemini 3.1 Pro**, AWS Bedrock, parameter accuracy, Anthropic continuation/Fast mode
|
||||
- Open 2.0.3: **Red Carpet** **Kimi K2.5**, **Gemini 3 Flash**, **GPT 5.2**, Google Drive, Inworld, Novita.ai, Speech/UX improvements
|
||||
- Open 2.0.2: **Speex** multi-vendor speech synthesis, **Opus 4.5**, **Gemini 3 Pro**, **Nano Banana Pro**, **Grok 4.1**, **GPT-5.1**, **Kimi K2** + 280 fixes
|
||||
|
||||
### What's New in 2.0 · Oct 31, 2025 · Open
|
||||
|
||||
- **Big-AGI Open** is ready and more productive and faster than ever, with:
|
||||
- **Beam 2**: multi-modal, program-based, follow-ups, save presets
|
||||
@@ -54,7 +161,9 @@ Or fork & run on Vercel:
|
||||
|
||||
<img width="830" height="385" alt="image" src="https://github.com/user-attachments/assets/ad52761d-7e3f-44d8-b41e-947ce8b4faa1" />
|
||||
|
||||
### Open links: 👉 [installation](docs/installation.md) 👉 [roadmap](https://github.com/users/enricoros/projects/4/views/2) 👉 [documentation](docs/README.md)
|
||||
#### **Open** links: 👉 [changelog](https://big-agi.com/changes) 👉 [installation](docs/installation.md) 👉 [roadmap](https://github.com/users/enricoros/projects/4/views/2) 👉 [documentation](docs/README.md)
|
||||
|
||||
**For teams and institutions:** Need shared prompts, SSO, or managed deployments? Reach out at enrico@big-agi.com. We're actively collecting requirements from research groups and IT departments.
|
||||
|
||||
<details>
|
||||
<summary>5,000 Commits Milestone</summary>
|
||||
@@ -74,8 +183,11 @@ The new architecture is solid and the speed improvements are real.
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>What's New in 1.16.1...1.16.10 · 2024-2025 (patch releases)</summary>
|
||||
<summary>What's New in 1.16.1...1.16.13 · (patch releases)</summary>
|
||||
|
||||
- 1.16.13: Docker fix ([#840](https://github.com/enricoros/big-AGI/issues/840))
|
||||
- 1.16.12: Dockerfile update ([#840](https://github.com/enricoros/big-AGI/issues/840))
|
||||
- 1.16.11: v1 final release, documentation updates
|
||||
- 1.16.10: OpenRouter models support
|
||||
- 1.16.9: Docker Gemini fix, R1 models support
|
||||
- 1.16.8: OpenAI ChatGPT-4o Latest, o1 models support
|
||||
@@ -137,7 +249,7 @@ The new architecture is solid and the speed improvements are real.
|
||||
- New **[Perplexity](https://www.perplexity.ai/)** and **[Groq](https://groq.com/)** integration (thanks @Penagwin). [#407](https://github.com/enricoros/big-AGI/issues/407), [#427](https://github.com/enricoros/big-AGI/issues/427)
|
||||
- **[LocalAI](https://localai.io/models/)** deep integration, including support for [model galleries](https://github.com/enricoros/big-AGI/issues/411)
|
||||
- **Mistral** Large and Google **Gemini 1.5** support
|
||||
- Performance optimizations: runs [much faster](https://twitter.com/enricoros/status/1756553038293303434?utm_source=localhost:3000&utm_medium=big-agi), saves lots of power, reduces memory usage
|
||||
- Performance optimizations: runs [much faster](https://x.com/enricoros/status/1756553038293303434?utm_source=localhost:3000&utm_medium=big-agi), saves lots of power, reduces memory usage
|
||||
- Enhanced UX with auto-sizing charts, refined search and folder functionalities, perfected scaling
|
||||
- And with more UI improvements, documentation, bug fixes (20 tickets), and developer enhancements
|
||||
|
||||
@@ -200,98 +312,85 @@ https://github.com/enricoros/big-AGI/assets/1590910/a6b8e172-0726-4b03-a5e5-10cf
|
||||
|
||||
</details>
|
||||
|
||||
For full details and former releases, check out the [changelog](docs/changelog.md).
|
||||
For full details and former releases, check out the [archived versions changelog](docs/changelog.md).
|
||||
|
||||
## 👉 Key Features
|
||||
## 👉 Supported Models & Integrations
|
||||
|
||||
|  |  |  |  |  |
|
||||
Delightful UX with latest models exclusive features like Beam for **multi-model AI validation**.
|
||||
> 
|
||||
> [](https://big-agi.com/beam)
|
||||
|
||||
|  |  |  |  |  |
|
||||
|---------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------|
|
||||
| **Chat**<br/>**Call**<br/>**Beam**<br/>**Draw**, ... | Local & Cloud<br/>Open & Closed<br/>Cheap & Heavy<br/>Google, Mistral, ... | Attachments<br/>Diagrams<br/>Multi-Chat<br/>Mobile-first UI | Stored Locally<br/>Easy self-Host<br/>Local actions<br/>Data = Gold | AI Personas<br/>Voice Modes<br/>Screen Capture<br/>Camera + OCR |
|
||||
|
||||

|
||||
|
||||
You can easily configure 100s of AI models in big-AGI:
|
||||
### AI Models & Vendors
|
||||
|
||||
| **AI models** | _supported vendors_ |
|
||||
|:--------------------|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| Opensource Servers | [LocalAI](https://localai.io/) (multimodal) · [Ollama](https://ollama.com/) |
|
||||
| Local Servers | [LM Studio](https://lmstudio.ai/) |
|
||||
| Multimodal services | [Azure](https://azure.microsoft.com/en-us/products/ai-services/openai-service) · [Anthropic](https://anthropic.com) · [Google Gemini](https://ai.google.dev/) · [OpenAI](https://platform.openai.com/docs/overview) |
|
||||
| Language services | [Alibaba](https://www.alibabacloud.com/en/product/modelstudio) · [DeepSeek](https://deepseek.com) · [Groq](https://wow.groq.com/) · [Mistral](https://mistral.ai/) · [OpenRouter](https://openrouter.ai/) · [Perplexity](https://www.perplexity.ai/) · [Together AI](https://www.together.ai/) · [xAI](https://x.ai/) |
|
||||
| Image services | OpenAI · Google Gemini |
|
||||
| Speech services | [ElevenLabs](https://elevenlabs.io) (Voice synthesis / cloning) |
|
||||
Configure 100s of AI models from 20+ providers:
|
||||
|
||||
Add extra functionality with these integrations:
|
||||
| **AI models** | _supported vendors_ |
|
||||
|:--------------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| Opensource Servers | [LocalAI](https://localai.io/) · [Ollama](https://ollama.com/) |
|
||||
| Local Servers | [LM Studio](https://lmstudio.ai/) (non-open) |
|
||||
| Multimodal services | [Anthropic](https://anthropic.com) · [AWS Bedrock](https://aws.amazon.com/bedrock/) · [Azure](https://azure.microsoft.com/en-us/products/ai-services/openai-service) · [Google Gemini](https://ai.google.dev/) · [OpenAI](https://platform.openai.com/docs/overview) |
|
||||
| LLM services | [Alibaba](https://www.alibabacloud.com/en/product/modelstudio) · [DeepSeek](https://deepseek.com) · [Groq](https://wow.groq.com/) · [Mistral](https://mistral.ai/) · [Moonshot](https://www.moonshot.cn/) · [OpenPipe](https://openpipe.ai/) · [OpenRouter](https://openrouter.ai/) · [Perplexity](https://www.perplexity.ai/) · [Together AI](https://www.together.ai/) · [xAI](https://x.ai/) · [Z.ai](https://z.ai/) |
|
||||
| OpenAI-compatible | Any OpenAI-compatible endpoint - models, pricing, and capabilities are auto-detected |
|
||||
| Image services | OpenAI · Google Gemini (Nano Banana) · LocalAI |
|
||||
| Speech services | [ElevenLabs](https://elevenlabs.io) · [Inworld](https://inworld.ai) · [OpenAI TTS](https://platform.openai.com/docs/guides/text-to-speech) · LocalAI · Browser (Web Speech API) |
|
||||
|
||||
| **More** | _integrations_ |
|
||||
|:-------------|:---------------------------------------------------------------------------------------------------------------|
|
||||
| Web Browse | [Browserless](https://www.browserless.io/) · [Puppeteer](https://pptr.dev/)-based |
|
||||
| Web Search | [Google CSE](https://programmablesearchengine.google.com/) |
|
||||
| Code Editors | [CodePen](https://codepen.io/pen/) · [StackBlitz](https://stackblitz.com/) · [JSFiddle](https://jsfiddle.net/) |
|
||||
| Tracking | [Helicone](https://www.helicone.ai) (LLM Observability) |
|
||||
### Additional Integrations
|
||||
|
||||
[//]: # (- [x] **Flow-state UX** for uncompromised productivity)
|
||||
|
||||
[//]: # (- [x] **AI Personas**: Tailor your AI interactions with customizable personas)
|
||||
|
||||
[//]: # (- [x] **Sleek UI/UX**: A smooth, intuitive, and mobile-responsive interface)
|
||||
|
||||
[//]: # (- [x] **Efficient Interaction**: Voice commands, OCR, and drag-and-drop file uploads)
|
||||
|
||||
[//]: # (- [x] **Privacy First**: Self-host and use your own API keys for full control)
|
||||
|
||||
[//]: # (- [x] **Advanced Tools**: Execute code, import PDFs, and summarize documents)
|
||||
|
||||
[//]: # (- [x] **Seamless Integrations**: Enhance functionality with various third-party services)
|
||||
|
||||
[//]: # (- [x] **Open Roadmap**: Contribute to the progress of big-AGI)
|
||||
|
||||
<br/>
|
||||
|
||||
## 🚀 Installation
|
||||
|
||||
To get started with big-AGI, follow our comprehensive [Installation Guide](docs/installation.md).
|
||||
The guide covers various installation options, whether you're spinning it up on
|
||||
your local computer, deploying on Vercel, on Cloudflare, or rolling it out
|
||||
through Docker.
|
||||
|
||||
Whether you're a developer, system integrator, or enterprise user, you'll find step-by-step instructions
|
||||
to set up big-AGI quickly and easily.
|
||||
|
||||
[](docs/installation.md)
|
||||
|
||||
Or bring your API keys and jump straight into our free instance on [big-AGI.com](https://big-agi.com).
|
||||
|
||||
<br/>
|
||||
|
||||
# 🌟 Get Involved!
|
||||
|
||||
[//]: # ([](https://discord.gg/MkH4qj2Jp9))
|
||||
[](https://discord.gg/MkH4qj2Jp9)
|
||||
|
||||
- [ ] 📢️ [**Chat with us** on Discord](https://discord.gg/MkH4qj2Jp9)
|
||||
- [ ] ⭐ **Give us a star** on GitHub 👆
|
||||
- [ ] 🚀 **Do you like code**? You'll love this gem of a project! [_Pick up a task!_](https://github.com/users/enricoros/projects/4/views/4) - _easy_ to _pro_
|
||||
- [ ] 💡 Got a feature suggestion? [_Add your roadmap ideas_](https://github.com/enricoros/big-agi/issues/new?&template=roadmap-request.md)
|
||||
- [ ] ✨ [Deploy](docs/installation.md) your [fork](docs/customizations.md) for your friends and family, or [customize it for work](docs/customizations.md)
|
||||
|
||||
<br/>
|
||||
|
||||
[//]: # ([](https://github.com/enricoros/big-agi/stargazers))
|
||||
|
||||
[//]: # ([](https://github.com/enricoros/big-agi/network))
|
||||
|
||||
[//]: # ([](https://github.com/enricoros/big-agi/pulls))
|
||||
|
||||
[//]: # ([](https://github.com/enricoros/big-agi/LICENSE))
|
||||
|
||||
## 📜 Licensing
|
||||
|
||||
Big-AGI incorporates third-party software components that are subject
|
||||
to separate license terms. For detailed information about these
|
||||
components and their respective licenses, please refer to
|
||||
the [Third-Party Notices](src/modules/3rdparty/THIRD_PARTY_NOTICES.md).
|
||||
| **More** | _integrations_ |
|
||||
|:--------------|:---------------------------------------------------------------------------------------------------------------|
|
||||
| Web Browse | [Browserless](https://www.browserless.io/) · [Puppeteer](https://pptr.dev/)-based |
|
||||
| Web Search | [Google CSE](https://programmablesearchengine.google.com/) |
|
||||
| Observability | [Helicone](https://www.helicone.ai) |
|
||||
|
||||
---
|
||||
|
||||
2023-2025 · Enrico Ros x [Big-AGI](https://big-agi.com) · Like this project? Leave a star! 💫⭐
|
||||
## 🚀 Installation
|
||||
|
||||
Self-host with Docker, deploy on Vercel, or develop locally. Full setup guide:
|
||||
|
||||
[](docs/installation.md)
|
||||
|
||||
Or use the hosted version at [big-agi.com](https://big-agi.com) with your API keys.
|
||||
|
||||
---
|
||||
|
||||
## 👋 Community & Contributing
|
||||
|
||||
### Connect
|
||||
|
||||
[](https://discord.gg/MkH4qj2Jp9)
|
||||
|
||||
⭐ [Star the repo](https://github.com/enricoros/big-agi) if Big-AGI is useful to you
|
||||
|
||||
### Contribute
|
||||
|
||||
**🤖 AI-Powered Issue Assistance**
|
||||
|
||||
When you open an issue, our custom AI triage system (powered by [Claude Code](https://github.com/anthropics/claude-code-action) with Big-AGI architecture documentation) analyzes it, searches the codebase, and provides solutions - typically within 30 minutes. We've trained the system on our modules and subsystems so it handles most issues effectively. Your feedback drives development!
|
||||
|
||||
[](https://github.com/enricoros/big-agi/issues/new?template=ai-triage.yml)
|
||||
[](https://github.com/enricoros/big-agi/issues/new?&template=roadmap-request.md)
|
||||
|
||||
[](https://github.com/users/enricoros/projects/4/views/4)
|
||||
[](docs/customizations.md)
|
||||
[](https://github.com/users/enricoros/projects/4/views/2)
|
||||
|
||||
#### Contributors
|
||||
|
||||
<a href="https://github.com/enricoros/big-agi/graphs/contributors">
|
||||
<img src="https://contrib.rocks/image?repo=enricoros/big-agi&max=48&columns=12" />
|
||||
</a>
|
||||
|
||||
---
|
||||
|
||||
## License
|
||||
|
||||
MIT License · [Third-Party Notices](src/modules/3rdparty/THIRD_PARTY_NOTICES.md)
|
||||
|
||||
**2023-2026** · [Enrico Ros](https://www.enricoros.com) × [Token Fabrics](https://www.tokenfabrics.com)
|
||||
|
||||
@@ -2,7 +2,7 @@ import { fetchRequestHandler } from '@trpc/server/adapters/fetch';
|
||||
|
||||
import { appRouterCloud } from '~/server/trpc/trpc.router-cloud';
|
||||
import { createTRPCFetchContext } from '~/server/trpc/trpc.server';
|
||||
import { posthogCaptureServerException } from '~/server/posthog/posthog.server';
|
||||
import { posthogServerSendException } from '~/server/posthog/posthog.server';
|
||||
|
||||
const handlerNodeRoutes = (req: Request) => fetchRequestHandler({
|
||||
endpoint: '/api/cloud',
|
||||
@@ -16,15 +16,15 @@ const handlerNodeRoutes = (req: Request) => fetchRequestHandler({
|
||||
console.error(`❌ tRPC-cloud failed on ${path ?? 'unk-path'}: ${error.message}`);
|
||||
|
||||
// -> Capture node errors
|
||||
await posthogCaptureServerException(error, {
|
||||
await posthogServerSendException(error, undefined, {
|
||||
domain: 'trpc-onerror',
|
||||
runtime: 'nodejs',
|
||||
endpoint: path ?? 'unknown',
|
||||
method: req.method,
|
||||
url: req.url,
|
||||
additionalProperties: {
|
||||
errorCode: error.code,
|
||||
errorType: type,
|
||||
error_code: error.code,
|
||||
error_type: type,
|
||||
},
|
||||
});
|
||||
},
|
||||
@@ -33,7 +33,7 @@ const handlerNodeRoutes = (req: Request) => fetchRequestHandler({
|
||||
|
||||
// NOTE: the following statement breaks the build on non-pro deployments, and conditionals don't work either
|
||||
// so we resorted to raising the timeout from 10s to 60s in the vercel.json file instead
|
||||
export const maxDuration = 60;
|
||||
// export const maxDuration = 60;
|
||||
export const runtime = 'nodejs';
|
||||
export const dynamic = 'force-dynamic';
|
||||
export { handlerNodeRoutes as GET, handlerNodeRoutes as POST };
|
||||
@@ -10,9 +10,11 @@ const handlerEdgeRoutes = (req: Request) => fetchRequestHandler({
|
||||
createContext: createTRPCFetchContext,
|
||||
onError:
|
||||
process.env.NODE_ENV === 'development'
|
||||
? ({ path, error }) => console.error(`❌ tRPC-edge failed on ${path ?? 'unk-path'}: ${error.message}`)
|
||||
? ({ path, error }) => console.error(`\n❌ tRPC-edge failed on ${path ?? 'unk-path'}: ${error.message}`)
|
||||
: undefined,
|
||||
});
|
||||
|
||||
// NOTE: we don't set maxDuration explicitly here - however we set it in the Vercel project settings, raising to the limit of 300s
|
||||
// export const maxDuration = 60;
|
||||
export const runtime = 'edge';
|
||||
export { handlerEdgeRoutes as GET, handlerEdgeRoutes as POST };
|
||||
@@ -2,8 +2,6 @@
|
||||
#
|
||||
# For more examples, such running big-AGI alongside a web browsing service, see the `docs/docker` folder.
|
||||
|
||||
version: '3.9'
|
||||
|
||||
services:
|
||||
big-agi:
|
||||
image: ghcr.io/enricoros/big-agi:latest
|
||||
@@ -11,4 +9,3 @@ services:
|
||||
- "3000:3000"
|
||||
env_file:
|
||||
- .env
|
||||
command: [ "next", "start", "-p", "3000" ]
|
||||
@@ -1,3 +1,7 @@
|
||||
---
|
||||
unlisted: true
|
||||
---
|
||||
|
||||
# AIX dispatch server - API features comparison
|
||||
|
||||
This is updated as of 2024-07-09, and includes the latest features and capabilities of the three major AI APIs: Anthropic, Gemini, and OpenAI.
|
||||
|
||||
+16
-5
@@ -2,12 +2,16 @@
|
||||
|
||||
Information you need to get started, configure, and use big-AGI productively.
|
||||
|
||||
👉 **[Changelog](https://big-agi.com/changes)** - See what's new
|
||||
|
||||
## Getting Started
|
||||
|
||||
Essential guides:
|
||||
|
||||
- **[FAQ](help-faq.md)**: Common questions and answers
|
||||
- **[Enabling Microphone](help-feature-microphone.md)**: Configure speech recognition in your browser
|
||||
- **[Data Ownership](help-data-ownership.md)**: How your data is stored and managed
|
||||
- **[Live File](help-feature-livefile.md)**: Live file attachment feature
|
||||
|
||||
## AI Services
|
||||
|
||||
@@ -19,18 +23,21 @@ How to set up AI models and features in big-AGI.
|
||||
- Easy API key configuration:
|
||||
[Alibaba](https://bailian.console.alibabacloud.com/?apiKey=1#/api-key),
|
||||
[Anthropic](https://console.anthropic.com/settings/keys),
|
||||
[AWS Bedrock](https://console.aws.amazon.com/bedrock/),
|
||||
[Deepseek](https://platform.deepseek.com/api_keys),
|
||||
[Google Gemini](https://aistudio.google.com/app/apikey),
|
||||
[Groq](https://console.groq.com/keys),
|
||||
[Mistral](https://console.mistral.ai/api-keys/),
|
||||
[Moonshot](https://platform.moonshot.cn/console/api-keys),
|
||||
[OpenAI](https://platform.openai.com/api-keys),
|
||||
[OpenPipe](https://app.openpipe.ai/settings),
|
||||
[Perplexity](https://www.perplexity.ai/settings/api),
|
||||
[TogetherAI](https://api.together.xyz/settings/api-keys),
|
||||
[xAI](http://x.ai/api)
|
||||
[xAI](https://x.ai/api),
|
||||
[Z.ai](https://z.ai/)
|
||||
- **[Azure OpenAI](config-azure-openai.md)** guide
|
||||
- **FireworksAI** ([API keys](https://fireworks.ai/account/api-keys), via custom OpenAI endpoint: https://api.fireworks.ai/inference)
|
||||
- **[OpenRouter](config-openrouter.md)** guide
|
||||
- **OpenAI-compatible endpoints**: Any provider with an OpenAI-compatible API works out of the box - models, pricing, and capabilities are auto-detected
|
||||
|
||||
|
||||
- **Local AI Integrations**:
|
||||
@@ -40,8 +47,9 @@ How to set up AI models and features in big-AGI.
|
||||
- **Enhanced AI Features**:
|
||||
- **[Web Browsing](config-feature-browse.md)**: Enable web page download through third-party services or your own cloud
|
||||
- **Web Search**: Google Search API (see '[Environment Variables](environment-variables.md)')
|
||||
- **Image Generation**: GPT Image (gpt-image-1), DALL·E 3 and 2
|
||||
- **Voice Synthesis**: ElevenLabs API for voice generation
|
||||
- **Image Generation**: GPT Image (gpt-image-1), Nano Banana, DALL·E 3 and 2
|
||||
- **Voice Synthesis**: ElevenLabs, Inworld, OpenAI TTS, LocalAI, or browser Web Speech API
|
||||
- **[Google Drive](config-feature-google-drive.md)**: Attach files from Google Drive
|
||||
|
||||
## Deployment & Customization
|
||||
|
||||
@@ -58,12 +66,15 @@ For deploying a custom big-AGI instance:
|
||||
- **Advanced Setup**:
|
||||
- **[Source Code Customization](customizations.md)**: Modify the source code
|
||||
- **[Access Control](deploy-authentication.md)**: Optional, add basic user authentication
|
||||
- **[Database Setup](deploy-database.md)**: Optional, enables "Chat Link Sharing"
|
||||
- **[Reverse Proxy](deploy-reverse-proxy.md)**: Optional, enables custom domains and SSL
|
||||
- **[Docker Deployment](deploy-docker.md)**: Deploy with Docker containers
|
||||
- **[Kubernetes](deploy-k8s.md)**: Deploy on Kubernetes clusters
|
||||
- **[Analytics](deploy-analytics.md)**: Set up usage analytics
|
||||
- **[Environment Variables](environment-variables.md)**: Pre-configures models and services
|
||||
|
||||
## Community & Support
|
||||
|
||||
- Check the [changelog](https://big-agi.com/changes) for the latest updates
|
||||
- Visit our [GitHub repository](https://github.com/enricoros/big-AGI) for source code and issue tracking
|
||||
- Join our [Discord](https://discord.gg/MkH4qj2Jp9) for discussions and help
|
||||
|
||||
|
||||
+19
-7
@@ -1,17 +1,30 @@
|
||||
## Changelog
|
||||
## Archived Versions - Changelog
|
||||
|
||||
This is a high-level changelog. Calls out some of the high level features batched
|
||||
by release.
|
||||
|
||||
- For the live changelog, see [big-agi.com/changes](https://big-agi.com/changes)
|
||||
- For the live roadmap, please see [the GitHub project](https://github.com/users/enricoros/projects/4/views/2)
|
||||
|
||||
### 1.17.0 - Jun 2024
|
||||
> NOTE: with the release of 2.0.0 we switching to [big-agi.com/changes](https://big-agi.com/changes) for the
|
||||
> continuously updated changelog.
|
||||
|
||||
- milestone: [1.17.0](https://github.com/enricoros/big-agi/milestone/17)
|
||||
- work in progress: [big-AGI open roadmap](https://github.com/users/enricoros/projects/4/views/2), [help here](https://github.com/users/enricoros/projects/4/views/4)
|
||||
### What's New in 2 · Oct 31, 2025 · Open
|
||||
|
||||
### What's New in 1.16.1...1.16.9 · Jan 21, 2025 (patch releases)
|
||||
- **Big-AGI Open** is ready and more productive and faster than ever, with:
|
||||
- **Beam 2**: multi-modal, program-based, follow-ups, save presets
|
||||
- Top-notch AI models support including **agentic models** and **reasoning models**
|
||||
- **Image Generation** and editing with Nano Banana and gpt-image-1
|
||||
- **Web Search** with citations for supported models
|
||||
- **UI** & Mobile UI overhaul with peeking and side panels
|
||||
- And all of the [Big-AGI 2 changes](https://github.com/enricoros/big-AGI/issues/567#issuecomment-2262187617) and more
|
||||
- Built for the future, madly optimized
|
||||
|
||||
### What's New in 1.16.1...1.16.13 · (patch releases)
|
||||
|
||||
- 1.16.13: Docker fix (#840)
|
||||
- 1.16.12: Dockerfile update (#840)
|
||||
- 1.16.11: v1 final release, documentation updates
|
||||
- 1.16.10: OpenRouter models support
|
||||
- 1.16.9: Docker Gemini fix, R1 models support
|
||||
- 1.16.8: OpenAI ChatGPT-4o Latest, o1 models support
|
||||
@@ -60,7 +73,7 @@ by release.
|
||||
- New **[Perplexity](https://www.perplexity.ai/)** and **[Groq](https://groq.com/)** integration (thanks @Penagwin). [#407](https://github.com/enricoros/big-AGI/issues/407), [#427](https://github.com/enricoros/big-AGI/issues/427)
|
||||
- **[LocalAI](https://localai.io/models/)** deep integration, including support for [model galleries](https://github.com/enricoros/big-AGI/issues/411)
|
||||
- **Mistral** Large and Google **Gemini 1.5** support
|
||||
- Performance optimizations: runs [much faster](https://twitter.com/enricoros/status/1756553038293303434?utm_source=localhost:3000&utm_medium=big-agi), saves lots of power, reduces memory usage
|
||||
- Performance optimizations: runs [much faster](https://x.com/enricoros/status/1756553038293303434?utm_source=localhost:3000&utm_medium=big-agi), saves lots of power, reduces memory usage
|
||||
- Enhanced UX with auto-sizing charts, refined search and folder functionalities, perfected scaling
|
||||
- And with more UI improvements, documentation, bug fixes (20 tickets), and developer enhancements
|
||||
- [Release notes](https://github.com/enricoros/big-AGI/releases/tag/v1.14.0), and changes [v1.13.1...v1.14.0](https://github.com/enricoros/big-AGI/compare/v1.13.1...v1.14.0) (233 commits, 8,000+ lines changed)
|
||||
@@ -218,7 +231,6 @@ For Developers:
|
||||
- **[Install Mobile APP](../docs/pixels/feature_pwa.png)** 📲 looks like native (@harlanlewis)
|
||||
- **[UI language](../docs/pixels/feature_language.png)** with auto-detect, and future app language! (@tbodyston)
|
||||
- **PDF Summarization** 🧩🤯 - ask questions to a PDF! (@fredliubojin)
|
||||
- **Code Execution: [Codepen](https://codepen.io/)** 💻 (@harlanlewis)
|
||||
- **[SVG Drawing](../docs/pixels/feature_svg_drawing.png)** - draw with AI 🎨
|
||||
- Chats: multiple chats, AI titles, Import/Export, Selection mode
|
||||
- Rendering: Markdown, SVG, improved Code blocks
|
||||
|
||||
@@ -0,0 +1,55 @@
|
||||
# Google Drive Integration
|
||||
|
||||
Attach files from Google Drive directly in the chat composer.
|
||||
|
||||
## Setup
|
||||
|
||||
### 1. Enable APIs
|
||||
|
||||
In [Google Cloud Console](https://console.cloud.google.com/):
|
||||
|
||||
1. Go to **APIs & Services > Library**
|
||||
2. Enable **Google Drive API** and **Google Picker API**
|
||||
|
||||
### 2. Configure OAuth
|
||||
|
||||
1. Go to **APIs & Services > OAuth consent screen**
|
||||
2. Create consent screen (External or Internal)
|
||||
3. Add scope: `https://www.googleapis.com/auth/drive.file`
|
||||
4. Add test users if in testing mode
|
||||
|
||||
### 3. Create Credentials
|
||||
|
||||
1. Go to **APIs & Services > Credentials**
|
||||
2. Create **OAuth client ID** (Web application)
|
||||
3. Add JavaScript origins:
|
||||
- `http://localhost:3000` (dev)
|
||||
- `https://your-domain.com` (prod)
|
||||
|
||||
### 4. Set Environment Variable
|
||||
|
||||
```bash
|
||||
NEXT_PUBLIC_GOOGLE_DRIVE_CLIENT_ID=your-client-id.apps.googleusercontent.com
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
- Click **Drive** button in attachment menu
|
||||
|
||||
## Supported Files
|
||||
|
||||
| Type | Export Format |
|
||||
|-----------------|---------------------|
|
||||
| Regular files | Downloaded directly |
|
||||
| Google Docs | Markdown (.md) |
|
||||
| Google Sheets | CSV (.csv) |
|
||||
| Google Slides | PDF (.pdf) |
|
||||
| Google Drawings | SVG (.svg) |
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
**Picker won't open**: Check `NEXT_PUBLIC_GOOGLE_DRIVE_CLIENT_ID` is set and APIs are enabled.
|
||||
|
||||
**OAuth errors**: Verify your domain is in authorized JavaScript origins. Add yourself as test user if app is in testing mode.
|
||||
|
||||
**Download fails**: Check file permissions and that Drive API is enabled.
|
||||
@@ -41,6 +41,8 @@ In addition to using the UI, configuration can also be done using
|
||||
|
||||
### Integration: Models Gallery
|
||||
|
||||
> Note: The Gallery Admin feature described below may have been removed or renamed in recent versions of big-AGI.
|
||||
|
||||
If the running LocalAI instance is configured with a [Model Gallery](https://localai.io/models/):
|
||||
|
||||
- Go to Models > LocalAI
|
||||
@@ -54,7 +56,7 @@ If the running LocalAI instance is configured with a [Model Gallery](https://loc
|
||||
|
||||
At the time of writing, LocalAI does not publish the model `context window size`.
|
||||
Every model is assumed to be capable of chatting, and with a context window of 4096 tokens.
|
||||
Please update the [src/modules/llms/transports/server/openai/models/models.data.ts](../src/modules/llms/server/openai/models/models.data.ts)
|
||||
Please update the [src/modules/llms/server/models.mappings.ts](../src/modules/llms/server/models.mappings.ts)
|
||||
file with the mapping information between LocalAI model IDs and names/descriptions/tokens, etc.
|
||||
|
||||
# 🤝 Support
|
||||
|
||||
@@ -1,8 +1,7 @@
|
||||
# OpenRouter Configuration
|
||||
|
||||
[OpenRouter](https://openrouter.ai) is a standalone, premium service
|
||||
that provides access to <Link href='https://openrouter.ai/docs#models' target='_blank'>exclusive AI models</Link>
|
||||
such as GPT-4 32k, Claude, and more. These models are typically not available to the public.
|
||||
that provides access to a wide range of AI models from multiple providers through a single API.
|
||||
This document details the process of integrating OpenRouter with big-AGI.
|
||||
|
||||
### 1. OpenRouter Account Setup and API Key Generation
|
||||
@@ -20,7 +19,7 @@ This document details the process of integrating OpenRouter with big-AGI.
|
||||

|
||||
3. Input the API key into the **OpenRouter API Key** field, and load the Models.
|
||||

|
||||
4. OpenAI GPT4-32k and other models will now be accessible and selectable in the application.
|
||||
4. Models from all supported providers will now be accessible and selectable in the application.
|
||||
|
||||
In addition to using the UI, configuration can also be done using
|
||||
[environment variables](environment-variables.md).
|
||||
@@ -30,5 +29,5 @@ In addition to using the UI, configuration can also be done using
|
||||
OpenRouter independently manages its service and pricing and is not affiliated with big-AGI.
|
||||
For more detailed information, please visit [this page](https://openrouter.ai/docs#models).
|
||||
|
||||
Please note that running large models such as GPT-4 32k can be costly and may rapidly consume
|
||||
credits - a single prompt may cost $1 or more, at the time of writing.
|
||||
Please note that running large models can be costly and may rapidly consume credits.
|
||||
Check model pricing on the OpenRouter website before use.
|
||||
@@ -49,8 +49,8 @@ Edit the `src/data.ts` file to customize personas. This file houses the default
|
||||
Adapt the UI to match your project's aesthetic, incorporate new features, or exclude unnecessary ones.
|
||||
|
||||
- [ ] Adjust `src/common/app.theme.ts` for theme changes: colors, spacing, button appearance, animations, etc
|
||||
- [ ] Modify `src/common/app.config.tsx` to alter the application's name
|
||||
- [ ] Update `src/common/app.nav.tsx` to revise the navigation bar
|
||||
- [ ] Modify `src/common/app.release.ts` to alter the application's name
|
||||
- [ ] Update `src/common/app.nav.ts` to revise the navigation bar
|
||||
|
||||
### Add a Message of the Day
|
||||
|
||||
@@ -71,7 +71,7 @@ Example: `NEXT_PUBLIC_MOTD=🚀 New features available in {{app_build_pkgver}}!
|
||||
|
||||
Test your application thoroughly using local development (refer to README.md for local build instructions). Deploy using your preferred hosting service. big-AGI supports deployment on platforms like Vercel, Docker, or any Node.js-compatible service, especially those supporting NextJS's "Edge Runtime."
|
||||
|
||||
- [deploy-cloudflare.md](deploy-cloudflare.md): for Cloudflare Workers deployment
|
||||
- [deploy-cloudflare.md](deploy-cloudflare.md): for Cloudflare Pages deployment (limited support)
|
||||
- [deploy-docker.md](deploy-docker.md): for Docker deployment instructions and examples
|
||||
- [deploy-k8s.md](deploy-k8s.md): for Kubernetes deployment instructions and examples
|
||||
|
||||
|
||||
@@ -51,13 +51,13 @@ Vercel Analytics and Speed Insights are local API endpoints deployed to your dom
|
||||
domain. Furthermore, the Vercel Analytics service is privacy-friendly, and does not track individual users.
|
||||
|
||||
This service is available to system administrators and is automatically enabled when deploying to Vercel.
|
||||
The code that activates Vercel Analytics is located in the `src/pages/_app.tsx` file:
|
||||
The code that activates Vercel Analytics is located in the `pages/_app.tsx` file:
|
||||
|
||||
```tsx
|
||||
const MyApp = ({ Component, emotionCache, pageProps }: MyAppProps) => <>
|
||||
...
|
||||
{isVercelFromFrontend && <VercelAnalytics debug={false} />}
|
||||
{isVercelFromFrontend && <VercelSpeedInsights debug={false} sampleRate={1 / 2} />}
|
||||
{Is.Deployment.VercelFromFrontend && <VercelAnalytics debug={false} />}
|
||||
{Is.Deployment.VercelFromFrontend && <VercelSpeedInsights debug={false} sampleRate={1 / 2} />}
|
||||
...
|
||||
</>;
|
||||
```
|
||||
|
||||
@@ -1,18 +1,20 @@
|
||||
---
|
||||
unlisted: true
|
||||
---
|
||||
|
||||
# Deploying a Next.js App on Cloudflare Pages
|
||||
|
||||
> WARNING: Cloudflare Pages does not support traditional NodeJS runtimes, but only Edge Runtime functions.
|
||||
> WARNING: Cloudflare Pages only supports Edge Runtime functions, not the full Node.js runtime.
|
||||
>
|
||||
> In this project we use Prisma connected to serverless Postgres, which at the moment cannot run on
|
||||
> edge functions, so we cannot deploy this project on Cloudflare Pages.
|
||||
> The cloud router in this project requires a Node.js runtime for Supabase SDK, authentication,
|
||||
> sync, and other server-side features that cannot run on Cloudflare's edge runtime.
|
||||
>
|
||||
> Workaround: Step 3.4. has been added below, to DELETE the NodeJS traditional runtime - which means that some
|
||||
> Workaround: Step 3.4. has been added below, to DELETE the Node.js cloud router - which means that some
|
||||
> parts of this application will not work.
|
||||
> - [Side effects](https://github.com/enricoros/big-agi/blob/main/src/apps/chat/trade/server/trade.router.ts#L19):
|
||||
> Sharing functionality to DB, and import from ChatGPT share, and post to Paste.GG will not work
|
||||
> - [Side effects](https://github.com/enricoros/big-agi/blob/main/src/modules/trade/server/trade.router.ts):
|
||||
> Sharing functionality, import from ChatGPT share, and post to Paste.GG will not work
|
||||
> - Cloud features (sync, auth, payments) will not be available
|
||||
> - See [Issue 174](https://github.com/enricoros/big-agi/issues/174).
|
||||
>
|
||||
> Longer term: follow [prisma/prisma: Support Edge Function deployments](https://github.com/prisma/prisma/issues/21394)
|
||||
> and convert the Node runtime to Edge runtime once Prisma supports it.
|
||||
|
||||
This guide provides steps to deploy your Next.js app on Cloudflare Pages.
|
||||
It is based on the [official Cloudflare developer documentation](https://developers.cloudflare.com/pages/framework-guides/deploy-a-nextjs-site/),
|
||||
|
||||
@@ -19,7 +19,6 @@ services:
|
||||
- .env
|
||||
environment:
|
||||
- PUPPETEER_WSS_ENDPOINT=ws://browserless:3000
|
||||
command: [ "next", "start", "-p", "3000" ]
|
||||
depends_on:
|
||||
- browserless
|
||||
|
||||
|
||||
@@ -1,14 +0,0 @@
|
||||
# Why big-AGI?
|
||||
Placeholder for a document that demonstrates the productivity and unique features of Big-AGI.
|
||||
|
||||
## Exclusive features
|
||||
- [x] Call AGI
|
||||
- [x] Continuous Voice mode
|
||||
- [x] Diagram generation
|
||||
- [ ] ...
|
||||
|
||||
## Productivity Features
|
||||
- [x] Multi-window to never wait
|
||||
- [x] Multi-Chat to explore different solutions
|
||||
- [x] Rendering of graphs, charts, mindmaps
|
||||
- [ ] ...
|
||||
@@ -3,7 +3,7 @@
|
||||
This document provides an explanation of the environment variables used in the big-AGI application.
|
||||
|
||||
**All variables are optional**; and _UI options_ take precedence over _backend environment variables_,
|
||||
which take place over _defaults_. This file is kept in sync with [`../src/server/env.ts`](../src/server/env.ts).
|
||||
which take precedence over _defaults_. This file is kept in sync with [`../src/server/env.server.ts`](../src/server/env.server.ts).
|
||||
|
||||
### Setting Environment Variables
|
||||
|
||||
@@ -29,12 +29,18 @@ AZURE_OPENAI_API_ENDPOINT=
|
||||
AZURE_OPENAI_API_KEY=
|
||||
ANTHROPIC_API_KEY=
|
||||
ANTHROPIC_API_HOST=
|
||||
BEDROCK_BEARER_TOKEN=
|
||||
BEDROCK_ACCESS_KEY_ID=
|
||||
BEDROCK_SECRET_ACCESS_KEY=
|
||||
BEDROCK_SESSION_TOKEN=
|
||||
BEDROCK_REGION=
|
||||
DEEPSEEK_API_KEY=
|
||||
GEMINI_API_KEY=
|
||||
GROQ_API_KEY=
|
||||
LOCALAI_API_HOST=
|
||||
LOCALAI_API_KEY=
|
||||
MISTRAL_API_KEY=
|
||||
MOONSHOT_API_KEY=
|
||||
OLLAMA_API_HOST=
|
||||
OPENPIPE_API_KEY=
|
||||
OPENROUTER_API_KEY=
|
||||
@@ -65,8 +71,9 @@ HTTP_BASIC_AUTH_PASSWORD=
|
||||
# Frontend variables
|
||||
NEXT_PUBLIC_MOTD=
|
||||
NEXT_PUBLIC_GA4_MEASUREMENT_ID=
|
||||
NEXT_PUBLIC_POSTHOG_KEY=
|
||||
NEXT_PUBLIC_GOOGLE_DRIVE_CLIENT_ID=
|
||||
NEXT_PUBLIC_PLANTUML_SERVER_URL=
|
||||
NEXT_PUBLIC_POSTHOG_KEY=
|
||||
```
|
||||
|
||||
## Backend Variables
|
||||
@@ -98,13 +105,19 @@ requiring the user to enter an API key
|
||||
| `AZURE_OPENAI_API_VERSION` | API version for traditional deployment-based endpoints | Optional, defaults to '2025-04-01-preview' |
|
||||
| `AZURE_DEPLOYMENTS_API_VERSION` | API version for the deployments listing endpoint | Optional, defaults to '2023-03-15-preview' |
|
||||
| `ANTHROPIC_API_KEY` | The API key for Anthropic | Optional |
|
||||
| `ANTHROPIC_API_HOST` | Changes the backend host for the Anthropic vendor, to enable platforms such as AWS Bedrock | Optional |
|
||||
| `ANTHROPIC_API_HOST` | Changes the backend host for the Anthropic vendor, for proxies or custom endpoints | Optional |
|
||||
| `BEDROCK_BEARER_TOKEN` | Bedrock long-term API key (`ABSK...`). Takes priority over IAM credentials. Short-term keys only work for runtime, not model listing | Optional |
|
||||
| `BEDROCK_ACCESS_KEY_ID` | AWS IAM Access Key ID for Bedrock (Claude models via AWS) | Optional, but if set `BEDROCK_SECRET_ACCESS_KEY` must also be set |
|
||||
| `BEDROCK_SECRET_ACCESS_KEY` | AWS IAM Secret Access Key for Bedrock | Optional, but if set `BEDROCK_ACCESS_KEY_ID` must also be set |
|
||||
| `BEDROCK_SESSION_TOKEN` | AWS Session Token for temporary/STS credentials | Optional |
|
||||
| `BEDROCK_REGION` | AWS region for Bedrock (e.g., `us-east-1`, `us-west-2`, `eu-west-1`) | Optional, defaults to `us-east-1` |
|
||||
| `DEEPSEEK_API_KEY` | The API key for Deepseek AI | Optional |
|
||||
| `GEMINI_API_KEY` | The API key for Google AI's Gemini | Optional |
|
||||
| `GROQ_API_KEY` | The API key for Groq Cloud | Optional |
|
||||
| `LOCALAI_API_HOST` | Sets the URL of the LocalAI server, or defaults to http://127.0.0.1:8080 | Optional |
|
||||
| `LOCALAI_API_KEY` | The (Optional) API key for LocalAI | Optional |
|
||||
| `MISTRAL_API_KEY` | The API key for Mistral | Optional |
|
||||
| `MOONSHOT_API_KEY` | The API key for Moonshot AI | Optional |
|
||||
| `OLLAMA_API_HOST` | Changes the backend host for the Ollama vendor. See [config-local-ollama.md](config-local-ollama.md) | |
|
||||
| `OPENPIPE_API_KEY` | The API key for OpenPipe | Optional |
|
||||
| `OPENROUTER_API_KEY` | The API key for OpenRouter | Optional |
|
||||
@@ -130,10 +143,11 @@ Enable the app to Talk, Draw, and Google things up.
|
||||
|
||||
| Variable | Description |
|
||||
|:---------------------------|:------------------------------------------------------------------------------------------------------------------------|
|
||||
| **Text-To-Speech** | [ElevenLabs](https://elevenlabs.io/) is a high quality speech synthesis service |
|
||||
| **Text-To-Speech** | ElevenLabs, Inworld, OpenAI TTS, LocalAI, and browser Web Speech API are supported |
|
||||
| `ELEVENLABS_API_KEY` | ElevenLabs API Key - used for calls, etc. |
|
||||
| `ELEVENLABS_API_HOST` | Custom host for ElevenLabs |
|
||||
| `ELEVENLABS_VOICE_ID` | Default voice ID for ElevenLabs |
|
||||
| | *Note: OpenAI TTS and LocalAI TTS reuse credentials from your configured LLM services (no separate env vars needed)* |
|
||||
| **Google Custom Search** | [Google Programmable Search Engine](https://programmablesearchengine.google.com/about/) produces links to pages |
|
||||
| `GOOGLE_CLOUD_API_KEY` | Google Cloud API Key, used with the '/react' command - [Link to GCP](https://console.cloud.google.com/apis/credentials) |
|
||||
| `GOOGLE_CSE_ID` | Google Custom/Programmable Search Engine ID - [Link to PSE](https://programmablesearchengine.google.com/) |
|
||||
@@ -152,8 +166,9 @@ The value of these variables are passed to the frontend (Web UI) - make sure the
|
||||
| `NEXT_PUBLIC_DEBUG_BREAKS` | (optional, development) When set to 'true', enables automatic debugger breaks on DEV/error/critical logs in development builds |
|
||||
| `NEXT_PUBLIC_MOTD` | Message of the Day - displays a dismissible banner at the top of the app (see [customizations](customizations.md) for the template variables). Example: 🔔 Welcome to our deployment! Version {{app_build_pkgver}} built on {{app_build_time}}. |
|
||||
| `NEXT_PUBLIC_GA4_MEASUREMENT_ID` | (optional) The measurement ID for Google Analytics 4. (see [deploy-analytics](deploy-analytics.md)) |
|
||||
| `NEXT_PUBLIC_POSTHOG_KEY` | (optional) Key for PostHog analytics. (see [deploy-analytics](deploy-analytics.md)) |
|
||||
| `NEXT_PUBLIC_GOOGLE_DRIVE_CLIENT_ID` | (optional) Google OAuth Client ID for Drive Picker. Can reuse `AUTH_GOOGLE_ID`. See [Google Drive](config-feature-google-drive.md) |
|
||||
| `NEXT_PUBLIC_PLANTUML_SERVER_URL` | The URL of the PlantUML server, used for rendering UML diagrams. Allows using custom local servers. |
|
||||
| `NEXT_PUBLIC_POSTHOG_KEY` | (optional) Key for PostHog analytics. (see [deploy-analytics](deploy-analytics.md)) |
|
||||
|
||||
> Important: these variables must be set at build time, which is required by Next.js to pass them to the frontend.
|
||||
> This is in contrast to the backend variables, which can be set when starting the local server/container.
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
---
|
||||
unlisted: true
|
||||
---
|
||||
|
||||
# Big-AGI Advanced Tips & Tricks
|
||||
|
||||
> 🚨 This file is not meant for publication, and it's just been created as a handbook with tips
|
||||
|
||||
@@ -30,6 +30,12 @@ You can see your data in your browser's local storage and IndexedDB - try it you
|
||||
|
||||

|
||||
|
||||
### Sync for Authenticated Users
|
||||
|
||||
Users with accounts on big-agi.com who opt into Sync (a Pro feature) have their entity data - such as conversations and personas - replicated to the server for multi-device access.
|
||||
Server-side data is isolated per-user using Row Level Security (RLS), ensuring that no other user can access your synced data.
|
||||
Sync is entirely optional; without it, all data remains local to your browser.
|
||||
|
||||
### What This Means For You
|
||||
|
||||
Storing data in your browser means:
|
||||
@@ -43,7 +49,7 @@ Storing data in your browser means:
|
||||
|
||||
Big-AGI generates a _device identifier_ that combines timestamp and random components, stored only on your device. This identifier:
|
||||
|
||||
- Is used only for the **optional sync functionality** between your devices (not yet ready)
|
||||
- Is used only for the **optional sync functionality** between your devices
|
||||
- Helps maintain data consistency when using Big-AGI across multiple devices
|
||||
- Remains completely local unless you explicitly enable sync
|
||||
- Is not used for tracking, analytics, or telemetry
|
||||
@@ -74,6 +80,27 @@ and then are sent to the upstream AI services.
|
||||
|
||||

|
||||
|
||||
### Direct Connection (Browser → AI Service)
|
||||
|
||||
Most AI services offer a **Direct Connection** toggle (under a service's Advanced settings). When enabled, the browser calls the AI provider's API directly, skipping the Big-AGI server entirely.
|
||||
|
||||
Benefits:
|
||||
|
||||
- **No 4.5 MB upload limit** - the Vercel body-size cap does not apply, so larger attachments and long prompts go through.
|
||||
- **No 300-second timeout** - the Vercel function timeout does not apply, so long-running generations keep streaming.
|
||||
- **More privacy** - connection metadata (IP, timestamp, edge region, Vercel telemetry) is not observable by the Big-AGI edge server.
|
||||
|
||||
Tradeoff:
|
||||
|
||||
- **Slightly more downlink bandwidth**: when traffic passes through the Big-AGI edge, repetitive streaming frames are compacted; direct streams arrive verbatim from the provider.
|
||||
|
||||
Availability requires both:
|
||||
|
||||
1. The API key is set in your browser (client-side), not via server environment variables. Server-key deployments cannot use Direct Connection because the browser has no credential to send.
|
||||
2. The AI service allows CORS (browser-origin requests). Most major providers do; Big-AGI sets any extra headers they require.
|
||||
|
||||
Direct Connection is a net win on speed, limits, and privacy whenever the provider permits it.
|
||||
|
||||
## Security Best Practices
|
||||
|
||||
**Basic Security**:
|
||||
|
||||
@@ -2,6 +2,26 @@
|
||||
|
||||
Quick answers to common questions about Big-AGI. For detailed documentation, see our [Website Docs](https://big-agi.com/docs).
|
||||
|
||||
### Connectivity
|
||||
|
||||
<details open>
|
||||
<summary><b>What is "Direct Connection" and should I enable it?</b></summary>
|
||||
|
||||
Direct Connection lets the browser call the AI provider's API directly, skipping the Big-AGI edge server. It appears as a toggle in each AI service's Advanced settings when your API key is set client-side.
|
||||
|
||||
**When available, it is a net win**: faster, fewer restrictions, more privacy.
|
||||
|
||||
- **No 4.5 MB upload limit** (Vercel body-size cap does not apply).
|
||||
- **No 300-second timeout** (Vercel function timeout does not apply; call length is bound only by the AI service).
|
||||
- **More privacy** - connection metadata (IP, timestamp, edge region, Vercel telemetry) is not observable by the Big-AGI edge server.
|
||||
- **Slightly more downlink bandwidth** - when passing through the edge, Big-AGI sheds repetitive streaming frames; direct streams arrive verbatim.
|
||||
|
||||
**When it is unavailable**:
|
||||
|
||||
1. **Server-side keys** - if the deployment stores API keys in server environment variables, the browser has no credential to send directly.
|
||||
2. **Provider does not allow CORS** - browsers cannot call APIs that block cross-origin requests. Most major providers permit it; Big-AGI sets any required headers.
|
||||
</details>
|
||||
|
||||
### Versions
|
||||
|
||||
<details open>
|
||||
|
||||
+4
-10
@@ -7,7 +7,7 @@ process for your own instance of big-AGI and related products.
|
||||
|
||||
**Try big-AGI** - You don't need to install anything if you want to play with big-AGI
|
||||
and have your API keys to various model services. You can access our free instance on [big-AGI.com](https://big-agi.com).
|
||||
The free instance runs the latest `main-stable` branch from this repository.
|
||||
The free instance runs the latest `main` branch from this repository.
|
||||
|
||||
## 🧩 Build-your-own
|
||||
|
||||
@@ -72,9 +72,8 @@ Create your GitHub fork, create a Vercel project over that fork, and deploy it.
|
||||
|
||||
### Deploy on Cloudflare
|
||||
|
||||
Deploy on Cloudflare's global network by installing big-AGI on
|
||||
Cloudflare Pages. Check out the [Cloudflare Installation Guide](deploy-cloudflare.md)
|
||||
for step-by-step instructions.
|
||||
> Note: Cloudflare Pages deployment has limitations due to Edge Runtime constraints.
|
||||
> See the [Cloudflare guide](deploy-cloudflare.md) for details and known issues.
|
||||
|
||||
### Docker Deployments
|
||||
|
||||
@@ -136,11 +135,6 @@ Deploy big-AGI on a Kubernetes cluster for enhanced scalability and management.
|
||||
|
||||
For more detailed instructions on Kubernetes deployment, including updating and troubleshooting, refer to our [Kubernetes Deployment Guide](deploy-k8s.md).
|
||||
|
||||
### Midori AI Subsystem for Docker Deployment
|
||||
|
||||
Follow the instructions found on [Midori AI Subsystem Site](https://io.midori-ai.xyz/subsystem/manager/)
|
||||
for your host OS. After completing the setup process, install the Big-AGI docker backend to the Midori AI Subsystem.
|
||||
|
||||
## Enterprise-Grade Installation
|
||||
|
||||
For businesses seeking a fully-managed, scalable solution, consider our managed installations.
|
||||
@@ -151,6 +145,6 @@ Enjoy all the features of big-AGI without the hassle of infrastructure managemen
|
||||
Join our vibrant community of developers, researchers, and AI enthusiasts. Share your projects, get help, and collaborate with others.
|
||||
|
||||
- [Discord Community](https://discord.gg/MkH4qj2Jp9)
|
||||
- [Twitter](https://twitter.com/enricoros)
|
||||
- [X (Twitter)](https://x.com/enricoros)
|
||||
|
||||
For any questions or inquiries, please don't hesitate to [reach out to our team](mailto:hello@big-agi.com).
|
||||
|
||||
@@ -28,6 +28,7 @@ stringData:
|
||||
LOCALAI_API_HOST: ""
|
||||
LOCALAI_API_KEY: ""
|
||||
MISTRAL_API_KEY: ""
|
||||
MOONSHOT_API_KEY: ""
|
||||
OLLAMA_API_HOST: ""
|
||||
OPENPIPE_API_KEY: ""
|
||||
OPENROUTER_API_KEY: ""
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
---
|
||||
unlisted: true
|
||||
---
|
||||
|
||||
# ReAct: question answering with Reasoning and Actions
|
||||
|
||||
## What is ReAct?
|
||||
|
||||
@@ -0,0 +1,23 @@
|
||||
import { defineConfig } from "eslint/config";
|
||||
import path from "node:path";
|
||||
import { fileURLToPath } from "node:url";
|
||||
import js from "@eslint/js";
|
||||
import { FlatCompat } from "@eslint/eslintrc";
|
||||
|
||||
const __filename = fileURLToPath(import.meta.url);
|
||||
const __dirname = path.dirname(__filename);
|
||||
const compat = new FlatCompat({
|
||||
baseDirectory: __dirname,
|
||||
recommendedConfig: js.configs.recommended,
|
||||
allConfig: js.configs.all
|
||||
});
|
||||
|
||||
export default defineConfig([{
|
||||
extends: compat.extends("next/core-web-vitals"),
|
||||
rules: {
|
||||
//
|
||||
"react-hooks/exhaustive-deps": ["warn", {
|
||||
additionalHooks: "(useMemoShallowStable)",
|
||||
}],
|
||||
},
|
||||
}]);
|
||||
@@ -1,35 +1,39 @@
|
||||
# Knowledge Base
|
||||
## Knowledge Base
|
||||
|
||||
Internal documentation for Big-AGI architecture and systems, for use by AI agents and developers.
|
||||
Architecture and system documentation is available in the `/kb/` knowledge base, for use by AI agents and developers.
|
||||
|
||||
**Structure:**
|
||||
- `/kb/KB.md` - Already in context: this text
|
||||
- `/kb/vision-inlined.md` - Already in context (next section): long-term vision and north stars
|
||||
- `/kb/modules/` - Core business logic (e.g. AIX)
|
||||
- `/kb/systems/` - Infrastructure (routing, startup)
|
||||
|
||||
## Index
|
||||
|
||||
### Modules Documentation
|
||||
|
||||
#### AIX - AI Communication Framework
|
||||
- **[AIX.md](modules/AIX.md)** - AIX streaming architecture documentation
|
||||
- **[AIX-callers-analysis.md](modules/AIX-callers-analysis.md)** - Analysis of AIX entry points, call chains, common and different rendering, error handling, etc.
|
||||
|
||||
#### CSF - Client-Side Fetch
|
||||
- **[CSF.md](systems/client-side-fetch.md)** - Direct browser-to-API communication for LLM requests
|
||||
|
||||
### Systems Documentation
|
||||
|
||||
#### Core Platform Systems
|
||||
- **[app-routing.md](systems/app-routing.md)** - Next.js routing, provider stack, and display state hierarchy
|
||||
- **[LLM-parameters-system.md](systems/LLM-parameters-system.md)** - Language model parameter flow across the system
|
||||
- **[LLM-vendor-integration.md](modules/LLM-vendor-integration.md)** - Adding new LLM providers
|
||||
|
||||
## Guidelines
|
||||
### KB Guidelines
|
||||
|
||||
### Writing Style
|
||||
#### Writing Style
|
||||
|
||||
- **Direct and factual** - No marketing language
|
||||
- **Present tense** - "AIX handles streaming" not "AIX will handle"
|
||||
- **Active voice** - "The system processes" not "Processing is done by"
|
||||
- **Concrete examples** - Show actual code/config when helpful, briefly
|
||||
|
||||
### Maintenance
|
||||
#### Maintenance
|
||||
|
||||
- Remove outdated information when detected!
|
||||
- Remove outdated knowledge base information when detected
|
||||
- Keep cross-references current when files move
|
||||
|
||||
@@ -7,8 +7,8 @@ This document analyzes all AIX function callers and their patterns for message r
|
||||
### Three-Tier Call Hierarchy
|
||||
|
||||
**Core AIX Functions** (Direct tRPC API callers):
|
||||
- `aixChatGenerateContent_DMessage_FromConversation` - 8 callers (conversation streaming)
|
||||
- `aixChatGenerateContent_DMessage` - 6 callers (direct request/response)
|
||||
- `aixChatGenerateContent_DMessage_FromConversation` - 9 callers (conversation streaming)
|
||||
- `aixChatGenerateContent_DMessage_orThrow` - 6 callers (direct request/response)
|
||||
- `aixChatGenerateText_Simple` - 12 callers (text-only utilities)
|
||||
|
||||
**Utility Layer** (Hooks & Functions):
|
||||
@@ -24,6 +24,7 @@ This document analyzes all AIX function callers and their patterns for message r
|
||||
| **Caller** | **Context** | **Message Removal** | **Placeholder** | **Error Handling** |
|
||||
|------------|-------------|-------------------|----------------|-------------------|
|
||||
| **Chat Persona** | `'conversation'` | `messageWasInterruptedAtStart()` → `removeMessage()` | None | Error fragments |
|
||||
| **XE Chat Generate** | `'conversation'` | `messageWasInterruptedAtStart()` → `removeMessage()` | `'...'` placeholder | Error fragments via messageEditor |
|
||||
| **Beam Scatter** | `'beam-scatter'` | `messageWasInterruptedAtStart()` → empty message | `SCATTER_PLACEHOLDER` | Ray status update |
|
||||
| **Beam Gather** | `'beam-gather'` | `messageWasInterruptedAtStart()` → clear fragments | `GATHER_PLACEHOLDER` | Re-throw errors |
|
||||
| **Beam Follow-up** | `'beam-followup'` | `messageWasInterruptedAtStart()` → remove message | `FOLLOWUP_PLACEHOLDER` | Status updates |
|
||||
|
||||
+5
-4
@@ -37,6 +37,7 @@ Built with tRPC, it manages the lifecycle of AI-generated content from request t
|
||||
| Perplexity | ✅ | ❌ (rejected) | | ✅ | Yes + 📦 | |
|
||||
| TogetherAI | ✅ | ✅ | | ✅ | Yes + 📦 | |
|
||||
| xAI | | | | | | |
|
||||
| Z.ai | ✅ | ✅ | Img: ✅ | ✅ | Yes + 📦 | Thinking mode |
|
||||
| Ollama (2) | ❌ (broken) | ? | | | | |
|
||||
|
||||
Notes:
|
||||
@@ -91,12 +92,12 @@ AIX is organized into the following files and folders:
|
||||
|
||||
- Dispatch (`/server/dispatch/`) - Server to AI Provider communication:
|
||||
- `/server/dispatch/chatGenerate/`: Content Generation with chat-style inputs:
|
||||
- `./adapters/`: Adapters for creating API requests for different AI protocols (Anthropic, Gemini, OpenAI).
|
||||
- `./parsers/`: Parsers for parsing streaming/non-streamin responses from different AI protocols (same 3).
|
||||
- `./adapters/`: Adapters for creating API requests for different AI protocols (Anthropic, Bedrock, Gemini, OpenAI Chat Completions, OpenAI Responses, xAI Responses).
|
||||
- `./parsers/`: Parsers for parsing streaming/non-streaming responses from different AI protocols (Anthropic, Bedrock Converse, Gemini, OpenAI, OpenAI Responses).
|
||||
- `chatGenerate.dispatch.ts`: Creates a pipeline to execute Chat Generation to a specific provider.
|
||||
- `ChatGenerateTransmitter.ts`: Used to serialize and transmit AixWire_Particles to the client.
|
||||
- `/server/dispatch/wiretypes/`: AI provider Wire Types:
|
||||
- Type definitions for different AI providers/protocols (Anthropic, Gemini, OpenAI).
|
||||
- Type definitions for different AI providers/protocols (Anthropic, Bedrock Converse, Gemini, OpenAI, xAI).
|
||||
- `stream.demuxers.ts`: Handles demuxing of different stream formats.
|
||||
|
||||
## 3. Architecture Diagram
|
||||
@@ -159,7 +160,7 @@ sequenceDiagram
|
||||
AIX Client ->> AIX Client: Display error message
|
||||
else DMessageDocPart
|
||||
AIX Client ->> AIX Client: Process and display document
|
||||
else DMetaPlaceholderPart
|
||||
else DVoidPlaceholderPart
|
||||
AIX Client ->> AIX Client: Handle placeholder (non-submitted)
|
||||
end
|
||||
end
|
||||
|
||||
@@ -0,0 +1,126 @@
|
||||
# LLM Vendor Integration Guide
|
||||
|
||||
How to add support for new LLM providers in Big-AGI. There are two integration paths, and
|
||||
the dynamic backend path is strongly preferred for new vendors.
|
||||
|
||||
## Integration Paths
|
||||
|
||||
### Path 1: Dynamic Backend (preferred)
|
||||
|
||||
For any provider with an **OpenAI-compatible API** (which is nearly all new providers).
|
||||
|
||||
**Surface area**: 1-2 files, no UI changes, no registry changes.
|
||||
|
||||
A dynamic backend provides:
|
||||
- Hostname-based auto-detection when the user adds the provider's API URL
|
||||
- Automatic model list parsing with vendor-specific metadata (pricing, context windows, capabilities)
|
||||
- Zero UI code - uses the existing "Custom OpenAI-compatible" service setup
|
||||
|
||||
**Files touched**:
|
||||
- `src/modules/llms/server/openai/models/{vendor}.models.ts` (required) - model definitions + hostname heuristic
|
||||
- `src/modules/llms/server/openai/wiretypes/{vendor}.wiretypes.ts` (optional) - Zod schemas for vendor-specific wire format
|
||||
- `src/modules/llms/server/listModels.dispatch.ts` - add heuristic to the detection chain (2 lines)
|
||||
|
||||
**What the model file must export**:
|
||||
```typescript
|
||||
// 1. Hostname heuristic - returns true when the user's API URL matches this vendor
|
||||
export function vendorHeuristic(hostname: string): boolean {
|
||||
return hostname.includes('.vendor-domain.com');
|
||||
}
|
||||
|
||||
// 2. Model converter - transforms vendor's /v1/models response to ModelDescriptionSchema[]
|
||||
export function vendorModelsToModelDescriptions(wireModels: unknown): ModelDescriptionSchema[] {
|
||||
// Parse wire format, map to ModelDescriptionSchema with:
|
||||
// - id, label, description
|
||||
// - contextWindow, maxCompletionTokens
|
||||
// - interfaces (Chat, Vision, Fn, Reasoning, etc.)
|
||||
// - chatPrice (input/output per token)
|
||||
// - parameterSpecs (temperature, etc.)
|
||||
}
|
||||
```
|
||||
|
||||
**Existing examples**: `novita.models.ts`, `chutesai.models.ts`, `fireworksai.models.ts`
|
||||
|
||||
MUST also provide the updated vendor icon like other icons in `src/common/components/icons/vendors/`.
|
||||
Make sure all the information is available if in the future we want to promote those to full registered vendors.
|
||||
|
||||
### Path 2: Registered Vendor (heavyweight, discouraged for new providers)
|
||||
|
||||
Full first-class integration with dedicated UI, own dialect, and registry entry. Reserved for
|
||||
providers with **non-OpenAI protocols** (Anthropic, Gemini, Ollama) or providers with enough
|
||||
user demand to warrant a dedicated setup flow.
|
||||
|
||||
**Surface area**: 5+ files across 3 directories.
|
||||
|
||||
**Files touched**:
|
||||
- `src/modules/llms/vendors/{vendor}/{vendor}.vendor.ts` - IModelVendor implementation
|
||||
- `src/modules/llms/vendors/{vendor}/{VendorName}ServiceSetup.tsx` - React UI setup component
|
||||
- `src/modules/llms/vendors/vendors.registry.ts` - registry entry + ModelVendorId union
|
||||
- `src/modules/llms/server/openai/models/{vendor}.models.ts` - model definitions
|
||||
- `src/modules/llms/server/listModels.dispatch.ts` - dispatch case
|
||||
- Possibly server protocol adapter if not OpenAI-compatible
|
||||
- Possibly more files, e.g. wires, etc.
|
||||
- See existing providers and commits that added them for full scope
|
||||
|
||||
**When to use this path**: Only when the provider has a meaningfully different API protocol
|
||||
(not OpenAI-compatible), or when there is significant user demand AND the provider offers
|
||||
unique capabilities that benefit from dedicated UI (e.g., Ollama's local model management).
|
||||
|
||||
When using this path, please add links to upstream documentation. Make sure all constants
|
||||
are correctly handled everywhere, especially for provider-based switches.
|
||||
|
||||
## Decision Criteria
|
||||
|
||||
| Question | Dynamic | Registered |
|
||||
|----------|---------|------------|
|
||||
| OpenAI-compatible API? | Yes - use dynamic | Only if not OAI-compatible |
|
||||
| Needs custom auth UI? | No - uses generic fields | Yes - custom setup form |
|
||||
| Unique protocol? | No | Yes (Anthropic, Gemini, Ollama) |
|
||||
| User demand level | Any | High + sustained |
|
||||
| Maintenance burden | Minimal | Significant (5+ files) |
|
||||
|
||||
## For External Contributors / Vendor Requests
|
||||
|
||||
When vendors or community members request integration via GitHub issues:
|
||||
|
||||
1. **Point them to the dynamic backend path** - it's faster to implement, review, and maintain
|
||||
2. **Requirements for a dynamic backend PR**:
|
||||
- Model file with heuristic + converter exporting `ModelDescriptionSchema[]`
|
||||
- Wire types if the vendor's `/v1/models` response has non-standard fields
|
||||
- Vendor icon (SVG preferred) in `src/common/components/icons/vendors/`
|
||||
- Two-line addition to the heuristic chain in `listModels.dispatch.ts`
|
||||
3. **Do not accept**: New registered vendors for OpenAI-compatible providers. The maintenance
|
||||
cost of a full vendor (UI component, registry entry, dispatch case) is not justified when
|
||||
dynamic detection achieves the same result with a fraction of the code.
|
||||
|
||||
## Architecture Notes
|
||||
|
||||
### How Dynamic Detection Works
|
||||
|
||||
In `listModels.dispatch.ts`, the `case 'openai':` handler:
|
||||
1. Fetches `/v1/models` from the user-provided API host
|
||||
2. Runs the hostname through a chain of heuristics (in order)
|
||||
3. First matching heuristic's converter is used to parse models
|
||||
4. Falls back to stock OpenAI parsing if no heuristic matches
|
||||
|
||||
### Hostname Security
|
||||
|
||||
Hostname matching uses `llmsHostnameMatches()` from `openai.access.ts` which parses the
|
||||
URL properly to prevent DNS spoofing. Always use `.includes()` on the parsed hostname,
|
||||
never on the raw URL string.
|
||||
|
||||
### Key Types
|
||||
|
||||
- `ModelDescriptionSchema` (`llm.server.types.ts`) - output type for all model converters
|
||||
- `DModelInterfaceV1` (`llms.types.ts`) - capability flags (Chat, Vision, Fn, Reasoning, etc.)
|
||||
- `IModelVendor` (`vendors/IModelVendor.ts`) - interface for registered vendors only
|
||||
- `ManualMappings` / `KnownModel` (`models.mappings.ts`) - server-side model patches
|
||||
|
||||
### File Locations
|
||||
|
||||
- Dynamic backends: `src/modules/llms/server/openai/models/`
|
||||
- Wire types: `src/modules/llms/server/openai/wiretypes/`
|
||||
- Dispatch: `src/modules/llms/server/listModels.dispatch.ts`
|
||||
- Registered vendors: `src/modules/llms/vendors/*/`
|
||||
- Vendor icons: `src/common/components/icons/vendors/`
|
||||
- Type definitions: `src/modules/llms/server/llm.server.types.ts`
|
||||
@@ -13,12 +13,9 @@ The LLM parameters system operates across five layers that transform parameters
|
||||
|
||||
The `DModelParameterRegistry` defines all available parameters with their constraints and metadata. Each parameter includes type information, validation rules, and default behavior.
|
||||
|
||||
**Example**: `llmVndOaiReasoningEffort4` defines a 4-value enum with 'medium' as the required fallback.
|
||||
|
||||
**Default Value System**: The registry supports multiple default mechanisms:
|
||||
- `initialValue` - Parameter's base default (e.g., `llmVndOaiRestoreMarkdown: true`)
|
||||
- `requiredFallback` - Fallback for required parameters (e.g., `llmTemperature: 0.5`)
|
||||
- `nullable` - Parameters that can be explicitly null to skip API transmission
|
||||
- `initialValue` - Parameter's base default (e.g., `llmVndOaiRestoreMarkdown: true`)
|
||||
|
||||
### Layer 2: Model Specifications
|
||||
**File**: `src/modules/llms/server/llm.server.types.ts`
|
||||
@@ -27,7 +24,6 @@ Models declare which parameters they support through `parameterSpecs` arrays. Ea
|
||||
|
||||
```typescript
|
||||
parameterSpecs: [
|
||||
{ paramId: 'llmVndOaiReasoningEffort4' },
|
||||
{ paramId: 'llmVndAntThinkingBudget', initialValue: 1024 }, // Override default
|
||||
{ paramId: 'llmVndGeminiThinkingBudget', rangeOverride: [0, 8192] }, // Custom range
|
||||
]
|
||||
@@ -51,20 +47,14 @@ Shows only parameters that are:
|
||||
- Not marked as `hidden`
|
||||
|
||||
**Value Resolution**: Both UIs use `getAllModelParameterValues()` to merge:
|
||||
1. **Fallback values** - Required parameters get their `requiredFallback` values
|
||||
1. **Fallback values** - Implicit parameters get their `LLMImplicitParametersRuntimeFallback` values
|
||||
2. **Initial values** - Model's `initialParameters` (populated during model creation)
|
||||
3. **User values** - User's `userParameters` (highest priority)
|
||||
|
||||
### Layer 4: AIX Translation
|
||||
**File**: `src/modules/aix/client/aix.client.ts`
|
||||
|
||||
The AIX client transforms DLLM parameters to wire protocol format. This layer handles parameter precedence rules and name transformations:
|
||||
|
||||
```typescript
|
||||
// Parameter precedence: newer 4-value version takes priority over 3-value
|
||||
...((llmVndOaiReasoningEffort4 || llmVndOaiReasoningEffort) ?
|
||||
{ vndOaiReasoningEffort: llmVndOaiReasoningEffort4 || llmVndOaiReasoningEffort } : {})
|
||||
```
|
||||
The AIX client transforms DLLM parameters to wire protocol format. This layer handles parameter precedence rules and name transformations.
|
||||
|
||||
**Client Options**: The system supports parameter overrides through `llmOptionsOverride` and complete replacement via `llmUserParametersReplacement`.
|
||||
|
||||
@@ -73,7 +63,7 @@ The AIX client transforms DLLM parameters to wire protocol format. This layer ha
|
||||
|
||||
Server-side adapters translate AIX parameters to vendor APIs. Each vendor may interpret parameters differently:
|
||||
|
||||
- **OpenAI**: `vndOaiReasoningEffort` → `reasoning_effort`
|
||||
- **OpenAI**: `vndEffort` -> `reasoning_effort`
|
||||
- **Perplexity**: Reuses OpenAI parameter format
|
||||
- **OpenAI Responses API**: Maps to structured reasoning config with additional logic
|
||||
|
||||
@@ -81,8 +71,8 @@ Server-side adapters translate AIX parameters to vendor APIs. Each vendor may in
|
||||
|
||||
When a model is loaded:
|
||||
|
||||
1. **Model Creation**: `modelDescriptionToDLLM()` creates the DLLM with empty `initialParameters`
|
||||
2. **Initial Value Application**: `applyModelParameterInitialValues()` populates initial values from:
|
||||
1. **Model Creation**: `_createDLLMFromModelDescription()` creates the DLLM with empty `initialParameters`
|
||||
2. **Initial Value Application**: `applyModelParameterSpecsInitialValues()` populates initial values from:
|
||||
- Model spec `initialValue` (highest priority)
|
||||
- Registry `initialValue` (fallback)
|
||||
3. **Runtime Resolution**: `getAllModelParameterValues()` creates final parameter set:
|
||||
@@ -105,7 +95,7 @@ When a model is loaded:
|
||||
The system maintains type safety through:
|
||||
- `DModelParameterId` union from registry keys
|
||||
- `DModelParameterValue<T>` conditional types for values
|
||||
- `DModelParameterSpec<T>` interfaces for specifications
|
||||
- `DModelParameterSpecAny` interfaces for specifications
|
||||
- Runtime validation via Zod schemas at API boundaries
|
||||
|
||||
## Model Variant Pattern
|
||||
@@ -117,7 +107,6 @@ Some vendors use model variants to enable features, for instance:
|
||||
## Migration and Compatibility
|
||||
|
||||
The architecture supports parameter evolution:
|
||||
- **Version Coexistence**: Both `llmVndOaiReasoningEffort` and `llmVndOaiReasoningEffort4` exist simultaneously
|
||||
- **Precedence Rules**: Newer parameters take priority during AIX translation
|
||||
- **Graceful Degradation**: Unknown parameters log warnings but don't break functionality
|
||||
|
||||
@@ -128,4 +117,4 @@ The architecture supports parameter evolution:
|
||||
- **UI Controls**: `src/modules/llms/models-modal/LLMParametersEditor.tsx`
|
||||
- **AIX Translation**: `src/modules/aix/client/aix.client.ts`
|
||||
- **Wire Types**: `src/modules/aix/server/api/aix.wiretypes.ts`
|
||||
- **Vendor Adapters**: `src/modules/aix/server/dispatch/chatGenerate/adapters/*.ts`
|
||||
- **Vendor Adapters**: `src/modules/aix/server/dispatch/chatGenerate/adapters/*.ts`
|
||||
|
||||
@@ -0,0 +1,29 @@
|
||||
# CSF - Client-Side Fetch
|
||||
|
||||
Client-Side Fetch (CSF), surfaced to users as **"Direct Connection"**, enables direct browser-to-API communication, bypassing the server for LLM requests. When enabled, the browser makes requests directly to vendor APIs (e.g., `api.openai.com`, `api.groq.com`) instead of routing through the Next.js server. This reduces latency, decreases server load, and is particularly useful for local models where the browser can communicate directly with Ollama or LM Studio.
|
||||
|
||||
## User-facing tradeoffs (Direct Connection vs via-server)
|
||||
|
||||
Wins when Direct Connection is on:
|
||||
- **No 4.5MB upload limit** (Vercel body-size cap does not apply to direct browser-to-API requests).
|
||||
- **No 300s function timeout** (Vercel serverless/edge timeout does not apply; call duration is bound only by the AI service).
|
||||
- **More privacy**: connection metadata (IP, timestamp, edge region, Vercel telemetry) is not observable by the Big-AGI edge server.
|
||||
|
||||
Costs:
|
||||
- **Slightly more downlink bandwidth**: when traffic passes through the Big-AGI server, repetitive streaming frames are shed/compacted; direct streams arrive verbatim.
|
||||
|
||||
Availability requires both:
|
||||
1. The API key is on the **client** (localStorage), not a server-side env var. Server-key deployments cannot use CSF because the browser has no credential to send.
|
||||
2. The AI service **allows CORS** from browsers. Most major providers do; some require specific headers which Big-AGI sets.
|
||||
|
||||
Net: Direct Connection is a win on speed, limits, and privacy whenever the provider permits it. It is unavailable when keys are server-side or the provider blocks browser-origin requests.
|
||||
|
||||
## Implementation
|
||||
|
||||
CSF is implemented as an opt-in setting stored as `csf: boolean` in each vendor's service settings. The vendor interface exposes `csfAvailable?: (setup) => boolean` to determine if CSF can be enabled (typically checking if an API key or host is configured). The actual execution happens in `aix.client.direct-chatGenerate.ts` which dynamically imports when CSF is active, making direct fetch calls using the same wire protocols as the server.
|
||||
|
||||
All 20+ supported vendors (OpenAI, Anthropic, Gemini, Ollama, LocalAI, Deepseek, Groq, Mistral, xAI, OpenRouter, Perplexity, Together AI, Alibaba, Moonshot, OpenPipe, LM Studio, Z.ai, Azure, Bedrock) support CSF. Cloud vendors require CORS support from the API provider (all tested vendors return `access-control-allow-origin: *`). Local vendors (Ollama, LocalAI, LM Studio) require CORS to be enabled on the local server.
|
||||
|
||||
## UI
|
||||
|
||||
The CSF toggle appears in each vendor's setup panel under "Advanced" settings, labeled "Direct Connection". It becomes visible when the prerequisites are met (API key present for cloud vendors, host configured for local vendors). The setting is managed through `useModelServiceClientSideFetch` hook which provides `csfAvailable`, `csfActive`, `csfToggle`, and `csfReset` for UI consumption.
|
||||
@@ -0,0 +1,3 @@
|
||||
## Strategic Vision
|
||||
|
||||
If provided, the following influences the long-term vision, product and architectural goals/north stars for Big-AGI.
|
||||
+29
-8
@@ -1,4 +1,5 @@
|
||||
import type { NextConfig } from 'next';
|
||||
import type { WebpackConfigContext } from 'next/dist/server/config-shared';
|
||||
import { execSync } from 'node:child_process';
|
||||
import { readFileSync } from 'node:fs';
|
||||
|
||||
@@ -17,7 +18,7 @@ process.env.NEXT_PUBLIC_BUILD_HASH = (buildHash || '').slice(0, 10);
|
||||
process.env.NEXT_PUBLIC_BUILD_PKGVER = JSON.parse('' + readFileSync(new URL('./package.json', import.meta.url))).version;
|
||||
process.env.NEXT_PUBLIC_BUILD_TIMESTAMP = new Date().toISOString();
|
||||
process.env.NEXT_PUBLIC_DEPLOYMENT_TYPE = process.env.NEXT_PUBLIC_DEPLOYMENT_TYPE || (process.env.VERCEL_ENV ? `vercel-${process.env.VERCEL_ENV}` : 'local'); // Docker or custom, Vercel
|
||||
console.log(` 🧠 \x1b[1mbig-AGI\x1b[0m v${process.env.NEXT_PUBLIC_BUILD_PKGVER} (@${process.env.NEXT_PUBLIC_BUILD_HASH})`);
|
||||
console.log(` 🧠 \x1b[1mbig-AGI\x1b[0m v${process.env.NEXT_PUBLIC_BUILD_PKGVER} (@${process.env.NEXT_PUBLIC_BUILD_HASH}${process.env.VERCEL_ENV ? `, \x1b[2mV:\x1b[0m${process.env.VERCEL_ENV}` : ''}, \x1b[2mN:\x1b[0m${process.env.NODE_ENV})`);
|
||||
|
||||
// Non-default build types
|
||||
const buildType =
|
||||
@@ -29,7 +30,7 @@ buildType && console.log(` 🧠 big-AGI: building for ${buildType}...\n`);
|
||||
|
||||
/** @type {import('next').NextConfig} */
|
||||
let nextConfig: NextConfig = {
|
||||
reactStrictMode: true,
|
||||
reactStrictMode: !process.env.NO_STRICT_MODE, // default: enabled
|
||||
|
||||
// [exports] https://nextjs.org/docs/advanced-features/static-html-export
|
||||
...(buildType && {
|
||||
@@ -47,7 +48,7 @@ let nextConfig: NextConfig = {
|
||||
// NOTE: we may not be needing this anymore, as we use '@cloudflare/puppeteer'
|
||||
serverExternalPackages: ['puppeteer-core'],
|
||||
|
||||
webpack: (config: any, { isServer }: { isServer: boolean }) => {
|
||||
webpack: (config: any, { isServer, webpack /*, dev, nextRuntime*/ }: WebpackConfigContext) => {
|
||||
// @mui/joy: anything material gets redirected to Joy
|
||||
config.resolve.alias['@mui/material'] = '@mui/joy';
|
||||
|
||||
@@ -57,8 +58,28 @@ let nextConfig: NextConfig = {
|
||||
layers: true,
|
||||
};
|
||||
|
||||
// fix warnings for async functions in the browser (https://github.com/vercel/next.js/issues/64792)
|
||||
// client-side bundling
|
||||
if (!isServer) {
|
||||
/**
|
||||
* AIX client-side
|
||||
* We replace certain server-only modules with client-side mocks, to reuse the exact same imports
|
||||
* while avoiding importing server-only code which would break the build or break at runtime.
|
||||
*/
|
||||
const serverToClientMocks: ReadonlyArray<[RegExp, string]> = [
|
||||
[/\/posthog\.server/, '/posthog.client-mock'],
|
||||
[/\/env\.server/, '/env.client-mock'],
|
||||
];
|
||||
config.plugins = [
|
||||
...config.plugins,
|
||||
...serverToClientMocks.map(([pattern, replacement]) =>
|
||||
new webpack.NormalModuleReplacementPlugin(pattern, (resource: any) => {
|
||||
// console.log(' 🧠 [WEBPACK REPLACEMENT]:', resource.request, '->', resource.request.replace(pattern, replacement));
|
||||
resource.request = resource.request.replace(pattern, replacement);
|
||||
}),
|
||||
),
|
||||
];
|
||||
|
||||
// cosmetic: fix warnings for (absent!) top-level awaits in the browser (https://github.com/vercel/next.js/issues/64792)
|
||||
config.output.environment = { ...config.output.environment, asyncFunction: true };
|
||||
}
|
||||
|
||||
@@ -108,9 +129,9 @@ let nextConfig: NextConfig = {
|
||||
// },
|
||||
};
|
||||
|
||||
// Validate environment variables, if set at build time. Will be actually read and used at runtime.
|
||||
import { verifyBuildTimeVars } from '~/server/env';
|
||||
verifyBuildTimeVars();
|
||||
// Validate environment variables at build time, if required. Server env vars will be actually read and used at runtime (cloud/edge).
|
||||
import { env as validateEnv } from '~/server/env.server';
|
||||
void validateEnv; // Triggers env validation - throws if required vars are missing
|
||||
|
||||
// PostHog error reporting with source maps for production builds
|
||||
import { withPostHogConfig } from '@posthog/nextjs-config';
|
||||
@@ -120,7 +141,7 @@ if (process.env.POSTHOG_API_KEY && process.env.POSTHOG_ENV_ID) {
|
||||
personalApiKey: process.env.POSTHOG_API_KEY,
|
||||
envId: process.env.POSTHOG_ENV_ID,
|
||||
host: 'https://us.i.posthog.com', // backtrace upload host
|
||||
verbose: false,
|
||||
logLevel: 'error', // lowered, too noisy
|
||||
sourcemaps: {
|
||||
enabled: process.env.NODE_ENV === 'production',
|
||||
project: 'big-agi',
|
||||
|
||||
Generated
+2789
-1371
File diff suppressed because it is too large
Load Diff
+41
-33
@@ -1,8 +1,9 @@
|
||||
{
|
||||
"name": "big-agi",
|
||||
"version": "2.0.0",
|
||||
"version": "2.0.4",
|
||||
"private": true,
|
||||
"author": "Enrico Ros <enrico.ros@gmail.com>",
|
||||
"author": "Enrico Ros <enrico@big-agi.com> (https://www.enricoros.com)",
|
||||
"homepage": "https://big-agi.com",
|
||||
"repository": "https://github.com/enricoros/big-agi",
|
||||
"scripts": {
|
||||
"dev": "next dev --turbopack",
|
||||
@@ -11,10 +12,13 @@
|
||||
"build": "next build",
|
||||
"start": "next start",
|
||||
"lint": "next lint",
|
||||
"tsclint": "tsc --noEmit --pretty",
|
||||
"postinstall": "prisma generate --no-hints",
|
||||
"gen:icon-sprites": "node tools/develop/gen-icon-sprites/generate-llm-sprites.ts",
|
||||
"db:push": "prisma db push",
|
||||
"db:studio": "prisma studio",
|
||||
"vercel:env:pull": "npx vercel env pull .env.development.local"
|
||||
"vercel:env:pull": "npx vercel env pull .env.development.local",
|
||||
"sharp:win32_x64": "npm install --os=win32 --cpu=x64 sharp"
|
||||
},
|
||||
"prisma": {
|
||||
"schema": "src/server/prisma/schema.prisma"
|
||||
@@ -28,70 +32,74 @@
|
||||
"@emotion/react": "^11.14.0",
|
||||
"@emotion/server": "^11.11.0",
|
||||
"@emotion/styled": "^11.14.1",
|
||||
"@googleworkspace/drive-picker-react": "^0.2.0",
|
||||
"@mui/icons-material": "^5.18.0",
|
||||
"@mui/joy": "^5.0.0-beta.52",
|
||||
"@next/bundle-analyzer": "~15.1.8",
|
||||
"@next/bundle-analyzer": "~15.1.12",
|
||||
"@prisma/client": "~5.22.0",
|
||||
"@tanstack/react-query": "5.90.3",
|
||||
"@tanstack/react-query": "5.90.21",
|
||||
"@tanstack/react-virtual": "^3.13.22",
|
||||
"@trpc/client": "11.5.1",
|
||||
"@trpc/next": "11.5.1",
|
||||
"@trpc/react-query": "11.5.1",
|
||||
"@trpc/server": "11.5.1",
|
||||
"@vercel/analytics": "^1.5.0",
|
||||
"@vercel/speed-insights": "^1.2.0",
|
||||
"@vercel/analytics": "^1.6.1",
|
||||
"@vercel/speed-insights": "^1.3.1",
|
||||
"aws4fetch": "^1.0.20",
|
||||
"browser-fs-access": "^0.38.0",
|
||||
"cheerio": "^1.1.2",
|
||||
"csv-stringify": "^6.6.0",
|
||||
"dexie": "^4.0.11",
|
||||
"dexie-react-hooks": "^1.1.7",
|
||||
"diff": "^8.0.2",
|
||||
"eventemitter3": "^5.0.1",
|
||||
"dexie": "~4.0.11",
|
||||
"dexie-react-hooks": "~1.1.7",
|
||||
"diff": "^8.0.3",
|
||||
"eventemitter3": "^5.0.4",
|
||||
"idb-keyval": "^6.2.2",
|
||||
"mammoth": "^1.11.0",
|
||||
"nanoid": "^5.1.6",
|
||||
"next": "~15.1.8",
|
||||
"next": "~15.1.12",
|
||||
"nprogress": "^0.2.0",
|
||||
"pdfjs-dist": "5.4.54",
|
||||
"posthog-js": "^1.275.3",
|
||||
"posthog-node": "^5.10.0",
|
||||
"posthog-js": "^1.369.0",
|
||||
"posthog-node": "^5.29.2",
|
||||
"prismjs": "^1.30.0",
|
||||
"puppeteer-core": "^24.25.0",
|
||||
"puppeteer-core": "^24.40.0",
|
||||
"react": "^18.3.1",
|
||||
"react-dom": "^18.3.1",
|
||||
"react-hook-form": "^7.65.0",
|
||||
"react-hook-form": "^7.71.2",
|
||||
"react-markdown": "^10.1.0",
|
||||
"react-player": "^3.3.3",
|
||||
"react-player": "^3.4.0",
|
||||
"react-resizable-panels": "^3.0.6",
|
||||
"react-timeago": "^8.3.0",
|
||||
"rehype-katex": "^7.0.1",
|
||||
"remark-gfm": "^4.0.1",
|
||||
"remark-mark-highlight": "^0.1.1",
|
||||
"remark-math": "^6.0.0",
|
||||
"sharp": "^0.33.5",
|
||||
"superjson": "^2.2.2",
|
||||
"tesseract.js": "^6.0.1",
|
||||
"sharp": "^0.34.5",
|
||||
"superjson": "^2.2.6",
|
||||
"tesseract.js": "^7.0.0",
|
||||
"tiktoken": "^1.0.22",
|
||||
"turndown": "^7.2.1",
|
||||
"zod": "^4.1.12",
|
||||
"turndown": "^7.2.2",
|
||||
"zod": "^4.3.6",
|
||||
"zustand": "5.0.7"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@posthog/nextjs-config": "^1.3.2",
|
||||
"@types/node": "^24.7.2",
|
||||
"@posthog/nextjs-config": "~1.6.4",
|
||||
"@types/node": "^25.6.0",
|
||||
"@types/nprogress": "^0.2.3",
|
||||
"@types/prismjs": "^1.26.5",
|
||||
"@types/react": "^19.2.2",
|
||||
"@types/prismjs": "^1.26.6",
|
||||
"@types/react": "^19.2.14",
|
||||
"@types/react-csv": "^1.1.10",
|
||||
"@types/react-dom": "^19.2.2",
|
||||
"@types/turndown": "^5.0.5",
|
||||
"@types/react-dom": "^19.2.3",
|
||||
"@types/turndown": "^5.0.6",
|
||||
"cross-env": "^10.1.0",
|
||||
"eslint": "^9.37.0",
|
||||
"eslint-config-next": "~15.1.8",
|
||||
"prettier": "^3.6.2",
|
||||
"eslint": "^9.39.4",
|
||||
"eslint-config-next": "~15.1.12",
|
||||
"prettier": "^3.8.2",
|
||||
"prisma": "~5.22.0",
|
||||
"typescript": "^5.9.3"
|
||||
"tsx": "^4.21.0",
|
||||
"typescript": "^6.0.2"
|
||||
},
|
||||
"engines": {
|
||||
"node": "^26.0.0 || ^24.0.0 || ^22.0.0 || ^20.0.0"
|
||||
"node": "^24.0.0 || ^22.0.0 || ^20.0.0"
|
||||
}
|
||||
}
|
||||
|
||||
+9
-4
@@ -1,12 +1,17 @@
|
||||
import * as React from 'react';
|
||||
import Head from 'next/head';
|
||||
import dynamic from 'next/dynamic';
|
||||
import { MyAppProps } from 'next/app';
|
||||
import { Analytics as VercelAnalytics } from '@vercel/analytics/next';
|
||||
import { SpeedInsights as VercelSpeedInsights } from '@vercel/speed-insights/next';
|
||||
|
||||
import { Brand } from '~/common/app.config';
|
||||
import { apiQuery } from '~/common/util/trpc.client';
|
||||
|
||||
|
||||
// [server-client-safe] dynamic imports to avoid webpack bundling issues with next/navigation
|
||||
const VercelAnalytics = dynamic(() => import('@vercel/analytics/next').then(mod => mod.Analytics), { ssr: false });
|
||||
const VercelSpeedInsights = dynamic(() => import('@vercel/speed-insights/next').then(mod => mod.SpeedInsights), { ssr: false });
|
||||
|
||||
|
||||
import 'katex/dist/katex.min.css';
|
||||
import '~/common/styles/CodePrism.css';
|
||||
import '~/common/styles/GithubMarkdown.css';
|
||||
@@ -55,10 +60,10 @@ const Big_AGI_App = ({ Component, emotionCache, pageProps }: MyAppProps) => {
|
||||
</ProviderSingleTab>
|
||||
</ProviderTheming>
|
||||
|
||||
{Is.Deployment.VercelFromFrontend && <VercelAnalytics debug={false} />}
|
||||
{Is.Deployment.VercelFromFrontend && <VercelSpeedInsights debug={false} sampleRate={1 / 2} />}
|
||||
{hasGoogleAnalytics && <OptionalGoogleAnalytics />}
|
||||
{hasPostHogAnalytics && <OptionalPostHogAnalytics />}
|
||||
{Is.Deployment.VercelFromFrontend && <VercelAnalytics debug={false} />}
|
||||
{Is.Deployment.VercelFromFrontend && <VercelSpeedInsights debug={false} sampleRate={1 / 2} />}
|
||||
|
||||
</>;
|
||||
};
|
||||
|
||||
+20
-4
@@ -37,14 +37,31 @@ export default function MyDocument({ emotionStyleTags }: MyDocumentProps) {
|
||||
<meta property='og:site_name' content={Brand.Meta.SiteName} />
|
||||
<meta property='og:type' content='website' />
|
||||
|
||||
{/* Twitter */}
|
||||
<meta property='twitter:card' content='summary_large_image' />
|
||||
{/* Twitter / X */}
|
||||
<meta name='twitter:card' content='summary_large_image' />
|
||||
<meta property='twitter:url' content={Brand.URIs.Home} />
|
||||
<meta property='twitter:title' content={Brand.Title.Common} />
|
||||
<meta property='twitter:description' content={Brand.Meta.Description} />
|
||||
{Brand.URIs.CardImage && <meta property='twitter:image' content={Brand.URIs.CardImage} />}
|
||||
<meta name='twitter:site' content={Brand.Meta.TwitterSite} />
|
||||
<meta name='twitter:card' content='summary_large_image' />
|
||||
<meta name='twitter:creator' content='@enricoros' />
|
||||
<link rel='canonical' href={Brand.URIs.Home} />
|
||||
|
||||
{/* Author & Structured Data */}
|
||||
<meta name='author' content='Enrico Ros' />
|
||||
<link rel='author' href='https://www.enricoros.com' />
|
||||
<script type='application/ld+json' dangerouslySetInnerHTML={{ __html: JSON.stringify({
|
||||
'@context': 'https://schema.org',
|
||||
'@type': 'SoftwareApplication',
|
||||
'name': 'Big-AGI',
|
||||
'url': 'https://big-agi.com',
|
||||
'applicationCategory': 'ProductivityApplication',
|
||||
'operatingSystem': 'All, Web',
|
||||
'description': Brand.Meta.Description,
|
||||
'sameAs': ['https://github.com/enricoros/big-agi', 'https://discord.gg/MkH4qj2Jp9',],
|
||||
'author': { '@type': 'Person', 'name': 'Enrico Ros', 'url': 'https://www.enricoros.com' },
|
||||
'publisher': { '@type': 'Organization', 'name': 'Token Fabrics LLC', 'url': 'https://www.tokenfabrics.com' },
|
||||
}) }} />
|
||||
|
||||
{/* Style Sheets (injected and server-side) */}
|
||||
<meta name='emotion-insertion-point' content='' />
|
||||
@@ -111,7 +128,6 @@ MyDocument.getInitialProps = async (ctx: DocumentContext) => {
|
||||
<style
|
||||
data-emotion={`${style.key} ${style.ids.join(' ')}`}
|
||||
key={style.key}
|
||||
// eslint-disable-next-line react/no-danger
|
||||
dangerouslySetInnerHTML={{ __html: style.css }}
|
||||
/>
|
||||
));
|
||||
|
||||
@@ -18,7 +18,7 @@ import { ROUTE_APP_CHAT, ROUTE_INDEX } from '~/common/app.routes';
|
||||
import { Release } from '~/common/app.release';
|
||||
|
||||
// capabilities access
|
||||
import { useCapabilityBrowserSpeechRecognition, useCapabilityElevenLabs, useCapabilityTextToImage } from '~/common/components/useCapabilities';
|
||||
import { useCapabilityBrowserSpeechRecognition, useCapabilityTextToImage } from '~/common/components/useCapabilities';
|
||||
|
||||
// stores access
|
||||
import { getLLMsDebugInfo } from '~/common/stores/llms/store-llms';
|
||||
@@ -95,7 +95,6 @@ function AppDebug() {
|
||||
const cProduct = {
|
||||
capabilities: {
|
||||
mic: useCapabilityBrowserSpeechRecognition(),
|
||||
elevenLabs: useCapabilityElevenLabs(),
|
||||
textToImage: useCapabilityTextToImage(),
|
||||
},
|
||||
models: getLLMsDebugInfo(),
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
"short_name": "big-AGI",
|
||||
"theme_color": "#32383E",
|
||||
"background_color": "#9FA6AD",
|
||||
"description": "Your Generative AI Suite",
|
||||
"description": "Open-source AI workspace. Multi-model reasoning and personas for maximum control.",
|
||||
"categories": [
|
||||
"productivity",
|
||||
"AI",
|
||||
|
||||
@@ -20,7 +20,7 @@ function initTestConversation(): DConversation {
|
||||
return conversation;
|
||||
}
|
||||
|
||||
function initTestBeamStore(messages: DMessage[], beamStore: BeamStoreApi = createBeamVanillaStore()): BeamStoreApi {
|
||||
function initTestBeamStore(messages: DMessage[], beamStore: BeamStoreApi): BeamStoreApi {
|
||||
beamStore.getState().open(messages, null, false, (content) => alert(content));
|
||||
return beamStore;
|
||||
}
|
||||
|
||||
@@ -6,13 +6,15 @@ import ChatIcon from '@mui/icons-material/Chat';
|
||||
import CheckRoundedIcon from '@mui/icons-material/CheckRounded';
|
||||
import CloseRoundedIcon from '@mui/icons-material/CloseRounded';
|
||||
import MicIcon from '@mui/icons-material/Mic';
|
||||
import RecordVoiceOverTwoToneIcon from '@mui/icons-material/RecordVoiceOverTwoTone';
|
||||
import WarningRoundedIcon from '@mui/icons-material/WarningRounded';
|
||||
|
||||
import { useSpeexGlobalEngine } from '~/modules/speex/store-module-speex';
|
||||
|
||||
import { PhVoice } from '~/common/components/icons/phosphor/PhVoice';
|
||||
import { animationColorRainbow } from '~/common/util/animUtils';
|
||||
import { navigateBack } from '~/common/app.routes';
|
||||
import { optimaOpenPreferences } from '~/common/layout/optima/useOptima';
|
||||
import { useCapabilityBrowserSpeechRecognition, useCapabilityElevenLabs } from '~/common/components/useCapabilities';
|
||||
import { useCapabilityBrowserSpeechRecognition } from '~/common/components/useCapabilities';
|
||||
import { useChatStore } from '~/common/stores/chat/store-chats';
|
||||
import { useUICounter } from '~/common/stores/store-ui';
|
||||
|
||||
@@ -45,7 +47,7 @@ export function CallWizard(props: { strict?: boolean, conversationId: string | n
|
||||
|
||||
// external state
|
||||
const recognition = useCapabilityBrowserSpeechRecognition();
|
||||
const synthesis = useCapabilityElevenLabs();
|
||||
const speexGlobalEngine = useSpeexGlobalEngine();
|
||||
const chatIsEmpty = useChatStore(state => {
|
||||
if (!props.conversationId)
|
||||
return false;
|
||||
@@ -56,17 +58,18 @@ export function CallWizard(props: { strict?: boolean, conversationId: string | n
|
||||
|
||||
// derived state
|
||||
const outOfTheBlue = !props.conversationId;
|
||||
const overriddenEmptyChat = chatEmptyOverride || !chatIsEmpty;
|
||||
const overriddenEmptyChat = outOfTheBlue || chatEmptyOverride || !chatIsEmpty;
|
||||
const overriddenRecognition = recognitionOverride || recognition.mayWork;
|
||||
const allGood = overriddenEmptyChat && overriddenRecognition && synthesis.mayWork;
|
||||
const fatalGood = overriddenRecognition && synthesis.mayWork;
|
||||
const synthesisShallWork = !!speexGlobalEngine;
|
||||
const allGood = overriddenEmptyChat && overriddenRecognition && synthesisShallWork;
|
||||
const fatalGood = overriddenRecognition && synthesisShallWork;
|
||||
|
||||
|
||||
const handleOverrideChatEmpty = React.useCallback(() => setChatEmptyOverride(true), []);
|
||||
|
||||
const handleOverrideRecognition = React.useCallback(() => setRecognitionOverride(true), []);
|
||||
|
||||
const handleConfigureElevenLabs = React.useCallback(() => optimaOpenPreferences('voice'), []);
|
||||
const handleConfigureVoice = React.useCallback(() => optimaOpenPreferences('voice'), []);
|
||||
|
||||
const handleFinishButton = React.useCallback(() => {
|
||||
if (!allGood)
|
||||
@@ -128,17 +131,17 @@ export function CallWizard(props: { strict?: boolean, conversationId: string | n
|
||||
|
||||
{/* Text to Speech status */}
|
||||
<StatusCard
|
||||
icon={<RecordVoiceOverTwoToneIcon />}
|
||||
icon={<PhVoice />}
|
||||
text={
|
||||
(synthesis.mayWork ? 'Voice synthesis should be ready.' : 'There might be an issue with ElevenLabs voice synthesis.')
|
||||
+ (synthesis.isConfiguredServerSide ? '' : (synthesis.isConfiguredClientSide ? '' : ' Please add your API key in the settings.'))
|
||||
(synthesisShallWork ? 'Voice synthesis should be ready.' : 'There might be an issue with voice synthesis.')
|
||||
// + (synthesis.isConfiguredServerSide ? '' : (synthesis.isConfiguredClientSide ? '' : ' Please add your API key in the settings.'))
|
||||
}
|
||||
button={synthesis.mayWork ? undefined : (
|
||||
<Button variant='outlined' onClick={handleConfigureElevenLabs} sx={{ mx: 1 }}>
|
||||
button={synthesisShallWork ? undefined : (
|
||||
<Button variant='outlined' onClick={handleConfigureVoice} sx={{ mx: 1 }}>
|
||||
Configure
|
||||
</Button>
|
||||
)}
|
||||
hasIssue={!synthesis.mayWork}
|
||||
hasIssue={!synthesisShallWork}
|
||||
/>
|
||||
|
||||
{/*<Typography>*/}
|
||||
|
||||
@@ -317,7 +317,7 @@ export function Contacts(props: { setCallIntent: (intent: AppCallIntent) => void
|
||||
issue={354}
|
||||
text='Call App: Support thread and compatibility matrix'
|
||||
note={<>
|
||||
Voice input uses the HTML Web Speech API, and speech output requires an ElevenLabs API Key.
|
||||
Voice input uses the HTML Web Speech API.
|
||||
</>}
|
||||
// note2='Please report any issues you encounter'
|
||||
sx={{
|
||||
|
||||
+24
-37
@@ -7,16 +7,15 @@ import CallEndIcon from '@mui/icons-material/CallEnd';
|
||||
import CallIcon from '@mui/icons-material/Call';
|
||||
import MicIcon from '@mui/icons-material/Mic';
|
||||
import MicNoneIcon from '@mui/icons-material/MicNone';
|
||||
import RecordVoiceOverTwoToneIcon from '@mui/icons-material/RecordVoiceOverTwoTone';
|
||||
|
||||
import { ScrollToBottom } from '~/common/scroll-to-bottom/ScrollToBottom';
|
||||
import { ScrollToBottomButton } from '~/common/scroll-to-bottom/ScrollToBottomButton';
|
||||
import { useChatLLMDropdown } from '../chat/components/layout-bar/useLLMDropdown';
|
||||
|
||||
import { SystemPurposeId, SystemPurposes } from '../../data';
|
||||
import { elevenLabsSpeakText } from '~/modules/elevenlabs/elevenlabs.client';
|
||||
import { AixChatGenerateContent_DMessage, aixChatGenerateContent_DMessage_FromConversation } from '~/modules/aix/client/aix.client';
|
||||
import { useElevenLabsVoiceDropdown } from '~/modules/elevenlabs/useElevenLabsVoiceDropdown';
|
||||
|
||||
import { aixChatGenerateContent_DMessage_FromConversation, AixChatGenerateContent_DMessageGuts } from '~/modules/aix/client/aix.client';
|
||||
import { speakText } from '~/modules/speex/speex.client';
|
||||
|
||||
import type { OptimaBarControlMethods } from '~/common/layout/optima/bar/OptimaBarDropdown';
|
||||
import { AudioPlayer } from '~/common/util/audio/AudioPlayer';
|
||||
@@ -24,13 +23,14 @@ import { Link } from '~/common/components/Link';
|
||||
import { OptimaPanelGroupedList } from '~/common/layout/optima/panel/OptimaPanelGroupedList';
|
||||
import { OptimaPanelIn, OptimaToolbarIn } from '~/common/layout/optima/portals/OptimaPortalsIn';
|
||||
import { SpeechResult, useSpeechRecognition } from '~/common/components/speechrecognition/useSpeechRecognition';
|
||||
import { clipboardInterceptCtrlCForCleanup } from '~/common/util/clipboardUtils';
|
||||
import { conversationTitle, remapMessagesSysToUsr } from '~/common/stores/chat/chat.conversation';
|
||||
import { createDMessageFromFragments, createDMessageTextContent, DMessage, messageFragmentsReduceText, messageWasInterruptedAtStart } from '~/common/stores/chat/chat.message';
|
||||
import { createErrorContentFragment } from '~/common/stores/chat/chat.fragments';
|
||||
import { launchAppChat, navigateToIndex } from '~/common/app.routes';
|
||||
import { useChatStore } from '~/common/stores/chat/store-chats';
|
||||
import { useGlobalShortcuts } from '~/common/components/shortcuts/useGlobalShortcuts';
|
||||
import { usePlayUrl } from '~/common/util/audio/usePlayUrl';
|
||||
import { usePlayUrlInterval } from './state/usePlayUrlInterval';
|
||||
|
||||
import type { AppCallIntent } from './AppCall';
|
||||
import { CallAvatar } from './components/CallAvatar';
|
||||
@@ -43,18 +43,13 @@ import { useAppCallStore } from './state/store-app-call';
|
||||
function CallMenu(props: {
|
||||
pushToTalk: boolean,
|
||||
setPushToTalk: (pushToTalk: boolean) => void,
|
||||
override: boolean,
|
||||
setOverride: (overridePersonaVoice: boolean) => void,
|
||||
}) {
|
||||
|
||||
// external state
|
||||
const { grayUI, toggleGrayUI } = useAppCallStore();
|
||||
const { voicesDropdown } = useElevenLabsVoiceDropdown(false, !props.override);
|
||||
|
||||
const handlePushToTalkToggle = () => props.setPushToTalk(!props.pushToTalk);
|
||||
|
||||
const handleChangeVoiceToggle = () => props.setOverride(!props.override);
|
||||
|
||||
return <OptimaPanelGroupedList title='Call'>
|
||||
|
||||
<MenuItem onClick={handlePushToTalkToggle}>
|
||||
@@ -63,17 +58,6 @@ function CallMenu(props: {
|
||||
<Switch checked={props.pushToTalk} onChange={handlePushToTalkToggle} sx={{ ml: 'auto' }} />
|
||||
</MenuItem>
|
||||
|
||||
<MenuItem onClick={handleChangeVoiceToggle}>
|
||||
<ListItemDecorator><RecordVoiceOverTwoToneIcon /></ListItemDecorator>
|
||||
Change Voice
|
||||
<Switch checked={props.override} onChange={handleChangeVoiceToggle} sx={{ ml: 'auto' }} />
|
||||
</MenuItem>
|
||||
|
||||
<MenuItem>
|
||||
<ListItemDecorator>{' '}</ListItemDecorator>
|
||||
{voicesDropdown}
|
||||
</MenuItem>
|
||||
|
||||
<ListDivider />
|
||||
|
||||
<MenuItem onClick={toggleGrayUI}>
|
||||
@@ -98,7 +82,6 @@ export function Telephone(props: {
|
||||
const [avatarClickCount, setAvatarClickCount] = React.useState<number>(0);// const [micMuted, setMicMuted] = React.useState(false);
|
||||
const [callElapsedTime, setCallElapsedTime] = React.useState<string>('00:00');
|
||||
const [callMessages, setCallMessages] = React.useState<DMessage[]>([]);
|
||||
const [overridePersonaVoice, setOverridePersonaVoice] = React.useState<boolean>(false);
|
||||
const [personaTextInterim, setPersonaTextInterim] = React.useState<string | null>(null);
|
||||
const [pushToTalk, setPushToTalk] = React.useState(true);
|
||||
const [stage, setStage] = React.useState<'ring' | 'declined' | 'connected' | 'ended'>('ring');
|
||||
@@ -118,7 +101,7 @@ export function Telephone(props: {
|
||||
}));
|
||||
const persona = SystemPurposes[props.callIntent.personaId as SystemPurposeId] ?? undefined;
|
||||
const personaCallStarters = persona?.call?.starters ?? undefined;
|
||||
const personaVoiceId = overridePersonaVoice ? undefined : (persona?.voices?.elevenLabs?.voiceId ?? undefined);
|
||||
// const personaVoiceSelector = React.useMemo(() => personaGetVoiceSelector(persona), [persona]);
|
||||
const personaSystemMessage = persona?.systemMessage ?? undefined;
|
||||
|
||||
// hooks and speech
|
||||
@@ -144,11 +127,11 @@ export function Telephone(props: {
|
||||
|
||||
// pickup / hangup
|
||||
React.useEffect(() => {
|
||||
!isRinging && AudioPlayer.playUrl(isConnected ? '/sounds/chat-begin.mp3' : '/sounds/chat-end.mp3');
|
||||
!isRinging && void AudioPlayer.playUrl(isConnected ? '/sounds/chat-begin.mp3' : '/sounds/chat-end.mp3').catch(() => {/* autoplay may be blocked */});
|
||||
}, [isRinging, isConnected]);
|
||||
|
||||
// ringtone
|
||||
usePlayUrl(isRinging ? '/sounds/chat-ringtone.mp3' : null, 300, 2800 * 2);
|
||||
usePlayUrlInterval(isRinging ? '/sounds/chat-ringtone.mp3' : null, 300, 2800 * 2);
|
||||
|
||||
|
||||
/// Shortcuts
|
||||
@@ -165,7 +148,6 @@ export function Telephone(props: {
|
||||
};
|
||||
|
||||
// [E] pickup -> seed message and call timer
|
||||
// FIXME: Overriding the voice will reset the call - not a desired behavior
|
||||
React.useEffect(() => {
|
||||
if (!isConnected) return;
|
||||
|
||||
@@ -185,11 +167,14 @@ export function Telephone(props: {
|
||||
|
||||
setCallMessages([createDMessageTextContent('assistant', firstMessage)]); // [state] set assistant:hello message
|
||||
|
||||
// fire/forget
|
||||
void elevenLabsSpeakText(firstMessage, personaVoiceId, true, true);
|
||||
// fire/forget - use 'fast' priority for real-time conversation
|
||||
void speakText(firstMessage,
|
||||
undefined,
|
||||
{ label: 'Call', priority: 'fast' },
|
||||
);
|
||||
|
||||
return () => clearInterval(interval);
|
||||
}, [isConnected, personaCallStarters, personaVoiceId]);
|
||||
}, [isConnected, personaCallStarters]);
|
||||
|
||||
// [E] persona streaming response - upon new user message
|
||||
React.useEffect(() => {
|
||||
@@ -254,7 +239,7 @@ export function Telephone(props: {
|
||||
'call',
|
||||
callMessages[0].id,
|
||||
{ abortSignal: responseAbortController.current.signal },
|
||||
(update: AixChatGenerateContent_DMessage, _isDone: boolean) => {
|
||||
(update: AixChatGenerateContent_DMessageGuts, _isDone: boolean) => {
|
||||
const updatedText = messageFragmentsReduceText(update.fragments).trim();
|
||||
if (updatedText)
|
||||
setPersonaTextInterim(finalText = updatedText);
|
||||
@@ -265,14 +250,17 @@ export function Telephone(props: {
|
||||
if (messageWasInterruptedAtStart(status.lastDMessage))
|
||||
return;
|
||||
|
||||
// whether status.outcome === 'success' or not, we get a valid DMessage, eventually with Error Fragments inside
|
||||
// whether status.outcome === 'completed' or not, we get a valid DMessage, eventually with Error Fragments inside
|
||||
const fullMessage = createDMessageFromFragments('assistant', status.lastDMessage.fragments);
|
||||
fullMessage.generator = status.lastDMessage.generator;
|
||||
setCallMessages(messages => [...messages, fullMessage]); // [state] append assistant:call_response
|
||||
|
||||
// fire/forget
|
||||
if (status.outcome === 'success' && finalText?.length >= 1)
|
||||
void elevenLabsSpeakText(finalText, personaVoiceId, true, true);
|
||||
// fire/forget - use 'fast' priority for real-time conversation
|
||||
if (status.outcome === 'completed' && finalText?.length >= 1)
|
||||
void speakText(finalText,
|
||||
undefined,
|
||||
{ label: 'Call', priority: 'fast' },
|
||||
);
|
||||
|
||||
}).catch((err: DOMException) => {
|
||||
if (err?.name !== 'AbortError') {
|
||||
@@ -288,7 +276,7 @@ export function Telephone(props: {
|
||||
responseAbortController.current?.abort();
|
||||
responseAbortController.current = null;
|
||||
};
|
||||
}, [isConnected, callMessages, modelId, personaVoiceId, personaSystemMessage, reMessages]);
|
||||
}, [callMessages, isConnected, modelId, personaSystemMessage, reMessages]);
|
||||
|
||||
// [E] Message interrupter
|
||||
const abortTrigger = isConnected && recognitionState.hasSpeech;
|
||||
@@ -325,7 +313,6 @@ export function Telephone(props: {
|
||||
<OptimaPanelIn>
|
||||
<CallMenu
|
||||
pushToTalk={pushToTalk} setPushToTalk={setPushToTalk}
|
||||
override={overridePersonaVoice} setOverride={setOverridePersonaVoice}
|
||||
/>
|
||||
</OptimaPanelIn>
|
||||
|
||||
@@ -373,7 +360,7 @@ export function Telephone(props: {
|
||||
|
||||
<ScrollToBottom stickToBottomInitial>
|
||||
|
||||
<Box sx={{ minHeight: '100%', p: 1, display: 'flex', flexDirection: 'column', gap: 1 }}>
|
||||
<Box onCopy={clipboardInterceptCtrlCForCleanup} sx={{ minHeight: '100%', p: 1, display: 'flex', flexDirection: 'column', gap: 1 }}>
|
||||
|
||||
{/* Call Messages [] */}
|
||||
{callMessages.map((message) =>
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { AudioPlayer } from '~/common/util/audio/AudioPlayer';
|
||||
|
||||
|
||||
@@ -8,15 +9,16 @@ import { AudioPlayer } from '~/common/util/audio/AudioPlayer';
|
||||
* @param firstDelay The delay before the first play, in milliseconds.
|
||||
* @param repeatMs The delay between each repeat, in milliseconds. If 0, the sound will only play once.
|
||||
*/
|
||||
export function usePlayUrl(url: string | null, firstDelay: number = 0, repeatMs: number = 0) {
|
||||
export function usePlayUrlInterval(url: string | null, firstDelay: number = 0, repeatMs: number = 0) {
|
||||
React.useEffect(() => {
|
||||
if (!url) return;
|
||||
|
||||
const abortController = new AbortController();
|
||||
let timer2: any = null;
|
||||
|
||||
const playFirstTime = () => {
|
||||
const playAudio = () => AudioPlayer.playUrl(url);
|
||||
void playAudio();
|
||||
const playAudio = () => void AudioPlayer.playUrl(url, abortController.signal).catch(() => {/* autoplay may be blocked */});
|
||||
playAudio();
|
||||
timer2 = repeatMs > 0 ? setInterval(playAudio, repeatMs) : null;
|
||||
};
|
||||
|
||||
@@ -24,8 +26,8 @@ export function usePlayUrl(url: string | null, firstDelay: number = 0, repeatMs:
|
||||
|
||||
return () => {
|
||||
clearTimeout(timer1);
|
||||
if (timer2)
|
||||
clearInterval(timer2);
|
||||
timer2 && clearInterval(timer2);
|
||||
abortController?.abort();
|
||||
};
|
||||
}, [firstDelay, repeatMs, url]);
|
||||
}
|
||||
+20
-27
@@ -4,13 +4,10 @@ import { Panel, PanelGroup, PanelResizeHandle } from 'react-resizable-panels';
|
||||
import type { SxProps } from '@mui/joy/styles/types';
|
||||
import { Box, useTheme } from '@mui/joy';
|
||||
|
||||
import { DEV_MODE_SETTINGS } from '../settings-modal/UxLabsSettings';
|
||||
|
||||
import type { DiagramConfig } from '~/modules/aifn/digrams/DiagramsModal';
|
||||
import type { TradeConfig } from '~/modules/trade/TradeModal';
|
||||
import { downloadSingleChat, importConversationsFromFilesAtRest, openConversationsAtRestPicker } from '~/modules/trade/trade.client';
|
||||
import { imaginePromptFromTextOrThrow } from '~/modules/aifn/imagine/imaginePromptFromText';
|
||||
import { elevenLabsSpeakText } from '~/modules/elevenlabs/elevenlabs.client';
|
||||
import { useAreBeamsOpen } from '~/modules/beam/store-beam.hooks';
|
||||
import { useCapabilityTextToImage } from '~/modules/t2i/t2i.client';
|
||||
|
||||
@@ -21,7 +18,7 @@ import { ConversationsManager } from '~/common/chat-overlay/ConversationsManager
|
||||
import { ErrorBoundary } from '~/common/components/ErrorBoundary';
|
||||
import { getLLMContextTokens, LLM_IF_ANT_PromptCaching, LLM_IF_OAI_Vision } from '~/common/stores/llms/llms.types';
|
||||
import { OptimaDrawerIn, OptimaPanelIn, OptimaToolbarIn } from '~/common/layout/optima/portals/OptimaPortalsIn';
|
||||
import { PanelResizeInset } from '~/common/components/panes/GoodPanelResizeHandler';
|
||||
import { PanelResizeInset } from '~/common/components/PanelResizeInset';
|
||||
import { Release } from '~/common/app.release';
|
||||
import { ScrollToBottom } from '~/common/scroll-to-bottom/ScrollToBottom';
|
||||
import { ScrollToBottomButton } from '~/common/scroll-to-bottom/ScrollToBottomButton';
|
||||
@@ -33,7 +30,7 @@ import { createErrorContentFragment, createTextContentFragment, DMessageAttachme
|
||||
import { gcChatImageAssets } from '~/common/stores/chat/chat.gc';
|
||||
import { getChatLLMId } from '~/common/stores/llms/store-llms';
|
||||
import { getConversation, getConversationSystemPurposeId, useConversation } from '~/common/stores/chat/store-chats';
|
||||
import { optimaActions, optimaOpenModels, optimaOpenPreferences } from '~/common/layout/optima/useOptima';
|
||||
import { optimaActions, optimaOpenModels, optimaOpenPreferences, useOptimaChromeless } from '~/common/layout/optima/useOptima';
|
||||
import { useFolderStore } from '~/common/stores/folders/store-chat-folders';
|
||||
import { useIsMobile, useIsTallScreen } from '~/common/components/useMatchMedia';
|
||||
import { useLLM } from '~/common/stores/llms/llms.hooks';
|
||||
@@ -41,8 +38,6 @@ import { useModelDomain } from '~/common/stores/llms/hooks/useModelDomain';
|
||||
import { useOverlayComponents } from '~/common/layout/overlays/useOverlayComponents';
|
||||
import { useRouterQuery } from '~/common/app.routes';
|
||||
import { useUIComplexityIsMinimal } from '~/common/stores/store-ui';
|
||||
import { useUXLabsStore } from '~/common/stores/store-ux-labs';
|
||||
|
||||
import { ChatPane } from './components/layout-pane/ChatPane';
|
||||
import { ChatBarBeam } from './components/layout-bar/ChatBarBeam';
|
||||
import { ChatBarAltTitle } from './components/layout-bar/ChatBarAltTitle';
|
||||
@@ -152,8 +147,6 @@ export function AppChat() {
|
||||
|
||||
const intent = useRouterQuery<Partial<AppChatIntent>>();
|
||||
|
||||
const showAltTitleBar = useUXLabsStore(state => DEV_MODE_SETTINGS && state.labsChatBarAlt === 'title');
|
||||
|
||||
const { domainModelId: chatLLMId } = useModelDomain('primaryChat');
|
||||
const chatLLM = useLLM(chatLLMId) ?? null;
|
||||
|
||||
@@ -186,6 +179,7 @@ export function AppChat() {
|
||||
const beamOpenStoreInFocusedPane = focusedPaneIndex === null ? null
|
||||
: !beamsOpens?.[focusedPaneIndex] ? null
|
||||
: paneBeamStores?.[focusedPaneIndex] ?? null;
|
||||
const focusedChatBeamOpen = focusedPaneIndex !== null && !!beamsOpens?.[focusedPaneIndex];
|
||||
|
||||
const {
|
||||
// focused
|
||||
@@ -215,7 +209,8 @@ export function AppChat() {
|
||||
});
|
||||
|
||||
// Composer Auto-hiding
|
||||
const forceComposerHide = !!beamOpenStoreInFocusedPane /* || !focusedPaneConversationId */; // auto-hide when no chat (the 'please select a conversation...' state) doesn't feel good
|
||||
const isChromeless = useOptimaChromeless() && isMobile; // auto-hide on Chromeless too
|
||||
const forceComposerHide = isChromeless || !!beamOpenStoreInFocusedPane /* || !focusedPaneConversationId */; // auto-hide when no chat (the 'please select a conversation...' state) doesn't feel good
|
||||
const composerAutoHide = useComposerAutoHide(forceComposerHide, composerHasContent);
|
||||
|
||||
// Window actions
|
||||
@@ -345,11 +340,6 @@ export function AppChat() {
|
||||
});
|
||||
}, [handleExecuteAndOutcome]);
|
||||
|
||||
const handleTextSpeak = React.useCallback(async (text: string): Promise<void> => {
|
||||
await elevenLabsSpeakText(text, undefined, true, true);
|
||||
}, []);
|
||||
|
||||
|
||||
// Chat actions
|
||||
|
||||
const handleConversationNewInFocusedPane = React.useCallback((forceNoRecycle: boolean, isIncognito: boolean) => {
|
||||
@@ -468,7 +458,7 @@ export function AppChat() {
|
||||
|
||||
// Pluggable Optima components
|
||||
|
||||
const barAltTitle = showAltTitleBar ? focusedChatTitle ?? 'No Chat' : null;
|
||||
const barAltTitle = null;
|
||||
|
||||
const focusedBarContent = React.useMemo(() => beamOpenStoreInFocusedPane
|
||||
? <ChatBarBeam conversationTitle={focusedChatTitle ?? 'No Chat'} beamStore={beamOpenStoreInFocusedPane} isMobile={isMobile} />
|
||||
@@ -479,7 +469,7 @@ export function AppChat() {
|
||||
);
|
||||
|
||||
|
||||
// Disabled by default, as it lags the opening of the drawer and immediatly vanishes during the closing animation
|
||||
// Disabled by default, as it lags the opening of the drawer and immediately vanishes during the closing animation
|
||||
const isDrawerOpen = true; // useOptimaDrawerOpen();
|
||||
|
||||
const drawerContent = React.useMemo(() => !isDrawerOpen ? null :
|
||||
@@ -489,6 +479,7 @@ export function AppChat() {
|
||||
activeFolderId={activeFolderId}
|
||||
chatPanesConversationIds={paneUniqueConversationIds}
|
||||
disableNewButton={disableNewButton}
|
||||
focusedChatBeamOpen={focusedChatBeamOpen}
|
||||
onConversationActivate={handleOpenConversationInFocusedPane}
|
||||
onConversationBranch={handleConversationBranch}
|
||||
onConversationNew={handleConversationNewInFocusedPane}
|
||||
@@ -497,11 +488,12 @@ export function AppChat() {
|
||||
onConversationsImportDialog={handleConversationImportDialog}
|
||||
setActiveFolderId={setActiveFolderId}
|
||||
/>,
|
||||
[activeFolderId, disableNewButton, focusedPaneConversationId, handleConversationBranch, handleConversationExport, handleConversationImportDialog, handleConversationNewInFocusedPane, handleDeleteConversations, handleOpenConversationInFocusedPane, isDrawerOpen, paneUniqueConversationIds],
|
||||
[activeFolderId, disableNewButton, focusedChatBeamOpen, focusedPaneConversationId, handleConversationBranch, handleConversationExport, handleConversationImportDialog, handleConversationNewInFocusedPane, handleDeleteConversations, handleOpenConversationInFocusedPane, isDrawerOpen, paneUniqueConversationIds],
|
||||
);
|
||||
|
||||
const focusedChatPanelContent = React.useMemo(() => !focusedPaneConversationId ? null :
|
||||
<ChatPane
|
||||
isMobile={isMobile}
|
||||
conversationId={focusedPaneConversationId}
|
||||
disableItems={!focusedPaneConversationId || isFocusedChatEmpty}
|
||||
hasConversations={hasConversations}
|
||||
@@ -523,7 +515,7 @@ export function AppChat() {
|
||||
React.useEffect(() => {
|
||||
// Debug: open a null chat
|
||||
if (Release.IsNodeDevBuild && intent.initialConversationId === 'null')
|
||||
openConversationInFocusedPane(null! /* for debugging purporse */);
|
||||
openConversationInFocusedPane(null! /* for debugging purpose */);
|
||||
// Open the initial conversation if set
|
||||
else if (intent.initialConversationId)
|
||||
openConversationInFocusedPane(intent.initialConversationId);
|
||||
@@ -591,9 +583,11 @@ export function AppChat() {
|
||||
}, []);
|
||||
|
||||
useGlobalShortcuts('AppChat', React.useMemo(() => [
|
||||
// focused conversation
|
||||
{ key: 'z', ctrl: true, shift: true, disabled: isFocusedChatEmpty, action: handleMessageRegenerateLastInFocusedPane, description: 'Retry' },
|
||||
{ key: 'b', ctrl: true, shift: true, disabled: isFocusedChatEmpty, action: handleMessageBeamLastInFocusedPane, description: 'Beam Edit' },
|
||||
// focused conversation (excluded when Beam is open so the keystroke passes through to the browser)
|
||||
...(beamOpenStoreInFocusedPane ? [] : [
|
||||
{ key: 'z', ctrl: true, shift: true, disabled: isFocusedChatEmpty, action: handleMessageRegenerateLastInFocusedPane, description: 'Retry' },
|
||||
{ key: 'b', ctrl: true, shift: true, disabled: isFocusedChatEmpty, action: handleMessageBeamLastInFocusedPane, description: 'Beam Edit' },
|
||||
]),
|
||||
{ key: 'o', ctrl: true, action: handleConversationsImportFormFilePicker },
|
||||
{ key: 's', ctrl: true, action: () => handleFileSaveConversation(focusedPaneConversationId) },
|
||||
{ key: 'n', ctrl: true, shift: true, action: () => handleConversationNewInFocusedPane(false, false) },
|
||||
@@ -611,7 +605,7 @@ export function AppChat() {
|
||||
{ key: 'p', ctrl: true, action: () => personaDropdownRef.current?.openListbox() /*, description: 'Open Persona Dropdown'*/ },
|
||||
// focused conversation llm
|
||||
{ key: 'o', ctrl: true, shift: true, action: handleOpenChatLlmOptions },
|
||||
], [focusedPaneConversationId, handleConversationNewInFocusedPane, handleConversationReset, handleConversationsImportFormFilePicker, handleDeleteConversations, handleFileSaveConversation, handleMessageBeamLastInFocusedPane, handleMessageRegenerateLastInFocusedPane, handleMoveFocus, handleNavigateHistoryInFocusedPane, handleOpenChatLlmOptions, isFocusedChatEmpty]));
|
||||
], [beamOpenStoreInFocusedPane, focusedPaneConversationId, handleConversationNewInFocusedPane, handleConversationReset, handleConversationsImportFormFilePicker, handleDeleteConversations, handleFileSaveConversation, handleMessageBeamLastInFocusedPane, handleMessageRegenerateLastInFocusedPane, handleMoveFocus, handleNavigateHistoryInFocusedPane, handleOpenChatLlmOptions, isFocusedChatEmpty]));
|
||||
|
||||
|
||||
return <>
|
||||
@@ -651,7 +645,7 @@ export function AppChat() {
|
||||
setFocusedPaneIndex(idx);
|
||||
}}
|
||||
onCollapse={() => {
|
||||
// NOTE: despite the delay to try to let the draggin settle, there seems to be an issue with the Pane locking the screen
|
||||
// NOTE: despite the delay to try to let the dragging settle, there seems to be an issue with the Pane locking the screen
|
||||
// setTimeout(() => removePane(idx), 50);
|
||||
// more than 2 will result in an assertion from the framework
|
||||
if (chatPanes.length === 2) removePane(idx);
|
||||
@@ -678,7 +672,7 @@ export function AppChat() {
|
||||
// NOTE: this is a workaround for the 'stuck-after-collapse-close' issue. We will collapse the 'other' pane, which
|
||||
// will get it removed (onCollapse), and somehow this pane will be stuck with a pointerEvents: 'none' style, which de-facto
|
||||
// disables further interaction with the chat. This is a workaround to re-enable the pointer events.
|
||||
// The root cause seems to be a Dragstate not being reset properly, however the pointerEvents has been set since 0.0.56 while
|
||||
// The root cause seems to be a Drag state not being reset properly, however the pointerEvents has been set since 0.0.56 while
|
||||
// it was optional before: https://github.com/bvaughn/react-resizable-panels/issues/241
|
||||
pointerEvents: 'auto',
|
||||
}),
|
||||
@@ -723,7 +717,6 @@ export function AppChat() {
|
||||
onConversationNew={handleConversationNewInFocusedPane}
|
||||
onTextDiagram={handleTextDiagram}
|
||||
onTextImagine={handleImagineFromText}
|
||||
onTextSpeak={handleTextSpeak}
|
||||
sx={chatMessageListSx}
|
||||
/>
|
||||
)}
|
||||
@@ -779,7 +772,7 @@ export function AppChat() {
|
||||
</Box>
|
||||
|
||||
{/* Hover zone for auto-hide */}
|
||||
{!forceComposerHide && composerAutoHide.isHidden && <Box {...composerAutoHide.detectorProps} />}
|
||||
{!isChromeless && !forceComposerHide && composerAutoHide.isHidden && <Box {...composerAutoHide.detectorProps} />}
|
||||
|
||||
{/* Diagrams */}
|
||||
{!!diagramConfig && (
|
||||
|
||||
@@ -7,18 +7,18 @@ import { Box, List } from '@mui/joy';
|
||||
import type { SystemPurposeExample } from '../../../data';
|
||||
|
||||
import type { DiagramConfig } from '~/modules/aifn/digrams/DiagramsModal';
|
||||
import { speakText } from '~/modules/speex/speex.client';
|
||||
|
||||
import type { ConversationHandler } from '~/common/chat-overlay/ConversationHandler';
|
||||
import type { DLLMContextTokens } from '~/common/stores/llms/llms.types';
|
||||
import { DConversationId, excludeSystemMessages } from '~/common/stores/chat/chat.conversation';
|
||||
import { ShortcutKey, useGlobalShortcuts } from '~/common/components/shortcuts/useGlobalShortcuts';
|
||||
import { clipboardInterceptCtrlCForCleanup } from '~/common/util/clipboardUtils';
|
||||
import { convertFilesToDAttachmentFragments } from '~/common/attachment-drafts/attachment.pipeline';
|
||||
import { createDMessageFromFragments, createDMessageTextContent, DMessage, DMessageId, DMessageUserFlag, DMetaReferenceItem, MESSAGE_FLAG_AIX_SKIP, messageHasUserFlag } from '~/common/stores/chat/chat.message';
|
||||
import { createDMessageFromFragments, createDMessageTextContent, DMessage, DMessageGenerator, DMessageId, DMessageUserFlag, DMetaReferenceItem, MESSAGE_FLAG_AIX_SKIP, messageHasUserFlag } from '~/common/stores/chat/chat.message';
|
||||
import { createTextContentFragment, DMessageFragment, DMessageFragmentId } from '~/common/stores/chat/chat.fragments';
|
||||
import { openFileForAttaching } from '~/common/components/ButtonAttachFiles';
|
||||
import { optimaOpenPreferences } from '~/common/layout/optima/useOptima';
|
||||
import { useBrowserTranslationWarning } from '~/common/components/useIsBrowserTranslating';
|
||||
import { useCapabilityElevenLabs } from '~/common/components/useCapabilities';
|
||||
import { useChatOverlayStore } from '~/common/chat-overlay/store-perchat_vanilla';
|
||||
import { useChatStore } from '~/common/stores/chat/store-chats';
|
||||
import { useScrollToBottom } from '~/common/scroll-to-bottom/useScrollToBottom';
|
||||
@@ -51,7 +51,6 @@ export function ChatMessageList(props: {
|
||||
onConversationNew: (forceNoRecycle: boolean, isIncognito: boolean) => void,
|
||||
onTextDiagram: (diagramConfig: DiagramConfig | null) => void,
|
||||
onTextImagine: (conversationId: DConversationId, selectedText: string) => Promise<void>,
|
||||
onTextSpeak: (selectedText: string) => Promise<void>,
|
||||
setIsMessageSelectionMode: (isMessageSelectionMode: boolean) => void,
|
||||
sx?: SxProps,
|
||||
}) {
|
||||
@@ -65,7 +64,6 @@ export function ChatMessageList(props: {
|
||||
const { notifyBooting } = useScrollToBottom();
|
||||
const danger_experimentalHtmlWebUi = useChatAutoSuggestHTMLUI();
|
||||
const [showSystemMessages] = useChatShowSystemMessages();
|
||||
const optionalTranslationWarning = useBrowserTranslationWarning();
|
||||
const { conversationMessages, historyTokenCount } = useChatStore(useShallow(({ conversations }) => {
|
||||
const conversation = conversations.find(conversation => conversation.id === props.conversationId);
|
||||
return {
|
||||
@@ -77,10 +75,9 @@ export function ChatMessageList(props: {
|
||||
_composerInReferenceToCount: state.inReferenceTo?.length ?? 0,
|
||||
ephemerals: state.ephemerals?.length ? state.ephemerals : null,
|
||||
})));
|
||||
const { mayWork: isSpeakable } = useCapabilityElevenLabs();
|
||||
|
||||
// derived state
|
||||
const { conversationHandler, conversationId, capabilityHasT2I, onConversationBranch, onConversationExecuteHistory, onTextDiagram, onTextImagine, onTextSpeak } = props;
|
||||
const { conversationHandler, conversationId, capabilityHasT2I, onConversationBranch, onConversationExecuteHistory, onTextDiagram, onTextImagine } = props;
|
||||
const composerCanAddInReferenceTo = _composerInReferenceToCount < 5;
|
||||
const composerHasInReferenceto = _composerInReferenceToCount > 0;
|
||||
|
||||
@@ -126,6 +123,61 @@ export function ChatMessageList(props: {
|
||||
}
|
||||
}, [conversationHandler, conversationId, onConversationExecuteHistory]);
|
||||
|
||||
const handleMessageUpstreamResume = React.useCallback(async (generator: DMessageGenerator, messageId: DMessageId) => {
|
||||
if (!conversationId || !conversationHandler) return;
|
||||
if (!generator.upstreamHandle) throw new Error('No upstream handle on generator');
|
||||
|
||||
// For AIX generators the DLLMId is at .aix.mId
|
||||
const llmId = generator.mgt === 'aix' ? generator.aix.mId : undefined;
|
||||
if (!llmId) throw new Error('No model id on generator');
|
||||
|
||||
const { aixCreateChatGenerateContext, aixReattachContent_DMessage_orThrow } = await import('~/modules/aix/client/aix.client');
|
||||
const result = await aixReattachContent_DMessage_orThrow(
|
||||
llmId,
|
||||
generator,
|
||||
aixCreateChatGenerateContext('conversation', conversationId),
|
||||
{ abortSignal: 'NON_ABORTABLE', throttleParallelThreads: 0 },
|
||||
async (update, isDone) => {
|
||||
conversationHandler.messageEdit(messageId, {
|
||||
fragments: update.fragments,
|
||||
generator: update.generator,
|
||||
pendingIncomplete: update.pendingIncomplete,
|
||||
}, isDone, isDone); // remove the pending state and updte only when done
|
||||
},
|
||||
);
|
||||
|
||||
// Manual reattach is one-shot: on failure (e.g. upstream 404 from expired or already-consumed handle),
|
||||
// drop the upstreamHandle so the Resume button doesn't keep luring the user into the same error.
|
||||
// On 'aborted' we keep it so the user can try again later; on 'completed' the reassembler already cleared it.
|
||||
// 2026-04-22: disabled; it was removing the connect button on a connection error (e.g. wifi drop)
|
||||
// if (result.outcome === 'failed' && result.generator?.upstreamHandle)
|
||||
// conversationHandler.messageEdit(messageId, {
|
||||
// generator: { ...result.generator, upstreamHandle: undefined },
|
||||
// }, false /* messageComplete */, true /* touch */);
|
||||
}, [conversationHandler, conversationId]);
|
||||
|
||||
const handleMessageUpstreamDelete = React.useCallback(async (generator: DMessageGenerator, messageId: DMessageId) => {
|
||||
if (!conversationId || !conversationHandler) return;
|
||||
if (!generator.upstreamHandle) throw new Error('No upstream handle on generator');
|
||||
|
||||
// For AIX generators the DLLMId is at .aix.mId
|
||||
const llmId = generator.mgt === 'aix' ? generator.aix.mId : undefined;
|
||||
if (!llmId) throw new Error('No model id on generator');
|
||||
|
||||
const { aixDeleteUpstreamContent_orThrow } = await import('~/modules/aix/client/aix.client');
|
||||
const result = await aixDeleteUpstreamContent_orThrow(llmId, generator);
|
||||
|
||||
// On success (or 404 already-gone), clear the handle locally so the buttons disappear
|
||||
if (result.ok) {
|
||||
conversationHandler.messageEdit(messageId, {
|
||||
generator: { ...generator, upstreamHandle: undefined },
|
||||
}, false /* messageComplete */, true /* touch */);
|
||||
return;
|
||||
}
|
||||
// On failure: surface to the button's error UI
|
||||
throw new Error(result.message || `Delete failed${result.httpStatus ? ` (HTTP ${result.httpStatus})` : ''}`);
|
||||
}, [conversationHandler, conversationId]);
|
||||
|
||||
|
||||
// message menu methods proxy
|
||||
|
||||
@@ -214,12 +266,15 @@ export function ChatMessageList(props: {
|
||||
}, [capabilityHasT2I, conversationId, onTextImagine]);
|
||||
|
||||
const handleTextSpeak = React.useCallback(async (text: string) => {
|
||||
if (!isSpeakable)
|
||||
return optimaOpenPreferences('voice');
|
||||
// sandwich the speaking with the indicator
|
||||
setIsSpeaking(true);
|
||||
await onTextSpeak(text);
|
||||
const result = await speakText(text, undefined, { label: 'Chat speak' });
|
||||
setIsSpeaking(false);
|
||||
}, [isSpeakable, onTextSpeak]);
|
||||
|
||||
// open voice preferences
|
||||
if (!result.success && (result.errorType === 'tts-no-engine' || result.errorType === 'tts-unconfigured'))
|
||||
optimaOpenPreferences('voice');
|
||||
}, []);
|
||||
|
||||
|
||||
// operate on the local selection set
|
||||
@@ -324,9 +379,7 @@ export function ChatMessageList(props: {
|
||||
);
|
||||
|
||||
return (
|
||||
<List role='chat-messages-list' sx={listSx}>
|
||||
|
||||
{optionalTranslationWarning}
|
||||
<List role='chat-messages-list' sx={listSx} onCopy={clipboardInterceptCtrlCForCleanup}>
|
||||
|
||||
{props.isMessageSelectionMode && (
|
||||
<MessagesSelectionHeader
|
||||
@@ -373,6 +426,8 @@ export function ChatMessageList(props: {
|
||||
onMessageBeam={handleMessageBeam}
|
||||
onMessageBranch={handleMessageBranch}
|
||||
onMessageContinue={handleMessageContinue}
|
||||
onMessageUpstreamResume={handleMessageUpstreamResume}
|
||||
onMessageUpstreamDelete={handleMessageUpstreamDelete}
|
||||
onMessageDelete={handleMessageDelete}
|
||||
onMessageFragmentAppend={handleMessageAppendFragment}
|
||||
onMessageFragmentDelete={handleMessageDeleteFragment}
|
||||
@@ -381,7 +436,7 @@ export function ChatMessageList(props: {
|
||||
onMessageTruncate={handleMessageTruncate}
|
||||
onTextDiagram={handleTextDiagram}
|
||||
onTextImagine={capabilityHasT2I ? handleTextImagine : undefined}
|
||||
onTextSpeak={isSpeakable ? handleTextSpeak : undefined}
|
||||
onTextSpeak={handleTextSpeak}
|
||||
/>
|
||||
|
||||
);
|
||||
|
||||
@@ -1,10 +1,8 @@
|
||||
import * as React from 'react';
|
||||
import { useShallow } from 'zustand/react/shallow';
|
||||
import type { FileWithHandle } from 'browser-fs-access';
|
||||
|
||||
import { Box, Button, ButtonGroup, Card, Dropdown, Grid, IconButton, Menu, MenuButton, MenuItem, Textarea, Typography } from '@mui/joy';
|
||||
import { ColorPaletteProp, SxProps, VariantProp } from '@mui/joy/styles/types';
|
||||
import AddCircleOutlineIcon from '@mui/icons-material/AddCircleOutline';
|
||||
import type { ColorPaletteProp, SxProps, VariantProp } from '@mui/joy/styles/types';
|
||||
import { Box, Button, ButtonGroup, Card, Grid, IconButton, Textarea, Typography } from '@mui/joy';
|
||||
import ExpandLessIcon from '@mui/icons-material/ExpandLess';
|
||||
import PsychologyIcon from '@mui/icons-material/Psychology';
|
||||
import SendIcon from '@mui/icons-material/Send';
|
||||
@@ -18,6 +16,7 @@ import { useAgiAttachmentPrompts } from '~/modules/aifn/agiattachmentprompts/use
|
||||
import { useBrowseCapability } from '~/modules/browse/store-module-browsing';
|
||||
|
||||
import { DLLM, getLLMContextTokens, LLM_IF_OAI_Vision } from '~/common/stores/llms/llms.types';
|
||||
import { llmChatPricing_adjusted } from '~/common/stores/llms/llms.pricing';
|
||||
import { AudioGenerator } from '~/common/util/audio/AudioGenerator';
|
||||
import { AudioPlayer } from '~/common/util/audio/AudioPlayer';
|
||||
import { ButtonAttachFilesMemo, openFileForAttaching } from '~/common/components/ButtonAttachFiles';
|
||||
@@ -25,6 +24,7 @@ import { ChatBeamIcon } from '~/common/components/icons/ChatBeamIcon';
|
||||
import { ConfirmationModal } from '~/common/components/modals/ConfirmationModal';
|
||||
import { ConversationsManager } from '~/common/chat-overlay/ConversationsManager';
|
||||
import { DMessageId, DMessageMetadata, DMetaReferenceItem, messageFragmentsReduceText } from '~/common/stores/chat/chat.message';
|
||||
import { PhPaintBrush } from '~/common/components/icons/phosphor/PhPaintBrush';
|
||||
import { ShortcutKey, ShortcutObject, useGlobalShortcuts } from '~/common/components/shortcuts/useGlobalShortcuts';
|
||||
import { addSnackbar } from '~/common/components/snackbar/useSnackbarsStore';
|
||||
import { animationEnterBelow } from '~/common/util/animUtils';
|
||||
@@ -34,12 +34,13 @@ import { copyToClipboard, supportsClipboardRead } from '~/common/util/clipboardU
|
||||
import { createTextContentFragment, DMessageAttachmentFragment, DMessageContentFragment, duplicateDMessageFragments } from '~/common/stores/chat/chat.fragments';
|
||||
import { glueForMessageTokens, marshallWrapDocFragments } from '~/common/stores/chat/chat.tokens';
|
||||
import { isValidConversation, useChatStore } from '~/common/stores/chat/store-chats';
|
||||
import { getModelParameterValueOrThrow } from '~/common/stores/llms/llms.parameters';
|
||||
import { getModelParameterValueWithFallback } from '~/common/stores/llms/llms.parameters';
|
||||
import { launchAppCall, removeQueryParam, useRouterQuery } from '~/common/app.routes';
|
||||
import { lineHeightTextareaMd, themeBgAppChatComposer } from '~/common/app.theme';
|
||||
import { optimaOpenPreferences } from '~/common/layout/optima/useOptima';
|
||||
import { platformAwareKeystrokes } from '~/common/components/KeyStroke';
|
||||
import { supportsCameraCapture } from '~/common/components/camera/useCameraCapture';
|
||||
import { supportsScreenCapture } from '~/common/util/screenCaptureUtils';
|
||||
import { useAttachHandler_CameraOpen, useAttachHandler_Files, useAttachHandler_PasteIntercept, useAttachHandler_ScreenCapture, useAttachHandler_UrlWebLinks } from '~/common/attachment-drafts/attachment-sources/useAttachmentSourceHandlers';
|
||||
import { useChatComposerOverlayStore } from '~/common/chat-overlay/store-perchat_vanilla';
|
||||
import { useComposerStartupText, useLogicSherpaStore } from '~/common/logic/store-logic-sherpa';
|
||||
import { useOverlayComponents } from '~/common/layout/overlays/useOverlayComponents';
|
||||
@@ -52,19 +53,15 @@ import { providerCommands } from './actile/providerCommands';
|
||||
import { providerStarredMessages, StarredMessageItem } from './actile/providerStarredMessage';
|
||||
import { useActileManager } from './actile/useActileManager';
|
||||
|
||||
import type { AttachmentDraftId } from '~/common/attachment-drafts/attachment.types';
|
||||
import { LLMAttachmentDraftsAction, LLMAttachmentsList } from './llmattachments/LLMAttachmentsList';
|
||||
import { PhPaintBrush } from '~/common/components/icons/phosphor/PhPaintBrush';
|
||||
import type { AttachmentDraftId, AttachmentDraftsAction } from '~/common/attachment-drafts/attachment.types';
|
||||
import { AttachmentSourcesMemo } from '~/common/attachment-drafts/attachment-sources/AttachmentSources';
|
||||
import { useAttachmentDrafts } from '~/common/attachment-drafts/useAttachmentDrafts';
|
||||
import { useLLMAttachmentDrafts } from './llmattachments/useLLMAttachmentDrafts';
|
||||
import { useAttachmentDraftsEnrichment } from '~/common/attachment-drafts/llm-enrichment/useAttachmentDraftsEnrichment';
|
||||
import { useGoogleDrivePicker } from '~/common/attachment-drafts/attachment-sources/useGoogleDrivePicker';
|
||||
|
||||
import type { ChatExecuteMode } from '../../execute-mode/execute-mode.types';
|
||||
import { chatExecuteModeCanAttach, useChatExecuteMode } from '../../execute-mode/useChatExecuteMode';
|
||||
|
||||
import { ButtonAttachCameraMemo, useCameraCaptureModalDialog } from './buttons/ButtonAttachCamera';
|
||||
import { ButtonAttachClipboardMemo } from './buttons/ButtonAttachClipboard';
|
||||
import { ButtonAttachScreenCaptureMemo } from './buttons/ButtonAttachScreenCapture';
|
||||
import { ButtonAttachWebMemo } from './buttons/ButtonAttachWeb';
|
||||
import { ButtonBeamMemo } from './buttons/ButtonBeam';
|
||||
import { ButtonCallMemo } from './buttons/ButtonCall';
|
||||
import { ButtonGroupDrawRepeat } from './buttons/ButtonGroupDrawRepeat';
|
||||
@@ -72,6 +69,7 @@ import { ButtonMicContinuationMemo } from './buttons/ButtonMicContinuation';
|
||||
import { ButtonMicMemo } from './buttons/ButtonMic';
|
||||
import { ButtonMultiChatMemo } from './buttons/ButtonMultiChat';
|
||||
import { ButtonOptionsDraw } from './buttons/ButtonOptionsDraw';
|
||||
import { ComposerAttachmentDraftsList } from './llmattachments/ComposerAttachmentDraftsList';
|
||||
import { ComposerTextAreaActions } from './textarea/ComposerTextAreaActions';
|
||||
import { ComposerTextAreaDrawActions } from './textarea/ComposerTextAreaDrawActions';
|
||||
import { StatusBarMemo } from '../StatusBar';
|
||||
@@ -79,7 +77,6 @@ import { TokenBadgeMemo } from './tokens/TokenBadge';
|
||||
import { TokenProgressbarMemo } from './tokens/TokenProgressbar';
|
||||
import { useComposerDragDrop } from './useComposerDragDrop';
|
||||
import { useTextTokenCount } from './tokens/useTextTokenCounter';
|
||||
import { useWebInputModal } from './WebInputModal';
|
||||
|
||||
|
||||
// configuration
|
||||
@@ -136,16 +133,13 @@ export function Composer(props: {
|
||||
// external state
|
||||
const { showPromisedOverlay } = useOverlayComponents();
|
||||
const { newChat: appChatNewChatIntent } = useRouterQuery<Partial<AppChatIntent>>();
|
||||
const { labsAttachScreenCapture, labsCameraDesktop, labsShowCost, labsShowShortcutBar } = useUXLabsStore(useShallow(state => ({
|
||||
labsAttachScreenCapture: state.labsAttachScreenCapture,
|
||||
labsCameraDesktop: state.labsCameraDesktop,
|
||||
labsShowCost: state.labsShowCost,
|
||||
const { labsComposerAttachmentsInline, labsShowShortcutBar } = useUXLabsStore(useShallow(state => ({
|
||||
labsComposerAttachmentsInline: state.labsComposerAttachmentsInline,
|
||||
labsShowShortcutBar: state.labsShowShortcutBar,
|
||||
})));
|
||||
const timeToShowTips = useLogicSherpaStore(state => state.usageCount >= SHOW_TIPS_AFTER_RELOADS);
|
||||
const { novel: explainShiftEnter, touch: touchShiftEnter } = useUICounter('composer-shift-enter');
|
||||
const { novel: explainAltEnter, touch: touchAltEnter } = useUICounter('composer-alt-enter');
|
||||
const { novel: explainCtrlEnter, touch: touchCtrlEnter } = useUICounter('composer-ctrl-enter');
|
||||
|
||||
const [startupText, setStartupText] = useComposerStartupText();
|
||||
const enterIsNewline = useUIPreferencesStore(state => state.enterIsNewline);
|
||||
const composerQuickButton = useUIPreferencesStore(state => state.composerQuickButton);
|
||||
@@ -174,8 +168,8 @@ export function Composer(props: {
|
||||
const chatLLMSupportsImages = !!props.chatLLM?.interfaces?.includes(LLM_IF_OAI_Vision);
|
||||
|
||||
// don't load URLs if the user is typing a command or there's no capability
|
||||
const hasComposerBrowseCapability = useBrowseCapability().inComposer;
|
||||
const enableLoadURLsInComposer = hasComposerBrowseCapability && !composeText.startsWith('/');
|
||||
const browseCapability = useBrowseCapability();
|
||||
const enableLoadURLsInComposer = browseCapability.inComposer && !composeText.startsWith('/');
|
||||
|
||||
// user message for attachments
|
||||
const { onConversationBeamEdit, onConversationsImportFromFiles } = props;
|
||||
@@ -197,12 +191,12 @@ export function Composer(props: {
|
||||
const showChatAttachments = chatExecuteModeCanAttach(chatExecuteMode, props.capabilityHasT2IEdit);
|
||||
const {
|
||||
/* items */ attachmentDrafts,
|
||||
/* append */ attachAppendClipboardItems, attachAppendDataTransfer, attachAppendEgoFragments, attachAppendFile, attachAppendUrl,
|
||||
/* append */ attachAppendClipboardItems, attachAppendCloudFile, attachAppendDataTransfer, attachAppendEgoFragments, attachAppendFile, attachAppendUrl,
|
||||
/* take */ attachmentsRemoveAll, attachmentsTakeAllFragments, attachmentsTakeFragmentsByType,
|
||||
} = useAttachmentDrafts(conversationOverlayStore, enableLoadURLsInComposer, chatLLMSupportsImages, handleFilterAGIFile, showChatAttachments === 'only-images');
|
||||
|
||||
// attachments derived state
|
||||
const llmAttachmentDraftsCollection = useLLMAttachmentDrafts(attachmentDrafts, props.chatLLM, chatLLMSupportsImages);
|
||||
const { enrichment: attEnrichment, summary: attEnrichSummary } = useAttachmentDraftsEnrichment(attachmentDrafts, props.chatLLM, chatLLMSupportsImages);
|
||||
|
||||
// drag/drop
|
||||
const { dragContainerSx, dropComponent, handleContainerDragEnter, handleContainerDragStart } = useComposerDragDrop(!props.isMobile, attachAppendDataTransfer);
|
||||
@@ -227,13 +221,13 @@ export function Composer(props: {
|
||||
// tokens derived state
|
||||
|
||||
const tokensComposerTextDebounced = useTextTokenCount(composeText, props.chatLLM, 800, 1600);
|
||||
let tokensComposer = (tokensComposerTextDebounced ?? 0) + (llmAttachmentDraftsCollection.llmTokenCountApprox || 0);
|
||||
let tokensComposer = (tokensComposerTextDebounced ?? 0) + (attEnrichSummary.totalTokensApprox || 0);
|
||||
if (props.chatLLM && tokensComposer > 0)
|
||||
tokensComposer += glueForMessageTokens(props.chatLLM);
|
||||
const tokensHistory = _historyTokenCount;
|
||||
const tokensResponseMax = getModelParameterValueOrThrow('llmResponseTokens', props.chatLLM?.initialParameters, props.chatLLM?.userParameters, 0) ?? 0;
|
||||
const tokensResponseMax = getModelParameterValueWithFallback('llmResponseTokens', props.chatLLM?.initialParameters, props.chatLLM?.userParameters, 0) ?? 0 /* if null, assume 0*/;
|
||||
const tokenLimit = getLLMContextTokens(props.chatLLM) ?? 0;
|
||||
const tokenChatPricing = props.chatLLM?.pricing?.chat;
|
||||
const tokenChatPricing = React.useMemo(() => llmChatPricing_adjusted(props.chatLLM), [props.chatLLM]);
|
||||
|
||||
|
||||
// Effect: load initial text if queued up (e.g. by /link/share_targetF)
|
||||
@@ -271,7 +265,7 @@ export function Composer(props: {
|
||||
// Confirmation Modals
|
||||
|
||||
const confirmProceedIfAttachmentsNotSupported = React.useCallback(async (): Promise<boolean> => {
|
||||
if (llmAttachmentDraftsCollection.canAttachAllFragments) return true;
|
||||
if (attEnrichSummary.allCompatible) return true;
|
||||
return await showPromisedOverlay('composer-unsupported-attachments', { rejectWithValue: false }, ({ onResolve, onUserReject }) => (
|
||||
<ConfirmationModal
|
||||
open
|
||||
@@ -283,7 +277,7 @@ export function Composer(props: {
|
||||
title='Attachment Compatibility Notice'
|
||||
/>
|
||||
));
|
||||
}, [llmAttachmentDraftsCollection.canAttachAllFragments, showPromisedOverlay]);
|
||||
}, [attEnrichSummary.allCompatible, showPromisedOverlay]);
|
||||
|
||||
|
||||
// Primary button
|
||||
@@ -545,20 +539,21 @@ export function Composer(props: {
|
||||
|
||||
// Enter: primary action
|
||||
if (e.key === 'Enter') {
|
||||
// Skip if composing (e.g., CJK input methods) - issue #784
|
||||
if (e.nativeEvent.isComposing)
|
||||
return;
|
||||
|
||||
// Alt (Windows) or Option (Mac) + Enter: append the message instead of sending it
|
||||
if (e.altKey && !e.metaKey && !e.ctrlKey) {
|
||||
if (await handleSendAction('append-user', composeText)) // 'alt+enter' -> write
|
||||
touchAltEnter();
|
||||
e.stopPropagation();
|
||||
return e.preventDefault();
|
||||
}
|
||||
|
||||
// Ctrl (Windows) or Command (Mac) + Enter: send for beaming
|
||||
if (e.ctrlKey && !e.metaKey && !e.altKey) {
|
||||
if (await handleSendAction('beam-content', composeText)) { // 'ctrl+enter' -> beam
|
||||
touchCtrlEnter();
|
||||
if (await handleSendAction('beam-content', composeText)) // 'ctrl+enter' -> beam
|
||||
e.stopPropagation();
|
||||
}
|
||||
return e.preventDefault();
|
||||
}
|
||||
|
||||
@@ -572,7 +567,7 @@ export function Composer(props: {
|
||||
}
|
||||
}
|
||||
|
||||
}, [actileInterceptKeydown, assistantAbortible, chatExecuteMode, composeText, enterIsNewline, handleSendAction, touchAltEnter, touchCtrlEnter, touchShiftEnter]);
|
||||
}, [actileInterceptKeydown, assistantAbortible, chatExecuteMode, composeText, enterIsNewline, handleSendAction, touchShiftEnter]);
|
||||
|
||||
|
||||
// Focus mode
|
||||
@@ -589,41 +584,19 @@ export function Composer(props: {
|
||||
const handleToggleMinimized = React.useCallback(() => setIsMinimized(hide => !hide), []);
|
||||
|
||||
|
||||
// Attachment Up
|
||||
// Attachments Up
|
||||
|
||||
const handleAttachCtrlV = React.useCallback(async (event: React.ClipboardEvent) => {
|
||||
if (await attachAppendDataTransfer(event.clipboardData, 'paste', false) === 'as_files')
|
||||
event.preventDefault();
|
||||
}, [attachAppendDataTransfer]);
|
||||
|
||||
const handleAttachCameraImage = React.useCallback((file: FileWithHandle) => {
|
||||
void attachAppendFile('camera', file);
|
||||
}, [attachAppendFile]);
|
||||
|
||||
const { openCamera, cameraCaptureComponent } = useCameraCaptureModalDialog(handleAttachCameraImage);
|
||||
|
||||
const handleAttachScreenCapture = React.useCallback((file: File) => {
|
||||
void attachAppendFile('screencapture', file);
|
||||
}, [attachAppendFile]);
|
||||
|
||||
const handleAttachFiles = React.useCallback(async (files: FileWithHandle[], errorMessage: string | null) => {
|
||||
if (errorMessage)
|
||||
addSnackbar({ key: 'attach-files-open-fail', message: `Unable to open files: ${errorMessage}`, type: 'issue' });
|
||||
for (let file of files)
|
||||
await attachAppendFile('file-open', file)
|
||||
.catch((error: any) => addSnackbar({ key: 'attach-file-open-fail', message: `Unable to attach the file "${file.name}" (${error?.message || error?.toString() || 'unknown error'})`, type: 'issue' }));
|
||||
}, [attachAppendFile]);
|
||||
|
||||
const handleAttachWebLinks = React.useCallback(async (links: { url: string }[]) => {
|
||||
links.forEach(link => void attachAppendUrl('input-link', link.url));
|
||||
}, [attachAppendUrl]);
|
||||
|
||||
const { openWebInputDialog, webInputDialogComponent } = useWebInputModal(handleAttachWebLinks, composeText);
|
||||
const handleAttachCtrlV = useAttachHandler_PasteIntercept(attachAppendDataTransfer);
|
||||
const handleAttachFiles = useAttachHandler_Files(attachAppendFile);
|
||||
const handleOpenCamera = useAttachHandler_CameraOpen(attachAppendFile);
|
||||
const handleAttachScreenCapture = useAttachHandler_ScreenCapture(attachAppendFile);
|
||||
const { openWebInputDialog, webInputDialogComponent } = useAttachHandler_UrlWebLinks(attachAppendUrl, composeText);
|
||||
const { openGoogleDrivePicker, googleDrivePickerComponent } = useGoogleDrivePicker(attachAppendCloudFile, isMobile);
|
||||
|
||||
|
||||
// Attachments Down
|
||||
|
||||
const handleAttachmentDraftsAction = React.useCallback((attachmentDraftIdOrAll: AttachmentDraftId | null, action: LLMAttachmentDraftsAction) => {
|
||||
const handleAttachmentDraftsAction = React.useCallback((attachmentDraftIdOrAll: AttachmentDraftId | null, action: AttachmentDraftsAction) => {
|
||||
switch (action) {
|
||||
case 'copy-text':
|
||||
const copyFragments = attachmentsTakeFragmentsByType('doc', attachmentDraftIdOrAll, false);
|
||||
@@ -652,7 +625,7 @@ export function Composer(props: {
|
||||
if (supportsClipboardRead())
|
||||
composerShortcuts.push({ key: 'v', ctrl: true, shift: true, action: attachAppendClipboardItems, description: 'Attach Clipboard' });
|
||||
// Future: keep reactive state here to support Live Screen Capture and more
|
||||
// if (labsAttachScreenCapture && supportsScreenCapture)
|
||||
// if (supportsScreenCapture)
|
||||
// composerShortcuts.push({ key: 's', ctrl: true, shift: true, action: openScreenCaptureDialog, description: 'Attach Screen Capture' });
|
||||
}
|
||||
if (recognitionState.isActive) {
|
||||
@@ -685,12 +658,13 @@ export function Composer(props: {
|
||||
|
||||
const showChatInReferenceTo = !!inReferenceTo?.length;
|
||||
const showChatExtras = isText && !showChatInReferenceTo && !assistantAbortible && composerQuickButton !== 'off';
|
||||
const speechMayWork = browserSpeechRecognitionCapability().mayWork;
|
||||
|
||||
const sendButtonVariant: VariantProp = (isAppend || (isMobile && isTextBeam)) ? 'outlined' : 'solid';
|
||||
|
||||
const sendButtonColor: ColorPaletteProp =
|
||||
assistantAbortible ? 'warning'
|
||||
: !llmAttachmentDraftsCollection.canAttachAllFragments ? 'warning'
|
||||
: !attEnrichSummary.allCompatible ? 'warning'
|
||||
: chatExecuteModeSendColor;
|
||||
|
||||
const sendButtonLabel = chatExecuteModeSendLabel;
|
||||
@@ -704,7 +678,7 @@ export function Composer(props: {
|
||||
: <TelegramIcon />;
|
||||
|
||||
const beamButtonColor: ColorPaletteProp | undefined =
|
||||
!llmAttachmentDraftsCollection.canAttachAllFragments ? 'warning'
|
||||
!attEnrichSummary.allCompatible ? 'warning'
|
||||
: undefined;
|
||||
|
||||
const showTint: ColorPaletteProp | undefined = isDraw ? 'warning' : isReAct ? 'success' : undefined;
|
||||
@@ -731,10 +705,6 @@ export function Composer(props: {
|
||||
if (isDesktop && timeToShowTips && !isDraw) {
|
||||
if (explainShiftEnter)
|
||||
textPlaceholder += !enterIsNewline ? '\n\n⏎ Shift + Enter to add a new line' : '\n\n➤ Shift + Enter to send';
|
||||
// else if (explainAltEnter)
|
||||
// textPlaceholder += platformAwareKeystrokes('\n\n⭳ Tip: Alt + Enter to just append the message');
|
||||
else if (explainCtrlEnter)
|
||||
textPlaceholder += platformAwareKeystrokes('\n\n⫷ Tip: Ctrl + Enter to beam');
|
||||
}
|
||||
|
||||
const stableGridSx: SxProps = React.useMemo(() => ({
|
||||
@@ -775,37 +745,24 @@ export function Composer(props: {
|
||||
{/* [mobile] Mic button */}
|
||||
{recognitionState.isAvailable && <ButtonMicMemo variant={micVariant} color={micColor === 'danger' ? 'danger' : showTint || micColor} errorMessage={recognitionState.errorMessage} onClick={handleToggleMic} />}
|
||||
|
||||
{/* Responsive Camera OCR button */}
|
||||
{showChatAttachments && <ButtonAttachCameraMemo color={showTint} isMobile onOpenCamera={openCamera} />}
|
||||
|
||||
{/* [mobile] Attach file button (in draw with image mode) */}
|
||||
{showChatAttachments === 'only-images' && <ButtonAttachFilesMemo color={showTint} isMobile onAttachFiles={handleAttachFiles} fullWidth multiple />}
|
||||
{showChatAttachments === 'only-images' && <ButtonAttachFilesMemo color={showTint} isMobile onAttachFiles={handleAttachFiles} multiple />}
|
||||
|
||||
{/* [mobile] [+] button */}
|
||||
{/* [mobile] [+] attachment sources menu */}
|
||||
{showChatAttachments === true && (
|
||||
<Dropdown>
|
||||
<MenuButton slots={{ root: IconButton }}>
|
||||
<AddCircleOutlineIcon />
|
||||
</MenuButton>
|
||||
<Menu>
|
||||
|
||||
{/* Responsive Open Files button */}
|
||||
<MenuItem>
|
||||
<ButtonAttachFilesMemo onAttachFiles={handleAttachFiles} fullWidth multiple />
|
||||
</MenuItem>
|
||||
|
||||
{/* Responsive Web button */}
|
||||
<MenuItem>
|
||||
<ButtonAttachWebMemo disabled={!hasComposerBrowseCapability} onOpenWebInput={openWebInputDialog} />
|
||||
</MenuItem>
|
||||
|
||||
{/* Responsive Paste button */}
|
||||
{supportsClipboardRead() && <MenuItem>
|
||||
<ButtonAttachClipboardMemo onAttachClipboard={attachAppendClipboardItems} />
|
||||
</MenuItem>}
|
||||
|
||||
</Menu>
|
||||
</Dropdown>
|
||||
<AttachmentSourcesMemo
|
||||
mode='menu-compact'
|
||||
canBrowse={browseCapability.mayWork}
|
||||
hasScreenCapture={supportsScreenCapture}
|
||||
hasCamera={supportsCameraCapture()}
|
||||
onlyImages={false /* because if yes, we only show the attach files above */}
|
||||
onAttachClipboard={attachAppendClipboardItems}
|
||||
onAttachFiles={handleAttachFiles}
|
||||
onAttachScreenCapture={handleAttachScreenCapture}
|
||||
onOpenCamera={handleOpenCamera}
|
||||
onOpenGoogleDrivePicker={openGoogleDrivePicker}
|
||||
onOpenWebInput={openWebInputDialog}
|
||||
/>
|
||||
)}
|
||||
|
||||
{/* [Mobile] MultiChat button */}
|
||||
@@ -816,28 +773,27 @@ export function Composer(props: {
|
||||
|
||||
{/* [Desktop, Col1] Insert Multi-modal content buttons */}
|
||||
{isDesktop && showChatAttachments && (
|
||||
<Box sx={{ flexGrow: 0, display: 'grid', gap: (labsAttachScreenCapture && labsCameraDesktop) ? 0.5 : 1, alignSelf: 'flex-start' }}>
|
||||
<Box sx={{ flexGrow: 0, display: 'grid', gap: 0.5, alignSelf: 'flex-start' }}>
|
||||
|
||||
{/*<FormHelperText sx={{ mx: 'auto' }}>*/}
|
||||
{/* Attach*/}
|
||||
{/*</FormHelperText>*/}
|
||||
{/* [desktop] Attachment Sources: dropdown menu or inline buttons */}
|
||||
<AttachmentSourcesMemo
|
||||
mode={!labsComposerAttachmentsInline ? 'menu-rich' : 'inline-buttons'}
|
||||
color={!labsComposerAttachmentsInline ? (showTint || 'neutral') : showTint}
|
||||
richButtonStandOut={!isText && !isAppend}
|
||||
canBrowse={browseCapability.mayWork}
|
||||
hasScreenCapture={supportsScreenCapture}
|
||||
hasCamera={supportsCameraCapture()}
|
||||
onlyImages={showChatAttachments === 'only-images'}
|
||||
onAttachClipboard={attachAppendClipboardItems}
|
||||
onAttachFiles={handleAttachFiles}
|
||||
onAttachScreenCapture={handleAttachScreenCapture}
|
||||
onOpenCamera={handleOpenCamera}
|
||||
onOpenGoogleDrivePicker={openGoogleDrivePicker}
|
||||
onOpenWebInput={openWebInputDialog}
|
||||
/>
|
||||
|
||||
{/* Responsive Open Files button */}
|
||||
<ButtonAttachFilesMemo color={showTint} onAttachFiles={handleAttachFiles} fullWidth multiple />
|
||||
|
||||
{/* Responsive Web button */}
|
||||
{showChatAttachments !== 'only-images' && <ButtonAttachWebMemo color={showTint} disabled={!hasComposerBrowseCapability} onOpenWebInput={openWebInputDialog} />}
|
||||
|
||||
{/* Responsive Paste button */}
|
||||
{supportsClipboardRead() && showChatAttachments !== 'only-images' && <ButtonAttachClipboardMemo color={showTint} onAttachClipboard={attachAppendClipboardItems} />}
|
||||
|
||||
{/* Responsive Screen Capture button */}
|
||||
{labsAttachScreenCapture && supportsScreenCapture && <ButtonAttachScreenCaptureMemo color={showTint} onAttachScreenCapture={handleAttachScreenCapture} />}
|
||||
|
||||
{/* Responsive Camera OCR button */}
|
||||
{labsCameraDesktop && <ButtonAttachCameraMemo color={showTint} onOpenCamera={openCamera} />}
|
||||
|
||||
</Box>)}
|
||||
</Box>
|
||||
)}
|
||||
|
||||
|
||||
{/* Top: Textarea & Mic & Overlays, Bottom, Attachment Drafts */}
|
||||
@@ -859,7 +815,7 @@ export function Composer(props: {
|
||||
<Textarea
|
||||
variant='outlined'
|
||||
color={isDraw ? 'warning' : isReAct ? 'success' : undefined}
|
||||
autoFocus
|
||||
autoFocus={isDesktop}
|
||||
minRows={isMobile ? 3.5 : isDraw ? 4 : agiAttachmentPrompts.hasData ? 3 : showChatInReferenceTo ? 4 : 5}
|
||||
maxRows={isMobile ? 8 : 10}
|
||||
placeholder={textPlaceholder}
|
||||
@@ -905,7 +861,7 @@ export function Composer(props: {
|
||||
)}
|
||||
|
||||
{!showChatInReferenceTo && !isDraw && tokenLimit > 0 && (
|
||||
<TokenBadgeMemo hideBelowDollars={0.0001} chatPricing={tokenChatPricing} direct={tokensComposer} history={tokensHistory} responseMax={tokensResponseMax} limit={tokenLimit} showCost={labsShowCost} enableHover={!isMobile} showExcess absoluteBottomRight />
|
||||
<TokenBadgeMemo showCost hideBelowDollars={0.01} chatPricing={tokenChatPricing} direct={tokensComposer} history={tokensHistory} responseMax={tokensResponseMax} limit={tokenLimit} enableHover={!isMobile} showExcess absoluteBottomRight />
|
||||
)}
|
||||
|
||||
</Box>
|
||||
@@ -984,11 +940,12 @@ export function Composer(props: {
|
||||
|
||||
{/* Render any Attachments & menu items */}
|
||||
{!!conversationOverlayStore && showChatAttachments && (
|
||||
<LLMAttachmentsList
|
||||
agiAttachmentPrompts={agiAttachmentPrompts}
|
||||
<ComposerAttachmentDraftsList
|
||||
attachmentDraftsStoreApi={conversationOverlayStore}
|
||||
canInlineSomeFragments={llmAttachmentDraftsCollection.canInlineSomeFragments}
|
||||
llmAttachmentDrafts={llmAttachmentDraftsCollection.llmAttachmentDrafts}
|
||||
attachmentDrafts={attachmentDrafts}
|
||||
enrichment={attEnrichment}
|
||||
enrichmentSummary={attEnrichSummary}
|
||||
agiAttachmentPrompts={agiAttachmentPrompts}
|
||||
onAttachmentDraftsAction={handleAttachmentDraftsAction}
|
||||
/>
|
||||
)}
|
||||
@@ -1008,7 +965,7 @@ export function Composer(props: {
|
||||
|
||||
{/* [mobile] bottom-corner secondary button */}
|
||||
{isMobile && (showChatExtras
|
||||
? (composerQuickButton === 'call'
|
||||
? (composerQuickButton === 'call' && speechMayWork
|
||||
? <ButtonCallMemo isMobile disabled={noConversation || noLLM} onClick={handleCallClicked} />
|
||||
: <ButtonBeamMemo isMobile disabled={noConversation /*|| noLLM*/} color={beamButtonColor} hasContent={!!composeText} onClick={handleSendTextBeamClicked} />)
|
||||
: isDraw
|
||||
@@ -1099,8 +1056,8 @@ export function Composer(props: {
|
||||
{/* [desktop] secondary bottom-buttons (aligned to bottom for now, and mutually exclusive) */}
|
||||
{isDesktop && <Box sx={{ mt: 'auto', display: 'grid', gap: 1 }}>
|
||||
|
||||
{/* [desktop] Call secondary button */}
|
||||
{showChatExtras && <ButtonCallMemo disabled={noConversation || noLLM || assistantAbortible} onClick={handleCallClicked} />}
|
||||
{/* [desktop] Call secondary button - hidden when speech recognition is not available */}
|
||||
{showChatExtras && speechMayWork && <ButtonCallMemo disabled={noConversation || noLLM || assistantAbortible} onClick={handleCallClicked} />}
|
||||
|
||||
{/* [desktop] Draw Options secondary button */}
|
||||
{isDraw && <ButtonOptionsDraw onClick={handleDrawOptionsClicked} />}
|
||||
@@ -1120,8 +1077,8 @@ export function Composer(props: {
|
||||
{/* Execution Mode Menu */}
|
||||
{chatExecuteMenuComponent}
|
||||
|
||||
{/* Camera (when open) */}
|
||||
{cameraCaptureComponent}
|
||||
{/* Google Drive Picker (when open) */}
|
||||
{googleDrivePickerComponent}
|
||||
|
||||
{/* Web Input Dialog (when open) */}
|
||||
{webInputDialogComponent}
|
||||
|
||||
@@ -0,0 +1,76 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { CircularProgress, ListDivider, ListItemDecorator, MenuItem } from '@mui/joy';
|
||||
import AutoFixHighIcon from '@mui/icons-material/AutoFixHigh';
|
||||
|
||||
import type { AgiAttachmentPromptsData } from '~/modules/aifn/agiattachmentprompts/useAgiAttachmentPrompts';
|
||||
|
||||
import type { AttachmentDraft, AttachmentDraftId, AttachmentDraftsAction } from '~/common/attachment-drafts/attachment.types';
|
||||
import type { AttachmentDraftsStoreApi } from '~/common/attachment-drafts/store-attachment-drafts_slice';
|
||||
import type { AttachmentEnrichmentSummary, IAttachmentEnrichment } from '~/common/attachment-drafts/llm-enrichment/attachment.enrichment';
|
||||
import { AttachmentDraftsList } from '~/common/attachment-drafts/attachment-drafts-ui/AttachmentDraftsList';
|
||||
|
||||
import { LLMAttachmentsPromptsButtonMemo } from './LLMAttachmentsPromptsButton';
|
||||
import { ViewDocPartModal } from '../../message/fragments-content/ViewDocPartModal';
|
||||
import { ViewImageRefPartModal } from '../../message/fragments-content/ViewImageRefPartModal';
|
||||
|
||||
|
||||
/**
|
||||
* Composer-specific wrapper around the generic AttachmentDraftsList.
|
||||
* Provides: viewer modals, AI prompts button, "What can I do?" menu item.
|
||||
*/
|
||||
export function ComposerAttachmentDraftsList(props: {
|
||||
attachmentDrafts: AttachmentDraft[],
|
||||
attachmentDraftsStoreApi: AttachmentDraftsStoreApi,
|
||||
enrichment: IAttachmentEnrichment,
|
||||
enrichmentSummary: AttachmentEnrichmentSummary,
|
||||
agiAttachmentPrompts: AgiAttachmentPromptsData,
|
||||
onAttachmentDraftsAction: (attachmentDraftId: AttachmentDraftId | null, actionId: AttachmentDraftsAction) => void,
|
||||
}) {
|
||||
|
||||
const { agiAttachmentPrompts, attachmentDrafts } = props;
|
||||
|
||||
|
||||
// memo components
|
||||
|
||||
const startDecorator = React.useMemo(() =>
|
||||
!agiAttachmentPrompts.isVisible && !agiAttachmentPrompts.hasData ? undefined
|
||||
: <LLMAttachmentsPromptsButtonMemo data={agiAttachmentPrompts} />
|
||||
, [agiAttachmentPrompts]);
|
||||
|
||||
|
||||
// memo rendering functions
|
||||
|
||||
const renderDocViewer = React.useCallback(
|
||||
(part: React.ComponentProps<typeof ViewDocPartModal>['docPart'], onClose: () => void) =>
|
||||
<ViewDocPartModal docPart={part} onClose={onClose} />
|
||||
, []);
|
||||
|
||||
const renderImageViewer = React.useCallback(
|
||||
(part: React.ComponentProps<typeof ViewImageRefPartModal>['imageRefPart'], onClose: () => void) =>
|
||||
<ViewImageRefPartModal imageRefPart={part} onClose={onClose} />
|
||||
, []);
|
||||
|
||||
const renderOverallMenuExtra = React.useCallback(() => <>
|
||||
<MenuItem color='primary' variant='soft' onClick={agiAttachmentPrompts.refetch} disabled={!attachmentDrafts.length || agiAttachmentPrompts.isFetching}>
|
||||
<ListItemDecorator>{agiAttachmentPrompts.isFetching ? <CircularProgress size='sm' /> : <AutoFixHighIcon />}</ListItemDecorator>
|
||||
What can I do?
|
||||
</MenuItem>
|
||||
<ListDivider />
|
||||
</>, [agiAttachmentPrompts.isFetching, agiAttachmentPrompts.refetch, attachmentDrafts.length]);
|
||||
|
||||
|
||||
return (
|
||||
<AttachmentDraftsList
|
||||
attachmentDraftsStoreApi={props.attachmentDraftsStoreApi}
|
||||
attachmentDrafts={attachmentDrafts}
|
||||
enrichment={props.enrichment}
|
||||
enrichmentSummary={props.enrichmentSummary}
|
||||
onAttachmentDraftsAction={props.onAttachmentDraftsAction}
|
||||
startDecorator={startDecorator}
|
||||
renderDocViewer={renderDocViewer}
|
||||
renderImageViewer={renderImageViewer}
|
||||
renderOverallMenuExtra={renderOverallMenuExtra}
|
||||
/>
|
||||
);
|
||||
}
|
||||
@@ -1,98 +0,0 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import type { AttachmentDraft } from '~/common/attachment-drafts/attachment.types';
|
||||
import type { DLLM } from '~/common/stores/llms/llms.types';
|
||||
import type { DMessageAttachmentFragment } from '~/common/stores/chat/chat.fragments';
|
||||
import { estimateTokensForFragments } from '~/common/stores/chat/chat.tokens';
|
||||
|
||||
|
||||
export interface LLMAttachmentDraftsCollection {
|
||||
llmAttachmentDrafts: LLMAttachmentDraft[];
|
||||
canAttachAllFragments: boolean;
|
||||
canInlineSomeFragments: boolean;
|
||||
llmTokenCountApprox: number | null;
|
||||
hasImageFragments: boolean;
|
||||
}
|
||||
|
||||
|
||||
export interface LLMAttachmentDraft {
|
||||
attachmentDraft: AttachmentDraft;
|
||||
llmSupportsAllFragments: boolean;
|
||||
llmSupportsTextFragments: boolean;
|
||||
llmTokenCountApprox: number | null;
|
||||
hasImageFragments: boolean;
|
||||
}
|
||||
|
||||
|
||||
export function useLLMAttachmentDrafts(attachmentDrafts: AttachmentDraft[], chatLLM: DLLM | null, chatLLMSupportsImages: boolean): LLMAttachmentDraftsCollection {
|
||||
|
||||
/* [Optimization] Use a Ref to store the previous state of llmAttachmentDrafts and chatLLM
|
||||
*
|
||||
* Note that this works on 2 levels:
|
||||
* - 1. avoids recomputation, but more importantly,
|
||||
* - 2. avoids re-rendering by keeping those llmAttachmentDrafts objects stable.
|
||||
*
|
||||
* Important to notice that the attachmentDraft objects[] are stable to start with, so we can
|
||||
* safely use reference equality to check if internal properties (or order) have changed.
|
||||
*/
|
||||
const prevStateRef = React.useRef<{
|
||||
chatLLM: DLLM | null;
|
||||
llmAttachmentDrafts: LLMAttachmentDraft[];
|
||||
}>({ llmAttachmentDrafts: [], chatLLM: null });
|
||||
|
||||
return React.useMemo(() => {
|
||||
|
||||
// [Optimization]
|
||||
const equalChatLLM = chatLLM === prevStateRef.current.chatLLM;
|
||||
|
||||
// LLM-dependent multi-modal enablement
|
||||
// TODO: consider also Audio inputs, maybe PDF binary inputs
|
||||
// FIXME: reference fragments could refer to non-image as well
|
||||
const imageTypes: DMessageAttachmentFragment['part']['pt'][] = ['reference', 'image_ref'];
|
||||
const supportedTypes: DMessageAttachmentFragment['part']['pt'][] = chatLLMSupportsImages ? [...imageTypes, 'doc'] : ['doc'];
|
||||
const supportedTextTypes: DMessageAttachmentFragment['part']['pt'][] = supportedTypes.filter(pt => pt === 'doc');
|
||||
|
||||
// Add LLM-specific properties to each attachment draft
|
||||
const llmAttachmentDrafts = attachmentDrafts.map((a, index) => {
|
||||
|
||||
// [Optimization] If not change in LLM and the attachmentDraft is the same object reference, reuse the previous LLMAttachmentDraft
|
||||
let prevDraft: LLMAttachmentDraft | undefined = prevStateRef.current.llmAttachmentDrafts[index];
|
||||
// if not found, search by id
|
||||
if (!prevDraft)
|
||||
prevDraft = prevStateRef.current.llmAttachmentDrafts.find(_pd => _pd.attachmentDraft.id === a.id);
|
||||
if (equalChatLLM && prevDraft && prevDraft.attachmentDraft === a)
|
||||
return prevDraft;
|
||||
|
||||
// Otherwise, create a new LLMAttachmentDraft
|
||||
return {
|
||||
attachmentDraft: a,
|
||||
llmSupportsAllFragments: !a.outputFragments ? false : a.outputFragments.every(op => supportedTypes.includes(op.part.pt)),
|
||||
llmSupportsTextFragments: !a.outputFragments ? false : a.outputFragments.some(op => supportedTextTypes.includes(op.part.pt)),
|
||||
llmTokenCountApprox: chatLLM
|
||||
? estimateTokensForFragments(chatLLM, 'user', a.outputFragments, true, 'useLLMAttachmentDrafts')
|
||||
: null,
|
||||
hasImageFragments: !a.outputFragments ? false : a.outputFragments.some(op => imageTypes.includes(op.part.pt)),
|
||||
};
|
||||
});
|
||||
|
||||
// Calculate the overall properties
|
||||
const canAttachAllFragments = llmAttachmentDrafts.every(a => a.llmSupportsAllFragments);
|
||||
const canInlineSomeFragments = llmAttachmentDrafts.some(a => a.llmSupportsTextFragments);
|
||||
const llmTokenCountApprox = chatLLM
|
||||
? llmAttachmentDrafts.reduce((acc, a) => acc + (a.llmTokenCountApprox || 0), 0)
|
||||
: null;
|
||||
const hasImageFragments = llmAttachmentDrafts.some(a => a.hasImageFragments);
|
||||
|
||||
// [Optimization] Update the ref with the new state
|
||||
prevStateRef.current = { llmAttachmentDrafts, chatLLM };
|
||||
|
||||
return {
|
||||
llmAttachmentDrafts,
|
||||
canAttachAllFragments,
|
||||
canInlineSomeFragments,
|
||||
llmTokenCountApprox,
|
||||
hasImageFragments,
|
||||
};
|
||||
|
||||
}, [attachmentDrafts, chatLLM, chatLLMSupportsImages]); // Dependencies for the outer useMemo
|
||||
}
|
||||
@@ -47,9 +47,9 @@ function TokenBadge(props: {
|
||||
const showAltCosts = !!props.showCost && !!costMax && costMin !== undefined;
|
||||
if (showAltCosts) {
|
||||
// Note: switched to 'min cost (>= ...)' on mobile as well, to restore the former behavior, just uncomment the !props.enableHover (a proxy for isMobile)
|
||||
badgeValue = (/*!props.enableHover ||*/ isHovering)
|
||||
? '< ' + formatModelsCost(costMax)
|
||||
: '> ' + formatModelsCost(costMin);
|
||||
badgeValue =
|
||||
// (/*!props.enableHover ||*/ isHovering) ? '< ' + formatModelsCost(costMax) :
|
||||
'> ' + formatModelsCost(costMin);
|
||||
} else {
|
||||
|
||||
// show the direct tokens, unless we exceed the limit and 'showExcess' is enabled
|
||||
@@ -77,7 +77,7 @@ function TokenBadge(props: {
|
||||
slotProps={{
|
||||
root: {
|
||||
sx: {
|
||||
...((props.absoluteBottomRight) && { position: 'absolute', bottom: 8, right: 8 }),
|
||||
...((props.absoluteBottomRight) && { position: 'absolute', bottom: 8, right: '1rem' }),
|
||||
cursor: 'help',
|
||||
...(shallInvisible && {
|
||||
opacity: 0,
|
||||
@@ -92,6 +92,13 @@ function TokenBadge(props: {
|
||||
fontFamily: 'code',
|
||||
fontSize: 'xs',
|
||||
...((props.absoluteBottomRight || props.inline) && { position: 'static', transform: 'none' }),
|
||||
// make it transparent over text
|
||||
// backgroundColor: `rgb(var(--joy-palette-${color}-lightChannel) / 15%)`, // similar to success.50
|
||||
background: 'transparent',
|
||||
boxShadow: 'none', // outline
|
||||
'&:hover': {
|
||||
backgroundColor: `${color}.softHoverBg`,
|
||||
},
|
||||
},
|
||||
},
|
||||
}}
|
||||
|
||||
@@ -8,7 +8,7 @@ import SettingsIcon from '@mui/icons-material/Settings';
|
||||
import { findModelVendor } from '~/modules/llms/vendors/vendors.registry';
|
||||
|
||||
import type { DModelsServiceId } from '~/common/stores/llms/llms.service.types';
|
||||
import { DLLM, DLLMId, isLLMVisible } from '~/common/stores/llms/llms.types';
|
||||
import { DLLM, DLLMId, getLLMLabel, isLLMVisible } from '~/common/stores/llms/llms.types';
|
||||
import { DebouncedInputMemo } from '~/common/components/DebouncedInput';
|
||||
import { GoodTooltip } from '~/common/components/GoodTooltip';
|
||||
import { KeyStroke } from '~/common/components/KeyStroke';
|
||||
@@ -65,7 +65,7 @@ function LLMDropdown(props: {
|
||||
return true;
|
||||
|
||||
// filter-out models that don't contain the search string
|
||||
if (lcFilterString && !llm.label.toLowerCase().includes(lcFilterString))
|
||||
if (lcFilterString && !getLLMLabel(llm).toLowerCase().includes(lcFilterString))
|
||||
return false;
|
||||
|
||||
// filter-out hidden models from the dropdown
|
||||
@@ -89,7 +89,7 @@ function LLMDropdown(props: {
|
||||
|
||||
// add the model item
|
||||
llmItems[llm.id] = {
|
||||
title: llm.label,
|
||||
title: getLLMLabel(llm),
|
||||
...(llm.userStarred ? { symbol: '⭐' } : {}),
|
||||
// icon: llm.id.startsWith('some vendor') ? <VendorIcon /> : undefined,
|
||||
};
|
||||
|
||||
@@ -66,6 +66,7 @@ function ChatDrawer(props: {
|
||||
activeFolderId: string | null,
|
||||
chatPanesConversationIds: DConversationId[],
|
||||
disableNewButton: boolean,
|
||||
focusedChatBeamOpen: boolean,
|
||||
onConversationActivate: (conversationId: DConversationId) => void,
|
||||
onConversationBranch: (conversationId: DConversationId, messageId: string | null, addSplitPane: boolean) => void,
|
||||
onConversationNew: (forceNoRecycle: boolean, isIncognito: boolean) => void,
|
||||
@@ -291,6 +292,17 @@ function ChatDrawer(props: {
|
||||
toggleFilterHasDocFragments, toggleFilterHasImageAssets, toggleFilterHasStars, toggleFilterIsArchived, toggleShowPersonaIcons, toggleShowRelativeSize,
|
||||
]);
|
||||
|
||||
const displayNavItems = React.useMemo(() => {
|
||||
if (renderLimit === Infinity || renderLimit >= renderNavItems.length) return renderNavItems;
|
||||
|
||||
// return sliced if it contains the active conversation
|
||||
const sliced = renderNavItems.slice(0, renderLimit);
|
||||
if (!props.activeConversationId || sliced.some(i => i.type === 'nav-item-chat-data' && i.conversationId === props.activeConversationId)) return sliced;
|
||||
|
||||
// include the active conversation if it's beyond the fold
|
||||
const activeItem = renderNavItems.find((i, idx) => idx >= renderLimit && i.type === 'nav-item-chat-data' && i.conversationId === props.activeConversationId);
|
||||
return activeItem ? [...sliced, activeItem] : sliced;
|
||||
}, [renderNavItems, renderLimit, props.activeConversationId]);
|
||||
|
||||
return <>
|
||||
|
||||
@@ -379,7 +391,7 @@ function ChatDrawer(props: {
|
||||
|
||||
{/* Chat Titles List (shrink as half the rate as the Folders List) */}
|
||||
<Box sx={{ flexGrow: 1, flexShrink: 1, flexBasis: '20rem', overflowY: 'auto', ...themeScalingMap[contentScaling].chatDrawerItemSx }}>
|
||||
{renderNavItems.slice(0, renderLimit).map((item, idx) => item.type === 'nav-item-chat-data' ? (
|
||||
{displayNavItems.map((item, idx) => item.type === 'nav-item-chat-data' ? (
|
||||
<ChatDrawerItemMemo
|
||||
key={'nav-chat-' + item.conversationId}
|
||||
item={item}
|
||||
@@ -456,7 +468,7 @@ function ChatDrawer(props: {
|
||||
{/*<OpenAIIcon sx={{ ml: 'auto' }} />*/}
|
||||
</ListItemButton>
|
||||
|
||||
<ListItemButton disabled={filteredChatsAreEmpty} onClick={handleConversationsExport} sx={{ flex: 1 }}>
|
||||
<ListItemButton disabled={filteredChatsAreEmpty || props.focusedChatBeamOpen} onClick={handleConversationsExport} sx={{ flex: 1 }}>
|
||||
<ListItemDecorator>
|
||||
<FileUploadOutlinedIcon />
|
||||
</ListItemDecorator>
|
||||
|
||||
@@ -282,7 +282,7 @@ function ChatDrawerItem(props: {
|
||||
{searchFrequency > 0 ? (
|
||||
// Display search frequency if it exists and is greater than 0
|
||||
<Typography level='body-sm'>
|
||||
{searchFrequency}
|
||||
{Math.round(searchFrequency * 10) / 10}
|
||||
</Typography>
|
||||
) : (props.showSymbols && (userFlagsSummary || containsDocAttachments || containsImageAssets)) ? (
|
||||
<Box sx={{
|
||||
|
||||
@@ -5,7 +5,7 @@ import { useModuleBeamStore } from '~/modules/beam/store-module-beam';
|
||||
import type { DFolder } from '~/common/stores/folders/store-chat-folders';
|
||||
import { DMessage, DMessageUserFlag, MESSAGE_FLAG_STARRED, messageFragmentsReduceText, messageHasUserFlag, messageUserFlagToEmoji } from '~/common/stores/chat/chat.message';
|
||||
import { conversationTitle, DConversationId } from '~/common/stores/chat/chat.conversation';
|
||||
import { getLocalMidnightInUTCTimestamp, getTimeBucketEn } from '~/common/util/timeUtils';
|
||||
import { createTimeBucketClassifierEn } from '~/common/util/timeUtils';
|
||||
import { isAttachmentFragment, isContentOrAttachmentFragment, isDocPart, isImageRefPart, isZyncAssetImageReferencePart } from '~/common/stores/chat/chat.fragments';
|
||||
import { shallowEquals } from '~/common/util/hooks/useShallowObject';
|
||||
import { useChatStore } from '~/common/stores/chat/store-chats';
|
||||
@@ -235,14 +235,14 @@ export function useChatDrawerRenderItems(
|
||||
break;
|
||||
}
|
||||
|
||||
const midnightTime = getLocalMidnightInUTCTimestamp();
|
||||
const getTimeBucket = createTimeBucketClassifierEn();
|
||||
const grouped = chatNavItems.reduce((acc, item) => {
|
||||
|
||||
// derive the bucket name
|
||||
let bucket: string;
|
||||
switch (grouping) {
|
||||
case 'date':
|
||||
bucket = getTimeBucketEn(item.updatedAt || midnightTime, midnightTime);
|
||||
bucket = getTimeBucket(item.updatedAt || Date.now());
|
||||
break;
|
||||
case 'persona':
|
||||
bucket = item.systemPurposeId;
|
||||
|
||||
@@ -6,7 +6,6 @@ import AddIcon from '@mui/icons-material/Add';
|
||||
import ArchiveOutlinedIcon from '@mui/icons-material/ArchiveOutlined';
|
||||
import CleaningServicesOutlinedIcon from '@mui/icons-material/CleaningServicesOutlined';
|
||||
import CompressIcon from '@mui/icons-material/Compress';
|
||||
import EngineeringIcon from '@mui/icons-material/Engineering';
|
||||
import ForkRightIcon from '@mui/icons-material/ForkRight';
|
||||
import KeyboardArrowDownIcon from '@mui/icons-material/KeyboardArrowDown';
|
||||
import RestartAltIcon from '@mui/icons-material/RestartAlt';
|
||||
@@ -14,15 +13,14 @@ import SettingsSuggestOutlinedIcon from '@mui/icons-material/SettingsSuggestOutl
|
||||
import UnarchiveOutlinedIcon from '@mui/icons-material/UnarchiveOutlined';
|
||||
|
||||
import type { DConversationId } from '~/common/stores/chat/chat.conversation';
|
||||
import { ChromelessItemButton } from '~/common/layout/optima/ChromelessItemButton';
|
||||
import { CodiconSplitHorizontal } from '~/common/components/icons/CodiconSplitHorizontal';
|
||||
import { CodiconSplitHorizontalRemove } from '~/common/components/icons/CodiconSplitHorizontalRemove';
|
||||
import { CodiconSplitVertical } from '~/common/components/icons/CodiconSplitVertical';
|
||||
import { CodiconSplitVerticalRemove } from '~/common/components/icons/CodiconSplitVerticalRemove';
|
||||
import { FormLabelStart } from '~/common/components/forms/FormLabelStart';
|
||||
import { OptimaPanelGroupedList, OptimaPanelGroupGutter } from '~/common/layout/optima/panel/OptimaPanelGroupedList';
|
||||
import { optimaActions } from '~/common/layout/optima/useOptima';
|
||||
import { useChatStore } from '~/common/stores/chat/store-chats'; // may be replaced with a dedicated hook for the chat pane
|
||||
import { useLabsDevMode } from '~/common/stores/store-ux-labs';
|
||||
|
||||
import { useChatShowSystemMessages } from '../../store-app-chat';
|
||||
import { panesManagerActions, usePaneDuplicateOrClose } from '../panes/store-panes-manager';
|
||||
@@ -40,6 +38,7 @@ function VariformPaneFrame() {
|
||||
|
||||
|
||||
export function ChatPane(props: {
|
||||
isMobile: boolean,
|
||||
conversationId: DConversationId | null,
|
||||
disableItems: boolean,
|
||||
hasConversations: boolean,
|
||||
@@ -55,7 +54,6 @@ export function ChatPane(props: {
|
||||
// external state
|
||||
const { canAddPane, isMultiPane } = usePaneDuplicateOrClose();
|
||||
const [showSystemMessages, setShowSystemMessages] = useChatShowSystemMessages();
|
||||
const labsDevMode = useLabsDevMode();
|
||||
|
||||
const { isArchived, setArchived } = useChatStore(useShallow((state) => {
|
||||
const conversation = state.conversations.find(_c => _c.id === props.conversationId);
|
||||
@@ -147,6 +145,8 @@ export function ChatPane(props: {
|
||||
</ListItemButton>
|
||||
</ListItem>
|
||||
|
||||
{props.isMobile && <ChromelessItemButton />}
|
||||
|
||||
</OptimaPanelGroupedList>
|
||||
|
||||
{/* Chat Actions group */}
|
||||
@@ -213,15 +213,5 @@ export function ChatPane(props: {
|
||||
</ListItemButton>
|
||||
</OptimaPanelGroupedList>
|
||||
|
||||
{/* [DEV] Development */}
|
||||
{labsDevMode && (
|
||||
<OptimaPanelGroupedList title='[Developers]'>
|
||||
<MenuItem onClick={optimaActions().openAIXDebugger}>
|
||||
<ListItemDecorator><EngineeringIcon /></ListItemDecorator>
|
||||
AIX: Show Last Request...
|
||||
</MenuItem>
|
||||
</OptimaPanelGroupedList>
|
||||
)}
|
||||
|
||||
</>;
|
||||
}
|
||||
@@ -4,7 +4,8 @@ import type { SxProps } from '@mui/joy/styles/types';
|
||||
import { Box, Button, ColorPaletteProp } from '@mui/joy';
|
||||
|
||||
import type { ContentScaling } from '~/common/app.theme';
|
||||
import { DMessageContentFragment, DMessageTextPart, isTextContentFragment } from '~/common/stores/chat/chat.fragments';
|
||||
import type { InterleavedFragment } from '~/common/stores/chat/hooks/useFragmentBuckets';
|
||||
import { DMessageTextPart, isTextContentFragment } from '~/common/stores/chat/chat.fragments';
|
||||
|
||||
|
||||
// configuration
|
||||
@@ -35,7 +36,7 @@ const optionGroupSx: SxProps = {
|
||||
flexDirection: 'column',
|
||||
alignItems: 'flex-start',
|
||||
gap: 0,
|
||||
};
|
||||
} as const;
|
||||
|
||||
const optionSx: SxProps = {
|
||||
// style
|
||||
@@ -51,10 +52,22 @@ const optionSx: SxProps = {
|
||||
|
||||
// layout
|
||||
justifyContent: 'flex-start',
|
||||
};
|
||||
} as const;
|
||||
|
||||
const optionBoldSx: SxProps = {
|
||||
...optionSx,
|
||||
fontWeight: 'lg',
|
||||
} as const;
|
||||
|
||||
|
||||
export function optionsExtractFromFragments_dangerModifyFragment(enabled: boolean, fragments: DMessageContentFragment[]): { fragments: DMessageContentFragment[], options: string[], } {
|
||||
// '1. **text**' -> '1. text', or: **text** -> text
|
||||
function _stripMarkdownBold(text: string): { text: string; isBold: boolean } {
|
||||
const stripped = text.replace(/(\*{2,})(.+)\1\s*$/, '$2').trimEnd();
|
||||
return { text: stripped, isBold: stripped !== text };
|
||||
}
|
||||
|
||||
|
||||
export function optionsExtractFromFragments_dangerModifyFragment(enabled: boolean, fragments: InterleavedFragment[]): { fragments: InterleavedFragment[], options: string[] } {
|
||||
if (enabled && fragments.length) {
|
||||
const fragment = fragments[fragments.length - 1];
|
||||
if (isTextContentFragment(fragment)) {
|
||||
@@ -163,21 +176,25 @@ export function BlockOpOptions(props: {
|
||||
options: string[],
|
||||
onContinue: (continueText: null | string) => void,
|
||||
}) {
|
||||
const buttonSx = React.useMemo(() => ({ ...optionSx, fontSize: props.contentScaling }), [props.contentScaling]);
|
||||
const normalSx = React.useMemo(() => ({ ...optionSx, fontSize: props.contentScaling }), [props.contentScaling]);
|
||||
const boldSx = React.useMemo(() => ({ ...optionBoldSx, fontSize: props.contentScaling }), [props.contentScaling]);
|
||||
return (
|
||||
<Box sx={optionGroupSx}>
|
||||
{props.options.map((option, index) => (
|
||||
<Button
|
||||
key={index}
|
||||
color={OPTION_ACTIVE_COLOR}
|
||||
variant='soft'
|
||||
size={props.contentScaling === 'md' ? 'md' : 'sm'}
|
||||
onClick={() => props.onContinue(option.endsWith('?') ? option.slice(0, -1) : option)}
|
||||
sx={buttonSx}
|
||||
>
|
||||
{option}
|
||||
</Button>
|
||||
))}
|
||||
{props.options.map((option, index) => {
|
||||
const { text, isBold } = _stripMarkdownBold(option);
|
||||
return (
|
||||
<Button
|
||||
key={index}
|
||||
color={OPTION_ACTIVE_COLOR}
|
||||
variant='soft'
|
||||
size={props.contentScaling === 'md' ? 'md' : 'sm'}
|
||||
onClick={() => props.onContinue(text.endsWith('?') ? text.slice(0, -1) : text)}
|
||||
sx={isBold ? boldSx : normalSx}
|
||||
>
|
||||
{text}
|
||||
</Button>
|
||||
);
|
||||
})}
|
||||
</Box>
|
||||
);
|
||||
}
|
||||
|
||||
@@ -1,16 +1,22 @@
|
||||
import * as React from 'react';
|
||||
import TimeAgo from 'react-timeago';
|
||||
|
||||
import { Box, Button, ButtonGroup, Tooltip, Typography } from '@mui/joy';
|
||||
import PlayArrowRoundedIcon from '@mui/icons-material/PlayArrowRounded';
|
||||
import StopRoundedIcon from '@mui/icons-material/StopRounded';
|
||||
|
||||
import type { DMessageGenerator } from '~/common/stores/chat/chat.message';
|
||||
|
||||
|
||||
const ARM_TIMEOUT_MS = 4000;
|
||||
|
||||
|
||||
/**
|
||||
* FIXME: COMPLETE THIS
|
||||
*/
|
||||
export function BlockOpUpstreamResume(props: {
|
||||
upstreamHandle: Exclude<DMessageGenerator['upstreamHandle'], undefined>,
|
||||
pending?: boolean; // true while the message is actively streaming; labels the Delete button as "Stop"
|
||||
onResume?: () => void | Promise<void>;
|
||||
onCancel?: () => void | Promise<void>;
|
||||
onDelete?: () => void | Promise<void>;
|
||||
@@ -20,8 +26,14 @@ export function BlockOpUpstreamResume(props: {
|
||||
const [isResuming, setIsResuming] = React.useState(false);
|
||||
const [isCancelling, setIsCancelling] = React.useState(false);
|
||||
const [isDeleting, setIsDeleting] = React.useState(false);
|
||||
const [deleteArmed, setDeleteArmed] = React.useState(false);
|
||||
const [error, setError] = React.useState<string | null>(null);
|
||||
|
||||
// expiration: boolean is evaluated at render (may lag briefly if nothing re-renders past expiry).
|
||||
// TimeAgo handles its own tick for the label; the button's disabled state is the only consumer of this flag.
|
||||
const { expiresAt /*, runId = ''*/ } = props.upstreamHandle;
|
||||
// const isExpired = expiresAt != null && Date.now() > expiresAt;
|
||||
|
||||
// handlers
|
||||
|
||||
const handleResume = React.useCallback(async () => {
|
||||
@@ -50,8 +62,14 @@ export function BlockOpUpstreamResume(props: {
|
||||
}
|
||||
}, [props]);
|
||||
|
||||
// Two-click arm: first click arms (visible red "Confirm?"), second click (within ARM_TIMEOUT_MS) executes.
|
||||
const handleDelete = React.useCallback(async () => {
|
||||
if (!props.onDelete) return;
|
||||
if (!deleteArmed) {
|
||||
setDeleteArmed(true);
|
||||
return;
|
||||
}
|
||||
setDeleteArmed(false);
|
||||
setError(null);
|
||||
setIsDeleting(true);
|
||||
try {
|
||||
@@ -61,7 +79,15 @@ export function BlockOpUpstreamResume(props: {
|
||||
} finally {
|
||||
setIsDeleting(false);
|
||||
}
|
||||
}, [props]);
|
||||
}, [deleteArmed, props]);
|
||||
|
||||
// Auto-disarm after ARM_TIMEOUT_MS so the armed state can't leak into a later session
|
||||
React.useEffect(() => {
|
||||
if (!deleteArmed) return;
|
||||
const t = setTimeout(() => setDeleteArmed(false), ARM_TIMEOUT_MS);
|
||||
return () => clearTimeout(t);
|
||||
}, [deleteArmed]);
|
||||
|
||||
|
||||
return (
|
||||
<Box
|
||||
@@ -79,7 +105,7 @@ export function BlockOpUpstreamResume(props: {
|
||||
<Button
|
||||
disabled={isResuming || isCancelling || isDeleting}
|
||||
loading={isResuming}
|
||||
startDecorator={<PlayArrowRoundedIcon sx={{ color: 'success.solidBg' }} />}
|
||||
startDecorator={<PlayArrowRoundedIcon color='success' />}
|
||||
onClick={handleResume}
|
||||
>
|
||||
Resume
|
||||
@@ -101,14 +127,16 @@ export function BlockOpUpstreamResume(props: {
|
||||
)}
|
||||
|
||||
{props.onDelete && (
|
||||
<Tooltip title='Delete the stored response'>
|
||||
<Tooltip title={deleteArmed ? 'Click again to confirm - cancels the run upstream (no resume after)' : (props.pending ? 'Stop this response and cancel the upstream run' : 'Cancel the upstream run')}>
|
||||
<Button
|
||||
loading={isDeleting}
|
||||
// startDecorator={<DeleteIcon />}
|
||||
color={deleteArmed ? 'danger' : 'neutral'}
|
||||
variant={deleteArmed ? 'solid' : 'outlined'}
|
||||
startDecorator={<StopRoundedIcon />}
|
||||
onClick={handleDelete}
|
||||
disabled={isResuming || isCancelling || isDeleting}
|
||||
>
|
||||
Delete
|
||||
{deleteArmed ? 'Confirm?' : (props.pending ? 'Stop' : 'Cancel')}
|
||||
</Button>
|
||||
</Tooltip>
|
||||
)}
|
||||
@@ -120,9 +148,11 @@ export function BlockOpUpstreamResume(props: {
|
||||
</Typography>
|
||||
)}
|
||||
|
||||
<Typography level='body-xs' sx={{ fontSize: '0.65rem', opacity: 0.6 }}>
|
||||
Response ID: {props.upstreamHandle.responseId.slice(0, 12)}...
|
||||
</Typography>
|
||||
{!!expiresAt && <Typography level='body-xs' sx={{ fontSize: '0.65rem', opacity: 0.6 }}>
|
||||
{/*Run ID: {runId.slice(0, 12)}...*/}
|
||||
{/*{!!expiresAt && <> · Expires <TimeAgo date={expiresAt} /></>}*/}
|
||||
Expires <TimeAgo date={expiresAt} />
|
||||
</Typography>}
|
||||
</Box>
|
||||
);
|
||||
}
|
||||
|
||||
@@ -5,8 +5,6 @@ import TimeAgo from 'react-timeago';
|
||||
import type { SxProps } from '@mui/joy/styles/types';
|
||||
import { Box, ButtonGroup, CircularProgress, Divider, IconButton, ListDivider, ListItem, ListItemDecorator, MenuItem, Switch, Tooltip, Typography } from '@mui/joy';
|
||||
import { ClickAwayListener, Popper } from '@mui/base';
|
||||
import AccountTreeOutlinedIcon from '@mui/icons-material/AccountTreeOutlined';
|
||||
import AlternateEmailIcon from '@mui/icons-material/AlternateEmail';
|
||||
import CheckRoundedIcon from '@mui/icons-material/CheckRounded';
|
||||
import CloseRoundedIcon from '@mui/icons-material/CloseRounded';
|
||||
import ContentCopyIcon from '@mui/icons-material/ContentCopy';
|
||||
@@ -17,11 +15,10 @@ import EditRoundedIcon from '@mui/icons-material/EditRounded';
|
||||
import ForkRightIcon from '@mui/icons-material/ForkRight';
|
||||
import FormatBoldIcon from '@mui/icons-material/FormatBold';
|
||||
import FormatPaintOutlinedIcon from '@mui/icons-material/FormatPaintOutlined';
|
||||
import InsertLinkIcon from '@mui/icons-material/InsertLink';
|
||||
import InfoOutlinedIcon from '@mui/icons-material/InfoOutlined';
|
||||
import MoreVertIcon from '@mui/icons-material/MoreVert';
|
||||
import NotificationsActiveIcon from '@mui/icons-material/NotificationsActive';
|
||||
import NotificationsOutlinedIcon from '@mui/icons-material/NotificationsOutlined';
|
||||
import RecordVoiceOverOutlinedIcon from '@mui/icons-material/RecordVoiceOverOutlined';
|
||||
import ReplayIcon from '@mui/icons-material/Replay';
|
||||
import ReplyAllRoundedIcon from '@mui/icons-material/ReplyAllRounded';
|
||||
import ReplyRoundedIcon from '@mui/icons-material/ReplyRounded';
|
||||
@@ -37,22 +34,26 @@ import { ModelVendorAnthropic } from '~/modules/llms/vendors/anthropic/anthropic
|
||||
import { AnthropicIcon } from '~/common/components/icons/vendors/AnthropicIcon';
|
||||
import { ChatBeamIcon } from '~/common/components/icons/ChatBeamIcon';
|
||||
import { CloseablePopup } from '~/common/components/CloseablePopup';
|
||||
import { DMessage, DMessageId, DMessageUserFlag, DMetaReferenceItem, MESSAGE_FLAG_AIX_SKIP, MESSAGE_FLAG_NOTIFY_COMPLETE, MESSAGE_FLAG_STARRED, MESSAGE_FLAG_VND_ANT_CACHE_AUTO, MESSAGE_FLAG_VND_ANT_CACHE_USER, messageFragmentsReduceText, messageHasUserFlag } from '~/common/stores/chat/chat.message';
|
||||
import { DMessage, DMessageGenerator, DMessageId, DMessageUserFlag, DMetaReferenceItem, MESSAGE_FLAG_AIX_SKIP, MESSAGE_FLAG_NOTIFY_COMPLETE, MESSAGE_FLAG_STARRED, MESSAGE_FLAG_VND_ANT_CACHE_AUTO, MESSAGE_FLAG_VND_ANT_CACHE_USER, messageFragmentsReduceText, messageHasUserFlag } from '~/common/stores/chat/chat.message';
|
||||
import { KeyStroke } from '~/common/components/KeyStroke';
|
||||
import { MarkHighlightIcon } from '~/common/components/icons/MarkHighlightIcon';
|
||||
import { PhTreeStructure } from '~/common/components/icons/phosphor/PhTreeStructure';
|
||||
import { PhVoice } from '~/common/components/icons/phosphor/PhVoice';
|
||||
import { Release } from '~/common/app.release';
|
||||
import { StarredState } from '~/common/components/StarIcons';
|
||||
import { TooltipOutlined } from '~/common/components/TooltipOutlined';
|
||||
import { adjustContentScaling, themeScalingMap, themeZIndexChatBubble } from '~/common/app.theme';
|
||||
import { avatarIconSx, makeMessageAvatarIcon, messageBackground, useMessageAvatarLabel } from '~/common/util/dMessageUtils';
|
||||
import { copyToClipboard } from '~/common/util/clipboardUtils';
|
||||
import { clipboardCopyDOMSelectionOrFallback, copyToClipboard } from '~/common/util/clipboardUtils';
|
||||
import { createTextContentFragment, DMessageFragment, DMessageFragmentId, updateFragmentWithEditedText } from '~/common/stores/chat/chat.fragments';
|
||||
import { useFragmentBuckets } from '~/common/stores/chat/hooks/useFragmentBuckets';
|
||||
import { useUIPreferencesStore } from '~/common/stores/store-ui';
|
||||
import { useUXLabsStore } from '~/common/stores/store-ux-labs';
|
||||
|
||||
import { BlockOpContinue } from './BlockOpContinue';
|
||||
import { BlockOpOptions, optionsExtractFromFragments_dangerModifyFragment } from './BlockOpOptions';
|
||||
import { BlockOpUpstreamResume } from './BlockOpUpstreamResume';
|
||||
import { ChatMessageEditAttachments, type EditModeAttachmentsHandle } from './ChatMessageEditAttachments';
|
||||
import { ChatMessageInfoPopup } from './ChatMessageInfoPopup';
|
||||
import { ContentFragments } from './fragments-content/ContentFragments';
|
||||
import { DocumentAttachmentFragments } from './fragments-attachment-doc/DocumentAttachmentFragments';
|
||||
import { ImageAttachmentFragments } from './fragments-attachment-image/ImageAttachmentFragments';
|
||||
@@ -69,7 +70,7 @@ const ENABLE_BUBBLE = true;
|
||||
export const BUBBLE_MIN_TEXT_LENGTH = 3;
|
||||
|
||||
// Enable the hover button to copy the whole message. The Copy button is also available in Blocks, or in the Avatar Menu.
|
||||
const ENABLE_COPY_MESSAGE_OVERLAY: boolean = false;
|
||||
// const ENABLE_COPY_MESSAGE_OVERLAY: boolean = false;
|
||||
|
||||
|
||||
const messageBodySx: SxProps = {
|
||||
@@ -160,6 +161,8 @@ export function ChatMessage(props: {
|
||||
onMessageBeam?: (messageId: string) => Promise<void>,
|
||||
onMessageBranch?: (messageId: string) => void,
|
||||
onMessageContinue?: (messageId: string, continueText: null | string) => void,
|
||||
onMessageUpstreamResume?: (generator: DMessageGenerator, messageId: string) => Promise<void>,
|
||||
onMessageUpstreamDelete?: (generator: DMessageGenerator, messageId: string) => Promise<void>,
|
||||
onMessageDelete?: (messageId: string) => void,
|
||||
onMessageFragmentAppend?: (messageId: DMessageId, fragment: DMessageFragment) => void
|
||||
onMessageFragmentDelete?: (messageId: DMessageId, fragmentId: DMessageFragmentId) => void,
|
||||
@@ -180,6 +183,8 @@ export function ChatMessage(props: {
|
||||
const [contextMenuAnchor, setContextMenuAnchor] = React.useState<HTMLElement | null>(null);
|
||||
const [opsMenuAnchor, setOpsMenuAnchor] = React.useState<HTMLElement | null>(null);
|
||||
const [textContentEditState, setTextContentEditState] = React.useState<ChatMessageTextPartEditState | null>(null);
|
||||
const [showInfoModal, setShowInfoModal] = React.useState(false);
|
||||
const attachmentsEditRef = React.useRef<EditModeAttachmentsHandle>(null);
|
||||
|
||||
// external state
|
||||
const { adjContentScaling, disableMarkdown, doubleClickToEdit, uiComplexityMode } = useUIPreferencesStore(useShallow(state => ({
|
||||
@@ -188,7 +193,6 @@ export function ChatMessage(props: {
|
||||
doubleClickToEdit: state.doubleClickToEdit,
|
||||
uiComplexityMode: state.complexityMode,
|
||||
})));
|
||||
const labsEnhanceCodeBlocks = useUXLabsStore(state => state.labsEnhanceCodeBlocks);
|
||||
const [showDiff, setShowDiff] = useChatShowTextDiff();
|
||||
|
||||
|
||||
@@ -217,15 +221,15 @@ export function ChatMessage(props: {
|
||||
const isVndAndCacheUser = !!props.showAntPromptCaching && messageHasUserFlag(props.message, MESSAGE_FLAG_VND_ANT_CACHE_USER);
|
||||
|
||||
const {
|
||||
annotationFragments, // Web Citations, References (rendered at top)
|
||||
interleavedFragments, // Reasoning, Placeholders, Text, Code, Tools (interleaved in temporal order)
|
||||
imageAttachments, // Stamp-sized Images
|
||||
voidFragments, // Model-Aux, Placeholders
|
||||
contentFragments, // Text (Markdown + Code + ... blocks), Errors, (large) Images
|
||||
nonImageAttachments, // Document Attachments, likely the User dropped them in
|
||||
lastFragmentIsError,
|
||||
} = useFragmentBuckets(messageFragments);
|
||||
|
||||
const fragmentFlattenedText = React.useMemo(() => messageFragmentsReduceText(messageFragments), [messageFragments]);
|
||||
const handleHighlightSelText = useSelHighlighterMemo(messageId, selText, contentFragments, fromAssistant, props.onMessageFragmentReplace);
|
||||
const handleHighlightSelText = useSelHighlighterMemo(messageId, selText, interleavedFragments.filter(f => f.ft === 'content'), fromAssistant, props.onMessageFragmentReplace);
|
||||
|
||||
const textSubject = selText ? selText : fragmentFlattenedText;
|
||||
const isSpecialT2I = textSubject.startsWith('/draw ') || textSubject.startsWith('/imagine ') || textSubject.startsWith('/img ');
|
||||
@@ -243,7 +247,7 @@ export function ChatMessage(props: {
|
||||
// const wordsDiff = useWordsDifference(textSubject, props.diffPreviousText, showDiff);
|
||||
|
||||
|
||||
const { onMessageAssistantFrom, onMessageDelete, onMessageFragmentAppend, onMessageFragmentDelete, onMessageFragmentReplace, onMessageContinue } = props;
|
||||
const { onMessageAssistantFrom, onMessageDelete, onMessageFragmentAppend, onMessageFragmentDelete, onMessageFragmentReplace, onMessageContinue, onMessageUpstreamResume, onMessageUpstreamDelete } = props;
|
||||
|
||||
const handleFragmentNew = React.useCallback(() => {
|
||||
onMessageFragmentAppend?.(messageId, createTextContentFragment(''));
|
||||
@@ -261,6 +265,16 @@ export function ChatMessage(props: {
|
||||
onMessageContinue?.(messageId, continueText);
|
||||
}, [messageId, onMessageContinue]);
|
||||
|
||||
const handleUpstreamResume = React.useCallback(() => {
|
||||
if (!messageGenerator) return;
|
||||
return onMessageUpstreamResume?.(messageGenerator, messageId);
|
||||
}, [messageGenerator, messageId, onMessageUpstreamResume]);
|
||||
|
||||
const handleUpstreamDelete = React.useCallback(() => {
|
||||
if (!messageGenerator) return;
|
||||
return onMessageUpstreamDelete?.(messageGenerator, messageId);
|
||||
}, [messageGenerator, messageId, onMessageUpstreamDelete]);
|
||||
|
||||
|
||||
// Text Editing
|
||||
|
||||
@@ -280,14 +294,25 @@ export function ChatMessage(props: {
|
||||
}, [handleFragmentDelete, handleFragmentReplace, messageFragments]);
|
||||
|
||||
const handleApplyAllEdits = React.useCallback(async (withControl: boolean) => {
|
||||
const state = textContentEditState || {};
|
||||
// 0. take state, including new attachment drafts BEFORE clearing state
|
||||
const fragmentsEdits = textContentEditState || {};
|
||||
const newFragments = await attachmentsEditRef.current?.takeAllFragments() ?? [];
|
||||
|
||||
// 1. clear edit state (unmounts EditModeAttachments, triggers cleanup)
|
||||
setTextContentEditState(null);
|
||||
for (const [fragmentId, editedText] of Object.entries(state))
|
||||
|
||||
// 2A. apply text fragment edits
|
||||
for (const [fragmentId, editedText] of Object.entries(fragmentsEdits))
|
||||
handleApplyEdit(fragmentId, editedText);
|
||||
// if the user pressed Ctrl, we begin a regeneration from here
|
||||
|
||||
// 2B. append new attachment fragments
|
||||
for (const fragment of newFragments)
|
||||
onMessageFragmentAppend?.(messageId, fragment);
|
||||
|
||||
// 3. if the user pressed Ctrl, we begin a regeneration from here
|
||||
if (withControl && onMessageAssistantFrom)
|
||||
await onMessageAssistantFrom(messageId, 0);
|
||||
}, [handleApplyEdit, messageId, onMessageAssistantFrom, textContentEditState]);
|
||||
}, [handleApplyEdit, messageId, onMessageAssistantFrom, onMessageFragmentAppend, textContentEditState]);
|
||||
|
||||
const handleEditsApplyClicked = React.useCallback(() => handleApplyAllEdits(false), [handleApplyAllEdits]);
|
||||
|
||||
@@ -314,11 +339,17 @@ export function ChatMessage(props: {
|
||||
|
||||
const handleCloseOpsMenu = React.useCallback(() => setOpsMenuAnchor(null), []);
|
||||
|
||||
const handleOpsCopy = (e: React.MouseEvent) => {
|
||||
copyToClipboard(textSubject, 'Text');
|
||||
const handleOpsMessageCopySrc = React.useCallback((e: React.MouseEvent) => {
|
||||
e.preventDefault();
|
||||
// copy full source text (ops menu) - bypasses DOM, always gets pre-collapsed content
|
||||
copyToClipboard(fragmentFlattenedText, 'Message');
|
||||
handleCloseOpsMenu();
|
||||
closeContextMenu();
|
||||
}, [fragmentFlattenedText, handleCloseOpsMenu]);
|
||||
|
||||
const handleBubbleCopyDOM = (e: React.MouseEvent) => {
|
||||
e.preventDefault();
|
||||
// copy cleaned DOM selection (bubble) - rich text for pasting into Google Docs, etc.
|
||||
clipboardCopyDOMSelectionOrFallback(blocksRendererRef.current, textSubject, 'Selection');
|
||||
closeBubble();
|
||||
};
|
||||
|
||||
@@ -342,6 +373,13 @@ export function ChatMessage(props: {
|
||||
onMessageToggleUserFlag?.(messageId, MESSAGE_FLAG_STARRED);
|
||||
}, [messageId, onMessageToggleUserFlag]);
|
||||
|
||||
const handleOpsShowInfo = React.useCallback(() => {
|
||||
setOpsMenuAnchor(null);
|
||||
setShowInfoModal(true);
|
||||
}, []);
|
||||
|
||||
const handleInfoClose = React.useCallback(() => setShowInfoModal(false), []);
|
||||
|
||||
const handleOpsToggleNotifyComplete = React.useCallback(() => {
|
||||
// also remember the preference, for auto-setting flags by the persona
|
||||
setIsNotificationEnabledForModel(messageId, !isUserNotifyComplete);
|
||||
@@ -579,9 +617,9 @@ export function ChatMessage(props: {
|
||||
|
||||
const lookForOptions = props.onMessageContinue !== undefined && props.isBottom === true && messageGenerator?.tokenStopReason !== 'out-of-tokens' && fromAssistant && !messagePendingIncomplete && !isEditingText && uiComplexityMode !== 'minimal' && false;
|
||||
|
||||
const { fragments: renderContentFragments, options: continuationOptions } = React.useMemo(() => {
|
||||
return optionsExtractFromFragments_dangerModifyFragment(lookForOptions, contentFragments);
|
||||
}, [contentFragments, lookForOptions]);
|
||||
const { fragments: renderInterleavedFragments, options: continuationOptions } = React.useMemo(() => {
|
||||
return optionsExtractFromFragments_dangerModifyFragment(lookForOptions, interleavedFragments);
|
||||
}, [interleavedFragments, lookForOptions]);
|
||||
|
||||
|
||||
// style
|
||||
@@ -589,7 +627,7 @@ export function ChatMessage(props: {
|
||||
|
||||
const listItemSx: SxProps = React.useMemo(() => ({
|
||||
// vars
|
||||
'--AGI-overlay-start-opacity': uiComplexityMode === 'extra' ? 0.1 : 0,
|
||||
// '--AGI-overlay-start-opacity': uiComplexityMode === 'extra' ? 0.1 : 0, // disabled - looks worse
|
||||
|
||||
// style
|
||||
backgroundColor: backgroundColor,
|
||||
@@ -773,20 +811,23 @@ export function ChatMessage(props: {
|
||||
/>
|
||||
)}
|
||||
|
||||
{/* Void Fragments */}
|
||||
{voidFragments.length >= 1 && (
|
||||
{/* Annotation Fragments (absolute top: citations, references) */}
|
||||
{annotationFragments.length >= 1 && (
|
||||
<VoidFragments
|
||||
voidFragments={voidFragments}
|
||||
nonVoidFragmentsCount={renderContentFragments.length}
|
||||
voidFragments={annotationFragments}
|
||||
nonVoidFragmentsCount={interleavedFragments.filter(f => f.ft === 'content').length}
|
||||
contentScaling={adjContentScaling}
|
||||
uiComplexityMode={uiComplexityMode}
|
||||
messageRole={messageRole}
|
||||
messagePendingIncomplete={messagePendingIncomplete}
|
||||
onFragmentDelete={!props.onMessageFragmentDelete ? undefined : handleFragmentDelete}
|
||||
onFragmentReplace={!props.onMessageFragmentReplace ? undefined : handleFragmentReplace}
|
||||
/>
|
||||
)}
|
||||
|
||||
{/* Content Fragments */}
|
||||
{/* Interleaved Fragments (reasoning + content in temporal order) */}
|
||||
<ContentFragments
|
||||
contentFragments={renderContentFragments}
|
||||
contentFragments={renderInterleavedFragments}
|
||||
showEmptyNotice={!messageFragments.length && !messagePendingIncomplete}
|
||||
|
||||
contentScaling={adjContentScaling}
|
||||
@@ -794,10 +835,11 @@ export function ChatMessage(props: {
|
||||
fitScreen={props.fitScreen}
|
||||
isMobile={props.isMobile}
|
||||
messageRole={messageRole}
|
||||
messageGeneratorLlmId={messageGenerator?.mgt === 'aix' ? messageGenerator.aix?.mId : undefined}
|
||||
messagePendingIncomplete={messagePendingIncomplete}
|
||||
optiAllowSubBlocksMemo={!!messagePendingIncomplete}
|
||||
disableMarkdownText={disableMarkdown || fromUser /* User messages are edited as text. Try to have them in plain text. NOTE: This may bite. */}
|
||||
showUnsafeHtmlCode={props.showUnsafeHtmlCode}
|
||||
enhanceCodeBlocks={labsEnhanceCodeBlocks}
|
||||
|
||||
textEditsState={textContentEditState}
|
||||
setEditedText={(!props.onMessageFragmentReplace || messagePendingIncomplete) ? undefined : handleEditSetText}
|
||||
@@ -828,6 +870,14 @@ export function ChatMessage(props: {
|
||||
/>
|
||||
)}
|
||||
|
||||
{/* [Edit Mode] Add new attachments (right below the Document Fragments) */}
|
||||
{isEditingText && !fromAssistant && !!onMessageFragmentAppend && (
|
||||
<ChatMessageEditAttachments
|
||||
ref={attachmentsEditRef}
|
||||
isMobile={props.isMobile}
|
||||
/>
|
||||
)}
|
||||
|
||||
{/* [SYSTEM, REAL] Image Attachment Fragments - just for a realistic display below the system instruction text/docs */}
|
||||
{fromSystem && imageAttachments.length >= 1 && (
|
||||
<ImageAttachmentFragments
|
||||
@@ -848,13 +898,13 @@ export function ChatMessage(props: {
|
||||
/>
|
||||
)}
|
||||
|
||||
{/* Upstream Resume... */}
|
||||
{props.isBottom && fromAssistant && lastFragmentIsError && messageGenerator?.upstreamHandle?.responseId && (
|
||||
{/* Upstream Resume - shows whenever there's a stored handle (incl. post-reload, and while streaming so Stop can cancel the upstream run) */}
|
||||
{props.isBottom && fromAssistant && messageGenerator?.upstreamHandle && (!!onMessageUpstreamResume || !!onMessageUpstreamDelete) && (
|
||||
<BlockOpUpstreamResume
|
||||
upstreamHandle={messageGenerator.upstreamHandle}
|
||||
onResume={console.error}
|
||||
onCancel={console.error}
|
||||
onDelete={console.error}
|
||||
pending={messagePendingIncomplete}
|
||||
onResume={(!messagePendingIncomplete && onMessageUpstreamResume) ? handleUpstreamResume : undefined}
|
||||
onDelete={onMessageUpstreamDelete ? handleUpstreamDelete : undefined}
|
||||
/>
|
||||
)}
|
||||
|
||||
@@ -867,6 +917,13 @@ export function ChatMessage(props: {
|
||||
/>
|
||||
)}
|
||||
|
||||
{/* Char & Word count */}
|
||||
{/*{!zenMode && !isEditingText && !messagePendingIncomplete && fragmentFlattenedText.length > 0 && (*/}
|
||||
{/* <Typography level='body-xs' sx={{ mx: 1.5, mt: 0.5, textAlign: fromAssistant ? 'left' : 'right', opacity: 0.5 }}>*/}
|
||||
{/* {fragmentFlattenedText.length.toLocaleString()} chars · {(fragmentFlattenedText.match(/\S+/g) || []).length.toLocaleString()} words*/}
|
||||
{/* </Typography>*/}
|
||||
{/*)}*/}
|
||||
|
||||
</Box>
|
||||
|
||||
|
||||
@@ -888,18 +945,18 @@ export function ChatMessage(props: {
|
||||
|
||||
|
||||
{/* Overlay copy icon */}
|
||||
{ENABLE_COPY_MESSAGE_OVERLAY && !fromSystem && !isEditingText && (
|
||||
<Tooltip title={messagePendingIncomplete ? null : (fromAssistant ? 'Copy message' : 'Copy input')} variant='solid'>
|
||||
<IconButton
|
||||
variant='outlined' onClick={handleOpsCopy}
|
||||
sx={{
|
||||
position: 'absolute', ...(fromAssistant ? { right: { xs: 12, md: 28 } } : { left: { xs: 12, md: 28 } }), zIndex: 10,
|
||||
opacity: 0, transition: 'opacity 0.16s cubic-bezier(.17,.84,.44,1)',
|
||||
}}>
|
||||
<ContentCopyIcon />
|
||||
</IconButton>
|
||||
</Tooltip>
|
||||
)}
|
||||
{/*{ENABLE_COPY_MESSAGE_OVERLAY && !fromSystem && !isEditingText && (*/}
|
||||
{/* <Tooltip title={messagePendingIncomplete ? null : (fromAssistant ? 'Copy message' : 'Copy input')} variant='solid'>*/}
|
||||
{/* <IconButton*/}
|
||||
{/* variant='outlined' onClick={handleOpsMessageCopySrc}*/}
|
||||
{/* sx={{*/}
|
||||
{/* position: 'absolute', ...(fromAssistant ? { right: { xs: 12, md: 28 } } : { left: { xs: 12, md: 28 } }), zIndex: 10,*/}
|
||||
{/* opacity: 0, transition: 'opacity 0.16s cubic-bezier(.17,.84,.44,1)',*/}
|
||||
{/* }}>*/}
|
||||
{/* <ContentCopyIcon />*/}
|
||||
{/* </IconButton>*/}
|
||||
{/* </Tooltip>*/}
|
||||
{/*)}*/}
|
||||
|
||||
|
||||
{/* Message Operations Menu (3 dots) */}
|
||||
@@ -929,25 +986,22 @@ export function ChatMessage(props: {
|
||||
</MenuItem>
|
||||
)}
|
||||
{/* Copy */}
|
||||
<MenuItem onClick={handleOpsCopy} sx={{ flex: 1 }}>
|
||||
<MenuItem onClick={handleOpsMessageCopySrc} sx={{ flex: 1 }}>
|
||||
<ListItemDecorator><ContentCopyIcon /></ListItemDecorator>
|
||||
Copy
|
||||
</MenuItem>
|
||||
{/* Starred */}
|
||||
{!!onMessageToggleUserFlag && (
|
||||
<MenuItem onClick={handleOpsToggleStarred} sx={{ flexGrow: 0, px: 1 }}>
|
||||
<Tooltip disableInteractive title={!isUserStarred ? 'Link message - use @ to refer to it from another chat' : 'Remove link'}>
|
||||
{isUserStarred
|
||||
? <AlternateEmailIcon color='primary' sx={{ fontSize: 'xl' }} />
|
||||
: <InsertLinkIcon sx={{ rotate: '45deg' }} />
|
||||
}
|
||||
{/*{isUserStarred*/}
|
||||
{/* ? <StarRoundedIcon color='primary' sx={{ fontSize: 'xl2' }} />*/}
|
||||
{/* : <StarOutlineRoundedIcon sx={{ fontSize: 'xl2' }} />*/}
|
||||
{/*}*/}
|
||||
<Tooltip disableInteractive title={!isUserStarred ? 'Star message - use @ to refer to it from another chat' : 'Remove star'}>
|
||||
<StarredState isStarred={isUserStarred} />
|
||||
</Tooltip>
|
||||
</MenuItem>
|
||||
)}
|
||||
{/* Info */}
|
||||
<MenuItem onClick={handleOpsShowInfo} sx={{ flexGrow: 0, px: 1 }}>
|
||||
<InfoOutlinedIcon sx={{ fontSize: 'xl' }} />
|
||||
</MenuItem>
|
||||
</Box>
|
||||
|
||||
{/* Notify Complete */}
|
||||
@@ -1010,7 +1064,7 @@ export function ChatMessage(props: {
|
||||
{!!props.onTextDiagram && <ListDivider />}
|
||||
{!!props.onTextDiagram && (
|
||||
<MenuItem onClick={handleOpsDiagram} disabled={!couldDiagram}>
|
||||
<ListItemDecorator><AccountTreeOutlinedIcon /></ListItemDecorator>
|
||||
<ListItemDecorator><PhTreeStructure /></ListItemDecorator>
|
||||
Auto-Diagram ...
|
||||
</MenuItem>
|
||||
)}
|
||||
@@ -1022,7 +1076,7 @@ export function ChatMessage(props: {
|
||||
)}
|
||||
{!!props.onTextSpeak && (
|
||||
<MenuItem onClick={handleOpsSpeak} disabled={!couldSpeak || props.isSpeaking}>
|
||||
<ListItemDecorator>{props.isSpeaking ? <CircularProgress size='sm' /> : <RecordVoiceOverOutlinedIcon />}</ListItemDecorator>
|
||||
<ListItemDecorator>{props.isSpeaking ? <CircularProgress size='sm' /> : <PhVoice />}</ListItemDecorator>
|
||||
Speak
|
||||
</MenuItem>
|
||||
)}
|
||||
@@ -1140,7 +1194,7 @@ export function ChatMessage(props: {
|
||||
{/* Intelligent functions */}
|
||||
{!!props.onTextDiagram && <Tooltip disableInteractive arrow placement='top' title={couldDiagram ? 'Auto-Diagram...' : 'Too short to Auto-Diagram'}>
|
||||
<IconButton color='success' onClick={couldDiagram ? handleOpsDiagram : undefined}>
|
||||
<AccountTreeOutlinedIcon sx={{ color: couldDiagram ? 'primary' : 'neutral.plainDisabledColor' }} />
|
||||
<PhTreeStructure sx={{ color: couldDiagram ? 'primary' : 'neutral.plainDisabledColor' }} />
|
||||
</IconButton>
|
||||
</Tooltip>}
|
||||
{!!props.onTextImagine && <Tooltip disableInteractive arrow placement='top' title='Auto-Draw'>
|
||||
@@ -1150,18 +1204,26 @@ export function ChatMessage(props: {
|
||||
</Tooltip>}
|
||||
{!!props.onTextSpeak && <Tooltip disableInteractive arrow placement='top' title='Speak'>
|
||||
<IconButton color='success' onClick={handleOpsSpeak} disabled={!couldSpeak || props.isSpeaking}>
|
||||
{!props.isSpeaking ? <RecordVoiceOverOutlinedIcon /> : <CircularProgress sx={{ '--CircularProgress-size': '16px' }} />}
|
||||
{!props.isSpeaking ? <PhVoice /> : <CircularProgress sx={{ '--CircularProgress-size': '16px' }} />}
|
||||
</IconButton>
|
||||
</Tooltip>}
|
||||
{(!!props.onTextDiagram || !!props.onTextImagine || !!props.onTextSpeak) && <Divider />}
|
||||
|
||||
{/* Bubble Copy */}
|
||||
<Tooltip disableInteractive arrow placement='top' title='Copy Selection'>
|
||||
<IconButton onClick={handleOpsCopy}>
|
||||
<IconButton onClick={handleBubbleCopyDOM}>
|
||||
<ContentCopyIcon />
|
||||
</IconButton>
|
||||
</Tooltip>
|
||||
|
||||
{/* Selection char & word count */}
|
||||
{!!selText && <Divider />}
|
||||
{!!selText && (
|
||||
<Typography level='body-xs' sx={{ px: 1, whiteSpace: 'nowrap' }}>
|
||||
{selText.length.toLocaleString()}c · {(selText.match(/\S+/g) || []).length.toLocaleString()}w
|
||||
</Typography>
|
||||
)}
|
||||
|
||||
</ButtonGroup>
|
||||
</ClickAwayListener>
|
||||
</Popper>
|
||||
@@ -1176,13 +1238,13 @@ export function ChatMessage(props: {
|
||||
minWidth={220}
|
||||
placement='bottom-start'
|
||||
>
|
||||
<MenuItem onClick={handleOpsCopy} sx={{ flex: 1, alignItems: 'center' }}>
|
||||
<MenuItem onClick={(e) => { handleOpsMessageCopySrc(e); closeContextMenu(); }} sx={{ flex: 1, alignItems: 'center' }}>
|
||||
<ListItemDecorator><ContentCopyIcon /></ListItemDecorator>
|
||||
Copy
|
||||
</MenuItem>
|
||||
{!!props.onTextDiagram && <ListDivider />}
|
||||
{!!props.onTextDiagram && <MenuItem onClick={handleOpsDiagram} disabled={!couldDiagram || props.isImagining}>
|
||||
<ListItemDecorator><AccountTreeOutlinedIcon /></ListItemDecorator>
|
||||
<ListItemDecorator><PhTreeStructure /></ListItemDecorator>
|
||||
Auto-Diagram ...
|
||||
</MenuItem>}
|
||||
{!!props.onTextImagine && <MenuItem onClick={handleOpsImagine} disabled={!couldImagine || props.isImagining}>
|
||||
@@ -1190,12 +1252,22 @@ export function ChatMessage(props: {
|
||||
Auto-Draw
|
||||
</MenuItem>}
|
||||
{!!props.onTextSpeak && <MenuItem onClick={handleOpsSpeak} disabled={!couldSpeak || props.isSpeaking}>
|
||||
<ListItemDecorator>{props.isSpeaking ? <CircularProgress size='sm' /> : <RecordVoiceOverOutlinedIcon />}</ListItemDecorator>
|
||||
<ListItemDecorator>{props.isSpeaking ? <CircularProgress size='sm' /> : <PhVoice />}</ListItemDecorator>
|
||||
Speak
|
||||
</MenuItem>}
|
||||
</CloseablePopup>
|
||||
)}
|
||||
|
||||
|
||||
{/* Message Info Modal */}
|
||||
{showInfoModal && (
|
||||
<ChatMessageInfoPopup
|
||||
open
|
||||
onClose={handleInfoClose}
|
||||
message={props.message}
|
||||
/>
|
||||
)}
|
||||
|
||||
</Box>
|
||||
);
|
||||
}
|
||||
|
||||
@@ -0,0 +1,155 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import type { SxProps } from '@mui/joy/styles/types';
|
||||
import { Sheet } from '@mui/joy';
|
||||
|
||||
import { useBrowseCapability } from '~/modules/browse/store-module-browsing';
|
||||
|
||||
import type { AttachmentDraftsStoreApi } from '~/common/attachment-drafts/store-attachment-drafts_slice';
|
||||
import type { DMessageAttachmentFragment } from '~/common/stores/chat/chat.fragments';
|
||||
import { AttachmentDraftsList } from '~/common/attachment-drafts/attachment-drafts-ui/AttachmentDraftsList';
|
||||
import { AttachmentSourcesMemo } from '~/common/attachment-drafts/attachment-sources/AttachmentSources';
|
||||
import { useAttachHandler_CameraOpen, useAttachHandler_Files, useAttachHandler_ScreenCapture, useAttachHandler_UrlWebLinks } from '~/common/attachment-drafts/attachment-sources/useAttachmentSourceHandlers';
|
||||
import { createAttachmentDraftsVanillaStore } from '~/common/attachment-drafts/store-attachment-drafts_vanilla';
|
||||
import { supportsCameraCapture } from '~/common/components/camera/useCameraCapture';
|
||||
import { supportsScreenCapture } from '~/common/util/screenCaptureUtils';
|
||||
import { useAttachmentDrafts } from '~/common/attachment-drafts/useAttachmentDrafts';
|
||||
import { useGoogleDrivePicker } from '~/common/attachment-drafts/attachment-sources/useGoogleDrivePicker';
|
||||
|
||||
import { ViewDocPartModal } from './fragments-content/ViewDocPartModal';
|
||||
import { ViewImageRefPartModal } from './fragments-content/ViewImageRefPartModal';
|
||||
|
||||
|
||||
/**
|
||||
* Imperative interface used outside
|
||||
*/
|
||||
export interface EditModeAttachmentsHandle {
|
||||
takeAllFragments: () => Promise<DMessageAttachmentFragment[]>;
|
||||
}
|
||||
|
||||
|
||||
const _styles = {
|
||||
box: {
|
||||
overflow: 'hidden',
|
||||
p: 0.5,
|
||||
|
||||
// looks - exactly from BoxTextArea - the Text editor
|
||||
boxShadow: 'inset 1px 0px 3px -2px var(--joy-palette-warning-softColor)',
|
||||
outline: '1px solid',
|
||||
outlineColor: 'var(--joy-palette-warning-solidBg)',
|
||||
borderRadius: 'sm',
|
||||
|
||||
// layout
|
||||
display: 'flex',
|
||||
flexWrap: 'wrap',
|
||||
alignItems: 'center',
|
||||
gap: 1,
|
||||
|
||||
// shade to the buttons inside this > div > div > button
|
||||
'& > div > div > button': {
|
||||
// backgroundColor: 'warning.softActiveBg',
|
||||
borderColor: 'warning.outlinedBorder',
|
||||
borderRadius: 'sm',
|
||||
boxShadow: 'sm',
|
||||
},
|
||||
},
|
||||
} as const satisfies Record<string, SxProps>;
|
||||
|
||||
|
||||
/**
|
||||
* Encapsulates all attachment wiring for ChatMessage edit mode.
|
||||
* Owns a standalone attachment drafts store (one per edit session).
|
||||
* Exposes an imperative handle for the parent to "take" fragments on save.
|
||||
*/
|
||||
export const ChatMessageEditAttachments = React.forwardRef<EditModeAttachmentsHandle, { isMobile: boolean }>(
|
||||
function EditModeAttachments(props, ref) {
|
||||
|
||||
// state
|
||||
const storeApiRef = React.useRef<AttachmentDraftsStoreApi | null>(null);
|
||||
if (!storeApiRef.current) storeApiRef.current = createAttachmentDraftsVanillaStore(); // created only on mount
|
||||
|
||||
// external state
|
||||
const {
|
||||
attachmentDrafts,
|
||||
attachAppendClipboardItems, attachAppendCloudFile, attachAppendFile, attachAppendUrl, // attachAppendDataTransfer
|
||||
attachmentsTakeAllFragments,
|
||||
} = useAttachmentDrafts(storeApiRef.current, false, false, undefined, false);
|
||||
const browseCapability = useBrowseCapability();
|
||||
|
||||
|
||||
// imperative handle for parent to take fragments on save
|
||||
React.useImperativeHandle(ref, () => ({
|
||||
takeAllFragments: () => attachmentsTakeAllFragments('global', 'app-chat'),
|
||||
}), [attachmentsTakeAllFragments]);
|
||||
|
||||
|
||||
// [effect] cleanup on unmount - remove all drafts (deleted their DBlob assets, except for 'taken' ones)
|
||||
React.useEffect(() => {
|
||||
const store = storeApiRef.current;
|
||||
return () => {
|
||||
store?.getState().removeAllAttachmentDrafts();
|
||||
};
|
||||
}, []);
|
||||
|
||||
|
||||
// handlers - composed from shared attachment source hooks
|
||||
|
||||
const handleAttachFiles = useAttachHandler_Files(attachAppendFile);
|
||||
const handleOpenCamera = useAttachHandler_CameraOpen(attachAppendFile);
|
||||
const handleAttachScreenCapture = useAttachHandler_ScreenCapture(attachAppendFile);
|
||||
const { openWebInputDialog, webInputDialogComponent } = useAttachHandler_UrlWebLinks(attachAppendUrl);
|
||||
const { openGoogleDrivePicker, googleDrivePickerComponent } = useGoogleDrivePicker(attachAppendCloudFile, props.isMobile);
|
||||
|
||||
// viewer render props - same pattern as ComposerAttachmentDraftsList.tsx:44-52
|
||||
const renderDocViewer = React.useCallback(
|
||||
(part: React.ComponentProps<typeof ViewDocPartModal>['docPart'], onClose: () => void) =>
|
||||
<ViewDocPartModal docPart={part} onClose={onClose} />,
|
||||
[],
|
||||
);
|
||||
|
||||
const renderImageViewer = React.useCallback(
|
||||
(part: React.ComponentProps<typeof ViewImageRefPartModal>['imageRefPart'], onClose: () => void) =>
|
||||
<ViewImageRefPartModal imageRefPart={part} onClose={onClose} />,
|
||||
[],
|
||||
);
|
||||
|
||||
|
||||
return <>
|
||||
|
||||
<Sheet color='warning' variant='soft' sx={_styles.box}>
|
||||
|
||||
{/* [+] Attachment Sources menu */}
|
||||
<AttachmentSourcesMemo
|
||||
mode='menu-message'
|
||||
canBrowse={browseCapability.mayWork}
|
||||
hasScreenCapture={supportsScreenCapture}
|
||||
hasCamera={supportsCameraCapture()}
|
||||
// onlyImages={showAttachOnlyImages}
|
||||
onAttachClipboard={attachAppendClipboardItems}
|
||||
onAttachFiles={handleAttachFiles}
|
||||
onAttachScreenCapture={handleAttachScreenCapture}
|
||||
onOpenCamera={handleOpenCamera}
|
||||
onOpenGoogleDrivePicker={openGoogleDrivePicker}
|
||||
onOpenWebInput={openWebInputDialog}
|
||||
/>
|
||||
|
||||
{/* Attachment Drafts list */}
|
||||
{attachmentDrafts.length > 0 ? (
|
||||
<AttachmentDraftsList
|
||||
attachmentDraftsStoreApi={storeApiRef.current!}
|
||||
attachmentDrafts={attachmentDrafts}
|
||||
buttonsCanWrap
|
||||
renderDocViewer={renderDocViewer}
|
||||
renderImageViewer={renderImageViewer}
|
||||
/>
|
||||
) : null}
|
||||
|
||||
</Sheet>
|
||||
|
||||
{/* Modal portals */}
|
||||
{webInputDialogComponent}
|
||||
{googleDrivePickerComponent}
|
||||
|
||||
</>;
|
||||
},
|
||||
);
|
||||
@@ -0,0 +1,104 @@
|
||||
import * as React from 'react';
|
||||
import TimeAgo from 'react-timeago';
|
||||
|
||||
import type { SxProps } from '@mui/joy/styles/types';
|
||||
import { Box } from '@mui/joy';
|
||||
|
||||
import { llmsGetVendorIcon } from '~/modules/llms/components/LLMVendorIcon';
|
||||
|
||||
import type { DMessage } from '~/common/stores/chat/chat.message';
|
||||
import type { Immutable } from '~/common/types/immutable.types';
|
||||
import { GoodModal } from '~/common/components/modals/GoodModal';
|
||||
import { tooltipMetricsGridSx, prettyMessageMetrics, prettyShortChatModelName, prettyTokenStopReason } from '~/common/util/dMessageUtils';
|
||||
|
||||
|
||||
const contentSx: SxProps = {
|
||||
fontSize: 'sm',
|
||||
display: 'grid',
|
||||
gap: 1.5,
|
||||
};
|
||||
|
||||
const vendorIconContainerSx: SxProps = {
|
||||
display: 'flex',
|
||||
alignItems: 'center',
|
||||
gap: 1,
|
||||
};
|
||||
|
||||
const timestampSx: SxProps = {
|
||||
fontSize: 'xs',
|
||||
color: 'text.tertiary',
|
||||
};
|
||||
|
||||
|
||||
export function ChatMessageInfoPopup(props: {
|
||||
open: boolean,
|
||||
onClose: () => void,
|
||||
message: Immutable<DMessage>,
|
||||
}) {
|
||||
|
||||
const { message } = props;
|
||||
const { generator, created, updated, tokenCount, role } = message;
|
||||
|
||||
const isAix = generator?.mgt === 'aix';
|
||||
const vendorId = isAix ? generator.aix?.vId ?? null : null;
|
||||
const VendorIcon = vendorId ? llmsGetVendorIcon(vendorId) : null;
|
||||
const metrics = generator?.metrics ? prettyMessageMetrics(generator.metrics, 'extra') : null;
|
||||
const stopReason = generator?.tokenStopReason ? prettyTokenStopReason(generator.tokenStopReason, 'extra') : null;
|
||||
|
||||
return (
|
||||
<GoodModal
|
||||
open={props.open}
|
||||
onClose={props.onClose}
|
||||
title='Message Info'
|
||||
hideBottomClose
|
||||
sx={{ minWidth: { xs: 300, sm: 400 }, maxWidth: 480 }}
|
||||
>
|
||||
<Box sx={contentSx}>
|
||||
|
||||
{/* Model / Generator */}
|
||||
{generator && (
|
||||
<Box sx={tooltipMetricsGridSx}>
|
||||
<div>Model:</div>
|
||||
<div>
|
||||
{VendorIcon
|
||||
? <Box sx={vendorIconContainerSx}><VendorIcon />{prettyShortChatModelName(generator.name)}</Box>
|
||||
: prettyShortChatModelName(generator.name)}
|
||||
</div>
|
||||
{isAix && generator.aix?.mId && <>
|
||||
<div>ID:</div>
|
||||
<div style={{ opacity: 0.75 }}>{generator.aix.mId}</div>
|
||||
</>}
|
||||
{generator.providerInfraLabel && <>
|
||||
<div>Provider:</div>
|
||||
<div>{generator.providerInfraLabel}</div>
|
||||
</>}
|
||||
{stopReason && <>
|
||||
<div>Status:</div>
|
||||
<div>{stopReason}</div>
|
||||
</>}
|
||||
</Box>
|
||||
)}
|
||||
|
||||
{/* Metrics (tokens, speed, cost, time) */}
|
||||
{metrics}
|
||||
|
||||
{/* Message metadata */}
|
||||
<Box sx={tooltipMetricsGridSx}>
|
||||
<div>Role:</div>
|
||||
<div>{role}</div>
|
||||
{tokenCount > 0 && <>
|
||||
<div>Tokens:</div>
|
||||
<div>{tokenCount.toLocaleString()} (visible text ~approx)</div>
|
||||
</>}
|
||||
</Box>
|
||||
|
||||
{/* Timestamps */}
|
||||
<Box sx={timestampSx}>
|
||||
{!!created && <div>Created <TimeAgo date={created} /> - {new Date(created).toLocaleString()}</div>}
|
||||
{!!updated && <div>Updated <TimeAgo date={updated} /> - {new Date(updated).toLocaleString()}</div>}
|
||||
</Box>
|
||||
|
||||
</Box>
|
||||
</GoodModal>
|
||||
);
|
||||
}
|
||||
@@ -5,13 +5,13 @@ import AttachFileRoundedIcon from '@mui/icons-material/AttachFileRounded';
|
||||
import ClearIcon from '@mui/icons-material/Clear';
|
||||
import DeleteOutlineIcon from '@mui/icons-material/DeleteOutline';
|
||||
import ErrorIcon from '@mui/icons-material/ErrorRounded';
|
||||
import ImageIcon from '@mui/icons-material/ImageRounded';
|
||||
import TextFieldsIcon from '@mui/icons-material/TextFieldsRounded';
|
||||
import VisibilityIcon from '@mui/icons-material/Visibility';
|
||||
import VisibilityOffIcon from '@mui/icons-material/VisibilityOff';
|
||||
|
||||
import { DMessage, MESSAGE_FLAG_AIX_SKIP, messageFragmentsReduceText, messageHasUserFlag } from '~/common/stores/chat/chat.message';
|
||||
import { DMessageAttachmentFragment, DMessageFragment, isAttachmentFragment, isContentFragment, isImageRefPart, isZyncAssetImageReferencePart } from '~/common/stores/chat/chat.fragments';
|
||||
import { PhImageSquare } from '~/common/components/icons/phosphor/PhImageSquare';
|
||||
import { makeMessageAvatarIcon, messageBackground } from '~/common/util/dMessageUtils';
|
||||
|
||||
import { TokenBadgeMemo } from '../composer/tokens/TokenBadge';
|
||||
@@ -273,7 +273,7 @@ export function CleanerMessage(props: { message: DMessage, selected: boolean, re
|
||||
</Chip>
|
||||
)}
|
||||
{analysis.imageCount > 0 && (
|
||||
<Chip size='sm' variant='solid' color='success' startDecorator={<ImageIcon />} sx={{ px: 1 }}>
|
||||
<Chip size='sm' variant='solid' color='success' startDecorator={<PhImageSquare />} sx={{ px: 1 }}>
|
||||
{analysis.imageCount} image{analysis.imageCount > 1 ? 's' : ''}
|
||||
</Chip>
|
||||
)}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user