diff --git a/README.md b/README.md index 601fa8451..5c5afed96 100644 --- a/README.md +++ b/README.md @@ -26,10 +26,14 @@ Or click fork & run on Vercel 🚨 **We added cool new features to the app!** (bare-bones was [466a36](https://github.com/enricoros/nextjs-chatgpt-app/tree/466a3667a48060d406d60943af01fe26366563fb)) +- [x] _NEW 04.03_ 🎉 **PDF import** 📄🔀🧠 (fredliubojin) <- "ask questions to a PDF!" 🤯 +- [x] _NEW 04.03_ 🎉 **Tokens utilization** 📊 [WIP] +

Token Counters

+- [x] _NEW 04.02_ 🎉 **Markdown rendering** 🎨 (nilshulth) [WIP] - [x] 🎉 **NEW 04.01** Typing Avatars

New Typing Avatars

- [x] 🎉 **NEW 03.31** Publish & share chats to paste.gg 📥 -

Export chats

+

Export chats

- [x] Chat with GPT-4 and 3.5 Turbo 🧠💨 - [x] **Private**: user-owned API keys 🔑 and localStorage 🛡️ - [x] **System presets** - including Code, Science, Corporate, and Chat 🎭 diff --git a/components/Chat.tsx b/components/Chat.tsx index c74d3e4ab..8625bec43 100644 --- a/components/Chat.tsx +++ b/components/Chat.tsx @@ -37,7 +37,7 @@ function createDMessage(role: DMessage['role'], text: string): DMessage { async function _streamAssistantResponseMessage( conversationId: string, history: DMessage[], apiKey: string | undefined, apiHost: string | undefined, - chatModelId: string, modelTemperature: number, modelMaxTokens: number, abortSignal: AbortSignal, + chatModelId: string, modelTemperature: number, modelMaxResponseTokens: number, abortSignal: AbortSignal, addMessage: (conversationId: string, message: DMessage) => void, editMessage: (conversationId: string, messageId: string, updatedMessage: Partial, touch: boolean) => void, ) { @@ -58,7 +58,7 @@ async function _streamAssistantResponseMessage( content: text, })), temperature: modelTemperature, - max_tokens: modelMaxTokens, + max_tokens: modelMaxResponseTokens, }; try { @@ -153,9 +153,9 @@ export function Chat(props: { onShowSettings: () => void, sx?: SxProps }) { const controller = new AbortController(); setAbortController(controller); - const { apiKey, modelTemperature, modelMaxTokens, modelApiHost } = useSettingsStore.getState(); + const { apiKey, modelTemperature, modelMaxResponseTokens, modelApiHost } = useSettingsStore.getState(); const { appendMessage, editMessage } = useChatStore.getState(); - await _streamAssistantResponseMessage(conversationId, history, apiKey, modelApiHost, chatModelId, modelTemperature, modelMaxTokens, controller.signal, appendMessage, editMessage); + await _streamAssistantResponseMessage(conversationId, history, apiKey, modelApiHost, chatModelId, modelTemperature, modelMaxResponseTokens, controller.signal, appendMessage, editMessage); // clear to send, again setAbortController(null); diff 
--git a/components/Composer.tsx b/components/Composer.tsx index 57d2c48ec..771c9f644 100644 --- a/components/Composer.tsx +++ b/components/Composer.tsx @@ -10,10 +10,11 @@ import PostAddIcon from '@mui/icons-material/PostAdd'; import StopOutlinedIcon from '@mui/icons-material/StopOutlined'; import TelegramIcon from '@mui/icons-material/Telegram'; +import { ChatModels } from '@/lib/data'; import { countModelTokens } from '@/lib/token-counters'; import { extractPdfText } from '@/lib/pdf'; import { useActiveConfiguration } from '@/lib/store-chats'; -import { useComposerStore } from '@/lib/store-settings'; +import { useComposerStore, useSettingsStore } from '@/lib/store-settings'; import { useSpeechRecognition } from '@/components/util/useSpeechRecognition'; @@ -54,6 +55,7 @@ export function Composer(props: { disableSend: boolean; isDeveloperMode: boolean // external state const { history, appendMessageToHistory } = useComposerStore(state => ({ history: state.history, appendMessageToHistory: state.appendMessageToHistory }), shallow); const { chatModelId } = useActiveConfiguration(); + const modelMaxResponseTokens = useSettingsStore(state => state.modelMaxResponseTokens); const handleSendClicked = () => { @@ -187,7 +189,13 @@ export function Composer(props: { disableSend: boolean; isDeveloperMode: boolean const hideOnDesktop = { display: { xs: 'flex', md: 'none' } }; // compute tokens (warning: slow - shall have a toggle) - const estimatedTokens = countModelTokens(composeText, chatModelId); + const modelComposerTokens = countModelTokens(composeText, chatModelId); + const modelRestOfChatTokens = 0; + const estimatedTokens = modelComposerTokens + modelRestOfChatTokens; + const modelContextTokens = ChatModels[chatModelId]?.contextWindowSize || 8192; + const remainingTokens = modelContextTokens - estimatedTokens - modelMaxResponseTokens; + const tokensString = `model: ${modelContextTokens.toLocaleString()} - chat: ${estimatedTokens.toLocaleString()} - response: 
${modelMaxResponseTokens.toLocaleString()} = remaining: ${remainingTokens.toLocaleString()} ${remainingTokens < 0 ? '⚠️' : ''}`; + const tokenColor = remainingTokens < 1 ? 'danger' : remainingTokens < modelComposerTokens / 4 ? 'warning' : 'primary'; return ( @@ -252,8 +260,8 @@ export function Composer(props: { disableSend: boolean; isDeveloperMode: boolean = (8192 - 2048) ? 'danger' : estimatedTokens >= (4097 - 2048) ? 'warning' : 'primary'} - badgeContent={estimatedTokens} + badgeContent={estimatedTokens > 0 ? {estimatedTokens} : 0} + color={tokenColor} sx={{ position: 'absolute', bottom: 8, right: 8, }} diff --git a/components/dialogs/SettingsModal.tsx b/components/dialogs/SettingsModal.tsx index 30adb8747..39947f5ef 100644 --- a/components/dialogs/SettingsModal.tsx +++ b/components/dialogs/SettingsModal.tsx @@ -54,11 +54,11 @@ function Section(props: { title?: string; collapsible?: boolean, collapsed?: boo */ export function SettingsModal({ open, onClose }: { open: boolean, onClose: () => void; }) { // external state - const { renderMarkdown, setRenderMarkdown, apiKey, setApiKey, modelTemperature, setModelTemperature, modelMaxTokens, setModelMaxTokens, modelApiHost, setModelApiHost } = useSettingsStore(state => ({ + const { renderMarkdown, setRenderMarkdown, apiKey, setApiKey, modelTemperature, setModelTemperature, modelMaxResponseTokens, setModelMaxResponseTokens, modelApiHost, setModelApiHost } = useSettingsStore(state => ({ renderMarkdown: state.renderMarkdown, setRenderMarkdown: state.setRenderMarkdown, apiKey: state.apiKey, setApiKey: state.setApiKey, modelTemperature: state.modelTemperature, setModelTemperature: state.setModelTemperature, - modelMaxTokens: state.modelMaxTokens, setModelMaxTokens: state.setModelMaxTokens, + modelMaxResponseTokens: state.modelMaxResponseTokens, setModelMaxResponseTokens: state.setModelMaxResponseTokens, modelApiHost: state.modelApiHost, setModelApiHost: state.setModelApiHost, }), shallow); @@ -70,7 +70,7 @@ export function 
SettingsModal({ open, onClose }: { open: boolean, onClose: () => const handleTemperatureChange = (event: Event, newValue: number | number[]) => setModelTemperature(newValue as number); - const handleMaxTokensChange = (event: Event, newValue: number | number[]) => setModelMaxTokens(newValue as number); + const handleMaxTokensChange = (event: Event, newValue: number | number[]) => setModelMaxResponseTokens(newValue as number); const handleModelApiHostChange = (e: React.ChangeEvent) => setModelApiHost((e.target as HTMLInputElement).value); @@ -153,7 +153,7 @@ export function SettingsModal({ open, onClose }: { open: boolean, onClose: () => diff --git a/docs/feature_token_counter.png b/docs/feature_token_counter.png new file mode 100644 index 000000000..51b39cbf9 Binary files /dev/null and b/docs/feature_token_counter.png differ diff --git a/lib/data.ts b/lib/data.ts index ddcb9f254..ffc18e1e4 100644 --- a/lib/data.ts +++ b/lib/data.ts @@ -57,6 +57,7 @@ type ChatModelData = { description: string | JSX.Element; title: string; fullName: string; // seems unused + contextWindowSize: number, } export const ChatModels: { [key in ChatModelId]: ChatModelData } = { @@ -64,10 +65,12 @@ export const ChatModels: { [key in ChatModelId]: ChatModelData } = { description: 'Most insightful, larger problems, but slow, expensive, and may be unavailable', title: 'GPT-4', fullName: 'GPT-4', + contextWindowSize: 8192, }, 'gpt-3.5-turbo': { description: 'A good balance between speed and insight', title: '3.5-Turbo', fullName: 'GPT-3.5 Turbo', + contextWindowSize: 4096, }, }; \ No newline at end of file diff --git a/lib/store-settings.ts b/lib/store-settings.ts index bc47fe6a6..7d1d9ba4e 100644 --- a/lib/store-settings.ts +++ b/lib/store-settings.ts @@ -31,8 +31,8 @@ interface SettingsStore { modelTemperature: number; setModelTemperature: (modelTemperature: number) => void; - modelMaxTokens: number; - setModelMaxTokens: (modelMaxTokens: number) => void; + modelMaxResponseTokens: number; + 
setModelMaxResponseTokens: (modelMaxResponseTokens: number) => void; } @@ -65,8 +65,8 @@ export const useSettingsStore = create()( modelTemperature: 0.5, setModelTemperature: (modelTemperature: number) => set({ modelTemperature }), - modelMaxTokens: 2048, - setModelMaxTokens: (modelMaxTokens: number) => set({ modelMaxTokens }), + modelMaxResponseTokens: 2048, + setModelMaxResponseTokens: (modelMaxResponseTokens: number) => set({ modelMaxResponseTokens: modelMaxResponseTokens }), }), {