mirror of
https://github.com/enricoros/big-AGI.git
synced 2026-05-11 14:10:15 -07:00
Large improvement to (model-dependent) token utilization
And update README.md
This commit is contained in:
@@ -57,6 +57,7 @@ type ChatModelData = {
  description: string | JSX.Element;
  title: string;
  fullName: string; // seems unused
  contextWindowSize: number,
}

export const ChatModels: { [key in ChatModelId]: ChatModelData } = {
@@ -64,10 +65,12 @@ export const ChatModels: { [key in ChatModelId]: ChatModelData } = {
    description: 'Most insightful, larger problems, but slow, expensive, and may be unavailable',
    title: 'GPT-4',
    fullName: 'GPT-4',
    contextWindowSize: 8192,
  },
  'gpt-3.5-turbo': {
    description: 'A good balance between speed and insight',
    title: '3.5-Turbo',
    fullName: 'GPT-3.5 Turbo',
    contextWindowSize: 4096,
  },
};
@@ -31,8 +31,8 @@ interface SettingsStore {
  modelTemperature: number;
  setModelTemperature: (modelTemperature: number) => void;

  modelMaxTokens: number;
  setModelMaxTokens: (modelMaxTokens: number) => void;
  modelMaxResponseTokens: number;
  setModelMaxResponseTokens: (modelMaxResponseTokens: number) => void;
}
@@ -65,8 +65,8 @@ export const useSettingsStore = create<SettingsStore>()(
  modelTemperature: 0.5,
  setModelTemperature: (modelTemperature: number) => set({ modelTemperature }),

  modelMaxTokens: 2048,
  setModelMaxTokens: (modelMaxTokens: number) => set({ modelMaxTokens }),
  modelMaxResponseTokens: 2048,
  setModelMaxResponseTokens: (modelMaxResponseTokens: number) => set({ modelMaxResponseTokens: modelMaxResponseTokens }),
}),
{
Reference in New Issue
Block a user