
(openai) Truncate messages sequence automatically if reaching token limit

Baptiste Arnaud
2023-05-02 13:37:02 -04:00
parent 94735638a6
commit e58016e43a
6 changed files with 73 additions and 18 deletions
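
In short, the change walks a "Messages sequence ✨" history from newest to oldest, counting tokens with @dqbd/tiktoken, and stops including older messages once the model's context window would be exceeded, keeping headroom for the completion itself. A minimal self-contained TypeScript sketch of that strategy (the ChatMessage shape, the contextLimit parameter, and the truncateToTokenLimit name are illustrative simplifications, not the exact code from the diff below):

import { encoding_for_model, type TiktokenModel } from '@dqbd/tiktoken'

type ChatMessage = { role: 'system' | 'user' | 'assistant'; content: string }

// Keep the newest messages that fit in the context window, reserving
// `headroom` tokens for the model's reply.
const truncateToTokenLimit = (
  messages: ChatMessage[],
  model: TiktokenModel,
  contextLimit: number,
  headroom = 200
): ChatMessage[] => {
  const encoder = encoding_for_model(model)
  const kept: ChatMessage[] = []
  let tokenCount = 0
  // Walk backwards so the most recent turns survive truncation.
  for (let i = messages.length - 1; i >= 0; i--) {
    // Like the commit, this counts content tokens only, not role overhead.
    const tokens = encoder.encode(messages[i].content)
    if (tokenCount + tokens.length + headroom > contextLimit) break
    tokenCount += tokens.length
    kept.unshift(messages[i])
  }
  encoder.free() // tiktoken encoders hold WASM memory and must be freed
  return kept
}

Dropping from the head rather than the tail keeps the latest exchange intact, which is what the next completion depends on most.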

View File

@@ -77,6 +77,8 @@ export const TemplatesModal = ({ isOpen, onClose, onTypebotChoose }: Props) => {
         borderRightWidth={1}
         justify="space-between"
         flexShrink={0}
+        overflowY="scroll"
+        className="hide-scrollbar"
       >
         <Stack spacing={5}>
           <Stack spacing={2}>

View File

@@ -17,7 +17,7 @@ export const useToast = () => {
   }: Omit<ToastProps, 'onClose'>) => {
     toast({
       position: 'top-right',
-      duration: details ? null : undefined,
+      duration: details && status === 'error' ? null : undefined,
       render: ({ onClose }) => (
         <Toast
           title={title}

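
Context for the duration tweak above: with Chakra UI's toast, duration: null keeps a toast on screen until it is dismissed manually, while undefined falls back to the default timeout, so after this change only error toasts that carry details persist. A hedged restatement of that rule (helper name and prop types are illustrative):

// null = persistent toast in Chakra UI; undefined = default auto-dismiss
const toastDuration = (
  details: string | undefined,
  status: 'error' | 'info' | 'success' | 'warning'
): null | undefined => (details && status === 'error' ? null : undefined)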
View File

@@ -13,14 +13,15 @@
     "test:report": "pnpm playwright show-report"
   },
   "dependencies": {
+    "@dqbd/tiktoken": "^1.0.7",
     "@sentry/nextjs": "7.46.0",
     "@trpc/server": "10.18.0",
     "@typebot.io/js": "workspace:*",
+    "@typebot.io/prisma": "workspace:*",
     "@typebot.io/react": "workspace:*",
     "aws-sdk": "2.1348.0",
     "bot-engine": "workspace:*",
     "cors": "2.8.5",
-    "@typebot.io/prisma": "workspace:*",
     "google-spreadsheet": "3.3.0",
     "got": "12.6.0",
     "libphonenumber-js": "1.10.24",
@@ -39,6 +40,10 @@
     "@faire/mjml-react": "3.2.0",
     "@paralleldrive/cuid2": "2.2.0",
     "@playwright/test": "1.32.1",
+    "@typebot.io/emails": "workspace:*",
+    "@typebot.io/lib": "workspace:*",
+    "@typebot.io/schemas": "workspace:*",
+    "@typebot.io/tsconfig": "workspace:*",
     "@types/cors": "2.8.13",
     "@types/google-spreadsheet": "3.3.1",
     "@types/node": "18.15.11",
@@ -48,17 +53,13 @@
     "@types/react": "18.0.32",
     "@types/sanitize-html": "2.9.0",
     "dotenv": "16.0.3",
-    "@typebot.io/emails": "workspace:*",
     "eslint": "8.37.0",
     "eslint-config-custom": "workspace:*",
     "google-auth-library": "8.7.0",
-    "@typebot.io/schemas": "workspace:*",
     "node-fetch": "3.3.1",
     "papaparse": "5.4.1",
     "superjson": "1.12.2",
-    "@typebot.io/tsconfig": "workspace:*",
     "typescript": "5.0.3",
-    "@typebot.io/lib": "workspace:*",
     "zod": "3.21.4"
   }
 }

View File

@@ -11,6 +11,7 @@ import {
 import {
   ChatCompletionOpenAIOptions,
   OpenAICredentials,
+  modelLimit,
 } from '@typebot.io/schemas/features/blocks/integrations/openai'
 import { OpenAIApi, Configuration, ChatCompletionRequestMessage } from 'openai'
 import { isDefined, byId, isNotEmpty, isEmpty } from '@typebot.io/lib'
@@ -20,6 +21,9 @@ import { updateVariables } from '@/features/variables/updateVariables'
 import { parseVariables } from '@/features/variables/parseVariables'
 import { saveSuccessLog } from '@/features/logs/saveSuccessLog'
 import { parseVariableNumber } from '@/features/variables/parseVariableNumber'
+import { encoding_for_model } from '@dqbd/tiktoken'
+
+const minTokenCompletion = 200
 
 export const createChatCompletionOpenAI = async (
   state: SessionState,
@@ -56,7 +60,8 @@ export const createChatCompletionOpenAI = async (
     apiKey,
   })
   const { variablesTransformedToList, messages } = parseMessages(
-    newSessionState.typebot.variables
+    newSessionState.typebot.variables,
+    options.model
   )(options.messages)
   if (variablesTransformedToList.length > 0)
     newSessionState = await updateVariables(state)(variablesTransformedToList)
@@ -148,7 +153,7 @@ export const createChatCompletionOpenAI = async (
 }
 
 const parseMessages =
-  (variables: Variable[]) =>
+  (variables: Variable[], model: ChatCompletionOpenAIOptions['model']) =>
   (
     messages: ChatCompletionOpenAIOptions['messages']
   ): {
@@ -156,8 +161,11 @@ const parseMessages =
     messages: ChatCompletionRequestMessage[]
   } => {
     const variablesTransformedToList: VariableWithValue[] = []
+    const firstMessagesSequenceIndex = messages.findIndex(
+      (message) => message.role === 'Messages sequence ✨'
+    )
     const parsedMessages = messages
-      .flatMap((message) => {
+      .flatMap((message, index) => {
         if (!message.role) return
         if (message.role === 'Messages sequence ✨') {
           if (
@@ -189,23 +197,51 @@ const parseMessages =
             variable.id === message.content?.assistantMessagesVariableId
           )?.value ?? []) as string[]
+          let allMessages: ChatCompletionRequestMessage[] = []
           if (userMessages.length > assistantMessages.length)
-            return userMessages.flatMap((userMessage, index) => [
+            allMessages = userMessages.flatMap((userMessage, index) => [
               {
                 role: 'user',
                 content: userMessage,
               },
-              { role: 'assistant', content: assistantMessages[index] },
+              { role: 'assistant', content: assistantMessages.at(index) ?? '' },
             ]) satisfies ChatCompletionRequestMessage[]
           else {
-            return assistantMessages.flatMap((assistantMessage, index) => [
-              { role: 'assistant', content: assistantMessage },
-              {
-                role: 'user',
-                content: userMessages[index],
-              },
-            ]) satisfies ChatCompletionRequestMessage[]
+            allMessages = assistantMessages.flatMap(
+              (assistantMessage, index) => [
+                { role: 'assistant', content: assistantMessage },
+                {
+                  role: 'user',
+                  content: userMessages.at(index) ?? '',
+                },
+              ]
+            ) satisfies ChatCompletionRequestMessage[]
           }
+          if (index !== firstMessagesSequenceIndex) return allMessages
+          const encoder = encoding_for_model(model)
+          let messagesToSend: ChatCompletionRequestMessage[] = []
+          let tokenCount = 0
+          for (let i = allMessages.length - 1; i >= 0; i--) {
+            const message = allMessages[i]
+            const tokens = encoder.encode(message.content)
+            if (
+              tokenCount + tokens.length - minTokenCompletion >
+              modelLimit[model]
+            ) {
+              break
+            }
+            tokenCount += tokens.length
+            messagesToSend = [message, ...messagesToSend]
+          }
+          encoder.free()
+          return messagesToSend
         }
         return {
           role: message.role,

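
A note on the encoding_for_model / encoder.free() pair introduced above: @dqbd/tiktoken is a WASM build of OpenAI's tokenizer, encode returns a Uint32Array of token ids, and the encoder's memory is only released by an explicit free(). A quick usage example (model name and sample string are illustrative):

import { encoding_for_model } from '@dqbd/tiktoken'

const encoder = encoding_for_model('gpt-3.5-turbo')
const tokenCount = encoder.encode('Hello, how can I help you today?').length
encoder.free() // release the WASM-side buffer once counting is done
console.log(tokenCount)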
View File

@@ -14,6 +14,15 @@ export const chatCompletionModels = [
   'gpt-3.5-turbo-0301',
 ] as const
 
+export const modelLimit = {
+  'gpt-3.5-turbo': 4096,
+  'gpt-3.5-turbo-0301': 4096,
+  'gpt-4': 8192,
+  'gpt-4-0314': 8192,
+  'gpt-4-32k': 32768,
+  'gpt-4-32k-0314': 32768,
+} as const
+
 export const chatCompletionMessageRoles = [
   'system',
   'user',

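
Because modelLimit is declared as const and keyed by the same literals as chatCompletionModels, a lookup like modelLimit[model] stays type-safe: an unsupported model name fails to compile. A small sketch of how the limit combines with the 200-token minTokenCompletion reserve used in the viewer code (the promptBudget helper is hypothetical):

import { modelLimit } from '@typebot.io/schemas/features/blocks/integrations/openai'

// Tokens left for the prompt after reserving room for the completion
const promptBudget = (model: keyof typeof modelLimit): number =>
  modelLimit[model] - 200

console.log(promptBudget('gpt-4')) // 7992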
pnpm-lock.yaml (generated)
View File

@@ -502,6 +502,9 @@ importers:
   apps/viewer:
     dependencies:
+      '@dqbd/tiktoken':
+        specifier: ^1.0.7
+        version: 1.0.7
       '@sentry/nextjs':
         specifier: 7.46.0
         version: 7.46.0(next@13.2.4)(react@18.2.0)
@@ -5228,6 +5231,10 @@ packages:
       - webpack-cli
     dev: false
 
+  /@dqbd/tiktoken@1.0.7:
+    resolution: {integrity: sha512-bhR5k5W+8GLzysjk8zTMVygQZsgvf7W1F0IlL4ZQ5ugjo5rCyiwGM5d8DYriXspytfu98tv59niang3/T+FoDw==}
+    dev: false
+
   /@emotion/babel-plugin@11.10.6:
     resolution: {integrity: sha512-p2dAqtVrkhSa7xz1u/m9eHYdLi+en8NowrmXeF/dKtJpU8lCWli8RUAati7NcSl0afsBott48pdnANuD0wh9QQ==}
     dependencies: