⚡ (openai) Add new models and remove tiktoken
Instead of computing total tokens with tiktoken, we just retry after trimming the first message whenever the request exceeds the model's context limit.
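The gist of the new approach (implemented in the executeChatCompletionOpenAIRequest helper added below): when OpenAI rejects a request with a 400 status, which it returns when the prompt exceeds the model's context window, drop the oldest message and re-issue the request recursively until it fits or nothing is left to send. A minimal self-contained sketch of that strategy, where callOpenAI is a hypothetical stand-in for the real got call and the status check is simplified:

    type Message = { role: 'system' | 'user' | 'assistant'; content: string }

    // Hypothetical stand-in for the real HTTP request; assumed to throw an
    // error exposing the HTTP status code when the request is rejected.
    declare function callOpenAI(messages: Message[]): Promise<string>

    const isContextOverflow = (error: unknown): boolean =>
      typeof error === 'object' &&
      error !== null &&
      'statusCode' in error &&
      (error as { statusCode: number }).statusCode === 400

    // Drop the oldest message and retry until the prompt fits the model's
    // context window or there is nothing left to send.
    const completeWithTrimming = async (
      messages: Message[]
    ): Promise<string | undefined> => {
      if (messages.length === 0) return undefined
      try {
        return await callOpenAI(messages)
      } catch (error) {
        if (isContextOverflow(error))
          return completeWithTrimming(messages.slice(1))
        throw error
      }
    }

Compared to counting tokens locally, this trades an extra round-trip on overflowing prompts for one less WASM dependency and no per-model token-limit table to maintain.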
@@ -1,5 +1,5 @@
 import { DropdownList } from '@/components/DropdownList'
-import { Textarea } from '@/components/inputs'
+import { Textarea, TextInput } from '@/components/inputs'
 import { VariableSearchInput } from '@/components/inputs/VariableSearchInput'
 import { TableListItemProps } from '@/components/TableList'
 import { Stack } from '@chakra-ui/react'
@@ -55,6 +55,11 @@ export const ChatCompletionMessageItem = ({ item, onItemChange }: Props) => {
     })
   }
 
+  const updateName = (name: string) => {
+    if (item.role === 'Messages sequence ✨') return
+    onItemChange({ ...item, name })
+  }
+
   return (
     <Stack p="4" rounded="md" flex="1" borderWidth="1px">
       <DropdownList
@@ -77,12 +82,19 @@ export const ChatCompletionMessageItem = ({ item, onItemChange }: Props) => {
           />
         </>
       ) : (
-        <Textarea
-          defaultValue={item.content}
-          onChange={changeSingleMessageContent}
-          placeholder="Content"
-          minH="150px"
-        />
+        <>
+          <Textarea
+            defaultValue={item.content}
+            onChange={changeSingleMessageContent}
+            placeholder="Content"
+            minH="150px"
+          />
+          <TextInput
+            defaultValue={item.name}
+            onChange={updateName}
+            placeholder="Name (Optional)"
+          />
+        </>
       )}
     </Stack>
   )
@@ -1,8 +1,8 @@
-import { DropdownList } from '@/components/DropdownList'
 import { TableList } from '@/components/TableList'
 import {
   chatCompletionModels,
   ChatCompletionOpenAIOptions,
+  deprecatedCompletionModels,
 } from '@typebot.io/schemas/features/blocks/integrations/openai'
 import { ChatCompletionMessageItem } from './ChatCompletionMessageItem'
 import {
@@ -17,6 +17,7 @@ import {
 import { TextLink } from '@/components/TextLink'
 import { ChatCompletionResponseItem } from './ChatCompletionResponseItem'
 import { NumberInput } from '@/components/inputs'
+import { Select } from '@/components/inputs/Select'
 
 const apiReferenceUrl =
   'https://platform.openai.com/docs/api-reference/chat/create'
@@ -30,7 +31,11 @@ export const OpenAIChatCompletionSettings = ({
   options,
   onOptionsChange,
 }: Props) => {
-  const updateModel = (model: (typeof chatCompletionModels)[number]) => {
+  const updateModel = (
+    _: string | undefined,
+    model: (typeof chatCompletionModels)[number] | undefined
+  ) => {
+    if (!model) return
     onOptionsChange({
       ...options,
       model,
@@ -74,10 +79,12 @@ export const OpenAIChatCompletionSettings = ({
         </TextLink>{' '}
         to better understand the available options.
       </Text>
-      <DropdownList
-        currentItem={options.model}
-        items={chatCompletionModels}
-        onItemSelect={updateModel}
+      <Select
+        selectedItem={options.model}
+        items={chatCompletionModels.filter(
+          (model) => deprecatedCompletionModels.indexOf(model) === -1
+        )}
+        onSelect={updateModel}
       />
       <Accordion allowMultiple>
         <AccordionItem>
@@ -1066,13 +1066,28 @@
         "additionalProperties": false
       },
       "min": {
-        "type": "number"
+        "anyOf": [
+          {
+            "type": "number"
+          },
+          {}
+        ]
       },
       "max": {
-        "type": "number"
+        "anyOf": [
+          {
+            "type": "number"
+          },
+          {}
+        ]
       },
       "step": {
-        "type": "number"
+        "anyOf": [
+          {
+            "type": "number"
+          },
+          {}
+        ]
       }
     },
     "required": [
@@ -1966,6 +1981,7 @@
             "Yesterday",
             "Tomorrow",
             "Random ID",
+            "Moment of the day",
             "Map item with same index"
           ]
         },
@@ -2833,12 +2849,17 @@
           "model": {
             "type": "string",
             "enum": [
-              "gpt-4",
-              "gpt-4-0314",
-              "gpt-4-32k",
-              "gpt-4-32k-0314",
               "gpt-3.5-turbo",
-              "gpt-3.5-turbo-0301"
+              "gpt-3.5-turbo-0613",
+              "gpt-3.5-turbo-16k",
+              "gpt-3.5-turbo-16k-0613",
+              "gpt-3.5-turbo-0301",
+              "gpt-4",
+              "gpt-4-0613",
+              "gpt-4-32k",
+              "gpt-4-32k-0613",
+              "gpt-4-32k-0314",
+              "gpt-4-0314"
             ]
           },
           "messages": {
@@ -2861,6 +2882,9 @@
               },
               "content": {
                 "type": "string"
+              },
+              "name": {
+                "type": "string"
               }
             },
             "required": [
@@ -639,13 +639,28 @@
         "additionalProperties": false
       },
       "min": {
-        "type": "number"
+        "anyOf": [
+          {
+            "type": "number"
+          },
+          {}
+        ]
       },
       "max": {
-        "type": "number"
+        "anyOf": [
+          {
+            "type": "number"
+          },
+          {}
+        ]
      },
       "step": {
-        "type": "number"
+        "anyOf": [
+          {
+            "type": "number"
+          },
+          {}
+        ]
       }
     },
     "required": [
@@ -1539,6 +1554,7 @@
             "Yesterday",
             "Tomorrow",
             "Random ID",
+            "Moment of the day",
             "Map item with same index"
           ]
         },
@@ -2406,12 +2422,17 @@
           "model": {
             "type": "string",
             "enum": [
-              "gpt-4",
-              "gpt-4-0314",
-              "gpt-4-32k",
-              "gpt-4-32k-0314",
               "gpt-3.5-turbo",
-              "gpt-3.5-turbo-0301"
+              "gpt-3.5-turbo-0613",
+              "gpt-3.5-turbo-16k",
+              "gpt-3.5-turbo-16k-0613",
+              "gpt-3.5-turbo-0301",
+              "gpt-4",
+              "gpt-4-0613",
+              "gpt-4-32k",
+              "gpt-4-32k-0613",
+              "gpt-4-32k-0314",
+              "gpt-4-0314"
             ]
           },
           "messages": {
@@ -2434,6 +2455,9 @@
               },
               "content": {
                 "type": "string"
+              },
+              "name": {
+                "type": "string"
               }
             },
             "required": [
@@ -3862,13 +3886,28 @@
         "additionalProperties": false
       },
       "min": {
-        "type": "number"
+        "anyOf": [
+          {
+            "type": "number"
+          },
+          {}
+        ]
       },
       "max": {
-        "type": "number"
+        "anyOf": [
+          {
+            "type": "number"
+          },
+          {}
+        ]
       },
       "step": {
-        "type": "number"
+        "anyOf": [
+          {
+            "type": "number"
+          },
+          {}
+        ]
       }
     },
     "required": [
@@ -13,7 +13,6 @@
     "test:report": "pnpm playwright show-report"
   },
   "dependencies": {
-    "@dqbd/tiktoken": "^1.0.7",
     "@planetscale/database": "^1.7.0",
     "@sentry/nextjs": "7.53.1",
     "@trpc/server": "10.27.3",
@@ -1,36 +1,19 @@
 import { ExecuteIntegrationResponse } from '@/features/chat/types'
-import { transformStringVariablesToList } from '@/features/variables/transformVariablesToList'
 import prisma from '@/lib/prisma'
-import {
-  ChatReply,
-  SessionState,
-  Variable,
-  VariableWithValue,
-} from '@typebot.io/schemas'
+import { SessionState } from '@typebot.io/schemas'
 import {
   ChatCompletionOpenAIOptions,
   OpenAICredentials,
-  modelLimit,
 } from '@typebot.io/schemas/features/blocks/integrations/openai'
-import type {
-  ChatCompletionRequestMessage,
-  CreateChatCompletionRequest,
-  CreateChatCompletionResponse,
-} from 'openai'
-import { byId, isNotEmpty, isEmpty } from '@typebot.io/lib'
+import { isEmpty } from '@typebot.io/lib'
 import { decrypt, isCredentialsV2 } from '@typebot.io/lib/api/encryption'
-import { saveErrorLog } from '@/features/logs/saveErrorLog'
 import { updateVariables } from '@/features/variables/updateVariables'
-import { parseVariables } from '@/features/variables/parseVariables'
 import { parseVariableNumber } from '@/features/variables/parseVariableNumber'
-import { encoding_for_model } from '@dqbd/tiktoken'
-import got from 'got'
 import { resumeChatCompletion } from './resumeChatCompletion'
 import { isPlaneteScale } from '@/helpers/api/isPlanetScale'
 import { isVercel } from '@/helpers/api/isVercel'
-
-const minTokenCompletion = 200
-const createChatEndpoint = 'https://api.openai.com/v1/chat/completions'
+import { parseChatCompletionMessages } from './parseChatCompletionMessages'
+import { executeChatCompletionOpenAIRequest } from './executeChatCompletionOpenAIRequest'
 
 export const createChatCompletionOpenAI = async (
   state: SessionState,
@@ -63,9 +46,8 @@ export const createChatCompletionOpenAI = async (
     credentials.data,
     credentials.iv
   )) as OpenAICredentials['data']
-  const { variablesTransformedToList, messages } = parseMessages(
-    newSessionState.typebot.variables,
-    options.model
+  const { variablesTransformedToList, messages } = parseChatCompletionMessages(
+    newSessionState.typebot.variables
   )(options.messages)
   if (variablesTransformedToList.length > 0)
     newSessionState = await updateVariables(state)(variablesTransformedToList)
@@ -74,177 +56,37 @@ export const createChatCompletionOpenAI = async (
     options.advancedSettings?.temperature
   )
 
-  try {
-    if (
-      isPlaneteScale() &&
-      isVercel() &&
-      isCredentialsV2(credentials) &&
-      newSessionState.isStreamEnabled
-    )
-      return {
-        clientSideActions: [{ streamOpenAiChatCompletion: { messages } }],
-        outgoingEdgeId,
-        newSessionState,
-      }
-    const response = await got
-      .post(createChatEndpoint, {
-        headers: {
-          Authorization: `Bearer ${apiKey}`,
-        },
-        json: {
-          model: options.model,
-          messages,
-          temperature,
-        } satisfies CreateChatCompletionRequest,
-      })
-      .json<CreateChatCompletionResponse>()
-    const messageContent = response.choices.at(0)?.message?.content
-    const totalTokens = response.usage?.total_tokens
-    if (isEmpty(messageContent)) {
-      console.error('OpenAI block returned empty message', response)
-      return { outgoingEdgeId, newSessionState }
-    }
-    return resumeChatCompletion(newSessionState, {
-      options,
-      outgoingEdgeId,
-    })(messageContent, totalTokens)
-  } catch (err) {
-    const log: NonNullable<ChatReply['logs']>[number] = {
-      status: 'error',
-      description: 'OpenAI block returned error',
-    }
-
-    if (err && typeof err === 'object') {
-      if ('response' in err) {
-        const { status, data } = err.response as {
-          status: string
-          data: string
-        }
-        log.details = {
-          status,
-          data,
-        }
-      } else if ('message' in err) {
-        log.details = err.message
-      }
-    }
-
-    state.result &&
-      (await saveErrorLog({
-        resultId: state.result.id,
-        message: log.description,
-        details: log.details,
-      }))
-    return {
-      outgoingEdgeId,
-      logs: [log],
-      newSessionState,
-    }
-  }
-}
-
-const parseMessages =
-  (variables: Variable[], model: ChatCompletionOpenAIOptions['model']) =>
-  (
-    messages: ChatCompletionOpenAIOptions['messages']
-  ): {
-    variablesTransformedToList: VariableWithValue[]
-    messages: ChatCompletionRequestMessage[]
-  } => {
-    const variablesTransformedToList: VariableWithValue[] = []
-    const firstMessagesSequenceIndex = messages.findIndex(
-      (message) => message.role === 'Messages sequence ✨'
-    )
-    const parsedMessages = messages
-      .flatMap((message, index) => {
-        if (!message.role) return
-        if (message.role === 'Messages sequence ✨') {
-          if (
-            !message.content?.assistantMessagesVariableId ||
-            !message.content?.userMessagesVariableId
-          )
-            return
-          variablesTransformedToList.push(
-            ...transformStringVariablesToList(variables)([
-              message.content.assistantMessagesVariableId,
-              message.content.userMessagesVariableId,
-            ])
-          )
-          const updatedVariables = variables.map((variable) => {
-            const variableTransformedToList = variablesTransformedToList.find(
-              byId(variable.id)
-            )
-            if (variableTransformedToList) return variableTransformedToList
-            return variable
-          })
-
-          const userMessages = (updatedVariables.find(
-            (variable) =>
-              variable.id === message.content?.userMessagesVariableId
-          )?.value ?? []) as string[]
-
-          const assistantMessages = (updatedVariables.find(
-            (variable) =>
-              variable.id === message.content?.assistantMessagesVariableId
-          )?.value ?? []) as string[]
-
-          let allMessages: ChatCompletionRequestMessage[] = []
-
-          if (userMessages.length > assistantMessages.length)
-            allMessages = userMessages.flatMap((userMessage, index) => [
-              {
-                role: 'user',
-                content: userMessage,
-              },
-              { role: 'assistant', content: assistantMessages.at(index) ?? '' },
-            ]) satisfies ChatCompletionRequestMessage[]
-          else {
-            allMessages = assistantMessages.flatMap(
-              (assistantMessage, index) => [
-                { role: 'assistant', content: assistantMessage },
-                {
-                  role: 'user',
-                  content: userMessages.at(index) ?? '',
-                },
-              ]
-            ) satisfies ChatCompletionRequestMessage[]
-          }
-
-          if (index !== firstMessagesSequenceIndex) return allMessages
-
-          const encoder = encoding_for_model(model)
-          let messagesToSend: ChatCompletionRequestMessage[] = []
-          let tokenCount = 0
-
-          for (let i = allMessages.length - 1; i >= 0; i--) {
-            const message = allMessages[i]
-            const tokens = encoder.encode(message.content)
-
-            if (
-              tokenCount + tokens.length - minTokenCompletion >
-              modelLimit[model]
-            ) {
-              break
-            }
-            tokenCount += tokens.length
-            messagesToSend = [message, ...messagesToSend]
-          }
-
-          encoder.free()
-
-          return messagesToSend
-        }
-        return {
-          role: message.role,
-          content: parseVariables(variables)(message.content),
-        } satisfies ChatCompletionRequestMessage
-      })
-      .filter(
-        (message) => isNotEmpty(message?.role) && isNotEmpty(message?.content)
-      ) as ChatCompletionRequestMessage[]
-
-    return {
-      variablesTransformedToList,
-      messages: parsedMessages,
-    }
-  }
+  if (
+    isPlaneteScale() &&
+    isVercel() &&
+    isCredentialsV2(credentials) &&
+    newSessionState.isStreamEnabled
+  )
+    return {
+      clientSideActions: [{ streamOpenAiChatCompletion: { messages } }],
+      outgoingEdgeId,
+      newSessionState,
+    }
+  const { response, logs } = await executeChatCompletionOpenAIRequest({
+    apiKey,
+    messages,
+    model: options.model,
+    temperature,
+  })
+  if (!response)
+    return {
+      outgoingEdgeId,
+      logs,
+    }
+  const messageContent = response.choices.at(0)?.message?.content
+  const totalTokens = response.usage?.total_tokens
+  if (isEmpty(messageContent)) {
+    console.error('OpenAI block returned empty message', response)
+    return { outgoingEdgeId, newSessionState }
+  }
+  return resumeChatCompletion(newSessionState, {
+    options,
+    outgoingEdgeId,
+    logs,
+  })(messageContent, totalTokens)
+}
@@ -0,0 +1,74 @@
+import { ChatReply } from '@typebot.io/schemas'
+import got, { HTTPError } from 'got'
+import type {
+  CreateChatCompletionRequest,
+  CreateChatCompletionResponse,
+} from 'openai'
+
+const createChatEndpoint = 'https://api.openai.com/v1/chat/completions'
+
+type Props = Pick<CreateChatCompletionRequest, 'messages' | 'model'> & {
+  apiKey: string
+  temperature: number | undefined
+  currentLogs?: ChatReply['logs']
+  isRetrying?: boolean
+}
+
+export const executeChatCompletionOpenAIRequest = async ({
+  apiKey,
+  model,
+  messages,
+  temperature,
+  currentLogs = [],
+}: Props): Promise<{
+  response?: CreateChatCompletionResponse
+  logs?: ChatReply['logs']
+}> => {
+  const logs: ChatReply['logs'] = currentLogs
+  if (messages.length === 0) return { logs }
+  try {
+    const response = await got
+      .post(createChatEndpoint, {
+        headers: {
+          Authorization: `Bearer ${apiKey}`,
+        },
+        json: {
+          model,
+          messages,
+          temperature,
+        } satisfies CreateChatCompletionRequest,
+      })
+      .json<CreateChatCompletionResponse>()
+    return { response, logs }
+  } catch (error) {
+    if (error instanceof HTTPError) {
+      if (error.response.statusCode === 400) {
+        const log = {
+          status: 'info',
+          description:
+            'Max tokens limit reached, automatically trimming first message.',
+        }
+        logs.push(log)
+
+        return executeChatCompletionOpenAIRequest({
+          apiKey,
+          model,
+          messages: messages.slice(1),
+          temperature,
+          currentLogs: logs,
+        })
+      }
+      logs.push({
+        status: 'error',
+        description: `OpenAI API error - ${error.response.statusCode}`,
+        details: error.response.body,
+      })
+      return { logs }
+    }
+    logs.push({
+      status: 'error',
+      description: `Internal error`,
+    })
+    return { logs }
+  }
+}
@@ -0,0 +1,90 @@
+import { parseVariables } from '@/features/variables/parseVariables'
+import { transformStringVariablesToList } from '@/features/variables/transformVariablesToList'
+import { byId, isNotEmpty } from '@typebot.io/lib'
+import { Variable, VariableWithValue } from '@typebot.io/schemas'
+import { ChatCompletionOpenAIOptions } from '@typebot.io/schemas/features/blocks/integrations/openai'
+import type { ChatCompletionRequestMessage } from 'openai'
+
+export const parseChatCompletionMessages =
+  (variables: Variable[]) =>
+  (
+    messages: ChatCompletionOpenAIOptions['messages']
+  ): {
+    variablesTransformedToList: VariableWithValue[]
+    messages: ChatCompletionRequestMessage[]
+  } => {
+    const variablesTransformedToList: VariableWithValue[] = []
+    const parsedMessages = messages
+      .flatMap((message) => {
+        if (!message.role) return
+        if (message.role === 'Messages sequence ✨') {
+          if (
+            !message.content?.assistantMessagesVariableId ||
+            !message.content?.userMessagesVariableId
+          )
+            return
+          variablesTransformedToList.push(
+            ...transformStringVariablesToList(variables)([
+              message.content.assistantMessagesVariableId,
+              message.content.userMessagesVariableId,
+            ])
+          )
+          const updatedVariables = variables.map((variable) => {
+            const variableTransformedToList = variablesTransformedToList.find(
+              byId(variable.id)
+            )
+            if (variableTransformedToList) return variableTransformedToList
+            return variable
+          })
+
+          const userMessages = (updatedVariables.find(
+            (variable) =>
+              variable.id === message.content?.userMessagesVariableId
+          )?.value ?? []) as string[]
+
+          const assistantMessages = (updatedVariables.find(
+            (variable) =>
+              variable.id === message.content?.assistantMessagesVariableId
+          )?.value ?? []) as string[]
+
+          let allMessages: ChatCompletionRequestMessage[] = []
+
+          if (userMessages.length > assistantMessages.length)
+            allMessages = userMessages.flatMap((userMessage, index) => [
+              {
+                role: 'user',
+                content: userMessage,
+              },
+              { role: 'assistant', content: assistantMessages.at(index) ?? '' },
+            ]) satisfies ChatCompletionRequestMessage[]
+          else {
+            allMessages = assistantMessages.flatMap(
+              (assistantMessage, index) => [
+                { role: 'assistant', content: assistantMessage },
+                {
+                  role: 'user',
+                  content: userMessages.at(index) ?? '',
+                },
+              ]
+            ) satisfies ChatCompletionRequestMessage[]
+          }
+
+          return allMessages
+        }
+        return {
+          role: message.role,
+          content: parseVariables(variables)(message.content),
+          name: message.name
+            ? parseVariables(variables)(message.name)
+            : undefined,
+        } satisfies ChatCompletionRequestMessage
+      })
+      .filter(
+        (message) => isNotEmpty(message?.role) && isNotEmpty(message?.content)
+      ) as ChatCompletionRequestMessage[]
+
+    return {
+      variablesTransformedToList,
+      messages: parsedMessages,
+    }
+  }
@@ -1,7 +1,7 @@
 import { saveSuccessLog } from '@/features/logs/saveSuccessLog'
 import { updateVariables } from '@/features/variables/updateVariables'
 import { byId, isDefined } from '@typebot.io/lib'
-import { SessionState } from '@typebot.io/schemas'
+import { ChatReply, SessionState } from '@typebot.io/schemas'
 import { ChatCompletionOpenAIOptions } from '@typebot.io/schemas/features/blocks/integrations/openai'
 import { VariableWithUnknowValue } from '@typebot.io/schemas/features/typebot/variable'
 
@@ -11,7 +11,12 @@ export const resumeChatCompletion =
     {
       outgoingEdgeId,
       options,
-    }: { outgoingEdgeId?: string; options: ChatCompletionOpenAIOptions }
+      logs,
+    }: {
+      outgoingEdgeId?: string
+      options: ChatCompletionOpenAIOptions
+      logs?: ChatReply['logs']
+    }
   ) =>
   async (message: string, totalTokens?: number) => {
     let newSessionState = state
@@ -48,5 +53,6 @@ export const resumeChatCompletion =
     return {
       outgoingEdgeId,
       newSessionState,
+      logs,
     }
   }
@@ -6,22 +6,21 @@ import { IntegrationBlockType } from './enums'
 export const openAITasks = ['Create chat completion', 'Create image'] as const
 
 export const chatCompletionModels = [
-  'gpt-4',
-  'gpt-4-0314',
-  'gpt-4-32k',
-  'gpt-4-32k-0314',
   'gpt-3.5-turbo',
+  'gpt-3.5-turbo-0613',
+  'gpt-3.5-turbo-16k',
+  'gpt-3.5-turbo-16k-0613',
   'gpt-3.5-turbo-0301',
+  'gpt-4',
+  'gpt-4-0613',
+  'gpt-4-32k',
+  'gpt-4-32k-0613',
+  'gpt-4-32k-0314',
+  'gpt-4-0314',
 ] as const
 
-export const modelLimit = {
-  'gpt-3.5-turbo': 4096,
-  'gpt-3.5-turbo-0301': 4096,
-  'gpt-4': 8192,
-  'gpt-4-0314': 8192,
-  'gpt-4-32k': 32768,
-  'gpt-4-32k-0314': 32768,
-} as const
+export const deprecatedCompletionModels: (typeof chatCompletionModels)[number][] =
+  ['gpt-3.5-turbo-0301', 'gpt-4-32k-0314', 'gpt-4-0314']
 
 export const chatCompletionMessageRoles = [
   'system',
@@ -52,6 +51,7 @@ export const chatCompletionMessageSchema = z.object({
   id: z.string(),
   role: z.enum(chatCompletionMessageRoles).optional(),
   content: z.string().optional(),
+  name: z.string().optional(),
 })
 
 const chatCompletionCustomMessageSchema = z.object({
pnpm-lock.yaml (generated)
@@ -493,9 +493,6 @@ importers:
 
   apps/viewer:
     dependencies:
-      '@dqbd/tiktoken':
-        specifier: ^1.0.7
-        version: 1.0.7
       '@planetscale/database':
        specifier: ^1.7.0
        version: 1.7.0
@@ -4937,10 +4934,6 @@ packages:
       - webpack-cli
     dev: false
 
-  /@dqbd/tiktoken@1.0.7:
-    resolution: {integrity: sha512-bhR5k5W+8GLzysjk8zTMVygQZsgvf7W1F0IlL4ZQ5ugjo5rCyiwGM5d8DYriXspytfu98tv59niang3/T+FoDw==}
-    dev: false
-
   /@emotion/babel-plugin@11.11.0:
     resolution: {integrity: sha512-m4HEDZleaaCH+XgDDsPF15Ht6wTLsgDTeR3WYj9Q/k76JtWhrJjcP4+/XlG8LGT/Rol9qUfOIztXeA84ATpqPQ==}
     dependencies: