⬆️ Upgrade AI SDK (#1641)

Baptiste Arnaud
2024-07-15 14:32:42 +02:00
committed by GitHub
parent a4fb8b6d10
commit 043f0054b0
60 changed files with 2183 additions and 1683 deletions

View File

@@ -1,10 +1,20 @@
import { createAction, option } from '@typebot.io/forge'
import { auth } from '../auth'
import { Anthropic } from '@anthropic-ai/sdk'
import { AnthropicStream } from 'ai'
import { anthropicModels, defaultAnthropicOptions } from '../constants'
import { parseChatMessages } from '../helpers/parseChatMessages'
import {
anthropicLegacyModels,
anthropicModelLabels,
anthropicModels,
defaultAnthropicOptions,
maxToolRoundtrips,
} from '../constants'
import { isDefined } from '@typebot.io/lib'
import { createAnthropic } from '@ai-sdk/anthropic'
import { generateText } from 'ai'
import { runChatCompletionStream } from '../helpers/runChatCompletionStream'
import { toolsSchema } from '@typebot.io/ai/schemas'
import { parseTools } from '@typebot.io/ai/parseTools'
import { parseChatCompletionMessages } from '@typebot.io/ai/parseChatCompletionMessages'
import { isModelCompatibleWithVision } from '../helpers/isModelCompatibleWithVision'
const nativeMessageContentSchema = {
content: option.string.layout({
@@ -40,7 +50,11 @@ const dialogueMessageItemSchema = option.object({
export const options = option.object({
model: option.enum(anthropicModels).layout({
defaultValue: defaultAnthropicOptions.model,
toLabels: (val) =>
val
? anthropicModelLabels[val as (typeof anthropicModels)[number]]
: undefined,
hiddenItems: anthropicLegacyModels,
}),
messages: option
.array(
@@ -51,6 +65,7 @@ export const options = option.object({
])
)
.layout({ accordion: 'Messages', itemLabel: 'message', isOrdered: true }),
tools: toolsSchema,
systemMessage: option.string.layout({
accordion: 'Advanced Settings',
label: 'System prompt',
@@ -76,8 +91,12 @@ export const options = option.object({
}),
})
const transformToChatCompletionOptions = (options: any) => ({
const transformToChatCompletionOptions = (
options: any,
resetModel = false
) => ({
...options,
model: resetModel ? undefined : options.model,
action: 'Create chat completion',
responseMapping: options.responseMapping?.map((res: any) =>
res.item === 'Message Content' ? { ...res, item: 'Message content' } : res
@@ -91,11 +110,11 @@ export const createChatMessage = createAction({
turnableInto: [
{
blockId: 'mistral',
transform: transformToChatCompletionOptions,
transform: (opts) => transformToChatCompletionOptions(opts, true),
},
{
blockId: 'openai',
transform: transformToChatCompletionOptions,
transform: (opts) => transformToChatCompletionOptions(opts, true),
},
{ blockId: 'open-router', transform: transformToChatCompletionOptions },
{ blockId: 'together-ai', transform: transformToChatCompletionOptions },
@@ -104,72 +123,43 @@ export const createChatMessage = createAction({
responseMapping?.map((res) => res.variableId).filter(isDefined) ?? [],
run: {
server: async ({ credentials: { apiKey }, options, variables, logs }) => {
const client = new Anthropic({
apiKey: apiKey,
const modelName = options.model ?? defaultAnthropicOptions.model
const model = createAnthropic({
apiKey,
})(modelName)
const { text } = await generateText({
model,
temperature: options.temperature
? Number(options.temperature)
: undefined,
messages: await parseChatCompletionMessages({
messages: options.messages,
isVisionEnabled: isModelCompatibleWithVision(modelName),
shouldDownloadImages: true,
variables,
}),
tools: parseTools({ tools: options.tools, variables }),
maxToolRoundtrips: maxToolRoundtrips,
})
const messages = await parseChatMessages({ options, variables })
try {
const reply = await client.messages.create({
messages,
model: options.model ?? defaultAnthropicOptions.model,
system: options.systemMessage,
temperature: options.temperature
? Number(options.temperature)
: undefined,
max_tokens: options.maxTokens
? Number(options.maxTokens)
: defaultAnthropicOptions.maxTokens,
})
messages.push(reply)
options.responseMapping?.forEach((mapping) => {
if (!mapping.variableId) return
if (!mapping.item || mapping.item === 'Message Content')
variables.set(mapping.variableId, reply.content[0].text)
})
} catch (error) {
if (error instanceof Anthropic.APIError) {
logs.add({
status: 'error',
description: `${error.status} ${error.name}`,
details: error.message,
})
} else {
throw error
}
}
options.responseMapping?.forEach((mapping) => {
if (!mapping.variableId) return
if (!mapping.item || mapping.item === 'Message Content')
variables.set(mapping.variableId, text)
})
},
stream: {
getStreamVariableId: (options) =>
options.responseMapping?.find(
(res) => res.item === 'Message Content' || !res.item
)?.variableId,
run: async ({ credentials: { apiKey }, options, variables }) => {
const client = new Anthropic({
apiKey: apiKey,
})
const messages = await parseChatMessages({ options, variables })
const response = await client.messages.create({
messages,
model: options.model ?? defaultAnthropicOptions.model,
system: options.systemMessage,
temperature: options.temperature
? Number(options.temperature)
: undefined,
max_tokens: options.maxTokens
? Number(options.maxTokens)
: defaultAnthropicOptions.maxTokens,
stream: true,
})
return { stream: AnthropicStream(response) }
},
run: async ({ credentials: { apiKey }, options, variables }) =>
runChatCompletionStream({
credentials: { apiKey },
options,
variables,
}),
},
},
})

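A quick worked example of `transformToChatCompletionOptions` above, using a hypothetical options object (only the `model` reset, the `action` value, and the renaming of 'Message Content' to 'Message content' come from the code in this diff):

const anthropicOptions = {
  model: 'claude-3-opus-20240229',
  responseMapping: [{ item: 'Message Content', variableId: 'v1' }],
}

transformToChatCompletionOptions(anthropicOptions, true)
// => {
//   model: undefined, // reset so the target block falls back to its own default
//   action: 'Create chat completion',
//   responseMapping: [{ item: 'Message content', variableId: 'v1' }],
// }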
View File

@@ -1,4 +1,5 @@
export const anthropicModels = [
'claude-3-5-sonnet-20240620',
'claude-3-opus-20240229',
'claude-3-sonnet-20240229',
'claude-3-haiku-20240307',
@@ -7,8 +8,24 @@ export const anthropicModels = [
'claude-instant-1.2',
] as const
export const anthropicLegacyModels = [
'claude-2.1',
'claude-2.0',
'claude-instant-1.2',
]
export const anthropicModelLabels = {
'claude-3-5-sonnet-20240620': 'Claude 3.5 Sonnet',
'claude-3-opus-20240229': 'Claude 3.0 Opus',
'claude-3-sonnet-20240229': 'Claude 3.0 Sonnet',
'claude-3-haiku-20240307': 'Claude 3.0 Haiku',
'claude-2.1': 'Claude 2.1',
'claude-2.0': 'Claude 2.0',
'claude-instant-1.2': 'Claude Instant 1.2',
} satisfies Record<(typeof anthropicModels)[number], string>
export const defaultAnthropicOptions = {
model: anthropicModels[0],
model: 'claude-3-opus-20240229',
temperature: 1,
maxTokens: 1024,
} as const
@@ -21,3 +38,5 @@ export const supportedImageTypes = [
'image/gif',
'image/webp',
] as const
export const maxToolRoundtrips = 10

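The `satisfies Record<(typeof anthropicModels)[number], string>` clause makes the label map exhaustive: adding a model to `anthropicModels` without a matching label becomes a compile-time error. A minimal standalone sketch of the pattern:

const models = ['model-a', 'model-b'] as const

const labels = {
  'model-a': 'Model A',
  'model-b': 'Model B', // removing this entry would be a compile-time error
} satisfies Record<(typeof models)[number], string>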
View File

@@ -0,0 +1,5 @@
import { wildcardMatch } from '@typebot.io/lib/wildcardMatch'
import { modelsWithImageUrlSupport } from '../constants'
export const isModelCompatibleWithVision = (model: string | undefined) =>
model ? wildcardMatch(modelsWithImageUrlSupport)(model) : false

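A usage sketch for the new helper; the patterns inside `modelsWithImageUrlSupport` are not part of this diff, so the wildcard assumed here is illustrative only:

// Assuming modelsWithImageUrlSupport contains patterns such as 'claude-3-*':
isModelCompatibleWithVision('claude-3-haiku-20240307') // true
isModelCompatibleWithVision('claude-2.1') // false
isModelCompatibleWithVision(undefined) // false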
View File

@@ -1,148 +0,0 @@
import { Anthropic } from '@anthropic-ai/sdk'
import { options as createMessageOptions } from '../actions/createChatMessage'
import { VariableStore } from '@typebot.io/forge'
import { isDefined, isEmpty } from '@typebot.io/lib'
import { z } from '@typebot.io/forge/zod'
import ky, { HTTPError } from 'ky'
import {
defaultAnthropicOptions,
modelsWithImageUrlSupport,
supportedImageTypes,
} from '../constants'
import { wildcardMatch } from '@typebot.io/lib/wildcardMatch'
const isModelCompatibleWithImageUrls = (model: string | undefined) =>
model ? wildcardMatch(modelsWithImageUrlSupport)(model) : false
export const parseChatMessages = async ({
options: { messages, model },
variables,
}: {
options: Pick<z.infer<typeof createMessageOptions>, 'messages' | 'model'>
variables: VariableStore
}): Promise<Anthropic.Messages.MessageParam[]> => {
if (!messages) return []
const isVisionEnabled = isModelCompatibleWithImageUrls(
model ?? defaultAnthropicOptions.model
)
const parsedMessages = (
await Promise.all(
messages.map(async (message) => {
if (!message.role) return
if (message.role === 'Dialogue') {
if (!message.dialogueVariableId) return
const dialogue = variables.get(message.dialogueVariableId) ?? []
const dialogueArr = Array.isArray(dialogue) ? dialogue : [dialogue]
return Promise.all(
dialogueArr.map(async (dialogueItem, index) => {
if (index === 0 && message.startsBy === 'assistant')
return {
role: 'assistant',
content: dialogueItem,
}
if (index % (message.startsBy === 'assistant' ? 1 : 2) === 0) {
return {
role: 'user',
content: isVisionEnabled
? await splitUserTextMessageIntoBlocks(dialogueItem ?? '')
: dialogueItem,
}
}
return {
role: 'assistant',
content: dialogueItem,
}
})
)
}
if (!message.content) return
const content = variables.parse(message.content)
if (isEmpty(content)) return
if (message.role === 'user')
return {
role: 'user',
content: isVisionEnabled
? await splitUserTextMessageIntoBlocks(content)
: content,
}
return {
role: message.role,
content,
}
})
)
)
.flat()
.filter((message) => {
return isDefined(message?.role) && isDefined(message.content)
}) as Anthropic.Messages.MessageParam[]
return parsedMessages
}
const splitUserTextMessageIntoBlocks = async (
input: string
): Promise<
| string
| (Anthropic.Messages.TextBlockParam | Anthropic.Messages.ImageBlockParam)[]
> => {
const urlRegex = /(^|\n\n)(https?:\/\/[^\s]+)(\n\n|$)/g
const match = input.match(urlRegex)
if (!match) return input
const parts: (
| Anthropic.Messages.TextBlockParam
| Anthropic.Messages.ImageBlockParam
)[] = []
let processedInput = input
for (const url of match) {
const textBeforeUrl = processedInput.slice(0, processedInput.indexOf(url))
if (textBeforeUrl.trim().length > 0) {
parts.push({ type: 'text', text: textBeforeUrl })
}
const cleanUrl = url.trim()
try {
const response = await ky.get(cleanUrl)
if (
!response.ok ||
!supportedImageTypes.includes(
response.headers.get('content-type') as any
)
) {
parts.push({ type: 'text', text: cleanUrl })
} else {
parts.push({
type: 'image',
source: {
data: Buffer.from(await response.arrayBuffer()).toString('base64'),
type: 'base64',
media_type: response.headers.get('content-type') as any,
},
})
}
} catch (err) {
if (err instanceof HTTPError) {
console.log(err.response.status, await err.response.text())
} else {
console.error(err)
}
}
processedInput = processedInput.slice(
processedInput.indexOf(url) + url.length
)
}
if (processedInput.trim().length > 0) {
parts.push({ type: 'text', text: processedInput })
}
return parts
}

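The index arithmetic in the `Dialogue` branch of this removed helper is easy to misread, so here is a worked trace with a hypothetical dialogue variable. With `startsBy` unset the divisor is 2 and roles alternate starting with `user`; with `startsBy === 'assistant'` index 0 is `assistant`, and every later index satisfies `index % 1 === 0`, so the remaining items are all sent as `user`:

const dialogue = ['Hi!', 'Hello, how can I help?', 'Tell me a joke.']

// startsBy unset → roles: ['user', 'assistant', 'user']
// startsBy === 'assistant' → roles: ['assistant', 'user', 'user']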
View File

@@ -0,0 +1,110 @@
import { createAnthropic } from '@ai-sdk/anthropic'
import { defaultAnthropicOptions, maxToolRoundtrips } from '../constants'
import { APICallError, streamText, ToolCallPart, ToolResultPart } from 'ai'
import { isModelCompatibleWithVision } from './isModelCompatibleWithVision'
import { VariableStore } from '@typebot.io/forge'
import { ChatCompletionOptions } from '@typebot.io/openai-block/shared/parseChatCompletionOptions'
import { parseChatCompletionMessages } from '@typebot.io/ai/parseChatCompletionMessages'
import { parseTools } from '@typebot.io/ai/parseTools'
import { pumpStreamUntilDone } from '@typebot.io/ai/pumpStreamUntilDone'
import { appendToolResultsToMessages } from '@typebot.io/ai/appendToolResultsToMessages'
type Props = {
credentials: { apiKey?: string }
options: {
model?: string
temperature?: ChatCompletionOptions['temperature']
messages?: ChatCompletionOptions['messages']
tools?: ChatCompletionOptions['tools']
}
variables: VariableStore
}
export const runChatCompletionStream = async ({
credentials: { apiKey },
options,
variables,
}: Props): Promise<{
stream?: ReadableStream<any>
httpError?: { status: number; message: string }
}> => {
if (!apiKey) return { httpError: { status: 401, message: 'API key missing' } }
const modelName = options.model?.trim() ?? defaultAnthropicOptions.model
if (!modelName)
return { httpError: { status: 400, message: 'model not found' } }
const model = createAnthropic({
apiKey,
})(modelName)
try {
const streamConfig = {
model,
temperature: options.temperature
? Number(options.temperature)
: undefined,
tools: parseTools({ tools: options.tools, variables }),
messages: await parseChatCompletionMessages({
messages: options.messages,
isVisionEnabled: isModelCompatibleWithVision(modelName),
shouldDownloadImages: false,
variables,
}),
}
const response = await streamText(streamConfig)
let totalToolCalls = 0
let toolCalls: ToolCallPart[] = []
let toolResults: ToolResultPart[] = []
return {
stream: new ReadableStream({
async start(controller) {
const reader = response.toAIStream().getReader()
await pumpStreamUntilDone(controller, reader)
toolCalls = await response.toolCalls
if (toolCalls.length > 0)
toolResults = (await response.toolResults) as ToolResultPart[]
while (
toolCalls &&
toolCalls.length > 0 &&
totalToolCalls < maxToolRoundtrips
) {
totalToolCalls += 1
const newResponse = await streamText({
...streamConfig,
messages: appendToolResultsToMessages({
messages: streamConfig.messages,
toolCalls,
toolResults,
}),
})
const reader = newResponse.toAIStream().getReader()
await pumpStreamUntilDone(controller, reader)
toolCalls = await newResponse.toolCalls
if (toolCalls.length > 0)
toolResults = (await newResponse.toolResults) as ToolResultPart[]
}
controller.close()
},
}),
}
} catch (err) {
if (err instanceof APICallError) {
return {
httpError: { status: err.statusCode ?? 500, message: err.message },
}
}
return {
httpError: {
status: 500,
message: 'An error occurred while generating the stream',
},
}
}
}

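`pumpStreamUntilDone` comes from `@typebot.io/ai` and its body is not part of this diff; the following is a plausible sketch only, not the package's actual implementation. The key constraint, visible in the loop above, is that the helper must forward chunks without closing the controller, since the caller keeps the stream open across tool roundtrips and calls `controller.close()` itself:

const pumpStreamUntilDone = async (
  controller: ReadableStreamDefaultController,
  reader: ReadableStreamDefaultReader<Uint8Array>
): Promise<void> => {
  let done = false
  while (!done) {
    const result = await reader.read()
    done = result.done
    // Forward each chunk; closing is left to the caller so it can chain
    // follow-up streams for tool-call roundtrips.
    if (result.value !== undefined) controller.enqueue(result.value)
  }
}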
View File

@@ -15,10 +15,10 @@
"typescript": "5.4.5"
},
"dependencies": {
"@anthropic-ai/sdk": "0.20.6",
"@ai-sdk/anthropic": "0.0.21",
"@ai-sdk/anthropic": "0.0.30",
"@typebot.io/openai-block": "workspace:*",
"ai": "3.2.1",
"ai": "3.2.22",
"@typebot.io/ai": "workspace:*",
"ky": "1.2.4"
}
}

View File

@@ -14,6 +14,6 @@
"typescript": "5.4.5"
},
"dependencies": {
"ai": "3.2.1"
"ai": "3.2.22"
}
}

View File

@@ -24,7 +24,6 @@ export const convertTextToSpeech = createAction({
fetcher: 'fetchModels',
label: 'Model',
placeholder: 'Select a model',
defaultValue: 'eleven_monolingual_v1',
}),
saveUrlInVariableId: option.string.layout({
label: 'Save audio URL in variable',

View File

@@ -5,6 +5,11 @@ import { parseMessages } from '../helpers/parseMessages'
import { createMistral } from '@ai-sdk/mistral'
import { generateText, streamText } from 'ai'
import { fetchModels } from '../helpers/fetchModels'
import { toolsSchema } from '@typebot.io/ai/schemas'
import { parseTools } from '@typebot.io/ai/parseTools'
import { maxToolRoundtrips } from '../constants'
import { parseChatCompletionMessages } from '@typebot.io/ai/parseChatCompletionMessages'
import { runChatCompletionStream } from '../helpers/runChatCompletionStream'
const nativeMessageContentSchema = {
content: option.string.layout({
@@ -59,6 +64,7 @@ export const options = option.object({
])
)
.layout({ accordion: 'Messages', itemLabel: 'message', isOrdered: true }),
tools: toolsSchema,
responseMapping: option.saveResponseArray(['Message content']).layout({
accordion: 'Save response',
}),
@@ -71,6 +77,10 @@ export const createChatCompletion = createAction({
turnableInto: [
{
blockId: 'openai',
transform: (opts) => ({
...opts,
model: undefined,
}),
},
{
blockId: 'together-ai',
@@ -110,8 +120,14 @@ export const createChatCompletion = createAction({
const { text } = await generateText({
model,
messages: parseMessages({ options, variables }),
tools: {},
messages: await parseChatCompletionMessages({
messages: options.messages,
variables,
isVisionEnabled: false,
shouldDownloadImages: false,
}),
tools: parseTools({ tools: options.tools, variables }),
maxToolRoundtrips: maxToolRoundtrips,
})
options.responseMapping?.forEach((mapping) => {
@@ -125,19 +141,12 @@ export const createChatCompletion = createAction({
options.responseMapping?.find(
(res) => res.item === 'Message content' || !res.item
)?.variableId,
run: async ({ credentials: { apiKey }, options, variables }) => {
if (!options.model) return {}
const model = createMistral({
apiKey,
})(options.model)
const response = await streamText({
model,
messages: parseMessages({ options, variables }),
})
return { stream: response.toAIStream() }
},
run: async ({ credentials: { apiKey }, options, variables }) =>
runChatCompletionStream({
credentials: { apiKey },
options,
variables,
}),
},
},
})

View File

@@ -1 +1,3 @@
export const apiBaseUrl = 'https://api.mistral.ai'
export const maxToolRoundtrips = 10

View File

@@ -0,0 +1,105 @@
import { createMistral } from '@ai-sdk/mistral'
import { APICallError, streamText, ToolCallPart, ToolResultPart } from 'ai'
import { VariableStore } from '@typebot.io/forge'
import { ChatCompletionOptions } from '@typebot.io/openai-block/shared/parseChatCompletionOptions'
import { parseChatCompletionMessages } from '@typebot.io/ai/parseChatCompletionMessages'
import { parseTools } from '@typebot.io/ai/parseTools'
import { maxToolRoundtrips } from '../constants'
import { pumpStreamUntilDone } from '@typebot.io/ai/pumpStreamUntilDone'
import { appendToolResultsToMessages } from '@typebot.io/ai/appendToolResultsToMessages'
type Props = {
credentials: { apiKey?: string }
options: {
model?: string
temperature?: ChatCompletionOptions['temperature']
messages?: ChatCompletionOptions['messages']
tools?: ChatCompletionOptions['tools']
}
variables: VariableStore
}
export const runChatCompletionStream = async ({
credentials: { apiKey },
options,
variables,
}: Props): Promise<{
stream?: ReadableStream<any>
httpError?: { status: number; message: string }
}> => {
if (!apiKey) return { httpError: { status: 401, message: 'API key missing' } }
const modelName = options.model?.trim()
if (!modelName)
return { httpError: { status: 400, message: 'model not found' } }
const streamConfig = {
model: createMistral({
apiKey,
})(modelName),
messages: await parseChatCompletionMessages({
messages: options.messages,
isVisionEnabled: false,
shouldDownloadImages: false,
variables,
}),
temperature: options.temperature ? Number(options.temperature) : undefined,
tools: parseTools({ tools: options.tools, variables }),
}
try {
const response = await streamText(streamConfig)
let totalToolCalls = 0
let toolCalls: ToolCallPart[] = []
let toolResults: ToolResultPart[] = []
return {
stream: new ReadableStream({
async start(controller) {
const reader = response.toAIStream().getReader()
await pumpStreamUntilDone(controller, reader)
toolCalls = await response.toolCalls
if (toolCalls.length > 0)
toolResults = (await response.toolResults) as ToolResultPart[]
while (
toolCalls &&
toolCalls.length > 0 &&
totalToolCalls < maxToolRoundtrips
) {
totalToolCalls += 1
const newResponse = await streamText({
...streamConfig,
messages: appendToolResultsToMessages({
messages: streamConfig.messages,
toolCalls,
toolResults,
}),
})
const reader = newResponse.toAIStream().getReader()
await pumpStreamUntilDone(controller, reader)
toolCalls = await newResponse.toolCalls
if (toolCalls.length > 0)
toolResults = (await newResponse.toolResults) as ToolResultPart[]
}
controller.close()
},
}),
}
} catch (err) {
if (err instanceof APICallError) {
return {
httpError: { status: err.statusCode ?? 500, message: err.message },
}
}
return {
httpError: {
status: 500,
message: 'An error occurred while generating the stream',
},
}
}
}

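`appendToolResultsToMessages` is the other unshown `@typebot.io/ai` helper in these loops. Presumably it appends the model's tool calls plus their results to the message history so the follow-up `streamText` call sees the completed roundtrip; a sketch under that assumption:

import type { CoreMessage, ToolCallPart, ToolResultPart } from 'ai'

const appendToolResultsToMessages = ({
  messages,
  toolCalls,
  toolResults,
}: {
  messages: CoreMessage[]
  toolCalls: ToolCallPart[]
  toolResults: ToolResultPart[]
}): CoreMessage[] => [
  ...messages,
  // The tool calls the model emitted...
  { role: 'assistant', content: toolCalls },
  // ...and the results produced by executing them.
  { role: 'tool', content: toolResults },
]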
View File

@@ -14,9 +14,10 @@
"typescript": "5.4.5"
},
"dependencies": {
"@ai-sdk/mistral": "0.0.18",
"@ai-sdk/mistral": "0.0.22",
"@typebot.io/openai-block": "workspace:*",
"ai": "3.2.1",
"ky": "1.2.4"
"ai": "3.2.22",
"ky": "1.2.4",
"@typebot.io/ai": "workspace:*"
}
}

View File

@@ -3,8 +3,8 @@ import { auth } from '../auth'
import { parseChatCompletionOptions } from '@typebot.io/openai-block/shared/parseChatCompletionOptions'
import { getChatCompletionSetVarIds } from '@typebot.io/openai-block/shared/getChatCompletionSetVarIds'
import { getChatCompletionStreamVarId } from '@typebot.io/openai-block/shared/getChatCompletionStreamVarId'
import { runChatCompletion } from '@typebot.io/openai-block/shared/runChatCompletion'
import { runChatCompletionStream } from '@typebot.io/openai-block/shared/runChatCompletionStream'
import { runOpenAIChatCompletion } from '@typebot.io/openai-block/shared/runOpenAIChatCompletion'
import { runOpenAIChatCompletionStream } from '@typebot.io/openai-block/shared/runOpenAIChatCompletionStream'
import { defaultOpenRouterOptions } from '../constants'
import ky from 'ky'
import { ModelsResponse } from '../types'
@@ -24,7 +24,6 @@ export const createChatCompletion = createAction({
blockId: 'anthropic',
transform: (options) => ({
...options,
model: undefined,
action: 'Create Chat Message',
responseMapping: options.responseMapping?.map((res: any) =>
res.item === 'Message content'
@@ -36,6 +35,7 @@ export const createChatCompletion = createAction({
],
options: parseChatCompletionOptions({
modelFetchId: 'fetchModels',
defaultTemperature: defaultOpenRouterOptions.temperature,
}),
getSetVariableIds: getChatCompletionSetVarIds,
fetchers: [
@@ -56,18 +56,19 @@
],
run: {
server: (params) =>
runChatCompletion({
runOpenAIChatCompletion({
...params,
config: { baseUrl: defaultOpenRouterOptions.baseUrl },
}),
stream: {
getStreamVariableId: getChatCompletionStreamVarId,
run: async (params) => ({
stream: await runChatCompletionStream({
run: async (params) =>
runOpenAIChatCompletionStream({
...params,
config: { baseUrl: defaultOpenRouterOptions.baseUrl },
config: {
baseUrl: defaultOpenRouterOptions.baseUrl,
},
}),
}),
},
},
})

View File

@@ -1,3 +1,4 @@
export const defaultOpenRouterOptions = {
baseUrl: 'https://openrouter.ai/api/v1',
temperature: 1,
} as const

View File

@@ -11,9 +11,9 @@ import { baseOptions } from '../baseOptions'
import { executeFunction } from '@typebot.io/variables/executeFunction'
import { readDataStream } from 'ai'
import { deprecatedAskAssistantOptions } from '../deprecated'
import { OpenAIAssistantStream } from '../helpers/OpenAIAssistantStream'
import { AssistantStream } from '../helpers/AssistantStream'
import { isModelCompatibleWithVision } from '../helpers/isModelCompatibleWithVision'
import { splitUserTextMessageIntoBlocks } from '../helpers/splitUserTextMessageIntoBlocks'
import { splitUserTextMessageIntoOpenAIBlocks } from '../helpers/splitUserTextMessageIntoOpenAIBlocks'
export const askAssistant = createAction({
auth,
@@ -294,19 +294,16 @@ const createAssistantStream = async ({
{
role: 'user',
content: isModelCompatibleWithVision(assistant.model)
? await splitUserTextMessageIntoBlocks(message)
? await splitUserTextMessageIntoOpenAIBlocks(message)
: message,
}
)
return OpenAIAssistantStream(
return AssistantStream(
{ threadId: currentThreadId, messageId: createdMessage.id },
async ({ forwardStream }) => {
const runStream = openai.beta.threads.runs.createAndStream(
currentThreadId,
{
assistant_id: assistantId,
}
)
const runStream = openai.beta.threads.runs.stream(currentThreadId, {
assistant_id: assistantId,
})
let runResult = await forwardStream(runStream)

View File

@@ -4,8 +4,8 @@ import { auth } from '../auth'
import { baseOptions } from '../baseOptions'
import { parseChatCompletionOptions } from '../shared/parseChatCompletionOptions'
import { getChatCompletionSetVarIds } from '../shared/getChatCompletionSetVarIds'
import { runChatCompletion } from '../shared/runChatCompletion'
import { runChatCompletionStream } from '../shared/runChatCompletionStream'
import { runOpenAIChatCompletion } from '../shared/runOpenAIChatCompletion'
import { runOpenAIChatCompletionStream } from '../shared/runOpenAIChatCompletionStream'
import { getChatCompletionStreamVarId } from '../shared/getChatCompletionStreamVarId'
import { fetchGPTModels } from '../helpers/fetchModels'
@@ -14,7 +14,6 @@ export const createChatCompletion = createAction({
auth,
baseOptions,
options: parseChatCompletionOptions({
defaultModel: defaultOpenAIOptions.model,
defaultTemperature: defaultOpenAIOptions.temperature,
modelFetchId: 'fetchModels',
}),
@@ -55,24 +54,25 @@
],
run: {
server: (params) =>
runChatCompletion({
runOpenAIChatCompletion({
...params,
config: {
baseUrl: defaultOpenAIOptions.baseUrl,
defaultModel: defaultOpenAIOptions.model,
},
compatibility: 'strict',
}),
stream: {
getStreamVariableId: getChatCompletionStreamVarId,
run: async (params) => ({
stream: await runChatCompletionStream({
run: async (params) =>
runOpenAIChatCompletionStream({
...params,
config: {
baseUrl: defaultOpenAIOptions.baseUrl,
defaultModel: defaultOpenAIOptions.model,
},
compatibility: 'strict',
}),
}),
},
},
})

View File

@@ -1,5 +1,7 @@
// Copied from https://github.com/vercel/ai/blob/f9db8fd6543202a8404a7a1a40f938d6270b08ef/packages/core/streams/assistant-response.ts
// Because the stream is not exported from the package
import { AssistantMessage, DataMessage, formatStreamPart } from 'ai'
import { AssistantStream } from 'openai/lib/AssistantStream'
import { AssistantStream as AssistantStreamType } from 'openai/lib/AssistantStream'
import { Run } from 'openai/resources/beta/threads/runs/runs'
/**
@@ -44,14 +46,19 @@ Send a data message to the client. You can use this to provide information for r
/**
Forwards the assistant response stream to the client. Returns the `Run` object after it completes, or when it requires an action.
*/
forwardStream: (stream: AssistantStream) => Promise<Run | undefined>
forwardStream: (stream: AssistantStreamType) => Promise<Run | undefined>
}) => Promise<void>
export const OpenAIAssistantStream = (
/**
The `AssistantResponse` allows you to send a stream of assistant updates to `useAssistant`.
It is designed to facilitate streaming assistant responses to the `useAssistant` hook.
It receives an assistant thread and a current message, and can send messages and data messages to the client.
*/
export function AssistantStream(
{ threadId, messageId }: AssistantResponseSettings,
process: AssistantResponseCallback
) =>
new ReadableStream({
) {
return new ReadableStream({
async start(controller) {
const textEncoder = new TextEncoder()
@@ -73,7 +80,7 @@ export const OpenAIAssistantStream = (
)
}
const forwardStream = async (stream: AssistantStream) => {
const forwardStream = async (stream: AssistantStreamType) => {
let result: Run | undefined = undefined
for await (const value of stream) {
@@ -143,3 +150,4 @@ export const OpenAIAssistantStream = (
pull(controller) {},
cancel() {},
})
}

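The helper streams data using `ai`'s data-stream protocol via `formatStreamPart`, which encodes each part as a type-prefixed JSON line. For orientation, a sketch of the behavior as of `ai` 3.2:

import { formatStreamPart } from 'ai'

formatStreamPart('text', 'Hello') // => '0:"Hello"\n'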
View File

@@ -1,81 +0,0 @@
import type { OpenAI } from 'openai'
import { VariableStore } from '@typebot.io/forge'
import { isDefined, isEmpty } from '@typebot.io/lib'
import { ChatCompletionOptions } from '../shared/parseChatCompletionOptions'
import ky, { HTTPError } from 'ky'
import { defaultOpenAIOptions, modelsWithImageUrlSupport } from '../constants'
import { isModelCompatibleWithVision } from './isModelCompatibleWithVision'
import { splitUserTextMessageIntoBlocks } from './splitUserTextMessageIntoBlocks'
export const parseChatCompletionMessages = async ({
options: { messages, model },
variables,
}: {
options: ChatCompletionOptions
variables: VariableStore
}): Promise<OpenAI.Chat.ChatCompletionMessageParam[]> => {
if (!messages) return []
const isVisionEnabled = isModelCompatibleWithVision(
model ?? defaultOpenAIOptions.model
)
const parsedMessages = (
await Promise.all(
messages.map(async (message) => {
if (!message.role) return
if (message.role === 'Dialogue') {
if (!message.dialogueVariableId) return
const dialogue = variables.get(message.dialogueVariableId) ?? []
const dialogueArr = Array.isArray(dialogue) ? dialogue : [dialogue]
return Promise.all(
dialogueArr.map(async (dialogueItem, index) => {
if (index === 0 && message.startsBy === 'assistant')
return {
role: 'assistant',
content: dialogueItem,
}
if (index % (message.startsBy === 'assistant' ? 1 : 2) === 0) {
return {
role: 'user',
content: isVisionEnabled
? await splitUserTextMessageIntoBlocks(dialogueItem ?? '')
: dialogueItem,
}
}
return {
role: 'assistant',
content: dialogueItem,
}
})
)
}
if (!message.content) return
const content = variables.parse(message.content)
if (isEmpty(content)) return
if (message.role === 'user')
return {
role: 'user',
content: isVisionEnabled
? await splitUserTextMessageIntoBlocks(content)
: content,
}
return {
role: message.role,
content,
}
})
)
)
.flat()
.filter((message) => {
return isDefined(message?.role) && isDefined(message.content)
}) as OpenAI.Chat.ChatCompletionMessageParam[]
return parsedMessages
}

View File

@@ -1,23 +0,0 @@
import type { OpenAI } from 'openai'
import { toolParametersSchema } from '../shared/parseChatCompletionOptions'
import { z } from '@typebot.io/forge/zod'
export const parseToolParameters = (
parameters: z.infer<typeof toolParametersSchema>
): OpenAI.FunctionParameters => ({
type: 'object',
properties: parameters?.reduce<{
[x: string]: unknown
}>((acc, param) => {
if (!param.name) return acc
acc[param.name] = {
type: param.type === 'enum' ? 'string' : param.type,
enum: param.type === 'enum' ? param.values : undefined,
description: param.description,
}
return acc
}, {}),
required:
parameters?.filter((param) => param.required).map((param) => param.name) ??
[],
})

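For reference, a worked example of this removed helper with a hypothetical parameter list:

parseToolParameters([
  { type: 'enum', name: 'unit', values: ['celsius', 'fahrenheit'], required: true },
  { type: 'number', name: 'days', description: 'Forecast horizon' },
])
// => {
//   type: 'object',
//   properties: {
//     unit: { type: 'string', enum: ['celsius', 'fahrenheit'], description: undefined },
//     days: { type: 'number', enum: undefined, description: 'Forecast horizon' },
//   },
//   required: ['unit'],
// }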
View File

@@ -1,7 +1,7 @@
import ky, { HTTPError } from 'ky'
import OpenAI from 'openai'
export const splitUserTextMessageIntoBlocks = async (
export const splitUserTextMessageIntoOpenAIBlocks = async (
input: string
): Promise<string | OpenAI.Chat.ChatCompletionContentPart[]> => {
const urlRegex = /(^|\n\n)(https?:\/\/[^\s]+)(\n\n|$)/g

View File

@@ -7,9 +7,10 @@
"author": "Baptiste Arnaud",
"license": "AGPL-3.0-or-later",
"dependencies": {
"@ai-sdk/openai": "0.0.31",
"ai": "3.2.1",
"openai": "4.47.1"
"@ai-sdk/openai": "0.0.36",
"ai": "3.2.22",
"openai": "4.52.7",
"@typebot.io/ai": "workspace:*"
},
"devDependencies": {
"@typebot.io/forge": "workspace:*",

View File

@@ -1,6 +1,7 @@
import { option } from '@typebot.io/forge'
import { z } from '@typebot.io/forge/zod'
import { baseOptions } from '../baseOptions'
import { toolsSchema } from '@typebot.io/ai/schemas'
const nativeMessageContentSchema = {
content: option.string.layout({
@@ -27,77 +28,6 @@ const assistantMessageItemSchema = option
})
.extend(nativeMessageContentSchema)
const parameterBase = {
name: option.string.layout({
label: 'Name',
placeholder: 'myVariable',
withVariableButton: false,
}),
description: option.string.layout({
label: 'Description',
withVariableButton: false,
}),
required: option.boolean.layout({
label: 'Is required?',
}),
}
export const toolParametersSchema = option
.array(
option.discriminatedUnion('type', [
option
.object({
type: option.literal('string'),
})
.extend(parameterBase),
option
.object({
type: option.literal('number'),
})
.extend(parameterBase),
option
.object({
type: option.literal('boolean'),
})
.extend(parameterBase),
option
.object({
type: option.literal('enum'),
values: option
.array(option.string)
.layout({ itemLabel: 'possible value' }),
})
.extend(parameterBase),
])
)
.layout({
accordion: 'Parameters',
itemLabel: 'parameter',
})
const functionToolItemSchema = option.object({
type: option.literal('function'),
name: option.string.layout({
label: 'Name',
placeholder: 'myFunctionName',
withVariableButton: false,
}),
description: option.string.layout({
label: 'Description',
placeholder: 'A brief description of what this function does.',
withVariableButton: false,
}),
parameters: toolParametersSchema,
code: option.string.layout({
inputType: 'code',
label: 'Code',
lang: 'javascript',
moreInfoTooltip:
'A javascript code snippet that can use the defined parameters. It should return a value.',
withVariableButton: false,
}),
})
const dialogueMessageItemSchema = option.object({
role: option.literal('Dialogue'),
dialogueVariableId: option.string.layout({
@@ -112,23 +42,20 @@ const dialogueMessageItemSchema = option.object({
})
type Props = {
defaultModel?: string
defaultTemperature?: number
defaultTemperature: number
modelFetchId?: string
modelHelperText?: string
}
export const parseChatCompletionOptions = ({
defaultModel,
defaultTemperature,
modelFetchId,
modelHelperText,
}: Props = {}) =>
}: Props) =>
option.object({
model: option.string.layout({
placeholder: modelFetchId ? 'Select a model' : undefined,
label: modelFetchId ? undefined : 'Model',
defaultValue: defaultModel,
fetcher: modelFetchId,
helperText: modelHelperText,
}),
@@ -142,9 +69,7 @@ export const parseChatCompletionOptions = ({
])
)
.layout({ accordion: 'Messages', itemLabel: 'message', isOrdered: true }),
tools: option
.array(option.discriminatedUnion('type', [functionToolItemSchema]))
.layout({ accordion: 'Tools', itemLabel: 'tool' }),
tools: toolsSchema,
temperature: option.number.layout({
accordion: 'Advanced settings',
label: 'Temperature',

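With the `= {}` default parameter gone and `defaultTemperature` now required, every caller must pass options explicitly, which is why the OpenRouter and Together actions elsewhere in this commit start passing `defaultTemperature`. Hypothetical call sites:

// Still compiles:
parseChatCompletionOptions({ defaultTemperature: 1, modelFetchId: 'fetchModels' })

// No longer compiles, since Props is now a required argument:
// parseChatCompletionOptions()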
View File

@@ -69,14 +69,12 @@ export const parseGenerateVariablesOptions = ({
? option.string.layout({
placeholder: 'Select a model',
label: 'Model',
defaultValue: defaultModel,
fetcher: modelFetch,
helperText: modelHelperText,
})
: option.enum(modelFetch).layout({
placeholder: 'Select a model',
label: 'Model',
defaultValue: defaultModel,
helperText: modelHelperText,
}),
prompt: option.string.layout({

View File

@@ -1,125 +0,0 @@
import OpenAI, { ClientOptions } from 'openai'
import { parseToolParameters } from '../helpers/parseToolParameters'
import { executeFunction } from '@typebot.io/variables/executeFunction'
import { ChatCompletionTool, ChatCompletionMessage } from 'openai/resources'
import { maxToolCalls } from '../constants'
import { parseChatCompletionMessages } from '../helpers/parseChatCompletionMessages'
import { ChatCompletionOptions } from './parseChatCompletionOptions'
import { LogsStore, VariableStore } from '@typebot.io/forge/types'
type OpenAIConfig = {
baseUrl: string
defaultModel?: string
}
type Props = {
credentials: {
apiKey?: string
}
options: ChatCompletionOptions
variables: VariableStore
logs: LogsStore
config: OpenAIConfig
}
export const runChatCompletion = async ({
credentials: { apiKey },
options,
variables,
config: openAIConfig,
logs,
}: Props) => {
const model = options.model?.trim() ?? openAIConfig.defaultModel
if (!model) return logs.add('No model provided')
const config = {
apiKey,
baseURL: openAIConfig.baseUrl ?? options.baseUrl,
defaultHeaders: options.baseUrl
? {
'api-key': apiKey,
}
: undefined,
defaultQuery: options.apiVersion
? {
'api-version': options.apiVersion,
}
: undefined,
} satisfies ClientOptions
const openai = new OpenAI(config)
const tools = options.tools
?.filter((t) => t.name && t.parameters)
.map((t) => ({
type: 'function',
function: {
name: t.name as string,
description: t.description,
parameters: parseToolParameters(t.parameters!),
},
})) satisfies ChatCompletionTool[] | undefined
const messages = await parseChatCompletionMessages({ options, variables })
const body = {
model,
temperature: options.temperature ? Number(options.temperature) : undefined,
messages,
tools: (tools?.length ?? 0) > 0 ? tools : undefined,
}
let totalTokens = 0
let message: ChatCompletionMessage
for (let i = 0; i < maxToolCalls; i++) {
const response = await openai.chat.completions.create(body)
message = response.choices[0].message
totalTokens += response.usage?.total_tokens || 0
if (!message.tool_calls) break
messages.push(message)
for (const toolCall of message.tool_calls) {
const name = toolCall.function?.name
if (!name) continue
const toolDefinition = options.tools?.find((t) => t.name === name)
if (!toolDefinition?.code || !toolDefinition.parameters) {
messages.push({
tool_call_id: toolCall.id,
role: 'tool',
content: 'Function not found',
})
continue
}
const toolParams = Object.fromEntries(
toolDefinition.parameters.map(({ name }) => [name, null])
)
const toolArgs = toolCall.function?.arguments
? JSON.parse(toolCall.function?.arguments)
: undefined
if (!toolArgs) continue
const { output, newVariables } = await executeFunction({
variables: variables.list(),
args: { ...toolParams, ...toolArgs },
body: toolDefinition.code,
})
newVariables?.forEach((v) => variables.set(v.id, v.value))
messages.push({
tool_call_id: toolCall.id,
role: 'tool',
content: output,
})
}
}
options.responseMapping?.forEach((mapping) => {
if (!mapping.variableId) return
if (!mapping.item || mapping.item === 'Message content')
variables.set(mapping.variableId, message.content)
if (mapping.item === 'Total tokens')
variables.set(mapping.variableId, totalTokens)
})
}

View File

@@ -1,107 +0,0 @@
import { VariableStore } from '@typebot.io/forge/types'
import { ChatCompletionOptions } from './parseChatCompletionOptions'
import { executeFunction } from '@typebot.io/variables/executeFunction'
import { OpenAIStream, ToolCallPayload } from 'ai'
import OpenAI, { ClientOptions } from 'openai'
import { ChatCompletionTool } from 'openai/resources'
import { parseChatCompletionMessages } from '../helpers/parseChatCompletionMessages'
import { parseToolParameters } from '../helpers/parseToolParameters'
type Props = {
credentials: { apiKey?: string }
options: ChatCompletionOptions
variables: VariableStore
config: { baseUrl: string; defaultModel?: string }
}
export const runChatCompletionStream = async ({
credentials: { apiKey },
options,
variables,
config: openAIConfig,
}: Props) => {
const model = options.model?.trim() ?? openAIConfig.defaultModel
if (!model) return
const config = {
apiKey,
baseURL: openAIConfig.baseUrl ?? options.baseUrl,
defaultHeaders: {
'api-key': apiKey,
},
defaultQuery: options.apiVersion
? {
'api-version': options.apiVersion,
}
: undefined,
} satisfies ClientOptions
const openai = new OpenAI(config)
const tools = options.tools
?.filter((t) => t.name && t.parameters)
.map((t) => ({
type: 'function',
function: {
name: t.name as string,
description: t.description,
parameters: parseToolParameters(t.parameters!),
},
})) satisfies ChatCompletionTool[] | undefined
const messages = await parseChatCompletionMessages({ options, variables })
const response = await openai.chat.completions.create({
model,
temperature: options.temperature ? Number(options.temperature) : undefined,
stream: true,
messages,
tools: (tools?.length ?? 0) > 0 ? tools : undefined,
})
return OpenAIStream(response, {
experimental_onToolCall: async (
call: ToolCallPayload,
appendToolCallMessage
) => {
for (const toolCall of call.tools) {
const name = toolCall.func?.name
if (!name) continue
const toolDefinition = options.tools?.find((t) => t.name === name)
if (!toolDefinition?.code || !toolDefinition.parameters) {
messages.push({
tool_call_id: toolCall.id,
role: 'tool',
content: 'Function not found',
})
continue
}
const { output, newVariables } = await executeFunction({
variables: variables.list(),
args:
typeof toolCall.func.arguments === 'string'
? JSON.parse(toolCall.func.arguments)
: toolCall.func.arguments,
body: toolDefinition.code,
})
newVariables?.forEach((v) => variables.set(v.id, v.value))
const newMessages = appendToolCallMessage({
tool_call_id: toolCall.id,
function_name: toolCall.func.name,
tool_call_result: output,
})
return openai.chat.completions.create({
messages: [
...messages,
...newMessages,
] as OpenAI.Chat.Completions.ChatCompletionMessageParam[],
model,
stream: true,
tools,
})
}
},
})
}

View File

@@ -0,0 +1,87 @@
import { maxToolCalls } from '../constants'
import { ChatCompletionOptions } from './parseChatCompletionOptions'
import { LogsStore, VariableStore } from '@typebot.io/forge/types'
import { createOpenAI } from '@ai-sdk/openai'
import { APICallError, generateText } from 'ai'
import { isModelCompatibleWithVision } from '../helpers/isModelCompatibleWithVision'
import { parseTools } from '@typebot.io/ai/parseTools'
import { parseChatCompletionMessages } from '@typebot.io/ai/parseChatCompletionMessages'
type OpenAIConfig = {
baseUrl: string
defaultModel?: string
}
type Props = {
credentials: {
apiKey?: string
}
options: ChatCompletionOptions
variables: VariableStore
logs: LogsStore
config: OpenAIConfig
compatibility?: 'strict' | 'compatible'
}
export const runOpenAIChatCompletion = async ({
credentials: { apiKey },
options,
variables,
config: openAIConfig,
logs,
compatibility,
}: Props) => {
if (!apiKey) return logs.add('No API key provided')
const modelName = options.model?.trim() ?? openAIConfig.defaultModel
if (!modelName) return logs.add('No model provided')
const model = createOpenAI({
baseURL: openAIConfig.baseUrl ?? options.baseUrl,
headers: options.baseUrl
? {
'api-key': apiKey,
}
: undefined,
apiKey,
compatibility,
})(modelName)
try {
const { text, usage } = await generateText({
model,
temperature: options.temperature
? Number(options.temperature)
: undefined,
messages: await parseChatCompletionMessages({
messages: options.messages,
variables,
isVisionEnabled: isModelCompatibleWithVision(modelName),
shouldDownloadImages: false,
}),
tools: parseTools({ tools: options.tools, variables }),
maxToolRoundtrips: maxToolCalls,
})
options.responseMapping?.forEach((mapping) => {
if (!mapping.variableId) return
if (!mapping.item || mapping.item === 'Message content')
variables.set(mapping.variableId, text)
if (mapping.item === 'Total tokens')
variables.set(mapping.variableId, usage.totalTokens)
})
} catch (err) {
if (err instanceof APICallError) {
logs.add({
status: 'error',
description: 'An API call error occurred while generating the response',
details: err.message,
})
return
}
logs.add({
status: 'error',
description: 'An unknown error occurred while generating the response',
details: err,
})
}
}

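`parseTools` from `@typebot.io/ai` is used by every completion runner in this commit but never shown. Presumably it converts the forge tool definitions (name, description, parameters, code snippet) into the AI SDK's `tool()` format, running the user's snippet when the model invokes the tool; a sketch under that assumption, with a hypothetical weather tool:

import { tool } from 'ai'
import { z } from 'zod'

// One AI SDK tool of the shape parseTools presumably produces.
const getWeather = tool({
  description: 'Get the current weather for a city',
  parameters: z.object({ city: z.string() }),
  // The real helper would run the user-defined code via executeFunction instead.
  execute: async ({ city }) => `Sunny in ${city}`,
})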
View File

@@ -0,0 +1,114 @@
import { VariableStore } from '@typebot.io/forge/types'
import { ChatCompletionOptions } from './parseChatCompletionOptions'
import { APICallError, streamText, ToolCallPart, ToolResultPart } from 'ai'
import { createOpenAI } from '@ai-sdk/openai'
import { maxToolCalls } from '../constants'
import { isModelCompatibleWithVision } from '../helpers/isModelCompatibleWithVision'
import { parseChatCompletionMessages } from '@typebot.io/ai/parseChatCompletionMessages'
import { parseTools } from '@typebot.io/ai/parseTools'
import { pumpStreamUntilDone } from '@typebot.io/ai/pumpStreamUntilDone'
import { appendToolResultsToMessages } from '@typebot.io/ai/appendToolResultsToMessages'
type Props = {
credentials: { apiKey?: string }
options: ChatCompletionOptions
variables: VariableStore
config: { baseUrl: string; defaultModel?: string }
compatibility?: 'strict' | 'compatible'
}
export const runOpenAIChatCompletionStream = async ({
credentials: { apiKey },
options,
variables,
config: openAIConfig,
compatibility,
}: Props): Promise<{
stream?: ReadableStream<any>
httpError?: { status: number; message: string }
}> => {
if (!apiKey) return { httpError: { status: 401, message: 'API key missing' } }
const modelName = options.model?.trim() ?? openAIConfig.defaultModel
if (!modelName)
return { httpError: { status: 400, message: 'model not found' } }
const model = createOpenAI({
baseURL: openAIConfig.baseUrl ?? options.baseUrl,
headers: options.baseUrl
? {
'api-key': apiKey,
}
: undefined,
apiKey,
compatibility,
})(modelName)
const streamConfig = {
model,
messages: await parseChatCompletionMessages({
messages: options.messages,
isVisionEnabled: isModelCompatibleWithVision(modelName),
shouldDownloadImages: false,
variables,
}),
temperature: options.temperature ? Number(options.temperature) : undefined,
tools: parseTools({ tools: options.tools, variables }),
}
try {
const response = await streamText(streamConfig)
let totalToolCalls = 0
let toolCalls: ToolCallPart[] = []
let toolResults: ToolResultPart[] = []
return {
stream: new ReadableStream({
async start(controller) {
const reader = response.toAIStream().getReader()
await pumpStreamUntilDone(controller, reader)
toolCalls = await response.toolCalls
if (toolCalls.length > 0)
toolResults = (await response.toolResults) as ToolResultPart[]
while (
toolCalls &&
toolCalls.length > 0 &&
totalToolCalls < maxToolCalls
) {
totalToolCalls += 1
const newResponse = await streamText({
...streamConfig,
messages: appendToolResultsToMessages({
messages: streamConfig.messages,
toolCalls,
toolResults,
}),
})
const reader = newResponse.toAIStream().getReader()
await pumpStreamUntilDone(controller, reader)
toolCalls = await newResponse.toolCalls
if (toolCalls.length > 0)
toolResults = (await newResponse.toolResults) as ToolResultPart[]
}
controller.close()
},
}),
}
} catch (err) {
if (err instanceof APICallError) {
return {
httpError: { status: err.statusCode ?? 500, message: err.message },
}
}
return {
httpError: {
status: 500,
message: 'An unknown error occurred while generating the response',
},
}
}
}

View File

@@ -3,8 +3,8 @@ import { auth } from '../auth'
import { parseChatCompletionOptions } from '@typebot.io/openai-block/shared/parseChatCompletionOptions'
import { getChatCompletionSetVarIds } from '@typebot.io/openai-block/shared/getChatCompletionSetVarIds'
import { getChatCompletionStreamVarId } from '@typebot.io/openai-block/shared/getChatCompletionStreamVarId'
import { runChatCompletion } from '@typebot.io/openai-block/shared/runChatCompletion'
import { runChatCompletionStream } from '@typebot.io/openai-block/shared/runChatCompletionStream'
import { runOpenAIChatCompletion } from '@typebot.io/openai-block/shared/runOpenAIChatCompletion'
import { runOpenAIChatCompletionStream } from '@typebot.io/openai-block/shared/runOpenAIChatCompletionStream'
import { defaultTogetherOptions } from '../constants'
export const createChatCompletion = createAction({
@@ -13,6 +13,7 @@ export const createChatCompletion = createAction({
options: parseChatCompletionOptions({
modelHelperText:
'You can find the list of all the models available [here](https://docs.together.ai/docs/inference-models#chat-models). Copy the model string for API.',
defaultTemperature: defaultTogetherOptions.temperature,
}),
turnableInto: [
{
@@ -26,7 +27,6 @@ export const createChatCompletion = createAction({
blockId: 'anthropic',
transform: (options) => ({
...options,
model: undefined,
action: 'Create Chat Message',
responseMapping: options.responseMapping?.map((res: any) =>
res.item === 'Message content'
@@ -39,18 +39,19 @@
getSetVariableIds: getChatCompletionSetVarIds,
run: {
server: (params) =>
runChatCompletion({
runOpenAIChatCompletion({
...params,
config: { baseUrl: defaultTogetherOptions.baseUrl },
}),
stream: {
getStreamVariableId: getChatCompletionStreamVarId,
run: async (params) => ({
stream: await runChatCompletionStream({
run: async (params) =>
runOpenAIChatCompletionStream({
...params,
config: { baseUrl: defaultTogetherOptions.baseUrl },
config: {
baseUrl: defaultTogetherOptions.baseUrl,
},
}),
}),
},
},
})

View File

@@ -1,3 +1,4 @@
export const defaultTogetherOptions = {
baseUrl: 'https://api.together.xyz/v1',
temperature: 1,
} as const

View File

@@ -24,6 +24,7 @@ export interface ZodLayoutMetadata<
isDebounceDisabled?: boolean
hiddenItems?: string[]
mergeWithLastField?: boolean
toLabels?: (val?: string) => string | undefined
}
declare module 'zod' {