⬆️ Upgrade AI SDK (#1641)
@@ -1,5 +1,7 @@
+// Copied from https://github.com/vercel/ai/blob/f9db8fd6543202a8404a7a1a40f938d6270b08ef/packages/core/streams/assistant-response.ts
+// Because the stream is not exported from the package
 import { AssistantMessage, DataMessage, formatStreamPart } from 'ai'
-import { AssistantStream } from 'openai/lib/AssistantStream'
+import { AssistantStream as AssistantStreamType } from 'openai/lib/AssistantStream'
 import { Run } from 'openai/resources/beta/threads/runs/runs'
 
 /**
@@ -44,14 +46,19 @@ Send a data message to the client. You can use this to provide information for rendering custom UIs while the assistant is processing the thread.
   /**
   Forwards the assistant response stream to the client. Returns the `Run` object after it completes, or when it requires an action.
   */
-  forwardStream: (stream: AssistantStream) => Promise<Run | undefined>
+  forwardStream: (stream: AssistantStreamType) => Promise<Run | undefined>
 }) => Promise<void>
 
-export const OpenAIAssistantStream = (
+/**
+The `AssistantResponse` allows you to send a stream of assistant updates to `useAssistant`.
+It is designed to facilitate streaming assistant responses to the `useAssistant` hook.
+It receives an assistant thread and a current message, and can send messages and data messages to the client.
+*/
+export function AssistantStream(
   { threadId, messageId }: AssistantResponseSettings,
   process: AssistantResponseCallback
-) =>
-  new ReadableStream({
+) {
+  return new ReadableStream({
     async start(controller) {
       const textEncoder = new TextEncoder()
 
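For orientation between hunks: a minimal sketch of how the renamed AssistantStream helper might be consumed in a route handler. Only forwardStream and the { threadId, messageId } settings come from this diff; the OpenAI client calls, the assistant id, and the response wiring are assumptions.

import OpenAI from 'openai'

const openai = new OpenAI()

export async function POST(req: Request) {
  const { threadId, message } = (await req.json()) as {
    threadId: string
    message: string
  }

  // Append the user's message to the existing thread
  const createdMessage = await openai.beta.threads.messages.create(threadId, {
    role: 'user',
    content: message,
  })

  // AssistantStream returns the ReadableStream built above
  const stream = AssistantStream(
    { threadId, messageId: createdMessage.id },
    async ({ forwardStream }) => {
      const runStream = openai.beta.threads.runs.stream(threadId, {
        assistant_id: 'asst_xxx', // hypothetical assistant id
      })
      // Resolves with the Run once it completes or requires an action
      await forwardStream(runStream)
    }
  )

  return new Response(stream)
}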
@@ -73,7 +80,7 @@ export const OpenAIAssistantStream = (
         )
       }
 
-      const forwardStream = async (stream: AssistantStream) => {
+      const forwardStream = async (stream: AssistantStreamType) => {
         let result: Run | undefined = undefined
 
         for await (const value of stream) {
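This hunk shows only the signature change. For context, a self-contained sketch of the pattern the surrounding loop implements: iterate the SDK's event stream and keep the Run carried by terminal events (the event names are from the OpenAI Assistants streaming API; the message-forwarding plumbing the real loop also does is omitted).

import { AssistantStream } from 'openai/lib/AssistantStream'
import { Run } from 'openai/resources/beta/threads/runs/runs'

const collectRun = async (
  stream: AssistantStream
): Promise<Run | undefined> => {
  let result: Run | undefined = undefined
  for await (const value of stream) {
    // Terminal events carry the Run that forwardStream returns
    if (
      value.event === 'thread.run.completed' ||
      value.event === 'thread.run.requires_action'
    ) {
      result = value.data
    }
  }
  return result
}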
@@ -143,3 +150,4 @@ export const OpenAIAssistantStream = (
     pull(controller) {},
     cancel() {},
   })
+}
@@ -1,81 +0,0 @@
-import type { OpenAI } from 'openai'
-import { VariableStore } from '@typebot.io/forge'
-import { isDefined, isEmpty } from '@typebot.io/lib'
-import { ChatCompletionOptions } from '../shared/parseChatCompletionOptions'
-import ky, { HTTPError } from 'ky'
-import { defaultOpenAIOptions, modelsWithImageUrlSupport } from '../constants'
-import { isModelCompatibleWithVision } from './isModelCompatibleWithVision'
-import { splitUserTextMessageIntoBlocks } from './splitUserTextMessageIntoBlocks'
-
-export const parseChatCompletionMessages = async ({
-  options: { messages, model },
-  variables,
-}: {
-  options: ChatCompletionOptions
-  variables: VariableStore
-}): Promise<OpenAI.Chat.ChatCompletionMessageParam[]> => {
-  if (!messages) return []
-  const isVisionEnabled = isModelCompatibleWithVision(
-    model ?? defaultOpenAIOptions.model
-  )
-  const parsedMessages = (
-    await Promise.all(
-      messages.map(async (message) => {
-        if (!message.role) return
-
-        if (message.role === 'Dialogue') {
-          if (!message.dialogueVariableId) return
-          const dialogue = variables.get(message.dialogueVariableId) ?? []
-          const dialogueArr = Array.isArray(dialogue) ? dialogue : [dialogue]
-
-          return Promise.all(
-            dialogueArr.map(async (dialogueItem, index) => {
-              if (index === 0 && message.startsBy === 'assistant')
-                return {
-                  role: 'assistant',
-                  content: dialogueItem,
-                }
-              if (index % (message.startsBy === 'assistant' ? 1 : 2) === 0) {
-                return {
-                  role: 'user',
-                  content: isVisionEnabled
-                    ? await splitUserTextMessageIntoBlocks(dialogueItem ?? '')
-                    : dialogueItem,
-                }
-              }
-              return {
-                role: 'assistant',
-                content: dialogueItem,
-              }
-            })
-          )
-        }
-
-        if (!message.content) return
-
-        const content = variables.parse(message.content)
-
-        if (isEmpty(content)) return
-
-        if (message.role === 'user')
-          return {
-            role: 'user',
-            content: isVisionEnabled
-              ? await splitUserTextMessageIntoBlocks(content)
-              : content,
-          }
-
-        return {
-          role: message.role,
-          content,
-        }
-      })
-    )
-  )
-    .flat()
-    .filter((message) => {
-      return isDefined(message?.role) && isDefined(message.content)
-    }) as OpenAI.Chat.ChatCompletionMessageParam[]
-
-  return parsedMessages
-}
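The deleted parseChatCompletionMessages above mapped a saved 'Dialogue' variable to alternating chat roles. A small illustration of its default branch (startsBy !== 'assistant'), where even indexes become user messages and odd indexes become assistant messages; the transcript values are hypothetical:

const dialogue = ['Hi!', 'Hello, how can I help?', 'What is Typebot?']

const mapped = dialogue.map((content, index) =>
  index % 2 === 0
    ? { role: 'user' as const, content }
    : { role: 'assistant' as const, content }
)
// => user / assistant / user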
@@ -1,23 +0,0 @@
-import type { OpenAI } from 'openai'
-import { toolParametersSchema } from '../shared/parseChatCompletionOptions'
-import { z } from '@typebot.io/forge/zod'
-
-export const parseToolParameters = (
-  parameters: z.infer<typeof toolParametersSchema>
-): OpenAI.FunctionParameters => ({
-  type: 'object',
-  properties: parameters?.reduce<{
-    [x: string]: unknown
-  }>((acc, param) => {
-    if (!param.name) return acc
-    acc[param.name] = {
-      type: param.type === 'enum' ? 'string' : param.type,
-      enum: param.type === 'enum' ? param.values : undefined,
-      description: param.description,
-    }
-    return acc
-  }, {}),
-  required:
-    parameters?.filter((param) => param.required).map((param) => param.name) ??
-    [],
-})
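The deleted parseToolParameters turned the Forge tool parameter list into an OpenAI JSON-schema parameters object. A worked example of that mapping; the parameter values are hypothetical:

const parameters = [
  { name: 'city', type: 'string', description: 'City to look up', required: true },
  { name: 'unit', type: 'enum', values: ['celsius', 'fahrenheit'] },
]

// parseToolParameters(parameters) produced:
const schema = {
  type: 'object',
  properties: {
    city: { type: 'string', enum: undefined, description: 'City to look up' },
    unit: { type: 'string', enum: ['celsius', 'fahrenheit'], description: undefined },
  },
  required: ['city'],
}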
@@ -1,7 +1,7 @@
 import ky, { HTTPError } from 'ky'
 import OpenAI from 'openai'
 
-export const splitUserTextMessageIntoBlocks = async (
+export const splitUserTextMessageIntoOpenAIBlocks = async (
   input: string
 ): Promise<string | OpenAI.Chat.ChatCompletionContentPart[]> => {
   const urlRegex = /(^|\n\n)(https?:\/\/[^\s]+)(\n\n|$)/g
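The urlRegex in the renamed helper matches URLs that stand alone between blank lines. A sketch of the input/output shape, assuming the helper emits OpenAI content parts for standalone image URLs (the content-type check it performs is not visible in this hunk):

import OpenAI from 'openai'

const input = 'Describe this image:\n\nhttps://example.com/cat.png'

// splitUserTextMessageIntoOpenAIBlocks(input) would plausibly resolve to:
const blocks: OpenAI.Chat.ChatCompletionContentPart[] = [
  { type: 'text', text: 'Describe this image:' },
  { type: 'image_url', image_url: { url: 'https://example.com/cat.png' } },
]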