⬆️ Upgrade AI SDK (#1641)

Baptiste Arnaud
2024-07-15 14:32:42 +02:00
committed by GitHub
parent a4fb8b6d10
commit 043f0054b0
60 changed files with 2183 additions and 1683 deletions

View File

@@ -14,6 +14,7 @@
"license": "AGPL-3.0-or-later",
"dependencies": {
"@ark-ui/solid": "3.3.0",
"@ai-sdk/ui-utils": "0.0.12",
"@stripe/stripe-js": "1.54.1",
"@udecode/plate-common": "30.4.5",
"dompurify": "3.0.6",
@@ -25,6 +26,7 @@
"devDependencies": {
"@babel/preset-typescript": "7.22.5",
"@rollup/plugin-babel": "6.0.3",
"@rollup/plugin-commonjs": "26.0.1",
"@rollup/plugin-node-resolve": "15.1.0",
"@rollup/plugin-terser": "0.4.3",
"@rollup/plugin-typescript": "11.1.2",

View File

@@ -7,6 +7,7 @@ import tailwindcss from 'tailwindcss'
import typescript from '@rollup/plugin-typescript'
import { typescriptPaths } from 'rollup-plugin-typescript-paths'
import replace from '@rollup/plugin-replace'
import commonjs from '@rollup/plugin-commonjs'
import fs from 'fs'
const extensions = ['.ts', '.tsx']
@@ -27,6 +28,7 @@ const indexConfig = {
},
plugins: [
resolve({ extensions }),
commonjs(),
babel({
babelHelpers: 'bundled',
exclude: 'node_modules/**',

View File

@@ -1,5 +1,5 @@
import { ClientSideActionContext } from '@/types'
import { readDataStream } from '@/utils/ai/readDataStream'
import { readDataStream } from '@ai-sdk/ui-utils'
import { guessApiHost } from '@/utils/guessApiHost'
import { isNotEmpty } from '@typebot.io/lib/utils'
import { createUniqueId } from 'solid-js'
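
In the hunk above, the first `readDataStream` import is the removed line and the second is its replacement: the local helper (deleted below) is swapped for the identically named export from `@ai-sdk/ui-utils`, so call sites keep the same signature. A minimal consumption sketch, assuming a hypothetical streaming endpoint and abort flag (neither is part of this diff):

```ts
import { readDataStream } from '@ai-sdk/ui-utils'

let abortRequested = false // illustrative abort flag

const streamAnswer = async (messages: unknown[]) => {
  // '/api/ai/stream' is a placeholder endpoint for this sketch
  const res = await fetch('/api/ai/stream', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ messages }),
  })
  if (!res.body) throw new Error('Response has no readable body')
  let text = ''
  // Each yielded part is a parsed `{ type, value }` stream part (text, data, error, ...)
  for await (const part of readDataStream(res.body.getReader(), {
    isAborted: () => abortRequested,
  })) {
    if (part.type === 'text') text += part.value
    if (part.type === 'error') throw new Error(part.value)
  }
  return text
}
```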

View File

@@ -1,80 +0,0 @@
import { StreamPartType, parseStreamPart } from './streamParts'
const NEWLINE = '\n'.charCodeAt(0)
// concatenates all the chunks into a single Uint8Array
function concatChunks(chunks: Uint8Array[], totalLength: number) {
const concatenatedChunks = new Uint8Array(totalLength)
let offset = 0
for (const chunk of chunks) {
concatenatedChunks.set(chunk, offset)
offset += chunk.length
}
chunks.length = 0
return concatenatedChunks
}
/**
Converts a ReadableStreamDefaultReader into an async generator that yields
StreamPart objects.
@param reader
Reader for the stream to read from.
@param isAborted
Optional function that returns true if the request has been aborted.
If the function returns true, the generator will stop reading the stream.
If the function is not provided, the generator will not stop reading the stream.
*/
export async function* readDataStream(
reader: ReadableStreamDefaultReader<Uint8Array>,
{
isAborted,
}: {
isAborted?: () => boolean
} = {}
): AsyncGenerator<StreamPartType> {
// implementation note: this slightly more complex algorithm is required
// to pass the tests in the edge environment.
const decoder = new TextDecoder()
const chunks: Uint8Array[] = []
let totalLength = 0
while (true) {
const { value } = await reader.read()
if (value) {
chunks.push(value)
totalLength += value.length
if (value[value.length - 1] !== NEWLINE) {
// if the last character is not a newline, we have not read the whole JSON value
continue
}
}
if (chunks.length === 0) {
break // we have reached the end of the stream
}
const concatenatedChunks = concatChunks(chunks, totalLength)
totalLength = 0
const streamParts = decoder
.decode(concatenatedChunks, { stream: true })
.split('\n')
.filter((line) => line !== '') // splitting leaves an empty string at the end
.map(parseStreamPart)
for (const streamPart of streamParts) {
yield streamPart
}
// The request has been aborted, stop reading the stream.
if (isAborted?.()) {
reader.cancel()
break
}
}
}

View File

@@ -1,377 +0,0 @@
import {
AssistantMessage,
DataMessage,
FunctionCall,
JSONValue,
ToolCall,
} from './types'
type StreamString =
`${(typeof StreamStringPrefixes)[keyof typeof StreamStringPrefixes]}:${string}\n`
export interface StreamPart<CODE extends string, NAME extends string, TYPE> {
code: CODE
name: NAME
parse: (value: JSONValue) => { type: NAME; value: TYPE }
}
const textStreamPart: StreamPart<'0', 'text', string> = {
code: '0',
name: 'text',
parse: (value: JSONValue) => {
if (typeof value !== 'string') {
throw new Error('"text" parts expect a string value.')
}
return { type: 'text', value }
},
}
const functionCallStreamPart: StreamPart<
'1',
'function_call',
{ function_call: FunctionCall }
> = {
code: '1',
name: 'function_call',
parse: (value: JSONValue) => {
if (
value == null ||
typeof value !== 'object' ||
!('function_call' in value) ||
typeof value.function_call !== 'object' ||
value.function_call == null ||
!('name' in value.function_call) ||
!('arguments' in value.function_call) ||
typeof value.function_call.name !== 'string' ||
typeof value.function_call.arguments !== 'string'
) {
throw new Error(
'"function_call" parts expect an object with a "function_call" property.'
)
}
return {
type: 'function_call',
value: value as unknown as { function_call: FunctionCall },
}
},
}
const dataStreamPart: StreamPart<'2', 'data', Array<JSONValue>> = {
code: '2',
name: 'data',
parse: (value: JSONValue) => {
if (!Array.isArray(value)) {
throw new Error('"data" parts expect an array value.')
}
return { type: 'data', value }
},
}
const errorStreamPart: StreamPart<'3', 'error', string> = {
code: '3',
name: 'error',
parse: (value: JSONValue) => {
if (typeof value !== 'string') {
throw new Error('"error" parts expect a string value.')
}
return { type: 'error', value }
},
}
const assistantMessageStreamPart: StreamPart<
'4',
'assistant_message',
AssistantMessage
> = {
code: '4',
name: 'assistant_message',
parse: (value: JSONValue) => {
if (
value == null ||
typeof value !== 'object' ||
!('id' in value) ||
!('role' in value) ||
!('content' in value) ||
typeof value.id !== 'string' ||
typeof value.role !== 'string' ||
value.role !== 'assistant' ||
!Array.isArray(value.content) ||
!value.content.every(
(item) =>
item != null &&
typeof item === 'object' &&
'type' in item &&
item.type === 'text' &&
'text' in item &&
item.text != null &&
typeof item.text === 'object' &&
'value' in item.text &&
typeof item.text.value === 'string'
)
) {
throw new Error(
'"assistant_message" parts expect an object with an "id", "role", and "content" property.'
)
}
return {
type: 'assistant_message',
value: value as AssistantMessage,
}
},
}
const assistantControlDataStreamPart: StreamPart<
'5',
'assistant_control_data',
{
threadId: string
messageId: string
}
> = {
code: '5',
name: 'assistant_control_data',
parse: (value: JSONValue) => {
if (
value == null ||
typeof value !== 'object' ||
!('threadId' in value) ||
!('messageId' in value) ||
typeof value.threadId !== 'string' ||
typeof value.messageId !== 'string'
) {
throw new Error(
'"assistant_control_data" parts expect an object with a "threadId" and "messageId" property.'
)
}
return {
type: 'assistant_control_data',
value: {
threadId: value.threadId,
messageId: value.messageId,
},
}
},
}
const dataMessageStreamPart: StreamPart<'6', 'data_message', DataMessage> = {
code: '6',
name: 'data_message',
parse: (value: JSONValue) => {
if (
value == null ||
typeof value !== 'object' ||
!('role' in value) ||
!('data' in value) ||
typeof value.role !== 'string' ||
value.role !== 'data'
) {
throw new Error(
'"data_message" parts expect an object with a "role" and "data" property.'
)
}
return {
type: 'data_message',
value: value as DataMessage,
}
},
}
const toolCallStreamPart: StreamPart<
'7',
'tool_calls',
{ tool_calls: ToolCall[] }
> = {
code: '7',
name: 'tool_calls',
parse: (value: JSONValue) => {
if (
value == null ||
typeof value !== 'object' ||
!('tool_calls' in value) ||
typeof value.tool_calls !== 'object' ||
value.tool_calls == null ||
!Array.isArray(value.tool_calls) ||
value.tool_calls.some(
(tc) =>
tc == null ||
typeof tc !== 'object' ||
!('id' in tc) ||
typeof tc.id !== 'string' ||
!('type' in tc) ||
typeof tc.type !== 'string' ||
!('function' in tc) ||
tc.function == null ||
typeof tc.function !== 'object' ||
!('arguments' in tc.function) ||
typeof tc.function.name !== 'string' ||
typeof tc.function.arguments !== 'string'
)
) {
throw new Error(
'"tool_calls" parts expect an object with a ToolCallPayload.'
)
}
return {
type: 'tool_calls',
value: value as unknown as { tool_calls: ToolCall[] },
}
},
}
const messageAnnotationsStreamPart: StreamPart<
'8',
'message_annotations',
Array<JSONValue>
> = {
code: '8',
name: 'message_annotations',
parse: (value: JSONValue) => {
if (!Array.isArray(value)) {
throw new Error('"message_annotations" parts expect an array value.')
}
return { type: 'message_annotations', value }
},
}
const streamParts = [
textStreamPart,
functionCallStreamPart,
dataStreamPart,
errorStreamPart,
assistantMessageStreamPart,
assistantControlDataStreamPart,
dataMessageStreamPart,
toolCallStreamPart,
messageAnnotationsStreamPart,
] as const
// union type of all stream parts
type StreamParts =
| typeof textStreamPart
| typeof functionCallStreamPart
| typeof dataStreamPart
| typeof errorStreamPart
| typeof assistantMessageStreamPart
| typeof assistantControlDataStreamPart
| typeof dataMessageStreamPart
| typeof toolCallStreamPart
| typeof messageAnnotationsStreamPart
/**
* Maps the type of a stream part to its value type.
*/
type StreamPartValueType = {
[P in StreamParts as P['name']]: ReturnType<P['parse']>['value']
}
export type StreamPartType =
| ReturnType<typeof textStreamPart.parse>
| ReturnType<typeof functionCallStreamPart.parse>
| ReturnType<typeof dataStreamPart.parse>
| ReturnType<typeof errorStreamPart.parse>
| ReturnType<typeof assistantMessageStreamPart.parse>
| ReturnType<typeof assistantControlDataStreamPart.parse>
| ReturnType<typeof dataMessageStreamPart.parse>
| ReturnType<typeof toolCallStreamPart.parse>
| ReturnType<typeof messageAnnotationsStreamPart.parse>
export const streamPartsByCode = {
[textStreamPart.code]: textStreamPart,
[functionCallStreamPart.code]: functionCallStreamPart,
[dataStreamPart.code]: dataStreamPart,
[errorStreamPart.code]: errorStreamPart,
[assistantMessageStreamPart.code]: assistantMessageStreamPart,
[assistantControlDataStreamPart.code]: assistantControlDataStreamPart,
[dataMessageStreamPart.code]: dataMessageStreamPart,
[toolCallStreamPart.code]: toolCallStreamPart,
[messageAnnotationsStreamPart.code]: messageAnnotationsStreamPart,
} as const
/**
* The map of prefixes for data in the stream
*
* - 0: Text from the LLM response
* - 1: (OpenAI) function_call responses
* - 2: custom JSON added by the user using `Data`
* - 6: (OpenAI) tool_call responses
*
* Example:
* ```
* 0:Vercel
* 0:'s
* 0: AI
* 0: AI
* 0: SDK
* 0: is great
* 0:!
* 2: { "someJson": "value" }
* 1: {"function_call": {"name": "get_current_weather", "arguments": "{\\n\\"location\\": \\"Charlottesville, Virginia\\",\\n\\"format\\": \\"celsius\\"\\n}"}}
* 6: {"tool_call": {"id": "tool_0", "type": "function", "function": {"name": "get_current_weather", "arguments": "{\\n\\"location\\": \\"Charlottesville, Virginia\\",\\n\\"format\\": \\"celsius\\"\\n}"}}}
*```
*/
export const StreamStringPrefixes = {
[textStreamPart.name]: textStreamPart.code,
[functionCallStreamPart.name]: functionCallStreamPart.code,
[dataStreamPart.name]: dataStreamPart.code,
[errorStreamPart.name]: errorStreamPart.code,
[assistantMessageStreamPart.name]: assistantMessageStreamPart.code,
[assistantControlDataStreamPart.name]: assistantControlDataStreamPart.code,
[dataMessageStreamPart.name]: dataMessageStreamPart.code,
[toolCallStreamPart.name]: toolCallStreamPart.code,
[messageAnnotationsStreamPart.name]: messageAnnotationsStreamPart.code,
} as const
export const validCodes = streamParts.map((part) => part.code)
/**
Parses a stream part from a string.
@param line The string to parse.
@returns The parsed stream part.
@throws An error if the string cannot be parsed.
*/
export const parseStreamPart = (line: string): StreamPartType => {
const firstSeparatorIndex = line.indexOf(':')
if (firstSeparatorIndex === -1) {
throw new Error('Failed to parse stream string. No separator found.')
}
const prefix = line.slice(0, firstSeparatorIndex)
if (!validCodes.includes(prefix as keyof typeof streamPartsByCode)) {
throw new Error(`Failed to parse stream string. Invalid code ${prefix}.`)
}
const code = prefix as keyof typeof streamPartsByCode
const textValue = line.slice(firstSeparatorIndex + 1)
const jsonValue: JSONValue = JSON.parse(textValue)
return streamPartsByCode[code].parse(jsonValue)
}
/**
Prepends a string with a prefix from the `StreamChunkPrefixes`, JSON-ifies it,
and appends a new line.
It ensures type-safety for the part type and value.
*/
export function formatStreamPart<T extends keyof StreamPartValueType>(
type: T,
value: StreamPartValueType[T]
): StreamString {
const streamPart = streamParts.find((part) => part.name === type)
if (!streamPart) {
throw new Error(`Invalid stream part type: ${type}`)
}
return `${streamPart.code}:${JSON.stringify(value)}\n`
}
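
For reference, each line of the wire format these removed helpers implement (and that `@ai-sdk/ui-utils` now provides) is a one-character code, a colon, and a JSON payload. A minimal round-trip sketch using the two exports above; the `./streamParts` import refers to the module deleted in this commit and is shown purely for illustration:

```ts
import { formatStreamPart, parseStreamPart } from './streamParts'

// Serialize a "data" part: produces the line '2:[{"someJson":"value"}]\n'
const line = formatStreamPart('data', [{ someJson: 'value' }])

// Parse it back into a typed part: { type: 'data', value: [{ someJson: 'value' }] }
const part = parseStreamPart(line.trim())
```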

View File

@@ -1,355 +0,0 @@
/* eslint-disable @typescript-eslint/ban-types */
// https://github.com/openai/openai-node/blob/07b3504e1c40fd929f4aae1651b83afc19e3baf8/src/resources/chat/completions.ts#L146-L159
export interface FunctionCall {
/**
* The arguments to call the function with, as generated by the model in JSON
* format. Note that the model does not always generate valid JSON, and may
* hallucinate parameters not defined by your function schema. Validate the
* arguments in your code before calling your function.
*/
arguments?: string
/**
* The name of the function to call.
*/
name?: string
}
/**
* The tool calls generated by the model, such as function calls.
*/
export interface ToolCall {
// The ID of the tool call.
id: string
// The type of the tool. Currently, only `function` is supported.
type: string
// The function that the model called.
function: {
// The name of the function.
name: string
// The arguments to call the function with, as generated by the model in JSON
arguments: string
}
}
/**
* Controls which (if any) function is called by the model.
* - none means the model will not call a function and instead generates a message.
* - auto means the model can pick between generating a message or calling a function.
* - Specifying a particular function via {"type": "function", "function": {"name": "my_function"}} forces the model to call that function.
* none is the default when no functions are present. auto is the default if functions are present.
*/
export type ToolChoice =
| 'none'
| 'auto'
| { type: 'function'; function: { name: string } }
/**
* A list of tools the model may call. Currently, only functions are supported as a tool.
* Use this to provide a list of functions the model may generate JSON inputs for.
*/
export interface Tool {
type: 'function'
function: Function
}
export interface Function {
/**
* The name of the function to be called. Must be a-z, A-Z, 0-9, or contain
* underscores and dashes, with a maximum length of 64.
*/
name: string
/**
* The parameters the function accepts, described as a JSON Schema object. See the
* [guide](/docs/guides/gpt/function-calling) for examples, and the
* [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
* documentation about the format.
*
* To describe a function that accepts no parameters, provide the value
* `{"type": "object", "properties": {}}`.
*/
parameters: Record<string, unknown>
/**
* A description of what the function does, used by the model to choose when and
* how to call the function.
*/
description?: string
}
export type IdGenerator = () => string
/**
* Shared types between the API and UI packages.
*/
export interface Message {
id: string
tool_call_id?: string
createdAt?: Date
content: string
ui?: string | JSX.Element | JSX.Element[] | null | undefined
role: 'system' | 'user' | 'assistant' | 'function' | 'data' | 'tool'
/**
* If the message has a role of `function`, the `name` field is the name of the function.
* Otherwise, the name field should not be set.
*/
name?: string
/**
* If the assistant role makes a function call, the `function_call` field
* contains the function call name and arguments. Otherwise, the field should
* not be set. (Deprecated and replaced by tool_calls.)
*/
function_call?: string | FunctionCall
data?: JSONValue
/**
* If the assistant role makes a tool call, the `tool_calls` field contains
* the tool call name and arguments. Otherwise, the field should not be set.
*/
tool_calls?: string | ToolCall[]
/**
* Additional message-specific information added on the server via StreamData
*/
annotations?: JSONValue[] | undefined
}
export type CreateMessage = Omit<Message, 'id'> & {
id?: Message['id']
}
export type ChatRequest = {
messages: Message[]
options?: RequestOptions
// @deprecated
functions?: Array<Function>
// @deprecated
function_call?: FunctionCall
data?: Record<string, string>
tools?: Array<Tool>
tool_choice?: ToolChoice
}
export type FunctionCallHandler = (
chatMessages: Message[],
functionCall: FunctionCall
) => Promise<ChatRequest | void>
export type ToolCallHandler = (
chatMessages: Message[],
toolCalls: ToolCall[]
) => Promise<ChatRequest | void>
export type RequestOptions = {
headers?: Record<string, string> | Headers
body?: object
}
export type ChatRequestOptions = {
options?: RequestOptions
functions?: Array<Function>
function_call?: FunctionCall
tools?: Array<Tool>
tool_choice?: ToolChoice
data?: Record<string, string>
}
export type UseChatOptions = {
/**
* The API endpoint that accepts a `{ messages: Message[] }` object and returns
* a stream of tokens of the AI chat response. Defaults to `/api/chat`.
*/
api?: string
/**
* A unique identifier for the chat. If not provided, a random one will be
* generated. When provided, the `useChat` hook with the same `id` will
* have shared states across components.
*/
id?: string
/**
* Initial messages of the chat. Useful to load an existing chat history.
*/
initialMessages?: Message[]
/**
* Initial input of the chat.
*/
initialInput?: string
/**
* Callback function to be called when a function call is received.
* If the function returns a `ChatRequest` object, the request will be sent
* automatically to the API and will be used to update the chat.
*/
experimental_onFunctionCall?: FunctionCallHandler
/**
* Callback function to be called when a tool call is received.
* If the function returns a `ChatRequest` object, the request will be sent
* automatically to the API and will be used to update the chat.
*/
experimental_onToolCall?: ToolCallHandler
/**
* Callback function to be called when the API response is received.
*/
onResponse?: (response: Response) => void | Promise<void>
/**
* Callback function to be called when the chat is finished streaming.
*/
onFinish?: (message: Message) => void
/**
* Callback function to be called when an error is encountered.
*/
onError?: (error: Error) => void
/**
* A way to provide a function that is going to be used for ids for messages.
* If not provided nanoid is used by default.
*/
generateId?: IdGenerator
/**
* The credentials mode to be used for the fetch request.
* Possible values are: 'omit', 'same-origin', 'include'.
* Defaults to 'same-origin'.
*/
credentials?: RequestCredentials
/**
* HTTP headers to be sent with the API request.
*/
headers?: Record<string, string> | Headers
/**
* Extra body object to be sent with the API request.
* @example
* Send a `sessionId` to the API along with the messages.
* ```js
* useChat({
* body: {
* sessionId: '123',
* }
* })
* ```
*/
body?: object
/**
* Whether to send extra message fields such as `message.id` and `message.createdAt` to the API.
* Defaults to `false`. When set to `true`, the API endpoint might need to
* handle the extra fields before forwarding the request to the AI service.
*/
sendExtraMessageFields?: boolean
/** Stream mode (defaults to "stream-data") */
streamMode?: 'stream-data' | 'text'
}
export type UseCompletionOptions = {
/**
* The API endpoint that accepts a `{ prompt: string }` object and returns
* a stream of tokens of the AI completion response. Defaults to `/api/completion`.
*/
api?: string
/**
* A unique identifier for the chat. If not provided, a random one will be
* generated. When provided, the `useChat` hook with the same `id` will
* have shared states across components.
*/
id?: string
/**
* Initial prompt input of the completion.
*/
initialInput?: string
/**
* Initial completion result. Useful to load an existing history.
*/
initialCompletion?: string
/**
* Callback function to be called when the API response is received.
*/
onResponse?: (response: Response) => void | Promise<void>
/**
* Callback function to be called when the completion is finished streaming.
*/
onFinish?: (prompt: string, completion: string) => void
/**
* Callback function to be called when an error is encountered.
*/
onError?: (error: Error) => void
/**
* The credentials mode to be used for the fetch request.
* Possible values are: 'omit', 'same-origin', 'include'.
* Defaults to 'same-origin'.
*/
credentials?: RequestCredentials
/**
* HTTP headers to be sent with the API request.
*/
headers?: Record<string, string> | Headers
/**
* Extra body object to be sent with the API request.
* @example
* Send a `sessionId` to the API along with the prompt.
* ```js
* useChat({
* body: {
* sessionId: '123',
* }
* })
* ```
*/
body?: object
/** Stream mode (defaults to "stream-data") */
streamMode?: 'stream-data' | 'text'
}
export type JSONValue =
| null
| string
| number
| boolean
| { [x: string]: JSONValue }
| Array<JSONValue>
export type AssistantMessage = {
id: string
role: 'assistant'
content: Array<{
type: 'text'
text: {
value: string
}
}>
}
/*
* A data message is an application-specific message from the assistant
* that should be shown in order with the other messages.
*
* It can trigger other operations on the frontend, such as annotating
* a map.
*/
export type DataMessage = {
id?: string // optional id, implement if needed (e.g. for persistence)
role: 'data'
data: JSONValue // application-specific data
}