2
0

♻️ Migrate from got to ky (#1416)

Closes #1415
This commit is contained in:
Baptiste Arnaud
2024-04-05 09:01:16 +02:00
committed by GitHub
parent ccc7101dd3
commit d96f384e02
59 changed files with 990 additions and 628 deletions

View File

@@ -1,5 +1,6 @@
import { createAction, option } from '@typebot.io/forge'
import { auth } from '../auth'
import { Anthropic } from '@anthropic-ai/sdk'
import { AnthropicStream } from 'ai'
import { anthropicModels, defaultAnthropicOptions } from '../constants'
import { parseChatMessages } from '../helpers/parseChatMessages'
@@ -103,8 +104,6 @@ export const createChatMessage = createAction({
responseMapping?.map((res) => res.variableId).filter(isDefined) ?? [],
run: {
server: async ({ credentials: { apiKey }, options, variables, logs }) => {
const { Anthropic } = await import('@anthropic-ai/sdk')
const client = new Anthropic({
apiKey: apiKey,
})
@@ -150,8 +149,6 @@ export const createChatMessage = createAction({
(res) => res.item === 'Message Content' || !res.item
)?.variableId,
run: async ({ credentials: { apiKey }, options, variables }) => {
const { Anthropic } = await import('@anthropic-ai/sdk')
const client = new Anthropic({
apiKey: apiKey,
})

View File

@@ -1,6 +1,6 @@
import { createAction, option } from '@typebot.io/forge'
import { isDefined, isEmpty } from '@typebot.io/lib'
import { HTTPError, got } from 'got'
import ky, { HTTPError } from 'ky'
import { apiBaseUrl } from '../constants'
import { auth } from '../auth'
import { ChatNodeResponse } from '../types'
@@ -40,7 +40,7 @@ export const sendMessage = createAction({
logs,
}) => {
try {
const res: ChatNodeResponse = await got
const res: ChatNodeResponse = await ky
.post(apiBaseUrl + botId, {
headers: {
Authorization: `Bearer ${apiKey}`,
@@ -66,7 +66,7 @@ export const sendMessage = createAction({
return logs.add({
status: 'error',
description: error.message,
details: error.response.body,
details: await error.response.text(),
})
console.error(error)
}

View File

@@ -11,6 +11,6 @@
"@typebot.io/tsconfig": "workspace:*",
"@types/react": "18.2.15",
"typescript": "5.3.2",
"got": "12.6.0"
"ky": "1.2.3"
}
}

View File

@@ -1,9 +1,9 @@
import { createAction, option } from '@typebot.io/forge'
import { isDefined, isEmpty, isNotEmpty } from '@typebot.io/lib'
import { HTTPError, got } from 'got'
import { auth } from '../auth'
import { defaultBaseUrl } from '../constants'
import { Chunk } from '../types'
import ky from 'ky'
export const createChatMessage = createAction({
auth,
@@ -44,13 +44,15 @@ export const createChatMessage = createAction({
logs,
}) => {
try {
const stream = got.post(
const response = await ky(
(apiEndpoint ?? defaultBaseUrl) + '/v1/chat-messages',
{
method: 'POST',
headers: {
Authorization: `Bearer ${apiKey}`,
'Content-Type': 'application/json',
},
json: {
body: JSON.stringify({
inputs:
inputs?.reduce((acc, { key, value }) => {
if (isEmpty(key) || isEmpty(value)) return acc
@@ -64,56 +66,70 @@ export const createChatMessage = createAction({
conversation_id,
user,
files: [],
},
isStream: true,
}),
}
)
const reader = response.body?.getReader()
if (!reader)
return logs.add({
status: 'error',
description: 'Failed to read response stream',
})
const { answer, conversationId, totalTokens } = await new Promise<{
answer: string
conversationId: string | undefined
totalTokens: number | undefined
}>((resolve, reject) => {
}>(async (resolve, reject) => {
let jsonChunk = ''
let answer = ''
let conversationId: string | undefined
let totalTokens: number | undefined
stream.on('data', (chunk) => {
const lines = chunk.toString().split('\n') as string[]
lines
.filter((line) => line.length > 0 && line !== '\n')
.forEach((line) => {
jsonChunk += line
if (jsonChunk.startsWith('event: ')) {
try {
while (true) {
const { value, done } = await reader.read()
if (done) {
resolve({ answer, conversationId, totalTokens })
return
}
const chunk = new TextDecoder().decode(value)
const lines = chunk.toString().split('\n') as string[]
lines
.filter((line) => line.length > 0 && line !== '\n')
.forEach((line) => {
jsonChunk += line
if (jsonChunk.startsWith('event: ')) {
jsonChunk = ''
return
}
if (
!jsonChunk.startsWith('data: ') ||
!jsonChunk.endsWith('}')
)
return
const data = JSON.parse(jsonChunk.slice(6)) as Chunk
jsonChunk = ''
return
}
if (!jsonChunk.startsWith('data: ') || !jsonChunk.endsWith('}'))
return
const data = JSON.parse(jsonChunk.slice(6)) as Chunk
jsonChunk = ''
if (
data.event === 'message' ||
data.event === 'agent_message'
) {
answer += data.answer
}
if (data.event === 'message_end') {
totalTokens = data.metadata.usage.total_tokens
conversationId = data.conversation_id
}
})
})
stream.on('end', () => {
resolve({ answer, conversationId, totalTokens })
})
stream.on('error', (error) => {
reject(error)
})
if (
data.event === 'message' ||
data.event === 'agent_message'
) {
answer += data.answer
}
if (data.event === 'message_end') {
totalTokens = data.metadata.usage.total_tokens
conversationId = data.conversation_id
}
})
}
} catch (e) {
reject(e)
}
})
responseMapping?.forEach((mapping) => {
@@ -130,12 +146,10 @@ export const createChatMessage = createAction({
variables.set(mapping.variableId, totalTokens)
})
} catch (error) {
if (error instanceof HTTPError)
return logs.add({
status: 'error',
description: error.message,
details: error.response.body,
})
logs.add({
status: 'error',
description: 'Failed to create chat message',
})
console.error(error)
}
},

View File

@@ -10,7 +10,7 @@
"@typebot.io/lib": "workspace:*",
"@typebot.io/tsconfig": "workspace:*",
"@types/react": "18.2.15",
"got": "12.6.0",
"ky": "1.2.3",
"typescript": "5.3.2"
}
}

View File

@@ -2,7 +2,7 @@ import { createAction, option } from '@typebot.io/forge'
import { auth } from '../auth'
import { baseUrl } from '../constants'
import { ModelsResponse, VoicesResponse } from '../type'
import got, { HTTPError } from 'got'
import got, { HTTPError } from 'ky'
import { uploadFileToBucket } from '@typebot.io/lib/s3/uploadFileToBucket'
import { createId } from '@typebot.io/lib/createId'
@@ -93,10 +93,10 @@ export const convertTextToSpeech = createAction({
text: options.text,
},
})
.buffer()
.arrayBuffer()
const url = await uploadFileToBucket({
file: response,
file: Buffer.from(response),
key: `tmp/elevenlabs/audio/${createId() + createId()}.mp3`,
mimeType: 'audio/mpeg',
})
@@ -107,7 +107,7 @@ export const convertTextToSpeech = createAction({
return logs.add({
status: 'error',
description: err.message,
details: err.response.body,
details: await err.response.text(),
})
}
}

View File

@@ -13,7 +13,7 @@
"typescript": "5.3.2"
},
"dependencies": {
"got": "12.6.0",
"ky": "1.2.3",
"@typebot.io/lib": "workspace:*"
}
}

View File

@@ -3,6 +3,8 @@ import { isDefined } from '@typebot.io/lib'
import { auth } from '../auth'
import { parseMessages } from '../helpers/parseMessages'
import { OpenAIStream } from 'ai'
// @ts-ignore
import MistralClient from '../helpers/client'
const nativeMessageContentSchema = {
content: option.string.layout({
@@ -95,15 +97,17 @@ export const createChatCompletion = createAction({
id: 'fetchModels',
dependencies: [],
fetch: async ({ credentials }) => {
const MistralClient = (await import('@mistralai/mistralai')).default
const client = new MistralClient(credentials.apiKey)
const listModelsResponse = await client.listModels()
const listModelsResponse: any = await client.listModels()
return (
listModelsResponse.data
.sort((a, b) => b.created - a.created)
.map((model) => model.id) ?? []
.sort(
(a: { created: number }, b: { created: number }) =>
b.created - a.created
)
.map((model: { id: any }) => model.id) ?? []
)
},
},
@@ -111,10 +115,9 @@ export const createChatCompletion = createAction({
run: {
server: async ({ credentials: { apiKey }, options, variables, logs }) => {
if (!options.model) return logs.add('No model selected')
const MistralClient = (await import('@mistralai/mistralai')).default
const client = new MistralClient(apiKey)
const response = await client.chat({
const response: any = await client.chat({
model: options.model,
messages: parseMessages({ options, variables }),
})
@@ -132,15 +135,13 @@ export const createChatCompletion = createAction({
)?.variableId,
run: async ({ credentials: { apiKey }, options, variables }) => {
if (!options.model) return
const MistralClient = (await import('@mistralai/mistralai')).default
const client = new MistralClient(apiKey)
const response = client.chatStream({
const response: any = client.chatStream({
model: options.model,
messages: parseMessages({ options, variables }),
})
// @ts-ignore https://github.com/vercel/ai/issues/936
return OpenAIStream(response)
},
},

View File

@@ -0,0 +1,341 @@
// Taken from https://github.com/mistralai/client-js/blob/main/src/client.js
// Lib seems not actively maintained, and we need this patch: https://github.com/mistralai/client-js/pull/42
// Module-level state shared by the MistralClient implementation below.
let isNode = false
let fetch
const VERSION = '0.0.3'
const RETRY_STATUS_CODES = [429, 500, 502, 503, 504]
const ENDPOINT = 'https://api.mistral.ai'

/**
 * Resolve the fetch implementation from the global scope and detect the
 * runtime environment.
 * Throws when no global fetch is available (e.g. Node < 18 without a polyfill).
 * @return {Promise<void>}
 */
async function initializeFetch() {
  if (typeof globalThis.fetch === 'undefined') {
    throw new Error('No fetch implementation found')
  }
  // No `window` global means we are running under Node rather than a browser.
  isNode = typeof window === 'undefined'
  fetch = globalThis.fetch
}

initializeFetch()
/**
* MistralAPIError
* @return {MistralAPIError}
* @extends {Error}
*/
/**
 * Error type raised for non-retryable Mistral API failures.
 * @return {MistralAPIError}
 * @extends {Error}
 */
class MistralAPIError extends Error {
  /**
   * @param {*} message human-readable description of the failure
   */
  constructor(message) {
    super(message)
    // Tag the error so callers can distinguish it via `error.name`
    // (the `instanceof` check is unreliable across bundled realms).
    this.name = 'MistralAPIError'
  }
}
/**
* MistralClient
* @return {MistralClient}
*/
/**
 * MistralClient — a simple and lightweight client for the Mistral API.
 * Vendored from https://github.com/mistralai/client-js (client.js) because the
 * upstream lib is not actively maintained.
 * @return {MistralClient}
 */
class MistralClient {
  /**
   * @param {*} apiKey can be set as an environment variable MISTRAL_API_KEY,
   * or provided in this parameter
   * @param {*} endpoint defaults to https://api.mistral.ai
   * @param {*} maxRetries defaults to 5
   * @param {*} timeout defaults to 120 seconds
   */
  constructor(
    apiKey = process.env.MISTRAL_API_KEY,
    endpoint = ENDPOINT,
    maxRetries = 5,
    timeout = 120
  ) {
    this.endpoint = endpoint
    this.apiKey = apiKey
    this.maxRetries = maxRetries
    this.timeout = timeout
    // Azure-hosted deployments expose a single model, so default to it.
    // BUGFIX: `indexOf(...)` returns -1 (truthy) when the substring is
    // absent, so the original truthiness check enabled the Azure default
    // for every endpoint, including the standard https://api.mistral.ai.
    if (this.endpoint.includes('inference.azure.com')) {
      this.modelDefault = 'mistral'
    }
  }

  /**
   * Perform an HTTP request against the API, retrying on transient failures.
   * @param {*} method lowercase HTTP verb ('get' | 'post')
   * @param {*} path path relative to the endpoint, without leading slash
   * @param {*} request JSON-serializable body (ignored for 'get')
   * @return {Promise<*>} parsed JSON, or a byte stream when request.stream is set
   */
  _request = async function (method, path, request) {
    const url = `${this.endpoint}/${path}`
    const options = {
      method: method,
      headers: {
        'User-Agent': `mistral-client-js/${VERSION}`,
        Accept: request?.stream ? 'text/event-stream' : 'application/json',
        'Content-Type': 'application/json',
        Authorization: `Bearer ${this.apiKey}`,
      },
      body: method !== 'get' ? JSON.stringify(request) : null,
      // NOTE(review): the Fetch API has no `timeout` option, so this field is
      // silently ignored — requests never time out. TODO: wire this through
      // an AbortSignal (AbortSignal.timeout) instead.
      timeout: this.timeout * 1000,
    }
    for (let attempts = 0; attempts < this.maxRetries; attempts++) {
      try {
        const response = await fetch(url, options)

        if (response.ok) {
          if (request?.stream) {
            if (isNode) {
              // Node's fetch body is already an async-iterable stream.
              return response.body
            } else {
              const reader = response.body.getReader()
              // Chrome does not support async iterators yet, so polyfill it
              const asyncIterator = async function* () {
                try {
                  while (true) {
                    // Read from the stream
                    const { done, value } = await reader.read()
                    // Exit if we're done
                    if (done) return
                    // Else yield the chunk
                    yield value
                  }
                } finally {
                  reader.releaseLock()
                }
              }
              return asyncIterator()
            }
          }
          return await response.json()
        } else if (RETRY_STATUS_CODES.includes(response.status)) {
          console.debug(
            `Retrying request on response status: ${response.status}`,
            `Response: ${await response.text()}`,
            `Attempt: ${attempts + 1}`
          )
          // Exponential backoff: 1s, 2s, 4s, ...
          // eslint-disable-next-line max-len
          await new Promise((resolve) =>
            setTimeout(resolve, Math.pow(2, attempts + 1) * 500)
          )
        } else {
          throw new MistralAPIError(
            `HTTP error! status: ${response.status} ` +
              `Response: \n${await response.text()}`
          )
        }
      } catch (error) {
        console.error(`Request failed: ${error.message}`)
        // API errors are final — do not retry a non-retryable status.
        if (error.name === 'MistralAPIError') {
          throw error
        }
        if (attempts === this.maxRetries - 1) throw error
        // eslint-disable-next-line max-len
        await new Promise((resolve) =>
          setTimeout(resolve, Math.pow(2, attempts + 1) * 500)
        )
      }
    }
    throw new Error('Max retries reached')
  }

  /**
   * Build the JSON payload for a chat completion request, mapping camelCase
   * parameters onto the API's snake_case fields and dropping nullish values.
   * @param {*} model
   * @param {*} messages
   * @param {*} tools
   * @param {*} temperature
   * @param {*} maxTokens
   * @param {*} topP
   * @param {*} randomSeed
   * @param {*} stream
   * @param {*} safeMode deprecated use safePrompt instead
   * @param {*} safePrompt
   * @param {*} toolChoice
   * @param {*} responseFormat
   * @return {Promise<Object>}
   */
  _makeChatCompletionRequest = function (
    model,
    messages,
    tools,
    temperature,
    maxTokens,
    topP,
    randomSeed,
    stream,
    safeMode,
    safePrompt,
    toolChoice,
    responseFormat
  ) {
    // if modelDefault and model are undefined, throw an error
    if (!model && !this.modelDefault) {
      throw new MistralAPIError('You must provide a model name')
    }
    return {
      model: model ?? this.modelDefault,
      messages: messages,
      tools: tools ?? undefined,
      temperature: temperature ?? undefined,
      max_tokens: maxTokens ?? undefined,
      top_p: topP ?? undefined,
      random_seed: randomSeed ?? undefined,
      stream: stream ?? undefined,
      safe_prompt: (safeMode || safePrompt) ?? undefined,
      tool_choice: toolChoice ?? undefined,
      response_format: responseFormat ?? undefined,
    }
  }

  /**
   * Returns a list of the available models
   * @return {Promise<Object>}
   */
  listModels = async function () {
    const response = await this._request('get', 'v1/models')
    return response
  }

  /**
   * A chat endpoint without streaming
   * @param {*} model the name of the model to chat with, e.g. mistral-tiny
   * @param {*} messages an array of messages to chat with, e.g.
   * [{role: 'user', content: 'What is the best French cheese?'}]
   * @param {*} tools a list of tools to use.
   * @param {*} temperature the temperature to use for sampling, e.g. 0.5
   * @param {*} maxTokens the maximum number of tokens to generate, e.g. 100
   * @param {*} topP the cumulative probability of tokens to generate, e.g. 0.9
   * @param {*} randomSeed the random seed to use for sampling, e.g. 42
   * @param {*} safeMode deprecated use safePrompt instead
   * @param {*} safePrompt whether to use safe mode, e.g. true
   * @param {*} toolChoice the tool to use, e.g. 'auto'
   * @param {*} responseFormat the format of the response, e.g. 'json_format'
   * @return {Promise<Object>}
   */
  chat = async function ({
    model,
    messages,
    tools,
    temperature,
    maxTokens,
    topP,
    randomSeed,
    safeMode,
    safePrompt,
    toolChoice,
    responseFormat,
  }) {
    const request = this._makeChatCompletionRequest(
      model,
      messages,
      tools,
      temperature,
      maxTokens,
      topP,
      randomSeed,
      false,
      safeMode,
      safePrompt,
      toolChoice,
      responseFormat
    )
    const response = await this._request('post', 'v1/chat/completions', request)
    return response
  }

  /**
   * A chat endpoint that streams responses.
   * @param {*} model the name of the model to chat with, e.g. mistral-tiny
   * @param {*} messages an array of messages to chat with, e.g.
   * [{role: 'user', content: 'What is the best French cheese?'}]
   * @param {*} tools a list of tools to use.
   * @param {*} temperature the temperature to use for sampling, e.g. 0.5
   * @param {*} maxTokens the maximum number of tokens to generate, e.g. 100
   * @param {*} topP the cumulative probability of tokens to generate, e.g. 0.9
   * @param {*} randomSeed the random seed to use for sampling, e.g. 42
   * @param {*} safeMode deprecated use safePrompt instead
   * @param {*} safePrompt whether to use safe mode, e.g. true
   * @param {*} toolChoice the tool to use, e.g. 'auto'
   * @param {*} responseFormat the format of the response, e.g. 'json_format'
   * @return {Promise<Object>}
   */
  chatStream = async function* ({
    model,
    messages,
    tools,
    temperature,
    maxTokens,
    topP,
    randomSeed,
    safeMode,
    safePrompt,
    toolChoice,
    responseFormat,
  }) {
    const request = this._makeChatCompletionRequest(
      model,
      messages,
      tools,
      temperature,
      maxTokens,
      topP,
      randomSeed,
      true,
      safeMode,
      safePrompt,
      toolChoice,
      responseFormat
    )
    const response = await this._request('post', 'v1/chat/completions', request)

    // Reassemble SSE lines from arbitrary byte chunks, then parse each
    // `data:` payload as JSON until the `[DONE]` sentinel.
    let buffer = ''
    const decoder = new TextDecoder()
    for await (const chunk of response) {
      buffer += decoder.decode(chunk, { stream: true })
      let firstNewline
      while ((firstNewline = buffer.indexOf('\n')) !== -1) {
        const chunkLine = buffer.substring(0, firstNewline)
        buffer = buffer.substring(firstNewline + 1)
        if (chunkLine.startsWith('data:')) {
          // NOTE(review): substring(6) assumes the server emits 'data: ' with
          // a single space after the colon — TODO confirm against the API.
          const json = chunkLine.substring(6).trim()
          if (json !== '[DONE]') {
            yield JSON.parse(json)
          }
        }
      }
    }
  }

  /**
   * An embeddings endpoint that returns embeddings for a single,
   * or batch of inputs
   * @param {*} model The embedding model to use, e.g. mistral-embed
   * @param {*} input The input to embed,
   * e.g. ['What is the best French cheese?']
   * @return {Promise<Object>}
   */
  embeddings = async function ({ model, input }) {
    const request = {
      model: model,
      input: input,
    }
    const response = await this._request('post', 'v1/embeddings', request)
    return response
  }
}

export default MistralClient

View File

@@ -9,11 +9,11 @@
"@typebot.io/forge": "workspace:*",
"@typebot.io/lib": "workspace:*",
"@typebot.io/tsconfig": "workspace:*",
"@types/node": "^20.12.4",
"@types/react": "18.2.15",
"typescript": "5.3.2"
},
"dependencies": {
"@mistralai/mistralai": "0.1.3",
"ai": "3.0.12"
}
}

View File

@@ -1,6 +1,6 @@
{
"extends": "@typebot.io/tsconfig/base.json",
"include": ["**/*.ts", "**/*.tsx"],
"include": ["**/*.ts", "**/*.tsx", "helpers/client.ts"],
"exclude": ["node_modules"],
"compilerOptions": {
"lib": ["ESNext", "DOM"],

View File

@@ -6,7 +6,7 @@ import { getChatCompletionStreamVarId } from '@typebot.io/openai-block/shared/ge
import { runChatCompletion } from '@typebot.io/openai-block/shared/runChatCompletion'
import { runChatCompletionStream } from '@typebot.io/openai-block/shared/runChatCompletionStream'
import { defaultOpenRouterOptions } from '../constants'
import { got } from 'got'
import ky from 'ky'
import { ModelsResponse } from '../types'
export const createChatCompletion = createAction({
@@ -42,7 +42,7 @@ export const createChatCompletion = createAction({
id: 'fetchModels',
dependencies: [],
fetch: async () => {
const response = await got
const response = await ky
.get(defaultOpenRouterOptions.baseUrl + '/models')
.json<ModelsResponse>()

View File

@@ -12,6 +12,6 @@
"typescript": "5.3.2",
"@typebot.io/lib": "workspace:*",
"@typebot.io/openai-block": "workspace:*",
"got": "12.6.0"
"ky": "1.2.3"
}
}

View File

@@ -1,4 +1,5 @@
import { createAction, option } from '@typebot.io/forge'
import { toBuffer as generateQrCodeBuffer } from 'qrcode'
import { uploadFileToBucket } from '@typebot.io/lib/s3/uploadFileToBucket'
import { createId } from '@typebot.io/lib/createId'
@@ -28,8 +29,6 @@ export const generateQrCode = createAction({
'QR code image URL is not specified. Please select a variable to save the generated QR code image.'
)
const generateQrCodeBuffer = (await import('qrcode')).toBuffer
const url = await uploadFileToBucket({
file: await generateQrCodeBuffer(options.data),
key: `tmp/qrcodes/${createId() + createId()}.png`,

View File

@@ -1,7 +1,7 @@
import { createAction, option } from '@typebot.io/forge'
import { isDefined } from '@typebot.io/lib'
import { ZemanticAiResponse } from '../types'
import { got } from 'got'
import ky from 'ky'
import { apiBaseUrl } from '../constants'
import { auth } from '../auth'
import { baseOptions } from '../baseOptions'
@@ -63,7 +63,7 @@ export const searchDocuments = createAction({
},
variables,
}) => {
const res: ZemanticAiResponse = await got
const res = await ky
.post(apiBaseUrl, {
headers: {
Authorization: `Bearer ${apiKey}`,
@@ -79,7 +79,7 @@ export const searchDocuments = createAction({
},
},
})
.json()
.json<ZemanticAiResponse>()
responseMapping?.forEach((mapping) => {
if (!mapping.variableId || !mapping.item) return

View File

@@ -8,6 +8,7 @@ export const auth = {
label: 'API key',
isRequired: true,
placeholder: 'ze...',
inputType: 'password',
helperText:
'You can generate an API key [here](https://zemantic.ai/dashboard/settings).',
isDebounceDisabled: true,

View File

@@ -1,6 +1,6 @@
import { createBlock } from '@typebot.io/forge'
import { ZemanticAiLogo } from './logo'
import { got } from 'got'
import ky from 'ky'
import { searchDocuments } from './actions/searchDocuments'
import { auth } from './auth'
import { baseOptions } from './baseOptions'
@@ -19,7 +19,7 @@ export const zemanticAiBlock = createBlock({
fetch: async ({ credentials: { apiKey } }) => {
const url = 'https://api.zemantic.ai/v1/projects'
const response = await got
const response = await ky
.get(url, {
headers: {
Authorization: `Bearer ${apiKey}`,

View File

@@ -11,6 +11,6 @@
"@types/react": "18.2.15",
"typescript": "5.3.2",
"@typebot.io/lib": "workspace:*",
"got": "12.6.0"
"ky": "1.2.3"
}
}