bot/packages/forge/blocks/mistral/actions/createChatCompletion.ts
Stergios ecec7023b9 Add Anthropic block (#1336)
Hello @baptisteArno,

As we discussed in issue #1315, we have created a basic implementation of
Anthropic's Claude AI block.
It is based on the OpenAI block and shares a similar structure.

The most notable changes in this PR are:
- Added the Claude AI block.
- Added relevant documentation for the new block.
- Formatted some other source files so that they pass the Git pre-commit
hook checks.

A few notes:
- Currently there is no way to dynamically fetch the available model
versions, since the SDK does not provide an endpoint for that.
  - All pre-version-3 Claude models are therefore hard-coded as constants
(see the first sketch after this list).
  - We have opened an issue about this on the SDK repository
[here](https://github.com/anthropics/anthropic-sdk-typescript/issues/313).
- In a follow-up PR we could implement Claude's new [Vision
system](https://docs.anthropic.com/claude/docs/vision), which allows for
image analysis and understanding (a second sketch follows the list).
  - This can be done in a later phase, provided you agree, of course.
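
For illustration, hard-coding the model list could look roughly like the sketch below. The constant name and the exact set of model IDs are assumptions for this example, not necessarily what the PR ships:

```ts
// Hypothetical sketch: the constant name and model list are assumptions.
export const anthropicModels = [
  'claude-3-opus-20240229',
  'claude-3-sonnet-20240229',
  'claude-2.1',
  'claude-2.0',
  'claude-instant-1.2',
] as const

export type AnthropicModel = (typeof anthropicModels)[number]
```

Likewise, here is a minimal sketch of what a Vision request looks like with the official `@anthropic-ai/sdk` package, assuming the image is already available as base64 (the `describeImage` wrapper and its parameter are made up for this example):

```ts
import Anthropic from '@anthropic-ai/sdk'

// Hypothetical wrapper: sends one image plus a text prompt to Claude 3.
async function describeImage(imageBase64: string) {
  const anthropic = new Anthropic({ apiKey: process.env.ANTHROPIC_API_KEY })
  return anthropic.messages.create({
    model: 'claude-3-opus-20240229',
    max_tokens: 1024,
    messages: [
      {
        role: 'user',
        content: [
          {
            type: 'image',
            source: {
              type: 'base64',
              media_type: 'image/jpeg',
              data: imageBase64,
            },
          },
          { type: 'text', text: 'Describe this image.' },
        ],
      },
    ],
  })
}
```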


<!-- This is an auto-generated comment: release notes by coderabbit.ai
-->

## Summary by CodeRabbit

- **New Features**
  - Introduced the Anthropic block for creating chat messages with Claude AI in Typebot.
  - Added functionality to create chat messages using the Anthropic AI SDK with configurable options.
  - Implemented encrypted credentials for Anthropic account integration.
  - Added constants and helpers for better handling of chat messages with Anthropic models.
  - Included the Anthropic block in the list of enabled and forged blocks for broader access.

<!-- end of auto-generated comment: release notes by coderabbit.ai -->

---------

Co-authored-by: Retr0-01 <contact@retr0.dev>
Co-authored-by: Baptiste Arnaud <baptiste.arnaud95@gmail.com>
Co-authored-by: Baptiste Arnaud <contact@baptiste-arnaud.fr>
2024-03-12 17:53:33 +00:00

136 lines · 3.6 KiB · TypeScript

import { option, createAction } from '@typebot.io/forge'
import { isDefined } from '@typebot.io/lib'
import { auth } from '../auth'
import MistralClient from '@mistralai/mistralai'
import { parseMessages } from '../helpers/parseMessages'
import { OpenAIStream } from 'ai'

// Shared `content` field used by all native (non-dialogue) message items.
const nativeMessageContentSchema = {
  content: option.string.layout({
    inputType: 'textarea',
    placeholder: 'Content',
  }),
}

const systemMessageItemSchema = option
  .object({
    role: option.literal('system'),
  })
  .extend(nativeMessageContentSchema)

const userMessageItemSchema = option
  .object({
    role: option.literal('user'),
  })
  .extend(nativeMessageContentSchema)

const assistantMessageItemSchema = option
  .object({
    role: option.literal('assistant'),
  })
  .extend(nativeMessageContentSchema)

// A "Dialogue" item expands a list variable into alternating
// user/assistant turns, starting with the role chosen in `startsBy`.
const dialogueMessageItemSchema = option.object({
  role: option.literal('Dialogue'),
  dialogueVariableId: option.string.layout({
    inputType: 'variableDropdown',
    placeholder: 'Dialogue variable',
  }),
  startsBy: option.enum(['user', 'assistant']).layout({
    label: 'starts by',
    direction: 'row',
    defaultValue: 'user',
  }),
})

export const options = option.object({
  model: option.string.layout({
    placeholder: 'Select a model',
    fetcher: 'fetchModels',
  }),
  messages: option
    .array(
      option.discriminatedUnion('role', [
        systemMessageItemSchema,
        userMessageItemSchema,
        assistantMessageItemSchema,
        dialogueMessageItemSchema,
      ])
    )
    .layout({ accordion: 'Messages', itemLabel: 'message', isOrdered: true }),
  responseMapping: option.saveResponseArray(['Message content']).layout({
    accordion: 'Save response',
  }),
})

export const createChatCompletion = createAction({
  name: 'Create chat completion',
  auth,
  options,
  turnableInto: [
    {
      blockType: 'openai',
    },
    {
      blockType: 'together-ai',
    },
    { blockType: 'open-router' },
    { blockType: 'anthropic' },
  ],
  getSetVariableIds: (options) =>
    options.responseMapping?.map((res) => res.variableId).filter(isDefined) ??
    [],
  fetchers: [
    {
      id: 'fetchModels',
      dependencies: [],
      // Unlike Anthropic, Mistral's SDK exposes a list endpoint, so the
      // model dropdown can be populated dynamically (newest first).
      fetch: async ({ credentials }) => {
        const client = new MistralClient(credentials.apiKey)
        const listModelsResponse = await client.listModels()
        return (
          listModelsResponse.data
            .sort((a, b) => b.created - a.created)
            .map((model) => model.id) ?? []
        )
      },
    },
  ],
  run: {
    server: async ({ credentials: { apiKey }, options, variables, logs }) => {
      if (!options.model) return logs.add('No model selected')
      const client = new MistralClient(apiKey)
      const response = await client.chat({
        model: options.model,
        messages: parseMessages({ options, variables }),
      })
      // Save the completion into every variable mapped to "Message content".
      options.responseMapping?.forEach((mapping) => {
        if (!mapping.variableId) return
        if (!mapping.item || mapping.item === 'Message content')
          variables.set(mapping.variableId, response.choices[0].message.content)
      })
    },
    stream: {
      getStreamVariableId: (options) =>
        options.responseMapping?.find(
          (res) => res.item === 'Message content' || !res.item
        )?.variableId,
      run: async ({ credentials: { apiKey }, options, variables }) => {
        if (!options.model) return
        const client = new MistralClient(apiKey)
        const response = client.chatStream({
          model: options.model,
          messages: parseMessages({ options, variables }),
        })
        // @ts-ignore https://github.com/vercel/ai/issues/936
        return OpenAIStream(response)
      },
    },
  },
})
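
For reference, the `parseMessages` helper imported above lives in `../helpers/parseMessages` and is not shown on this page. Below is a minimal sketch of the kind of flattening it presumably performs, with simplified types for the message items and the variable lookup; the real signature and behavior in the repository may differ:

```ts
type ChatMessage = {
  role: 'system' | 'user' | 'assistant'
  content: string
}

type MessageItem =
  | { role: 'system' | 'user' | 'assistant'; content?: string }
  | {
      role: 'Dialogue'
      dialogueVariableId?: string
      startsBy?: 'user' | 'assistant'
    }

// Hypothetical sketch: flatten the block's message items into the plain
// role/content list the Mistral client expects. A "Dialogue" item reads a
// list variable and alternates roles, beginning with `startsBy`.
const parseMessagesSketch = (
  messages: MessageItem[],
  getVariable: (id: string) => unknown
): ChatMessage[] =>
  messages.flatMap((message): ChatMessage[] => {
    if (message.role === 'Dialogue') {
      if (!message.dialogueVariableId) return []
      const dialogue = getVariable(message.dialogueVariableId)
      if (!Array.isArray(dialogue)) return []
      const startsBy = message.startsBy ?? 'user'
      return dialogue.map((entry, index) => ({
        role:
          index % 2 === 0
            ? startsBy
            : startsBy === 'user'
            ? 'assistant'
            : 'user',
        content: String(entry),
      }))
    }
    if (!message.content) return []
    return [{ role: message.role, content: message.content }]
  })
```

The discriminated union on `role` is what lets the builder UI render role-specific fields (a content textarea for native messages, a variable dropdown for dialogues) while a helper like this normalizes everything into the flat format the provider expects.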