(openai) Add new models and remove tiktoken

Instead of computing total tokens with tiktoken, we now retry the request after trimming the first message whenever the model's token limit is exceeded
Baptiste Arnaud
2023-06-16 16:50:23 +02:00
parent e54aab452a
commit 83f2a29faa
11 changed files with 331 additions and 245 deletions
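
In short, the new retry strategy works like this (a minimal sketch under assumptions: it uses the built-in Fetch API instead of the got client used in the commit, and askOpenAI is an illustrative name, not a function from this codebase):

// Sketch: OpenAI returns HTTP 400 when the context window is exceeded,
// so instead of pre-counting tokens with tiktoken we drop the oldest
// message and retry until the request fits.
const askOpenAI = async (
  apiKey: string,
  model: string,
  messages: { role: string; content: string }[]
): Promise<unknown> => {
  const res = await fetch('https://api.openai.com/v1/chat/completions', {
    method: 'POST',
    headers: {
      Authorization: `Bearer ${apiKey}`,
      'Content-Type': 'application/json',
    },
    body: JSON.stringify({ model, messages }),
  })
  if (res.status === 400 && messages.length > 1)
    return askOpenAI(apiKey, model, messages.slice(1)) // trim first message, retry
  if (!res.ok) throw new Error(`OpenAI API error - ${res.status}`)
  return res.json()
}

The committed executeChatCompletionOpenAIRequest (added below) implements the same idea with got, plus log accumulation for each trim and error.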

View File

@@ -1,5 +1,5 @@
 import { DropdownList } from '@/components/DropdownList'
-import { Textarea } from '@/components/inputs'
+import { Textarea, TextInput } from '@/components/inputs'
 import { VariableSearchInput } from '@/components/inputs/VariableSearchInput'
 import { TableListItemProps } from '@/components/TableList'
 import { Stack } from '@chakra-ui/react'
@@ -55,6 +55,11 @@ export const ChatCompletionMessageItem = ({ item, onItemChange }: Props) => {
     })
   }
 
+  const updateName = (name: string) => {
+    if (item.role === 'Messages sequence ✨') return
+    onItemChange({ ...item, name })
+  }
+
   return (
     <Stack p="4" rounded="md" flex="1" borderWidth="1px">
       <DropdownList
@@ -77,12 +82,19 @@ export const ChatCompletionMessageItem = ({ item, onItemChange }: Props) => {
           />
         </>
       ) : (
-        <Textarea
-          defaultValue={item.content}
-          onChange={changeSingleMessageContent}
-          placeholder="Content"
-          minH="150px"
-        />
+        <>
+          <Textarea
+            defaultValue={item.content}
+            onChange={changeSingleMessageContent}
+            placeholder="Content"
+            minH="150px"
+          />
+          <TextInput
+            defaultValue={item.name}
+            onChange={updateName}
+            placeholder="Name (Optional)"
+          />
+        </>
       )}
     </Stack>
   )

View File

@@ -1,8 +1,8 @@
-import { DropdownList } from '@/components/DropdownList'
 import { TableList } from '@/components/TableList'
 import {
   chatCompletionModels,
   ChatCompletionOpenAIOptions,
+  deprecatedCompletionModels,
 } from '@typebot.io/schemas/features/blocks/integrations/openai'
 import { ChatCompletionMessageItem } from './ChatCompletionMessageItem'
 import {
@@ -17,6 +17,7 @@ import {
 import { TextLink } from '@/components/TextLink'
 import { ChatCompletionResponseItem } from './ChatCompletionResponseItem'
 import { NumberInput } from '@/components/inputs'
+import { Select } from '@/components/inputs/Select'
 
 const apiReferenceUrl =
   'https://platform.openai.com/docs/api-reference/chat/create'
@@ -30,7 +31,11 @@ export const OpenAIChatCompletionSettings = ({
   options,
   onOptionsChange,
 }: Props) => {
-  const updateModel = (model: (typeof chatCompletionModels)[number]) => {
+  const updateModel = (
+    _: string | undefined,
+    model: (typeof chatCompletionModels)[number] | undefined
+  ) => {
+    if (!model) return
     onOptionsChange({
       ...options,
       model,
@@ -74,10 +79,12 @@ export const OpenAIChatCompletionSettings = ({
         </TextLink>{' '}
         to better understand the available options.
       </Text>
-      <DropdownList
-        currentItem={options.model}
-        items={chatCompletionModels}
-        onItemSelect={updateModel}
+      <Select
+        selectedItem={options.model}
+        items={chatCompletionModels.filter(
+          (model) => deprecatedCompletionModels.indexOf(model) === -1
+        )}
+        onSelect={updateModel}
       />
       <Accordion allowMultiple>
         <AccordionItem>

View File

@@ -1066,13 +1066,28 @@
         "additionalProperties": false
       },
       "min": {
-        "type": "number"
+        "anyOf": [
+          {
+            "type": "number"
+          },
+          {}
+        ]
       },
       "max": {
-        "type": "number"
+        "anyOf": [
+          {
+            "type": "number"
+          },
+          {}
+        ]
       },
       "step": {
-        "type": "number"
+        "anyOf": [
+          {
+            "type": "number"
+          },
+          {}
+        ]
       }
     },
     "required": [
@@ -1966,6 +1981,7 @@
       "Yesterday",
       "Tomorrow",
       "Random ID",
+      "Moment of the day",
       "Map item with same index"
     ]
   },
@@ -2833,12 +2849,17 @@
     "model": {
       "type": "string",
       "enum": [
-        "gpt-4",
-        "gpt-4-0314",
-        "gpt-4-32k",
-        "gpt-4-32k-0314",
         "gpt-3.5-turbo",
-        "gpt-3.5-turbo-0301"
+        "gpt-3.5-turbo-0613",
+        "gpt-3.5-turbo-16k",
+        "gpt-3.5-turbo-16k-0613",
+        "gpt-3.5-turbo-0301",
+        "gpt-4",
+        "gpt-4-0613",
+        "gpt-4-32k",
+        "gpt-4-32k-0613",
+        "gpt-4-32k-0314",
+        "gpt-4-0314"
       ]
     },
     "messages": {
@@ -2861,6 +2882,9 @@
         },
         "content": {
           "type": "string"
+        },
+        "name": {
+          "type": "string"
         }
       },
       "required": [

View File

@@ -639,13 +639,28 @@
         "additionalProperties": false
       },
       "min": {
-        "type": "number"
+        "anyOf": [
+          {
+            "type": "number"
+          },
+          {}
+        ]
       },
       "max": {
-        "type": "number"
+        "anyOf": [
+          {
+            "type": "number"
+          },
+          {}
+        ]
      },
       "step": {
-        "type": "number"
+        "anyOf": [
+          {
+            "type": "number"
+          },
+          {}
+        ]
       }
     },
     "required": [
@@ -1539,6 +1554,7 @@
       "Yesterday",
       "Tomorrow",
       "Random ID",
+      "Moment of the day",
       "Map item with same index"
     ]
   },
@@ -2406,12 +2422,17 @@
     "model": {
       "type": "string",
       "enum": [
-        "gpt-4",
-        "gpt-4-0314",
-        "gpt-4-32k",
-        "gpt-4-32k-0314",
         "gpt-3.5-turbo",
-        "gpt-3.5-turbo-0301"
+        "gpt-3.5-turbo-0613",
+        "gpt-3.5-turbo-16k",
+        "gpt-3.5-turbo-16k-0613",
+        "gpt-3.5-turbo-0301",
+        "gpt-4",
+        "gpt-4-0613",
+        "gpt-4-32k",
+        "gpt-4-32k-0613",
+        "gpt-4-32k-0314",
+        "gpt-4-0314"
      ]
    },
     "messages": {
@@ -2434,6 +2455,9 @@
         },
         "content": {
           "type": "string"
+        },
+        "name": {
+          "type": "string"
         }
       },
       "required": [
@@ -3862,13 +3886,28 @@
         "additionalProperties": false
       },
       "min": {
-        "type": "number"
+        "anyOf": [
+          {
+            "type": "number"
+          },
+          {}
+        ]
       },
       "max": {
-        "type": "number"
+        "anyOf": [
+          {
+            "type": "number"
+          },
+          {}
+        ]
       },
       "step": {
-        "type": "number"
+        "anyOf": [
+          {
+            "type": "number"
+          },
+          {}
+        ]
       }
     },
     "required": [

View File

@@ -13,7 +13,6 @@
     "test:report": "pnpm playwright show-report"
   },
   "dependencies": {
-    "@dqbd/tiktoken": "^1.0.7",
     "@planetscale/database": "^1.7.0",
     "@sentry/nextjs": "7.53.1",
     "@trpc/server": "10.27.3",

View File

@@ -1,36 +1,19 @@
 import { ExecuteIntegrationResponse } from '@/features/chat/types'
-import { transformStringVariablesToList } from '@/features/variables/transformVariablesToList'
 import prisma from '@/lib/prisma'
-import {
-  ChatReply,
-  SessionState,
-  Variable,
-  VariableWithValue,
-} from '@typebot.io/schemas'
+import { SessionState } from '@typebot.io/schemas'
 import {
   ChatCompletionOpenAIOptions,
   OpenAICredentials,
-  modelLimit,
 } from '@typebot.io/schemas/features/blocks/integrations/openai'
-import type {
-  ChatCompletionRequestMessage,
-  CreateChatCompletionRequest,
-  CreateChatCompletionResponse,
-} from 'openai'
-import { byId, isNotEmpty, isEmpty } from '@typebot.io/lib'
+import { isEmpty } from '@typebot.io/lib'
 import { decrypt, isCredentialsV2 } from '@typebot.io/lib/api/encryption'
-import { saveErrorLog } from '@/features/logs/saveErrorLog'
 import { updateVariables } from '@/features/variables/updateVariables'
-import { parseVariables } from '@/features/variables/parseVariables'
 import { parseVariableNumber } from '@/features/variables/parseVariableNumber'
-import { encoding_for_model } from '@dqbd/tiktoken'
-import got from 'got'
 import { resumeChatCompletion } from './resumeChatCompletion'
 import { isPlaneteScale } from '@/helpers/api/isPlanetScale'
 import { isVercel } from '@/helpers/api/isVercel'
+import { parseChatCompletionMessages } from './parseChatCompletionMessages'
+import { executeChatCompletionOpenAIRequest } from './executeChatCompletionOpenAIRequest'
 
-const minTokenCompletion = 200
-const createChatEndpoint = 'https://api.openai.com/v1/chat/completions'
-
 export const createChatCompletionOpenAI = async (
   state: SessionState,
@@ -63,9 +46,8 @@ export const createChatCompletionOpenAI = async (
     credentials.data,
     credentials.iv
   )) as OpenAICredentials['data']
-  const { variablesTransformedToList, messages } = parseMessages(
-    newSessionState.typebot.variables,
-    options.model
+  const { variablesTransformedToList, messages } = parseChatCompletionMessages(
+    newSessionState.typebot.variables
   )(options.messages)
   if (variablesTransformedToList.length > 0)
     newSessionState = await updateVariables(state)(variablesTransformedToList)
@@ -74,177 +56,37 @@ export const createChatCompletionOpenAI = async (
     options.advancedSettings?.temperature
   )
 
-  try {
-    if (
-      isPlaneteScale() &&
-      isVercel() &&
-      isCredentialsV2(credentials) &&
-      newSessionState.isStreamEnabled
-    )
-      return {
-        clientSideActions: [{ streamOpenAiChatCompletion: { messages } }],
-        outgoingEdgeId,
-        newSessionState,
-      }
-    const response = await got
-      .post(createChatEndpoint, {
-        headers: {
-          Authorization: `Bearer ${apiKey}`,
-        },
-        json: {
-          model: options.model,
-          messages,
-          temperature,
-        } satisfies CreateChatCompletionRequest,
-      })
-      .json<CreateChatCompletionResponse>()
-    const messageContent = response.choices.at(0)?.message?.content
-    const totalTokens = response.usage?.total_tokens
-    if (isEmpty(messageContent)) {
-      console.error('OpenAI block returned empty message', response)
-      return { outgoingEdgeId, newSessionState }
-    }
-    return resumeChatCompletion(newSessionState, {
-      options,
-      outgoingEdgeId,
-    })(messageContent, totalTokens)
-  } catch (err) {
-    const log: NonNullable<ChatReply['logs']>[number] = {
-      status: 'error',
-      description: 'OpenAI block returned error',
-    }
-    if (err && typeof err === 'object') {
-      if ('response' in err) {
-        const { status, data } = err.response as {
-          status: string
-          data: string
-        }
-        log.details = {
-          status,
-          data,
-        }
-      } else if ('message' in err) {
-        log.details = err.message
-      }
-    }
-    state.result &&
-      (await saveErrorLog({
-        resultId: state.result.id,
-        message: log.description,
-        details: log.details,
-      }))
-    return {
-      outgoingEdgeId,
-      logs: [log],
-      newSessionState,
-    }
-  }
-}
-
-const parseMessages =
-  (variables: Variable[], model: ChatCompletionOpenAIOptions['model']) =>
-  (
-    messages: ChatCompletionOpenAIOptions['messages']
-  ): {
-    variablesTransformedToList: VariableWithValue[]
-    messages: ChatCompletionRequestMessage[]
-  } => {
-    const variablesTransformedToList: VariableWithValue[] = []
-    const firstMessagesSequenceIndex = messages.findIndex(
-      (message) => message.role === 'Messages sequence ✨'
-    )
-    const parsedMessages = messages
-      .flatMap((message, index) => {
-        if (!message.role) return
-        if (message.role === 'Messages sequence ✨') {
-          if (
-            !message.content?.assistantMessagesVariableId ||
-            !message.content?.userMessagesVariableId
-          )
-            return
-          variablesTransformedToList.push(
-            ...transformStringVariablesToList(variables)([
-              message.content.assistantMessagesVariableId,
-              message.content.userMessagesVariableId,
-            ])
-          )
-          const updatedVariables = variables.map((variable) => {
-            const variableTransformedToList = variablesTransformedToList.find(
-              byId(variable.id)
-            )
-            if (variableTransformedToList) return variableTransformedToList
-            return variable
-          })
-          const userMessages = (updatedVariables.find(
-            (variable) =>
-              variable.id === message.content?.userMessagesVariableId
-          )?.value ?? []) as string[]
-          const assistantMessages = (updatedVariables.find(
-            (variable) =>
-              variable.id === message.content?.assistantMessagesVariableId
-          )?.value ?? []) as string[]
-          let allMessages: ChatCompletionRequestMessage[] = []
-          if (userMessages.length > assistantMessages.length)
-            allMessages = userMessages.flatMap((userMessage, index) => [
-              {
-                role: 'user',
-                content: userMessage,
-              },
-              { role: 'assistant', content: assistantMessages.at(index) ?? '' },
-            ]) satisfies ChatCompletionRequestMessage[]
-          else {
-            allMessages = assistantMessages.flatMap(
-              (assistantMessage, index) => [
-                { role: 'assistant', content: assistantMessage },
-                {
-                  role: 'user',
-                  content: userMessages.at(index) ?? '',
-                },
-              ]
-            ) satisfies ChatCompletionRequestMessage[]
-          }
-          if (index !== firstMessagesSequenceIndex) return allMessages
-          const encoder = encoding_for_model(model)
-          let messagesToSend: ChatCompletionRequestMessage[] = []
-          let tokenCount = 0
-          for (let i = allMessages.length - 1; i >= 0; i--) {
-            const message = allMessages[i]
-            const tokens = encoder.encode(message.content)
-            if (
-              tokenCount + tokens.length - minTokenCompletion >
-              modelLimit[model]
-            ) {
-              break
-            }
-            tokenCount += tokens.length
-            messagesToSend = [message, ...messagesToSend]
-          }
-          encoder.free()
-          return messagesToSend
-        }
-        return {
-          role: message.role,
-          content: parseVariables(variables)(message.content),
-        } satisfies ChatCompletionRequestMessage
-      })
-      .filter(
-        (message) => isNotEmpty(message?.role) && isNotEmpty(message?.content)
-      ) as ChatCompletionRequestMessage[]
-    return {
-      variablesTransformedToList,
-      messages: parsedMessages,
-    }
-  }
+  if (
+    isPlaneteScale() &&
+    isVercel() &&
+    isCredentialsV2(credentials) &&
+    newSessionState.isStreamEnabled
+  )
+    return {
+      clientSideActions: [{ streamOpenAiChatCompletion: { messages } }],
+      outgoingEdgeId,
+      newSessionState,
+    }
+  const { response, logs } = await executeChatCompletionOpenAIRequest({
+    apiKey,
+    messages,
+    model: options.model,
+    temperature,
+  })
+  if (!response)
+    return {
+      outgoingEdgeId,
+      logs,
+    }
+  const messageContent = response.choices.at(0)?.message?.content
+  const totalTokens = response.usage?.total_tokens
+  if (isEmpty(messageContent)) {
+    console.error('OpenAI block returned empty message', response)
+    return { outgoingEdgeId, newSessionState }
+  }
+  return resumeChatCompletion(newSessionState, {
+    options,
    outgoingEdgeId,
+    logs,
+  })(messageContent, totalTokens)
+}

View File

@@ -0,0 +1,74 @@
+import { ChatReply } from '@typebot.io/schemas'
+import got, { HTTPError } from 'got'
+import type {
+  CreateChatCompletionRequest,
+  CreateChatCompletionResponse,
+} from 'openai'
+
+const createChatEndpoint = 'https://api.openai.com/v1/chat/completions'
+
+type Props = Pick<CreateChatCompletionRequest, 'messages' | 'model'> & {
+  apiKey: string
+  temperature: number | undefined
+  currentLogs?: ChatReply['logs']
+  isRetrying?: boolean
+}
+
+export const executeChatCompletionOpenAIRequest = async ({
+  apiKey,
+  model,
+  messages,
+  temperature,
+  currentLogs = [],
+}: Props): Promise<{
+  response?: CreateChatCompletionResponse
+  logs?: ChatReply['logs']
+}> => {
+  const logs: ChatReply['logs'] = currentLogs
+  if (messages.length === 0) return { logs }
+  try {
+    const response = await got
+      .post(createChatEndpoint, {
+        headers: {
+          Authorization: `Bearer ${apiKey}`,
+        },
+        json: {
+          model,
+          messages,
+          temperature,
+        } satisfies CreateChatCompletionRequest,
+      })
+      .json<CreateChatCompletionResponse>()
+    return { response, logs }
+  } catch (error) {
+    if (error instanceof HTTPError) {
+      if (error.response.statusCode === 400) {
+        const log = {
+          status: 'info',
+          description:
+            'Max tokens limit reached, automatically trimming first message.',
+        }
+        logs.push(log)
+        return executeChatCompletionOpenAIRequest({
+          apiKey,
+          model,
+          messages: messages.slice(1),
+          temperature,
+          currentLogs: logs,
+        })
+      }
+      logs.push({
+        status: 'error',
+        description: `OpenAI API error - ${error.response.statusCode}`,
+        details: error.response.body,
+      })
+      return { logs }
+    }
+    logs.push({
+      status: 'error',
+      description: `Internal error`,
+    })
+    return { logs }
+  }
+}

View File

@@ -0,0 +1,90 @@
+import { parseVariables } from '@/features/variables/parseVariables'
+import { transformStringVariablesToList } from '@/features/variables/transformVariablesToList'
+import { byId, isNotEmpty } from '@typebot.io/lib'
+import { Variable, VariableWithValue } from '@typebot.io/schemas'
+import { ChatCompletionOpenAIOptions } from '@typebot.io/schemas/features/blocks/integrations/openai'
+import type { ChatCompletionRequestMessage } from 'openai'
+
+export const parseChatCompletionMessages =
+  (variables: Variable[]) =>
+  (
+    messages: ChatCompletionOpenAIOptions['messages']
+  ): {
+    variablesTransformedToList: VariableWithValue[]
+    messages: ChatCompletionRequestMessage[]
+  } => {
+    const variablesTransformedToList: VariableWithValue[] = []
+    const parsedMessages = messages
+      .flatMap((message) => {
+        if (!message.role) return
+        if (message.role === 'Messages sequence ✨') {
+          if (
+            !message.content?.assistantMessagesVariableId ||
+            !message.content?.userMessagesVariableId
+          )
+            return
+          variablesTransformedToList.push(
+            ...transformStringVariablesToList(variables)([
+              message.content.assistantMessagesVariableId,
+              message.content.userMessagesVariableId,
+            ])
+          )
+          const updatedVariables = variables.map((variable) => {
+            const variableTransformedToList = variablesTransformedToList.find(
+              byId(variable.id)
+            )
+            if (variableTransformedToList) return variableTransformedToList
+            return variable
+          })
+          const userMessages = (updatedVariables.find(
+            (variable) =>
+              variable.id === message.content?.userMessagesVariableId
+          )?.value ?? []) as string[]
+          const assistantMessages = (updatedVariables.find(
+            (variable) =>
+              variable.id === message.content?.assistantMessagesVariableId
+          )?.value ?? []) as string[]
+          let allMessages: ChatCompletionRequestMessage[] = []
+          if (userMessages.length > assistantMessages.length)
+            allMessages = userMessages.flatMap((userMessage, index) => [
+              {
+                role: 'user',
+                content: userMessage,
+              },
+              { role: 'assistant', content: assistantMessages.at(index) ?? '' },
+            ]) satisfies ChatCompletionRequestMessage[]
+          else {
+            allMessages = assistantMessages.flatMap(
+              (assistantMessage, index) => [
+                { role: 'assistant', content: assistantMessage },
+                {
+                  role: 'user',
+                  content: userMessages.at(index) ?? '',
+                },
+              ]
+            ) satisfies ChatCompletionRequestMessage[]
+          }
+          return allMessages
+        }
+        return {
+          role: message.role,
+          content: parseVariables(variables)(message.content),
+          name: message.name
+            ? parseVariables(variables)(message.name)
+            : undefined,
+        } satisfies ChatCompletionRequestMessage
+      })
+      .filter(
+        (message) => isNotEmpty(message?.role) && isNotEmpty(message?.content)
+      ) as ChatCompletionRequestMessage[]
+
+    return {
+      variablesTransformedToList,
+      messages: parsedMessages,
+    }
+  }

View File

@@ -1,7 +1,7 @@
 import { saveSuccessLog } from '@/features/logs/saveSuccessLog'
 import { updateVariables } from '@/features/variables/updateVariables'
 import { byId, isDefined } from '@typebot.io/lib'
-import { SessionState } from '@typebot.io/schemas'
+import { ChatReply, SessionState } from '@typebot.io/schemas'
 import { ChatCompletionOpenAIOptions } from '@typebot.io/schemas/features/blocks/integrations/openai'
 import { VariableWithUnknowValue } from '@typebot.io/schemas/features/typebot/variable'
@@ -11,7 +11,12 @@ export const resumeChatCompletion =
     {
       outgoingEdgeId,
       options,
-    }: { outgoingEdgeId?: string; options: ChatCompletionOpenAIOptions }
+      logs,
+    }: {
+      outgoingEdgeId?: string
+      options: ChatCompletionOpenAIOptions
+      logs?: ChatReply['logs']
+    }
   ) =>
   async (message: string, totalTokens?: number) => {
     let newSessionState = state
@@ -48,5 +53,6 @@ export const resumeChatCompletion =
     return {
       outgoingEdgeId,
       newSessionState,
+      logs,
     }
   }

View File

@@ -6,22 +6,21 @@ import { IntegrationBlockType } from './enums'
 export const openAITasks = ['Create chat completion', 'Create image'] as const
 
 export const chatCompletionModels = [
-  'gpt-4',
-  'gpt-4-0314',
-  'gpt-4-32k',
-  'gpt-4-32k-0314',
   'gpt-3.5-turbo',
+  'gpt-3.5-turbo-0613',
+  'gpt-3.5-turbo-16k',
+  'gpt-3.5-turbo-16k-0613',
   'gpt-3.5-turbo-0301',
+  'gpt-4',
+  'gpt-4-0613',
+  'gpt-4-32k',
+  'gpt-4-32k-0613',
+  'gpt-4-32k-0314',
+  'gpt-4-0314',
 ] as const
 
-export const modelLimit = {
-  'gpt-3.5-turbo': 4096,
-  'gpt-3.5-turbo-0301': 4096,
-  'gpt-4': 8192,
-  'gpt-4-0314': 8192,
-  'gpt-4-32k': 32768,
-  'gpt-4-32k-0314': 32768,
-} as const
+export const deprecatedCompletionModels: (typeof chatCompletionModels)[number][] =
+  ['gpt-3.5-turbo-0301', 'gpt-4-32k-0314', 'gpt-4-0314']
 
 export const chatCompletionMessageRoles = [
   'system',
@@ -52,6 +51,7 @@ export const chatCompletionMessageSchema = z.object({
   id: z.string(),
   role: z.enum(chatCompletionMessageRoles).optional(),
   content: z.string().optional(),
+  name: z.string().optional(),
 })
 
 const chatCompletionCustomMessageSchema = z.object({

pnpm-lock.yaml generated
View File

@@ -493,9 +493,6 @@ importers:
 
   apps/viewer:
     dependencies:
-      '@dqbd/tiktoken':
-        specifier: ^1.0.7
-        version: 1.0.7
       '@planetscale/database':
         specifier: ^1.7.0
         version: 1.7.0
@@ -4937,10 +4934,6 @@ packages:
       - webpack-cli
     dev: false
 
-  /@dqbd/tiktoken@1.0.7:
-    resolution: {integrity: sha512-bhR5k5W+8GLzysjk8zTMVygQZsgvf7W1F0IlL4ZQ5ugjo5rCyiwGM5d8DYriXspytfu98tv59niang3/T+FoDw==}
-    dev: false
-
   /@emotion/babel-plugin@11.11.0:
     resolution: {integrity: sha512-m4HEDZleaaCH+XgDDsPF15Ht6wTLsgDTeR3WYj9Q/k76JtWhrJjcP4+/XlG8LGT/Rol9qUfOIztXeA84ATpqPQ==}
     dependencies: