2
0

Add audio clips option on text input block

Closes #157
This commit is contained in:
Baptiste Arnaud
2024-08-20 14:35:20 +02:00
parent 984c2bf387
commit 135251d3f7
55 changed files with 1535 additions and 366 deletions

View File

@ -63,20 +63,15 @@ export const continueBotFlow = async (
setVariableHistory: SetVariableHistoryItem[]
}
> => {
let firstBubbleWasStreamed = false
let newSessionState = { ...state }
const visitedEdges: VisitedEdge[] = []
const setVariableHistory: SetVariableHistoryItem[] = []
if (!newSessionState.currentBlockId)
if (!state.currentBlockId)
return startBotFlow({
state: resetSessionState(newSessionState),
state: resetSessionState(state),
version,
textBubbleContentFormat,
})
const { block, group, blockIndex } = getBlockById(
newSessionState.currentBlockId,
state.currentBlockId,
state.typebotsQueue[0].typebot.groups
)
@ -86,7 +81,138 @@ export const continueBotFlow = async (
message: 'Group / block not found',
})
const nonInputProcessResult = await processNonInputBlock({
block,
state,
reply,
})
let newSessionState = nonInputProcessResult.newSessionState
const { setVariableHistory, firstBubbleWasStreamed } = nonInputProcessResult
let formattedReply: string | undefined
if (isInputBlock(block)) {
const parsedReplyResult = await parseReply(newSessionState)(reply, block)
if (parsedReplyResult.status === 'fail')
return {
...(await parseRetryMessage(newSessionState)(
block,
textBubbleContentFormat
)),
newSessionState,
visitedEdges: [],
setVariableHistory: [],
}
formattedReply =
'reply' in parsedReplyResult ? parsedReplyResult.reply : undefined
newSessionState = await processAndSaveAnswer(
state,
block
)(
isDefined(formattedReply)
? { ...reply, type: 'text', text: formattedReply }
: undefined
)
}
const groupHasMoreBlocks = blockIndex < group.blocks.length - 1
const { edgeId: nextEdgeId, isOffDefaultPath } = getOutgoingEdgeId(
newSessionState
)(block, formattedReply)
const lastMessageNewFormat =
reply?.type === 'text' && formattedReply !== reply?.text
? formattedReply
: undefined
if (groupHasMoreBlocks && !nextEdgeId) {
const chatReply = await executeGroup(
{
...group,
blocks: group.blocks.slice(blockIndex + 1),
} as Group,
{
version,
state: newSessionState,
visitedEdges: [],
setVariableHistory,
firstBubbleWasStreamed,
startTime,
textBubbleContentFormat,
}
)
return {
...chatReply,
lastMessageNewFormat,
}
}
if (!nextEdgeId && state.typebotsQueue.length === 1)
return {
messages: [],
newSessionState,
lastMessageNewFormat,
visitedEdges: [],
setVariableHistory,
}
const nextGroup = await getNextGroup({
state: newSessionState,
edgeId: nextEdgeId,
isOffDefaultPath,
})
newSessionState = nextGroup.newSessionState
if (!nextGroup.group)
return {
messages: [],
newSessionState,
lastMessageNewFormat,
visitedEdges: nextGroup.visitedEdge ? [nextGroup.visitedEdge] : [],
setVariableHistory,
}
const chatReply = await executeGroup(nextGroup.group, {
version,
state: newSessionState,
firstBubbleWasStreamed,
visitedEdges: nextGroup.visitedEdge ? [nextGroup.visitedEdge] : [],
setVariableHistory,
startTime,
textBubbleContentFormat,
})
return {
...chatReply,
lastMessageNewFormat,
}
}
const processNonInputBlock = async ({
block,
state,
reply,
}: {
block: Block
state: SessionState
reply: Reply
}) => {
if (reply?.type !== 'text')
return {
newSessionState: state,
setVariableHistory: [],
firstBubbleWasStreamed: false,
}
const setVariableHistory: SetVariableHistoryItem[] = []
let variableToUpdate: Variable | undefined
let newSessionState = state
let firstBubbleWasStreamed = false
if (block.type === LogicBlockType.SET_VARIABLE) {
const existingVariable = state.typebotsQueue[0].typebot.variables.find(
@ -169,107 +295,10 @@ export const continueBotFlow = async (
setVariableHistory.push(...newSetVariableHistory)
}
let formattedReply: string | undefined
if (isInputBlock(block)) {
const parsedReplyResult = await parseReply(newSessionState)(reply, block)
if (parsedReplyResult.status === 'fail')
return {
...(await parseRetryMessage(newSessionState)(
block,
textBubbleContentFormat
)),
newSessionState,
visitedEdges: [],
setVariableHistory: [],
}
formattedReply =
'reply' in parsedReplyResult ? parsedReplyResult.reply : undefined
newSessionState = await processAndSaveAnswer(
state,
block
)(
isDefined(formattedReply)
? { ...reply, type: 'text', text: formattedReply }
: undefined
)
}
const groupHasMoreBlocks = blockIndex < group.blocks.length - 1
const { edgeId: nextEdgeId, isOffDefaultPath } = getOutgoingEdgeId(
newSessionState
)(block, formattedReply)
if (groupHasMoreBlocks && !nextEdgeId) {
const chatReply = await executeGroup(
{
...group,
blocks: group.blocks.slice(blockIndex + 1),
} as Group,
{
version,
state: newSessionState,
visitedEdges,
setVariableHistory,
firstBubbleWasStreamed,
startTime,
textBubbleContentFormat,
}
)
return {
...chatReply,
lastMessageNewFormat:
formattedReply !== reply?.text ? formattedReply : undefined,
}
}
if (!nextEdgeId && state.typebotsQueue.length === 1)
return {
messages: [],
newSessionState,
lastMessageNewFormat:
formattedReply !== reply?.text ? formattedReply : undefined,
visitedEdges,
setVariableHistory,
}
const nextGroup = await getNextGroup({
state: newSessionState,
edgeId: nextEdgeId,
isOffDefaultPath,
})
if (nextGroup.visitedEdge) visitedEdges.push(nextGroup.visitedEdge)
newSessionState = nextGroup.newSessionState
if (!nextGroup.group)
return {
messages: [],
newSessionState,
lastMessageNewFormat:
formattedReply !== reply ? formattedReply : undefined,
visitedEdges,
setVariableHistory,
}
const chatReply = await executeGroup(nextGroup.group, {
version,
state: newSessionState,
firstBubbleWasStreamed,
visitedEdges,
setVariableHistory,
startTime,
textBubbleContentFormat,
})
return {
...chatReply,
lastMessageNewFormat:
formattedReply !== reply?.text ? formattedReply : undefined,
newSessionState,
setVariableHistory,
firstBubbleWasStreamed,
}
}
@ -284,7 +313,8 @@ const saveVariablesValueIfAny =
(state: SessionState, block: InputBlock) =>
(reply: Message): SessionState => {
if (!block.options?.variableId) return state
const newSessionState = saveAttachmentsVarIfAny({ block, reply, state })
let newSessionState = saveAttachmentsVarIfAny({ block, reply, state })
newSessionState = saveAudioClipVarIfAny({ block, reply, state })
return saveInputVarIfAny({ block, reply, state: newSessionState })
}
@ -298,6 +328,7 @@ const saveAttachmentsVarIfAny = ({
state: SessionState
}): SessionState => {
if (
reply.type !== 'text' ||
block.type !== InputBlockType.TEXT ||
!block.options?.attachments?.isEnabled ||
!block.options?.attachments?.saveVariableId ||
@ -330,6 +361,44 @@ const saveAttachmentsVarIfAny = ({
return updatedState
}
const saveAudioClipVarIfAny = ({
block,
reply,
state,
}: {
block: InputBlock
reply: Message
state: SessionState
}): SessionState => {
if (
reply.type !== 'audio' ||
block.type !== InputBlockType.TEXT ||
!block.options?.audioClip?.isEnabled ||
!block.options?.audioClip?.saveVariableId
)
return state
const variable = state.typebotsQueue[0].typebot.variables.find(
(variable) => variable.id === block.options?.attachments?.saveVariableId
)
if (!variable) return state
const { updatedState } = updateVariablesInSession({
newVariables: [
{
id: variable.id,
name: variable.name,
value: reply.url,
},
],
currentBlockId: undefined,
state,
})
return updatedState
}
const saveInputVarIfAny = ({
block,
reply,
@ -339,6 +408,8 @@ const saveInputVarIfAny = ({
reply: Message
state: SessionState
}): SessionState => {
if (reply.type !== 'text') return state
const foundVariable = state.typebotsQueue[0].typebot.variables.find(
(variable) => variable.id === block.options?.variableId
)
@ -411,11 +482,14 @@ const saveAnswerInDb =
(state: SessionState, block: InputBlock) =>
async (reply: Message): Promise<SessionState> => {
let newSessionState = state
const replyContent = reply.type === 'audio' ? reply.url : reply.text
const attachedFileUrls =
reply.type === 'text' ? reply.attachedFileUrls : undefined
await saveAnswer({
answer: {
blockId: block.id,
content: reply.text,
attachedFileUrls: reply.attachedFileUrls,
content: replyContent,
attachedFileUrls,
},
state,
})
@ -428,8 +502,8 @@ const saveAnswerInDb =
...newSessionState.previewMetadata,
answers: (newSessionState.previewMetadata?.answers ?? []).concat({
blockId: block.id,
content: reply.text,
attachedFileUrls: reply.attachedFileUrls,
content: replyContent,
attachedFileUrls,
}),
},
}
@ -443,9 +517,9 @@ const saveAnswerInDb =
return setNewAnswerInState(newSessionState)({
key: key ?? block.id,
value:
(reply.attachedFileUrls ?? []).length > 0
? `${reply.attachedFileUrls!.join(', ')}\n\n${reply.text}`
: reply.text,
(attachedFileUrls ?? []).length > 0
? `${attachedFileUrls!.join(', ')}\n\n${replyContent}`
: replyContent,
})
}
@ -534,13 +608,13 @@ const parseReply =
async (reply: Reply, block: InputBlock): Promise<ParsedReply> => {
switch (block.type) {
case InputBlockType.EMAIL: {
if (!reply) return { status: 'fail' }
if (!reply || reply.type !== 'text') return { status: 'fail' }
const formattedEmail = formatEmail(reply.text)
if (!formattedEmail) return { status: 'fail' }
return { status: 'success', reply: formattedEmail }
}
case InputBlockType.PHONE: {
if (!reply) return { status: 'fail' }
if (!reply || reply.type !== 'text') return { status: 'fail' }
const formattedPhone = formatPhoneNumber(
reply.text,
block.options?.defaultCountryCode
@ -549,17 +623,17 @@ const parseReply =
return { status: 'success', reply: formattedPhone }
}
case InputBlockType.URL: {
if (!reply) return { status: 'fail' }
if (!reply || reply.type !== 'text') return { status: 'fail' }
const isValid = isURL(reply.text, { require_protocol: false })
if (!isValid) return { status: 'fail' }
return { status: 'success', reply: reply.text }
}
case InputBlockType.CHOICE: {
if (!reply) return { status: 'fail' }
if (!reply || reply.type !== 'text') return { status: 'fail' }
return parseButtonsReply(state)(reply.text, block)
}
case InputBlockType.NUMBER: {
if (!reply) return { status: 'fail' }
if (!reply || reply.type !== 'text') return { status: 'fail' }
const isValid = validateNumber(reply.text, {
options: block.options,
variables: state.typebotsQueue[0].typebot.variables,
@ -568,7 +642,7 @@ const parseReply =
return { status: 'success', reply: parseNumber(reply.text) }
}
case InputBlockType.DATE: {
if (!reply) return { status: 'fail' }
if (!reply || reply.type !== 'text') return { status: 'fail' }
return parseDateReply(reply.text, block)
}
case InputBlockType.FILE: {
@ -576,34 +650,38 @@ const parseReply =
return block.options?.isRequired ?? defaultFileInputOptions.isRequired
? { status: 'fail' }
: { status: 'skip' }
const urls = reply.text.split(', ')
const replyValue = reply.type === 'audio' ? reply.url : reply.text
const urls = replyValue.split(', ')
const status = urls.some((url) =>
isURL(url, { require_tld: env.S3_ENDPOINT !== 'localhost' })
)
? 'success'
: 'fail'
if (!block.options?.isMultipleAllowed && urls.length > 1)
return { status, reply: reply.text.split(',')[0] }
return { status, reply: reply.text }
return { status, reply: replyValue.split(',')[0] }
return { status, reply: replyValue }
}
case InputBlockType.PAYMENT: {
if (!reply) return { status: 'fail' }
if (!reply || reply.type !== 'text') return { status: 'fail' }
if (reply.text === 'fail') return { status: 'fail' }
return { status: 'success', reply: reply.text }
}
case InputBlockType.RATING: {
if (!reply) return { status: 'fail' }
if (!reply || reply.type !== 'text') return { status: 'fail' }
const isValid = validateRatingReply(reply.text, block)
if (!isValid) return { status: 'fail' }
return { status: 'success', reply: reply.text }
}
case InputBlockType.PICTURE_CHOICE: {
if (!reply) return { status: 'fail' }
if (!reply || reply.type !== 'text') return { status: 'fail' }
return parsePictureChoicesReply(state)(reply.text, block)
}
case InputBlockType.TEXT: {
if (!reply) return { status: 'fail' }
return { status: 'success', reply: reply.text }
return {
status: 'success',
reply: reply.type === 'audio' ? reply.url : reply.text,
}
}
}
}

View File

@ -232,7 +232,11 @@ const getIncomingMessageContent = async ({
if (message.type === 'document') mediaId = message.document.id
if (!mediaId) return
const fileVisibility =
block?.type === InputBlockType.FILE
block?.type === InputBlockType.TEXT &&
block.options?.audioClip?.isEnabled &&
message.type === 'audio'
? block.options?.audioClip.visibility
: block?.type === InputBlockType.FILE
? block.options?.visibility
: block?.type === InputBlockType.TEXT
? block.options?.attachments?.visibility
@ -259,6 +263,11 @@ const getIncomingMessageContent = async ({
})
fileUrl = url
}
if (message.type === 'audio')
return {
type: 'audio',
url: fileUrl,
}
if (block?.type === InputBlockType.FILE) {
if (text !== '') text += `, ${fileUrl}`
else text = fileUrl

View File

@ -112,7 +112,7 @@ export const messageMatchStartCondition = (
startCondition: NonNullable<Settings['whatsApp']>['startCondition']
) => {
if (!startCondition) return true
if (!message?.text) return false
if (message?.type !== 'text' || !message.text) return false
return startCondition.logicalOperator === LogicalOperator.AND
? startCondition.comparisons.every((comparison) =>
matchComparison(

View File

@ -1,6 +1,6 @@
{
"name": "@typebot.io/js",
"version": "0.3.8",
"version": "0.3.9",
"description": "Javascript library to display typebots on your website",
"type": "module",
"main": "dist/index.js",

View File

@ -21,6 +21,7 @@ const indexConfig = {
output: {
file: 'dist/index.js',
format: 'es',
sourcemap: true,
},
onwarn,
watch: {
@ -63,6 +64,7 @@ const configs = [
output: {
file: 'dist/web.js',
format: 'es',
sourcemap: true,
},
},
]

View File

@ -456,6 +456,27 @@ select option {
transition: width 0.25s ease;
}
.typebot-recorder .left-gradient {
background-image: linear-gradient(
to right,
rgba(var(--typebot-input-bg-rgb), 1),
rgba(var(--typebot-input-bg-rgb), 0)
);
}
.typebot-recorder .right-gradient {
background-image: linear-gradient(
to left,
rgba(var(--typebot-input-bg-rgb), 1),
rgba(var(--typebot-input-bg-rgb), 0)
);
}
.typebot-recorder button {
color: rgba(var(--typebot-button-bg-rgb));
background-color: rgba(var(--typebot-button-bg-rgb), 0.3);
}
@keyframes fadeInFromTop {
0% {
opacity: 0;

View File

@ -38,6 +38,7 @@ import { CorsError } from '@/utils/CorsError'
import { Toaster, Toast } from '@ark-ui/solid'
import { CloseIcon } from './icons/CloseIcon'
import { toaster } from '@/utils/toaster'
import { setBotContainer } from '@/utils/botContainerSignal'
export type BotProps = {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
@ -285,16 +286,18 @@ const BotContent = (props: BotContentProps) => {
key: `typebot-${props.context.typebot.id}-progressValue`,
}
)
let botContainer: HTMLDivElement | undefined
let botContainerElement: HTMLDivElement | undefined
const resizeObserver = new ResizeObserver((entries) => {
return setIsMobile(entries[0].target.clientWidth < 400)
})
onMount(() => {
if (!botContainer) return
resizeObserver.observe(botContainer)
setBotContainerHeight(`${botContainer.clientHeight}px`)
if (!botContainerElement) return
console.log('yes')
setBotContainer(botContainerElement)
resizeObserver.observe(botContainerElement)
setBotContainerHeight(`${botContainerElement.clientHeight}px`)
})
createEffect(() => {
@ -304,22 +307,22 @@ const BotContent = (props: BotContentProps) => {
family: defaultFontFamily,
}
)
if (!botContainer) return
if (!botContainerElement) return
setCssVariablesValue(
props.initialChatReply.typebot.theme,
botContainer,
botContainerElement,
props.context.isPreview
)
})
onCleanup(() => {
if (!botContainer) return
resizeObserver.unobserve(botContainer)
if (!botContainerElement) return
resizeObserver.unobserve(botContainerElement)
})
return (
<div
ref={botContainer}
ref={botContainerElement}
class={clsx(
'relative flex w-full h-full text-base overflow-hidden flex-col justify-center items-center typebot-container',
props.class
@ -358,7 +361,7 @@ const BotContent = (props: BotContentProps) => {
props.initialChatReply.typebot.settings.general?.isBrandingEnabled
}
>
<LiteBadge botContainer={botContainer} />
<LiteBadge botContainer={botContainerElement} />
</Show>
<Toaster toaster={toaster}>
{(toast) => (

View File

@ -1,4 +1,8 @@
import { Answer, BotContext, ChatChunk as ChatChunkType } from '@/types'
import {
InputSubmitContent,
BotContext,
ChatChunk as ChatChunkType,
} from '@/types'
import { isMobile } from '@/utils/isMobileSignal'
import { ContinueChatResponse, Settings, Theme } from '@typebot.io/schemas'
import { createSignal, For, onMount, Show } from 'solid-js'
@ -23,7 +27,7 @@ type Props = Pick<ContinueChatResponse, 'messages' | 'input'> & {
isTransitionDisabled?: boolean
onNewBubbleDisplayed: (blockId: string) => Promise<void>
onScrollToBottom: (ref?: HTMLDivElement, offset?: number) => void
onSubmit: (answer?: string, attachments?: Answer['attachments']) => void
onSubmit: (answer?: InputSubmitContent) => void
onSkip: () => void
onAllBubblesDisplayed: () => void
}

View File

@ -4,6 +4,7 @@ import {
Theme,
ChatLog,
StartChatResponse,
Message,
} from '@typebot.io/schemas'
import {
createEffect,
@ -16,9 +17,9 @@ import {
import { continueChatQuery } from '@/queries/continueChatQuery'
import { ChatChunk } from './ChatChunk'
import {
Answer,
BotContext,
ChatChunk as ChatChunkType,
InputSubmitContent,
OutgoingLog,
} from '@/types'
import { isNotDefined } from '@typebot.io/lib'
@ -33,6 +34,7 @@ import {
import { saveClientLogsQuery } from '@/queries/saveClientLogsQuery'
import { HTTPError } from 'ky'
import { persist } from '@/utils/persist'
import { getAnswerContent } from '@/utils/getAnswerContent'
const autoScrollBottomToleranceScreenPercent = 0.6
const bottomSpacerHeight = 128
@ -142,15 +144,15 @@ export const ConversationContainer = (props: Props) => {
})
}
const sendMessage = async (
message?: string,
attachments?: Answer['attachments']
) => {
const sendMessage = async (answer?: InputSubmitContent) => {
setIsRecovered(false)
setHasError(false)
const currentInputBlock = [...chatChunks()].pop()?.input
if (currentInputBlock?.id && props.onAnswer && message)
props.onAnswer({ message, blockId: currentInputBlock.id })
if (currentInputBlock?.id && props.onAnswer && answer)
props.onAnswer({
message: getAnswerContent(answer),
blockId: currentInputBlock.id,
})
const longRequest = setTimeout(() => {
setIsSending(true)
}, 1000)
@ -158,13 +160,7 @@ export const ConversationContainer = (props: Props) => {
const { data, error } = await continueChatQuery({
apiHost: props.context.apiHost,
sessionId: props.initialChatReply.sessionId,
message: message
? {
type: 'text',
text: message,
attachedFileUrls: attachments?.map((attachment) => attachment.url),
}
: undefined,
message: convertSubmitContentToMessage(answer),
})
clearTimeout(longRequest)
setIsSending(false)
@ -294,7 +290,11 @@ export const ConversationContainer = (props: Props) => {
if (response && 'logs' in response) saveLogs(response.logs)
if (response && 'replyToSend' in response) {
setIsSending(false)
sendMessage(response.replyToSend)
sendMessage(
response.replyToSend
? { type: 'text', value: response.replyToSend }
: undefined
)
return
}
if (response && 'blockedPopupUrl' in response)
@ -364,3 +364,16 @@ const BottomSpacer = () => (
style={{ height: bottomSpacerHeight + 'px' }}
/>
)
const convertSubmitContentToMessage = (
answer: InputSubmitContent | undefined
): Message | undefined => {
if (!answer) return
if (answer.type === 'text')
return {
type: 'text',
text: answer.value,
attachedFileUrls: answer.attachments?.map((attachment) => attachment.url),
}
if (answer.type === 'recording') return { type: 'audio', url: answer.url }
}

View File

@ -15,7 +15,7 @@ import type {
DateInputBlock,
} from '@typebot.io/schemas'
import { GuestBubble } from './bubbles/GuestBubble'
import { Answer, BotContext, InputSubmitContent } from '@/types'
import { BotContext, InputSubmitContent } from '@/types'
import { TextInput } from '@/features/blocks/inputs/textInput'
import { NumberInput } from '@/features/blocks/inputs/number'
import { EmailInput } from '@/features/blocks/inputs/email'
@ -48,33 +48,24 @@ type Props = {
isInputPrefillEnabled: boolean
hasError: boolean
onTransitionEnd: () => void
onSubmit: (answer: string, attachments?: Answer['attachments']) => void
onSubmit: (content: InputSubmitContent) => void
onSkip: () => void
}
export const InputChatBlock = (props: Props) => {
const [answer, setAnswer] = persist(createSignal<Answer>(), {
const [answer, setAnswer] = persist(createSignal<InputSubmitContent>(), {
key: `typebot-${props.context.typebot.id}-input-${props.chunkIndex}`,
storage: props.context.storage,
})
const handleSubmit = async ({
label,
value,
attachments,
}: InputSubmitContent & Pick<Answer, 'attachments'>) => {
setAnswer({
text: props.block.type !== InputBlockType.FILE ? label ?? value : '',
attachments,
})
props.onSubmit(
value ?? label,
props.block.type === InputBlockType.FILE ? undefined : attachments
)
const handleSubmit = async (content: InputSubmitContent) => {
console.log(content)
setAnswer(content)
props.onSubmit(content)
}
const handleSkip = (label: string) => {
setAnswer({ text: label })
setAnswer({ type: 'text', value: label })
props.onSkip()
}
@ -83,14 +74,18 @@ export const InputChatBlock = (props: Props) => {
(message) => props.chunkIndex === message.inputIndex
)?.formattedMessage
if (formattedMessage && props.block.type !== InputBlockType.FILE)
setAnswer((answer) => ({ ...answer, text: formattedMessage }))
setAnswer((answer) =>
answer?.type === 'text'
? { ...answer, label: formattedMessage }
: answer
)
})
return (
<Switch>
<Match when={answer() && !props.hasError}>
<GuestBubble
message={answer() as Answer}
answer={answer()}
showAvatar={
props.guestAvatar?.isEnabled ?? defaultGuestAvatarIsEnabled
}
@ -117,7 +112,9 @@ export const InputChatBlock = (props: Props) => {
block={props.block}
chunkIndex={props.chunkIndex}
isInputPrefillEnabled={props.isInputPrefillEnabled}
existingAnswer={props.hasError ? answer()?.text : undefined}
existingAnswer={
props.hasError ? getAnswerValue(answer()!) : undefined
}
onTransitionEnd={props.onTransitionEnd}
onSubmit={handleSubmit}
onSkip={handleSkip}
@ -128,6 +125,11 @@ export const InputChatBlock = (props: Props) => {
)
}
const getAnswerValue = (answer?: InputSubmitContent) => {
if (!answer) return
return answer.type === 'text' ? answer.value : answer.url
}
const Input = (props: {
context: BotContext
block: NonNullable<ContinueChatResponse['input']>
@ -146,6 +148,7 @@ const Input = (props: {
const submitPaymentSuccess = () =>
props.onSubmit({
type: 'text',
value:
(props.block.options as PaymentInputBlock['options'])?.labels
?.success ?? defaultPaymentInputOptions.labels.success,

View File

@ -1,22 +1,24 @@
import { createSignal, For, Show } from 'solid-js'
import { createSignal, For, Show, Switch, Match } from 'solid-js'
import { Avatar } from '../avatars/Avatar'
import { isMobile } from '@/utils/isMobileSignal'
import { Answer } from '@/types'
import {
InputSubmitContent,
RecordingInputSubmitContent,
TextInputSubmitContent,
} from '@/types'
import { Modal } from '../Modal'
import { isNotEmpty } from '@typebot.io/lib'
import { FilePreview } from '@/features/blocks/inputs/fileUpload/components/FilePreview'
import clsx from 'clsx'
type Props = {
message: Answer
answer?: InputSubmitContent
showAvatar: boolean
avatarSrc?: string
hasHostAvatar: boolean
}
export const GuestBubble = (props: Props) => {
const [clickedImageSrc, setClickedImageSrc] = createSignal<string>()
return (
<div
class="flex justify-end items-end animate-fade-in gap-2 guest-container"
@ -28,65 +30,87 @@ export const GuestBubble = (props: Props) => {
: undefined,
}}
>
<div class="flex flex-col gap-1 items-end">
<Show when={(props.message.attachments ?? []).length > 0}>
<div
class={clsx(
'flex gap-1 overflow-auto max-w-[350px]',
isMobile() ? 'flex-wrap justify-end' : 'items-center'
)}
>
<For
each={props.message.attachments?.filter((attachment) =>
attachment.type.startsWith('image')
)}
>
{(attachment, idx) => (
<img
src={attachment.url}
alt={`Attached image ${idx() + 1}`}
class={clsx(
'typebot-guest-bubble-image-attachment cursor-pointer',
props.message.attachments!.filter((attachment) =>
attachment.type.startsWith('image')
).length > 1 && 'max-w-[90%]'
)}
onClick={() => setClickedImageSrc(attachment.url)}
/>
)}
</For>
</div>
<div
class={clsx(
'flex gap-1 overflow-auto max-w-[350px]',
isMobile() ? 'flex-wrap justify-end' : 'items-center'
)}
>
<For
each={props.message.attachments?.filter(
(attachment) => !attachment.type.startsWith('image')
)}
>
{(attachment) => (
<FilePreview
file={{
name: attachment.url.split('/').at(-1)!,
}}
/>
)}
</For>
</div>
</Show>
<div
class="p-[1px] whitespace-pre-wrap max-w-full typebot-guest-bubble flex flex-col"
data-testid="guest-bubble"
>
<Show when={isNotEmpty(props.message.text)}>
<span class="px-[15px] py-[7px]">{props.message.text}</span>
</Show>
</div>
</div>
<Switch>
<Match when={props.answer?.type === 'text'}>
<TextGuestBubble answer={props.answer as TextInputSubmitContent} />
</Match>
<Match when={props.answer?.type === 'recording'}>
<AudioGuestBubble
answer={props.answer as RecordingInputSubmitContent}
/>
</Match>
</Switch>
<Show when={props.showAvatar}>
<Avatar initialAvatarSrc={props.avatarSrc} />
</Show>
</div>
)
}
const TextGuestBubble = (props: { answer: TextInputSubmitContent }) => {
const [clickedImageSrc, setClickedImageSrc] = createSignal<string>()
return (
<div class="flex flex-col gap-1 items-end">
<Show when={(props.answer.attachments ?? []).length > 0}>
<div
class={clsx(
'flex gap-1 overflow-auto max-w-[350px]',
isMobile() ? 'flex-wrap justify-end' : 'items-center'
)}
>
<For
each={props.answer.attachments?.filter((attachment) =>
attachment.type.startsWith('image')
)}
>
{(attachment, idx) => (
<img
src={attachment.url}
alt={`Attached image ${idx() + 1}`}
class={clsx(
'typebot-guest-bubble-image-attachment cursor-pointer',
props.answer.attachments!.filter((attachment) =>
attachment.type.startsWith('image')
).length > 1 && 'max-w-[90%]'
)}
onClick={() => setClickedImageSrc(attachment.url)}
/>
)}
</For>
</div>
<div
class={clsx(
'flex gap-1 overflow-auto max-w-[350px]',
isMobile() ? 'flex-wrap justify-end' : 'items-center'
)}
>
<For
each={props.answer.attachments?.filter(
(attachment) => !attachment.type.startsWith('image')
)}
>
{(attachment) => (
<FilePreview
file={{
name: attachment.url.split('/').at(-1)!,
}}
/>
)}
</For>
</div>
</Show>
<div
class="p-[1px] whitespace-pre-wrap max-w-full typebot-guest-bubble flex flex-col"
data-testid="guest-bubble"
>
<Show when={isNotEmpty(props.answer.label ?? props.answer.value)}>
<span class="px-[15px] py-[7px]">
{props.answer.label ?? props.answer.value}
</span>
</Show>
</div>
<Modal
isOpen={clickedImageSrc() !== undefined}
onClose={() => setClickedImageSrc(undefined)}
@ -97,9 +121,19 @@ export const GuestBubble = (props: Props) => {
style={{ 'border-radius': '6px' }}
/>
</Modal>
<Show when={props.showAvatar}>
<Avatar initialAvatarSrc={props.avatarSrc} />
</Show>
</div>
)
}
const AudioGuestBubble = (props: { answer: RecordingInputSubmitContent }) => {
return (
<div class="flex flex-col gap-1 items-end w-full">
<div
class="p-2 w-full whitespace-pre-wrap typebot-guest-bubble flex flex-col max-w-[316px]"
data-testid="guest-bubble"
>
<audio controls src={props.answer.url} class="w-full h-[54px]" />
</div>
</div>
)
}

View File

@ -4,6 +4,7 @@ import { CustomEmbedBubble } from '@/features/blocks/bubbles/embed/components/Cu
import { ImageBubble } from '@/features/blocks/bubbles/image'
import { TextBubble } from '@/features/blocks/bubbles/textBubble'
import { VideoBubble } from '@/features/blocks/bubbles/video'
import { InputSubmitContent } from '@/types'
import type {
AudioBubbleBlock,
ChatMessage,
@ -22,7 +23,7 @@ type Props = {
typingEmulation: Settings['typingEmulation']
isTypingSkipped: boolean
onTransitionEnd?: (ref?: HTMLDivElement) => void
onCompleted: (reply?: string) => void
onCompleted: (reply?: InputSubmitContent) => void
}
export const HostBubble = (props: Props) => (

View File

@ -0,0 +1,12 @@
import { JSX } from 'solid-js/jsx-runtime'
export const MicrophoneIcon = (props: JSX.SvgSVGAttributes<SVGSVGElement>) => (
<svg
viewBox="0 0 384 512"
stroke="currentColor"
fill="currentColor"
{...props}
>
<path d="M192 0C139 0 96 43 96 96l0 160c0 53 43 96 96 96s96-43 96-96l0-160c0-53-43-96-96-96zM64 216c0-13.3-10.7-24-24-24s-24 10.7-24 24l0 40c0 89.1 66.2 162.7 152 174.4l0 33.6-48 0c-13.3 0-24 10.7-24 24s10.7 24 24 24l72 0 72 0c13.3 0 24-10.7 24-24s-10.7-24-24-24l-48 0 0-33.6c85.8-11.7 152-85.3 152-174.4l0-40c0-13.3-10.7-24-24-24s-24 10.7-24 24l0 40c0 70.7-57.3 128-128 128s-128-57.3-128-128l0-40z" />
</svg>
)

View File

@ -5,11 +5,12 @@ import { clsx } from 'clsx'
import { CustomEmbedBubble as CustomEmbedBubbleProps } from '@typebot.io/schemas'
import { executeCode } from '@/features/blocks/logic/script/executeScript'
import { botContainerHeight } from '@/utils/botContainerHeightSignal'
import { InputSubmitContent } from '@/types'
type Props = {
content: CustomEmbedBubbleProps['content']
onTransitionEnd?: (ref?: HTMLDivElement) => void
onCompleted: (reply?: string) => void
onCompleted: (reply?: InputSubmitContent) => void
}
let typingTimeout: NodeJS.Timeout
@ -36,7 +37,8 @@ export const CustomEmbedBubble = (props: Props) => {
executeCode({
args: {
...props.content.waitForEventFunction.args,
continueFlow: props.onCompleted,
continueFlow: (text: string) =>
props.onCompleted(text ? { type: 'text', value: text } : undefined),
},
content: props.content.waitForEventFunction.content,
})

View File

@ -5,11 +5,12 @@ import { clsx } from 'clsx'
import { EmbedBubbleBlock } from '@typebot.io/schemas'
import { defaultEmbedBubbleContent } from '@typebot.io/schemas/features/blocks/bubbles/embed/constants'
import { isNotEmpty } from '@typebot.io/lib/utils'
import { InputSubmitContent } from '@/types'
type Props = {
content: EmbedBubbleBlock['content']
onTransitionEnd?: (ref?: HTMLDivElement) => void
onCompleted?: (data?: string) => void
onCompleted?: (data?: InputSubmitContent) => void
}
let typingTimeout: NodeJS.Timeout
@ -32,7 +33,10 @@ export const EmbedBubble = (props: Props) => {
) {
props.onCompleted?.(
props.content.waitForEvent.saveDataInVariableId && event.data.data
? event.data.data
? {
type: 'text',
value: event.data.data,
}
: undefined
)
window.removeEventListener('message', handleMessage)

View File

@ -22,7 +22,10 @@ export const Buttons = (props: Props) => {
})
const handleClick = (itemIndex: number) =>
props.onSubmit({ value: filteredItems()[itemIndex].content ?? '' })
props.onSubmit({
type: 'text',
value: filteredItems()[itemIndex].content ?? '',
})
const filterItems = (inputValue: string) => {
setFilteredItems(

View File

@ -39,6 +39,7 @@ export const MultipleChoicesForm = (props: Props) => {
const handleSubmit = () =>
props.onSubmit({
type: 'text',
value: selectedItemIds()
.map(
(selectedItemId) =>

View File

@ -19,6 +19,7 @@ export const DateForm = (props: Props) => {
const submit = () => {
if (inputValues().from === '' && inputValues().to === '') return
props.onSubmit({
type: 'text',
value: `${inputValues().from}${
props.options?.isRange ? ` to ${inputValues().to}` : ''
}`,

View File

@ -24,7 +24,7 @@ export const EmailInput = (props: Props) => {
const submit = () => {
if (checkIfInputIsValid())
props.onSubmit({ value: inputRef?.value ?? inputValue() })
props.onSubmit({ type: 'text', value: inputRef?.value ?? inputValue() })
else inputRef?.focus()
}

View File

@ -78,6 +78,7 @@ export const FileUploadForm = (props: Props) => {
setIsUploading(false)
if (urls.length && urls[0])
return props.onSubmit({
type: 'text',
label:
props.block.options?.labels?.success?.single ??
defaultFileInputOptions.labels.success.single,
@ -107,6 +108,7 @@ export const FileUploadForm = (props: Props) => {
description: 'An error occured while uploading the files',
})
props.onSubmit({
type: 'text',
label:
urls.length > 1
? (

View File

@ -27,7 +27,10 @@ export const NumberInput = (props: NumberInputProps) => {
const submit = () => {
if (checkIfInputIsValid())
props.onSubmit({ value: inputRef?.value ?? inputValue().toString() })
props.onSubmit({
type: 'text',
value: inputRef?.value ?? inputValue().toString(),
})
else inputRef?.focus()
}

View File

@ -66,6 +66,7 @@ export const PhoneInput = (props: PhoneInputProps) => {
if (checkIfInputIsValid()) {
const val = inputRef?.value ?? inputValue()
props.onSubmit({
type: 'text',
value: val.startsWith('+')
? val
: `${selectedCountryDialCode ?? ''}${val}`,

View File

@ -42,6 +42,7 @@ export const MultiplePictureChoice = (props: Props) => {
const handleSubmit = () =>
props.onSubmit({
type: 'text',
value: selectedItemIds()
.map((selectedItemId) => {
const item = props.defaultItems.find(

View File

@ -24,6 +24,7 @@ export const SinglePictureChoice = (props: Props) => {
const handleClick = (itemIndex: number) => {
const item = filteredItems()[itemIndex]
return props.onSubmit({
type: 'text',
label: isNotEmpty(item.title) ? item.title : item.pictureSrc ?? item.id,
value: item.id,
})

View File

@ -21,12 +21,13 @@ export const RatingForm = (props: Props) => {
e.preventDefault()
const selectedRating = rating()
if (isNotDefined(selectedRating)) return
props.onSubmit({ value: selectedRating.toString() })
props.onSubmit({ type: 'text', value: selectedRating.toString() })
}
const handleClick = (rating: number) => {
if (props.block.options?.isOneClickSubmitEnabled)
props.onSubmit({ value: rating.toString() })
props.onSubmit({ type: 'text', value: rating.toString() })
setRating(rating)
}

View File

@ -1,10 +1,18 @@
import { Textarea, ShortTextInput } from '@/components'
import { SendButton } from '@/components/SendButton'
import { CommandData } from '@/features/commands'
import { Answer, BotContext, InputSubmitContent } from '@/types'
import { Attachment, BotContext, InputSubmitContent } from '@/types'
import { isMobile } from '@/utils/isMobileSignal'
import type { TextInputBlock } from '@typebot.io/schemas'
import { For, Show, createSignal, onCleanup, onMount } from 'solid-js'
import {
For,
Match,
Show,
Switch,
createSignal,
onCleanup,
onMount,
} from 'solid-js'
import { defaultTextInputOptions } from '@typebot.io/schemas/features/blocks/inputs/text/constants'
import clsx from 'clsx'
import { TextInputAddFileButton } from '@/components/TextInputAddFileButton'
@ -15,6 +23,9 @@ import { toaster } from '@/utils/toaster'
import { isDefined } from '@typebot.io/lib'
import { uploadFiles } from '../../fileUpload/helpers/uploadFiles'
import { guessApiHost } from '@/utils/guessApiHost'
import { VoiceRecorder } from './VoiceRecorder'
import { Button } from '@/components/Button'
import { MicrophoneIcon } from '@/components/icons/MicrophoneIcon'
type Props = {
block: TextInputBlock
@ -30,7 +41,10 @@ export const TextInput = (props: Props) => {
{ fileIndex: number; progress: number } | undefined
>(undefined)
const [isDraggingOver, setIsDraggingOver] = createSignal(false)
const [isRecording, setIsRecording] = createSignal(false)
let inputRef: HTMLInputElement | HTMLTextAreaElement | undefined
let mediaRecorder: MediaRecorder | undefined
let recordedChunks: Blob[] = []
const handleInput = (inputValue: string) => setInputValue(inputValue)
@ -38,8 +52,12 @@ export const TextInput = (props: Props) => {
inputRef?.value !== '' && inputRef?.reportValidity()
const submit = async () => {
if (isRecording() && mediaRecorder) {
mediaRecorder.stop()
return
}
if (checkIfInputIsValid()) {
let attachments: Answer['attachments']
let attachments: Attachment[] | undefined
if (selectedFiles().length > 0) {
setUploadProgress(undefined)
const urls = await uploadFiles({
@ -57,6 +75,7 @@ export const TextInput = (props: Props) => {
attachments = urls?.filter(isDefined)
}
props.onSubmit({
type: 'text',
value: inputRef?.value ?? inputValue(),
attachments,
})
@ -137,6 +156,59 @@ export const TextInput = (props: Props) => {
)
}
const recordVoice = () => {
setIsRecording(true)
}
const handleRecordingStart = (stream: MediaStream) => {
mediaRecorder = new MediaRecorder(stream)
mediaRecorder.ondataavailable = (event) => {
if (event.data.size === 0) return
recordedChunks.push(event.data)
}
mediaRecorder.onstop = async () => {
if (!isRecording() || recordedChunks.length === 0) return
const audioFile = new File(
recordedChunks,
`rec-${props.block.id}-${Date.now()}.mp3`,
{
type: 'audio/mp3',
}
)
setUploadProgress(undefined)
const urls = (
await uploadFiles({
apiHost:
props.context.apiHost ?? guessApiHost({ ignoreChatApiUrl: true }),
files: [
{
file: audioFile,
input: {
sessionId: props.context.sessionId,
fileName: audioFile.name,
},
},
],
onUploadProgress: setUploadProgress,
})
)
.filter(isDefined)
.map((url) => url.url)
props.onSubmit({
type: 'recording',
url: urls[0],
})
}
mediaRecorder.start()
}
const handleRecordingAbort = () => {
setIsRecording(false)
mediaRecorder?.stop()
mediaRecorder = undefined
recordedChunks = []
}
return (
<div
class={clsx(
@ -150,85 +222,111 @@ export const TextInput = (props: Props) => {
>
<div
class={clsx(
'typebot-input flex-col w-full',
'relative typebot-input flex-col w-full',
isDraggingOver() && 'filter brightness-95'
)}
>
<Show when={selectedFiles().length}>
<VoiceRecorder
isRecording={isRecording()}
buttonsTheme={props.context.typebot.theme.chat?.buttons}
onRecordingStart={handleRecordingStart}
onAbortRecording={handleRecordingAbort}
/>
<Show when={!isRecording()}>
<Show when={selectedFiles().length}>
<div
class="p-2 flex gap-2 border-gray-100 overflow-auto"
style={{ 'border-bottom-width': '1px' }}
>
<For each={selectedFiles()}>
{(file, index) => (
<SelectedFile
file={file}
uploadProgressPercent={
uploadProgress()
? uploadProgress()?.fileIndex === index()
? 20
: index() < (uploadProgress()?.fileIndex ?? 0)
? 100
: 0
: undefined
}
onRemoveClick={() => removeSelectedFile(index())}
/>
)}
</For>
</div>
</Show>
<div
class="p-2 flex gap-2 border-gray-100 overflow-auto"
style={{ 'border-bottom-width': '1px' }}
class={clsx(
'flex justify-between px-2',
props.block.options?.isLong ? 'items-end' : 'items-center'
)}
>
<For each={selectedFiles()}>
{(file, index) => (
<SelectedFile
file={file}
uploadProgressPercent={
uploadProgress()
? uploadProgress()?.fileIndex === index()
? 20
: index() < (uploadProgress()?.fileIndex ?? 0)
? 100
: 0
: undefined
}
onRemoveClick={() => removeSelectedFile(index())}
/>
)}
</For>
{props.block.options?.isLong ? (
<Textarea
ref={inputRef as HTMLTextAreaElement}
onInput={handleInput}
onKeyDown={submitIfCtrlEnter}
value={inputValue()}
placeholder={
props.block.options?.labels?.placeholder ??
defaultTextInputOptions.labels.placeholder
}
/>
) : (
<ShortTextInput
ref={inputRef as HTMLInputElement}
onInput={handleInput}
value={inputValue()}
placeholder={
props.block.options?.labels?.placeholder ??
defaultTextInputOptions.labels.placeholder
}
/>
)}
<Show
when={
(props.block.options?.attachments?.isEnabled ??
defaultTextInputOptions.attachments.isEnabled) &&
props.block.options?.attachments?.saveVariableId
}
>
<TextInputAddFileButton
onNewFiles={onNewFiles}
class={clsx(props.block.options?.isLong ? 'ml-2' : undefined)}
/>
</Show>
</div>
</Show>
<div
class={clsx(
'flex justify-between px-2',
props.block.options?.isLong ? 'items-end' : 'items-center'
)}
>
{props.block.options?.isLong ? (
<Textarea
ref={inputRef as HTMLTextAreaElement}
onInput={handleInput}
onKeyDown={submitIfCtrlEnter}
value={inputValue()}
placeholder={
props.block.options?.labels?.placeholder ??
defaultTextInputOptions.labels.placeholder
}
/>
) : (
<ShortTextInput
ref={inputRef as HTMLInputElement}
onInput={handleInput}
value={inputValue()}
placeholder={
props.block.options?.labels?.placeholder ??
defaultTextInputOptions.labels.placeholder
}
/>
)}
<Show
when={
(props.block.options?.attachments?.isEnabled ??
defaultTextInputOptions.attachments.isEnabled) &&
props.block.options?.attachments?.saveVariableId
}
>
<TextInputAddFileButton
onNewFiles={onNewFiles}
class={clsx(props.block.options?.isLong ? 'ml-2' : undefined)}
/>
</Show>
</div>
</div>
<SendButton
type="button"
on:click={submit}
isDisabled={Boolean(uploadProgress())}
class="h-[56px]"
>
{props.block.options?.labels?.button}
</SendButton>
<Switch>
<Match
when={
!inputValue() &&
!isRecording() &&
props.block.options?.audioClip?.isEnabled
}
>
<Button
class="h-[56px] flex items-center"
on:click={recordVoice}
aria-label="Record voice"
>
<MicrophoneIcon class="flex w-6 h-6" />
</Button>
</Match>
<Match when={true}>
<SendButton
type="button"
on:click={submit}
isDisabled={Boolean(uploadProgress())}
class="h-[56px]"
>
{props.block.options?.labels?.button}
</SendButton>
</Match>
</Switch>
</div>
)
}

View File

@ -0,0 +1,178 @@
import { createEffect, createSignal, onCleanup } from 'solid-js'
import { volumeProcessorCode } from './VolumeProcessor'
import clsx from 'clsx'
import { CloseIcon } from '@/components/icons/CloseIcon'
import { Theme } from '@typebot.io/schemas'
import { hexToRgb } from '@typebot.io/lib/hexToRgb'
import { defaultButtonsBackgroundColor } from '@typebot.io/schemas/features/typebot/theme/constants'
// Waveform rendering constants (canvas pixels).
const barWidth = 3
const barGap = 3
// Horizontal scroll speed of the waveform, in pixels per second
// (applied as `dx * (deltaTime / 1000)` each frame).
const dx = 53.5
// Current horizontal scroll offset of the waveform.
// NOTE(review): module-level mutable state, shared by every <VoiceRecorder />
// instance; it is reset to 0 in stopRecording — confirm at most one recorder
// is mounted at a time.
let offset = 0
// Height, as a percentage of canvas height, of the faded placeholder bars.
const initBarsHeightPercent = 10

type Props = {
  // Drives the recorder: capture starts when this flips to true, stops when false.
  isRecording: boolean
  // Chat buttons theme; its background color is used as the waveform fill color.
  buttonsTheme: NonNullable<Theme['chat']>['buttons']
  // Invoked after the recorder has been torn down (stop button or cleanup).
  onAbortRecording: () => void
  // Invoked with the live microphone stream once capture has started.
  onRecordingStart: (stream: MediaStream) => void
}
/**
 * Voice-recording UI: captures microphone audio, visualizes its volume as a
 * scrolling bar waveform on a canvas, and shows the elapsed recording time.
 *
 * Audio flow: getUserMedia stream -> MediaStreamAudioSourceNode -> an
 * AudioWorkletNode (see `loadVolumeProcessorWorklet`) whose port posts
 * throttled volume percentages that are pushed into `bars` and drawn by the
 * requestAnimationFrame loop.
 */
export const VoiceRecorder = (props: Props) => {
  // Elapsed recording time in seconds, rendered via formatTimeLabel as "m:ss".
  const [recordingTime, setRecordingTime] = createSignal<number>(0)
  let canvasElement: HTMLCanvasElement | undefined
  let animationFrameId: number
  let ctx: CanvasRenderingContext2D | undefined
  let audioContext: AudioContext | undefined
  let volumeNode: AudioWorkletNode | undefined
  let microphone: MediaStreamAudioSourceNode | undefined
  let stream: MediaStream | undefined
  // Volume samples (bar heights in percent) posted by the worklet.
  // NOTE(review): grows unbounded while recording; fine for short clips,
  // confirm acceptable for long recordings.
  let bars: number[] = []
  let recordTimeInterval: NodeJS.Timer | undefined
  let lastFrameTime: DOMHighResTimeStamp | undefined

  // Waveform fill color derived from the buttons theme, as an "r, g, b" string
  // ready to be interpolated into rgba().
  const fillRgb = hexToRgb(
    props.buttonsTheme?.backgroundColor ?? defaultButtonsBackgroundColor
  ).join(', ')

  // One animation frame: clear the canvas, draw the faded placeholder bars and
  // the recorded volume bars (both shifted left by the shared `offset`), then
  // advance `offset` by elapsed wall time and schedule the next frame.
  const animate = () => {
    if (!ctx || !canvasElement || !lastFrameTime) return
    const currentTime = performance.now()
    const deltaTime = currentTime - lastFrameTime
    lastFrameTime = currentTime
    ctx.clearRect(0, 0, canvasElement.width, canvasElement.height)
    // Draw init bars
    ctx.fillStyle = `rgba(${fillRgb}, 0.2)`
    for (
      let i = 0;
      i < (canvasElement.width + barGap) / (barWidth + barGap);
      i++
    ) {
      const x = i * (barWidth + barGap) - offset
      const barHeight = canvasElement.height * (initBarsHeightPercent / 100)
      const y = (canvasElement.height - barHeight) / 2
      ctx.beginPath()
      ctx.roundRect(x, y, barWidth, barHeight, 5)
      ctx.fill()
    }
    // Draw recorded volume bars, entering from the right edge and scrolling left.
    ctx.fillStyle = `rgba(${fillRgb}, 1)`
    for (let i = 0; i < bars.length; i++) {
      const x = canvasElement.width + (i + 1) * (barWidth + barGap) - offset
      const barHeight = canvasElement.height * (bars[i] / 100)
      const y = (canvasElement.height - barHeight) / 2
      ctx.beginPath()
      ctx.roundRect(x, y, barWidth, barHeight, 5)
      ctx.fill()
    }
    offset += dx * (deltaTime / 1000)
    animationFrameId = requestAnimationFrame(animate)
  }

  // Starts the waveform animation and timer, acquires the microphone, and
  // wires the audio graph. NOTE(review): a rejected getUserMedia (permission
  // denied) is unhandled here and surfaces as an unhandled rejection — confirm
  // the caller copes with that.
  const startRecording = async () => {
    if (!canvasElement) return
    if (!ctx) ctx = canvasElement.getContext('2d') ?? undefined
    lastFrameTime = performance.now()
    animate()
    recordTimeInterval = setInterval(() => {
      setRecordingTime((prev) => (prev += 1))
    }, 1000)
    stream = await navigator.mediaDevices.getUserMedia({ audio: true })
    // Hand the raw stream to the parent (it owns the MediaRecorder).
    props.onRecordingStart(stream)
    audioContext = new AudioContext()
    volumeNode = await loadVolumeProcessorWorklet(audioContext)
    microphone = audioContext.createMediaStreamSource(stream)
    microphone.connect(volumeNode)
    // Keep the worklet pulled by the graph; the processor posts volume
    // messages and writes no audio output (see volumeProcessorCode).
    volumeNode.connect(audioContext.destination)
    volumeNode.port.onmessage = (event) => {
      bars.push(event.data)
    }
  }

  // Tears everything down: clears the canvas, resets the shared offset,
  // disconnects and releases the audio graph, stops the mic tracks, resets
  // timer/animation state, then notifies the parent.
  // NOTE(review): fired by createEffect on initial mount too (isRecording is
  // false), so onAbortRecording runs once on mount — confirm this is intended.
  const stopRecording = () => {
    if (ctx && canvasElement)
      ctx.clearRect(0, 0, canvasElement.width, canvasElement.height)
    offset = 0
    volumeNode?.disconnect()
    volumeNode = undefined
    microphone?.disconnect()
    microphone = undefined
    audioContext?.close()
    audioContext = undefined
    stream?.getTracks().forEach((track) => track.stop())
    stream = undefined
    bars = []
    // NOTE(review): the handle came from setInterval; clearInterval is the
    // matching call (clearTimeout works in browsers because timer IDs share a
    // pool — confirm for the NodeJS.Timer typing used here).
    clearTimeout(recordTimeInterval)
    setRecordingTime(0)
    cancelAnimationFrame(animationFrameId)
    props.onAbortRecording()
  }

  // React to the isRecording prop: start on true, stop on false.
  createEffect(() => {
    if (props.isRecording) {
      startRecording()
    } else {
      stopRecording()
    }
  })

  // Release mic/audio resources if the component unmounts mid-recording.
  onCleanup(() => {
    stopRecording()
  })

  return (
    <div
      class={clsx(
        'w-full gap-2 items-center transition-opacity px-2 typebot-recorder',
        props.isRecording ? 'opacity-1 flex' : 'opacity-0 hidden'
      )}
    >
      <button
        class="p-0.5 rounded-full"
        on:click={stopRecording}
        aria-label="Stop recording"
      >
        <CloseIcon class="w-4" />
      </button>
      <div class="relative flex w-full">
        <canvas ref={canvasElement} class="w-full h-[56px]" />
        {/* Fade-out gradients over the scrolling waveform's left/right edges */}
        <div class="absolute left-gradient w-2 left-0 h-[56px]" />
        <div class="absolute right-gradient w-2 right-0 h-[56px]" />
      </div>
      <span class="font-bold text-sm">{formatTimeLabel(recordingTime())}</span>
    </div>
  )
}
/**
 * Registers the inline volume-processor AudioWorklet module on the given
 * context and returns a node instance bound to it.
 *
 * The worklet source (`volumeProcessorCode`) is a plain string, so it is fed
 * to the AudioWorklet loader through a temporary Blob object URL. The URL is
 * revoked once `addModule` settles — the original leaked one blob URL per
 * recording session because it never called `URL.revokeObjectURL`.
 */
const loadVolumeProcessorWorklet = async (
  audioContext: AudioContext
): Promise<AudioWorkletNode> => {
  const blob = new Blob([volumeProcessorCode], {
    type: 'application/javascript',
  })
  const volumeProcessorCodeUrl = URL.createObjectURL(blob)
  try {
    await audioContext.audioWorklet.addModule(volumeProcessorCodeUrl)
  } finally {
    // The module source has been fetched by now (or loading failed);
    // either way the blob URL is no longer needed.
    URL.revokeObjectURL(volumeProcessorCodeUrl)
  }
  return new AudioWorkletNode(audioContext, 'volume-processor')
}
/**
 * Formats an elapsed duration, given in whole seconds, as "m:ss"
 * (e.g. 75 -> "1:15", 5 -> "0:05"). Minutes are not zero-padded.
 */
const formatTimeLabel = (seconds: number): string =>
  `${Math.floor(seconds / 60)}:${String(seconds % 60).padStart(2, '0')}`

View File

@ -0,0 +1,41 @@
// Source of an AudioWorkletProcessor, shipped as a string so it can be loaded
// at runtime through a Blob URL (see loadVolumeProcessorWorklet). For each
// audio render quantum it accumulates the RMS volume of channel 0, and every
// `throttleMs` it posts the averaged volume (scaled by `volumeMultiplier`,
// capped at `maxVolumePercent`) to the node's message port, then resets the
// accumulator. The body is a runtime string literal — do not reformat it.
export const volumeProcessorCode = `
const throttleMs = 110;
const maxVolumePercent = 80;
const volumeMultiplier = 3;
class VolumeProcessor extends AudioWorkletProcessor {
constructor() {
super();
this.lastUpdateTime = 0;
this.volumeSum = 0;
this.volumeCount = 1;
}
process(inputs) {
const input = inputs[0];
const currentTime = new Date().getTime();
if (input.length > 0) {
const channelData = input[0];
let sum = 0;
for (let i = 0; i < channelData.length; i++) {
sum += channelData[i] * channelData[i];
}
const rms = Math.sqrt(sum / channelData.length);
const volumePercent = rms * 100;
this.volumeSum += volumePercent;
this.volumeCount += 1;
}
if (currentTime - this.lastUpdateTime >= throttleMs) {
const averageVolume = 1 + this.volumeSum / this.volumeCount;
this.port.postMessage(Math.min(averageVolume * volumeMultiplier, maxVolumePercent));
this.volumeSum = 0;
this.volumeCount = 1;
this.lastUpdateTime = currentTime;
}
return true;
}
}
registerProcessor("volume-processor", VolumeProcessor);
`

View File

@ -28,7 +28,7 @@ export const UrlInput = (props: Props) => {
if (inputRef && !inputRef?.value.startsWith('http'))
inputRef.value = `https://${inputRef.value}`
if (checkIfInputIsValid())
props.onSubmit({ value: inputRef?.value ?? inputValue() })
props.onSubmit({ type: 'text', value: inputRef?.value ?? inputValue() })
else inputRef?.focus()
}

View File

@ -1,11 +1,5 @@
import { ContinueChatResponse, StartChatResponse } from '@typebot.io/schemas'
export type InputSubmitContent = {
label?: string
value: string
attachments?: Answer['attachments']
}
export type BotContext = {
typebot: StartChatResponse['typebot']
resultId?: string
@ -33,10 +27,23 @@ export type ChatChunk = Pick<
streamingMessageId?: string
}
export type Answer = {
text: string
attachments?: {
type: string
url: string
}[]
export type Attachment = {
type: string
url: string
}
export type TextInputSubmitContent = {
type: 'text'
value: string
label?: string
attachments?: Attachment[]
}
export type RecordingInputSubmitContent = {
type: 'recording'
url: string
}
export type InputSubmitContent =
| TextInputSubmitContent
| RecordingInputSubmitContent

View File

@ -0,0 +1,3 @@
import { createSignal } from 'solid-js'
// Module-level signal holding the bot's root container element (undefined
// until the bot mounts and calls setBotContainer).
export const [botContainer, setBotContainer] = createSignal<HTMLDivElement>()

View File

@ -0,0 +1,6 @@
import { InputSubmitContent } from '@/types'
/**
 * Extracts the human-readable content of a submitted answer.
 * Text answers prefer their display label over the raw value;
 * recording answers are represented by their audio URL.
 */
export const getAnswerContent = (answer: InputSubmitContent): string =>
  answer.type === 'text' ? answer.label ?? answer.value : answer.url

View File

@ -15,6 +15,8 @@
"declarationMap": true,
"outDir": "dist",
"noEmit": false,
"emitDeclarationOnly": true
"emitDeclarationOnly": true,
"noEmitOnError": true,
"sourceMap": true
}
}

View File

@ -1,6 +1,6 @@
{
"name": "@typebot.io/nextjs",
"version": "0.3.8",
"version": "0.3.9",
"description": "Convenient library to display typebots on your Next.js website",
"main": "dist/index.js",
"types": "dist/index.d.ts",

View File

@ -16,6 +16,7 @@ const indexConfig = {
output: {
dir: './dist',
format: 'es',
sourcemap: true,
},
external: ['next/dynamic.js', 'react', 'react/jsx-runtime'],
watch: {

View File

@ -10,6 +10,8 @@
"declaration": true,
"declarationMap": true,
"noEmit": false,
"emitDeclarationOnly": true
"emitDeclarationOnly": true,
"noEmitOnError": true,
"sourceMap": true
}
}

View File

@ -1,6 +1,6 @@
{
"name": "@typebot.io/react",
"version": "0.3.8",
"version": "0.3.9",
"description": "Convenient library to display typebots on your React app",
"main": "dist/index.js",
"types": "dist/index.d.ts",

View File

@ -16,6 +16,7 @@ const indexConfig = {
output: {
file: './dist/index.js',
format: 'es',
sourcemap: true,
},
external: ['react', 'react/jsx-runtime'],
watch: {

View File

@ -12,7 +12,7 @@ export const leadGenerationTypebot: StartTypebot = {
version: '3',
id: 'clckrl4q5000t3b6sabwokaar',
events: null,
publishedAt: new Date(),
updatedAt: new Date(),
groups: [
{
id: 'clckrl4q5000g3b6skizhd262',

View File

@ -10,6 +10,8 @@
"declaration": true,
"declarationMap": true,
"noEmit": false,
"emitDeclarationOnly": true
"emitDeclarationOnly": true,
"noEmitOnError": true,
"sourceMap": true
}
}

View File

@ -4,6 +4,10 @@ import { TextInputBlock } from './schema'
export const defaultTextInputOptions = {
isLong: false,
labels: { button: defaultButtonLabel, placeholder: 'Type your answer...' },
audioClip: {
isEnabled: false,
visibility: 'Auto',
},
attachments: {
isEnabled: false,
visibility: 'Auto',

View File

@ -17,6 +17,13 @@ export const textInputOptionsSchema = textInputOptionsBaseSchema
.merge(
z.object({
isLong: z.boolean().optional(),
audioClip: z
.object({
isEnabled: z.boolean().optional(),
saveVariableId: z.string().optional(),
visibility: z.enum(fileVisibilityOptions).optional(),
})
.optional(),
attachments: z
.object({
isEnabled: z.boolean().optional(),

View File

@ -42,6 +42,14 @@ export const messageSchema = z.preprocess(
'Can only be provided if current input block is a text input block that allows attachments'
),
}),
z
.object({
type: z.literal('audio'),
url: z.string(),
})
.describe(
'Can only be provided if current input block is a text input that allows audio clips'
),
])
)
export type Message = z.infer<typeof messageSchema>

View File

@ -118,10 +118,11 @@ export const sessionStateSchema = z
])
.transform((state): SessionState => {
if (state.version === '3') return state
// eslint-disable-next-line @typescript-eslint/no-explicit-any
let migratedState: any = state
if (!state.version) migratedState = migrateFromV1ToV2(state)
return migrateFromV2ToV3(migratedState)
})
}) as z.ZodType<SessionState>
const migrateFromV1ToV2 = (
state: z.infer<typeof sessionStateSchemaV1>

View File

@ -14,6 +14,8 @@
"@typebot.io/forge-repository": "workspace:*",
"@typebot.io/prisma": "workspace:*",
"@typebot.io/tsconfig": "workspace:*",
"typescript": "5.4.5"
"typescript": "5.4.5",
"eslint": "8.44.0",
"eslint-config-custom": "workspace:*"
}
}
}