🔒 (radar) Improve scam detection by analyzing the entire typebot
@@ -89,14 +89,7 @@ export const publishTypebot = authenticatedProcedure
 
   const typebotWasVerified = existingTypebot.riskLevel === -1
 
-  const riskLevel = typebotWasVerified
-    ? 0
-    : computeRiskLevel({
-        name: existingTypebot.name,
-        groups: parseGroups(existingTypebot.groups, {
-          typebotVersion: existingTypebot.version,
-        }),
-      })
+  const riskLevel = typebotWasVerified ? 0 : computeRiskLevel(existingTypebot)
 
   if (riskLevel > 0 && riskLevel !== existingTypebot.riskLevel) {
     if (env.MESSAGE_WEBHOOK_URL && riskLevel !== 100)
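Note: the call site now hands the whole existingTypebot record to computeRiskLevel instead of picking out the name and pre-parsed groups. The sketch below shows how that publish-time gate could be exercised in isolation. It keeps the identifiers visible in the diff (existingTypebot, computeRiskLevel, env.MESSAGE_WEBHOOK_URL, the riskLevel !== 100 guard); the wrapping assessBeforePublish function, the import path, and the webhook payload are assumptions for illustration, not the actual procedure body.

import { env } from '@typebot.io/env'
import { computeRiskLevel } from './computeRiskLevel' // hypothetical import path

// Sketch only: not the real publishTypebot mutation body.
const assessBeforePublish = async (existingTypebot: {
  riskLevel: number | null
  [key: string]: unknown
}) => {
  // riskLevel === -1 marks a typebot that was already verified by hand.
  const typebotWasVerified = existingTypebot.riskLevel === -1

  // The whole record is passed; the caller no longer parses groups itself.
  const riskLevel = typebotWasVerified ? 0 : computeRiskLevel(existingTypebot)

  // Only act when the risk is non-zero and has changed, as in the diff above.
  if (riskLevel > 0 && riskLevel !== existingTypebot.riskLevel) {
    // The notification is skipped when the risk level is already 100.
    if (env.MESSAGE_WEBHOOK_URL && riskLevel !== 100)
      await fetch(env.MESSAGE_WEBHOOK_URL, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        // Hypothetical payload shape.
        body: JSON.stringify({
          text: `Suspicious typebot detected (risk level ${riskLevel})`,
        }),
      })
  }

  return riskLevel
}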
@@ -1,56 +1,20 @@
-import { Group } from '@typebot.io/schemas'
 import { env } from '@typebot.io/env'
-import { BubbleBlockType } from '@typebot.io/schemas/features/blocks/bubbles/constants'
-import { TDescendant, TElement, TText } from '@udecode/plate-common'
 
-export const computeRiskLevel = ({
-  name,
-  groups,
-}: {
-  name: string
-  groups: Group[]
-}) => {
+export const computeRiskLevel = (typebot: any) => {
+  const stringifiedTypebot = JSON.stringify(typebot)
   if (!env.RADAR_HIGH_RISK_KEYWORDS) return 0
   if (
     env.RADAR_HIGH_RISK_KEYWORDS.some((keyword) =>
-      name.toLowerCase().includes(keyword)
+      stringifiedTypebot.toLowerCase().includes(keyword)
     )
   )
     return 100
-  let hasSuspiciousKeywords = false
-  for (const group of groups) {
-    for (const block of group.blocks) {
-      if (block.type !== BubbleBlockType.TEXT) continue
-      for (const descendant of block.content?.richText as TDescendant[]) {
-        if (
-          env.RADAR_HIGH_RISK_KEYWORDS &&
-          richTextElementContainsKeywords(env.RADAR_HIGH_RISK_KEYWORDS)(
-            descendant
-          )
-        )
-          return 100
-        if (
-          env.RADAR_INTERMEDIATE_RISK_KEYWORDS &&
-          richTextElementContainsKeywords(env.RADAR_INTERMEDIATE_RISK_KEYWORDS)(
-            descendant
-          )
-        )
-          hasSuspiciousKeywords = true
-      }
-    }
-  }
-  return hasSuspiciousKeywords ? 50 : 0
+  if (!env.RADAR_INTERMEDIATE_RISK_KEYWORDS) return 0
+  if (
+    env.RADAR_INTERMEDIATE_RISK_KEYWORDS.some((keyword) =>
+      stringifiedTypebot.toLowerCase().includes(keyword)
+    )
+  )
+    return 50
+  return 0
 }
 
-const richTextElementContainsKeywords =
-  (keywords: string[]) => (element: TElement | TText) => {
-    if (element.text)
-      return keywords.some((keyword) =>
-        (element.text as string).toLowerCase().includes(keyword)
-      )
-    if (element.children)
-      return (element.children as TDescendant[]).some(
-        richTextElementContainsKeywords(keywords)
-      )
-    return false
-  }
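For reference, below is a self-contained sketch of the new detection logic. It mirrors the committed computeRiskLevel but takes the keyword lists as parameters instead of reading env.RADAR_HIGH_RISK_KEYWORDS and env.RADAR_INTERMEDIATE_RISK_KEYWORDS; the keyword values and the sample typebot object are made up for illustration.

// Sketch of the new approach: serialize the whole typebot and scan the JSON
// string for keywords. The keyword lists are parameters here; in the committed
// code they come from the environment, and an unset list short-circuits to 0.
const computeRiskLevelSketch = (
  typebot: unknown,
  highRiskKeywords: string[],
  intermediateRiskKeywords: string[]
): number => {
  // JSON.stringify covers every serializable field of the stored record:
  // the name, group and block content, URLs, variable names, and so on.
  const stringifiedTypebot = JSON.stringify(typebot).toLowerCase()

  if (highRiskKeywords.some((keyword) => stringifiedTypebot.includes(keyword)))
    return 100
  if (
    intermediateRiskKeywords.some((keyword) =>
      stringifiedTypebot.includes(keyword)
    )
  )
    return 50
  return 0
}

// Example with hypothetical keyword lists and a minimal typebot-like object:
const risk = computeRiskLevelSketch(
  { name: 'Free gift card giveaway', groups: [] },
  ['seed phrase'],
  ['gift card']
)
console.log(risk) // 50 — an intermediate-risk keyword appears in the name

Compared with the removed implementation, which checked only the typebot name and the rich text of TEXT bubble blocks via the recursive richTextElementContainsKeywords helper, stringifying the record widens the surface that gets scanned and drops the @udecode/plate-common traversal, at the cost of the parameter now being typed as any.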