compose.yaml: - Add hostname n8n.eks-intec.de to fix SMTP HELO rejection - Add NODE_TLS_REJECT_UNAUTHORIZED=0 for internal CA trust workflow-a-http.json: - Replace Set node with Code node for reliable data extraction - Strip HTML from thread bodies before AI analysis - Preserve newlines as ¶ (pilcrow) in DB storage instead of flattening workflow-b-http.json: - Add Prepare Email Body node: restores ¶→\n, strips markdown, converts numbered lists to <ol><li>, generates HTML email template - Switch emailSend from plain text to HTML+text (multipart) - Fix Log Reply to Freescout: use MAX(created_at)+1s to ensure n8n reply appears as newest thread regardless of email header timestamps - Fix emailSend typeVersion 1 with text field for reliable expression support - Correct Freescout thread INSERT: type=2, cc/bcc='[]', customer_id via subquery freescout-templates/: - Modern reply_fancy.blade.php: blue header bar with mailbox name and ticket number badge, quoted thread styling with left border accent, footer - Modern auto_reply.blade.php: matching design for auto-reply emails - Deploy to server: scp to /tmp, apply with sudo cp + artisan view:clear Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
242 lines
10 KiB
JSON
242 lines
10 KiB
JSON
{
|
|
"name": "Workflow A - Mail Processing (HTTP)",
|
|
"description": "Fetch unprocessed conversations from Freescout, analyze with AI, save suggestions",
|
|
"nodes": [
|
|
{
|
|
"id": "uuid-trigger-1",
|
|
"name": "Trigger",
|
|
"type": "n8n-nodes-base.cron",
|
|
"typeVersion": 1,
|
|
"position": [250, 200],
|
|
"parameters": {
|
|
"cronExpression": "*/5 * * * *"
|
|
}
|
|
},
|
|
{
|
|
"id": "uuid-get-conversations",
|
|
"name": "Get Unprocessed Conversations",
|
|
"type": "n8n-nodes-base.httpRequest",
|
|
"typeVersion": 4,
|
|
"position": [450, 200],
|
|
"parameters": {
|
|
"url": "http://host.docker.internal:4000/query/freescout",
|
|
"method": "POST",
|
|
"headers": {
|
|
"Content-Type": "application/json"
|
|
},
|
|
"sendBody": true,
|
|
"specifyBody": "json",
|
|
"jsonBody": "{\"query\":\"SELECT c.id, c.number, c.subject, c.customer_email, c.status, GROUP_CONCAT(t.body SEPARATOR ',') as threads_text FROM conversations c LEFT JOIN threads t ON c.id = t.conversation_id LEFT JOIN conversation_custom_field ccf ON c.id = ccf.conversation_id AND ccf.custom_field_id = 8 WHERE c.status = 1 AND ccf.id IS NULL GROUP BY c.id LIMIT 20\"}"
|
|
}
|
|
},
|
|
{
|
|
"id": "uuid-split-out",
|
|
"name": "Split Array into Items",
|
|
"type": "n8n-nodes-base.splitOut",
|
|
"typeVersion": 1,
|
|
"position": [650, 200],
|
|
"parameters": {
|
|
"fieldToSplitOut": "data",
|
|
"options": {}
|
|
}
|
|
},
|
|
{
|
|
"id": "uuid-extract-data",
|
|
"name": "Extract Conversation Data",
|
|
"type": "n8n-nodes-base.code",
|
|
"typeVersion": 2,
|
|
"position": [850, 200],
|
|
"parameters": {
|
|
"mode": "runOnceForEachItem",
|
|
"jsCode": "const item = $input.item.json;\n// HTML-Tags entfernen damit die AI lesbaren Text bekommt\nconst rawText = item.threads_text || 'Keine Beschreibung vorhanden';\nconst plainText = rawText\n .replace(/<[^>]+>/g, ' ')\n .replace(/&nbsp;/g, ' ')\n .replace(/&amp;/g, '&')\n .replace(/&lt;/g, '<')\n .replace(/&gt;/g, '>')\n .replace(/&quot;/g, '\"')\n .replace(/\\s+/g, ' ')\n .trim()\n .substring(0, 2000);\nreturn { json: {\n ticket_id: item.id,\n ticket_number: item.number,\n subject: item.subject,\n customer_email: item.customer_email,\n problem_text: plainText\n}};"
|
|
}
|
|
},
|
|
{
|
|
"id": "uuid-llm-analyze",
|
|
"name": "LiteLLM AI Analysis",
|
|
"type": "n8n-nodes-base.httpRequest",
|
|
"typeVersion": 4,
|
|
"position": [1050, 200],
|
|
"parameters": {
|
|
"url": "http://llm.eks-ai.apps.asgard.eks-lnx.fft-it.de/v1/chat/completions",
|
|
"method": "POST",
|
|
"headers": {
|
|
"Content-Type": "application/json"
|
|
},
|
|
"sendBody": true,
|
|
"specifyBody": "json",
|
|
"jsonBody": "={{ JSON.stringify({model: 'gpt-oss_120b_128k-gpu', messages: [{role: 'system', content: 'Du bist ein IT-Support-Assistent. Analysiere das folgende IT-Support-Ticket und gib eine strukturierte JSON-Antwort mit folgenden Feldern: kategorie (z.B. Hardware, Software, Netzwerk, Zugriff), lösung_typ (BARAMUNDI_JOB, AUTOMATISCHE_ANTWORT, oder ESKALATION), vertrauen (Dezimal zwischen 0.0 und 1.0 - wie sicher bist du bei dieser Lösung), baramundi_job (Name des Jobs falls BARAMUNDI_JOB), antwort_text (Die Antwort an den Nutzer), begründung (Kurze Erklärung deiner Analyse)'}, {role: 'user', content: 'Ticket-Nummer: ' + $json.ticket_number + '\\nBetreff: ' + $json.subject + '\\nProblembeschreibung:\\n' + $json.problem_text + '\\n\\nBitte antworte NUR mit gültiger JSON in dieser Struktur: {\"kategorie\": \"...\", \"lösung_typ\": \"...\", \"vertrauen\": 0.75, \"baramundi_job\": \"...\", \"antwort_text\": \"...\", \"begründung\": \"...\"}'}], temperature: 0.7, max_tokens: 1000}) }}"
|
|
}
|
|
},
|
|
{
|
|
"id": "uuid-parse-response",
|
|
"name": "Parse AI Response",
|
|
"type": "n8n-nodes-base.code",
|
|
"typeVersion": 2,
|
|
"position": [1250, 200],
|
|
"parameters": {
|
|
"mode": "runOnceForEachItem",
|
|
"jsCode": "const content = $input.item.json.choices[0].message.content;\nconst extractData = $('Extract Conversation Data').item.json;\nconst ticketId = extractData.ticket_id !== undefined ? extractData.ticket_id : extractData.id;\nlet vertrauen = 0.1;\nlet loesung_typ = 'UNBEKANNT';\nlet kategorie = '';\nlet antwort_text = '';\nlet baramundi_job = '';\ntry {\n const parsed = JSON.parse(content);\n vertrauen = typeof parsed.vertrauen === 'number' ? parsed.vertrauen : 0.1;\n loesung_typ = parsed['lösung_typ'] || parsed.loesung_typ || 'UNBEKANNT';\n kategorie = parsed.kategorie || '';\n antwort_text = parsed.antwort_text || '';\n baramundi_job = parsed.baramundi_job || '';\n} catch(e) { vertrauen = 0.1; }\n// Human-readable for Freescout textarea\nconst lines = [loesung_typ + ' | Vertrauen: ' + vertrauen + ' | Kategorie: ' + kategorie];\nif (baramundi_job) lines.push('Baramundi-Job: ' + baramundi_job);\nlines.push('---');\nlines.push(antwort_text);\nconst display_text = lines.join('\\n');\n// SQL-safe: Quotes escapen, Zeilenumbrüche als ¶ (Pilcrow) erhalten damit\n// Workflow B die Struktur der KI-Antwort wiederherstellen kann.\nconst ai_content_sql = display_text.replace(/'/g, \"''\").replace(/\\r/g, '').replace(/\\n/g, '¶');\nconst ai_json_sql = content.replace(/'/g, \"''\").replace(/[\\n\\r]/g, ' ');\nreturn { json: { vertrauen, ticket_id: ticketId, ai_content: content, ai_content_sql, ai_json_sql } };"
|
|
}
|
|
},
|
|
{
|
|
"id": "uuid-check-confidence",
|
|
"name": "Check Confidence >= 0.6",
|
|
"type": "n8n-nodes-base.if",
|
|
"typeVersion": 2,
|
|
"position": [1450, 200],
|
|
"parameters": {
|
|
"conditions": {
|
|
"options": {
|
|
"caseSensitive": true,
|
|
"leftValue": "",
|
|
"typeValidation": "loose"
|
|
},
|
|
"conditions": [
|
|
{
|
|
"id": "cond-confidence",
|
|
"leftValue": "={{ $json.vertrauen }}",
|
|
"rightValue": 0.6,
|
|
"operator": {
|
|
"type": "number",
|
|
"operation": "gte"
|
|
}
|
|
}
|
|
],
|
|
"combinator": "and"
|
|
}
|
|
}
|
|
},
|
|
{
|
|
"id": "uuid-save-ai-suggestion",
|
|
"name": "Save AI Suggestion (field 6)",
|
|
"type": "n8n-nodes-base.httpRequest",
|
|
"typeVersion": 4,
|
|
"position": [1650, 100],
|
|
"parameters": {
|
|
"url": "http://host.docker.internal:4000/query/freescout",
|
|
"method": "POST",
|
|
"headers": {
|
|
"Content-Type": "application/json"
|
|
},
|
|
"sendBody": true,
|
|
"specifyBody": "json",
|
|
"jsonBody": "={{ JSON.stringify({query: \"INSERT INTO conversation_custom_field (conversation_id, custom_field_id, value) VALUES (\" + $json.ticket_id + \", 6, '\" + $json.ai_content_sql + \"') ON DUPLICATE KEY UPDATE value = VALUES(value)\"}) }}"
|
|
}
|
|
},
|
|
{
|
|
"id": "uuid-save-status-pending",
|
|
"name": "Save Status PENDING (field 7)",
|
|
"type": "n8n-nodes-base.httpRequest",
|
|
"typeVersion": 4,
|
|
"position": [1650, 200],
|
|
"parameters": {
|
|
"url": "http://host.docker.internal:4000/query/freescout",
|
|
"method": "POST",
|
|
"headers": {
|
|
"Content-Type": "application/json"
|
|
},
|
|
"sendBody": true,
|
|
"specifyBody": "json",
|
|
"jsonBody": "={{ JSON.stringify({query: \"INSERT INTO conversation_custom_field (conversation_id, custom_field_id, value) VALUES (\" + $('Parse AI Response').item.json.ticket_id + \", 7, '0') ON DUPLICATE KEY UPDATE value = VALUES(value)\"}) }}"
|
|
}
|
|
},
|
|
{
|
|
"id": "uuid-save-processed-flag",
|
|
"name": "Save Processed Flag (field 8)",
|
|
"type": "n8n-nodes-base.httpRequest",
|
|
"typeVersion": 4,
|
|
"position": [1650, 300],
|
|
"parameters": {
|
|
"url": "http://host.docker.internal:4000/query/freescout",
|
|
"method": "POST",
|
|
"headers": {
|
|
"Content-Type": "application/json"
|
|
},
|
|
"sendBody": true,
|
|
"specifyBody": "json",
|
|
"jsonBody": "={{ JSON.stringify({query: \"INSERT INTO conversation_custom_field (conversation_id, custom_field_id, value) VALUES (\" + $('Parse AI Response').item.json.ticket_id + \", 8, '1') ON DUPLICATE KEY UPDATE value = VALUES(value)\"}) }}"
|
|
}
|
|
},
|
|
{
|
|
"id": "uuid-no-action",
|
|
"name": "Skip - Low Confidence",
|
|
"type": "n8n-nodes-base.set",
|
|
"typeVersion": 3,
|
|
"position": [1650, 350],
|
|
"parameters": {
|
|
"mode": "manual",
|
|
"options": {},
|
|
"assignments": {
|
|
"assignments": [
|
|
{
|
|
"id": "assign-skipped",
|
|
"name": "skipped",
|
|
"value": true,
|
|
"type": "boolean"
|
|
},
|
|
{
|
|
"id": "assign-reason",
|
|
"name": "reason",
|
|
"value": "={{ 'Confidence ' + $json.vertrauen + ' < 0.6' }}",
|
|
"type": "string"
|
|
}
|
|
]
|
|
}
|
|
}
|
|
}
|
|
],
|
|
"connections": {
|
|
"Trigger": {
|
|
"main": [
|
|
[{"node": "Get Unprocessed Conversations", "index": 0}]
|
|
]
|
|
},
|
|
"Get Unprocessed Conversations": {
|
|
"main": [
|
|
[{"node": "Split Array into Items", "index": 0}]
|
|
]
|
|
},
|
|
"Split Array into Items": {
|
|
"main": [
|
|
[{"node": "Extract Conversation Data", "index": 0}]
|
|
]
|
|
},
|
|
"Extract Conversation Data": {
|
|
"main": [
|
|
[{"node": "LiteLLM AI Analysis", "index": 0}]
|
|
]
|
|
},
|
|
"LiteLLM AI Analysis": {
|
|
"main": [
|
|
[{"node": "Parse AI Response", "index": 0}]
|
|
]
|
|
},
|
|
"Parse AI Response": {
|
|
"main": [
|
|
[{"node": "Check Confidence >= 0.6", "index": 0}]
|
|
]
|
|
},
|
|
"Check Confidence >= 0.6": {
|
|
"main": [
|
|
[{"node": "Save AI Suggestion (field 6)", "index": 0}],
|
|
[{"node": "Skip - Low Confidence", "index": 0}]
|
|
]
|
|
},
|
|
"Save AI Suggestion (field 6)": {
|
|
"main": [
|
|
[{"node": "Save Status PENDING (field 7)", "index": 0}]
|
|
]
|
|
},
|
|
"Save Status PENDING (field 7)": {
|
|
"main": [
|
|
[{"node": "Save Processed Flag (field 8)", "index": 0}]
|
|
]
|
|
}
|
|
},
|
|
"active": false,
|
|
"settings": {
|
|
"errorHandler": "continueOnError"
|
|
}
|
|
}
|