diff --git a/src/report_generator.py b/src/report_generator.py new file mode 100644 index 0000000..8558f85 --- /dev/null +++ b/src/report_generator.py @@ -0,0 +1,387 @@ +"""Report-Generator: PDF und Word Berichte aus Lage-Daten.""" +import base64 +import io +import json +import logging +import re +from collections import defaultdict +from datetime import datetime +from pathlib import Path + +from jinja2 import Environment, FileSystemLoader +from weasyprint import HTML +from docx import Document +from docx.shared import Inches, Pt, Cm, RGBColor +from docx.enum.text import WD_ALIGN_PARAGRAPH +from docx.enum.table import WD_TABLE_ALIGNMENT + +from config import TIMEZONE, CLAUDE_MODEL_FAST + +logger = logging.getLogger("osint.report") + +TEMPLATE_DIR = Path(__file__).parent / "report_templates" +LOGO_PATH = Path(__file__).parent / "static" / "favicon.svg" + +CLASSIFICATION_LABELS = { + "offen": "Offen", + "dienstgebrauch": "Nur für den Dienstgebrauch", + "vertraulich": "Vertraulich", +} + +FC_STATUS_LABELS = { + "confirmed": "Bestätigt", + "unconfirmed": "Unbestätigt", + "disputed": "Umstritten", + "false": "Falsch", +} + + +def _get_logo_base64() -> str: + """Logo als Base64 für HTML-Embedding.""" + try: + return base64.b64encode(LOGO_PATH.read_bytes()).decode() + except Exception: + return "" + + +def _prepare_sources(incident: dict) -> list: + """Quellenverzeichnis aus sources_json parsen.""" + raw = incident.get("sources_json") + if not raw: + return [] + try: + return json.loads(raw) if isinstance(raw, str) else raw + except (json.JSONDecodeError, TypeError): + return [] + + +def _prepare_source_stats(articles: list) -> list: + """Quellenstatistik: Artikel pro Quelle + Sprachen.""" + source_map = defaultdict(lambda: {"count": 0, "langs": set()}) + for art in articles: + name = art.get("source") or "Unbekannt" + source_map[name]["count"] += 1 + source_map[name]["langs"].add((art.get("language") or "de").upper()) + stats = [] + for name, data in 
sorted(source_map.items(), key=lambda x: -x[1]["count"]): + stats.append({"name": name, "count": data["count"], "languages": ", ".join(sorted(data["langs"]))}) + return stats + + +def _prepare_fact_checks(fact_checks: list) -> list: + """Faktenchecks mit Label aufbereiten.""" + result = [] + for fc in fact_checks: + fc_copy = dict(fc) + fc_copy["status_label"] = FC_STATUS_LABELS.get(fc.get("status", ""), fc.get("status", "Unbekannt")) + result.append(fc_copy) + return result + + +def _prepare_timeline(articles: list) -> list: + """Timeline aus Artikeln: sortiert nach Datum.""" + timeline = [] + for art in articles: + pub = art.get("published_at") or art.get("collected_at") or "" + headline = art.get("headline_de") or art.get("headline") or "Ohne Titel" + source = art.get("source") or "" + if pub: + try: + dt = datetime.fromisoformat(pub.replace("Z", "+00:00")) + date_str = dt.strftime("%d.%m.%Y %H:%M") + except Exception: + date_str = pub[:16] + else: + date_str = "" + timeline.append({"date": date_str, "headline": headline, "source": source, "sort_key": pub}) + timeline.sort(key=lambda x: x["sort_key"], reverse=True) + return timeline[:100] # Max 100 Einträge + + +def _markdown_to_html(text: str) -> str: + """Einfache Markdown -> HTML Konvertierung für Lagebild.""" + if not text: + return "

<p>Kein Lagebild verfügbar.</p>" + # Basic Markdown -> HTML + html = text + # Headlines + html = re.sub(r'^### (.+)$', r'<h3>\1</h3>', html, flags=re.MULTILINE) + html = re.sub(r'^## (.+)$', r'<h2>\1</h2>
', html, flags=re.MULTILINE) + # Bold + html = re.sub(r'\*\*(.+?)\*\*', r'<strong>\1</strong>', html) + # Links [text](url) + html = re.sub(r'\[([^\]]+)\]\(([^)]+)\)', r'<a href="\2">\1</a>', html) + # Bullet lists + html = re.sub(r'^- (.+)$', r'<li>\1</li>', html, flags=re.MULTILINE) + html = re.sub(r'(<li>.*</li>\n?)+', lambda m: '<ul>' + m.group(0) + '</ul>', html) + # Paragraphs + paragraphs = html.split('\n\n') + result = [] + for p in paragraphs: + p = p.strip() + if not p: + continue + if p.startswith('<'): + result.append(p) + else: + result.append(f'<p>{p}</p>
    ') + return '\n'.join(result) + + async def generate_executive_summary(summary_text: str) -> str: + """KI-verdichtetes Executive Summary aus dem Lagebild.""" + if not summary_text or len(summary_text.strip()) < 50: + return "" + + from agents.claude_client import call_claude + + prompt = f"""Du bist ein Intelligence-Analyst für ein OSINT-Lagemonitoring-System. +Verdichte das folgende Lagebild auf genau 3-5 Kernpunkte. + +REGELN: +- Jeder Punkt: 1-2 Sätze, faktenbasiert +- Fokus: Was ist passiert? Was bedeutet es? Was ist die aktuelle Dynamik? +- Sprache: Deutsch, sachlich, prägnant +- Format: Gib NUR die Bullet Points aus, einen pro Zeile, mit "- " am Anfang +- KEINE Einleitung, KEINE Überschrift, NUR die Punkte + +LAGEBILD: +{summary_text}""" + + try: + result, usage = await call_claude(prompt, tools=None, model=CLAUDE_MODEL_FAST) + # In HTML-Liste umwandeln + lines = [line.strip().lstrip("- ").lstrip("* ") for line in result.strip().split("\n") if line.strip().startswith(("-", "*"))] + if not lines: + lines = [result.strip()] + html = "<ul>\n" + "\n".join(f"<li>{line}</li>" for line in lines) + "\n</ul>" + return html + except Exception as e: + logger.error(f"Executive Summary Generierung fehlgeschlagen: {e}") + return "" + + async def generate_pdf( + incident: dict, articles: list, fact_checks: list, snapshots: list, + scope: str, classification: str, creator: str, executive_summary_html: str, +) -> bytes: + """PDF-Report via WeasyPrint generieren.""" + env = Environment(loader=FileSystemLoader(str(TEMPLATE_DIR))) + template = env.get_template("report.html") + + now = datetime.now(TIMEZONE) + incident_type_label = "Hintergrundrecherche" if incident.get("type") == "research" else "Live-Monitoring" + + # Enrich articles with pub_date BEFORE rendering, so the template's {{ art.pub_date }} is populated + for art in articles: + pub = art.get("published_at") or art.get("collected_at") or "" + try: + dt = datetime.fromisoformat(pub.replace("Z", "+00:00")) + art["pub_date"] = dt.strftime("%d.%m.%Y") + except Exception: + art["pub_date"] = pub[:10] if pub else "" + + html_content = template.render( + incident=incident, + incident_type_label=incident_type_label, + classification=classification, + classification_label=CLASSIFICATION_LABELS.get(classification, classification), + report_date=now.strftime("%d.%m.%Y, %H:%M Uhr"), + creator=creator, + logo_base64=_get_logo_base64(), + 
executive_summary=executive_summary_html, + scope=scope, + lagebild_html=_markdown_to_html(incident.get("summary", "")), + lagebild_timestamp=(incident.get("updated_at") or "")[:16].replace("T", " "), + sources=_prepare_sources(incident), + fact_checks=_prepare_fact_checks(fact_checks), + source_stats=_prepare_source_stats(articles), + timeline=_prepare_timeline(articles) if scope == "full" else [], + articles=articles if scope == "full" else [], + ) + + # Artikel pub_date aufbereiten + for art in articles: + pub = art.get("published_at") or art.get("collected_at") or "" + try: + dt = datetime.fromisoformat(pub.replace("Z", "+00:00")) + art["pub_date"] = dt.strftime("%d.%m.%Y") + except Exception: + art["pub_date"] = pub[:10] if pub else "" + + pdf_bytes = HTML(string=html_content).write_pdf() + return pdf_bytes + + +async def generate_docx( + incident: dict, articles: list, fact_checks: list, snapshots: list, + scope: str, classification: str, creator: str, executive_summary_text: str, +) -> bytes: + """Word-Report via python-docx generieren.""" + doc = Document() + + # Styles + style = doc.styles['Normal'] + style.font.size = Pt(10) + style.font.name = 'Calibri' + + # --- Deckblatt --- + for _ in range(6): + doc.add_paragraph() + + title_para = doc.add_paragraph() + title_para.alignment = WD_ALIGN_PARAGRAPH.CENTER + run = title_para.add_run("AegisSight Monitor") + run.font.size = Pt(12) + run.font.color.rgb = RGBColor(0x88, 0x88, 0x88) + + doc.add_paragraph() + + type_label = "Hintergrundrecherche" if incident.get("type") == "research" else "Live-Monitoring" + type_para = doc.add_paragraph() + type_para.alignment = WD_ALIGN_PARAGRAPH.CENTER + run = type_para.add_run(type_label) + run.font.size = Pt(10) + run.font.color.rgb = RGBColor(0x88, 0x88, 0x88) + + title_para2 = doc.add_paragraph() + title_para2.alignment = WD_ALIGN_PARAGRAPH.CENTER + run = title_para2.add_run(incident.get("title", "")) + run.font.size = Pt(24) + run.font.bold = True + run.font.color.rgb = 
RGBColor(0x0a, 0x18, 0x32) + + if incident.get("description"): + desc_para = doc.add_paragraph() + desc_para.alignment = WD_ALIGN_PARAGRAPH.CENTER + run = desc_para.add_run(incident["description"]) + run.font.size = Pt(11) + run.font.color.rgb = RGBColor(0x66, 0x66, 0x66) + + doc.add_paragraph() + + # Klassifizierung + class_label = CLASSIFICATION_LABELS.get(classification, classification) + class_para = doc.add_paragraph() + class_para.alignment = WD_ALIGN_PARAGRAPH.CENTER + run = class_para.add_run(f"— {class_label} —") + run.font.size = Pt(11) + run.font.bold = True + colors = {"offen": RGBColor(0x22, 0xc5, 0x5e), "dienstgebrauch": RGBColor(0xf0, 0xb4, 0x29), "vertraulich": RGBColor(0xef, 0x44, 0x44)} + run.font.color.rgb = colors.get(classification, RGBColor(0x88, 0x88, 0x88)) + + for _ in range(3): + doc.add_paragraph() + + now = datetime.now(TIMEZONE) + meta_para = doc.add_paragraph() + meta_para.alignment = WD_ALIGN_PARAGRAPH.CENTER + run = meta_para.add_run(f"Stand: {now.strftime('%d.%m.%Y, %H:%M Uhr')}\nErstellt von: {creator}") + run.font.size = Pt(9) + run.font.color.rgb = RGBColor(0x88, 0x88, 0x88) + + doc.add_page_break() + + # --- Executive Summary --- + doc.add_heading("Executive Summary", level=1) + + # HTML-Tags entfernen und als Bullet Points + clean_text = re.sub(r'<[^>]+>', '', executive_summary_text) + lines = [line.strip().lstrip("- ").lstrip("* ") for line in clean_text.strip().split("\n") if line.strip()] + for line in lines: + if line: + doc.add_paragraph(line, style='List Bullet') + + if scope in ("report", "full"): + # --- Lagebild --- + doc.add_heading("Lagebild", level=1) + summary = incident.get("summary") or "Kein Lagebild verfügbar." 
+ # Markdown-Formatierung entfernen + clean_summary = re.sub(r'\*\*(.+?)\*\*', r'\1', summary) + clean_summary = re.sub(r'\[([^\]]+)\]\([^)]+\)', r'\1', clean_summary) + clean_summary = re.sub(r'^#{1,3}\s+', '', clean_summary, flags=re.MULTILINE) + for para_text in clean_summary.split("\n\n"): + para_text = para_text.strip() + if para_text: + if para_text.startswith("- "): + for bullet in para_text.split("\n"): + bullet = bullet.lstrip("- ").strip() + if bullet: + doc.add_paragraph(bullet, style='List Bullet') + else: + doc.add_paragraph(para_text) + + # --- Faktencheck --- + if fact_checks: + doc.add_heading("Faktencheck", level=1) + table = doc.add_table(rows=1, cols=3) + table.style = 'Table Grid' + table.alignment = WD_TABLE_ALIGNMENT.CENTER + hdr = table.rows[0].cells + hdr[0].text = "Behauptung" + hdr[1].text = "Status" + hdr[2].text = "Quellen" + for cell in hdr: + for p in cell.paragraphs: + p.runs[0].font.bold = True + p.runs[0].font.size = Pt(9) + for fc in fact_checks: + row = table.add_row().cells + row[0].text = fc.get("claim", "") + row[1].text = FC_STATUS_LABELS.get(fc.get("status", ""), fc.get("status", "")) + row[2].text = str(fc.get("sources_count", 0)) + + # --- Quellenstatistik --- + source_stats = _prepare_source_stats(articles) + if source_stats: + doc.add_heading("Quellenstatistik", level=1) + table = doc.add_table(rows=1, cols=3) + table.style = 'Table Grid' + table.alignment = WD_TABLE_ALIGNMENT.CENTER + hdr = table.rows[0].cells + hdr[0].text = "Quelle" + hdr[1].text = "Artikel" + hdr[2].text = "Sprache" + for cell in hdr: + for p in cell.paragraphs: + p.runs[0].font.bold = True + p.runs[0].font.size = Pt(9) + for stat in source_stats: + row = table.add_row().cells + row[0].text = stat["name"] + row[1].text = str(stat["count"]) + row[2].text = stat["languages"] + + if scope == "full": + # --- Artikelverzeichnis --- + if articles: + doc.add_page_break() + doc.add_heading(f"Artikelverzeichnis ({len(articles)} Artikel)", level=1) + table = 
doc.add_table(rows=1, cols=4) + table.style = 'Table Grid' + table.alignment = WD_TABLE_ALIGNMENT.CENTER + hdr = table.rows[0].cells + for i, txt in enumerate(["Headline", "Quelle", "Sprache", "Datum"]): + hdr[i].text = txt + for p in hdr[i].paragraphs: + p.runs[0].font.bold = True + p.runs[0].font.size = Pt(8) + for art in articles: + row = table.add_row().cells + row[0].text = art.get("headline_de") or art.get("headline") or "Ohne Titel" + row[1].text = art.get("source") or "" + row[2].text = (art.get("language") or "de").upper() + pub = art.get("published_at") or art.get("collected_at") or "" + try: + dt = datetime.fromisoformat(pub.replace("Z", "+00:00")) + row[3].text = dt.strftime("%d.%m.%Y") + except Exception: + row[3].text = pub[:10] if pub else "" + # Schriftgröße reduzieren + for cell in row: + for p in cell.paragraphs: + for run in p.runs: + run.font.size = Pt(8) + + # --- Footer --- + doc.add_paragraph() + footer = doc.add_paragraph() + footer.alignment = WD_ALIGN_PARAGRAPH.CENTER + run = footer.add_run(f"Erstellt mit AegisSight Monitor — aegis-sight.de — {now.strftime('%d.%m.%Y')}") + run.font.size = Pt(8) + run.font.color.rgb = RGBColor(0x99, 0x99, 0x99) + + buf = io.BytesIO() + doc.save(buf) + return buf.getvalue() diff --git a/src/report_templates/report.html b/src/report_templates/report.html new file mode 100644 index 0000000..14e9cac --- /dev/null +++ b/src/report_templates/report.html @@ -0,0 +1,198 @@ + + + + + + + + +
    + +
    {{ incident_type_label }}
    +
    {{ incident.title }}
    +
    {{ incident.description or '' }}
    +
    {{ classification_label }}
    +
    +
    Stand: {{ report_date }}
    +
    Erstellt von: {{ creator }}
    + {% if incident.organization_name %}
    Organisation: {{ incident.organization_name }}
    {% endif %} +
    +
    AegisSight Monitor
    +
    + + +
    {{ classification_label }}
    + + +
    +

    Executive Summary

    +
    + {{ executive_summary | safe }} +
    +
    + +{% if scope in ('report', 'full') %} + +
    +

    Lagebild

    + {% if lagebild_timestamp %}

    Aktualisiert: {{ lagebild_timestamp }}

    {% endif %} +
    {{ lagebild_html | safe }}
    +
    + + +{% if sources %} +
    +

    Quellenverzeichnis

    + + + + {% for src in sources %} + + {% endfor %} + +
    #QuelleURL
    {{ loop.index }}{{ src.name or src.title or '' }}{{ src.url or '' }}
    +
    +{% endif %} + + +{% if fact_checks %} +
    +

    Faktencheck

    + + + + {% for fc in fact_checks %} + + + + + + {% endfor %} + +
    BehauptungStatusQuellen
    {{ fc.claim or '' }}{{ fc.status_label }}{{ fc.sources_count or 0 }}
    +
    +{% endif %} + + +{% if source_stats %} +
    +

    Quellenstatistik

    + + + + {% for stat in source_stats %} + + {% endfor %} + +
    QuelleArtikelSprache
    {{ stat.name }}{{ stat.count }}{{ stat.languages }}
    +
    +{% endif %} +{% endif %} + +{% if scope == 'full' %} + +{% if timeline %} +
    +

    Ereignis-Timeline

    + {% for event in timeline %} +
    +
    {{ event.date }}
    +
    {{ event.headline }}
    +
    {{ event.source }}
    +
    + {% endfor %} +
    +{% endif %} + + +{% if articles %} +
    +

    Artikelverzeichnis ({{ articles | length }} Artikel)

    + + + + {% for art in articles %} + + + + + + + {% endfor %} + +
    HeadlineQuelleSpracheDatum
    {{ art.headline_de or art.headline or 'Ohne Titel' }}{{ art.source or '' }}{{ (art.language or 'de') | upper }}{{ art.pub_date }}
    +
    +{% endif %} +{% endif %} + + + + diff --git a/src/routers/incidents.py b/src/routers/incidents.py index 32a81c8..fed0d20 100644 --- a/src/routers/incidents.py +++ b/src/routers/incidents.py @@ -9,6 +9,7 @@ from datetime import datetime from config import TIMEZONE import asyncio import aiosqlite +import io import json import logging import re @@ -629,182 +630,18 @@ def _slugify(text: str) -> str: return text[:80].lower() -def _build_markdown_export( - incident: dict, articles: list, fact_checks: list, - snapshots: list, scope: str, creator: str -) -> str: - """Markdown-Dokument zusammenbauen.""" - typ = "Hintergrundrecherche" if incident.get("type") == "research" else "Breaking News" - updated = (incident.get("updated_at") or "")[:16].replace("T", " ") - - lines = [] - lines.append(f"# {incident['title']}") - lines.append(f"> {typ} | Erstellt von {creator} | Stand: {updated}") - lines.append("") - - # Lagebild - summary = incident.get("summary") or "*Noch kein Lagebild verf\u00fcgbar.*" - lines.append("## Lagebild") - lines.append("") - lines.append(summary) - lines.append("") - - # Quellenverzeichnis aus sources_json - sources_json = incident.get("sources_json") - if sources_json: - try: - sources = json.loads(sources_json) if isinstance(sources_json, str) else sources_json - if sources: - lines.append("## Quellenverzeichnis") - lines.append("") - for i, src in enumerate(sources, 1): - name = src.get("name") or src.get("title") or src.get("url", "") - url = src.get("url", "") - if url: - lines.append(f"{i}. [{name}]({url})") - else: - lines.append(f"{i}. 
{name}") - lines.append("") - except (json.JSONDecodeError, TypeError): - pass - - # Faktencheck - if fact_checks: - lines.append("## Faktencheck") - lines.append("") - for fc in fact_checks: - claim = fc.get("claim", "") - fc_status = fc.get("status", "") - sources_count = fc.get("sources_count", 0) - evidence = fc.get("evidence", "") - status_label = { - "confirmed": "Best\u00e4tigt", "unconfirmed": "Unbest\u00e4tigt", - "disputed": "Umstritten", "false": "Falsch", - }.get(fc_status, fc_status) - line = f"- **{claim}** \u2014 {status_label} ({sources_count} Quellen)" - if evidence: - line += f"\n {evidence}" - lines.append(line) - lines.append("") - - # Scope=full: Artikel\u00fcbersicht - if scope == "full" and articles: - lines.append("## Artikel\u00fcbersicht") - lines.append("") - lines.append("| Headline | Quelle | Sprache | Datum |") - lines.append("|----------|--------|---------|-------|") - for art in articles: - headline = (art.get("headline_de") or art.get("headline") or "").replace("|", "/") - source = (art.get("source") or "").replace("|", "/") - lang = art.get("language", "") - pub = (art.get("published_at") or art.get("collected_at") or "")[:16] - lines.append(f"| {headline} | {source} | {lang} | {pub} |") - lines.append("") - - # Scope=full: Snapshot-Verlauf - if scope == "full" and snapshots: - lines.append("## Snapshot-Verlauf") - lines.append("") - for snap in snapshots: - snap_date = (snap.get("created_at") or "")[:16].replace("T", " ") - art_count = snap.get("article_count", 0) - fc_count = snap.get("fact_check_count", 0) - lines.append(f"### Snapshot vom {snap_date}") - lines.append(f"Artikel: {art_count} | Faktenchecks: {fc_count}") - lines.append("") - snap_summary = snap.get("summary", "") - if snap_summary: - lines.append(snap_summary) - lines.append("") - - now = datetime.now(TIMEZONE).strftime("%Y-%m-%d %H:%M Uhr") - lines.append("---") - lines.append(f"*Exportiert am {now} aus AegisSight Monitor*") - return "\n".join(lines) - - -def 
_build_json_export( - incident: dict, articles: list, fact_checks: list, - snapshots: list, scope: str, creator: str -) -> dict: - """Strukturiertes JSON fuer Export.""" - now = datetime.now(TIMEZONE).strftime("%Y-%m-%dT%H:%M:%SZ") - - sources = [] - sources_json = incident.get("sources_json") - if sources_json: - try: - sources = json.loads(sources_json) if isinstance(sources_json, str) else sources_json - except (json.JSONDecodeError, TypeError): - pass - - export = { - "export_version": "1.0", - "exported_at": now, - "scope": scope, - "incident": { - "id": incident["id"], - "title": incident["title"], - "description": incident.get("description"), - "type": incident.get("type"), - "status": incident.get("status"), - "visibility": incident.get("visibility"), - "created_by": creator, - "created_at": incident.get("created_at"), - "updated_at": incident.get("updated_at"), - "summary": incident.get("summary"), - "international_sources": bool(incident.get("international_sources")), - "include_telegram": bool(incident.get("include_telegram")), - }, - "sources": sources, - "fact_checks": [ - { - "claim": fc.get("claim"), - "status": fc.get("status"), - "sources_count": fc.get("sources_count"), - "evidence": fc.get("evidence"), - "checked_at": fc.get("checked_at"), - } - for fc in fact_checks - ], - } - - if scope == "full": - export["articles"] = [ - { - "headline": art.get("headline"), - "headline_de": art.get("headline_de"), - "source": art.get("source"), - "source_url": art.get("source_url"), - "language": art.get("language"), - "published_at": art.get("published_at"), - "collected_at": art.get("collected_at"), - "verification_status": art.get("verification_status"), - } - for art in articles - ] - export["snapshots"] = [ - { - "created_at": snap.get("created_at"), - "article_count": snap.get("article_count"), - "fact_check_count": snap.get("fact_check_count"), - "summary": snap.get("summary"), - } - for snap in snapshots - ] - - return export - - 
@router.get("/{incident_id}/export") async def export_incident( incident_id: int, - format: str = Query(..., pattern="^(md|json)$"), - scope: str = Query("report", pattern="^(report|full)$"), + format: str = Query("pdf", pattern="^(pdf|docx)$"), + scope: str = Query("report", pattern="^(summary|report|full)$"), + classification: str = Query("offen", pattern="^(offen|dienstgebrauch|vertraulich)$"), current_user: dict = Depends(get_current_user), db: aiosqlite.Connection = Depends(db_dependency), ): - """Lage als Markdown oder JSON exportieren.""" + """Lage als PDF oder Word exportieren.""" + from report_generator import generate_pdf, generate_docx, generate_executive_summary + tenant_id = current_user.get("tenant_id") row = await _check_incident_access(db, incident_id, current_user["id"], tenant_id) incident = dict(row) @@ -837,23 +674,35 @@ async def export_incident( ) snapshots = [dict(r) for r in await cursor.fetchall()] - # Dateiname + # Executive Summary (KI-generiert, gecacht) + exec_summary = incident.get("executive_summary") + if not exec_summary: + summary_text = incident.get("summary") or "" + exec_summary = await generate_executive_summary(summary_text) + await db.execute( + "UPDATE incidents SET executive_summary = ? 
WHERE id = ?", + (exec_summary, incident_id), + ) + await db.commit() + + date_str = datetime.now(TIMEZONE).strftime("%Y%m%d") slug = _slugify(incident["title"]) - scope_suffix = "_vollexport" if scope == "full" else "" + scope_labels = {"summary": "executive_summary", "report": "lagebericht", "full": "vollstaendig"} - if format == "md": - body = _build_markdown_export(incident, articles, fact_checks, snapshots, scope, creator) - filename = f"{slug}{scope_suffix}_{date_str}.md" - media_type = "text/markdown; charset=utf-8" + if format == "pdf": + pdf_bytes = await generate_pdf(incident, articles, fact_checks, snapshots, scope, classification, creator, exec_summary) + filename = f"{slug}_{scope_labels[scope]}_{date_str}.pdf" + return StreamingResponse( + io.BytesIO(pdf_bytes), + media_type="application/pdf", + headers={"Content-Disposition": f'attachment; filename="{filename}"'}, + ) else: - export_data = _build_json_export(incident, articles, fact_checks, snapshots, scope, creator) - body = json.dumps(export_data, ensure_ascii=False, indent=2) - filename = f"{slug}{scope_suffix}_{date_str}.json" - media_type = "application/json; charset=utf-8" + docx_bytes = await generate_docx(incident, articles, fact_checks, snapshots, scope, classification, creator, exec_summary) + filename = f"{slug}_{scope_labels[scope]}_{date_str}.docx" + return StreamingResponse( + io.BytesIO(docx_bytes), + media_type="application/vnd.openxmlformats-officedocument.wordprocessingml.document", + headers={"Content-Disposition": f'attachment; filename="{filename}"'}, + ) - return StreamingResponse( - iter([body]), - media_type=media_type, - headers={"Content-Disposition": f'attachment; filename="{filename}"'}, - ) diff --git a/src/static/dashboard.html index ddb03a1..d3b85fe 100644 --- a/src/static/dashboard.html +++ b/src/static/dashboard.html @@ -15,6 +15,15 @@ + @@ -140,18 +149,7 @@
    -
    - - -
    +
    @@ -661,25 +659,39 @@
    - -