Geoparsing von spaCy auf Haiku umgestellt
- geoparsing.py: Komplett-Rewrite (spaCy NER + Nominatim -> Haiku + geonamescache) - orchestrator.py: incident_context an geoparse_articles, category in INSERT - incidents.py: incident_context aus DB laden und an Geoparsing uebergeben - public_api.py: Locations aggregiert im Lagebild-Endpoint - components.js: response-Kategorie neben retaliation (beide akzeptiert) - requirements.txt: spaCy und geopy entfernt Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
Dieser Commit ist enthalten in:
@@ -9,6 +9,4 @@ apscheduler==3.10.4
|
||||
websockets
|
||||
python-multipart
|
||||
aiosmtplib
|
||||
spacy>=3.7,<4.0
|
||||
geonamescache>=2.0
|
||||
geopy>=2.4
|
||||
|
||||
@@ -1,162 +1,15 @@
|
||||
"""Geoparsing-Modul: NER-basierte Ortsextraktion und Geocoding fuer Artikel."""
|
||||
"""Geoparsing-Modul: Haiku-basierte Ortsextraktion und Geocoding fuer Artikel."""
|
||||
import asyncio
|
||||
import json
|
||||
import logging
|
||||
import re
|
||||
from difflib import SequenceMatcher
|
||||
from typing import Optional
|
||||
|
||||
from agents.claude_client import call_claude, ClaudeUsage, UsageAccumulator
|
||||
from config import CLAUDE_MODEL_FAST
|
||||
|
||||
logger = logging.getLogger("osint.geoparsing")
|
||||
|
||||
# Lazy-loaded spaCy-Modelle (erst beim ersten Aufruf geladen)
|
||||
_nlp_de = None
|
||||
_nlp_en = None
|
||||
|
||||
# Stopwords: Entitaeten die von spaCy faelschlicherweise als Orte erkannt werden
|
||||
LOCATION_STOPWORDS = {
|
||||
"EU", "UN", "NATO", "WHO", "OSZE", "OPEC", "G7", "G20", "BRICS",
|
||||
"Nato", "Eu", "Un", "Onu",
|
||||
"Bundesregierung", "Bundestag", "Bundesrat", "Bundeskanzler",
|
||||
"Kreml", "Weisses Haus", "White House", "Pentagon", "Elysee",
|
||||
"Twitter", "Facebook", "Telegram", "Signal", "WhatsApp",
|
||||
"Reuters", "AP", "AFP", "DPA", "dpa",
|
||||
"Internet", "Online", "Web",
|
||||
# Regionale/vage Begriffe (kein einzelner Punkt auf der Karte)
|
||||
"Naher Osten", "Mittlerer Osten", "Middle East", "Near East",
|
||||
"Golf-Staaten", "Golfstaaten", "Golfregion", "Gulf States", "Persian Gulf",
|
||||
"Nordafrika", "Subsahara", "Zentralasien", "Suedostasien",
|
||||
"Westeuropa", "Osteuropa", "Suedeuropa", "Nordeuropa",
|
||||
"Balkan", "Kaukasus", "Levante", "Maghreb", "Sahel",
|
||||
"Arabische Welt", "Arab World",
|
||||
}
|
||||
|
||||
# Maximale Textlaenge fuer NER-Verarbeitung
|
||||
MAX_TEXT_LENGTH = 10000
|
||||
|
||||
|
||||
# Marker-Kategorien fuer Karten-Klassifizierung
|
||||
CATEGORY_KEYWORDS = {
|
||||
"target": [
|
||||
"angriff", "angegriff", "bombardier", "luftschlag", "luftangriff",
|
||||
"beschuss", "beschossen", "getroffen", "zerstoer", "einschlag",
|
||||
"detonation", "explosion", "strike", "attack", "bombed", "hit",
|
||||
"shelled", "destroyed", "targeted", "missile hit", "air strike",
|
||||
"airstrike", "bombardment", "killed", "casualties", "dead",
|
||||
"tote", "opfer", "getoetet",
|
||||
],
|
||||
"retaliation": [
|
||||
"gegenschlag", "vergeltung", "reaktion", "gegenangriff",
|
||||
"abgefeuert", "retaliat", "counter-attack", "counterattack",
|
||||
"counter-strike", "response", "fired back", "launched",
|
||||
"rakete abgefeuert", "vergeltungsschlag", "abfangen",
|
||||
"abgefangen", "intercepted", "eskalation", "escalat",
|
||||
],
|
||||
"actor": [
|
||||
"regierung", "praesident", "ministerium", "hauptquartier",
|
||||
"kommando", "nato", "pentagon", "kongress", "senat", "parlament",
|
||||
"government", "president", "ministry", "headquarters", "command",
|
||||
"congress", "senate", "parliament", "white house", "weisses haus",
|
||||
"verteidigungsminister", "aussenminister", "generalstab",
|
||||
"defense secretary", "secretary of state", "general staff",
|
||||
"un-sicherheitsrat", "security council", "summit", "gipfel",
|
||||
"diplomati", "botschaft", "embassy",
|
||||
],
|
||||
}
|
||||
|
||||
|
||||
def _classify_location(source_text: str, article_text: str = "") -> str:
|
||||
"""Klassifiziert eine Location basierend auf dem Kontext.
|
||||
|
||||
Returns:
|
||||
Kategorie: 'target', 'retaliation', 'actor', oder 'mentioned'
|
||||
"""
|
||||
text = (source_text + " " + article_text[:500]).lower()
|
||||
|
||||
scores = {"target": 0, "retaliation": 0, "actor": 0}
|
||||
for category, keywords in CATEGORY_KEYWORDS.items():
|
||||
for kw in keywords:
|
||||
if kw in text:
|
||||
scores[category] += 1
|
||||
|
||||
best = max(scores, key=scores.get)
|
||||
if scores[best] >= 1:
|
||||
return best
|
||||
return "mentioned"
|
||||
|
||||
|
||||
|
||||
def _load_spacy_model(lang: str):
    """Lazily load the spaCy pipeline for *lang* ('de' or 'en').

    The pipeline is loaded on first use and cached in the module-level
    globals _nlp_de / _nlp_en. Returns None when spaCy or the model
    package is not installed.
    """
    global _nlp_de, _nlp_en
    try:
        import spacy
    except ImportError:
        logger.error("spaCy nicht installiert - pip install spacy")
        return None

    model_names = {"de": "de_core_news_sm", "en": "en_core_web_sm"}
    cached = {"de": _nlp_de, "en": _nlp_en}

    if lang in model_names and cached[lang] is None:
        model_name = model_names[lang]
        try:
            # Only NER is needed; disable the heavier pipeline components
            nlp = spacy.load(model_name, disable=["parser", "lemmatizer", "textcat"])
        except OSError:
            logger.warning(f"spaCy-Modell {model_name} nicht gefunden - python -m spacy download {model_name}")
            return None
        logger.info(f"spaCy-Modell {model_name} geladen")
        if lang == "de":
            _nlp_de = nlp
        else:
            _nlp_en = nlp

    return _nlp_de if lang == "de" else _nlp_en
|
||||
|
||||
|
||||
def _extract_locations_from_text(text: str, language: str = "de") -> list[dict]:
    """Extract place names from text via spaCy NER.

    Args:
        text: Input text (truncated to MAX_TEXT_LENGTH).
        language: Preferred model language ("de" or "en"); if that model
            cannot be loaded, the other one is tried as a fallback.

    Returns:
        List of dicts: [{name: str, source_text: str}] with one entry per
        unique (case-insensitive) location and ~50 chars of context.
    """
    if not text:
        return []

    text = text[:MAX_TEXT_LENGTH]

    nlp = _load_spacy_model(language)
    if nlp is None:
        # Fallback: try the other model before giving up
        nlp = _load_spacy_model("en" if language == "de" else "de")
        if nlp is None:
            return []

    seen_names: set[str] = set()
    results: list[dict] = []
    for ent in nlp(text).ents:
        if ent.label_ not in ("LOC", "GPE"):
            continue
        name = ent.text.strip()
        # Skip entities that are too short, known false positives, or
        # consist only of digits/punctuation.
        if len(name) < 2 or name in LOCATION_STOPWORDS:
            continue
        if re.match(r'^[\d\W]+$', name):
            continue
        key = name.lower()
        if key in seen_names:
            continue
        seen_names.add(key)
        # Context window: 25 characters on each side of the entity
        ctx_start = max(0, ent.start_char - 25)
        ctx_end = min(len(text), ent.end_char + 25)
        results.append({"name": name, "source_text": text[ctx_start:ctx_end].strip()})

    return results
|
||||
|
||||
|
||||
# Geocoding-Cache (in-memory, lebt solange der Prozess laeuft)
|
||||
_geocode_cache: dict[str, Optional[dict]] = {}
|
||||
|
||||
@@ -178,50 +31,38 @@ def _get_geonamescache():
|
||||
return _gc
|
||||
|
||||
|
||||
def _geocode_location(name: str) -> Optional[dict]:
|
||||
"""Geocoded einen Ortsnamen. Offline via geonamescache, Fallback Nominatim.
|
||||
def _geocode_offline(name: str, country_code: str = "") -> Optional[dict]:
|
||||
"""Geocoding ueber geonamescache (offline).
|
||||
|
||||
Returns:
|
||||
dict mit {lat, lon, country_code, normalized_name, confidence} oder None
|
||||
Args:
|
||||
name: Ortsname (normalisiert von Haiku)
|
||||
country_code: ISO-2 Laendercode (von Haiku) fuer bessere Disambiguierung
|
||||
"""
|
||||
name_lower = name.lower().strip()
|
||||
if name_lower in _geocode_cache:
|
||||
return _geocode_cache[name_lower]
|
||||
|
||||
result = _geocode_offline(name)
|
||||
if result is None:
|
||||
result = _geocode_nominatim(name)
|
||||
|
||||
_geocode_cache[name_lower] = result
|
||||
return result
|
||||
|
||||
|
||||
def _geocode_offline(name: str) -> Optional[dict]:
|
||||
"""Versucht Geocoding ueber geonamescache (offline)."""
|
||||
gc = _get_geonamescache()
|
||||
if gc is None:
|
||||
return None
|
||||
|
||||
name_lower = name.lower().strip()
|
||||
|
||||
# 1. Direkte Suche in Staedten
|
||||
# 1. Stadtsuche
|
||||
cities = gc.get_cities()
|
||||
matches = []
|
||||
for gid, city in cities.items():
|
||||
city_name = city.get("name", "")
|
||||
alt_names = city.get("alternatenames", "")
|
||||
# alternatenames kann String (komma-getrennt) oder Liste sein
|
||||
if isinstance(alt_names, list):
|
||||
alt_list = [n.strip().lower() for n in alt_names if n.strip()]
|
||||
else:
|
||||
alt_list = [n.strip().lower() for n in str(alt_names).split(",") if n.strip()]
|
||||
if city_name.lower() == name_lower:
|
||||
matches.append(city)
|
||||
elif name_lower in alt_list:
|
||||
if city_name.lower() == name_lower or name_lower in alt_list:
|
||||
matches.append(city)
|
||||
|
||||
if matches:
|
||||
# Disambiguierung: groesste Stadt gewinnt
|
||||
# Disambiguierung: country_code bevorzugen, dann Population
|
||||
if country_code:
|
||||
cc_matches = [c for c in matches if c.get("countrycode", "").upper() == country_code.upper()]
|
||||
if cc_matches:
|
||||
matches = cc_matches
|
||||
best = max(matches, key=lambda c: c.get("population", 0))
|
||||
return {
|
||||
"lat": float(best["latitude"]),
|
||||
@@ -235,115 +76,231 @@ def _geocode_offline(name: str) -> Optional[dict]:
|
||||
countries = gc.get_countries()
|
||||
for code, country in countries.items():
|
||||
if country.get("name", "").lower() == name_lower:
|
||||
# Hauptstadt-Koordinaten als Fallback
|
||||
capital = country.get("capital", "")
|
||||
if capital:
|
||||
cap_result = _geocode_offline(capital)
|
||||
if cap_result:
|
||||
cap_result["normalized_name"] = country["name"]
|
||||
cap_result["confidence"] = 0.5 # Land, nicht Stadt
|
||||
cap_result["confidence"] = 0.5
|
||||
return cap_result
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def _geocode_nominatim(name: str) -> Optional[dict]:
|
||||
"""Fallback-Geocoding ueber Nominatim (1 Request/Sekunde)."""
|
||||
try:
|
||||
from geopy.geocoders import Nominatim
|
||||
from geopy.exc import GeocoderTimedOut, GeocoderServiceError
|
||||
except ImportError:
|
||||
return None
|
||||
|
||||
try:
|
||||
geocoder = Nominatim(user_agent="aegissight-monitor/1.0", timeout=5)
|
||||
location = geocoder.geocode(name, language="de", exactly_one=True)
|
||||
if location:
|
||||
# Country-Code aus Address extrahieren falls verfuegbar
|
||||
raw = location.raw or {}
|
||||
country_code = ""
|
||||
if "address" in raw:
|
||||
country_code = raw["address"].get("country_code", "").upper()
|
||||
|
||||
normalized_name = location.address.split(",")[0] if location.address else name
|
||||
|
||||
# Plausibilitaetspruefung: Nominatim-Ergebnis muss zum Suchbegriff passen
|
||||
similarity = SequenceMatcher(None, name.lower(), normalized_name.lower()).ratio()
|
||||
if similarity < 0.3:
|
||||
logger.debug(f"Nominatim-Ergebnis verworfen: '{name}' -> '{normalized_name}' (Aehnlichkeit {similarity:.2f})")
|
||||
return None
|
||||
|
||||
return {
|
||||
"lat": float(location.latitude),
|
||||
"lon": float(location.longitude),
|
||||
"country_code": country_code,
|
||||
"normalized_name": normalized_name,
|
||||
"confidence": 0.4, # Nominatim-Ergebnis = niedrigere Konfidenz
|
||||
}
|
||||
except (GeocoderTimedOut, GeocoderServiceError) as e:
|
||||
logger.debug(f"Nominatim-Fehler fuer '{name}': {e}")
|
||||
except Exception as e:
|
||||
logger.debug(f"Geocoding-Fehler fuer '{name}': {e}")
|
||||
|
||||
return None
|
||||
|
||||
|
||||
async def geoparse_articles(articles: list[dict]) -> dict[int, list[dict]]:
|
||||
"""Geoparsing fuer eine Liste von Artikeln.
|
||||
def _geocode_location(name: str, country_code: str = "", haiku_coords: Optional[dict] = None) -> Optional[dict]:
|
||||
"""Geocoded einen Ortsnamen. Prioritaet: geonamescache > Haiku-Koordinaten.
|
||||
|
||||
Args:
|
||||
articles: Liste von Artikel-Dicts (mit id, content_de, content_original, language, headline, headline_de)
|
||||
name: Ortsname
|
||||
country_code: ISO-2 Code (von Haiku)
|
||||
haiku_coords: {"lat": float, "lon": float} (Fallback von Haiku)
|
||||
"""
|
||||
cache_key = f"{name.lower().strip()}|{country_code.upper()}"
|
||||
if cache_key in _geocode_cache:
|
||||
return _geocode_cache[cache_key]
|
||||
|
||||
result = _geocode_offline(name, country_code)
|
||||
|
||||
# Fallback: Haiku-Koordinaten nutzen
|
||||
if result is None and haiku_coords:
|
||||
lat = haiku_coords.get("lat")
|
||||
lon = haiku_coords.get("lon")
|
||||
if lat is not None and lon is not None:
|
||||
result = {
|
||||
"lat": float(lat),
|
||||
"lon": float(lon),
|
||||
"country_code": country_code.upper() if country_code else "",
|
||||
"normalized_name": name,
|
||||
"confidence": 0.45,
|
||||
}
|
||||
|
||||
_geocode_cache[cache_key] = result
|
||||
return result
|
||||
|
||||
|
||||
HAIKU_GEOPARSE_PROMPT = """Extrahiere alle geographischen Orte aus diesen Nachrichten-Headlines.
|
||||
|
||||
Kontext der Lage: "{incident_context}"
|
||||
|
||||
Regeln:
|
||||
- Nur echte Orte (Staedte, Laender, Regionen)
|
||||
- Keine Personen, Organisationen, Gebaeude, Alltagswoerter
|
||||
- Bei "US-Militaer" etc: Land (USA) extrahieren, nicht das Kompositum
|
||||
- HTML-Tags ignorieren
|
||||
- Jeder Ort nur einmal pro Headline
|
||||
- Regionen wie "Middle East", "Gulf", "Naher Osten" NICHT extrahieren (kein einzelner Punkt auf der Karte)
|
||||
|
||||
Klassifiziere basierend auf dem Lage-Kontext:
|
||||
- "target": Wo das Ereignis passiert / Schaden entsteht
|
||||
- "response": Wo Reaktionen / Gegenmassnahmen stattfinden
|
||||
- "actor": Wo Entscheidungen getroffen werden / Entscheider sitzen
|
||||
- "mentioned": Nur erwaehnt, kein direkter Bezug
|
||||
|
||||
Headlines:
|
||||
{headlines}
|
||||
|
||||
Antwort NUR als JSON-Array, kein anderer Text:
|
||||
[{{"headline_idx": 0, "locations": [
|
||||
{{"name": "Teheran", "normalized": "Tehran", "country_code": "IR",
|
||||
"type": "city", "category": "target",
|
||||
"lat": 35.69, "lon": 51.42}}
|
||||
]}}]"""
|
||||
|
||||
|
||||
async def _extract_locations_haiku(
    headlines: list[dict], incident_context: str
) -> dict[int, list[dict]]:
    """Extract and classify locations from headlines via a single Haiku call.

    Args:
        headlines: [{"idx": article_id, "text": headline_text}, ...]
        incident_context: Incident context used for category classification.

    Returns:
        dict[article_id -> list[{name, normalized, country_code, type,
        category, lat, lon}]]; articles without usable locations are omitted.
        Returns {} on any model or parse failure (best-effort semantics).
    """
    if not headlines:
        return {}

    # Number the headlines so Haiku can reference them by index
    numbered = [f"[{i}] {h['text']}" for i, h in enumerate(headlines)]

    prompt = HAIKU_GEOPARSE_PROMPT.format(
        incident_context=incident_context or "Allgemeine Nachrichtenlage",
        headlines="\n".join(numbered),
    )

    try:
        result_text, usage = await call_claude(prompt, tools=None, model=CLAUDE_MODEL_FAST)
    except Exception as e:
        logger.error(f"Haiku-Geoparsing fehlgeschlagen: {e}")
        return {}

    # Parse JSON, with a regex fallback for responses wrapped in chatter
    parsed = None
    try:
        parsed = json.loads(result_text)
    except json.JSONDecodeError:
        match = re.search(r'\[.*\]', result_text, re.DOTALL)
        if match:
            try:
                parsed = json.loads(match.group())
            except json.JSONDecodeError:
                logger.warning("Haiku-Geoparsing: JSON-Parse fehlgeschlagen auch mit Regex-Fallback")
                return {}

    if not parsed or not isinstance(parsed, list):
        logger.warning("Haiku-Geoparsing: Kein gueltiges JSON-Array erhalten")
        return {}

    # Map model output back onto article ids
    results: dict[int, list[dict]] = {}
    for entry in parsed:
        if not isinstance(entry, dict):
            continue
        headline_idx = entry.get("headline_idx")
        # BUGFIX: validate type AND reject negative indices. The original
        # `headline_idx >= len(headlines)` check let a negative int through,
        # which Python's negative indexing silently maps to the WRONG
        # article, and a non-int value raised an uncaught TypeError.
        if not isinstance(headline_idx, int) or not 0 <= headline_idx < len(headlines):
            continue

        article_id = headlines[headline_idx]["idx"]

        article_locs = []
        for loc in entry.get("locations", []):
            if not isinstance(loc, dict):
                continue
            loc_type = loc.get("type", "city")
            # Regions have no meaningful single point on the map
            if loc_type == "region":
                continue
            name = loc.get("name", "")
            if not name:
                continue
            article_locs.append({
                "name": name,
                "normalized": loc.get("normalized", name),
                "country_code": loc.get("country_code", ""),
                "type": loc_type,
                "category": loc.get("category", "mentioned"),
                "lat": loc.get("lat"),
                "lon": loc.get("lon"),
            })

        if article_locs:
            results[article_id] = article_locs

    return results
|
||||
|
||||
|
||||
async def geoparse_articles(
|
||||
articles: list[dict],
|
||||
incident_context: str = "",
|
||||
) -> dict[int, list[dict]]:
|
||||
"""Geoparsing fuer eine Liste von Artikeln via Haiku + geonamescache.
|
||||
|
||||
Args:
|
||||
articles: Liste von Artikel-Dicts (mit id, headline, headline_de, language)
|
||||
incident_context: Lage-Kontext (Titel + Beschreibung) fuer kontextbewusste Klassifizierung
|
||||
|
||||
Returns:
|
||||
dict[article_id -> list[{location_name, location_name_normalized, country_code,
|
||||
lat, lon, confidence, source_text, category}]]
|
||||
"""
|
||||
if not articles:
|
||||
return {}
|
||||
|
||||
result = {}
|
||||
|
||||
# Headlines sammeln
|
||||
headlines = []
|
||||
for article in articles:
|
||||
article_id = article.get("id")
|
||||
if not article_id:
|
||||
continue
|
||||
|
||||
language = article.get("language", "de")
|
||||
|
||||
# Text zusammenbauen: Headline + Content
|
||||
text_parts = []
|
||||
if language == "de":
|
||||
if article.get("headline_de"):
|
||||
text_parts.append(article["headline_de"])
|
||||
elif article.get("headline"):
|
||||
text_parts.append(article["headline"])
|
||||
if article.get("content_de"):
|
||||
text_parts.append(article["content_de"])
|
||||
elif article.get("content_original"):
|
||||
text_parts.append(article["content_original"])
|
||||
else:
|
||||
if article.get("headline"):
|
||||
text_parts.append(article["headline"])
|
||||
if article.get("content_original"):
|
||||
text_parts.append(article["content_original"])
|
||||
|
||||
text = "\n".join(text_parts)
|
||||
if not text.strip():
|
||||
# Deutsche Headline bevorzugen
|
||||
headline = article.get("headline_de") or article.get("headline") or ""
|
||||
headline = headline.strip()
|
||||
if not headline:
|
||||
continue
|
||||
|
||||
# NER-Extraktion (CPU-bound, in Thread ausfuehren)
|
||||
locations_raw = await asyncio.to_thread(
|
||||
_extract_locations_from_text, text, language
|
||||
headlines.append({"idx": article_id, "text": headline})
|
||||
|
||||
if not headlines:
|
||||
return {}
|
||||
|
||||
# Batches bilden (max 50 Headlines pro Haiku-Call)
|
||||
batch_size = 50
|
||||
all_haiku_results = {}
|
||||
for i in range(0, len(headlines), batch_size):
|
||||
batch = headlines[i:i + batch_size]
|
||||
batch_results = await _extract_locations_haiku(batch, incident_context)
|
||||
all_haiku_results.update(batch_results)
|
||||
|
||||
if not all_haiku_results:
|
||||
return {}
|
||||
|
||||
# Geocoding via geonamescache (mit Haiku-Koordinaten als Fallback)
|
||||
result = {}
|
||||
for article_id, haiku_locs in all_haiku_results.items():
|
||||
locations = []
|
||||
for loc in haiku_locs:
|
||||
haiku_coords = None
|
||||
if loc.get("lat") is not None and loc.get("lon") is not None:
|
||||
haiku_coords = {"lat": loc["lat"], "lon": loc["lon"]}
|
||||
|
||||
geo = _geocode_location(
|
||||
loc["normalized"],
|
||||
loc.get("country_code", ""),
|
||||
haiku_coords,
|
||||
)
|
||||
|
||||
if not locations_raw:
|
||||
continue
|
||||
|
||||
# Geocoding (enthaelt potentiell Netzwerk-Calls)
|
||||
locations = []
|
||||
for loc in locations_raw:
|
||||
geo = await asyncio.to_thread(_geocode_location, loc["name"])
|
||||
if geo:
|
||||
category = _classify_location(loc.get("source_text", ""), text)
|
||||
locations.append({
|
||||
"location_name": loc["name"],
|
||||
"location_name_normalized": geo["normalized_name"],
|
||||
@@ -351,8 +308,8 @@ async def geoparse_articles(articles: list[dict]) -> dict[int, list[dict]]:
|
||||
"lat": geo["lat"],
|
||||
"lon": geo["lon"],
|
||||
"confidence": geo["confidence"],
|
||||
"source_text": loc.get("source_text", ""),
|
||||
"category": category,
|
||||
"source_text": loc["name"],
|
||||
"category": loc.get("category", "mentioned"),
|
||||
})
|
||||
|
||||
if locations:
|
||||
|
||||
@@ -714,19 +714,20 @@ class AgentOrchestrator:
|
||||
if new_articles_for_analysis:
|
||||
try:
|
||||
from agents.geoparsing import geoparse_articles
|
||||
incident_context = f"{title} - {description}"
|
||||
logger.info(f"Geoparsing fuer {len(new_articles_for_analysis)} neue Artikel...")
|
||||
geo_results = await geoparse_articles(new_articles_for_analysis)
|
||||
geo_results = await geoparse_articles(new_articles_for_analysis, incident_context)
|
||||
geo_count = 0
|
||||
for art_id, locations in geo_results.items():
|
||||
for loc in locations:
|
||||
await db.execute(
|
||||
"""INSERT INTO article_locations
|
||||
(article_id, incident_id, location_name, location_name_normalized,
|
||||
country_code, latitude, longitude, confidence, source_text, tenant_id)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)""",
|
||||
country_code, latitude, longitude, confidence, source_text, tenant_id, category)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)""",
|
||||
(art_id, incident_id, loc["location_name"], loc["location_name_normalized"],
|
||||
loc["country_code"], loc["lat"], loc["lon"], loc["confidence"],
|
||||
loc.get("source_text", ""), tenant_id),
|
||||
loc.get("source_text", ""), tenant_id, loc.get("category", "mentioned")),
|
||||
)
|
||||
geo_count += 1
|
||||
if geo_count > 0:
|
||||
|
||||
@@ -351,6 +351,15 @@ async def _run_geoparse_background(incident_id: int, tenant_id: int | None):
|
||||
from agents.geoparsing import geoparse_articles
|
||||
db = await get_db()
|
||||
|
||||
# Incident-Kontext fuer Haiku laden
|
||||
cursor = await db.execute(
|
||||
"SELECT title, description FROM incidents WHERE id = ?", (incident_id,)
|
||||
)
|
||||
inc_row = await cursor.fetchone()
|
||||
incident_context = ""
|
||||
if inc_row:
|
||||
incident_context = f"{inc_row['title']} - {inc_row['description'] or ''}"
|
||||
|
||||
cursor = await db.execute(
|
||||
"""SELECT a.* FROM articles a
|
||||
WHERE a.incident_id = ?
|
||||
@@ -373,7 +382,7 @@ async def _run_geoparse_background(incident_id: int, tenant_id: int | None):
|
||||
processed = 0
|
||||
for i in range(0, total, batch_size):
|
||||
batch = articles[i:i + batch_size]
|
||||
geo_results = await geoparse_articles(batch)
|
||||
geo_results = await geoparse_articles(batch, incident_context)
|
||||
for art_id, locations in geo_results.items():
|
||||
for loc in locations:
|
||||
await db.execute(
|
||||
|
||||
@@ -108,6 +108,23 @@ async def get_lagebild(db=Depends(db_dependency)):
|
||||
except (json.JSONDecodeError, TypeError):
|
||||
sources_json = []
|
||||
|
||||
# Locations aggregiert nach normalisierten Ortsnamen
|
||||
cursor = await db.execute(
|
||||
f"""SELECT
|
||||
al.location_name_normalized as name,
|
||||
al.latitude as lat,
|
||||
al.longitude as lon,
|
||||
al.country_code,
|
||||
al.category,
|
||||
COUNT(*) as article_count,
|
||||
MAX(al.confidence) as confidence
|
||||
FROM article_locations al
|
||||
WHERE al.incident_id IN ({ids})
|
||||
GROUP BY al.location_name_normalized
|
||||
ORDER BY article_count DESC"""
|
||||
)
|
||||
locations = [dict(r) for r in await cursor.fetchall()]
|
||||
|
||||
return {
|
||||
"generated_at": datetime.now(TIMEZONE).isoformat(),
|
||||
"incident": {
|
||||
@@ -130,6 +147,7 @@ async def get_lagebild(db=Depends(db_dependency)):
|
||||
"articles": articles,
|
||||
"fact_checks": fact_checks,
|
||||
"available_snapshots": available_snapshots,
|
||||
"locations": locations,
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -642,6 +642,7 @@ const UI = {
|
||||
this._markerIcons = {
|
||||
target: this._createSvgIcon('#dc3545', '#a71d2a'),
|
||||
retaliation: this._createSvgIcon('#f39c12', '#c47d0a'),
|
||||
response: this._createSvgIcon('#f39c12', '#c47d0a'),
|
||||
actor: this._createSvgIcon('#2a81cb', '#1a5c8f'),
|
||||
mentioned: this._createSvgIcon('#7b7b7b', '#555555'),
|
||||
};
|
||||
@@ -650,12 +651,14 @@ const UI = {
|
||||
_categoryLabels: {
|
||||
target: 'Angegriffene Ziele',
|
||||
retaliation: 'Vergeltung / Eskalation',
|
||||
response: 'Reaktion / Gegenmassnahmen',
|
||||
actor: 'Strategische Akteure',
|
||||
mentioned: 'Erwaehnt',
|
||||
},
|
||||
_categoryColors: {
|
||||
target: '#cb2b3e',
|
||||
retaliation: '#f39c12',
|
||||
response: '#f39c12',
|
||||
actor: '#2a81cb',
|
||||
mentioned: '#7b7b7b',
|
||||
},
|
||||
@@ -799,7 +802,7 @@ const UI = {
|
||||
legend.onAdd = function() {
|
||||
const div = L.DomUtil.create('div', 'map-legend-ctrl');
|
||||
let html = '<strong style="display:block;margin-bottom:6px;">Legende</strong>';
|
||||
['target', 'retaliation', 'actor', 'mentioned'].forEach(cat => {
|
||||
['target', 'retaliation', 'response', 'actor', 'mentioned'].forEach(cat => {
|
||||
if (usedCategories.has(cat)) {
|
||||
html += `<div style="display:flex;align-items:center;gap:6px;margin:3px 0;"><span style="width:10px;height:10px;border-radius:50%;background:${self2._categoryColors[cat]};flex-shrink:0;"></span><span>${self2._categoryLabels[cat]}</span></div>`;
|
||||
}
|
||||
|
||||
In neuem Issue referenzieren
Einen Benutzer sperren