Quellenvielfalt sicherstellen: Domain-Cap + Balance + Discovery-Verbesserungen

- config.py: MAX_FEEDS_PER_DOMAIN=3, MAX_ARTICLES_PER_DOMAIN_RSS=10
- rss_parser.py: _apply_domain_cap() begrenzt Artikel pro Domain nach RSS-Fetch
- orchestrator.py: Domain-Balance vor Feed-Selektion (max 3 Feeds/Domain),
  Domain-Cap in Background-Discovery
- source_rules.py: article_count in get_feeds_with_metadata(), Content-Hash
  in _validate_feed() für Duplikat-Erkennung bei Discovery
- researcher.py: QUELLENVIELFALT-Regel im Haiku Feed-Selektions-Prompt
- DB: 52 WordPress-Redirect-Duplikate deaktiviert (netzpolitik.org, bashinho.de)

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
Dieser Commit ist enthalten in:
claude-dev
2026-03-04 23:25:04 +01:00
Ursprung 0920d218f5
Commit ff4c54d9a8
5 geänderte Dateien mit 103 neuen und 8 gelöschten Zeilen

Datei anzeigen

@@ -4,8 +4,9 @@ import json
import logging
import re
from datetime import datetime, timezone
from config import TIMEZONE
from config import TIMEZONE, MAX_FEEDS_PER_DOMAIN
from typing import Optional
from collections import defaultdict
from urllib.parse import urlparse, urlunparse
from agents.claude_client import UsageAccumulator
@@ -162,6 +163,14 @@ async def _background_discover_sources(articles: list[dict]):
# 3. Gegen DB prüfen — welche Domains existieren schon?
new_count = 0
for domain, url, category in domains_to_check:
cursor = await db.execute(
"SELECT id FROM sources WHERE LOWER(domain) = ? AND source_type = 'rss_feed' AND status = 'active'",
(domain.lower(),),
)
existing_feeds = await cursor.fetchall()
if len(existing_feeds) >= MAX_FEEDS_PER_DOMAIN:
continue # Domain hat bereits genug aktive Feeds
cursor = await db.execute(
"SELECT id FROM sources WHERE LOWER(domain) = ?",
(domain.lower(),),
@@ -578,6 +587,28 @@ class AgentOrchestrator:
from source_rules import get_feeds_with_metadata
all_feeds = await get_feeds_with_metadata(tenant_id=tenant_id)
# Domain-Balance: Max. MAX_FEEDS_PER_DOMAIN Feeds pro Domain
feeds_by_domain: dict[str, list[dict]] = defaultdict(list)
for feed in all_feeds:
feeds_by_domain[feed.get("domain", "")].append(feed)
balanced_feeds = []
for domain, domain_feeds in feeds_by_domain.items():
if len(domain_feeds) > MAX_FEEDS_PER_DOMAIN:
# Nach article_count sortieren, meistgenutzte behalten
domain_feeds.sort(key=lambda f: f.get("article_count", 0), reverse=True)
kept = domain_feeds[:MAX_FEEDS_PER_DOMAIN]
logger.info(
f"Domain-Balance: {domain} von {len(domain_feeds)} auf {MAX_FEEDS_PER_DOMAIN} Feeds begrenzt"
)
balanced_feeds.extend(kept)
else:
balanced_feeds.extend(domain_feeds)
if len(balanced_feeds) < len(all_feeds):
logger.info(f"Domain-Balance gesamt: {len(all_feeds)} → {len(balanced_feeds)} Feeds")
all_feeds = balanced_feeds
feed_usage = None
if len(all_feeds) > 20:
selected_feeds, feed_usage = await rss_researcher.select_relevant_feeds(