This commit is contained in:
Claude Project Manager
2025-08-01 23:50:28 +02:00
Commit 04585e95b6
290 changed files with 64086 additions and 0 deletions


@@ -0,0 +1,28 @@
"""
Application Use Cases - business logic orchestration
"""
# Rate Limiting Use Cases
from .adaptive_rate_limit_use_case import AdaptiveRateLimitUseCase
from .detect_rate_limit_use_case import DetectRateLimitUseCase
# Analytics Use Cases
from .log_account_creation_use_case import LogAccountCreationUseCase
from .analyze_failure_rate_use_case import AnalyzeFailureRateUseCase
from .generate_reports_use_case import GenerateReportsUseCase
# Export Use Cases
from .export_accounts_use_case import ExportAccountsUseCase
# Login Use Cases
from .one_click_login_use_case import OneClickLoginUseCase
__all__ = [
'AdaptiveRateLimitUseCase',
'DetectRateLimitUseCase',
'LogAccountCreationUseCase',
'AnalyzeFailureRateUseCase',
'GenerateReportsUseCase',
'ExportAccountsUseCase',
'OneClickLoginUseCase'
]
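A minimal wiring sketch for consumers of this package; the package path application.use_cases and the injected db_manager/analytics_service instances are assumptions, not shown in this commit:

from application.use_cases import ExportAccountsUseCase, GenerateReportsUseCase

def build_use_cases(db_manager, analytics_service):
    # Every use case takes its dependencies via the constructor,
    # so one composition root can wire them all without globals.
    return {
        'export': ExportAccountsUseCase(db_manager),
        'reports': GenerateReportsUseCase(analytics_service),
    }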


@@ -0,0 +1,221 @@
"""
Adaptive Rate Limit Use Case - adjusts pacing dynamically
"""
import logging
from typing import Dict, Any, Optional
from datetime import datetime, timedelta
from domain.services.rate_limit_service import IRateLimitService
from domain.value_objects.action_timing import ActionTiming, ActionType
from domain.entities.rate_limit_policy import RateLimitPolicy
logger = logging.getLogger("adaptive_rate_limit_use_case")
class AdaptiveRateLimitUseCase:
"""
Use case for adaptive pacing based on observed system behavior.
Analyzes response times, adjusts delays dynamically, and detects anomalies.
"""
def __init__(self, rate_limit_service: IRateLimitService):
self.rate_limit_service = rate_limit_service
self.anomaly_threshold = 2.0 # standard deviations for anomaly detection
self.adaptation_interval = timedelta(minutes=5)
self.last_adaptation = {}
def execute(self, action_type: ActionType, context: Optional[Dict[str, Any]] = None) -> float:
"""
Runs the adaptive rate limiting logic.
Args:
action_type: Type of the action to be executed
context: Additional context (e.g. session ID, platform)
Returns:
Optimal delay in seconds
"""
# Check whether adaptation is needed
if self._should_adapt(action_type):
self._adapt_policy(action_type)
# Calculate the delay with the current policy
delay = self.rate_limit_service.calculate_delay(action_type, context)
# Wait if necessary
actual_wait = self.rate_limit_service.wait_if_needed(action_type)
logger.debug(f"Adaptive delay for {action_type.value}: {delay:.2f}s (waited: {actual_wait:.2f}s)")
return delay
def record_timing(self, timing: ActionTiming) -> None:
"""
Records a timing sample and triggers adaptations where needed.
Args:
timing: Timing information for the executed action
"""
# Record the timing
self.rate_limit_service.record_action(timing)
# Check for anomalies
if self._is_anomaly(timing):
logger.warning(f"Anomaly detected for {timing.action_type.value}: "
f"duration={timing.duration}s, success={timing.success}")
self._handle_anomaly(timing)
def _should_adapt(self, action_type: ActionType) -> bool:
"""Prüft ob Policy angepasst werden sollte"""
last = self.last_adaptation.get(action_type, datetime.min)
return datetime.now() - last > self.adaptation_interval
def _adapt_policy(self, action_type: ActionType) -> None:
"""Passt Policy basierend auf gesammelten Daten an"""
# Hole Statistiken
stats = self.rate_limit_service.get_statistics(
action_type,
timeframe=timedelta(hours=1)
)
if not stats or 'success_rate' not in stats:
return
current_policy = self.rate_limit_service.get_policy(action_type)
success_rate = stats['success_rate']
avg_duration = stats.get('avg_duration_ms', 0) / 1000.0
# Calculate the new policy parameters
new_policy = self._calculate_new_policy(
current_policy,
success_rate,
avg_duration
)
if new_policy != current_policy:
self.rate_limit_service.update_policy(action_type, new_policy)
logger.info(f"Adapted policy for {action_type.value}: "
f"min_delay={new_policy.min_delay:.2f}, "
f"max_delay={new_policy.max_delay:.2f}")
self.last_adaptation[action_type] = datetime.now()
def _calculate_new_policy(self, current: RateLimitPolicy,
success_rate: float,
avg_duration: float) -> RateLimitPolicy:
"""Berechnet neue Policy-Parameter"""
# Kopiere aktuelle Policy
new_min = current.min_delay
new_max = current.max_delay
new_backoff = current.backoff_multiplier
# Adjust based on the success rate
if success_rate < 0.7: # low success rate
# Increase delays significantly
new_min = min(new_min * 1.3, 10.0)
new_max = min(new_max * 1.3, 30.0)
new_backoff = min(new_backoff * 1.1, 3.0)
elif success_rate < 0.85: # medium success rate
# Moderate increase
new_min = min(new_min * 1.1, 10.0)
new_max = min(new_max * 1.1, 30.0)
elif success_rate > 0.95: # high success rate
# Cautious decrease
if avg_duration < current.min_delay * 0.8:
new_min = max(new_min * 0.9, 0.1)
new_max = max(new_max * 0.9, new_min * 3)
# Ensure that max > min
new_max = max(new_max, new_min * 2)
return RateLimitPolicy(
min_delay=round(new_min, 2),
max_delay=round(new_max, 2),
adaptive=current.adaptive,
backoff_multiplier=round(new_backoff, 2),
max_retries=current.max_retries
)
def _is_anomaly(self, timing: ActionTiming) -> bool:
"""Erkennt ob ein Timing eine Anomalie darstellt"""
# Hole Statistiken für Vergleich
stats = self.rate_limit_service.get_statistics(
timing.action_type,
timeframe=timedelta(hours=1)
)
if not stats or 'avg_duration_ms' not in stats:
return False
avg_duration = stats['avg_duration_ms'] / 1000.0
# Very slow requests are anomalies
if timing.duration > avg_duration * self.anomaly_threshold:
return True
# A failure after a streak of successes is an anomaly
if not timing.success and stats.get('success_rate', 0) > 0.9:
return True
return False
def _handle_anomaly(self, timing: ActionTiming) -> None:
"""Behandelt erkannte Anomalien"""
# Sofortige Policy-Anpassung bei kritischen Anomalien
if not timing.success and timing.error_message:
if any(indicator in timing.error_message.lower()
for indicator in ['rate limit', 'too many', 'blocked']):
# Rate limit detected - adapt immediately
current_policy = self.rate_limit_service.get_policy(timing.action_type)
emergency_policy = RateLimitPolicy(
min_delay=min(current_policy.min_delay * 2, 10.0),
max_delay=min(current_policy.max_delay * 2, 30.0),
adaptive=current_policy.adaptive,
backoff_multiplier=min(current_policy.backoff_multiplier * 1.5, 3.0),
max_retries=current_policy.max_retries
)
self.rate_limit_service.update_policy(timing.action_type, emergency_policy)
logger.warning(f"Emergency policy update for {timing.action_type.value} due to rate limit")
def get_recommendations(self) -> Dict[str, Any]:
"""Gibt Empfehlungen basierend auf aktuellen Metriken"""
recommendations = {
'actions': [],
'warnings': [],
'optimizations': []
}
# Analyze all action types
for action_type in ActionType:
stats = self.rate_limit_service.get_statistics(
action_type,
timeframe=timedelta(hours=24)
)
if not stats or stats.get('total_actions', 0) < 10:
continue
success_rate = stats.get('success_rate', 0)
avg_retries = stats.get('avg_retry_count', 0)
# Recommendations based on the metrics
if success_rate < 0.5:
recommendations['warnings'].append(
f"{action_type.value}: Sehr niedrige Erfolgsrate ({success_rate:.1%})"
)
recommendations['actions'].append(
f"Erhöhe Delays für {action_type.value} oder prüfe auf Blocking"
)
if avg_retries > 2:
recommendations['warnings'].append(
f"{action_type.value}: Hohe Retry-Rate ({avg_retries:.1f})"
)
if success_rate > 0.98 and stats.get('avg_duration_ms', 0) < 500:
recommendations['optimizations'].append(
f"{action_type.value}: Könnte schneller ausgeführt werden"
)
return recommendations
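A smoke-test sketch for the class above. The fake service implements only the IRateLimitService calls this use case makes, and ActionType.LOGIN is assumed to exist (the enum's members are not shown in this commit):

from datetime import timedelta
from domain.entities.rate_limit_policy import RateLimitPolicy
from domain.value_objects.action_timing import ActionType

class FakeRateLimitService:
    """Hypothetical IRateLimitService stand-in for local experiments."""
    def __init__(self):
        self.policy = RateLimitPolicy(min_delay=1.0, max_delay=3.0, adaptive=True,
                                      backoff_multiplier=1.5, max_retries=3)

    def calculate_delay(self, action_type, context=None):
        return self.policy.min_delay

    def wait_if_needed(self, action_type):
        return 0.0  # pretend no wait was necessary

    def record_action(self, timing):
        pass

    def get_statistics(self, action_type, timeframe=timedelta(hours=1)):
        return {'success_rate': 0.9, 'avg_duration_ms': 800, 'total_actions': 50}

    def get_policy(self, action_type):
        return self.policy

    def update_policy(self, action_type, policy):
        self.policy = policy

use_case = AdaptiveRateLimitUseCase(FakeRateLimitService())
# The first call always adapts, because last_adaptation starts empty.
print(use_case.execute(ActionType.LOGIN))  # ~1.0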


@@ -0,0 +1,352 @@
"""
Analyze Failure Rate Use Case - analyzes failure rates and patterns
"""
import logging
from typing import Dict, Any, List, Optional
from datetime import timedelta
from collections import defaultdict
from domain.services.analytics_service import IAnalyticsService
from domain.value_objects.error_summary import ErrorSummary
from domain.entities.error_event import ErrorType
logger = logging.getLogger("analyze_failure_rate_use_case")
class AnalyzeFailureRateUseCase:
"""
Use case for failure analysis.
Implements time-based failure analysis, error clustering,
trend detection, and recommendations for improvements.
"""
def __init__(self, analytics_service: IAnalyticsService):
self.analytics_service = analytics_service
self.critical_error_types = [
ErrorType.RATE_LIMIT,
ErrorType.CAPTCHA,
ErrorType.AUTHENTICATION
]
self.error_thresholds = {
'critical': 0.5, # a 50% failure rate is critical
'warning': 0.3, # a 30% failure rate is a warning
'acceptable': 0.1 # a 10% failure rate is acceptable
}
def execute(self,
platform: Optional[str] = None,
timeframe: timedelta = timedelta(hours=24)) -> Dict[str, Any]:
"""
Analyzes failure rates and patterns.
Args:
platform: Specific platform, or None for all
timeframe: Time window for the analysis
Returns:
Analysis result with metrics and recommendations
"""
# Fetch base metrics
success_rate = self.analytics_service.get_success_rate(timeframe, platform)
failure_rate = 1.0 - success_rate
# Fetch the most common errors
common_errors = self.analytics_service.get_common_errors(20, timeframe)
# Analyze failure patterns
patterns = self.analytics_service.analyze_failure_patterns(timeframe)
# Build the analysis
analysis = {
'timeframe': str(timeframe),
'platform': platform or 'all',
'metrics': {
'overall_failure_rate': failure_rate,
'overall_success_rate': success_rate,
'severity': self._calculate_severity(failure_rate)
},
'error_breakdown': self._analyze_error_types(common_errors),
'temporal_patterns': self._analyze_temporal_patterns(patterns),
'error_clusters': self._cluster_errors(common_errors),
'critical_issues': self._identify_critical_issues(common_errors, failure_rate),
'recommendations': self._generate_recommendations(
failure_rate, common_errors, patterns
)
}
# Log key findings
self._log_insights(analysis)
return analysis
def _calculate_severity(self, failure_rate: float) -> str:
"""Berechnet Schweregrad basierend auf Fehlerrate"""
if failure_rate >= self.error_thresholds['critical']:
return 'critical'
elif failure_rate >= self.error_thresholds['warning']:
return 'warning'
elif failure_rate >= self.error_thresholds['acceptable']:
return 'moderate'
else:
return 'low'
def _analyze_error_types(self, errors: List[ErrorSummary]) -> List[Dict[str, Any]]:
"""Analysiert Fehlertypen im Detail"""
breakdown = []
for error in errors[:10]: # top 10 errors
analysis = {
'error_type': error.error_type,
'count': error.error_count,
'frequency_per_hour': error.frequency,
'recovery_rate': error.recovery_success_rate,
'severity_score': error.severity_score,
'impact': {
'user_impact': error.total_user_impact,
'system_impact': error.total_system_impact,
'data_loss': error.data_loss_incidents
},
'common_contexts': {
'urls': error.most_common_urls[:3],
'actions': error.most_common_actions[:3],
'steps': error.most_common_steps[:3]
},
'trend': self._calculate_error_trend(error)
}
breakdown.append(analysis)
return breakdown
def _calculate_error_trend(self, error: ErrorSummary) -> str:
"""Berechnet Trend für einen Fehlertyp"""
# Vereinfacht: Basierend auf Frequenz
if error.frequency > 10:
return 'increasing'
elif error.frequency > 5:
return 'stable'
else:
return 'decreasing'
def _analyze_temporal_patterns(self, patterns: Dict[str, Any]) -> Dict[str, Any]:
"""Analysiert zeitliche Muster in Fehlern"""
temporal = {
'peak_error_hours': [],
'low_error_hours': [],
'daily_pattern': 'unknown',
'weekly_pattern': 'unknown'
}
# TODO: implement with real timeline data
# Example implementation
if patterns:
# Find peak times
if 'hourly_distribution' in patterns:
hourly = patterns['hourly_distribution']
sorted_hours = sorted(hourly.items(),
key=lambda x: x[1],
reverse=True)
temporal['peak_error_hours'] = [h[0] for h in sorted_hours[:3]]
temporal['low_error_hours'] = [h[0] for h in sorted_hours[-3:]]
return temporal
def _cluster_errors(self, errors: List[ErrorSummary]) -> List[Dict[str, Any]]:
"""Clustert ähnliche Fehler"""
clusters = []
# Cluster by error type
type_clusters = defaultdict(list)
for error in errors:
# Extract the base type from error_type
base_type = error.error_type.split('_')[0] if '_' in error.error_type else error.error_type
type_clusters[base_type].append(error)
# Build the cluster analysis
for cluster_name, cluster_errors in type_clusters.items():
if len(cluster_errors) > 1:
total_count = sum(e.error_count for e in cluster_errors)
avg_recovery = sum(e.recovery_success_rate for e in cluster_errors) / len(cluster_errors)
clusters.append({
'cluster_name': cluster_name,
'error_count': len(cluster_errors),
'total_occurrences': total_count,
'avg_recovery_rate': avg_recovery,
'members': [e.error_type for e in cluster_errors]
})
return sorted(clusters, key=lambda x: x['total_occurrences'], reverse=True)
def _identify_critical_issues(self,
errors: List[ErrorSummary],
overall_failure_rate: float) -> List[Dict[str, Any]]:
"""Identifiziert kritische Issues"""
critical_issues = []
# High overall failure rate
if overall_failure_rate >= self.error_thresholds['critical']:
critical_issues.append({
'issue': 'high_overall_failure_rate',
'severity': 'critical',
'description': f'Fehlerrate von {overall_failure_rate:.1%} überschreitet kritischen Schwellenwert',
'recommendation': 'Sofortige Untersuchung und Maßnahmen erforderlich'
})
# Critical error types
for error in errors:
try:
error_type = ErrorType(error.error_type)
except ValueError:
error_type = ErrorType.UNKNOWN
if error_type in self.critical_error_types:
if error.frequency > 5: # more than 5 per hour
critical_issues.append({
'issue': f'high_frequency_{error.error_type}',
'severity': 'critical',
'description': f'{error.error_type} tritt {error.frequency:.1f} mal pro Stunde auf',
'recommendation': self._get_error_specific_recommendation(error_type)
})
# Low recovery rate
low_recovery = [e for e in errors if e.recovery_success_rate < 0.2]
if low_recovery:
critical_issues.append({
'issue': 'low_recovery_rate',
'severity': 'warning',
'description': f'{len(low_recovery)} Fehlertypen haben Recovery-Rate < 20%',
'recommendation': 'Recovery-Strategien überprüfen und verbessern'
})
return critical_issues
def _get_error_specific_recommendation(self, error_type: ErrorType) -> str:
"""Gibt spezifische Empfehlung für Fehlertyp"""
recommendations = {
ErrorType.RATE_LIMIT: 'Rate Limiting Parameter erhöhen und Delays anpassen',
ErrorType.CAPTCHA: 'CAPTCHA-Solving-Service prüfen oder manuelle Intervention',
ErrorType.AUTHENTICATION: 'Credentials und Session-Management überprüfen',
ErrorType.NETWORK: 'Netzwerk-Stabilität und Proxy-Konfiguration prüfen',
ErrorType.TIMEOUT: 'Timeouts erhöhen und Performance optimieren'
}
return recommendations.get(error_type, 'Detaillierte Fehleranalyse durchführen')
def _generate_recommendations(self,
failure_rate: float,
errors: List[ErrorSummary],
patterns: Dict[str, Any]) -> List[str]:
"""Generiert konkrete Handlungsempfehlungen"""
recommendations = []
# Base recommendations by failure rate
severity = self._calculate_severity(failure_rate)
if severity == 'critical':
recommendations.append(
"🚨 KRITISCH: Sofortige Intervention erforderlich - "
"Pausieren Sie neue Account-Erstellungen bis Issues gelöst sind"
)
elif severity == 'warning':
recommendations.append(
"⚠️ WARNUNG: Erhöhte Fehlerrate - "
"Reduzieren Sie Geschwindigkeit und überwachen Sie genau"
)
# Specific recommendations based on the top errors
if errors:
top_error = errors[0]
if top_error.error_type == ErrorType.RATE_LIMIT.value:
recommendations.append(
"📊 Rate Limiting ist Hauptproblem - "
"Erhöhen Sie Delays zwischen Aktionen um 50%"
)
elif top_error.error_type == ErrorType.CAPTCHA.value:
recommendations.append(
"🔐 CAPTCHA-Challenges häufig - "
"Prüfen Sie Fingerprinting und Session-Qualität"
)
# Time-based recommendations
if patterns and 'peak_hours' in patterns:
recommendations.append(
f"⏰ Vermeiden Sie Aktivität während Peak-Zeiten: "
f"{', '.join(patterns['peak_hours'])}"
)
# Recovery-based recommendations
low_recovery = [e for e in errors if e.recovery_success_rate < 0.3]
if len(low_recovery) > 3:
recommendations.append(
"🔄 Viele Fehler ohne erfolgreiche Recovery - "
"Implementieren Sie bessere Retry-Strategien"
)
# Platform-specific recommendations
platform_errors = defaultdict(int)
for error in errors:
for url in error.most_common_urls:
if 'instagram' in url.lower():
platform_errors['instagram'] += error.error_count
elif 'tiktok' in url.lower():
platform_errors['tiktok'] += error.error_count
if platform_errors:
worst_platform = max(platform_errors.items(), key=lambda x: x[1])
recommendations.append(
f"📱 {worst_platform[0].title()} hat die meisten Fehler - "
f"Fokussieren Sie Optimierungen auf diese Plattform"
)
return recommendations
def _log_insights(self, analysis: Dict[str, Any]) -> None:
"""Loggt wichtige Erkenntnisse"""
severity = analysis['metrics']['severity']
failure_rate = analysis['metrics']['overall_failure_rate']
log_message = f"Failure analysis completed: {failure_rate:.1%} failure rate ({severity})"
if analysis['critical_issues']:
log_message += f", {len(analysis['critical_issues'])} critical issues found"
if severity in ['critical', 'warning']:
logger.warning(log_message)
else:
logger.info(log_message)
# Log the top recommendation
if analysis['recommendations']:
logger.info(f"Top recommendation: {analysis['recommendations'][0]}")
def compare_platforms(self,
timeframe: timedelta = timedelta(days=7)) -> Dict[str, Any]:
"""Vergleicht Fehlerraten zwischen Plattformen"""
comparison = self.analytics_service.get_platform_comparison(timeframe)
# Extend with error-specific metrics
for platform, stats in comparison.items():
if isinstance(stats, dict):
# Determine error focus areas
platform_errors = self.analytics_service.get_common_errors(10, timeframe)
# Filter by platform
# TODO: implement a platform filter in ErrorSummary
stats['primary_error_types'] = []
stats['improvement_potential'] = self._calculate_improvement_potential(stats)
return comparison
def _calculate_improvement_potential(self, stats: Dict[str, Any]) -> str:
"""Berechnet Verbesserungspotential"""
success_rate = stats.get('success_rate', 0)
if success_rate < 0.5:
return 'high'
elif success_rate < 0.7:
return 'medium'
elif success_rate < 0.9:
return 'low'
else:
return 'minimal'
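A sketch of driving the analysis with a stubbed IAnalyticsService. The stub returns an empty error list; a real implementation would return ErrorSummary objects from the analytics repository:

from datetime import timedelta

class FakeAnalyticsService:
    """Hypothetical IAnalyticsService stand-in covering only the calls used here."""
    def get_success_rate(self, timeframe, platform=None):
        return 0.62  # 38% failure rate -> 'warning' severity

    def get_common_errors(self, limit, timeframe):
        return []

    def analyze_failure_patterns(self, timeframe):
        return {}

analysis = AnalyzeFailureRateUseCase(FakeAnalyticsService()).execute(
    platform='instagram', timeframe=timedelta(hours=6)
)
print(analysis['metrics']['severity'])  # 'warning'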


@@ -0,0 +1,259 @@
"""
Detect Rate Limit Use Case - detects rate limits and reacts accordingly
"""
import logging
import random
import re
from typing import Any, Dict, Optional, Tuple
from datetime import datetime
from domain.services.rate_limit_service import IRateLimitService
from domain.value_objects.action_timing import ActionTiming, ActionType
from domain.entities.error_event import ErrorEvent, ErrorType, ErrorContext
from domain.entities.rate_limit_policy import RateLimitPolicy
logger = logging.getLogger("detect_rate_limit_use_case")
class DetectRateLimitUseCase:
"""
Use case for rate limit detection and response.
Analyzes responses, detects rate limits, and applies backoff strategies.
"""
def __init__(self, rate_limit_service: IRateLimitService):
self.rate_limit_service = rate_limit_service
self.detection_patterns = {
'instagram': [
"Bitte warte einige Minuten",
"Please wait a few minutes",
"Try again later",
"Versuche es später erneut",
"too many requests",
"zu viele Anfragen",
"We're sorry, but something went wrong",
"temporarily blocked",
"vorübergehend gesperrt",
"Wir haben deine Anfrage eingeschränkt"
],
'tiktok': [
"Too many attempts",
"Zu viele Versuche",
"Please slow down",
"rate limited",
"Try again in"
],
'general': [
"429",
"rate limit",
"throttled",
"quota exceeded"
]
}
def execute(self, response: Any, context: Optional[Dict[str, Any]] = None) -> Tuple[bool, Optional[ErrorEvent]]:
"""
Analyzes a response for rate limiting.
Args:
response: HTTP response, page content, or error message
context: Additional context (platform, action_type, etc.)
Returns:
Tuple of (is_rate_limited, error_event)
"""
# Detect the rate limit
is_rate_limited = self._detect_rate_limit(response, context)
if not is_rate_limited:
return False, None
# Create the error event
error_event = self._create_error_event(response, context)
# Handle Rate Limit
self._handle_rate_limit(error_event, context)
return True, error_event
def _detect_rate_limit(self, response: Any, context: Optional[Dict[str, Any]] = None) -> bool:
"""Erkennt ob Response auf Rate Limiting hindeutet"""
# Nutze Service für Basis-Detection
if self.rate_limit_service.detect_rate_limit(response):
return True
# Extended detection based on the platform
platform = context.get('platform', 'general') if context else 'general'
patterns = self.detection_patterns.get(platform, []) + self.detection_patterns['general']
# String-based detection
response_text = self._extract_text(response)
if response_text:
response_lower = response_text.lower()
for pattern in patterns:
if pattern.lower() in response_lower:
logger.info(f"Rate limit detected: '{pattern}' found in response")
return True
# Status-code detection
status = self._extract_status(response)
if status in [429, 420, 503]: # Common rate limit codes
logger.info(f"Rate limit detected: HTTP {status}")
return True
# Timing-based detection
if context and 'timing' in context:
timing = context['timing']
if isinstance(timing, ActionTiming):
# Very fast failures can indicate rate limits
if not timing.success and timing.duration < 0.5:
logger.warning("Possible rate limit: Fast failure detected")
return True
return False
def _extract_text(self, response: Any) -> Optional[str]:
"""Extrahiert Text aus verschiedenen Response-Typen"""
if isinstance(response, str):
return response
elif hasattr(response, 'text'):
try:
return response.text
except Exception:
pass
elif hasattr(response, 'content'):
try:
if callable(response.content):
return response.content()
return str(response.content)
except Exception:
pass
elif hasattr(response, 'message'):
return str(response.message)
return str(response) if response else None
def _extract_status(self, response: Any) -> Optional[int]:
"""Extrahiert Status Code aus Response"""
if hasattr(response, 'status'):
return response.status
elif hasattr(response, 'status_code'):
return response.status_code
elif hasattr(response, 'code'):
try:
return int(response.code)
except Exception:
pass
return None
def _create_error_event(self, response: Any, context: Optional[Dict[str, Any]] = None) -> ErrorEvent:
"""Erstellt Error Event für Rate Limit"""
error_context = ErrorContext(
url=context.get('url') if context else None,
action=context.get('action_type').value if context and 'action_type' in context else None,
step_name=context.get('step_name') if context else None,
screenshot_path=context.get('screenshot_path') if context else None,
additional_data={
'platform': context.get('platform') if context else None,
'response_text': self._extract_text(response)[:500] if self._extract_text(response) else None,
'status_code': self._extract_status(response),
'timestamp': datetime.now().isoformat()
}
)
return ErrorEvent(
error_type=ErrorType.RATE_LIMIT,
error_message="Rate limit detected",
context=error_context,
platform=context.get('platform') if context else None,
session_id=context.get('session_id') if context else None,
correlation_id=context.get('correlation_id') if context else None
)
def _handle_rate_limit(self, error_event: ErrorEvent, context: Optional[Dict[str, Any]] = None) -> None:
"""Behandelt erkanntes Rate Limit"""
# Extrahiere Wait-Zeit aus Response wenn möglich
wait_time = self._extract_wait_time(error_event.context.additional_data.get('response_text', ''))
if not wait_time:
# Fall back to exponential backoff
retry_count = context.get('retry_count', 0) if context else 0
wait_time = self._calculate_backoff(retry_count)
logger.warning(f"Rate limit detected - waiting {wait_time}s before retry")
# Update the rate limit policy for future requests
if context and 'action_type' in context:
action_type = context['action_type']
current_policy = self.rate_limit_service.get_policy(action_type)
# Temporarily increase delays
updated_policy = RateLimitPolicy(
min_delay=min(current_policy.min_delay * 1.5, 10.0),
max_delay=min(current_policy.max_delay * 2, 60.0),
adaptive=current_policy.adaptive,
backoff_multiplier=min(current_policy.backoff_multiplier * 1.2, 3.0),
max_retries=current_policy.max_retries
)
self.rate_limit_service.update_policy(action_type, updated_policy)
def _extract_wait_time(self, response_text: str) -> Optional[float]:
"""Versucht Wait-Zeit aus Response zu extrahieren"""
if not response_text:
return None
# Patterns for time specifications
patterns = [
r'wait (\d+) seconds',
r'warte (\d+) Sekunden',
r'try again in (\d+)s',
r'retry after (\d+)',
r'(\d+) Minuten warten',
r'wait (\d+) minutes'
]
for pattern in patterns:
match = re.search(pattern, response_text, re.IGNORECASE)
if match:
value = int(match.group(1))
# Convert minutes to seconds where needed
if 'minute' in pattern.lower():
value *= 60
return float(min(value, 300)) # cap at 5 minutes
return None
def _calculate_backoff(self, retry_count: int) -> float:
"""Berechnet exponentielles Backoff"""
base_wait = 5.0 # 5 Sekunden Basis
max_wait = 300.0 # Max 5 Minuten
# Exponentielles Backoff mit Jitter
wait_time = min(base_wait * (2 ** retry_count), max_wait)
# Add jitter (±20%)
jitter = wait_time * 0.2 * (random.random() - 0.5)
return wait_time + jitter
def analyze_patterns(self, platform: str, timeframe_hours: int = 24) -> Dict[str, Any]:
"""Analysiert Rate Limit Muster für eine Plattform"""
# Diese Methode würde mit einem Analytics Repository arbeiten
# um Muster in Rate Limits zu erkennen
analysis = {
'platform': platform,
'timeframe_hours': timeframe_hours,
'peak_times': [],
'safe_times': [],
'recommended_delays': {},
'incidents': 0
}
# TODO: implement with the analytics repository
return analysis
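An example of running detection against a simple response object. Only detect_rate_limit is stubbed, since the context below carries no action_type and therefore no policy update is triggered; the printed attribute assumes ErrorEvent exposes error_type:

class FakeResponse:
    status_code = 429
    text = "Too many requests - try again in 60s"

class FakeRateLimitService:
    def detect_rate_limit(self, response):
        return False  # force the use case's own pattern/status detection

detector = DetectRateLimitUseCase(FakeRateLimitService())
limited, event = detector.execute(FakeResponse(), context={'platform': 'tiktok'})
# The TikTok pattern "Try again in" matches, and _extract_wait_time picks up 60s.
print(limited, event.error_type)  # True ErrorType.RATE_LIMIT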


@@ -0,0 +1,187 @@
"""
Export Accounts Use Case - exports account data to various formats
"""
import logging
import csv
import json
from io import StringIO
from typing import List, Dict, Any, Optional
from datetime import datetime
logger = logging.getLogger("export_accounts_use_case")
class ExportAccountsUseCase:
"""
Use case for account export.
Exports account data to various formats (CSV, JSON).
"""
def __init__(self, db_manager):
self.db_manager = db_manager
def execute(self,
platform: Optional[str] = None,
format: str = 'csv',
include_passwords: bool = True) -> bytes:
"""
Exports account data.
Args:
platform: Filter for a specific platform (None = all)
format: Export format ('csv' or 'json')
include_passwords: Whether to include passwords
Returns:
Exported data as bytes
"""
# Fetch the account data
if platform and platform.lower() not in ["all", ""]:
accounts = self.db_manager.get_accounts_by_platform(platform.lower())
else:
accounts = self.db_manager.get_all_accounts()
if not accounts:
logger.warning(f"Keine Accounts gefunden für Export (platform: {platform})")
return b""
# Export based on the format
if format.lower() == 'csv':
result = self._export_csv(accounts, include_passwords)
elif format.lower() == 'json':
result = self._export_json(accounts, include_passwords)
else:
raise ValueError(f"Unsupported format: {format}")
logger.info(f"Exported {len(accounts)} accounts as {format}")
return result
def _export_csv(self, accounts: List[Dict[str, Any]], include_passwords: bool) -> bytes:
"""
Exports accounts as CSV.
Args:
accounts: List of account records
include_passwords: Whether to include passwords
Returns:
CSV data as bytes
"""
output = StringIO()
# Define headers based on the password setting
headers = [
'Plattform',
'Benutzername',
'E-Mail',
'Handynummer',
'Name',
'Geburtstag',
'Erstellt am'
]
if include_passwords:
headers.insert(2, 'Passwort')
writer = csv.DictWriter(output, fieldnames=headers)
writer.writeheader()
# Write the account rows
for account in accounts:
row = {
'Plattform': account.get('platform', ''),
'Benutzername': account.get('username', ''),
'E-Mail': account.get('email', ''),
'Handynummer': account.get('phone', ''),
'Name': account.get('full_name', ''),
'Geburtstag': account.get('birthday', ''),
'Erstellt am': account.get('created_at', '')
}
if include_passwords:
row['Passwort'] = account.get('password', '')
writer.writerow(row)
return output.getvalue().encode('utf-8-sig') # UTF-8 with BOM for Excel
def _export_json(self, accounts: List[Dict[str, Any]], include_passwords: bool) -> bytes:
"""
Exports accounts as JSON.
Args:
accounts: List of account records
include_passwords: Whether to include passwords
Returns:
JSON data as bytes
"""
export_data = {
'export_date': datetime.now().isoformat(),
'account_count': len(accounts),
'accounts': []
}
for account in accounts:
account_data = {
'platform': account.get('platform', ''),
'username': account.get('username', ''),
'email': account.get('email', ''),
'phone': account.get('phone', ''),
'full_name': account.get('full_name', ''),
'birthday': account.get('birthday', ''),
'created_at': account.get('created_at', '')
}
if include_passwords:
account_data['password'] = account.get('password', '')
export_data['accounts'].append(account_data)
return json.dumps(export_data, ensure_ascii=False, indent=2).encode('utf-8')
def execute_with_accounts(self,
accounts: List[Dict[str, Any]],
format: str = 'csv',
include_passwords: bool = True) -> bytes:
"""
Exports a specific set of account records.
Args:
accounts: Accounts to export
format: Export format ('csv' or 'json')
include_passwords: Whether to include passwords
Returns:
Exported data as bytes
"""
if not accounts:
logger.warning("Keine Accounts zum Export übergeben")
return b""
# Export based on the format
if format.lower() == 'csv':
result = self._export_csv(accounts, include_passwords)
elif format.lower() == 'json':
result = self._export_json(accounts, include_passwords)
else:
raise ValueError(f"Unsupported format: {format}")
logger.info(f"Exported {len(accounts)} specific accounts as {format}")
return result
def get_export_filename(self, platform: Optional[str], format: str) -> str:
"""
Generates a suitable filename for the export.
Args:
platform: Platform filter
format: Export format
Returns:
Suggested filename
"""
timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
platform_str = platform.lower() if platform else 'alle'
return f"accounts_{platform_str}_{timestamp}.{format}"


@@ -0,0 +1,122 @@
"""
Generate Account Fingerprint Use Case - generates and manages fingerprints for accounts
"""
import logging
from typing import Dict, Any, Optional
from infrastructure.services.advanced_fingerprint_service import AdvancedFingerprintService
logger = logging.getLogger("generate_account_fingerprint_use_case")
class GenerateAccountFingerprintUseCase:
"""
Use case for generating and assigning browser fingerprints to accounts.
Ensures that every account has a unique fingerprint.
"""
def __init__(self, db_manager, fingerprint_service=None):
self.db_manager = db_manager
self.fingerprint_service = fingerprint_service or AdvancedFingerprintService()
def execute(self, account_id: int) -> Optional[str]:
"""
Generates a fingerprint for an account, or returns the existing one.
Args:
account_id: ID of the account
Returns:
Fingerprint ID, or None on error
"""
conn = None
try:
# Check whether the account already has a fingerprint
conn = self.db_manager.get_connection()
cursor = conn.cursor()
cursor.execute(
"SELECT fingerprint_id FROM accounts WHERE id = ?",
(account_id,)
)
result = cursor.fetchone()
if result and result[0]:
logger.info(f"Account {account_id} hat bereits Fingerprint: {result[0]}")
return result[0]
# Generate a new fingerprint via the AdvancedFingerprintService
fingerprint = self.fingerprint_service.create_account_fingerprint(
account_id=str(account_id),
profile_type="desktop"
)
# Update the account with the fingerprint ID
cursor.execute(
"UPDATE accounts SET fingerprint_id = ? WHERE id = ?",
(fingerprint.fingerprint_id, account_id)
)
conn.commit()
logger.info(f"Neuer Fingerprint {fingerprint.fingerprint_id} für Account {account_id} generiert und verknüpft")
return fingerprint.fingerprint_id
except Exception as e:
logger.error(f"Fehler beim Generieren des Fingerprints für Account {account_id}: {e}")
return None
finally:
if conn:
conn.close()
def assign_fingerprints_to_all_accounts(self) -> Dict[str, Any]:
"""
Assigns a new fingerprint to every account that lacks one.
Returns:
Statistics about the assignment
"""
stats = {
"total_accounts": 0,
"accounts_without_fingerprint": 0,
"fingerprints_assigned": 0,
"errors": 0
}
conn = None
try:
# Fetch all accounts without a fingerprint
conn = self.db_manager.get_connection()
cursor = conn.cursor()
cursor.execute("SELECT COUNT(*) FROM accounts")
stats["total_accounts"] = cursor.fetchone()[0]
cursor.execute(
"SELECT id, username, platform FROM accounts WHERE fingerprint_id IS NULL"
)
accounts = cursor.fetchall()
stats["accounts_without_fingerprint"] = len(accounts)
for account_id, username, platform in accounts:
logger.info(f"Generiere Fingerprint für Account {username} ({platform})")
fingerprint_id = self.execute(account_id)
if fingerprint_id:
stats["fingerprints_assigned"] += 1
else:
stats["errors"] += 1
logger.info(f"Fingerprint assignment finished: {stats}")
return stats
except Exception as e:
logger.error(f"Error during fingerprint assignment: {e}")
stats["errors"] += 1
return stats
finally:
if conn:
conn.close()
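A local demo using sqlite3, which matches the `?` placeholder style above. The db_manager stand-in serves fresh connections to a shared in-memory database, and the fingerprint service stub returns an object exposing just the fingerprint_id attribute this use case reads:

import sqlite3
from types import SimpleNamespace

class FakeDbManager:
    URI = "file:fp_demo?mode=memory&cache=shared"

    def __init__(self):
        # Keep one connection open so the shared in-memory DB stays alive.
        self._keepalive = sqlite3.connect(self.URI, uri=True)
        self._keepalive.execute("CREATE TABLE accounts (id INTEGER PRIMARY KEY, "
                                "username TEXT, platform TEXT, fingerprint_id TEXT)")
        self._keepalive.execute("INSERT INTO accounts (username, platform) "
                                "VALUES ('demo', 'instagram')")
        self._keepalive.commit()

    def get_connection(self):
        return sqlite3.connect(self.URI, uri=True)

class FakeFingerprintService:
    def create_account_fingerprint(self, account_id, profile_type):
        return SimpleNamespace(fingerprint_id=f"fp-{account_id}")

use_case = GenerateAccountFingerprintUseCase(FakeDbManager(), FakeFingerprintService())
print(use_case.assign_fingerprints_to_all_accounts())
# {'total_accounts': 1, 'accounts_without_fingerprint': 1, 'fingerprints_assigned': 1, 'errors': 0}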


@@ -0,0 +1,548 @@
"""
Generate Reports Use Case - creates detailed reports
"""
import logging
import json
import csv
from io import StringIO
from typing import Dict, Any, List, Optional
from datetime import datetime, timedelta
import uuid
from domain.services.analytics_service import IAnalyticsService
from domain.value_objects.report import (
Report, ReportType, Metric, PlatformStats,
TimeSeriesData, MetricType
)
logger = logging.getLogger("generate_reports_use_case")
class GenerateReportsUseCase:
"""
Use case for report generation.
Creates daily/weekly reports with success statistics,
performance metrics, and error summaries.
"""
def __init__(self, analytics_service: IAnalyticsService):
self.analytics_service = analytics_service
self.report_templates = {
ReportType.DAILY: self._generate_daily_report,
ReportType.WEEKLY: self._generate_weekly_report,
ReportType.MONTHLY: self._generate_monthly_report,
ReportType.CUSTOM: self._generate_custom_report,
ReportType.REAL_TIME: self._generate_realtime_report
}
def execute(self,
report_type: ReportType,
start_date: Optional[datetime] = None,
end_date: Optional[datetime] = None,
platforms: Optional[List[str]] = None,
include_charts: bool = True) -> Report:
"""
Generates a report.
Args:
report_type: Type of the report
start_date: Start date (optional for predefined types)
end_date: End date (optional for predefined types)
platforms: Filter for specific platforms
include_charts: Whether to include time series data
Returns:
The generated report
"""
# Determine the timeframe based on the report type
if not start_date or not end_date:
start_date, end_date = self._determine_timeframe(report_type)
# Generate the report with the matching template
generator = self.report_templates.get(report_type, self._generate_custom_report)
report = generator(start_date, end_date, platforms, include_charts)
# Log the report generation
logger.info(f"Generated {report_type.value} report: {report.report_id} "
f"({report.total_accounts_created} accounts, "
f"{report.overall_success_rate:.1%} success rate)")
return report
def _determine_timeframe(self, report_type: ReportType) -> tuple[datetime, datetime]:
"""Bestimmt Zeitrahmen basierend auf Report-Typ"""
end_date = datetime.now()
if report_type == ReportType.DAILY:
start_date = end_date - timedelta(days=1)
elif report_type == ReportType.WEEKLY:
start_date = end_date - timedelta(weeks=1)
elif report_type == ReportType.MONTHLY:
start_date = end_date - timedelta(days=30)
elif report_type == ReportType.REAL_TIME:
start_date = end_date - timedelta(hours=1)
else:
start_date = end_date - timedelta(days=7) # Default
return start_date, end_date
def _generate_daily_report(self,
start: datetime,
end: datetime,
platforms: Optional[List[str]],
include_charts: bool) -> Report:
"""Generiert täglichen Report"""
# Hole Basis-Report
base_report = self.analytics_service.generate_report(
ReportType.DAILY, start, end, platforms
)
# Extend with daily insights
insights = [
self._generate_daily_summary(base_report),
self._generate_peak_time_insight(base_report),
self._generate_error_trend_insight(base_report)
]
# Add recommendations
recommendations = self._generate_daily_recommendations(base_report)
# Build the final report
return Report(
report_id=base_report.report_id,
report_type=ReportType.DAILY,
start_date=start,
end_date=end,
generated_at=datetime.now(),
total_accounts_created=base_report.total_accounts_created,
total_attempts=base_report.total_attempts,
overall_success_rate=base_report.overall_success_rate,
avg_creation_time=base_report.avg_creation_time,
metrics=base_report.metrics,
platform_stats=base_report.platform_stats,
error_summaries=base_report.error_summaries,
success_rate_timeline=base_report.success_rate_timeline,
creation_rate_timeline=base_report.creation_rate_timeline,
error_rate_timeline=base_report.error_rate_timeline,
insights=insights,
recommendations=recommendations
)
def _generate_weekly_report(self,
start: datetime,
end: datetime,
platforms: Optional[List[str]],
include_charts: bool) -> Report:
"""Generiert wöchentlichen Report"""
base_report = self.analytics_service.generate_report(
ReportType.WEEKLY, start, end, platforms
)
# Weekly trends
insights = [
self._generate_weekly_trend(base_report),
self._generate_platform_comparison(base_report),
self._generate_success_pattern_insight(base_report)
]
recommendations = self._generate_weekly_recommendations(base_report)
return Report(
report_id=base_report.report_id,
report_type=ReportType.WEEKLY,
start_date=start,
end_date=end,
generated_at=datetime.now(),
total_accounts_created=base_report.total_accounts_created,
total_attempts=base_report.total_attempts,
overall_success_rate=base_report.overall_success_rate,
avg_creation_time=base_report.avg_creation_time,
metrics=base_report.metrics,
platform_stats=base_report.platform_stats,
error_summaries=base_report.error_summaries,
success_rate_timeline=base_report.success_rate_timeline,
creation_rate_timeline=base_report.creation_rate_timeline,
error_rate_timeline=base_report.error_rate_timeline,
insights=insights,
recommendations=recommendations
)
def _generate_monthly_report(self,
start: datetime,
end: datetime,
platforms: Optional[List[str]],
include_charts: bool) -> Report:
"""Generiert monatlichen Report"""
base_report = self.analytics_service.generate_report(
ReportType.MONTHLY, start, end, platforms
)
# Monthly summary
insights = [
self._generate_monthly_summary(base_report),
self._generate_growth_analysis(base_report),
self._generate_efficiency_insight(base_report)
]
recommendations = self._generate_strategic_recommendations(base_report)
return Report(
report_id=base_report.report_id,
report_type=ReportType.MONTHLY,
start_date=start,
end_date=end,
generated_at=datetime.now(),
total_accounts_created=base_report.total_accounts_created,
total_attempts=base_report.total_attempts,
overall_success_rate=base_report.overall_success_rate,
avg_creation_time=base_report.avg_creation_time,
metrics=base_report.metrics,
platform_stats=base_report.platform_stats,
error_summaries=base_report.error_summaries,
success_rate_timeline=base_report.success_rate_timeline,
creation_rate_timeline=base_report.creation_rate_timeline,
error_rate_timeline=base_report.error_rate_timeline,
insights=insights,
recommendations=recommendations
)
def _generate_custom_report(self,
start: datetime,
end: datetime,
platforms: Optional[List[str]],
include_charts: bool) -> Report:
"""Generiert benutzerdefinierten Report"""
return self.analytics_service.generate_report(
ReportType.CUSTOM, start, end, platforms
)
def _generate_realtime_report(self,
start: datetime,
end: datetime,
platforms: Optional[List[str]],
include_charts: bool) -> Report:
"""Generiert Echtzeit-Report"""
# Hole aktuelle Metriken
realtime_metrics = self.analytics_service.get_real_time_metrics()
# Convert to the report format
metrics = [
Metric(
name="active_sessions",
value=realtime_metrics.get('active_sessions', 0),
unit="count",
trend=0.0
),
Metric(
name="accounts_last_hour",
value=realtime_metrics.get('accounts_last_hour', 0),
unit="count",
trend=realtime_metrics.get('hourly_trend', 0.0)
),
Metric(
name="current_success_rate",
value=realtime_metrics.get('success_rate_last_hour', 0.0),
unit="percentage",
trend=realtime_metrics.get('success_trend', 0.0)
)
]
return Report(
report_id=str(uuid.uuid4()),
report_type=ReportType.REAL_TIME,
start_date=start,
end_date=end,
generated_at=datetime.now(),
total_accounts_created=realtime_metrics.get('accounts_last_hour', 0),
total_attempts=realtime_metrics.get('attempts_last_hour', 0),
overall_success_rate=realtime_metrics.get('success_rate_last_hour', 0.0),
avg_creation_time=realtime_metrics.get('avg_creation_time', 0.0),
metrics=metrics,
platform_stats=[],
error_summaries=[],
insights=[
f"Aktuell {realtime_metrics.get('active_sessions', 0)} aktive Sessions",
f"Erfolgsrate letzte Stunde: {realtime_metrics.get('success_rate_last_hour', 0):.1%}"
],
recommendations=[]
)
def _generate_daily_summary(self, report: Report) -> str:
"""Generiert tägliche Zusammenfassung"""
if report.overall_success_rate >= 0.9:
performance = "ausgezeichnet"
elif report.overall_success_rate >= 0.7:
performance = "gut"
elif report.overall_success_rate >= 0.5:
performance = "durchschnittlich"
else:
performance = "verbesserungswürdig"
return (f"Tagesleistung war {performance} mit "
f"{report.total_accounts_created} erstellten Accounts "
f"({report.overall_success_rate:.1%} Erfolgsrate)")
def _generate_peak_time_insight(self, report: Report) -> str:
"""Generiert Insight über Peak-Zeiten"""
if report.creation_rate_timeline:
peak_hour = max(zip(report.creation_rate_timeline.timestamps,
report.creation_rate_timeline.values),
key=lambda x: x[1])
return f"Höchste Aktivität um {peak_hour[0].strftime('%H:%M')} Uhr"
return "Keine ausgeprägten Peak-Zeiten erkennbar"
def _generate_error_trend_insight(self, report: Report) -> str:
"""Generiert Insight über Fehler-Trends"""
if report.error_rate_timeline:
trend = report.error_rate_timeline.get_trend()
if trend > 10:
return "⚠️ Fehlerrate steigt - Intervention empfohlen"
elif trend < -10:
return "✅ Fehlerrate sinkt - positive Entwicklung"
else:
return "Fehlerrate stabil"
return "Keine Fehler-Trend-Daten verfügbar"
def _generate_daily_recommendations(self, report: Report) -> List[str]:
"""Generiert tägliche Empfehlungen"""
recommendations = []
if report.overall_success_rate < 0.7:
recommendations.append(
"Erfolgsrate unter 70% - prüfen Sie Rate Limits und Proxy-Qualität"
)
if report.avg_creation_time > 120:
recommendations.append(
"Durchschnittliche Erstellungszeit über 2 Minuten - "
"Performance-Optimierung empfohlen"
)
# Platform-specific recommendations
for platform_stat in report.platform_stats:
if platform_stat.success_rate < 0.5:
recommendations.append(
f"{platform_stat.platform}: Niedrige Erfolgsrate - "
f"spezifische Anpassungen erforderlich"
)
if not recommendations:
recommendations.append("Keine dringenden Maßnahmen erforderlich")
return recommendations
def _generate_weekly_trend(self, report: Report) -> str:
"""Generiert wöchentlichen Trend"""
trend_direction = "stabil"
if report.success_rate_timeline:
trend = report.success_rate_timeline.get_trend()
if trend > 5:
trend_direction = "steigend"
elif trend < -5:
trend_direction = "fallend"
return f"Wöchentlicher Trend: {trend_direction} ({report.accounts_per_day:.1f} Accounts/Tag)"
def _generate_platform_comparison(self, report: Report) -> str:
"""Vergleicht Platform-Performance"""
if not report.platform_stats:
return "Keine Platform-Daten verfügbar"
best_platform = max(report.platform_stats, key=lambda p: p.success_rate)
worst_platform = min(report.platform_stats, key=lambda p: p.success_rate)
return (f"Beste Performance: {best_platform.platform} ({best_platform.success_rate:.1%}), "
f"Schlechteste: {worst_platform.platform} ({worst_platform.success_rate:.1%})")
def _generate_success_pattern_insight(self, report: Report) -> str:
"""Analysiert Erfolgsmuster"""
success_metric = report.get_metric(MetricType.SUCCESS_RATE)
if success_metric and success_metric.trend > 0:
return f"Erfolgsrate verbessert sich um {success_metric.trend:.1f}%"
return "Erfolgsrate zeigt keine klare Tendenz"
def _generate_weekly_recommendations(self, report: Report) -> List[str]:
"""Generiert wöchentliche Empfehlungen"""
recommendations = []
# Trend-based recommendations
if report.success_rate_timeline:
trend = report.success_rate_timeline.get_trend()
if trend < -10:
recommendations.append(
"Negativer Trend erkannt - analysieren Sie Änderungen der letzten Woche"
)
# Efficiency recommendations
if report.total_attempts > report.total_accounts_created * 2:
recommendations.append(
"Hohe Retry-Rate - verbessern Sie Fehlerbehandlung"
)
return recommendations
def _generate_monthly_summary(self, report: Report) -> str:
"""Generiert monatliche Zusammenfassung"""
total_value = report.total_accounts_created
daily_avg = report.accounts_per_day
return (f"Monat: {total_value} Accounts erstellt "
f"{daily_avg:.1f}/Tag, {report.overall_success_rate:.1%} Erfolg)")
def _generate_growth_analysis(self, report: Report) -> str:
"""Analysiert Wachstum"""
# Vereinfacht - würde historische Daten vergleichen
return "Wachstumsanalyse: Vergleich mit Vormonat ausstehend"
def _generate_efficiency_insight(self, report: Report) -> str:
"""Analysiert Effizienz"""
efficiency = report.total_accounts_created / report.total_attempts if report.total_attempts > 0 else 0
return f"Effizienz: {efficiency:.1%} der Versuche erfolgreich"
def _generate_strategic_recommendations(self, report: Report) -> List[str]:
"""Generiert strategische Empfehlungen"""
return [
"Monatliche Review der Error-Patterns durchführen",
"Proxy-Pool-Qualität evaluieren",
"Fingerprint-Rotation-Strategie anpassen"
]
def export_report(self,
report: Report,
format: str = 'json',
include_sensitive: bool = False) -> bytes:
"""
Exports the report in various formats.
Args:
report: Report to export
format: Export format ('json', 'csv', 'html')
include_sensitive: Whether to include sensitive data
Returns:
Report as bytes
"""
if format == 'json':
return self._export_json(report, include_sensitive)
elif format == 'csv':
return self._export_csv(report)
elif format == 'html':
return self._export_html(report)
else:
raise ValueError(f"Unsupported format: {format}")
def _export_json(self, report: Report, include_sensitive: bool) -> bytes:
"""Exportiert als JSON"""
data = report.to_dict()
# Strip sensitive data if requested
if not include_sensitive:
# Remove account-specific data
for platform_stat in data.get('platform_stats', []):
if 'account_details' in platform_stat:
del platform_stat['account_details']
return json.dumps(data, indent=2).encode('utf-8')
def _export_csv(self, report: Report) -> bytes:
"""Exportiert als CSV"""
output = StringIO()
writer = csv.writer(output)
# Header
writer.writerow(['Metric', 'Value', 'Unit', 'Trend'])
# Metrics
for metric in report.metrics:
writer.writerow([metric.name, metric.value, metric.unit, metric.trend])
# Platform Stats
writer.writerow([])
writer.writerow(['Platform', 'Attempts', 'Success', 'Success Rate', 'Avg Duration'])
for stat in report.platform_stats:
writer.writerow([
stat.platform,
stat.total_attempts,
stat.successful_accounts,
f"{stat.success_rate:.1%}",
f"{stat.avg_duration_seconds:.1f}s"
])
return output.getvalue().encode('utf-8')
def _export_html(self, report: Report) -> bytes:
"""Exportiert als HTML"""
html = f"""
<html>
<head>
<title>Report {report.report_id}</title>
<style>
body {{ font-family: Arial, sans-serif; margin: 20px; }}
h1 {{ color: #333; }}
.metric {{ margin: 10px 0; }}
.success {{ color: green; }}
.warning {{ color: orange; }}
.error {{ color: red; }}
table {{ border-collapse: collapse; width: 100%; }}
th, td {{ border: 1px solid #ddd; padding: 8px; text-align: left; }}
th {{ background-color: #f2f2f2; }}
</style>
</head>
<body>
<h1>{report.report_type.value.title()} Report</h1>
<p>Period: {report.start_date.strftime('%Y-%m-%d')} to {report.end_date.strftime('%Y-%m-%d')}</p>
<h2>Summary</h2>
<div class="metric">Total Accounts: <strong>{report.total_accounts_created}</strong></div>
<div class="metric">Success Rate: <strong class="{'success' if report.overall_success_rate > 0.7 else 'warning'}">{report.overall_success_rate:.1%}</strong></div>
<div class="metric">Average Creation Time: <strong>{report.avg_creation_time:.1f}s</strong></div>
<h2>Platform Statistics</h2>
<table>
<tr>
<th>Platform</th>
<th>Attempts</th>
<th>Success</th>
<th>Success Rate</th>
</tr>
"""
for stat in report.platform_stats:
html += f"""
<tr>
<td>{stat.platform}</td>
<td>{stat.total_attempts}</td>
<td>{stat.successful_accounts}</td>
<td class="{'success' if stat.success_rate > 0.7 else 'warning'}">{stat.success_rate:.1%}</td>
</tr>
"""
html += """
</table>
<h2>Insights</h2>
<ul>
"""
for insight in report.insights:
html += f"<li>{insight}</li>"
html += """
</ul>
<h2>Recommendations</h2>
<ul>
"""
for rec in report.recommendations:
html += f"<li>{rec}</li>"
html += """
</ul>
</body>
</html>
"""
return html.encode('utf-8')
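A sketch driving the real-time template only, since that is the one path that builds the Report value object itself from get_real_time_metrics(); the other templates delegate to IAnalyticsService.generate_report(). Report and ReportType are the domain imports used above, and the stub supplies the single call this path makes:

class FakeAnalyticsService:
    def get_real_time_metrics(self):
        return {'active_sessions': 3, 'accounts_last_hour': 12,
                'attempts_last_hour': 15, 'success_rate_last_hour': 0.8,
                'avg_creation_time': 45.0, 'hourly_trend': 1.5, 'success_trend': 0.2}

reports = GenerateReportsUseCase(FakeAnalyticsService())
report = reports.execute(ReportType.REAL_TIME)
print(report.overall_success_rate)               # 0.8
html_bytes = reports.export_report(report, format='html')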


@@ -0,0 +1,335 @@
"""
Log Account Creation Use Case - structured logging for account creation
"""
import logging
from typing import Dict, Any, Optional, List
from datetime import datetime, timedelta
import uuid
from domain.services.analytics_service import IAnalyticsService
from domain.entities.account_creation_event import (
AccountCreationEvent, AccountData, WorkflowStep,
WorkflowStepStatus, ErrorDetails
)
logger = logging.getLogger("log_account_creation_use_case")
class LogAccountCreationUseCase:
"""
Use case for structured logging of account creations.
Tracks detailed steps, performance metrics, and error contextualization.
"""
def __init__(self, analytics_service: IAnalyticsService):
self.analytics_service = analytics_service
self.active_events = {} # event_id -> AccountCreationEvent
def start_tracking(self,
platform: str,
session_id: str,
fingerprint_id: str,
context: Optional[Dict[str, Any]] = None) -> str:
"""
Starts tracking a new account creation.
Args:
platform: Target platform
session_id: Session ID
fingerprint_id: Fingerprint ID
context: Additional context
Returns:
Event ID for further tracking
"""
event = AccountCreationEvent(
event_id=str(uuid.uuid4()),
timestamp=datetime.now(),
session_id=session_id,
fingerprint_id=fingerprint_id,
proxy_used=context.get('proxy_used', False) if context else False,
proxy_type=context.get('proxy_type') if context else None,
browser_type=context.get('browser_type', 'chromium') if context else 'chromium',
headless=context.get('headless', False) if context else False
)
# Keep the event around temporarily for step tracking
self.active_events[event.event_id] = event
logger.info(f"Started tracking account creation {event.event_id} for {platform}")
return event.event_id
def track_step(self,
event_id: str,
step_name: str,
metadata: Optional[Dict[str, Any]] = None) -> None:
"""
Starts tracking a workflow step.
Args:
event_id: Event ID
step_name: Name of the step
metadata: Additional metadata
"""
event = self.active_events.get(event_id)
if not event:
logger.error(f"No active event found for {event_id}")
return
step = WorkflowStep(
step_name=step_name,
start_time=datetime.now(),
status=WorkflowStepStatus.IN_PROGRESS,
metadata=metadata or {}
)
event.add_step(step)
logger.debug(f"Started step '{step_name}' for event {event_id}")
def complete_step(self,
event_id: str,
step_name: str,
success: bool = True,
error_message: Optional[str] = None,
retry_count: int = 0) -> None:
"""
Marks a step as completed.
Args:
event_id: Event ID
step_name: Name of the step
success: Whether the step succeeded
error_message: Error message on failure
retry_count: Number of retries
"""
event = self.active_events.get(event_id)
if not event:
logger.error(f"No active event found for {event_id}")
return
step = event.get_step(step_name)
if not step:
logger.error(f"Step '{step_name}' not found in event {event_id}")
return
step.end_time = datetime.now()
step.status = WorkflowStepStatus.COMPLETED if success else WorkflowStepStatus.FAILED
step.retry_count = retry_count
step.error_message = error_message
# Update metrics
event.network_requests += step.metadata.get('network_requests', 0)
event.screenshots_taken += step.metadata.get('screenshots', 0)
logger.debug(f"Completed step '{step_name}' for event {event_id} "
f"(success: {success}, duration: {step.duration})")
def set_account_data(self,
event_id: str,
username: str,
password: str,
email: str,
additional_data: Optional[Dict[str, Any]] = None) -> None:
"""
Sets the account data for a successfully created account.
Args:
event_id: Event ID
username: Username
password: Password
email: E-mail address
additional_data: Additional data
"""
event = self.active_events.get(event_id)
if not event:
logger.error(f"No active event found for {event_id}")
return
event.account_data = AccountData(
platform=additional_data.get('platform', 'unknown') if additional_data else 'unknown',
username=username,
password=password,
email=email,
phone=additional_data.get('phone') if additional_data else None,
full_name=additional_data.get('full_name') if additional_data else None,
birthday=additional_data.get('birthday') if additional_data else None,
verification_status=additional_data.get('verification_status', 'unverified') if additional_data else 'unverified',
metadata=additional_data or {}
)
logger.info(f"Set account data for {username} in event {event_id}")
def log_error(self,
event_id: str,
error_type: str,
error_message: str,
stack_trace: Optional[str] = None,
screenshot_path: Optional[str] = None,
context: Optional[Dict[str, Any]] = None) -> None:
"""
Logs an error that occurred during account creation.
Args:
event_id: Event ID
error_type: Type of the error
error_message: Error message
stack_trace: Stack trace
screenshot_path: Path to the error screenshot
context: Error context
"""
event = self.active_events.get(event_id)
if not event:
logger.error(f"No active event found for {event_id}")
return
event.error_details = ErrorDetails(
error_type=error_type,
error_message=error_message,
stack_trace=stack_trace,
screenshot_path=screenshot_path,
context=context or {}
)
logger.error(f"Logged error for event {event_id}: {error_type} - {error_message}")
def finish_tracking(self,
event_id: str,
success: bool,
final_status: Optional[Dict[str, Any]] = None) -> None:
"""
Finishes tracking and persists the event.
Args:
event_id: Event ID
success: Whether the account creation succeeded
final_status: Final status/metadata
"""
event = self.active_events.get(event_id)
if not event:
logger.error(f"No active event found for {event_id}")
return
# Set final properties
event.success = success
event.calculate_duration()
# Attach final metadata
if final_status:
if event.account_data:
event.account_data.metadata.update(final_status)
# Log the event
self.analytics_service.log_event(event)
# Remove from active events
del self.active_events[event_id]
# Log Summary
self._log_summary(event)
def _log_summary(self, event: AccountCreationEvent) -> None:
"""Loggt eine Zusammenfassung des Events"""
summary = f"Account creation {event.event_id} "
if event.success:
summary += f"SUCCEEDED"
if event.account_data:
summary += f" - {event.account_data.username} on {event.account_data.platform}"
else:
summary += f"FAILED"
if event.error_details:
summary += f" - {event.error_details.error_type}: {event.error_details.error_message}"
if event.duration:
summary += f" (duration: {event.duration.total_seconds():.1f}s"
summary += f", steps: {len(event.steps_completed)}"
summary += f", retries: {event.total_retry_count})"
logger.info(summary)
def track_performance_metric(self,
event_id: str,
metric_name: str,
value: float,
tags: Optional[Dict[str, str]] = None) -> None:
"""
Tracks a performance metric.
Args:
event_id: Event ID
metric_name: Name of the metric
value: Metric value
tags: Additional tags
"""
# Track via the analytics service
metric_tags = tags or {}
metric_tags['event_id'] = event_id
self.analytics_service.track_performance(metric_name, value, metric_tags)
    def get_active_events(self) -> List[Dict[str, Any]]:
        """Returns a list of the currently active events"""
        active = []
        for event_id, event in self.active_events.items():
            duration = (datetime.now() - event.timestamp).total_seconds()
            current_step = None
            # Find the step that is currently in progress
            for step in event.steps_completed:
                if step.status == WorkflowStepStatus.IN_PROGRESS:
                    current_step = step.step_name
                    break
            active.append({
                'event_id': event_id,
                'started_at': event.timestamp.isoformat(),
                'duration_seconds': duration,
                'current_step': current_step,
                'steps_completed': len([s for s in event.steps_completed
                                        if s.status == WorkflowStepStatus.COMPLETED]),
                'platform': event.account_data.platform if event.account_data else 'unknown'
            })
        return active
    def cleanup_stale_events(self, timeout_minutes: int = 30) -> int:
        """
        Cleans up events that have been running for too long.

        Args:
            timeout_minutes: Timeout in minutes

        Returns:
            Number of cleaned-up events
        """
        stale_events = []
        timeout = timedelta(minutes=timeout_minutes)
        for event_id, event in self.active_events.items():
            if datetime.now() - event.timestamp > timeout:
                stale_events.append(event_id)
        for event_id in stale_events:
            # Mark as timed out
            self.log_error(
                event_id,
                'timeout',
                f'Event timed out after {timeout_minutes} minutes',
                context={'timeout_minutes': timeout_minutes}
            )
            # Finish tracking (this also removes the event from active_events)
            self.finish_tracking(event_id, success=False,
                                 final_status={'reason': 'timeout'})
        if stale_events:
            logger.warning(f"Cleaned up {len(stale_events)} stale events")
        return len(stale_events)
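
# Lifecycle sketch (hypothetical; `tracker` is an instance of this use case,
# and `event_id` comes from whichever start method the class defines earlier):
#
#     tracker.set_account_data(event_id, "jane_doe", "...", "jane@example.com")
#     tracker.finish_tracking(event_id, success=True)
#     # and periodically, e.g. from a scheduler:
#     removed = tracker.cleanup_stale_events(timeout_minutes=30)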


@ -0,0 +1,362 @@
"""
Use cases for method rotation system.
Implements business logic for method selection, rotation, and performance tracking.
"""
import random
import uuid
from datetime import datetime, timedelta
from typing import List, Optional, Dict, Any
from dataclasses import dataclass
from domain.entities.method_rotation import (
MethodStrategy, RotationSession, RotationEvent, PlatformMethodState,
RotationEventType, RotationStrategy, RiskLevel
)
from domain.repositories.method_rotation_repository import (
IMethodStrategyRepository, IRotationSessionRepository,
IPlatformMethodStateRepository
)
@dataclass
class RotationContext:
    """Context information for rotation decisions"""
    platform: str
    account_id: Optional[str] = None
    fingerprint_id: Optional[str] = None
    excluded_methods: Optional[List[str]] = None
    max_risk_level: RiskLevel = RiskLevel.HIGH
    emergency_mode: bool = False
    session_metadata: Optional[Dict[str, Any]] = None

    def __post_init__(self):
        if self.excluded_methods is None:
            self.excluded_methods = []
        if self.session_metadata is None:
            self.session_metadata = {}
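
# A minimal construction sketch (all values are hypothetical):
#
#     context = RotationContext(
#         platform="instagram",
#         account_id="acct_42",
#         excluded_methods=["email_signup"],
#     )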
class MethodRotationUseCase:
"""
Core use case for method rotation operations.
Handles method selection, rotation logic, and performance tracking.
"""
def __init__(self,
strategy_repo: IMethodStrategyRepository,
session_repo: IRotationSessionRepository,
state_repo: IPlatformMethodStateRepository):
self.strategy_repo = strategy_repo
self.session_repo = session_repo
self.state_repo = state_repo
def start_rotation_session(self, context: RotationContext) -> RotationSession:
"""
Start a new rotation session and select the optimal initial method.
"""
# Check for existing active session
existing_session = self.session_repo.find_active_session(
context.platform, context.account_id
)
if existing_session:
# Archive the old session and start fresh
self.session_repo.archive_session(existing_session.session_id, False)
# Get optimal method for initial attempt
optimal_method = self.get_optimal_method(context)
if not optimal_method:
raise ValueError(f"No available methods for platform {context.platform}")
# Create new session
session = RotationSession(
session_id=f"session_{uuid.uuid4().hex}",
platform=context.platform,
account_id=context.account_id,
current_method=optimal_method.method_name,
fingerprint_id=context.fingerprint_id,
session_metadata=context.session_metadata.copy()
)
# Update platform state
platform_state = self.state_repo.get_or_create_state(context.platform)
platform_state.increment_daily_attempts(optimal_method.method_name)
self.state_repo.save(platform_state)
# Save session
self.session_repo.save(session)
return session
def get_optimal_method(self, context: RotationContext) -> Optional[MethodStrategy]:
"""
Get the optimal method based on current conditions and strategy.
"""
platform_state = self.state_repo.get_or_create_state(context.platform)
# In emergency mode, use only the safest methods
if context.emergency_mode or platform_state.emergency_mode:
return self._get_emergency_method(context)
# Use platform-specific rotation strategy
if platform_state.rotation_strategy == RotationStrategy.ADAPTIVE:
return self._get_adaptive_method(context, platform_state)
elif platform_state.rotation_strategy == RotationStrategy.SEQUENTIAL:
return self._get_sequential_method(context, platform_state)
elif platform_state.rotation_strategy == RotationStrategy.RANDOM:
return self._get_random_method(context, platform_state)
else:
return self._get_smart_method(context, platform_state)
def rotate_method(self, session_id: str, reason: str = "failure") -> Optional[MethodStrategy]:
"""
Rotate to the next best available method for an active session.
"""
session = self.session_repo.find_by_id(session_id)
if not session or not session.is_active:
return None
# Create context for finding next method
context = RotationContext(
platform=session.platform,
account_id=session.account_id,
fingerprint_id=session.fingerprint_id,
excluded_methods=session.attempted_methods.copy()
)
# Find next method
next_method = self.get_optimal_method(context)
if not next_method:
# No more methods available
self.session_repo.archive_session(session_id, False)
return None
# Update session
session.rotate_to_method(next_method.method_name, reason)
self.session_repo.save(session)
# Update platform state
platform_state = self.state_repo.get_or_create_state(session.platform)
platform_state.increment_daily_attempts(next_method.method_name)
self.state_repo.save(platform_state)
return next_method
def record_method_result(self, session_id: str, method_name: str,
success: bool, execution_time: float = 0.0,
error_details: Optional[Dict] = None) -> None:
"""
Record the result of a method execution and update metrics.
"""
session = self.session_repo.find_by_id(session_id)
if not session:
return
# Update session metrics
error_message = error_details.get('message') if error_details else None
self.session_repo.update_session_metrics(
session_id, success, method_name, error_message
)
# Update method strategy performance
strategy = self.strategy_repo.find_by_platform_and_method(
session.platform, method_name
)
if strategy:
self.strategy_repo.update_performance_metrics(
strategy.strategy_id, success, execution_time
)
# Update platform state
if success:
self.state_repo.record_method_success(session.platform, method_name)
# Archive successful session
self.session_repo.archive_session(session_id, True)
else:
# Handle failure - might trigger automatic rotation
self._handle_method_failure(session, method_name, error_details or {})
def should_rotate_method(self, session_id: str) -> bool:
"""
Determine if method rotation should occur based on current session state.
"""
session = self.session_repo.find_by_id(session_id)
if not session or not session.is_active:
return False
return session.should_rotate
def get_session_status(self, session_id: str) -> Optional[Dict[str, Any]]:
"""
Get detailed status information for a rotation session.
"""
session = self.session_repo.find_by_id(session_id)
if not session:
return None
current_strategy = self.strategy_repo.find_by_platform_and_method(
session.platform, session.current_method
)
return {
'session_id': session.session_id,
'platform': session.platform,
'is_active': session.is_active,
'current_method': session.current_method,
'attempted_methods': session.attempted_methods,
'rotation_count': session.rotation_count,
'success_count': session.success_count,
'failure_count': session.failure_count,
'success_rate': session.success_rate,
'session_duration_minutes': session.session_duration.total_seconds() / 60,
'current_strategy_effectiveness': current_strategy.effectiveness_score if current_strategy else 0.0,
'should_rotate': session.should_rotate
}
def get_platform_method_recommendations(self, platform: str) -> Dict[str, Any]:
"""
Get method recommendations and insights for a platform.
"""
strategies = self.strategy_repo.find_active_by_platform(platform)
platform_stats = self.strategy_repo.get_platform_statistics(platform)
session_stats = self.session_repo.get_session_statistics(platform, days=30)
platform_state = self.state_repo.find_by_platform(platform)
        recommendations = []
        for strategy in strategies[:3]:  # Top 3 methods
            remaining = strategy.max_daily_attempts
            if platform_state:
                remaining -= platform_state.daily_attempt_counts.get(strategy.method_name, 0)
            recommendations.append({
                'method_name': strategy.method_name,
                'effectiveness_score': strategy.effectiveness_score,
                'success_rate': strategy.success_rate,
                'risk_level': strategy.risk_level.value,
                'is_on_cooldown': strategy.is_on_cooldown,
                'daily_attempts_remaining': remaining
            })
return {
'platform': platform,
'recommended_methods': recommendations,
'platform_statistics': platform_stats,
'session_statistics': session_stats,
'emergency_mode': platform_state.emergency_mode if platform_state else False,
'rotation_strategy': platform_state.rotation_strategy.value if platform_state else 'adaptive'
}
def enable_emergency_mode(self, platform: str, reason: str = "system_override") -> None:
"""Enable emergency mode for a platform"""
self.state_repo.set_emergency_mode(platform, True)
# Archive all active sessions for safety
active_sessions = self.session_repo.find_active_sessions_by_platform(platform)
for session in active_sessions:
session.session_metadata['emergency_archived'] = True
session.session_metadata['emergency_reason'] = reason
self.session_repo.archive_session(session.session_id, False)
def disable_emergency_mode(self, platform: str) -> None:
"""Disable emergency mode for a platform"""
self.state_repo.set_emergency_mode(platform, False)
def _get_adaptive_method(self, context: RotationContext,
platform_state: PlatformMethodState) -> Optional[MethodStrategy]:
"""Get method using adaptive strategy based on recent performance"""
# Prefer last successful method if it's available
if (platform_state.last_successful_method and
platform_state.last_successful_method not in context.excluded_methods):
strategy = self.strategy_repo.find_by_platform_and_method(
context.platform, platform_state.last_successful_method
)
if (strategy and strategy.is_active and
not strategy.is_on_cooldown and
platform_state.is_method_available(strategy.method_name, strategy.max_daily_attempts)):
return strategy
# Fall back to best available method
return self.strategy_repo.get_next_available_method(
context.platform, context.excluded_methods, context.max_risk_level.value
)
def _get_sequential_method(self, context: RotationContext,
platform_state: PlatformMethodState) -> Optional[MethodStrategy]:
"""Get method using sequential strategy"""
for method_name in platform_state.preferred_methods:
if method_name in context.excluded_methods:
continue
strategy = self.strategy_repo.find_by_platform_and_method(
context.platform, method_name
)
if (strategy and strategy.is_active and
not strategy.is_on_cooldown and
platform_state.is_method_available(method_name, strategy.max_daily_attempts)):
return strategy
return None
    def _get_random_method(self, context: RotationContext,
                           platform_state: PlatformMethodState) -> Optional[MethodStrategy]:
        """Get method using random strategy"""
        available_strategies = []
        for method_name in platform_state.preferred_methods:
            if method_name in context.excluded_methods:
                continue
            strategy = self.strategy_repo.find_by_platform_and_method(
                context.platform, method_name
            )
            if (strategy and strategy.is_active and
                    not strategy.is_on_cooldown and
                    platform_state.is_method_available(method_name, strategy.max_daily_attempts)):
                available_strategies.append(strategy)
        return random.choice(available_strategies) if available_strategies else None
def _get_smart_method(self, context: RotationContext,
platform_state: PlatformMethodState) -> Optional[MethodStrategy]:
"""Get method using AI-driven smart strategy"""
# For now, smart strategy is the same as adaptive
# This can be enhanced with ML models in the future
return self._get_adaptive_method(context, platform_state)
def _get_emergency_method(self, context: RotationContext) -> Optional[MethodStrategy]:
"""Get the safest available method for emergency mode"""
emergency_strategies = self.strategy_repo.get_emergency_methods(context.platform)
for strategy in emergency_strategies:
if (strategy.method_name not in context.excluded_methods and
not strategy.is_on_cooldown):
return strategy
return None
def _handle_method_failure(self, session: RotationSession, method_name: str,
error_details: Dict) -> None:
"""Handle method failure and determine if action is needed"""
# Check if this is a recurring failure pattern
if error_details.get('error_type') == 'rate_limit':
# Temporarily block the method
self.state_repo.block_method(
session.platform, method_name,
f"Rate limited: {error_details.get('message', 'Unknown')}"
)
elif error_details.get('error_type') == 'account_suspended':
# This might indicate method detection, block temporarily
self.state_repo.block_method(
session.platform, method_name,
f"Possible detection: {error_details.get('message', 'Unknown')}"
)
# Check if we need to enable emergency mode
platform_stats = self.strategy_repo.get_platform_statistics(session.platform)
if platform_stats.get('recent_failures_24h', 0) > 10:
self.enable_emergency_mode(session.platform, "high_failure_rate")
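
# End-to-end sketch (hypothetical wiring; the three repositories come from
# whatever persistence layer the application configures):
#
#     use_case = MethodRotationUseCase(strategy_repo, session_repo, state_repo)
#     session = use_case.start_rotation_session(RotationContext(platform="instagram"))
#     use_case.record_method_result(session.session_id, session.current_method,
#                                   success=False, execution_time=3.2,
#                                   error_details={'error_type': 'rate_limit',
#                                                  'message': 'HTTP 429'})
#     if use_case.should_rotate_method(session.session_id):
#         next_method = use_case.rotate_method(session.session_id, reason="rate_limit")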


@ -0,0 +1,81 @@
"""
One-Click Login Use Case - Ermöglicht Login mit gespeicherter Session
"""
import logging
from typing import Dict, Any, Optional
from domain.value_objects.login_credentials import LoginCredentials
from infrastructure.repositories.fingerprint_repository import FingerprintRepository
from infrastructure.repositories.account_repository import AccountRepository
logger = logging.getLogger("one_click_login_use_case")
class OneClickLoginUseCase:
    """
    Use case for one-click login with a stored session.
    Session-based login is currently disabled, so this use case always
    instructs the caller to perform a normal login.
    """
    def __init__(self,
                 fingerprint_repository: Optional[FingerprintRepository] = None,
                 account_repository: Optional[AccountRepository] = None):
        self.fingerprint_repository = fingerprint_repository
        self.account_repository = account_repository
    def execute(self, account_id: str, platform: str) -> Dict[str, Any]:
        """
        One-click login is disabled - always falls back to a normal login.

        Args:
            account_id: ID of the account
            platform: Platform (instagram, facebook, etc.)

        Returns:
            Dict instructing the caller to perform a normal login
        """
        try:
            # Session login is disabled - always perform a normal login
            logger.info(f"Session login disabled for account {account_id} - using normal login")

            # Load account data if a repository is available
            account_data = None
            if self.account_repository:
                try:
                    account = self.account_repository.get_by_id(int(account_id))
                    if account:
                        account_data = {
                            'username': account.get('username'),
                            'password': account.get('password'),
                            'platform': account.get('platform'),
                            'fingerprint_id': account.get('fingerprint_id')
                        }
                except Exception as e:
                    logger.error(f"Failed to load account data: {e}")

            return {
                'success': False,  # No session login possible
                'can_perform_login': True,  # Normal login is possible
                'account_data': account_data,
                'message': 'Session login disabled - normal login required',
                'requires_manual_login': False
            }
        except Exception as e:
            logger.error(f"One-click login failed: {e}")
            return {
                'success': False,
                'can_perform_login': True,
                'account_data': None,
                'message': f'Login error: {str(e)}',
                'requires_manual_login': False
            }
    def check_session_status(self, account_id: str) -> Dict[str, Any]:
        """
        Session status check disabled (session functionality removed).
        """
        # Session functionality has been removed
        return {'state': 'unknown', 'message': 'Session status disabled'}
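
# Caller-side sketch (hypothetical; `account_repo` is whatever AccountRepository
# instance the application wires in, and `start_normal_login` is an assumed
# downstream function, not part of this module):
#
#     use_case = OneClickLoginUseCase(account_repository=account_repo)
#     result = use_case.execute(account_id="42", platform="instagram")
#     if result['can_perform_login'] and result['account_data']:
#         start_normal_login(result['account_data'])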