This commit is contained in:
Claude Project Manager
2025-07-05 17:51:16 +02:00
Commit 0d7d888502
1594 changed files with 122839 additions and 0 deletions

@@ -0,0 +1,428 @@
from flask import Blueprint, render_template, jsonify, request, session, redirect, url_for, Response
from functools import wraps
import psycopg2
from psycopg2.extras import RealDictCursor
import os
import json
import time
import requests
from datetime import datetime, timedelta
import logging

from utils.partition_helper import ensure_partition_exists, check_table_exists

# Configure logging
logger = logging.getLogger(__name__)
# Create a function to get a database connection
def get_db_connection():
    return psycopg2.connect(
        host=os.environ.get('POSTGRES_HOST', 'postgres'),
        database=os.environ.get('POSTGRES_DB', 'v2_adminpanel'),
        user=os.environ.get('POSTGRES_USER', 'postgres'),
        password=os.environ.get('POSTGRES_PASSWORD', 'postgres')
    )
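
# NOTE: every call opens a fresh connection; the views below are responsible
# for closing the cursor and then the connection once they are done with it.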
def login_required(f):
    @wraps(f)
    def decorated_function(*args, **kwargs):
        if 'user_id' not in session:
            return redirect(url_for('auth.login'))
        return f(*args, **kwargs)
    return decorated_function
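
# functools.wraps keeps each view's __name__ intact, so Flask derives a distinct
# endpoint name for every decorated route instead of colliding on 'decorated_function'.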
# Create Blueprint
monitoring_bp = Blueprint('monitoring', __name__)
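
# No url_prefix is set here, so unless one is supplied when the blueprint is
# registered, the routes below mount at the application root
# (/monitoring, /live-dashboard, /alerts, /analytics, /analytics/stream).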
@monitoring_bp.route('/monitoring')
@login_required
def unified_monitoring():
    """Unified monitoring dashboard combining live activity and anomaly detection"""
    conn = None
    try:
        conn = get_db_connection()
        cur = conn.cursor(cursor_factory=RealDictCursor)

        # Initialize default values
        system_status = 'normal'
        status_color = 'success'
        active_alerts = 0
        live_metrics = {
            'active_licenses': 0,
            'total_validations': 0,
            'unique_devices': 0,
            'unique_ips': 0,
            'avg_response_time': 0
        }
        trend_data = []
        activity_stream = []
        geo_data = []
        top_licenses = []
        anomaly_distribution = []
        performance_data = []

        # Check if tables exist before querying
        has_heartbeats = check_table_exists(conn, 'license_heartbeats')
        has_anomalies = check_table_exists(conn, 'anomaly_detections')
        if has_anomalies:
            # Get active alerts count
            cur.execute("""
                SELECT COUNT(*) as count
                FROM anomaly_detections
                WHERE resolved = false
                  AND detected_at > NOW() - INTERVAL '24 hours'
            """)
            active_alerts = cur.fetchone()['count'] or 0

            # Determine system status based on alerts
            if active_alerts == 0:
                system_status = 'normal'
                status_color = 'success'
            elif active_alerts < 5:
                system_status = 'warning'
                status_color = 'warning'
            else:
                system_status = 'critical'
                status_color = 'danger'
        if has_heartbeats:
            # Ensure current month partition exists
            ensure_partition_exists(conn, 'license_heartbeats', datetime.now())

            # Executive summary metrics
            cur.execute("""
                SELECT
                    COUNT(DISTINCT license_id) as active_licenses,
                    COUNT(*) as total_validations,
                    COUNT(DISTINCT hardware_id) as unique_devices,
                    COUNT(DISTINCT ip_address) as unique_ips,
                    0 as avg_response_time
                FROM license_heartbeats
                WHERE timestamp > NOW() - INTERVAL '5 minutes'
            """)
            result = cur.fetchone()
            if result:
                live_metrics = result

            # Get 24h trend data for metrics
            cur.execute("""
                SELECT
                    DATE_TRUNC('hour', timestamp) as hour,
                    COUNT(DISTINCT license_id) as licenses,
                    COUNT(*) as validations
                FROM license_heartbeats
                WHERE timestamp > NOW() - INTERVAL '24 hours'
                GROUP BY hour
                ORDER BY hour
            """)
            trend_data = cur.fetchall()
            # Activity stream - just validations if no anomalies table
            if has_anomalies:
                cur.execute("""
                    WITH combined_events AS (
                        -- Normal validations
                        SELECT
                            lh.timestamp,
                            'validation' as event_type,
                            'normal' as severity,
                            l.license_key,
                            c.name as customer_name,
                            lh.ip_address,
                            lh.hardware_id,
                            NULL as anomaly_type,
                            NULL as description
                        FROM license_heartbeats lh
                        JOIN licenses l ON l.id = lh.license_id
                        JOIN customers c ON c.id = l.customer_id
                        WHERE lh.timestamp > NOW() - INTERVAL '1 hour'
                        UNION ALL
                        -- Anomalies
                        SELECT
                            ad.detected_at as timestamp,
                            'anomaly' as event_type,
                            ad.severity,
                            l.license_key,
                            c.name as customer_name,
                            ad.ip_address,
                            ad.hardware_id,
                            ad.anomaly_type,
                            ad.description
                        FROM anomaly_detections ad
                        LEFT JOIN licenses l ON l.id = ad.license_id
                        LEFT JOIN customers c ON c.id = l.customer_id
                        WHERE ad.detected_at > NOW() - INTERVAL '1 hour'
                    )
                    SELECT * FROM combined_events
                    ORDER BY timestamp DESC
                    LIMIT 100
                """)
            else:
                # Just show validations
                cur.execute("""
                    SELECT
                        lh.timestamp,
                        'validation' as event_type,
                        'normal' as severity,
                        l.license_key,
                        c.name as customer_name,
                        lh.ip_address,
                        lh.hardware_id,
                        NULL as anomaly_type,
                        NULL as description
                    FROM license_heartbeats lh
                    JOIN licenses l ON l.id = lh.license_id
                    JOIN customers c ON c.id = l.customer_id
                    WHERE lh.timestamp > NOW() - INTERVAL '1 hour'
                    ORDER BY lh.timestamp DESC
                    LIMIT 100
                """)
            activity_stream = cur.fetchall()
            # Geographic distribution
            cur.execute("""
                SELECT
                    ip_address,
                    COUNT(*) as request_count,
                    COUNT(DISTINCT license_id) as license_count
                FROM license_heartbeats
                WHERE timestamp > NOW() - INTERVAL '1 hour'
                GROUP BY ip_address
                ORDER BY request_count DESC
                LIMIT 20
            """)
            geo_data = cur.fetchall()

            # Top active licenses
            if has_anomalies:
                cur.execute("""
                    SELECT
                        l.id,
                        l.license_key,
                        c.name as customer_name,
                        COUNT(DISTINCT lh.hardware_id) as device_count,
                        COUNT(lh.*) as validation_count,
                        MAX(lh.timestamp) as last_seen,
                        COUNT(DISTINCT ad.id) as anomaly_count
                    FROM licenses l
                    JOIN customers c ON c.id = l.customer_id
                    LEFT JOIN license_heartbeats lh ON l.id = lh.license_id
                        AND lh.timestamp > NOW() - INTERVAL '1 hour'
                    LEFT JOIN anomaly_detections ad ON l.id = ad.license_id
                        AND ad.detected_at > NOW() - INTERVAL '24 hours'
                    WHERE lh.license_id IS NOT NULL
                    GROUP BY l.id, l.license_key, c.name
                    ORDER BY validation_count DESC
                    LIMIT 10
                """)
            else:
                cur.execute("""
                    SELECT
                        l.id,
                        l.license_key,
                        c.name as customer_name,
                        COUNT(DISTINCT lh.hardware_id) as device_count,
                        COUNT(lh.*) as validation_count,
                        MAX(lh.timestamp) as last_seen,
                        0 as anomaly_count
                    FROM licenses l
                    JOIN customers c ON c.id = l.customer_id
                    LEFT JOIN license_heartbeats lh ON l.id = lh.license_id
                        AND lh.timestamp > NOW() - INTERVAL '1 hour'
                    WHERE lh.license_id IS NOT NULL
                    GROUP BY l.id, l.license_key, c.name
                    ORDER BY validation_count DESC
                    LIMIT 10
                """)
            top_licenses = cur.fetchall()
            # Performance metrics
            cur.execute("""
                SELECT
                    DATE_TRUNC('minute', timestamp) as minute,
                    0 as avg_response_time,
                    0 as max_response_time,
                    COUNT(*) as request_count
                FROM license_heartbeats
                WHERE timestamp > NOW() - INTERVAL '30 minutes'
                GROUP BY minute
                ORDER BY minute DESC
            """)
            performance_data = cur.fetchall()

        if has_anomalies:
            # Anomaly distribution
            cur.execute("""
                SELECT
                    anomaly_type,
                    COUNT(*) as count,
                    MAX(severity) as max_severity
                FROM anomaly_detections
                WHERE detected_at > NOW() - INTERVAL '24 hours'
                GROUP BY anomaly_type
                ORDER BY count DESC
            """)
            anomaly_distribution = cur.fetchall()
        cur.close()
        return render_template('monitoring/unified_monitoring.html',
                               system_status=system_status,
                               status_color=status_color,
                               active_alerts=active_alerts,
                               live_metrics=live_metrics,
                               trend_data=trend_data,
                               activity_stream=activity_stream,
                               geo_data=geo_data,
                               top_licenses=top_licenses,
                               anomaly_distribution=anomaly_distribution,
                               performance_data=performance_data)
    except Exception as e:
        logger.error(f"Error in unified monitoring: {str(e)}")
        return render_template('error.html',
                               error='Fehler beim Laden des Monitorings',
                               details=str(e))
    finally:
        # Close the connection on both the success and the error path
        if conn is not None:
            conn.close()
@monitoring_bp.route('/live-dashboard')
@login_required
def live_dashboard():
    """Redirect to unified monitoring dashboard"""
    return redirect(url_for('monitoring.unified_monitoring'))
@monitoring_bp.route('/alerts')
@login_required
def alerts():
    """Show active alerts from Alertmanager"""
    alerts = []
    try:
        # Get alerts from Alertmanager; the v1 API wraps the alert list
        # in a {"status": ..., "data": [...]} envelope
        response = requests.get('http://alertmanager:9093/api/v1/alerts', timeout=2)
        if response.status_code == 200:
            alerts = response.json().get('data', [])
    except requests.RequestException:
        # Fallback to database anomalies if the table exists
        conn = get_db_connection()
        if check_table_exists(conn, 'anomaly_detections'):
            cur = conn.cursor(cursor_factory=RealDictCursor)
            cur.execute("""
                SELECT
                    ad.*,
                    l.license_key,
                    c.name as company_name
                FROM anomaly_detections ad
                LEFT JOIN licenses l ON l.id = ad.license_id
                LEFT JOIN customers c ON c.id = l.customer_id
                WHERE ad.resolved = false
                ORDER BY ad.detected_at DESC
                LIMIT 50
            """)
            alerts = cur.fetchall()
            cur.close()
        conn.close()
    return render_template('monitoring/alerts.html', alerts=alerts)
@monitoring_bp.route('/analytics')
@login_required
def analytics():
    """Combined analytics and license server status page"""
    conn = None
    try:
        conn = get_db_connection()
        cur = conn.cursor(cursor_factory=RealDictCursor)

        # Initialize default values
        live_stats = [0, 0, 0, 0]
        validation_rates = []

        if check_table_exists(conn, 'license_heartbeats'):
            # Get live statistics for the top cards
            cur.execute("""
                SELECT
                    COUNT(DISTINCT license_id) as active_licenses,
                    COUNT(*) as total_validations,
                    COUNT(DISTINCT hardware_id) as unique_devices,
                    COUNT(DISTINCT ip_address) as unique_ips
                FROM license_heartbeats
                WHERE timestamp > NOW() - INTERVAL '5 minutes'
            """)
            live_stats_data = cur.fetchone()
            live_stats = [
                live_stats_data['active_licenses'] or 0,
                live_stats_data['total_validations'] or 0,
                live_stats_data['unique_devices'] or 0,
                live_stats_data['unique_ips'] or 0
            ]

            # Get validation rates for the chart (last 30 minutes, aggregated by minute)
            cur.execute("""
                SELECT
                    DATE_TRUNC('minute', timestamp) as minute,
                    COUNT(*) as count
                FROM license_heartbeats
                WHERE timestamp > NOW() - INTERVAL '30 minutes'
                GROUP BY minute
                ORDER BY minute DESC
                LIMIT 30
            """)
            validation_rates = [(row['minute'].isoformat(), row['count']) for row in cur.fetchall()]

        cur.close()
        return render_template('monitoring/analytics.html',
                               live_stats=live_stats,
                               validation_rates=validation_rates)
    except Exception as e:
        logger.error(f"Error in analytics: {str(e)}")
        return render_template('error.html',
                               error='Fehler beim Laden der Analytics',
                               details=str(e))
    finally:
        if conn is not None:
            conn.close()
@monitoring_bp.route('/analytics/stream')
@login_required
def analytics_stream():
    """Server-sent event stream for live analytics updates"""
    def generate():
        while True:
            try:
                conn = get_db_connection()
                cur = conn.cursor(cursor_factory=RealDictCursor)
                data = {'active_licenses': 0, 'total_validations': 0,
                        'unique_devices': 0, 'unique_ips': 0}
                if check_table_exists(conn, 'license_heartbeats'):
                    cur.execute("""
                        SELECT
                            COUNT(DISTINCT license_id) as active_licenses,
                            COUNT(*) as total_validations,
                            COUNT(DISTINCT hardware_id) as unique_devices,
                            COUNT(DISTINCT ip_address) as unique_ips
                        FROM license_heartbeats
                        WHERE timestamp > NOW() - INTERVAL '5 minutes'
                    """)
                    result = cur.fetchone()
                    if result:
                        data = dict(result)
                cur.close()
                conn.close()
                # json.dumps instead of jsonify: once the response is streaming,
                # the generator runs outside the Flask application context
                yield f"data: {json.dumps(data)}\n\n"
            except Exception as e:
                logger.error(f"Error in analytics stream: {str(e)}")
                yield f"data: {json.dumps({'error': str(e)})}\n\n"
            time.sleep(5)  # Update every 5 seconds

    return Response(generate(), mimetype="text/event-stream")
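
For context, a minimal sketch of how this blueprint could be wired up and how the event stream can be consumed. The import path, host, and port are assumptions for illustration and are not part of this commit:

    # Hypothetical wiring (module path and port are assumed)
    from flask import Flask
    from monitoring import monitoring_bp  # adjust to the actual module path

    app = Flask(__name__)
    app.register_blueprint(monitoring_bp)

    if __name__ == '__main__':
        app.run(host='0.0.0.0', port=5000)

    # Reading the SSE endpoint with requests (assumes an authenticated session,
    # since login_required redirects anonymous clients to the login page)
    import requests

    with requests.get('http://localhost:5000/analytics/stream', stream=True) as resp:
        for line in resp.iter_lines(decode_unicode=True):
            if line and line.startswith('data: '):
                print(line[len('data: '):])  # one JSON payload roughly every 5 seconds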