dashboard/backend/routes/incidents.py
SOC Analyst 1455e04303 fix: correct CampaignsView, analysis.py IPv4 split, entities date filter
- CampaignsView: update ClusterData interface to match real API response
  (severity/unique_ips/score instead of threat_level/total_ips/confidence_range)
  Fix fetch to use data.items, rewrite ClusterCard and BehavioralTab
  Remove unused getClassificationColor and THREAT_ORDER constants
- analysis.py: fix "IPv4Address object has no attribute 'split'" on line 322
  by adding a str() conversion before calling .split('.')
- entities.py: fix Date vs DateTime comparison: log_date is a Date column,
  and comparing it against now()-INTERVAL HOUR caused yesterday's entries
  to be excluded. Use toDate(now() - INTERVAL X HOUR) for a correct
  Date-level comparison (both fixes are sketched below)

Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com>
2026-03-15 23:10:35 +01:00
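
The analysis.py and entities.py changes described above live outside this file. A minimal sketch of both fixes, with illustrative values assumed rather than taken from the real diff:

from ipaddress import IPv4Address

addr = IPv4Address("203.0.113.7")   # hypothetical value; drivers often yield IPv4Address objects
octets = str(addr).split(".")       # str() first: IPv4Address has no .split()
assert octets == ["203", "0", "113", "7"]

# entities.py (ClickHouse): compare the Date column at Date granularity,
# e.g. WHERE log_date >= toDate(now() - INTERVAL 24 HOUR),
# instead of against a raw DateTime, which dropped yesterday's rows.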

273 lines
9.4 KiB
Python

"""
Routes pour la gestion des incidents clusterisés
"""
from fastapi import APIRouter, HTTPException, Query
from typing import List, Optional
from datetime import datetime, timedelta
from ..database import db
from ..models import BaseModel
router = APIRouter(prefix="/api/incidents", tags=["incidents"])
@router.get("/clusters")
async def get_incident_clusters(
hours: int = Query(24, ge=1, le=168, description="Fenêtre temporelle en heures"),
min_severity: str = Query("LOW", description="Niveau de sévérité minimum"),
limit: int = Query(20, ge=1, le=100, description="Nombre maximum de clusters")
):
"""
Récupère les incidents clusterisés automatiquement
Les clusters sont formés par:
- Subnet /24
- JA4 fingerprint
- Pattern temporel
"""
    try:
        # Cluster by /24 subnet, keeping one sample IP per cluster.
        # Note: src_ip is stored as IPv6, so IPv4 addresses appear as
        # IPv4-mapped ::ffff:x.x.x.x; the prefix is stripped with
        # replaceRegexpAll() to recover plain x.x.x.x notation.
cluster_query = """
WITH cleaned_ips AS (
SELECT
replaceRegexpAll(toString(src_ip), '^::ffff:', '') AS clean_ip,
detected_at,
ja4,
country_code,
asn_number,
threat_level,
anomaly_score
FROM ml_detected_anomalies
WHERE detected_at >= now() - INTERVAL %(hours)s HOUR
),
subnet_groups AS (
SELECT
concat(
splitByChar('.', clean_ip)[1], '.',
splitByChar('.', clean_ip)[2], '.',
splitByChar('.', clean_ip)[3], '.0/24'
) AS subnet,
count() AS total_detections,
uniq(clean_ip) AS unique_ips,
min(detected_at) AS first_seen,
max(detected_at) AS last_seen,
argMax(ja4, detected_at) AS ja4,
argMax(country_code, detected_at) AS country_code,
argMax(asn_number, detected_at) AS asn_number,
argMax(threat_level, detected_at) AS threat_level,
avg(anomaly_score) AS avg_score,
argMax(clean_ip, detected_at) AS sample_ip
FROM cleaned_ips
GROUP BY subnet
HAVING total_detections >= 2
)
SELECT
subnet,
total_detections,
unique_ips,
first_seen,
last_seen,
ja4,
country_code,
asn_number,
threat_level,
avg_score,
sample_ip
FROM subnet_groups
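            -- avg_score is built from anomaly_score, which appears to be negative
            -- (abs() is applied when read back), so ASC puts the most anomalous first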
            ORDER BY avg_score ASC, total_detections DESC
            LIMIT %(limit)s
        """
        result = db.query(cluster_query, {"hours": hours, "limit": limit})

        # Collect sample IPs to fetch real UA and trend data in bulk
        sample_ips = [row[10] for row in result.result_rows if row[10]]
        subnets_list = [row[0] for row in result.result_rows]

        # Fetch real primary UA per sample IP from view_dashboard_entities
        ua_by_ip: dict = {}
        if sample_ips:
            ip_list_sql = ", ".join(f"'{ip}'" for ip in sample_ips[:50])
ua_query = f"""
SELECT entity_value, arrayElement(user_agents, 1) AS top_ua
FROM view_dashboard_entities
WHERE entity_type = 'ip'
AND entity_value IN ({ip_list_sql})
AND notEmpty(user_agents)
GROUP BY entity_value, top_ua
ORDER BY entity_value
"""
try:
ua_result = db.query(ua_query)
for ua_row in ua_result.result_rows:
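                    # keep the first non-empty UA seen per IP; row order among
                    # duplicate IPs is not guaranteed by this query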
                    if ua_row[0] not in ua_by_ip and ua_row[1]:
                        ua_by_ip[str(ua_row[0])] = str(ua_row[1])
            except Exception:
                pass  # UA enrichment is best-effort
        # Compute real trend: compare current window vs previous window of same duration
        trend_query = """
            WITH cleaned AS (
                SELECT
                    replaceRegexpAll(toString(src_ip), '^::ffff:', '') AS clean_ip,
                    detected_at,
                    concat(
                        splitByChar('.', clean_ip)[1], '.',
                        splitByChar('.', clean_ip)[2], '.',
                        splitByChar('.', clean_ip)[3], '.0/24'
                    ) AS subnet
                FROM ml_detected_anomalies
            ),
            current_window AS (
                SELECT subnet, count() AS cnt
                FROM cleaned
                WHERE detected_at >= now() - INTERVAL %(hours)s HOUR
                GROUP BY subnet
            ),
            prev_window AS (
                SELECT subnet, count() AS cnt
                FROM cleaned
                WHERE detected_at >= now() - INTERVAL %(hours2)s HOUR
                  AND detected_at < now() - INTERVAL %(hours)s HOUR
                GROUP BY subnet
            )
            SELECT c.subnet, c.cnt AS current_cnt, p.cnt AS prev_cnt
            FROM current_window c
            LEFT JOIN prev_window p ON c.subnet = p.subnet
        """
        trend_by_subnet: dict = {}
        try:
            trend_result = db.query(trend_query, {"hours": hours, "hours2": hours * 2})
            for tr in trend_result.result_rows:
                subnet_key = tr[0]
                curr = tr[1] or 0
                prev = tr[2] or 0
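                # no baseline in the previous window: flag the cluster as "new"
                # with a nominal +100%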
                if prev == 0:
                    trend_by_subnet[subnet_key] = ("new", 100)
                else:
                    pct = round(((curr - prev) / prev) * 100)
                    trend_by_subnet[subnet_key] = ("up" if pct >= 0 else "down", abs(pct))
        except Exception:
            pass  # trend enrichment is best-effort, like the UA lookup above
        clusters = []
        for row in result.result_rows:
            subnet = row[0]
            threat_level = row[8] or 'LOW'
            unique_ips = row[2] or 1
            avg_score = abs(row[9] or 0)
            sample_ip = row[10] if row[10] else subnet.split('/')[0]
            # threat_level is argMax'd per subnet, so these counts are 0 or 1
            critical_count = 1 if threat_level == 'CRITICAL' else 0
            high_count = 1 if threat_level == 'HIGH' else 0
            # Heuristic risk score, capped at 100
            risk_score = min(100, round(
                (critical_count * 30) +
                (high_count * 20) +
                (unique_ips * 5) +
                (avg_score * 100)
            ))
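            # Severity from thresholds: CRITICAL >= 80, HIGH >= 60, MEDIUM >= 40,
            # with the argMax'd threat_level able to promote a cluster directly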
            if critical_count > 0 or risk_score >= 80:
                severity = "CRITICAL"
            elif high_count > (row[1] or 1) * 0.3 or risk_score >= 60:
                severity = "HIGH"
            elif high_count > 0 or risk_score >= 40:
                severity = "MEDIUM"
            else:
                severity = "LOW"
            trend_dir, trend_pct = trend_by_subnet.get(subnet, ("stable", 0))
            primary_ua = ua_by_ip.get(sample_ip, "")
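            # NOTE: IDs are positional within this response and regenerated on
            # each call, so they are not stable across requests (see the
            # mapping-table comment in get_incident_details)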
            clusters.append({
                "id": f"INC-{datetime.now().strftime('%Y%m%d')}-{len(clusters)+1:03d}",
                "score": risk_score,
                "severity": severity,
                "total_detections": row[1],
                "unique_ips": row[2],
                "subnet": subnet,
                "sample_ip": sample_ip,
                "ja4": row[5] or "",
                "primary_ua": primary_ua,
                "primary_target": row[3].strftime('%H:%M') if row[3] else "Unknown",
                "countries": [{"code": row[6] or "XX", "percentage": 100}],
                "asn": str(row[7]) if row[7] else "",
                "first_seen": row[3].isoformat() if row[3] else "",
                "last_seen": row[4].isoformat() if row[4] else "",
                "trend": trend_dir,
                "trend_percentage": trend_pct,
            })
        return {
            "items": clusters,
            "total": len(clusters),
            "period_hours": hours
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error: {str(e)}")
@router.get("/{cluster_id}")
async def get_incident_details(cluster_id: str):
"""
Récupère les détails d'un incident spécifique
"""
try:
# Extraire le subnet du cluster_id (simplifié)
# Dans une implémentation réelle, on aurait une table de mapping
return {
"id": cluster_id,
"details": "Implementation en cours",
"timeline": [],
"entities": [],
"classifications": []
}
except Exception as e:
raise HTTPException(status_code=500, detail=f"Erreur: {str(e)}")


@router.post("/{cluster_id}/classify")
async def classify_incident(
    cluster_id: str,
    label: str,
    tags: Optional[List[str]] = None,
    comment: str = ""
):
    """
    Quickly classifies an incident
    """
    try:
        # Future implementation: persist to the classifications table
        return {
            "status": "success",
            "cluster_id": cluster_id,
            "label": label,
            "tags": tags or [],
            "comment": comment
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error: {str(e)}")
@router.get("")
async def list_incidents(
status: str = Query("active", description="Statut des incidents"),
severity: str = Query(None, description="Filtrer par sévérité"),
hours: int = Query(24, ge=1, le=168)
):
"""
Liste tous les incidents avec filtres
"""
try:
# Redirige vers clusters pour l'instant
return await get_incident_clusters(hours=hours, limit=50)
except Exception as e:
raise HTTPException(status_code=500, detail=f"Erreur: {str(e)}")
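
A quick smoke test against a running instance (the base URL and port are assumptions, not part of this repo):

import requests

resp = requests.get(
    "http://localhost:8000/api/incidents/clusters",
    params={"hours": 48, "limit": 5},
)
resp.raise_for_status()
data = resp.json()
print(data["total"], "clusters in the last", data["period_hours"], "hours")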