refactor: UI improvements and code cleanup

Frontend:
- DetectionsList: Simplify columns, improve truncation and display for IPs, hosts, bot info
- IncidentsView: Replace metric cards with compact stat cards (unique IPs, known bots, ML anomalies, threat levels)
- InvestigationView: Add section navigation anchors, reorganize layout with proper IDs
- ThreatIntelView: Add navigation links to investigation pages, add comment column, improve table layout

Backend:
- Various route and model adjustments
- Configuration updates

Co-authored-by: Qwen-Coder <qwen-coder@alibabacloud.com>
Author: SOC Analyst
Date: 2026-03-20 09:56:49 +01:00
parent dbb9bb3f94
commit bd33fbad01
17 changed files with 444 additions and 510 deletions

View File

@ -1,9 +1,9 @@
"""
Endpoints pour l'analyse de corrélations et la classification SOC
"""
from collections import defaultdict
from fastapi import APIRouter, HTTPException, Query
from typing import Optional, List
from datetime import datetime
import ipaddress
import json
@ -17,6 +17,14 @@ from ..models import (
router = APIRouter(prefix="/api/analysis", tags=["analysis"])
# ISO country code → human-readable country name.
# Shared lookup table used by analyze_ip_country and analyze_country;
# codes absent from this mapping fall back to the raw ISO code via .get(code, code).
_COUNTRY_NAMES: dict[str, str] = {
"CN": "China", "US": "United States", "DE": "Germany",
"FR": "France", "RU": "Russia", "GB": "United Kingdom",
"NL": "Netherlands", "IN": "India", "BR": "Brazil",
"JP": "Japan", "KR": "South Korea", "IT": "Italy",
"ES": "Spain", "CA": "Canada", "AU": "Australia"
}
# =============================================================================
# ANALYSE SUBNET / ASN
@ -122,15 +130,6 @@ async def analyze_ip_country(ip: str):
ip_country_code = ip_result.result_rows[0][0]
asn_number = ip_result.result_rows[0][1]
# Noms des pays
country_names = {
"CN": "China", "US": "United States", "DE": "Germany",
"FR": "France", "RU": "Russia", "GB": "United Kingdom",
"NL": "Netherlands", "IN": "India", "BR": "Brazil",
"JP": "Japan", "KR": "South Korea", "IT": "Italy",
"ES": "Spain", "CA": "Canada", "AU": "Australia"
}
# Répartition des autres pays du même ASN
asn_countries_query = """
SELECT
@ -150,7 +149,7 @@ async def analyze_ip_country(ip: str):
asn_countries = [
{
"code": row[0],
"name": country_names.get(row[0], row[0]),
"name": _COUNTRY_NAMES.get(row[0], row[0]),
"count": row[1],
"percentage": round((row[1] / total * 100), 2) if total > 0 else 0.0
}
@ -160,7 +159,7 @@ async def analyze_ip_country(ip: str):
return {
"ip_country": {
"code": ip_country_code,
"name": country_names.get(ip_country_code, ip_country_code)
"name": _COUNTRY_NAMES.get(ip_country_code, ip_country_code)
},
"asn_countries": asn_countries
}
@ -196,19 +195,10 @@ async def analyze_country(days: int = Query(1, ge=1, le=30)):
# Calculer le total pour le pourcentage
total = sum(row[1] for row in top_result.result_rows)
# Noms des pays (mapping simple)
country_names = {
"CN": "China", "US": "United States", "DE": "Germany",
"FR": "France", "RU": "Russia", "GB": "United Kingdom",
"NL": "Netherlands", "IN": "India", "BR": "Brazil",
"JP": "Japan", "KR": "South Korea", "IT": "Italy",
"ES": "Spain", "CA": "Canada", "AU": "Australia"
}
top_countries = [
CountryData(
code=row[0],
name=country_names.get(row[0], row[0]),
name=_COUNTRY_NAMES.get(row[0], row[0]),
count=row[1],
percentage=round((row[1] / total * 100), 2) if total > 0 else 0.0
)
@ -311,7 +301,6 @@ async def analyze_ja4(ip: str):
subnets_result = db.query(subnets_query, {"ja4": ja4})
# Grouper par subnet /24
from collections import defaultdict
subnet_counts = defaultdict(int)
for row in subnets_result.result_rows:
ip_addr = str(row[0])
@ -439,24 +428,24 @@ async def get_classification_recommendation(ip: str):
# Récupérer les analyses
try:
subnet_analysis = await analyze_subnet(ip)
except:
except Exception:
subnet_analysis = None
try:
country_analysis = await analyze_country(1)
except:
except Exception:
country_analysis = None
try:
ja4_analysis = await analyze_ja4(ip)
except:
except Exception:
ja4_analysis = None
try:
ua_analysis = await analyze_user_agents(ip)
except:
except Exception:
ua_analysis = None
# Indicateurs par défaut
indicators = CorrelationIndicators(
subnet_ips_count=subnet_analysis.total_in_subnet if subnet_analysis else 0,

View File

@ -1,12 +1,14 @@
"""
Routes pour l'audit et les logs d'activité
"""
import logging
from fastapi import APIRouter, HTTPException, Query, Request
from typing import List, Optional
from datetime import datetime, timedelta
from typing import Optional
from datetime import datetime
from ..database import db
router = APIRouter(prefix="/api/audit", tags=["audit"])
logger = logging.getLogger(__name__)
@router.post("/logs")
@ -50,8 +52,8 @@ async def create_audit_log(
try:
db.query(insert_query, params)
except Exception as e:
# Table might not exist yet, log warning
print(f"Warning: Could not insert audit log: {e}")
# La table peut ne pas encore exister — on logue mais on ne bloque pas l'appelant
logger.warning(f"Could not insert audit log: {e}")
return {
"status": "success",

View File

@ -6,22 +6,20 @@ Clustering d'IPs multi-métriques — WebGL / deck.gl backend.
- Calcul en background thread + cache 30 min
- Endpoints : /clusters, /status, /cluster/{id}/points
"""
from __future__ import annotations
import math
import time
import logging
import threading
from collections import Counter
from concurrent.futures import ThreadPoolExecutor
from typing import Optional, Any
from typing import Any
import numpy as np
from fastapi import APIRouter, HTTPException, Query
from ..database import db
from ..services.clustering_engine import (
FEATURE_KEYS, FEATURE_NAMES, FEATURE_NORMS, N_FEATURES,
FEATURE_NAMES,
build_feature_vector, kmeans_pp, pca_2d, compute_hulls,
name_cluster, risk_score_from_centroid, standardize,
risk_to_gradient_color,

View File

@ -2,9 +2,7 @@
Routes pour l'investigation d'entités (IP, JA4, User-Agent, Client-Header, Host, Path, Query-Param)
"""
from fastapi import APIRouter, HTTPException, Query
from typing import Optional, List, Dict, Any
from datetime import datetime
import json
from typing import Optional, List
from ..database import db
from ..models import (
@ -16,18 +14,10 @@ from ..models import (
router = APIRouter(prefix="/api/entities", tags=["Entities"])
db = db
# Mapping des types d'entités
ENTITY_TYPES = {
'ip': 'ip',
'ja4': 'ja4',
'user_agent': 'user_agent',
'client_header': 'client_header',
'host': 'host',
'path': 'path',
'query_param': 'query_param'
}
# Set of valid entity types accepted by the investigation endpoints.
# frozenset: immutable, O(1) membership tests for validating the entity_type path/query value.
VALID_ENTITY_TYPES = frozenset({
'ip', 'ja4', 'user_agent', 'client_header', 'host', 'path', 'query_param'
})
def get_entity_stats(entity_type: str, entity_value: str, hours: int = 24) -> Optional[EntityStats]:

View File

@ -10,7 +10,6 @@ Objectifs:
qui usurpent des UA de navigateurs légitimes
"""
from fastapi import APIRouter, HTTPException, Query
from typing import Optional
import re
from ..database import db

View File

@ -1,11 +1,11 @@
"""
Routes pour la gestion des incidents clusterisés
"""
import hashlib
from fastapi import APIRouter, HTTPException, Query
from typing import List, Optional
from datetime import datetime, timedelta
from datetime import datetime
from ..database import db
from ..models import BaseModel
router = APIRouter(prefix="/api/incidents", tags=["incidents"])
@ -83,7 +83,6 @@ async def get_incident_clusters(
# Collect sample IPs to fetch real UA and trend data in bulk
sample_ips = [row[10] for row in result.result_rows if row[10]]
subnets_list = [row[0] for row in result.result_rows]
# Fetch real primary UA per sample IP from view_dashboard_entities
ua_by_ip: dict = {}
@ -182,7 +181,7 @@ async def get_incident_clusters(
primary_ua = ua_by_ip.get(sample_ip, "")
clusters.append({
"id": f"INC-{datetime.now().strftime('%Y%m%d')}-{len(clusters)+1:03d}",
"id": f"INC-{hashlib.md5(subnet.encode()).hexdigest()[:8].upper()}",
"score": risk_score,
"severity": severity,
"total_detections": row[1],
@ -213,22 +212,13 @@ async def get_incident_clusters(
@router.get("/{cluster_id}")
async def get_incident_details(cluster_id: str):
"""
Récupère les détails d'un incident spécifique
Récupère les détails d'un incident spécifique.
Non encore implémenté — les détails par cluster seront disponibles dans une prochaine version.
"""
try:
# Extraire le subnet du cluster_id (simplifié)
# Dans une implémentation réelle, on aurait une table de mapping
return {
"id": cluster_id,
"details": "Implementation en cours",
"timeline": [],
"entities": [],
"classifications": []
}
except Exception as e:
raise HTTPException(status_code=500, detail=f"Erreur: {str(e)}")
raise HTTPException(
status_code=501,
detail="Détails par incident non encore implémentés. Utilisez /api/incidents/clusters pour la liste."
)
@router.post("/{cluster_id}/classify")
@ -239,34 +229,38 @@ async def classify_incident(
comment: str = ""
):
"""
Classe un incident rapidement
Classe un incident rapidement.
Non encore implémenté — utilisez /api/analysis/{ip}/classify pour classifier une IP.
"""
try:
# Implementation future - sauvegarde dans la table classifications
return {
"status": "success",
"cluster_id": cluster_id,
"label": label,
"tags": tags or [],
"comment": comment
}
except Exception as e:
raise HTTPException(status_code=500, detail=f"Erreur: {str(e)}")
raise HTTPException(
status_code=501,
detail="Classification par incident non encore implémentée. Utilisez /api/analysis/{ip}/classify."
)
@router.get("")
async def list_incidents(
status: str = Query("active", description="Statut des incidents"),
severity: str = Query(None, description="Filtrer par sévérité"),
severity: Optional[str] = Query(None, description="Filtrer par sévérité (LOW/MEDIUM/HIGH/CRITICAL)"),
hours: int = Query(24, ge=1, le=168)
):
"""
Liste tous les incidents avec filtres
Liste tous les incidents avec filtres.
Délègue à get_incident_clusters ; le filtre severity est appliqué post-requête.
"""
try:
# Redirige vers clusters pour l'instant
return await get_incident_clusters(hours=hours, limit=50)
result = await get_incident_clusters(hours=hours, limit=100)
items = result["items"]
if severity:
sev_upper = severity.upper()
items = [c for c in items if c.get("severity") == sev_upper]
return {
"items": items,
"total": len(items),
"period_hours": hours,
}
except Exception as e:
raise HTTPException(status_code=500, detail=f"Erreur: {str(e)}")

View File

@ -1,7 +1,6 @@
"""
Endpoints pour la détection de la rotation de fingerprints JA4 et des menaces persistantes
"""
import math
from fastapi import APIRouter, HTTPException, Query
from ..database import db
@ -110,7 +109,7 @@ async def get_sophistication(limit: int = Query(50, ge=1, le=500)):
try:
sql = """
SELECT
replaceRegexpAll(toString(r.src_ip), '^::ffff:', '') AS ip,
r.ip,
r.distinct_ja4_count,
coalesce(rec.recurrence, 0) AS recurrence,
coalesce(bf.bruteforce_hits, 0) AS bruteforce_hits,
@ -119,18 +118,26 @@ async def get_sophistication(limit: int = Query(50, ge=1, le=500)):
+ coalesce(rec.recurrence, 0) * 20
+ least(30.0, log(coalesce(bf.bruteforce_hits, 0) + 1) * 5)
), 1) AS sophistication_score
FROM mabase_prod.view_host_ip_ja4_rotation r
FROM (
SELECT
replaceRegexpAll(toString(src_ip), '^::ffff:', '') AS ip,
distinct_ja4_count
FROM mabase_prod.view_host_ip_ja4_rotation
) r
LEFT JOIN (
SELECT src_ip, count() AS recurrence
SELECT
replaceRegexpAll(toString(src_ip), '^::ffff:', '') AS ip,
count() AS recurrence
FROM mabase_prod.ml_detected_anomalies FINAL
GROUP BY src_ip
) rec USING(src_ip)
GROUP BY ip
) rec ON r.ip = rec.ip
LEFT JOIN (
SELECT replaceRegexpAll(toString(src_ip),'^::ffff:','') AS src_ip,
sum(hits) AS bruteforce_hits
SELECT
replaceRegexpAll(toString(src_ip), '^::ffff:', '') AS ip,
sum(hits) AS bruteforce_hits
FROM mabase_prod.view_form_bruteforce_detected
GROUP BY src_ip
) bf USING(src_ip)
GROUP BY ip
) bf ON r.ip = bf.ip
ORDER BY sophistication_score DESC
LIMIT %(limit)s
"""