feat(clustering): ajout features headers HTTP (Accept-Language, Accept-Encoding, Sec-Fetch, nb headers)

- SQL: LEFT JOIN sur view_dashboard_entities pour extraire la présence des headers
  Accept-Encoding, Sec-Fetch-* et le nombre de headers par src_ip/ja4 (via subquery)
- SQL: ajout avg(ml.has_accept_language) depuis ml_detected_anomalies
- FEATURES: 23 → 27 dimensions
  [23] Accept-Language  (0=absent=bot-like)
  [24] Accept-Encoding  (0=absent=bot-like)
  [25] Sec-Fetch-*      (1=vrai navigateur)
  [26] Nb Headers       (normalisé /20; 3=bot, 15=browser)
- risk_score_from_centroid(): poids réajustés (somme=1.0), 4 nouveaux termes header
  absence Accept-Language ×0.05, absence Accept-Encoding ×0.05,
  absence Sec-Fetch ×0.04, peu de headers ×0.04
- name_cluster(): nouveau label 'Bot UA simulé' (ua_ch mismatch + sec_fetch absent)
  et 'Scanner pur (no headers)' + 'Navigateur légitime' + 'Headless (no Sec-Fetch)'
- Fix: %% dans les LIKE ClickHouse (échappement paramètres Python %-format)

Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com>
This commit is contained in:
SOC Analyst
2026-03-19 11:04:08 +01:00
parent f456c807db
commit 8fb054c8b7
2 changed files with 103 additions and 47 deletions

View File

@@ -89,11 +89,29 @@ SELECT
max(ml.threat_level) AS threat, max(ml.threat_level) AS threat,
any(ml.country_code) AS country, any(ml.country_code) AS country,
any(ml.asn_org) AS asn_org any(ml.asn_org) AS asn_org,
-- Features headers HTTP (depuis view_dashboard_entities)
avg(ml.has_accept_language) AS hdr_accept_lang,
any(vh.hdr_enc) AS hdr_has_encoding,
any(vh.hdr_sec_fetch) AS hdr_has_sec_fetch,
any(vh.hdr_count) AS hdr_count_raw
FROM mabase_prod.agg_host_ip_ja4_1h t FROM mabase_prod.agg_host_ip_ja4_1h t
LEFT JOIN mabase_prod.ml_detected_anomalies ml LEFT JOIN mabase_prod.ml_detected_anomalies ml
ON t.src_ip = ml.src_ip AND t.ja4 = ml.ja4 ON t.src_ip = ml.src_ip AND t.ja4 = ml.ja4
AND ml.detected_at >= now() - INTERVAL %(hours)s HOUR AND ml.detected_at >= now() - INTERVAL %(hours)s HOUR
LEFT JOIN (
SELECT
toIPv6(concat('::ffff:', toString(src_ip))) AS src_ip_v6,
ja4,
any(arrayExists(x -> x LIKE '%%Accept-Encoding%%', client_headers)) AS hdr_enc,
any(arrayExists(x -> x LIKE '%%Sec-Fetch%%', client_headers)) AS hdr_sec_fetch,
any(length(splitByChar(',', client_headers[1]))) AS hdr_count
FROM mabase_prod.view_dashboard_entities
WHERE length(client_headers) > 0
AND log_date >= today() - 2
GROUP BY src_ip_v6, ja4
) vh ON t.src_ip = vh.src_ip_v6 AND t.ja4 = vh.ja4
WHERE t.window_start >= now() - INTERVAL %(hours)s HOUR WHERE t.window_start >= now() - INTERVAL %(hours)s HOUR
AND t.tcp_ttl_raw > 0 AND t.tcp_ttl_raw > 0
GROUP BY t.src_ip, t.ja4 GROUP BY t.src_ip, t.ja4
@@ -105,6 +123,7 @@ _SQL_COLS = [
"ip_id_zero", "entropy", "browser_score", "alpn_mismatch", "alpn_missing", "ip_id_zero", "entropy", "browser_score", "alpn_mismatch", "alpn_missing",
"h2_eff", "hdr_conf", "ua_ch_mismatch", "asset_ratio", "direct_ratio", "h2_eff", "hdr_conf", "ua_ch_mismatch", "asset_ratio", "direct_ratio",
"ja4_count", "ua_rotating", "threat", "country", "asn_org", "ja4_count", "ua_rotating", "threat", "country", "asn_org",
"hdr_accept_lang", "hdr_has_encoding", "hdr_has_sec_fetch", "hdr_count_raw",
] ]

View File

@@ -6,7 +6,7 @@ Ref:
scipy.spatial.ConvexHull — enveloppe convexe (Graham/Qhull) scipy.spatial.ConvexHull — enveloppe convexe (Graham/Qhull)
sklearn-style API — centroids, labels_, inertia_ sklearn-style API — centroids, labels_, inertia_
Features (23 dimensions, normalisées [0,1]) : Features (27 dimensions, normalisées [0,1]) :
0 ttl_n : TTL initial normalisé 0 ttl_n : TTL initial normalisé
1 mss_n : MSS normalisé → type réseau 1 mss_n : MSS normalisé → type réseau
2 scale_n : facteur de mise à l'échelle TCP 2 scale_n : facteur de mise à l'échelle TCP
@@ -30,6 +30,10 @@ Features (23 dimensions, normalisées [0,1]) :
20 ua_rot_n : UA rotatif (booléen) 20 ua_rot_n : UA rotatif (booléen)
21 country_risk_n : risque pays source (CN/RU/KP → 1.0, US/DE/FR → 0.0) 21 country_risk_n : risque pays source (CN/RU/KP → 1.0, US/DE/FR → 0.0)
22 asn_cloud_n : hébergeur cloud/CDN/VPN (Cloudflare/AWS/OVH → 1.0) 22 asn_cloud_n : hébergeur cloud/CDN/VPN (Cloudflare/AWS/OVH → 1.0)
23 hdr_accept_lang_n : présence header Accept-Language (0=absent=bot-like)
24 hdr_encoding_n : présence header Accept-Encoding (0=absent=bot-like)
25 hdr_sec_fetch_n : présence headers Sec-Fetch-* (1=navigateur réel)
26 hdr_count_n : nombre de headers HTTP normalisé (3=bot, 15=browser)
""" """
from __future__ import annotations from __future__ import annotations
@@ -144,6 +148,13 @@ FEATURES: list[tuple[str, str, object]] = [
# ── Géographie & infrastructure (nouvelles features) ────────────────── # ── Géographie & infrastructure (nouvelles features) ──────────────────
("country", "Risque Pays", lambda v: country_risk(str(v) if v else None)), ("country", "Risque Pays", lambda v: country_risk(str(v) if v else None)),
("asn_org", "Hébergeur Cloud/VPN", lambda v: asn_cloud_score(str(v) if v else None)), ("asn_org", "Hébergeur Cloud/VPN", lambda v: asn_cloud_score(str(v) if v else None)),
# ── Headers HTTP (présence / profil de la requête) ────────────────────
# Absence d'Accept-Language ou Accept-Encoding = fort signal bot (bots simples l'omettent)
# Sec-Fetch-* = exclusif aux navigateurs réels (fetch metadata)
("hdr_accept_lang", "Accept-Language", lambda v: min(1.0, float(v or 0))),
("hdr_has_encoding", "Accept-Encoding", lambda v: 1.0 if float(v or 0) > 0 else 0.0),
("hdr_has_sec_fetch", "Sec-Fetch Headers", lambda v: 1.0 if float(v or 0) > 0 else 0.0),
("hdr_count_raw", "Nb Headers", lambda v: min(1.0, float(v or 0) / 20.0)),
] ]
FEATURE_KEYS = [f[0] for f in FEATURES] FEATURE_KEYS = [f[0] for f in FEATURES]
@@ -325,18 +336,28 @@ def name_cluster(centroid: np.ndarray, raw_stats: dict) -> str:
s = centroid s = centroid
ttl_raw = float(raw_stats.get("mean_ttl", 0)) ttl_raw = float(raw_stats.get("mean_ttl", 0))
mss_raw = float(raw_stats.get("mean_mss", 0)) mss_raw = float(raw_stats.get("mean_mss", 0))
# Indices : 21=country_risk, 22=asn_cloud country_risk_v = s[21] if len(s) > 21 else 0.0
country_risk = s[21] if len(s) > 21 else 0.0
asn_cloud = s[22] if len(s) > 22 else 0.0 asn_cloud = s[22] if len(s) > 22 else 0.0
# Features headers (indices 23-26)
accept_lang = s[23] if len(s) > 23 else 1.0
accept_enc = s[24] if len(s) > 24 else 1.0
sec_fetch = s[25] if len(s) > 25 else 0.0
hdr_count = s[26] if len(s) > 26 else 0.5
# Scanner pur : aucun header browser, peu de headers
if accept_lang < 0.15 and accept_enc < 0.15 and hdr_count < 0.25:
return "🤖 Scanner pur (no headers)"
# Scanners Masscan # Scanners Masscan
if s[0] > 0.16 and s[0] < 0.25 and mss_raw in range(1440, 1460) and s[2] > 0.25: if s[0] > 0.16 and s[0] < 0.25 and mss_raw in range(1440, 1460) and s[2] > 0.25:
return "🤖 Masscan Scanner" return "🤖 Masscan Scanner"
# Bots offensifs agressifs (fuzzing + anomalie + pays risqué) # Bots offensifs agressifs (fuzzing + anomalie + pas de headers browser)
if s[4] > 0.40 and s[6] > 0.3: if s[4] > 0.40 and s[6] > 0.3:
return "🤖 Bot agressif" return "🤖 Bot agressif"
# Bot qui simule un navigateur mais sans les vrais headers (ua_ch + absent sec_fetch)
if s[16] > 0.40 and sec_fetch < 0.2 and accept_lang < 0.3:
return "🤖 Bot UA simulé"
# Pays à très haut risque (CN, RU, KP) avec trafic anormal # Pays à très haut risque (CN, RU, KP) avec trafic anormal
if country_risk > 0.75 and (s[4] > 0.10 or asn_cloud > 0.5): if country_risk_v > 0.75 and (s[4] > 0.10 or asn_cloud > 0.5):
return "🌏 Source pays risqué" return "🌏 Source pays risqué"
# Cloud + UA-CH mismatch = crawler/bot cloud # Cloud + UA-CH mismatch = crawler/bot cloud
if s[16] > 0.50 and asn_cloud > 0.70: if s[16] > 0.50 and asn_cloud > 0.70:
@@ -344,9 +365,11 @@ def name_cluster(centroid: np.ndarray, raw_stats: dict) -> str:
# UA-CH mismatch seul # UA-CH mismatch seul
if s[16] > 0.60: if s[16] > 0.60:
return "🤖 UA-CH Mismatch" return "🤖 UA-CH Mismatch"
# Headless browser # Headless browser avec headers browser réels (Puppeteer, Playwright)
if s[7] > 0.50: if s[7] > 0.50 and sec_fetch > 0.5:
return "🤖 Headless Browser" return "🤖 Headless Browser"
if s[7] > 0.50:
return "🤖 Headless (no Sec-Fetch)"
# Anomalie ML significative # Anomalie ML significative
if s[4] > 0.35: if s[4] > 0.35:
return "⚠️ Anomalie ML" return "⚠️ Anomalie ML"
@@ -354,8 +377,11 @@ def name_cluster(centroid: np.ndarray, raw_stats: dict) -> str:
if asn_cloud > 0.85 and s[4] < 0.15: if asn_cloud > 0.85 and s[4] < 0.15:
return "☁️ Infrastructure cloud" return "☁️ Infrastructure cloud"
# Pays à risque élevé sans autre signal # Pays à risque élevé sans autre signal
if country_risk > 0.60: if country_risk_v > 0.60:
return "🌏 Trafic suspect (pays)" return "🌏 Trafic suspect (pays)"
# Navigateur légitime : tous les headers présents
if accept_lang > 0.7 and accept_enc > 0.7 and sec_fetch > 0.6 and hdr_count > 0.5:
return "🌐 Navigateur légitime"
# OS fingerprinting # OS fingerprinting
if s[3] > 0.85 and ttl_raw > 120: if s[3] > 0.85 and ttl_raw > 120:
return "🖥️ Windows" return "🖥️ Windows"
@@ -372,21 +398,32 @@ def name_cluster(centroid: np.ndarray, raw_stats: dict) -> str:
def risk_score_from_centroid(centroid: np.ndarray) -> float: def risk_score_from_centroid(centroid: np.ndarray) -> float:
""" """
Score de risque [0,1] agrégé depuis le centroïde (espace original [0,1]). Score de risque [0,1] depuis le centroïde (espace original [0,1]).
Intègre pays et infrastructure cloud. Intègre pays, infrastructure cloud et profil headers HTTP.
Poids calibrés pour sommer à 1.0.
""" """
s = centroid s = centroid
country_risk = s[21] if len(s) > 21 else 0.0 country_risk_v = s[21] if len(s) > 21 else 0.0
asn_cloud = s[22] if len(s) > 22 else 0.0 asn_cloud = s[22] if len(s) > 22 else 0.0
# Absence de header = risque → inverser (1 - présence)
no_accept_lang = 1.0 - (s[23] if len(s) > 23 else 1.0)
no_encoding = 1.0 - (s[24] if len(s) > 24 else 1.0)
no_sec_fetch = 1.0 - (s[25] if len(s) > 25 else 0.0)
# Peu de headers → bot : max risque quand hdr_count=0
few_headers = 1.0 - (s[26] if len(s) > 26 else 0.5)
return float(np.clip( return float(np.clip(
0.30 * s[4] + # score ML anomalie (principal) 0.28 * s[4] + # score ML anomalie (principal)
0.12 * s[6] + # fuzzing 0.10 * s[6] + # fuzzing
0.12 * s[16] + # UA-CH mismatch 0.08 * s[16] + # UA-CH mismatch
0.08 * s[7] + # headless 0.07 * s[7] + # headless
0.08 * s[5] + # vélocité 0.06 * s[5] + # vélocité
0.08 * s[9] + # IP-ID zéro 0.06 * s[9] + # IP-ID zéro
0.12 * country_risk + # risque pays source 0.10 * country_risk_v+ # risque pays source
0.10 * asn_cloud, # infrastructure cloud/VPN 0.07 * asn_cloud + # infrastructure cloud/VPN
0.05 * no_accept_lang+ # absence Accept-Language
0.05 * no_encoding + # absence Accept-Encoding
0.04 * no_sec_fetch + # absence Sec-Fetch (pas un vrai navigateur)
0.04 * few_headers, # très peu de headers (scanner/curl)
0.0, 1.0 0.0, 1.0
)) ))