diff --git a/.env.example b/.env.example
new file mode 100644
index 0000000..6a04ac8
--- /dev/null
+++ b/.env.example
@@ -0,0 +1,69 @@
+# ======================================================
+# Homelab Automation Dashboard — Environment Variables
+# ======================================================
+# Copy this file to .env and fill in the values.
+# DO NOT commit the .env file with real credentials!
+
+# --- General ---
+TZ="America/Montreal"
+DEBUG_MODE=NO
+
+# --- API Authentication ---
+# REQUIRED: Set a strong, unique API key
+API_KEY=CHANGE_ME_TO_A_STRONG_API_KEY
+
+# --- JWT Authentication ---
+# REQUIRED: Set a strong secret key (min 32 chars)
+JWT_SECRET_KEY=CHANGE_ME_TO_A_STRONG_SECRET_KEY_MIN_32_CHARS
+JWT_EXPIRE_MINUTES=60
+
+# --- Database ---
+DATABASE_URL=sqlite+aiosqlite:///./data/homelab.db
+DB_PATH=./data/homelab.db
+# DB_ENGINE=mysql
+# MYSQL_HOST=mysql
+# MYSQL_USER=homelab
+# MYSQL_PASSWORD=CHANGE_ME
+# DB_AUTO_MIGRATE=true
+
+# --- Logging ---
+LOGS_DIR=./logs/Server_log
+DIR_LOGS_TASKS=./logs/tasks_logs
+
+# --- Ansible ---
+ANSIBLE_INVENTORY=./ansible/inventory
+ANSIBLE_PLAYBOOKS=./ansible/playbooks
+ANSIBLE_GROUP_VARS=./ansible/inventory/group_vars
+# ANSIBLE_CONFIG=/path/to/ansible.cfg
+
+# --- SSH ---
+SSH_USER=automation
+SSH_REMOTE_USER=automation
+SSH_KEY_DIR=~/.ssh
+SSH_KEY_PATH=~/.ssh/id_automation_ansible
+
+# --- CORS ---
+# Comma-separated list of allowed origins (no wildcard in production!)
+CORS_ORIGINS=http://localhost:3000,http://localhost:8008
+
+# --- Notifications (ntfy) ---
+NTFY_BASE_URL=https://ntfy.sh
+NTFY_DEFAULT_TOPIC=homelab-events
+NTFY_ENABLED=true
+NTFY_MSG_TYPE=ERR
+NTFY_TIMEOUT=5
+# NTFY_USERNAME=
+# NTFY_PASSWORD=CHANGE_ME
+# NTFY_TOKEN=CHANGE_ME
+
+# --- Terminal SSH Web ---
+TERMINAL_SESSION_TTL_MINUTES=30
+TERMINAL_TTYD_INTERFACE=eth0
+TERMINAL_MAX_SESSIONS_PER_USER=3
+TERMINAL_SESSION_IDLE_TIMEOUT_SECONDS=120
+TERMINAL_HEARTBEAT_INTERVAL_SECONDS=15
+TERMINAL_GC_INTERVAL_SECONDS=30
+TERMINAL_PORT_RANGE_START=7682
+TERMINAL_PORT_RANGE_END=7699
+TERMINAL_SSH_USER=automation
+TERMINAL_COMMAND_RETENTION_DAYS=30
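These keys are read by the backend at startup; a minimal sketch of how they might be consumed (illustrative only, not the dashboard's actual settings loader):

```python
import os
from pathlib import Path

# Hypothetical sketch: names mirror the keys above, defaults are illustrative.
API_KEY = os.environ.get("API_KEY", "")
JWT_SECRET_KEY = os.environ.get("JWT_SECRET_KEY", "")
if len(JWT_SECRET_KEY) < 32:
    raise RuntimeError("JWT_SECRET_KEY must be at least 32 characters")

# The comma-separated origins become a list for the CORS middleware.
CORS_ORIGINS = [o.strip() for o in os.environ.get("CORS_ORIGINS", "").split(",") if o.strip()]
DB_PATH = Path(os.environ.get("DB_PATH", "./data/homelab.db"))
```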
diff --git a/ansible.zip b/ansible.zip
new file mode 100644
index 0000000..699aae6
Binary files /dev/null and b/ansible.zip differ
diff --git a/app/app_optimized.py b/app/app_optimized.py
deleted file mode 100644
index 9211d47..0000000
--- a/app/app_optimized.py
+++ /dev/null
@@ -1,6585 +0,0 @@
-"""
-Homelab Automation Dashboard - Backend Optimisé
-API REST moderne avec FastAPI pour la gestion d'homelab
-"""
-
-from datetime import datetime, timezone, timedelta
-from pathlib import Path
-from time import perf_counter, time
-import os
-import re
-import shutil
-import subprocess
-import sqlite3
-import yaml
-from abc import ABC, abstractmethod
-from typing import Literal, Any, List, Dict, Optional
-from threading import Lock
-import asyncio
-import json
-import uuid
-
-# APScheduler imports
-from apscheduler.schedulers.asyncio import AsyncIOScheduler
-from apscheduler.triggers.cron import CronTrigger
-from apscheduler.triggers.date import DateTrigger
-from apscheduler.jobstores.memory import MemoryJobStore
-from apscheduler.executors.asyncio import AsyncIOExecutor
-from croniter import croniter
-import pytz
-
-from fastapi import FastAPI, HTTPException, Depends, Request, Form, WebSocket, WebSocketDisconnect
-from fastapi.responses import HTMLResponse, JSONResponse, FileResponse, Response
-from fastapi.security import APIKeyHeader
-from fastapi.middleware.cors import CORSMiddleware
-from fastapi.staticfiles import StaticFiles
-from io import BytesIO
-from xml.sax.saxutils import escape as _xml_escape
-from pydantic import BaseModel, Field, field_validator, ConfigDict
-from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine, async_sessionmaker
-from sqlalchemy import select
-from app.models.database import get_db, async_session_maker # type: ignore
-from app.crud.host import HostRepository # type: ignore
-from app.crud.bootstrap_status import BootstrapStatusRepository # type: ignore
-from app.crud.log import LogRepository # type: ignore
-from app.crud.task import TaskRepository # type: ignore
-from app.crud.schedule import ScheduleRepository # type: ignore
-from app.crud.schedule_run import ScheduleRunRepository # type: ignore
-from app.schemas.notification import NotificationRequest, NotificationResponse # type: ignore
-
-BASE_DIR = Path(__file__).resolve().parent
-
-# Advanced application configuration
-app = FastAPI(
- title="Homelab Automation Dashboard API",
- version="1.0.0",
- description="API REST moderne pour la gestion automatique d'homelab",
- docs_url="/api/docs",
- redoc_url="/api/redoc"
-)
-
-# CORS middleware for development
-app.add_middleware(
- CORSMiddleware,
-    allow_origins=["*"], # Restrict in production
- allow_credentials=True,
- allow_methods=["*"],
- allow_headers=["*"],
-)
-
-app.mount("/static", StaticFiles(directory=BASE_DIR, html=False), name="static")
-
-# Paths and environment variables
-LOGS_DIR = Path(os.environ.get("LOGS_DIR", "/logs"))
-ANSIBLE_DIR = BASE_DIR.parent / "ansible"
-SSH_KEY_PATH = os.environ.get("SSH_KEY_PATH", str(Path.home() / ".ssh" / "id_rsa"))
-SSH_USER = os.environ.get("SSH_USER", "automation")
-SSH_REMOTE_USER = os.environ.get("SSH_REMOTE_USER", "root")
-DB_PATH = LOGS_DIR / "homelab.db"
-API_KEY = os.environ.get("API_KEY", "dev-key-12345")
-# Directory for markdown task logs (YYYY/MM/DD layout)
-DIR_LOGS_TASKS = Path(os.environ.get("DIR_LOGS_TASKS", str(BASE_DIR.parent / "tasks_logs")))
-# JSON file for the ad-hoc command history
-ADHOC_HISTORY_FILE = DIR_LOGS_TASKS / ".adhoc_history.json"
-# JSON file for persisted statuses
-BOOTSTRAP_STATUS_FILE = DIR_LOGS_TASKS / ".bootstrap_status.json"
-HOST_STATUS_FILE = ANSIBLE_DIR / ".host_status.json"
-
-# Action-to-playbook mapping
-ACTION_PLAYBOOK_MAP = {
- 'upgrade': 'vm-upgrade.yml',
- 'reboot': 'vm-reboot.yml',
- 'health-check': 'health-check.yml',
- 'backup': 'backup-config.yml',
- 'bootstrap': 'bootstrap-host.yml',
-}
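
How these entries are consumed is not visible in this excerpt; a hypothetical sketch of the kind of invocation they imply (`run_action` is not a function from this file):

```python
import subprocess

# Hypothetical: turn a dashboard action into an ansible-playbook run,
# reusing ACTION_PLAYBOOK_MAP and ANSIBLE_DIR defined above.
def run_action(action: str, target: str = "all") -> subprocess.CompletedProcess:
    playbook = ACTION_PLAYBOOK_MAP[action]  # e.g. 'upgrade' -> 'vm-upgrade.yml'
    cmd = [
        "ansible-playbook",
        str(ANSIBLE_DIR / "playbooks" / playbook),
        "-i", str(ANSIBLE_DIR / "inventory"),
        "--limit", target,
    ]
    return subprocess.run(cmd, capture_output=True, text=True)
```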
-
-# API key handler
-api_key_header = APIKeyHeader(name="X-API-Key", auto_error=False)
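
The verification dependency that consumes this header lies further down in the deleted file; a minimal sketch of the usual pattern, assuming the `api_key_header` and `API_KEY` names above (`verify_api_key` itself is hypothetical):

```python
from fastapi import Depends, HTTPException

async def verify_api_key(key: Optional[str] = Depends(api_key_header)) -> str:
    # Compare the X-API-Key header against the configured API_KEY.
    if not key or key != API_KEY:
        raise HTTPException(status_code=401, detail="Invalid or missing API key")
    return key

# Routes would then opt in with:
# @app.get("/api/hosts", dependencies=[Depends(verify_api_key)])
```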
-
-# Enhanced Pydantic models
-class CommandResult(BaseModel):
- status: str
- return_code: int
- stdout: str
- stderr: Optional[str] = None
- execution_time: Optional[float] = None
- timestamp: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
-
-class Host(BaseModel):
- id: str
- name: str
- ip: str
- status: Literal["online", "offline", "warning"]
- os: str
- last_seen: Optional[datetime] = None
- created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
-    groups: List[str] = [] # Ansible groups this host belongs to
-    bootstrap_ok: bool = False # Whether bootstrap completed successfully
-    bootstrap_date: Optional[datetime] = None # Date of the last successful bootstrap
-
- class Config:
- json_encoders = {
- datetime: lambda v: v.isoformat()
- }
-
-class Task(BaseModel):
- id: str
- name: str
- host: str
- status: Literal["pending", "running", "completed", "failed", "cancelled"]
- progress: int = Field(ge=0, le=100, default=0)
- start_time: Optional[datetime] = None
- end_time: Optional[datetime] = None
- duration: Optional[str] = None
- output: Optional[str] = None
- error: Optional[str] = None
-
- class Config:
- json_encoders = {
- datetime: lambda v: v.isoformat() if v else None
- }
-
-class LogEntry(BaseModel):
- id: int
- timestamp: datetime
- level: Literal["DEBUG", "INFO", "WARN", "ERROR"]
- message: str
- source: Optional[str] = None
- host: Optional[str] = None
-
- class Config:
- json_encoders = {
- datetime: lambda v: v.isoformat()
- }
-
-class SystemMetrics(BaseModel):
- online_hosts: int
- total_tasks: int
- success_rate: float
- uptime: float
- cpu_usage: float
- memory_usage: float
- disk_usage: float
-
-class HealthCheck(BaseModel):
- host: str
- ssh_ok: bool = False
- ansible_ok: bool = False
- sudo_ok: bool = False
- reachable: bool = False
- error_message: Optional[str] = None
- response_time: Optional[float] = None
- cached: bool = False
- cache_age: int = 0
-
-class AnsibleExecutionRequest(BaseModel):
- playbook: str = Field(..., description="Nom du playbook à exécuter")
- target: str = Field(default="all", description="Hôte ou groupe cible")
- extra_vars: Optional[Dict[str, Any]] = Field(default=None, description="Variables supplémentaires")
- check_mode: bool = Field(default=False, description="Mode dry-run (--check)")
- verbose: bool = Field(default=False, description="Mode verbeux")
-
-class AnsibleInventoryHost(BaseModel):
- name: str
- ansible_host: str
- group: str
- groups: List[str] = [] # All groups this host belongs to
- vars: Dict[str, Any] = {}
-
-class TaskRequest(BaseModel):
- host: Optional[str] = Field(default=None, description="Hôte cible")
- group: Optional[str] = Field(default=None, description="Groupe cible")
- action: str = Field(..., description="Action à exécuter")
- cmd: Optional[str] = Field(default=None, description="Commande personnalisée")
- extra_vars: Optional[Dict[str, Any]] = Field(default=None, description="Variables Ansible")
- tags: Optional[List[str]] = Field(default=None, description="Tags Ansible")
- dry_run: bool = Field(default=False, description="Mode simulation")
- ssh_user: Optional[str] = Field(default=None, description="Utilisateur SSH")
- ssh_password: Optional[str] = Field(default=None, description="Mot de passe SSH")
-
- @field_validator('action')
- @classmethod
- def validate_action(cls, v: str) -> str:
- valid_actions = ['upgrade', 'reboot', 'health-check', 'backup', 'deploy', 'rollback', 'maintenance', 'bootstrap']
- if v not in valid_actions:
- raise ValueError(f'Action doit être l\'une de: {", ".join(valid_actions)}')
- return v
-
-class HostRequest(BaseModel):
- name: str = Field(..., min_length=3, max_length=100, description="Hostname (ex: server.domain.home)")
-    # ansible_host may be an IPv4 address or a resolvable hostname → drop the pattern constraint
- ip: Optional[str] = Field(default=None, description="Adresse IP ou hostname (optionnel si hostname résolvable)")
- os: str = Field(default="Linux", min_length=3, max_length=50)
- ssh_user: Optional[str] = Field(default="root", min_length=1, max_length=50)
- ssh_port: int = Field(default=22, ge=1, le=65535)
- description: Optional[str] = Field(default=None, max_length=200)
- env_group: str = Field(..., description="Groupe d'environnement (ex: env_homelab, env_prod)")
- role_groups: List[str] = Field(default=[], description="Groupes de rôles (ex: role_proxmox, role_sbc)")
-
-class HostUpdateRequest(BaseModel):
- """Requête de mise à jour d'un hôte"""
- env_group: Optional[str] = Field(default=None, description="Nouveau groupe d'environnement")
- role_groups: Optional[List[str]] = Field(default=None, description="Nouveaux groupes de rôles")
- ansible_host: Optional[str] = Field(default=None, description="Nouvelle adresse ansible_host")
-
-class GroupRequest(BaseModel):
- """Requête pour créer un groupe"""
- name: str = Field(..., min_length=3, max_length=50, description="Nom du groupe (ex: env_prod, role_web)")
- type: str = Field(..., description="Type de groupe: 'env' ou 'role'")
-
- @field_validator('name')
- @classmethod
- def validate_name(cls, v: str) -> str:
- import re
- if not re.match(r'^[a-zA-Z0-9_-]+$', v):
- raise ValueError('Le nom du groupe ne peut contenir que des lettres, chiffres, tirets et underscores')
- return v
-
- @field_validator('type')
- @classmethod
- def validate_type(cls, v: str) -> str:
- if v not in ['env', 'role']:
- raise ValueError("Le type doit être 'env' ou 'role'")
- return v
-
-class GroupUpdateRequest(BaseModel):
- """Requête pour modifier un groupe"""
- new_name: str = Field(..., min_length=3, max_length=50, description="Nouveau nom du groupe")
-
- @field_validator('new_name')
- @classmethod
- def validate_new_name(cls, v: str) -> str:
- import re
- if not re.match(r'^[a-zA-Z0-9_-]+$', v):
- raise ValueError('Le nom du groupe ne peut contenir que des lettres, chiffres, tirets et underscores')
- return v
-
-class GroupDeleteRequest(BaseModel):
- """Requête pour supprimer un groupe"""
- move_hosts_to: Optional[str] = Field(default=None, description="Groupe vers lequel déplacer les hôtes")
-
-class AdHocCommandRequest(BaseModel):
- """Requête pour exécuter une commande ad-hoc Ansible"""
- target: str = Field(..., description="Hôte ou groupe cible")
- command: str = Field(..., description="Commande shell à exécuter")
- module: str = Field(default="shell", description="Module Ansible (shell, command, raw)")
- become: bool = Field(default=False, description="Exécuter avec sudo")
- timeout: int = Field(default=60, ge=5, le=600, description="Timeout en secondes")
- category: Optional[str] = Field(default="default", description="Catégorie d'historique pour cette commande")
-
-class AdHocCommandResult(BaseModel):
- """Résultat d'une commande ad-hoc"""
- target: str
- command: str
- success: bool
- return_code: int
- stdout: str
- stderr: Optional[str] = None
- duration: float
- hosts_results: Optional[Dict[str, Any]] = None
-
-class AdHocHistoryEntry(BaseModel):
- """Entrée dans l'historique des commandes ad-hoc"""
- id: str
- command: str
- target: str
- module: str
- become: bool
- category: str = "default"
- description: Optional[str] = None
- created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
- last_used: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
- use_count: int = 1
-
-class AdHocHistoryCategory(BaseModel):
- """Catégorie pour organiser les commandes ad-hoc"""
- name: str
- description: Optional[str] = None
- color: str = "#7c3aed"
- icon: str = "fa-folder"
-
-class TaskLogFile(BaseModel):
- """Représentation d'un fichier de log de tâche"""
- id: str
- filename: str
- path: str
- task_name: str
- target: str
- status: str
- date: str # Format YYYY-MM-DD
- year: str
- month: str
- day: str
- created_at: datetime
- size_bytes: int
-    # New fields for richer display
-    start_time: Optional[str] = None # ISO or HH:MM:SS format
-    end_time: Optional[str] = None # ISO or HH:MM:SS format
-    duration: Optional[str] = None # Formatted duration
-    duration_seconds: Optional[int] = None # Duration in seconds
-    hosts: List[str] = [] # Hosts involved
-    category: Optional[str] = None # Category (Playbook, Ad-hoc, etc.)
-    subcategory: Optional[str] = None # Subcategory
-    target_type: Optional[str] = None # Target type: 'host', 'group', 'role'
-    source_type: Optional[str] = None # Source: 'scheduled', 'manual', 'adhoc'
-
-class TasksFilterParams(BaseModel):
- """Paramètres de filtrage des tâches"""
- status: Optional[str] = None # pending, running, completed, failed, all
- year: Optional[str] = None
- month: Optional[str] = None
- day: Optional[str] = None
- hour_start: Optional[str] = None # Heure de début HH:MM
- hour_end: Optional[str] = None # Heure de fin HH:MM
- target: Optional[str] = None
- source_type: Optional[str] = None # scheduled, manual, adhoc
- search: Optional[str] = None
- limit: int = 50 # Pagination côté serveur
- offset: int = 0
-
-# ===== SCHEDULER MODELS =====
-
-class ScheduleRecurrence(BaseModel):
- """Configuration de récurrence pour un schedule"""
- type: Literal["daily", "weekly", "monthly", "custom"] = "daily"
- time: str = Field(default="02:00", description="Heure d'exécution HH:MM")
- days: Optional[List[int]] = Field(default=None, description="Jours de la semaine (1-7, lundi=1) pour weekly")
- day_of_month: Optional[int] = Field(default=None, ge=1, le=31, description="Jour du mois (1-31) pour monthly")
- cron_expression: Optional[str] = Field(default=None, description="Expression cron pour custom")
-
-class Schedule(BaseModel):
- """Modèle d'un schedule de playbook"""
- id: str = Field(default_factory=lambda: f"sched_{uuid.uuid4().hex[:12]}")
- name: str = Field(..., min_length=3, max_length=100, description="Nom du schedule")
- description: Optional[str] = Field(default=None, max_length=500)
- playbook: str = Field(..., description="Nom du playbook à exécuter")
- target_type: Literal["group", "host"] = Field(default="group", description="Type de cible")
- target: str = Field(default="all", description="Nom du groupe ou hôte cible")
- extra_vars: Optional[Dict[str, Any]] = Field(default=None, description="Variables supplémentaires")
- schedule_type: Literal["once", "recurring"] = Field(default="recurring")
- recurrence: Optional[ScheduleRecurrence] = Field(default=None)
- timezone: str = Field(default="America/Montreal", description="Fuseau horaire")
- start_at: Optional[datetime] = Field(default=None, description="Date de début (optionnel)")
- end_at: Optional[datetime] = Field(default=None, description="Date de fin (optionnel)")
- next_run_at: Optional[datetime] = Field(default=None, description="Prochaine exécution calculée")
- last_run_at: Optional[datetime] = Field(default=None, description="Dernière exécution")
- last_status: Literal["success", "failed", "running", "never"] = Field(default="never")
- enabled: bool = Field(default=True, description="Schedule actif ou en pause")
- retry_on_failure: int = Field(default=0, ge=0, le=3, description="Nombre de tentatives en cas d'échec")
- timeout: int = Field(default=3600, ge=60, le=86400, description="Timeout en secondes")
- notification_type: Literal["none", "all", "errors"] = Field(default="all", description="Type de notification: none, all, errors")
- tags: List[str] = Field(default=[], description="Tags pour catégorisation")
- run_count: int = Field(default=0, description="Nombre total d'exécutions")
- success_count: int = Field(default=0, description="Nombre de succès")
- failure_count: int = Field(default=0, description="Nombre d'échecs")
- created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
- updated_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
-
- class Config:
- json_encoders = {
- datetime: lambda v: v.isoformat() if v else None
- }
-
- @field_validator('recurrence', mode='before')
- @classmethod
- def validate_recurrence(cls, v, info):
-        # If schedule_type is 'once', recurrence is not required
- return v
-
-class ScheduleRun(BaseModel):
- """Historique d'une exécution de schedule"""
- id: str = Field(default_factory=lambda: f"run_{uuid.uuid4().hex[:12]}")
- schedule_id: str = Field(..., description="ID du schedule parent")
- task_id: Optional[str] = Field(default=None, description="ID de la tâche créée")
- started_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
- finished_at: Optional[datetime] = Field(default=None)
- status: Literal["running", "success", "failed", "canceled"] = Field(default="running")
- duration_seconds: Optional[float] = Field(default=None)
- hosts_impacted: int = Field(default=0)
- error_message: Optional[str] = Field(default=None)
- retry_attempt: int = Field(default=0, description="Numéro de la tentative (0 = première)")
-
- class Config:
- json_encoders = {
- datetime: lambda v: v.isoformat() if v else None
- }
-
-class ScheduleCreateRequest(BaseModel):
- """Requête de création d'un schedule"""
- name: str = Field(..., min_length=3, max_length=100)
- description: Optional[str] = Field(default=None, max_length=500)
- playbook: str = Field(...)
- target_type: Literal["group", "host"] = Field(default="group")
- target: str = Field(default="all")
- extra_vars: Optional[Dict[str, Any]] = Field(default=None)
- schedule_type: Literal["once", "recurring"] = Field(default="recurring")
- recurrence: Optional[ScheduleRecurrence] = Field(default=None)
- timezone: str = Field(default="America/Montreal")
- start_at: Optional[datetime] = Field(default=None)
- end_at: Optional[datetime] = Field(default=None)
- enabled: bool = Field(default=True)
- retry_on_failure: int = Field(default=0, ge=0, le=3)
- timeout: int = Field(default=3600, ge=60, le=86400)
- notification_type: Literal["none", "all", "errors"] = Field(default="all")
- tags: List[str] = Field(default=[])
-
- @field_validator('timezone')
- @classmethod
- def validate_timezone(cls, v: str) -> str:
- try:
- pytz.timezone(v)
- return v
- except pytz.exceptions.UnknownTimeZoneError:
- raise ValueError(f"Fuseau horaire invalide: {v}")
-
-class ScheduleUpdateRequest(BaseModel):
- """Requête de mise à jour d'un schedule"""
- name: Optional[str] = Field(default=None, min_length=3, max_length=100)
- description: Optional[str] = Field(default=None, max_length=500)
- playbook: Optional[str] = Field(default=None)
- target_type: Optional[Literal["group", "host"]] = Field(default=None)
- target: Optional[str] = Field(default=None)
- extra_vars: Optional[Dict[str, Any]] = Field(default=None)
- schedule_type: Optional[Literal["once", "recurring"]] = Field(default=None)
- recurrence: Optional[ScheduleRecurrence] = Field(default=None)
- timezone: Optional[str] = Field(default=None)
- start_at: Optional[datetime] = Field(default=None)
- end_at: Optional[datetime] = Field(default=None)
- enabled: Optional[bool] = Field(default=None)
- retry_on_failure: Optional[int] = Field(default=None, ge=0, le=3)
- timeout: Optional[int] = Field(default=None, ge=60, le=86400)
- notification_type: Optional[Literal["none", "all", "errors"]] = Field(default=None)
- tags: Optional[List[str]] = Field(default=None)
-
-class ScheduleStats(BaseModel):
- """Statistiques globales des schedules"""
- total: int = 0
- active: int = 0
- paused: int = 0
- expired: int = 0
- next_execution: Optional[datetime] = None
- next_schedule_name: Optional[str] = None
- failures_24h: int = 0
- executions_24h: int = 0
- success_rate_7d: float = 0.0
-
-# ===== MARKDOWN LOGGING SERVICE =====
-
-class TaskLogService:
- """Service pour gérer les logs de tâches en fichiers markdown"""
-
- def __init__(self, base_dir: Path):
- self.base_dir = base_dir
- self._ensure_base_dir()
-        # Metadata cache to avoid re-reading files
- self._metadata_cache: Dict[str, Dict[str, Any]] = {}
- self._cache_file = base_dir / ".metadata_cache.json"
-        # Full log index (built once, updated incrementally)
- self._logs_index: List[Dict[str, Any]] = []
- self._index_built = False
- self._last_scan_time = 0.0
- self._load_cache()
-
- def _ensure_base_dir(self):
- """Crée le répertoire de base s'il n'existe pas"""
- self.base_dir.mkdir(parents=True, exist_ok=True)
-
- def _load_cache(self):
- """Charge le cache des métadonnées depuis le fichier"""
- try:
- if self._cache_file.exists():
- import json
- with open(self._cache_file, 'r', encoding='utf-8') as f:
- self._metadata_cache = json.load(f)
- except Exception:
- self._metadata_cache = {}
-
- def _save_cache(self):
- """Sauvegarde le cache des métadonnées dans le fichier"""
- try:
- import json
- with open(self._cache_file, 'w', encoding='utf-8') as f:
- json.dump(self._metadata_cache, f, ensure_ascii=False)
- except Exception:
- pass
-
- def _get_cached_metadata(self, file_path: str, file_mtime: float) -> Optional[Dict[str, Any]]:
- """Récupère les métadonnées du cache si elles sont valides"""
- cached = self._metadata_cache.get(file_path)
- if cached and cached.get('_mtime') == file_mtime:
- return cached
- return None
-
- def _cache_metadata(self, file_path: str, file_mtime: float, metadata: Dict[str, Any]):
- """Met en cache les métadonnées d'un fichier"""
- metadata['_mtime'] = file_mtime
- self._metadata_cache[file_path] = metadata
-
- def _build_index(self, force: bool = False):
- """Construit l'index complet des logs (appelé une seule fois au démarrage ou après 60s)"""
- import time
- current_time = time.time()
-
-        # Only rebuild when necessary (at most every 60 seconds, or when forced)
- if self._index_built and not force and (current_time - self._last_scan_time) < 60:
- return
-
- self._logs_index = []
- cache_updated = False
-
- if not self.base_dir.exists():
- self._index_built = True
- self._last_scan_time = current_time
- return
-
-        # Walk every file
- for year_dir in self.base_dir.iterdir():
- if not year_dir.is_dir() or not year_dir.name.isdigit():
- continue
- for month_dir in year_dir.iterdir():
- if not month_dir.is_dir():
- continue
- for day_dir in month_dir.iterdir():
- if not day_dir.is_dir():
- continue
- for md_file in day_dir.glob("*.md"):
- try:
- entry = self._index_file(md_file)
- if entry:
- if entry.get('_cache_updated'):
- cache_updated = True
- del entry['_cache_updated']
- self._logs_index.append(entry)
- except Exception:
- continue
-
-        # Sort by descending date
- self._logs_index.sort(key=lambda x: x.get('created_at', 0), reverse=True)
-
- self._index_built = True
- self._last_scan_time = current_time
-
- if cache_updated:
- self._save_cache()
-
- def _index_file(self, md_file: Path) -> Optional[Dict[str, Any]]:
- """Indexe un fichier markdown et retourne ses métadonnées"""
- parts = md_file.stem.split("_")
- if len(parts) < 4:
- return None
-
- file_status = parts[-1]
- file_hour_str = parts[1] if len(parts) > 1 else "000000"
-
-        # Extract the date from the path
-        try:
-            rel_path = md_file.relative_to(self.base_dir)
-            path_parts = rel_path.parts
-            if len(path_parts) >= 3:
-                log_year, log_month, log_day = path_parts[0], path_parts[1], path_parts[2]
-            else:
-                return None
-        except Exception:
-            return None
-
- stat = md_file.stat()
- file_path_str = str(md_file)
- file_mtime = stat.st_mtime
-
-        # Check the cache
- cached = self._get_cached_metadata(file_path_str, file_mtime)
- cache_updated = False
-
- if cached:
- task_name = cached.get('task_name', '')
- file_target = cached.get('target', '')
- metadata = cached
- else:
-            # Read the file
- if len(parts) >= 5:
- file_target = parts[3]
- task_name_from_file = "_".join(parts[4:-1]) if len(parts) > 5 else parts[4] if len(parts) > 4 else "unknown"
- else:
- file_target = ""
- task_name_from_file = "_".join(parts[3:-1]) if len(parts) > 4 else parts[3] if len(parts) > 3 else "unknown"
-
- try:
- content = md_file.read_text(encoding='utf-8')
- metadata = self._parse_markdown_metadata(content)
-
- task_name_match = re.search(r'^#\s*[✅❌🔄⏳🚫❓]?\s*(.+)$', content, re.MULTILINE)
- if task_name_match:
- task_name = task_name_match.group(1).strip()
- else:
- task_name = task_name_from_file.replace("_", " ")
-
- target_match = re.search(r'\|\s*\*\*Cible\*\*\s*\|\s*`([^`]+)`', content)
- if target_match:
- file_target = target_match.group(1).strip()
-
- detected_source = self._detect_source_type(task_name, content)
- metadata['source_type'] = detected_source
- metadata['task_name'] = task_name
- metadata['target'] = file_target
-
- self._cache_metadata(file_path_str, file_mtime, metadata)
- cache_updated = True
- except Exception:
- metadata = {'source_type': 'manual'}
- task_name = task_name_from_file.replace("_", " ")
-
- return {
- 'id': parts[0] + "_" + parts[1] + "_" + parts[2] if len(parts) > 2 else parts[0],
- 'filename': md_file.name,
- 'path': file_path_str,
- 'task_name': task_name,
- 'target': file_target,
- 'status': file_status,
- 'date': f"{log_year}-{log_month}-{log_day}",
- 'year': log_year,
- 'month': log_month,
- 'day': log_day,
- 'hour_str': file_hour_str,
- 'created_at': stat.st_ctime,
- 'size_bytes': stat.st_size,
- 'start_time': metadata.get('start_time'),
- 'end_time': metadata.get('end_time'),
- 'duration': metadata.get('duration'),
- 'duration_seconds': metadata.get('duration_seconds'),
- 'hosts': metadata.get('hosts', []),
- 'category': metadata.get('category'),
- 'subcategory': metadata.get('subcategory'),
- 'target_type': metadata.get('target_type'),
- 'source_type': metadata.get('source_type'),
- '_cache_updated': cache_updated
- }
-
- def invalidate_index(self):
- """Force la reconstruction de l'index au prochain appel"""
- self._index_built = False
-
-    def _get_date_path(self, dt: Optional[datetime] = None) -> Path:
-        """Return the directory path for a given date (YYYY/MM/DD)"""
- if dt is None:
- dt = datetime.now(timezone.utc)
-
-        # IMPORTANT: use the local timezone (America/Montreal) to pick the
-        # log folder's date, so that tasks executed in the evening do not
-        # roll over to the next day because of UTC.
-        import pytz
-        local_tz = pytz.timezone("America/Montreal")
-
-        if dt.tzinfo is None:
-            # A naive datetime is assumed to already be in local time
-            dt_local = local_tz.localize(dt)
-        else:
-            # Otherwise convert it to the local timezone
-            dt_local = dt.astimezone(local_tz)
-
- year = dt_local.strftime("%Y")
- month = dt_local.strftime("%m")
- day = dt_local.strftime("%d")
- return self.base_dir / year / month / day
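
A quick worked example of that rollover guard: 01:30 UTC on 2025-03-02 is still 20:30 on 2025-03-01 in Montreal (EST, UTC-5), so the log lands in the previous day's folder.

```python
from datetime import datetime, timezone
import pytz

# Illustrates the folder-date rollover handled by _get_date_path.
dt_utc = datetime(2025, 3, 2, 1, 30, tzinfo=timezone.utc)
local = dt_utc.astimezone(pytz.timezone("America/Montreal"))
print(local.strftime("%Y/%m/%d"))  # -> 2025/03/01, not 2025/03/02
```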
-
- def _generate_task_id(self) -> str:
- """Génère un ID unique pour une tâche"""
- import uuid
- return f"task_{datetime.now(timezone.utc).strftime('%H%M%S')}_{uuid.uuid4().hex[:6]}"
-
- def save_task_log(self, task: 'Task', output: str = "", error: str = "", source_type: str = None) -> str:
- """Sauvegarde un log de tâche en markdown et retourne le chemin.
-
- Args:
- task: L'objet tâche
- output: La sortie de la tâche
- error: Les erreurs éventuelles
- source_type: Type de source ('scheduled', 'manual', 'adhoc')
- """
- dt = task.start_time or datetime.now(timezone.utc)
- date_path = self._get_date_path(dt)
- date_path.mkdir(parents=True, exist_ok=True)
-
-        # Build the file name
- task_id = self._generate_task_id()
- status_emoji = {
- "completed": "✅",
- "failed": "❌",
- "running": "🔄",
- "pending": "⏳",
- "cancelled": "🚫"
- }.get(task.status, "❓")
-
-        # Detect the source type when not provided
- if not source_type:
- task_name_lower = task.name.lower()
- if '[planifié]' in task_name_lower or '[scheduled]' in task_name_lower:
- source_type = 'scheduled'
- elif 'ad-hoc' in task_name_lower or 'adhoc' in task_name_lower:
- source_type = 'adhoc'
- else:
- source_type = 'manual'
-
-        # Display labels for the source type (French values: parsed back later)
- source_labels = {'scheduled': 'Planifié', 'manual': 'Manuel', 'adhoc': 'Ad-hoc'}
- source_label = source_labels.get(source_type, 'Manuel')
-
- # Sanitize task name and host for filename
- safe_name = task.name.replace(' ', '_').replace(':', '').replace('/', '-')[:50]
- safe_host = task.host.replace(' ', '_').replace(':', '').replace('/', '-')[:30] if task.host else 'unknown'
- filename = f"{task_id}_{safe_host}_{safe_name}_{task.status}.md"
- filepath = date_path / filename
-
-        # Build the markdown content
- md_content = f"""# {status_emoji} {task.name}
-
-## Informations
-
-| Propriété | Valeur |
-|-----------|--------|
-| **ID** | `{task.id}` |
-| **Nom** | {task.name} |
-| **Cible** | `{task.host}` |
-| **Statut** | {task.status} |
-| **Type** | {source_label} |
-| **Progression** | {task.progress}% |
-| **Début** | {task.start_time.isoformat() if task.start_time else 'N/A'} |
-| **Fin** | {task.end_time.isoformat() if task.end_time else 'N/A'} |
-| **Durée** | {task.duration or 'N/A'} |
-
-## Sortie
-
-```
-{output or task.output or '(Aucune sortie)'}
-```
-
-"""
- if error or task.error:
- md_content += f"""## Erreurs
-
-```
-{error or task.error}
-```
-
-"""
-
- md_content += f"""---
-*Généré automatiquement par Homelab Automation Dashboard*
-*Date: {datetime.now(timezone.utc).isoformat()}*
-"""
-
-        # Write the file
- filepath.write_text(md_content, encoding='utf-8')
-
-        # Invalidate the index so it is rebuilt on the next call
- self.invalidate_index()
-
- return str(filepath)
-
- def _parse_markdown_metadata(self, content: str) -> Dict[str, Any]:
- """Parse le contenu markdown pour extraire les métadonnées enrichies"""
- metadata = {
- 'start_time': None,
- 'end_time': None,
- 'duration': None,
- 'duration_seconds': None,
- 'hosts': [],
- 'category': None,
- 'subcategory': None,
- 'target_type': None,
- 'source_type': None
- }
-
-        # Extract the start and end times
- start_match = re.search(r'\|\s*\*\*Début\*\*\s*\|\s*([^|]+)', content)
- if start_match:
- start_val = start_match.group(1).strip()
- if start_val and start_val != 'N/A':
- metadata['start_time'] = start_val
-
- end_match = re.search(r'\|\s*\*\*Fin\*\*\s*\|\s*([^|]+)', content)
- if end_match:
- end_val = end_match.group(1).strip()
- if end_val and end_val != 'N/A':
- metadata['end_time'] = end_val
-
- duration_match = re.search(r'\|\s*\*\*Durée\*\*\s*\|\s*([^|]+)', content)
- if duration_match:
- dur_val = duration_match.group(1).strip()
- if dur_val and dur_val != 'N/A':
- metadata['duration'] = dur_val
-            # Convert to seconds when possible
- metadata['duration_seconds'] = self._parse_duration_to_seconds(dur_val)
-
-        # Extract the hosts from the Ansible output
-        # Pattern for hosts in PLAY RECAP or task results
- host_patterns = [
- r'^([a-zA-Z0-9][a-zA-Z0-9._-]+)\s*:\s*ok=', # PLAY RECAP format
- r'^\s*([a-zA-Z0-9][a-zA-Z0-9._-]+)\s*\|\s*(SUCCESS|CHANGED|FAILED|UNREACHABLE)', # Ad-hoc format
- ]
- hosts_found = set()
- for pattern in host_patterns:
- for match in re.finditer(pattern, content, re.MULTILINE):
- host = match.group(1).strip()
-                if host and ((len(host) > 2 and '.' in host) or len(host) > 5):
- hosts_found.add(host)
- metadata['hosts'] = sorted(list(hosts_found))
-
-        # Detect the category
- task_name_match = re.search(r'^#\s*[✅❌🔄⏳🚫❓]?\s*(.+)$', content, re.MULTILINE)
- if task_name_match:
- task_name = task_name_match.group(1).strip().lower()
- if 'playbook' in task_name:
- metadata['category'] = 'Playbook'
-                # Extract the subcategory from the name
- if 'health' in task_name:
- metadata['subcategory'] = 'Health Check'
- elif 'backup' in task_name:
- metadata['subcategory'] = 'Backup'
- elif 'upgrade' in task_name or 'update' in task_name:
- metadata['subcategory'] = 'Upgrade'
- elif 'bootstrap' in task_name:
- metadata['subcategory'] = 'Bootstrap'
- elif 'reboot' in task_name:
- metadata['subcategory'] = 'Reboot'
- elif 'ad-hoc' in task_name or 'adhoc' in task_name:
- metadata['category'] = 'Ad-hoc'
- else:
- metadata['category'] = 'Autre'
-
-        # Detect the target type
- target_match = re.search(r'\|\s*\*\*Cible\*\*\s*\|\s*`([^`]+)`', content)
- if target_match:
- target_val = target_match.group(1).strip()
- if target_val == 'all':
- metadata['target_type'] = 'group'
- elif target_val.startswith('env_') or target_val.startswith('role_'):
- metadata['target_type'] = 'group'
- elif '.' in target_val:
- metadata['target_type'] = 'host'
- else:
- metadata['target_type'] = 'group'
-
-        # Extract the source type from the markdown (when present)
- type_match = re.search(r'\|\s*\*\*Type\*\*\s*\|\s*([^|]+)', content)
- if type_match:
- type_val = type_match.group(1).strip().lower()
- if 'planifié' in type_val or 'scheduled' in type_val:
- metadata['source_type'] = 'scheduled'
- elif 'ad-hoc' in type_val or 'adhoc' in type_val:
- metadata['source_type'] = 'adhoc'
- elif 'manuel' in type_val or 'manual' in type_val:
- metadata['source_type'] = 'manual'
-
- return metadata
-
- def _parse_duration_to_seconds(self, duration_str: str) -> Optional[int]:
- """Convertit une chaîne de durée en secondes"""
- if not duration_str:
- return None
-
- total_seconds = 0
-        # Patterns: Xh Xm Xs, X:XX:XX, or Xs
-
- s_clean = duration_str.strip()
-
-        # Handle seconds-only values explicitly (possibly with decimals),
-        # e.g. "1.69s" or "2,5 s"
- sec_only_match = re.match(r'^(\d+(?:[\.,]\d+)?)\s*s$', s_clean)
- if sec_only_match:
- sec_val_str = sec_only_match.group(1).replace(',', '.')
- try:
- sec_val = float(sec_val_str)
- except ValueError:
- sec_val = 0.0
- return int(round(sec_val)) if sec_val > 0 else None
-
- # Format HH:MM:SS
- hms_match = re.match(r'^(\d+):(\d+):(\d+)$', s_clean)
- if hms_match:
- h, m, s = map(int, hms_match.groups())
- return h * 3600 + m * 60 + s
-
-        # h/m/s format (integers only, to avoid mis-parsing decimals)
- hours = re.search(r'(\d+)\s*h', s_clean)
- minutes = re.search(r'(\d+)\s*m', s_clean)
- seconds = re.search(r'(\d+)\s*s', s_clean)
-
- if hours:
- total_seconds += int(hours.group(1)) * 3600
- if minutes:
- total_seconds += int(minutes.group(1)) * 60
- if seconds:
- total_seconds += int(seconds.group(1))
-
- return total_seconds if total_seconds > 0 else None
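
A few sample conversions under the parsing rules above (illustrative only; `svc` is a hypothetical instance):

```python
svc = TaskLogService(Path("./tasks_logs"))   # hypothetical instance
svc._parse_duration_to_seconds("1h 2m 3s")   # -> 3723
svc._parse_duration_to_seconds("0:05:30")    # -> 330
svc._parse_duration_to_seconds("1.69s")      # -> 2 (rounded)
svc._parse_duration_to_seconds("N/A")        # -> None
```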
-
- def get_task_logs(self,
- year: str = None,
- month: str = None,
- day: str = None,
- status: str = None,
- target: str = None,
- category: str = None,
- source_type: str = None,
- hour_start: str = None,
- hour_end: str = None,
- limit: int = 50,
- offset: int = 0) -> tuple[List[TaskLogFile], int]:
- """Récupère la liste des logs de tâches avec filtrage et pagination.
-
- OPTIMISATION: Utilise un index en mémoire construit une seule fois,
- puis filtre rapidement sans relire les fichiers.
-
- Returns:
- tuple: (logs paginés, total count avant pagination)
- """
-        # Build the index if needed (once, then at most every 60 s)
- self._build_index()
-
-        # Convert the filter times to minutes for comparison
-        hour_start_minutes = None
-        hour_end_minutes = None
-        if hour_start:
-            try:
-                h, m = map(int, hour_start.split(':'))
-                hour_start_minutes = h * 60 + m
-            except Exception:
-                pass
-        if hour_end:
-            try:
-                h, m = map(int, hour_end.split(':'))
-                hour_end_minutes = h * 60 + m
-            except Exception:
-                pass
-
-        # Filter the index (fast: no file reads)
- filtered = []
- for entry in self._logs_index:
-            # Filter by date
- if year and entry['year'] != year:
- continue
- if month and entry['month'] != month:
- continue
- if day and entry['day'] != day:
- continue
-
-            # Filter by status
- if status and status != "all" and entry['status'] != status:
- continue
-
-            # Filter by time of day
-            if hour_start_minutes is not None or hour_end_minutes is not None:
-                try:
-                    file_hour_str = entry.get('hour_str', '000000')
-                    file_h = int(file_hour_str[:2])
-                    file_m = int(file_hour_str[2:4])
-                    file_minutes = file_h * 60 + file_m
-                    if hour_start_minutes is not None and file_minutes < hour_start_minutes:
-                        continue
-                    if hour_end_minutes is not None and file_minutes > hour_end_minutes:
-                        continue
-                except Exception:
-                    pass
-
-            # Filter by target
- if target and target != "all":
- file_target = entry.get('target', '')
- if file_target and target.lower() not in file_target.lower():
- continue
-
-            # Filter by category
- if category and category != "all":
- file_category = entry.get('category', '')
- if file_category and category.lower() not in file_category.lower():
- continue
-
-            # Filter by source type
- if source_type and source_type != "all":
- file_source = entry.get('source_type', '')
- if file_source != source_type:
- continue
-
- filtered.append(entry)
-
-        # Convert to TaskLogFile
- total_count = len(filtered)
- paginated = filtered[offset:offset + limit] if limit > 0 else filtered
-
- logs = [
- TaskLogFile(
- id=e['id'],
- filename=e['filename'],
- path=e['path'],
- task_name=e['task_name'],
- target=e['target'],
- status=e['status'],
- date=e['date'],
- year=e['year'],
- month=e['month'],
- day=e['day'],
- created_at=datetime.fromtimestamp(e['created_at'], tz=timezone.utc),
- size_bytes=e['size_bytes'],
- start_time=e.get('start_time'),
- end_time=e.get('end_time'),
- duration=e.get('duration'),
- duration_seconds=e.get('duration_seconds'),
- hosts=e.get('hosts', []),
- category=e.get('category'),
- subcategory=e.get('subcategory'),
- target_type=e.get('target_type'),
- source_type=e.get('source_type')
- )
- for e in paginated
- ]
-
- return logs, total_count
-
- def _detect_source_type(self, task_name: str, content: str) -> str:
- """Détecte le type de source d'une tâche: scheduled, manual, adhoc"""
- task_name_lower = task_name.lower()
- content_lower = content.lower()
-
-        # Detect scheduled tasks
- if '[planifié]' in task_name_lower or '[scheduled]' in task_name_lower:
- return 'scheduled'
- if 'schedule_id' in content_lower or 'planifié' in content_lower:
- return 'scheduled'
-
-        # Detect ad-hoc commands
- if 'ad-hoc' in task_name_lower or 'adhoc' in task_name_lower:
- return 'adhoc'
- if 'commande ad-hoc' in content_lower or 'ansible ad-hoc' in content_lower:
- return 'adhoc'
-        # Ad-hoc pattern: direct Ansible module (ping, shell, command, etc.)
- if re.search(r'\|\s*\*\*Module\*\*\s*\|', content):
- return 'adhoc'
-
-        # By default, this is a manual playbook run
- return 'manual'
-
- def get_available_dates(self) -> Dict[str, Any]:
- """Retourne la structure des dates disponibles pour le filtrage"""
- dates = {"years": {}}
-
- if not self.base_dir.exists():
- return dates
-
- for year_dir in sorted(self.base_dir.iterdir(), reverse=True):
- if year_dir.is_dir() and year_dir.name.isdigit():
- year = year_dir.name
- dates["years"][year] = {"months": {}}
-
- for month_dir in sorted(year_dir.iterdir(), reverse=True):
- if month_dir.is_dir() and month_dir.name.isdigit():
- month = month_dir.name
- dates["years"][year]["months"][month] = {"days": []}
-
- for day_dir in sorted(month_dir.iterdir(), reverse=True):
- if day_dir.is_dir() and day_dir.name.isdigit():
- day = day_dir.name
- count = len(list(day_dir.glob("*.md")))
- dates["years"][year]["months"][month]["days"].append({
- "day": day,
- "count": count
- })
-
- return dates
-
- def get_stats(self) -> Dict[str, int]:
- """Retourne les statistiques des tâches"""
- stats = {"total": 0, "completed": 0, "failed": 0, "running": 0, "pending": 0}
-
-        # Use limit=0 to fetch every log (no pagination)
- logs, _ = self.get_task_logs(limit=0)
- for log in logs:
- stats["total"] += 1
- if log.status in stats:
- stats[log.status] += 1
-
- return stats
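
A usage sketch of the service above, assuming `DIR_LOGS_TASKS` from earlier and a finished `Task` instance named `task` (both defined elsewhere in this file):

```python
# Illustrative usage of TaskLogService.
log_service = TaskLogService(DIR_LOGS_TASKS)
path = log_service.save_task_log(task, output="PLAY RECAP ...", source_type="manual")

# Query the 20 most recent failures without re-reading every file.
failed, total = log_service.get_task_logs(status="failed", limit=20)
print(f"{total} failed log(s) total; latest saved to {path}")
```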
-
-
-# ===== AD-HOC COMMAND HISTORY SERVICE (DB VERSION) =====
-
-class AdHocHistoryService:
- """Service pour gérer l'historique des commandes ad-hoc avec catégories.
-
- Implémentation basée sur la BD (table ``logs``) via LogRepository,
- sans aucun accès aux fichiers JSON (.adhoc_history.json).
- """
-
- def __init__(self) -> None:
-        # No file: everything is stored in the DB
- pass
-
- async def _get_commands_logs(self, session: AsyncSession) -> List["Log"]:
- from app.models.log import Log
- stmt = (
- select(Log)
- .where(Log.source == "adhoc_history")
- .order_by(Log.created_at.desc())
- )
- result = await session.execute(stmt)
- return result.scalars().all()
-
- async def _get_categories_logs(self, session: AsyncSession) -> List["Log"]:
- from app.models.log import Log
- stmt = (
- select(Log)
- .where(Log.source == "adhoc_category")
- .order_by(Log.created_at.asc())
- )
- result = await session.execute(stmt)
- return result.scalars().all()
-
- async def add_command(
- self,
- command: str,
- target: str,
- module: str,
- become: bool,
- category: str = "default",
- description: str | None = None,
- ) -> AdHocHistoryEntry:
- """Ajoute ou met à jour une commande dans l'historique (stockée dans logs.details)."""
- from app.models.log import Log
- from app.crud.log import LogRepository
-
- async with async_session_maker() as session:
- repo = LogRepository(session)
-
-            # Load all history logs and look for an existing entry
- logs = await self._get_commands_logs(session)
- existing_log: Optional[Log] = None
- for log in logs:
- details = log.details or {}
- if details.get("command") == command and details.get("target") == target:
- existing_log = log
- break
-
- now = datetime.now(timezone.utc)
-
- if existing_log is not None:
- details = existing_log.details or {}
- details.setdefault("id", details.get("id") or f"adhoc_{existing_log.id}")
- details["command"] = command
- details["target"] = target
- details["module"] = module
- details["become"] = bool(become)
- details["category"] = category or details.get("category", "default")
- if description is not None:
- details["description"] = description
- details["created_at"] = details.get("created_at") or now.isoformat()
- details["last_used"] = now.isoformat()
- details["use_count"] = int(details.get("use_count", 1)) + 1
- existing_log.details = details
- await session.commit()
- data = details
- else:
- import uuid
-
- entry_id = f"adhoc_{uuid.uuid4().hex[:8]}"
- details = {
- "id": entry_id,
- "command": command,
- "target": target,
- "module": module,
- "become": bool(become),
- "category": category or "default",
- "description": description,
- "created_at": now.isoformat(),
- "last_used": now.isoformat(),
- "use_count": 1,
- }
-
- log = await repo.create(
- level="INFO",
- source="adhoc_history",
- message=command,
- details=details,
- )
- await session.commit()
- data = log.details or details
-
-        # Build the Pydantic entry
- return AdHocHistoryEntry(
- id=data.get("id"),
- command=data.get("command", command),
- target=data.get("target", target),
- module=data.get("module", module),
- become=bool(data.get("become", become)),
- category=data.get("category", category or "default"),
- description=data.get("description", description),
- created_at=datetime.fromisoformat(data["created_at"].replace("Z", "+00:00"))
- if isinstance(data.get("created_at"), str)
- else now,
- last_used=datetime.fromisoformat(data["last_used"].replace("Z", "+00:00"))
- if isinstance(data.get("last_used"), str)
- else now,
- use_count=int(data.get("use_count", 1)),
- )
-
- async def get_commands(
- self,
- category: str | None = None,
- search: str | None = None,
- limit: int = 50,
- ) -> List[AdHocHistoryEntry]:
- """Récupère les commandes de l'historique depuis la BD."""
- async with async_session_maker() as session:
- logs = await self._get_commands_logs(session)
-
- commands: List[AdHocHistoryEntry] = []
- for log in logs:
- details = log.details or {}
- cmd = details.get("command") or log.message or ""
- if category and details.get("category", "default") != category:
- continue
- if search and search.lower() not in cmd.lower():
- continue
-
- created_at_raw = details.get("created_at")
- last_used_raw = details.get("last_used")
- try:
- created_at = (
- datetime.fromisoformat(created_at_raw.replace("Z", "+00:00"))
- if isinstance(created_at_raw, str)
- else log.created_at
- )
- except Exception:
- created_at = log.created_at
- try:
- last_used = (
- datetime.fromisoformat(last_used_raw.replace("Z", "+00:00"))
- if isinstance(last_used_raw, str)
- else created_at
- )
- except Exception:
- last_used = created_at
-
- entry = AdHocHistoryEntry(
- id=details.get("id") or f"adhoc_{log.id}",
- command=cmd,
- target=details.get("target", ""),
- module=details.get("module", "shell"),
- become=bool(details.get("become", False)),
- category=details.get("category", "default"),
- description=details.get("description"),
- created_at=created_at,
- last_used=last_used,
- use_count=int(details.get("use_count", 1)),
- )
- commands.append(entry)
-
-        # Sort by descending last_used
- commands.sort(key=lambda x: x.last_used, reverse=True)
- return commands[:limit]
-
- async def get_categories(self) -> List[AdHocHistoryCategory]:
- """Récupère la liste des catégories depuis la BD.
-
- Si aucune catégorie n'est présente, les catégories par défaut sont créées.
- """
- from app.crud.log import LogRepository
-
- async with async_session_maker() as session:
- logs = await self._get_categories_logs(session)
-
- if not logs:
-            # Seed with the default categories
- defaults = [
- {"name": "default", "description": "Commandes générales", "color": "#7c3aed", "icon": "fa-terminal"},
- {"name": "diagnostic", "description": "Commandes de diagnostic", "color": "#10b981", "icon": "fa-stethoscope"},
- {"name": "maintenance", "description": "Commandes de maintenance", "color": "#f59e0b", "icon": "fa-wrench"},
- {"name": "deployment", "description": "Commandes de déploiement", "color": "#3b82f6", "icon": "fa-rocket"},
- ]
- repo = LogRepository(session)
- for cat in defaults:
- await repo.create(
- level="INFO",
- source="adhoc_category",
- message=cat["name"],
- details=cat,
- )
- await session.commit()
- logs = await self._get_categories_logs(session)
-
- categories: List[AdHocHistoryCategory] = []
- for log in logs:
- data = log.details or {}
- categories.append(
- AdHocHistoryCategory(
- name=data.get("name") or log.message,
- description=data.get("description"),
- color=data.get("color", "#7c3aed"),
- icon=data.get("icon", "fa-folder"),
- )
- )
- return categories
-
- async def add_category(
- self,
- name: str,
- description: str | None = None,
- color: str = "#7c3aed",
- icon: str = "fa-folder",
- ) -> AdHocHistoryCategory:
- """Ajoute une nouvelle catégorie en BD (ou renvoie l'existante)."""
- from app.crud.log import LogRepository
-
- async with async_session_maker() as session:
- logs = await self._get_categories_logs(session)
- for log in logs:
- data = log.details or {}
- if data.get("name") == name:
- return AdHocHistoryCategory(
- name=data.get("name"),
- description=data.get("description"),
- color=data.get("color", color),
- icon=data.get("icon", icon),
- )
-
- repo = LogRepository(session)
- details = {
- "name": name,
- "description": description,
- "color": color,
- "icon": icon,
- }
- await repo.create(
- level="INFO",
- source="adhoc_category",
- message=name,
- details=details,
- )
- await session.commit()
- return AdHocHistoryCategory(**details)
-
- async def delete_command(self, command_id: str) -> bool:
- """Supprime une commande de l'historique (ligne dans logs)."""
- from app.models.log import Log
-
- async with async_session_maker() as session:
- stmt = select(Log).where(Log.source == "adhoc_history")
- result = await session.execute(stmt)
- logs = result.scalars().all()
-
- target_log: Optional[Log] = None
- for log in logs:
- details = log.details or {}
- if details.get("id") == command_id:
- target_log = log
- break
-
- if not target_log:
- return False
-
- await session.delete(target_log)
- await session.commit()
- return True
-
- async def update_command_category(
- self,
- command_id: str,
- category: str,
- description: str | None = None,
- ) -> bool:
- """Met à jour la catégorie d'une commande dans l'historique."""
- from app.models.log import Log
-
- async with async_session_maker() as session:
- stmt = select(Log).where(Log.source == "adhoc_history")
- result = await session.execute(stmt)
- logs = result.scalars().all()
-
- for log in logs:
- details = log.details or {}
- if details.get("id") == command_id:
- details["category"] = category
- if description is not None:
- details["description"] = description
- log.details = details
- await session.commit()
- return True
- return False
-
- async def update_category(
- self,
- category_name: str,
- new_name: str,
- description: str,
- color: str,
- icon: str,
- ) -> bool:
- """Met à jour une catégorie existante et les commandes associées."""
- from app.models.log import Log
-
- async with async_session_maker() as session:
-            # Update the category itself
- logs_cat = await self._get_categories_logs(session)
- target_log: Optional[Log] = None
- for log in logs_cat:
- data = log.details or {}
- if data.get("name") == category_name:
- target_log = log
- break
-
- if not target_log:
- return False
-
- data = target_log.details or {}
- old_name = data.get("name", category_name)
- data["name"] = new_name
- data["description"] = description
- data["color"] = color
- data["icon"] = icon
- target_log.details = data
-
-            # Update the commands that reference this category
- stmt_cmd = select(Log).where(Log.source == "adhoc_history")
- result_cmd = await session.execute(stmt_cmd)
- for cmd_log in result_cmd.scalars().all():
- det = cmd_log.details or {}
- if det.get("category") == old_name:
- det["category"] = new_name
- cmd_log.details = det
-
- await session.commit()
- return True
-
- async def delete_category(self, category_name: str) -> bool:
- """Supprime une catégorie et déplace ses commandes vers 'default'."""
- if category_name == "default":
- return False
-
- from app.models.log import Log
-
- async with async_session_maker() as session:
-            # Find the category
- logs_cat = await self._get_categories_logs(session)
- target_log: Optional[Log] = None
- for log in logs_cat:
- data = log.details or {}
- if data.get("name") == category_name:
- target_log = log
- break
-
- if not target_log:
- return False
-
-            # Move the commands to "default"
- stmt_cmd = select(Log).where(Log.source == "adhoc_history")
- result_cmd = await session.execute(stmt_cmd)
- for cmd_log in result_cmd.scalars().all():
- det = cmd_log.details or {}
- if det.get("category") == category_name:
- det["category"] = "default"
- cmd_log.details = det
-
-            # Delete the category
- await session.delete(target_log)
- await session.commit()
- return True
-
-
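A short usage sketch of the DB-backed history service above (illustrative; `env_homelab` is an assumed group name):

```python
# Illustrative usage, awaited from async code.
history = AdHocHistoryService()

async def demo() -> None:
    await history.add_command(
        command="df -h",
        target="env_homelab",      # hypothetical group name
        module="shell",
        become=False,
        category="diagnostic",
    )
    recent = await history.get_commands(category="diagnostic", limit=10)
    print([e.command for e in recent])
```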
-# ===== BOOTSTRAP STATUS SERVICE (DB VERSION) =====
-
-class BootstrapStatusService:
- """Service pour gérer le statut de bootstrap des hôtes.
-
- Cette version utilise la base de données SQLite via SQLAlchemy async.
- Note: Le modèle BD utilise host_id (FK), mais ce service utilise host_name
- pour la compatibilité avec le code existant. Il fait la correspondance via HostRepository.
- """
-
- def __init__(self):
-        # In-memory cache to avoid repeated DB queries
- self._cache: Dict[str, Dict] = {}
-
- async def _get_host_id_by_name(self, session: AsyncSession, host_name: str) -> Optional[str]:
- """Récupère l'ID d'un hôte par son nom"""
- from crud.host import HostRepository
- repo = HostRepository(session)
- host = await repo.get_by_name(host_name)
- return host.id if host else None
-
- def set_bootstrap_status(self, host_name: str, success: bool, details: str = None) -> Dict:
- """Enregistre le statut de bootstrap d'un hôte (version synchrone avec cache)"""
- status_data = {
- "bootstrap_ok": success,
- "bootstrap_date": datetime.now(timezone.utc).isoformat(),
- "details": details
- }
- self._cache[host_name] = status_data
-
-        # Schedule the DB save asynchronously
- asyncio.create_task(self._save_to_db(host_name, success, details))
-
- return status_data
-
- async def _save_to_db(self, host_name: str, success: bool, details: str = None):
- """Sauvegarde le statut dans la BD"""
- try:
- async with async_session_maker() as session:
- host_id = await self._get_host_id_by_name(session, host_name)
- if not host_id:
- print(f"Host '{host_name}' non trouvé en BD pour bootstrap status")
- return
-
-                from app.crud.bootstrap_status import BootstrapStatusRepository
- repo = BootstrapStatusRepository(session)
- await repo.create(
- host_id=host_id,
- status="success" if success else "failed",
- last_attempt=datetime.now(timezone.utc),
- error_message=None if success else details,
- )
- await session.commit()
- except Exception as e:
- print(f"Erreur sauvegarde bootstrap status en BD: {e}")
-
- def get_bootstrap_status(self, host_name: str) -> Dict:
- """Récupère le statut de bootstrap d'un hôte depuis le cache"""
- return self._cache.get(host_name, {
- "bootstrap_ok": False,
- "bootstrap_date": None,
- "details": None
- })
-
- def get_all_status(self) -> Dict[str, Dict]:
- """Récupère le statut de tous les hôtes depuis le cache"""
- return self._cache.copy()
-
- def remove_host(self, host_name: str) -> bool:
- """Supprime le statut d'un hôte du cache"""
- if host_name in self._cache:
- del self._cache[host_name]
- return True
- return False
-
- async def load_from_db(self):
- """Charge tous les statuts depuis la BD dans le cache (appelé au démarrage)"""
- try:
- async with async_session_maker() as session:
-                from app.crud.bootstrap_status import BootstrapStatusRepository
-                from app.crud.host import HostRepository
-                from sqlalchemy import select
-                from app.models.bootstrap_status import BootstrapStatus
-                from app.models.host import Host
-
-                # Fetch all the latest statuses together with the host names
- stmt = (
- select(BootstrapStatus, Host.name)
- .join(Host, BootstrapStatus.host_id == Host.id)
- .order_by(BootstrapStatus.created_at.desc())
- )
- result = await session.execute(stmt)
-
-                # Keep only the latest status per host
- seen_hosts = set()
- for bs, host_name in result:
- if host_name not in seen_hosts:
- self._cache[host_name] = {
- "bootstrap_ok": bs.status == "success",
- "bootstrap_date": bs.last_attempt.isoformat() if bs.last_attempt else bs.created_at.isoformat(),
- "details": bs.error_message
- }
- seen_hosts.add(host_name)
-
- print(f"📋 {len(self._cache)} statut(s) bootstrap chargé(s) depuis la BD")
- except Exception as e:
- print(f"Erreur chargement bootstrap status depuis BD: {e}")
-
-
-# ===== HOST STATUS SERVICE =====
-
-class HostStatusService:
- """Service simple pour stocker le statut runtime des hôtes en mémoire.
-
- Cette implémentation ne persiste plus dans un fichier JSON ; les données
- sont conservées uniquement pendant la vie du processus.
- """
-
- def __init__(self):
-        # Dict: host_name -> {"status": str, "last_seen": Optional[datetime|str], "os": Optional[str]}
- self._hosts: Dict[str, Dict[str, Any]] = {}
-
- def set_status(self, host_name: str, status: str, last_seen: Optional[datetime], os_info: Optional[str]) -> Dict:
- """Met à jour le statut d'un hôte en mémoire."""
- entry = {
- "status": status,
- "last_seen": last_seen if isinstance(last_seen, datetime) else last_seen,
- "os": os_info,
- }
- self._hosts[host_name] = entry
- return entry
-
- def get_status(self, host_name: str) -> Dict:
- """Récupère le statut d'un hôte, avec valeurs par défaut si absent."""
- return self._hosts.get(host_name, {"status": "online", "last_seen": None, "os": None})
-
- def get_all_status(self) -> Dict[str, Dict]:
- """Retourne une copie de tous les statuts connus."""
- return dict(self._hosts)
-
- def remove_host(self, host_name: str) -> bool:
- """Supprime le statut d'un hôte de la mémoire."""
- if host_name in self._hosts:
- del self._hosts[host_name]
- return True
- return False
-
-
-# ===== SCHEDULER SERVICE - DATABASE-BACKED VERSION =====
-
-# Import the SQLAlchemy Schedule model (distinct from the Pydantic Schedule)
-from models.schedule import Schedule as ScheduleModel
-from models.schedule_run import ScheduleRun as ScheduleRunModel
-
-
-class SchedulerService:
- """Service pour gérer les schedules de playbooks avec APScheduler.
-
- Cette version utilise uniquement la base de données SQLite (via SQLAlchemy async)
- pour stocker les cédules et leur historique d'exécution.
- """
-
- def __init__(self):
- # Configure APScheduler
- jobstores = {'default': MemoryJobStore()}
- executors = {'default': AsyncIOExecutor()}
- job_defaults = {'coalesce': True, 'max_instances': 1, 'misfire_grace_time': 300}
-
- self.scheduler = AsyncIOScheduler(
- jobstores=jobstores,
- executors=executors,
- job_defaults=job_defaults,
- timezone=pytz.UTC
- )
- self._started = False
- # In-memory cache of (Pydantic) schedules to avoid repeated DB queries
- self._schedules_cache: Dict[str, Schedule] = {}
-
- async def start_async(self):
- """Démarre le scheduler et charge tous les schedules actifs depuis la BD"""
- if not self._started:
- self.scheduler.start()
- self._started = True
- # Load the active schedules from the DB
- await self._load_active_schedules_from_db()
- print("📅 Scheduler démarré avec succès (BD)")
-
- def start(self):
- """Démarre le scheduler (version synchrone pour compatibilité)"""
- if not self._started:
- self.scheduler.start()
- self._started = True
- print("📅 Scheduler démarré (chargement BD différé)")
-
- def shutdown(self):
- """Arrête le scheduler proprement"""
- if self._started:
- self.scheduler.shutdown(wait=False)
- self._started = False
-
- async def _load_active_schedules_from_db(self):
- """Charge tous les schedules actifs depuis la BD dans APScheduler"""
- try:
- async with async_session_maker() as session:
- repo = ScheduleRepository(session)
- db_schedules = await repo.list(limit=1000)
-
- for db_sched in db_schedules:
- if db_sched.enabled:
- try:
- # Convert the SQLAlchemy model into a Pydantic Schedule
- pydantic_sched = self._db_to_pydantic(db_sched)
- self._schedules_cache[pydantic_sched.id] = pydantic_sched
- self._add_job_for_schedule(pydantic_sched)
- except Exception as e:
- print(f"Erreur chargement schedule {db_sched.id}: {e}")
-
- print(f"📅 {len(self._schedules_cache)} schedule(s) chargé(s) depuis la BD")
- except Exception as e:
- print(f"Erreur chargement schedules depuis BD: {e}")
-
- def _db_to_pydantic(self, db_sched: ScheduleModel) -> Schedule:
- """Convertit un modèle SQLAlchemy Schedule en Pydantic Schedule"""
- # Reconstruire l'objet recurrence depuis les colonnes BD
- recurrence = None
- if db_sched.recurrence_type:
- recurrence = ScheduleRecurrence(
- type=db_sched.recurrence_type,
- time=db_sched.recurrence_time or "02:00",
- days=json.loads(db_sched.recurrence_days) if db_sched.recurrence_days else None,
- cron_expression=db_sched.cron_expression,
- )
-
- return Schedule(
- id=db_sched.id,
- name=db_sched.name,
- description=db_sched.description,
- playbook=db_sched.playbook,
- target_type=db_sched.target_type or "group",
- target=db_sched.target,
- extra_vars=db_sched.extra_vars,
- schedule_type=db_sched.schedule_type,
- recurrence=recurrence,
- timezone=db_sched.timezone or "America/Montreal",
- start_at=db_sched.start_at,
- end_at=db_sched.end_at,
- next_run_at=db_sched.next_run,
- last_run_at=db_sched.last_run,
- last_status=db_sched.last_status or "never",
- enabled=db_sched.enabled,
- retry_on_failure=db_sched.retry_on_failure or 0,
- timeout=db_sched.timeout or 3600,
- notification_type=db_sched.notification_type or "all",
- tags=json.loads(db_sched.tags) if db_sched.tags else [],
- run_count=db_sched.run_count or 0,
- success_count=db_sched.success_count or 0,
- failure_count=db_sched.failure_count or 0,
- created_at=db_sched.created_at,
- updated_at=db_sched.updated_at,
- )
-
- def _build_cron_trigger(self, schedule: Schedule) -> Optional[CronTrigger]:
- """Construit un trigger cron à partir de la configuration du schedule"""
- if schedule.schedule_type == "once":
- return None
-
- recurrence = schedule.recurrence
- if not recurrence:
- return None
-
- tz = pytz.timezone(schedule.timezone)
- hour, minute = recurrence.time.split(':') if recurrence.time else ("2", "0")
-
- try:
- if recurrence.type == "daily":
- return CronTrigger(hour=int(hour), minute=int(minute), timezone=tz)
-
- elif recurrence.type == "weekly":
- # Convert days (1-7, Monday=1) to cron format (0-6, Monday=0)
- days = recurrence.days or [1]
- day_of_week = ','.join(str(d - 1) for d in days)
- return CronTrigger(day_of_week=day_of_week, hour=int(hour), minute=int(minute), timezone=tz)
-
- elif recurrence.type == "monthly":
- day = recurrence.day_of_month or 1
- return CronTrigger(day=day, hour=int(hour), minute=int(minute), timezone=tz)
-
- elif recurrence.type == "custom" and recurrence.cron_expression:
- # Parser l'expression cron
- parts = recurrence.cron_expression.split()
- if len(parts) == 5:
- return CronTrigger.from_crontab(recurrence.cron_expression, timezone=tz)
- else:
- # Expression cron étendue (6 champs avec secondes)
- return CronTrigger(
- second=parts[0] if len(parts) > 5 else '0',
- minute=parts[0] if len(parts) == 5 else parts[1],
- hour=parts[1] if len(parts) == 5 else parts[2],
- day=parts[2] if len(parts) == 5 else parts[3],
- month=parts[3] if len(parts) == 5 else parts[4],
- day_of_week=parts[4] if len(parts) == 5 else parts[5],
- timezone=tz
- )
- except Exception as e:
- print(f"Erreur construction trigger cron: {e}")
- return None
-
- return None
-
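- # Illustrative mapping (not part of the original file) from recurrence
- # settings to the triggers built above; times and days are hypothetical:
- #   daily  at "02:30"             -> CronTrigger(hour=2, minute=30, timezone=tz)
- #   weekly days=[1, 5] at "02:30" -> CronTrigger(day_of_week="0,4", hour=2, minute=30, timezone=tz)
- #   monthly day 15 at "02:30"     -> CronTrigger(day=15, hour=2, minute=30, timezone=tz)
- #   custom "*/15 * * * *"         -> CronTrigger.from_crontab("*/15 * * * *", timezone=tz)
-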
- def _add_job_for_schedule(self, schedule: Schedule):
- """Ajoute un job APScheduler pour un schedule"""
- job_id = f"schedule_{schedule.id}"
-
- # Remove the existing job if there is one
- try:
- self.scheduler.remove_job(job_id)
- except Exception:
- pass
-
- if schedule.schedule_type == "once":
- # One-shot execution
- if schedule.start_at and schedule.start_at > datetime.now(timezone.utc):
- trigger = DateTrigger(run_date=schedule.start_at, timezone=pytz.UTC)
- self.scheduler.add_job(
- self._execute_schedule,
- trigger,
- id=job_id,
- args=[schedule.id],
- replace_existing=True
- )
- else:
- # Recurring execution
- trigger = self._build_cron_trigger(schedule)
- if trigger:
- self.scheduler.add_job(
- self._execute_schedule,
- trigger,
- id=job_id,
- args=[schedule.id],
- replace_existing=True
- )
-
- # Compute and update next_run_at
- self._update_next_run(schedule.id)
-
- def _update_next_run(self, schedule_id: str):
- """Met à jour le champ next_run dans le cache et planifie la mise à jour BD"""
- job_id = f"schedule_{schedule_id}"
- try:
- job = self.scheduler.get_job(job_id)
- if job and job.next_run_time:
- # Update the cache
- if schedule_id in self._schedules_cache:
- self._schedules_cache[schedule_id].next_run_at = job.next_run_time
- # Update the DB asynchronously
- asyncio.create_task(self._update_next_run_in_db(schedule_id, job.next_run_time))
- except Exception:
- pass
-
- async def _update_next_run_in_db(self, schedule_id: str, next_run: datetime):
- """Met à jour next_run dans la BD"""
- try:
- async with async_session_maker() as session:
- repo = ScheduleRepository(session)
- db_sched = await repo.get(schedule_id)
- if db_sched:
- await repo.update(db_sched, next_run=next_run)
- await session.commit()
- except Exception as e:
- print(f"Erreur mise à jour next_run BD: {e}")
-
- async def _update_schedule_in_db(self, schedule: Schedule):
- """Met à jour un schedule dans la BD"""
- try:
- async with async_session_maker() as session:
- repo = ScheduleRepository(session)
- db_sched = await repo.get(schedule.id)
- if db_sched:
- await repo.update(
- db_sched,
- enabled=schedule.enabled,
- last_run=schedule.last_run_at,
- last_status=schedule.last_status,
- run_count=schedule.run_count,
- success_count=schedule.success_count,
- failure_count=schedule.failure_count,
- )
- await session.commit()
- except Exception as e:
- print(f"Erreur mise à jour schedule BD: {e}")
-
- async def _execute_schedule(self, schedule_id: str):
- """Exécute un schedule (appelé par APScheduler)"""
- # Import circulaire évité en utilisant les variables globales
- global ws_manager, ansible_service, db, task_log_service
-
- # Récupérer le schedule depuis le cache ou la BD
- schedule = self._schedules_cache.get(schedule_id)
- if not schedule:
- # Charger depuis la BD
- try:
- async with async_session_maker() as session:
- repo = ScheduleRepository(session)
- db_sched = await repo.get(schedule_id)
- if db_sched:
- schedule = self._db_to_pydantic(db_sched)
- self._schedules_cache[schedule_id] = schedule
- except Exception as e:
- print(f"Erreur chargement schedule {schedule_id}: {e}")
-
- if not schedule:
- print(f"Schedule {schedule_id} non trouvé")
- return
-
- # Check whether the schedule is still active
- if not schedule.enabled:
- return
-
- # Check the time window
- now = datetime.now(timezone.utc)
- if schedule.end_at and now > schedule.end_at:
- # The schedule has expired; disable it
- schedule.enabled = False
- self._schedules_cache[schedule_id] = schedule
- await self._update_schedule_in_db(schedule)
- return
-
- # Create a Pydantic ScheduleRun for notifications
- run = ScheduleRun(schedule_id=schedule_id)
-
- # Update the schedule
- schedule.last_run_at = now
- schedule.last_status = "running"
- schedule.run_count += 1
- self._schedules_cache[schedule_id] = schedule
-
- # Notify via WebSocket
- try:
- await ws_manager.broadcast({
- "type": "schedule_run_started",
- "data": {
- "schedule_id": schedule_id,
- "schedule_name": schedule.name,
- "run": run.dict(),
- "status": "running"
- }
- })
- except Exception:
- pass
-
- # Create a task
- task_id = str(db.get_next_id("tasks"))
- playbook_name = schedule.playbook.replace('.yml', '').replace('-', ' ').title()
- task = Task(
- id=task_id,
- name=f"[Planifié] {playbook_name}",
- host=schedule.target,
- status="running",
- progress=0,
- start_time=now
- )
- db.tasks.insert(0, task)
-
- # Update the run with the task_id
- run.task_id = task_id
-
- # Notify that the task was created
- try:
- await ws_manager.broadcast({
- "type": "task_created",
- "data": task.dict()
- })
- except Exception:
- pass
-
- # Run the playbook
- start_time = perf_counter()
- try:
- result = await ansible_service.execute_playbook(
- playbook=schedule.playbook,
- target=schedule.target,
- extra_vars=schedule.extra_vars,
- check_mode=False,
- verbose=True
- )
-
- execution_time = perf_counter() - start_time
- success = result.get("success", False)
-
- # Update the task
- task.status = "completed" if success else "failed"
- task.progress = 100
- task.end_time = datetime.now(timezone.utc)
- task.duration = f"{execution_time:.1f}s"
- task.output = result.get("stdout", "")
- task.error = result.get("stderr", "") if not success else None
-
- # Update the run
- run.status = "success" if success else "failed"
- run.finished_at = datetime.now(timezone.utc)
- run.duration_seconds = execution_time
- run.error_message = result.get("stderr", "") if not success else None
-
- # Count the impacted hosts
- stdout = result.get("stdout", "")
- host_count = len(re.findall(r'^[a-zA-Z0-9][a-zA-Z0-9._-]+\s*:\s*ok=', stdout, re.MULTILINE))
- run.hosts_impacted = host_count
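- # The regex above matches one PLAY RECAP summary line per host,
- # e.g. (illustrative): "pve1.home : ok=12 changed=3 unreachable=0 failed=0"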
-
- # Update the schedule
- schedule.last_status = "success" if success else "failed"
- if success:
- schedule.success_count += 1
- else:
- schedule.failure_count += 1
-
- # Save the schedule to the cache and the DB
- self._schedules_cache[schedule_id] = schedule
- await self._update_schedule_in_db(schedule)
-
- # Save the markdown log (scheduled task)
- try:
- task_log_service.save_task_log(
- task=task,
- output=result.get("stdout", ""),
- error=result.get("stderr", ""),
- source_type='scheduled'
- )
- except Exception:
- pass
-
- # Notify
- await ws_manager.broadcast({
- "type": "schedule_run_finished",
- "data": {
- "schedule_id": schedule_id,
- "schedule_name": schedule.name,
- "run": run.dict(),
- "status": run.status,
- "success": success
- }
- })
-
- await ws_manager.broadcast({
- "type": "task_completed",
- "data": {
- "id": task_id,
- "status": task.status,
- "progress": 100,
- "duration": task.duration,
- "success": success
- }
- })
-
- # Log
- log_entry = LogEntry(
- id=db.get_next_id("logs"),
- timestamp=datetime.now(timezone.utc),
- level="INFO" if success else "ERROR",
- message=f"Schedule '{schedule.name}' exécuté: {'succès' if success else 'échec'}",
- source="scheduler",
- host=schedule.target
- )
- db.logs.insert(0, log_entry)
-
- # NTFY notification according to the configured type
- await self._send_schedule_notification(schedule, success, run.error_message)
-
- # Record the run in the database (schedule_runs)
- try:
- async with async_session_maker() as db_session:
- run_repo = ScheduleRunRepository(db_session)
- await run_repo.create(
- schedule_id=schedule_id,
- task_id=task_id,
- status=run.status,
- started_at=run.started_at,
- completed_at=run.finished_at,
- duration=run.duration_seconds,
- error_message=run.error_message,
- output=result.get("stdout", "") if success else result.get("stderr", ""),
- )
- await db_session.commit()
- except Exception:
- # Never break a scheduler run over DB persistence issues
- pass
-
- except Exception as e:
- # The execution itself failed
- execution_time = perf_counter() - start_time
-
- task.status = "failed"
- task.end_time = datetime.now(timezone.utc)
- task.error = str(e)
-
- run.status = "failed"
- run.finished_at = datetime.now(timezone.utc)
- run.duration_seconds = execution_time
- run.error_message = str(e)
-
- schedule.last_status = "failed"
- schedule.failure_count += 1
-
- # Save the schedule to the cache and the DB
- self._schedules_cache[schedule_id] = schedule
- await self._update_schedule_in_db(schedule)
-
- # Record the failure in the DB (schedule_runs)
- try:
- async with async_session_maker() as db_session:
- run_repo = ScheduleRunRepository(db_session)
- await run_repo.create(
- schedule_id=schedule_id,
- task_id=task_id,
- status=run.status,
- started_at=run.started_at,
- completed_at=run.finished_at,
- duration=run.duration_seconds,
- error_message=run.error_message,
- output=str(e),
- )
- await db_session.commit()
- except Exception:
- pass
-
- try:
- task_log_service.save_task_log(task=task, error=str(e), source_type='scheduled')
- except Exception:
- pass
-
- try:
- await ws_manager.broadcast({
- "type": "schedule_run_finished",
- "data": {
- "schedule_id": schedule_id,
- "run": run.dict(),
- "status": "failed",
- "error": str(e)
- }
- })
-
- await ws_manager.broadcast({
- "type": "task_failed",
- "data": {"id": task_id, "status": "failed", "error": str(e)}
- })
- except Exception:
- pass
-
- log_entry = LogEntry(
- id=db.get_next_id("logs"),
- timestamp=datetime.now(timezone.utc),
- level="ERROR",
- message=f"Erreur schedule '{schedule.name}': {str(e)}",
- source="scheduler",
- host=schedule.target
- )
- db.logs.insert(0, log_entry)
-
- # NTFY notification for the failure
- await self._send_schedule_notification(schedule, False, str(e))
-
- # Update next_run_at
- self._update_next_run(schedule_id)
-
- async def _send_schedule_notification(self, schedule: Schedule, success: bool, error_message: Optional[str] = None):
- """Envoie une notification NTFY selon le type configuré pour le schedule.
-
- Args:
- schedule: Le schedule exécuté
- success: True si l'exécution a réussi
- error_message: Message d'erreur en cas d'échec
- """
- # Check the configured notification type
- notification_type = getattr(schedule, 'notification_type', 'all')
-
- # Ne pas notifier si "none"
- if notification_type == "none":
- return
-
- # Ne notifier que les erreurs si "errors"
- if notification_type == "errors" and success:
- return
-
- # Send the notification
- try:
- if success:
- await notification_service.notify_schedule_executed(
- schedule_name=schedule.name,
- success=True,
- details=f"Cible: {schedule.target}"
- )
- else:
- await notification_service.notify_schedule_executed(
- schedule_name=schedule.name,
- success=False,
- details=error_message or "Erreur inconnue"
- )
- except Exception as notif_error:
- print(f"Erreur envoi notification schedule: {notif_error}")
-
- # ===== PUBLIC CRUD METHODS (DATABASE VERSION) =====
-
- def get_all_schedules(self,
- enabled: Optional[bool] = None,
- playbook: Optional[str] = None,
- tag: Optional[str] = None) -> List[Schedule]:
- """Récupère tous les schedules depuis le cache avec filtrage optionnel"""
- schedules = list(self._schedules_cache.values())
-
- # Filters
- if enabled is not None:
- schedules = [s for s in schedules if s.enabled == enabled]
- if playbook:
- schedules = [s for s in schedules if playbook.lower() in s.playbook.lower()]
- if tag:
- schedules = [s for s in schedules if tag in s.tags]
-
- # Sort by next execution time
- schedules.sort(key=lambda x: x.next_run_at or datetime.max.replace(tzinfo=timezone.utc))
- return schedules
-
- def get_schedule(self, schedule_id: str) -> Optional[Schedule]:
- """Récupère un schedule par ID depuis le cache"""
- return self._schedules_cache.get(schedule_id)
-
- def create_schedule(self, request: ScheduleCreateRequest) -> Schedule:
- """Crée un nouveau schedule (sauvegarde en BD via l'endpoint)"""
- schedule = Schedule(
- name=request.name,
- description=request.description,
- playbook=request.playbook,
- target_type=request.target_type,
- target=request.target,
- extra_vars=request.extra_vars,
- schedule_type=request.schedule_type,
- recurrence=request.recurrence,
- timezone=request.timezone,
- start_at=request.start_at,
- end_at=request.end_at,
- enabled=request.enabled,
- retry_on_failure=request.retry_on_failure,
- timeout=request.timeout,
- notification_type=request.notification_type,
- tags=request.tags
- )
-
- # Add it to the cache
- self._schedules_cache[schedule.id] = schedule
-
- # Add the job if the schedule is active
- if schedule.enabled and self._started:
- self._add_job_for_schedule(schedule)
-
- return schedule
-
- def update_schedule(self, schedule_id: str, request: ScheduleUpdateRequest) -> Optional[Schedule]:
- """Met à jour un schedule existant"""
- schedule = self.get_schedule(schedule_id)
- if not schedule:
- return None
-
- # Apply the changes
- update_data = request.dict(exclude_unset=True, exclude_none=True)
- for key, value in update_data.items():
- # The recurrence arrives from the frontend as a dict; it must be turned
- # back into a ScheduleRecurrence object for _build_cron_trigger to work.
- if key == "recurrence" and isinstance(value, dict):
- try:
- value = ScheduleRecurrence(**value)
- except Exception:
- pass
-
- if hasattr(schedule, key):
- setattr(schedule, key, value)
-
- schedule.updated_at = datetime.now(timezone.utc)
-
- # Update the cache
- self._schedules_cache[schedule_id] = schedule
-
- # Update the job
- if self._started:
- job_id = f"schedule_{schedule_id}"
- try:
- self.scheduler.remove_job(job_id)
- except Exception:
- pass
-
- if schedule.enabled:
- self._add_job_for_schedule(schedule)
-
- return schedule
-
- def delete_schedule(self, schedule_id: str) -> bool:
- """Supprime un schedule du cache et du scheduler"""
- if schedule_id in self._schedules_cache:
- del self._schedules_cache[schedule_id]
-
- # Remove the job
- job_id = f"schedule_{schedule_id}"
- try:
- self.scheduler.remove_job(job_id)
- except Exception:
- pass
-
- return True
-
- def pause_schedule(self, schedule_id: str) -> Optional[Schedule]:
- """Met en pause un schedule"""
- schedule = self.get_schedule(schedule_id)
- if not schedule:
- return None
-
- schedule.enabled = False
- self._schedules_cache[schedule_id] = schedule
-
- # Remove the job
- job_id = f"schedule_{schedule_id}"
- try:
- self.scheduler.remove_job(job_id)
- except Exception:
- pass
-
- return schedule
-
- def resume_schedule(self, schedule_id: str) -> Optional[Schedule]:
- """Reprend un schedule en pause"""
- schedule = self.get_schedule(schedule_id)
- if not schedule:
- return None
-
- schedule.enabled = True
- self._schedules_cache[schedule_id] = schedule
-
- # Add the job
- if self._started:
- self._add_job_for_schedule(schedule)
-
- return schedule
-
- async def run_now(self, schedule_id: str) -> Optional[ScheduleRun]:
- """Exécute immédiatement un schedule"""
- schedule = self.get_schedule(schedule_id)
- if not schedule:
- return None
-
- # Run the schedule (awaited inline)
- await self._execute_schedule(schedule_id)
-
- # Return a stub ScheduleRun (the real one is persisted in the DB)
- return ScheduleRun(schedule_id=schedule_id, status="running")
-
- def get_stats(self) -> ScheduleStats:
- """Calcule les statistiques globales des schedules depuis le cache"""
- schedules = self.get_all_schedules()
-
- now = datetime.now(timezone.utc)
-
- stats = ScheduleStats()
- stats.total = len(schedules)
- stats.active = len([s for s in schedules if s.enabled])
- stats.paused = len([s for s in schedules if not s.enabled])
-
- # Expired schedules
- stats.expired = len([s for s in schedules if s.end_at and s.end_at < now])
-
- # Next execution
- active_schedules = [s for s in schedules if s.enabled and s.next_run_at]
- if active_schedules:
- next_schedule = min(active_schedules, key=lambda x: x.next_run_at)
- stats.next_execution = next_schedule.next_run_at
- stats.next_schedule_name = next_schedule.name
-
- # Stats based on the schedules' own counters (no need to read the runs)
- total_runs = sum(s.run_count for s in schedules)
- total_success = sum(s.success_count for s in schedules)
- total_failures = sum(s.failure_count for s in schedules)
-
- # Approximate the 24h stats from the lifetime counters
- stats.executions_24h = total_runs # approximation
- stats.failures_24h = total_failures # approximation
-
- if total_runs > 0:
- stats.success_rate_7d = round((total_success / total_runs) * 100, 1)
-
- return stats
-
- def get_upcoming_executions(self, limit: int = 5) -> List[Dict]:
- """Retourne les prochaines exécutions planifiées"""
- schedules = self.get_all_schedules(enabled=True)
- upcoming = []
-
- for s in schedules:
- if s.next_run_at:
- upcoming.append({
- "schedule_id": s.id,
- "schedule_name": s.name,
- "playbook": s.playbook,
- "target": s.target,
- "next_run_at": s.next_run_at.isoformat() if s.next_run_at else None,
- "tags": s.tags
- })
-
- upcoming.sort(key=lambda x: x['next_run_at'] or '')
- return upcoming[:limit]
-
- def validate_cron_expression(self, expression: str) -> Dict:
- """Valide une expression cron et retourne les prochaines exécutions"""
- try:
- cron = croniter(expression, datetime.now())
- next_runs = [cron.get_next(datetime).isoformat() for _ in range(5)]
- return {
- "valid": True,
- "next_runs": next_runs,
- "expression": expression
- }
- except Exception as e:
- return {
- "valid": False,
- "error": str(e),
- "expression": expression
- }
-
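- # Illustrative round trip for the validator above (expression and dates
- # are hypothetical):
- #   scheduler_service.validate_cron_expression("0 2 * * 1")
- #   -> {"valid": True, "next_runs": ["...T02:00:00", ...], "expression": "0 2 * * 1"}
- #   scheduler_service.validate_cron_expression("not a cron")
- #   -> {"valid": False, "error": "...", "expression": "not a cron"}
-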
- def add_schedule_to_cache(self, schedule: Schedule):
- """Ajoute un schedule au cache (appelé après création en BD)"""
- self._schedules_cache[schedule.id] = schedule
- if schedule.enabled and self._started:
- self._add_job_for_schedule(schedule)
-
- def remove_schedule_from_cache(self, schedule_id: str):
- """Supprime un schedule du cache"""
- if schedule_id in self._schedules_cache:
- del self._schedules_cache[schedule_id]
-
-
-# Global service instances
-task_log_service = TaskLogService(DIR_LOGS_TASKS)
-adhoc_history_service = AdHocHistoryService() # Stored in the DB via the logs table
-bootstrap_status_service = BootstrapStatusService() # No JSON file anymore; uses the DB
-host_status_service = HostStatusService() # No longer persists to .host_status.json
-scheduler_service = SchedulerService() # No JSON files anymore
-
-
-class WebSocketManager:
- def __init__(self):
- self.active_connections: List[WebSocket] = []
- self.lock = Lock()
-
- async def connect(self, websocket: WebSocket):
- await websocket.accept()
- with self.lock:
- self.active_connections.append(websocket)
-
- def disconnect(self, websocket: WebSocket):
- with self.lock:
- if websocket in self.active_connections:
- self.active_connections.remove(websocket)
-
- async def broadcast(self, message: dict):
- # Copy the list under the lock, then send outside of it: awaiting while
- # holding a threading.Lock could deadlock the event loop if connect()
- # tries to acquire the same lock in the meantime.
- with self.lock:
- connections = list(self.active_connections)
-
- disconnected = []
- for connection in connections:
- try:
- await connection.send_json(message)
- except Exception:
- disconnected.append(connection)
-
- # Clean up dropped connections
- if disconnected:
- with self.lock:
- for conn in disconnected:
- if conn in self.active_connections:
- self.active_connections.remove(conn)
-
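-# Sketch of how the services above push events through the manager (assumes an
-# async context such as an endpoint or a scheduled job; the payloads mirror the
-# broadcasts made in _execute_schedule):
-#   await ws_manager.broadcast({"type": "task_created", "data": task.dict()})
-#   await ws_manager.broadcast({"type": "task_completed", "data": {"id": task_id, "status": "completed"}})
-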
-# Global WebSocket manager instance
-ws_manager = WebSocketManager()
-
-# Tracks running asyncio tasks and processes (used for cancellation)
-# Format: {task_id: {"asyncio_task": Task, "process": Process, "cancelled": bool}}
-running_task_handles: Dict[str, Dict] = {}
-
-
-# Ansible service
-class AnsibleService:
- """Service that runs Ansible playbooks"""
-
- def __init__(self, ansible_dir: Path):
- self.ansible_dir = ansible_dir
- self.playbooks_dir = ansible_dir / "playbooks"
- self.inventory_path = ansible_dir / "inventory" / "hosts.yml"
- self._inventory_cache: Optional[Dict] = None
-
- def get_playbooks(self) -> List[Dict[str, Any]]:
- """Liste les playbooks disponibles avec leurs métadonnées (category/subcategory/hosts).
-
- Les métadonnées sont lues en priorité dans play['vars'] pour être compatibles
- avec la syntaxe Ansible (category/subcategory ne sont pas des clés de Play).
- Le champ 'hosts' est extrait pour permettre le filtrage par compatibilité.
- """
- playbooks = []
- if self.playbooks_dir.exists():
- for pb in self.playbooks_dir.glob("*.yml"):
- # Collect file metadata
- stat = pb.stat()
- playbook_info = {
- "name": pb.stem,
- "filename": pb.name,
- "path": str(pb),
- "category": "general",
- "subcategory": "other",
- "hosts": "all", # Valeur par défaut
- "size": stat.st_size,
- "modified": datetime.fromtimestamp(stat.st_mtime, tz=timezone.utc).isoformat()
- }
- # Extract category/subcategory/hosts from playbook
- try:
- with open(pb, 'r', encoding='utf-8') as f:
- content = yaml.safe_load(f)
- if content and isinstance(content, list) and len(content) > 0:
- play = content[0]
- vars_ = play.get('vars', {}) or {}
-
- # Read category, falling back from play to vars
- if 'category' in play:
- playbook_info['category'] = play['category']
- elif 'category' in vars_:
- playbook_info['category'] = vars_['category']
-
- # Read subcategory with the same fallback
- if 'subcategory' in play:
- playbook_info['subcategory'] = play['subcategory']
- elif 'subcategory' in vars_:
- playbook_info['subcategory'] = vars_['subcategory']
-
- # Read the 'hosts' field (the playbook's target)
- if 'hosts' in play:
- playbook_info['hosts'] = play['hosts']
-
- if 'name' in play:
- playbook_info['description'] = play['name']
- except Exception:
- # Ignore individual parsing errors so a single bad file
- # does not break the whole playbook list.
- pass
- playbooks.append(playbook_info)
- return playbooks
-
- def get_playbook_categories(self) -> Dict[str, List[str]]:
- """Retourne les catégories et sous-catégories des playbooks"""
- categories = {}
- for pb in self.get_playbooks():
- cat = pb.get('category', 'general')
- subcat = pb.get('subcategory', 'other')
- if cat not in categories:
- categories[cat] = []
- if subcat not in categories[cat]:
- categories[cat].append(subcat)
- return categories
-
- def is_target_compatible_with_playbook(self, target: str, playbook_hosts: str) -> bool:
- """Vérifie si une cible (host ou groupe) est compatible avec le champ 'hosts' d'un playbook.
-
- Args:
- target: Nom de l'hôte ou du groupe cible
- playbook_hosts: Valeur du champ 'hosts' du playbook
-
- Returns:
- True si la cible est compatible avec le playbook
-
- Exemples:
- - playbook_hosts='all' → compatible avec tout
- - playbook_hosts='role_proxmox' → compatible avec le groupe role_proxmox et ses hôtes
- - playbook_hosts='server.home' → compatible uniquement avec cet hôte
- """
- # 'all' accepts everything
- if playbook_hosts == 'all':
- return True
-
- # Exact match against the hosts field
- if target == playbook_hosts:
- return True
-
- # Load the inventory to check group memberships
- inventory = self.load_inventory()
-
- # If playbook_hosts is a group, check whether target is one of its hosts
- if self.group_exists(playbook_hosts):
- hosts_in_group = self.get_group_hosts(playbook_hosts)
- if target in hosts_in_group:
- return True
- # Also check whether target is a subgroup of the playbook_hosts group
- if target in self.get_groups():
- # Check whether all hosts of the target group belong to playbook_hosts
- target_hosts = set(self.get_group_hosts(target))
- playbook_group_hosts = set(hosts_in_group)
- if target_hosts and target_hosts.issubset(playbook_group_hosts):
- return True
-
- # If playbook_hosts is a host and target is a group containing it
- if target in self.get_groups():
- hosts_in_target = self.get_group_hosts(target)
- if playbook_hosts in hosts_in_target:
- return True
-
- return False
-
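- # Illustrative calls, assuming a hypothetical inventory where the group
- # 'role_proxmox' contains the host 'pve1.home':
- #   is_target_compatible_with_playbook('pve1.home', 'all')           # True: 'all' accepts everything
- #   is_target_compatible_with_playbook('pve1.home', 'role_proxmox')  # True: host belongs to the group
- #   is_target_compatible_with_playbook('role_proxmox', 'pve1.home')  # True: the group contains the host
- #   is_target_compatible_with_playbook('pve1.home', 'other.host')    # False
-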
- def get_compatible_playbooks(self, target: str) -> List[Dict[str, Any]]:
- """Retourne la liste des playbooks compatibles avec une cible donnée.
-
- Args:
- target: Nom de l'hôte ou du groupe
-
- Returns:
- Liste des playbooks compatibles avec leurs métadonnées
- """
- all_playbooks = self.get_playbooks()
- compatible = []
-
- for pb in all_playbooks:
- playbook_hosts = pb.get('hosts', 'all')
- if self.is_target_compatible_with_playbook(target, playbook_hosts):
- compatible.append(pb)
-
- return compatible
-
- def load_inventory(self) -> Dict:
- """Charge l'inventaire Ansible depuis le fichier YAML"""
- if self._inventory_cache:
- return self._inventory_cache
-
- if not self.inventory_path.exists():
- return {}
-
- with open(self.inventory_path, 'r') as f:
- self._inventory_cache = yaml.safe_load(f)
- return self._inventory_cache
-
- def get_hosts_from_inventory(self, group_filter: str = None) -> List[AnsibleInventoryHost]:
- """Extrait la liste des hôtes de l'inventaire sans doublons.
-
- Args:
- group_filter: Si spécifié, filtre les hôtes par ce groupe
- """
- inventory = self.load_inventory()
- # Use dict to track unique hosts and accumulate their groups
- hosts_dict: Dict[str, AnsibleInventoryHost] = {}
-
- def extract_hosts(data: Dict, current_group: str = ""):
- if not isinstance(data, dict):
- return
-
- # Extract the direct hosts
- if 'hosts' in data:
- for host_name, host_data in data['hosts'].items():
- host_data = host_data or {}
-
- if host_name in hosts_dict:
- # Host already exists, add group to its groups list
- if current_group and current_group not in hosts_dict[host_name].groups:
- hosts_dict[host_name].groups.append(current_group)
- else:
- # New host
- hosts_dict[host_name] = AnsibleInventoryHost(
- name=host_name,
- ansible_host=host_data.get('ansible_host', host_name),
- group=current_group,
- groups=[current_group] if current_group else [],
- vars=host_data
- )
-
- # Walk the children (subgroups)
- if 'children' in data:
- for child_name, child_data in data['children'].items():
- extract_hosts(child_data, child_name)
-
- extract_hosts(inventory.get('all', {}))
-
- # Convert to list
- hosts = list(hosts_dict.values())
-
- # Apply group filter if specified
- if group_filter and group_filter != 'all':
- hosts = [h for h in hosts if group_filter in h.groups]
-
- return hosts
-
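- # Illustrative input/output for the extraction above (hypothetical inventory):
- #   all:
- #     children:
- #       env_homelab:
- #         hosts:
- #           pve1.home: {ansible_host: 192.168.1.10}
- #       role_proxmox:
- #         hosts:
- #           pve1.home:
- # yields a single AnsibleInventoryHost(name='pve1.home', ansible_host='192.168.1.10',
- # groups=['env_homelab', 'role_proxmox']) rather than two duplicate entries.
-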
- def invalidate_cache(self):
- """Invalide le cache de l'inventaire pour forcer un rechargement"""
- self._inventory_cache = None
-
- def get_groups(self) -> List[str]:
- """Extrait la liste des groupes de l'inventaire"""
- inventory = self.load_inventory()
- groups = set()
-
- def extract_groups(data: Dict, parent: str = ""):
- if not isinstance(data, dict):
- return
- if 'children' in data:
- for child_name in data['children'].keys():
- groups.add(child_name)
- extract_groups(data['children'][child_name], child_name)
-
- extract_groups(inventory.get('all', {}))
- return sorted(list(groups))
-
- def get_env_groups(self) -> List[str]:
- """Retourne uniquement les groupes d'environnement (préfixés par env_)"""
- return [g for g in self.get_groups() if g.startswith('env_')]
-
- def get_role_groups(self) -> List[str]:
- """Retourne uniquement les groupes de rôles (préfixés par role_)"""
- return [g for g in self.get_groups() if g.startswith('role_')]
-
- def _save_inventory(self, inventory: Dict):
- """Sauvegarde l'inventaire dans le fichier YAML"""
- # Créer une sauvegarde avant modification
- backup_path = self.inventory_path.with_suffix('.yml.bak')
- if self.inventory_path.exists():
- import shutil
- shutil.copy2(self.inventory_path, backup_path)
-
- with open(self.inventory_path, 'w', encoding='utf-8') as f:
- yaml.dump(inventory, f, default_flow_style=False, allow_unicode=True, sort_keys=False)
-
- # Invalidate the cache
- self.invalidate_cache()
-
- def add_host_to_inventory(self, hostname: str, env_group: str, role_groups: List[str], ansible_host: str = None) -> bool:
- """Ajoute un hôte à l'inventaire Ansible
-
- Args:
- hostname: Nom de l'hôte (ex: server.domain.home)
- env_group: Groupe d'environnement (ex: env_homelab)
- role_groups: Liste des groupes de rôles (ex: ['role_proxmox', 'role_sbc'])
- ansible_host: Adresse IP ou hostname pour ansible_host (optionnel)
-
- Returns:
- True si l'ajout a réussi
- """
- inventory = self.load_inventory()
-
- # Make sure the structure exists
- if 'all' not in inventory:
- inventory['all'] = {}
- if 'children' not in inventory['all']:
- inventory['all']['children'] = {}
-
- children = inventory['all']['children']
-
- # Add to the environment group
- if env_group not in children:
- children[env_group] = {'hosts': {}}
- if 'hosts' not in children[env_group]:
- children[env_group]['hosts'] = {}
-
- # Define the host's variables
- host_vars = None
- if ansible_host and ansible_host != hostname:
- host_vars = {'ansible_host': ansible_host}
-
- children[env_group]['hosts'][hostname] = host_vars
-
- # Add to the role groups
- for role_group in role_groups:
- if role_group not in children:
- children[role_group] = {'hosts': {}}
- if 'hosts' not in children[role_group]:
- children[role_group]['hosts'] = {}
- children[role_group]['hosts'][hostname] = None
-
- self._save_inventory(inventory)
- return True
-
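- # Illustrative effect (hypothetical host and groups):
- #   add_host_to_inventory("nas.home", "env_homelab", ["role_storage"], ansible_host="192.168.1.20")
- # leaves the following under all.children in the inventory file:
- #   env_homelab:  {hosts: {nas.home: {ansible_host: 192.168.1.20}}}
- #   role_storage: {hosts: {nas.home: null}}
-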
- def remove_host_from_inventory(self, hostname: str) -> bool:
- """Supprime un hôte de tous les groupes de l'inventaire
-
- Args:
- hostname: Nom de l'hôte à supprimer
-
- Returns:
- True si la suppression a réussi
- """
- inventory = self.load_inventory()
-
- if 'all' not in inventory or 'children' not in inventory['all']:
- return False
-
- children = inventory['all']['children']
- removed = False
-
- # Walk every group and remove the host
- for group_name, group_data in children.items():
- if isinstance(group_data, dict) and 'hosts' in group_data:
- if hostname in group_data['hosts']:
- del group_data['hosts'][hostname]
- removed = True
-
- if removed:
- self._save_inventory(inventory)
-
- # Also remove the persisted statuses (bootstrap + health)
- bootstrap_status_service.remove_host(hostname)
- try:
- host_status_service.remove_host(hostname)
- except Exception:
- pass
-
- return removed
-
- def update_host_groups(self, hostname: str, env_group: str = None, role_groups: List[str] = None, ansible_host: str = None) -> bool:
- """Met à jour les groupes d'un hôte existant
-
- Args:
- hostname: Nom de l'hôte à modifier
- env_group: Nouveau groupe d'environnement (None = pas de changement)
- role_groups: Nouvelle liste de groupes de rôles (None = pas de changement)
- ansible_host: Nouvelle adresse ansible_host (None = pas de changement)
-
- Returns:
- True si la mise à jour a réussi
- """
- inventory = self.load_inventory()
-
- if 'all' not in inventory or 'children' not in inventory['all']:
- return False
-
- children = inventory['all']['children']
-
- # Find the current environment group
- current_env_group = None
- current_role_groups = []
- current_ansible_host = None
-
- for group_name, group_data in children.items():
- if isinstance(group_data, dict) and 'hosts' in group_data:
- if hostname in group_data['hosts']:
- if group_name.startswith('env_'):
- current_env_group = group_name
- # Fetch ansible_host if defined
- host_vars = group_data['hosts'][hostname]
- if isinstance(host_vars, dict) and 'ansible_host' in host_vars:
- current_ansible_host = host_vars['ansible_host']
- elif group_name.startswith('role_'):
- current_role_groups.append(group_name)
-
- if not current_env_group:
- return False # Host not found
-
- # Apply the changes
- new_env_group = env_group if env_group else current_env_group
- new_role_groups = role_groups if role_groups is not None else current_role_groups
- new_ansible_host = ansible_host if ansible_host else current_ansible_host
-
- # Remove the host from all of its current groups
- for group_name, group_data in children.items():
- if isinstance(group_data, dict) and 'hosts' in group_data:
- if hostname in group_data['hosts']:
- del group_data['hosts'][hostname]
-
- # Add it to the new environment group
- if new_env_group not in children:
- children[new_env_group] = {'hosts': {}}
- if 'hosts' not in children[new_env_group]:
- children[new_env_group]['hosts'] = {}
-
- host_vars = None
- if new_ansible_host and new_ansible_host != hostname:
- host_vars = {'ansible_host': new_ansible_host}
- children[new_env_group]['hosts'][hostname] = host_vars
-
- # Add it to the new role groups
- for role_group in new_role_groups:
- if role_group not in children:
- children[role_group] = {'hosts': {}}
- if 'hosts' not in children[role_group]:
- children[role_group]['hosts'] = {}
- children[role_group]['hosts'][hostname] = None
-
- self._save_inventory(inventory)
- return True
-
- def host_exists(self, hostname: str) -> bool:
- """Vérifie si un hôte existe dans l'inventaire"""
- hosts = self.get_hosts_from_inventory()
- return any(h.name == hostname for h in hosts)
-
- def group_exists(self, group_name: str) -> bool:
- """Vérifie si un groupe existe dans l'inventaire"""
- return group_name in self.get_groups()
-
- def add_group(self, group_name: str) -> bool:
- """Ajoute un nouveau groupe à l'inventaire
-
- Args:
- group_name: Nom du groupe (doit commencer par env_ ou role_)
-
- Returns:
- True si l'ajout a réussi
- """
- if self.group_exists(group_name):
- return False # Group already exists
-
- inventory = self.load_inventory()
-
- # Make sure the structure exists
- if 'all' not in inventory:
- inventory['all'] = {}
- if 'children' not in inventory['all']:
- inventory['all']['children'] = {}
-
- # Add the empty group
- inventory['all']['children'][group_name] = {'hosts': {}}
-
- self._save_inventory(inventory)
- return True
-
- def rename_group(self, old_name: str, new_name: str) -> bool:
- """Renomme un groupe dans l'inventaire
-
- Args:
- old_name: Nom actuel du groupe
- new_name: Nouveau nom du groupe
-
- Returns:
- True si le renommage a réussi
- """
- if not self.group_exists(old_name):
- return False # Source group does not exist
-
- if self.group_exists(new_name):
- return False # Target group already exists
-
- inventory = self.load_inventory()
- children = inventory.get('all', {}).get('children', {})
-
- if old_name not in children:
- return False
-
- # Copy the group's data under the new name
- children[new_name] = children[old_name]
- del children[old_name]
-
- self._save_inventory(inventory)
- return True
-
- def delete_group(self, group_name: str, move_hosts_to: str = None) -> Dict[str, Any]:
- """Supprime un groupe de l'inventaire
-
- Args:
- group_name: Nom du groupe à supprimer
- move_hosts_to: Groupe vers lequel déplacer les hôtes (optionnel)
-
- Returns:
- Dict avec le résultat de l'opération
- """
- if not self.group_exists(group_name):
- return {"success": False, "error": "Groupe non trouvé"}
-
- inventory = self.load_inventory()
- children = inventory.get('all', {}).get('children', {})
-
- if group_name not in children:
- return {"success": False, "error": "Groupe non trouvé dans children"}
-
- group_data = children[group_name]
- hosts_in_group = list(group_data.get('hosts', {}).keys()) if group_data else []
-
- # If the group has hosts and they should be moved
- if hosts_in_group and move_hosts_to:
- if not self.group_exists(move_hosts_to) and move_hosts_to != group_name:
- # Create the target group if it does not exist
- children[move_hosts_to] = {'hosts': {}}
-
- if move_hosts_to in children:
- if 'hosts' not in children[move_hosts_to]:
- children[move_hosts_to]['hosts'] = {}
-
- # Move the hosts
- for hostname in hosts_in_group:
- host_vars = group_data['hosts'].get(hostname)
- children[move_hosts_to]['hosts'][hostname] = host_vars
-
- # Delete the group
- del children[group_name]
-
- self._save_inventory(inventory)
- return {
- "success": True,
- "hosts_affected": hosts_in_group,
- "hosts_moved_to": move_hosts_to if hosts_in_group and move_hosts_to else None
- }
-
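- # Illustrative call (hypothetical group names):
- #   delete_group("role_legacy", move_hosts_to="role_storage")
- #   -> {"success": True, "hosts_affected": ["nas.home"], "hosts_moved_to": "role_storage"}
-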
- def get_group_hosts(self, group_name: str) -> List[str]:
- """Retourne la liste des hôtes dans un groupe
-
- Args:
- group_name: Nom du groupe
-
- Returns:
- Liste des noms d'hôtes
- """
- inventory = self.load_inventory()
- children = inventory.get('all', {}).get('children', {})
-
- if group_name not in children:
- return []
-
- group_data = children[group_name]
- if not group_data or 'hosts' not in group_data:
- return []
-
- return list(group_data['hosts'].keys())
-
- async def execute_playbook(
- self,
- playbook: str,
- target: str = "all",
- extra_vars: Optional[Dict[str, Any]] = None,
- check_mode: bool = False,
- verbose: bool = False
- ) -> Dict[str, Any]:
- """Exécute un playbook Ansible"""
- # Résoudre le chemin du playbook
- # On accepte soit un nom avec extension, soit un nom sans extension (ex: "health-check")
- playbook_path = self.playbooks_dir / playbook
-
- # If the file does not exist as-is, try common extensions
- if not playbook_path.exists():
- pb_name = Path(playbook).name # strip any leading directories
- # If no extension was given, try .yml then .yaml
- if not Path(pb_name).suffix:
- for ext in (".yml", ".yaml"):
- candidate = self.playbooks_dir / f"{pb_name}{ext}"
- if candidate.exists():
- playbook_path = candidate
- break
-
- if not playbook_path.exists():
- # At this point no matching file was found
- raise FileNotFoundError(f"Playbook introuvable: {playbook}")
-
- # Build the ansible-playbook command
- cmd = [
- "ansible-playbook",
- str(playbook_path),
- "-i", str(self.inventory_path),
- "--limit", target
- ]
-
- if check_mode:
- cmd.append("--check")
-
- if verbose:
- cmd.append("-v")
-
- if extra_vars:
- cmd.extend(["--extra-vars", json.dumps(extra_vars)])
-
- private_key = find_ssh_private_key()
- if private_key:
- cmd.extend(["--private-key", private_key])
-
- if SSH_USER:
- cmd.extend(["-u", SSH_USER])
-
- start_time = perf_counter()
-
- try:
- # Run the command
- process = await asyncio.create_subprocess_exec(
- *cmd,
- stdout=asyncio.subprocess.PIPE,
- stderr=asyncio.subprocess.PIPE,
- cwd=str(self.ansible_dir)
- )
-
- stdout, stderr = await process.communicate()
- execution_time = perf_counter() - start_time
-
- return {
- "success": process.returncode == 0,
- "return_code": process.returncode,
- "stdout": stdout.decode('utf-8', errors='replace'),
- "stderr": stderr.decode('utf-8', errors='replace'),
- "execution_time": round(execution_time, 2),
- "command": " ".join(cmd)
- }
- except FileNotFoundError:
- return {
- "success": False,
- "return_code": -1,
- "stdout": "",
- "stderr": "ansible-playbook non trouvé. Vérifiez que Ansible est installé.",
- "execution_time": 0,
- "command": " ".join(cmd)
- }
- except Exception as e:
- return {
- "success": False,
- "return_code": -1,
- "stdout": "",
- "stderr": str(e),
- "execution_time": perf_counter() - start_time,
- "command": " ".join(cmd)
- }
-
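-# Sketch of a typical call from an async context (playbook and target are
-# illustrative; the bare name is resolved to health-check.yml by the lookup above):
-#   result = await ansible_service.execute_playbook(
-#       playbook="health-check",
-#       target="role_proxmox",
-#       check_mode=True,
-#   )
-#   if not result["success"]:
-#       print(result["stderr"])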
-
-# Global Ansible service instance
-ansible_service = AnsibleService(ANSIBLE_DIR)
-
-
-# ===== SSH BOOTSTRAP SERVICE =====
-
-class BootstrapRequest(BaseModel):
- """Requête de bootstrap pour un hôte"""
- host: str = Field(..., description="Adresse IP ou hostname de l'hôte")
- root_password: str = Field(..., description="Mot de passe root pour la connexion initiale")
- automation_user: str = Field(default="automation", description="Nom de l'utilisateur d'automatisation à créer")
-
-
-class CommandResult(BaseModel):
- """Résultat d'une commande SSH"""
- status: str
- return_code: int
- stdout: str
- stderr: Optional[str] = None
-
-
-def find_ssh_private_key() -> Optional[str]:
- """Trouve une clé privée SSH disponible en inspectant plusieurs répertoires."""
- candidate_dirs = []
- env_path = Path(SSH_KEY_PATH)
- candidate_dirs.append(env_path.parent)
- candidate_dirs.append(Path("/app/ssh_keys"))
- candidate_dirs.append(Path.home() / ".ssh")
-
- seen = set()
- key_paths: List[str] = []
-
- for directory in candidate_dirs:
- if not directory or not directory.exists():
- continue
- for name in [
- env_path.name,
- "id_automation_ansible",
- "id_rsa",
- "id_ed25519",
- "id_ecdsa",
- ]:
- path = directory / name
- if str(path) not in seen:
- seen.add(str(path))
- key_paths.append(str(path))
- # Dynamically add every key file that has no .pub extension
- for file in directory.iterdir():
- if file.is_file() and not file.suffix and not file.name.startswith("known_hosts"):
- if str(file) not in seen:
- seen.add(str(file))
- key_paths.append(str(file))
-
- for key_path in key_paths:
- if key_path and Path(key_path).exists():
- return key_path
-
- return None
-
-
-def run_ssh_command(
- host: str,
- command: str,
- ssh_user: str = "root",
- ssh_password: Optional[str] = None,
- timeout: int = 60
-) -> tuple:
- """Exécute une commande SSH sur un hôte distant.
-
- Returns:
- tuple: (return_code, stdout, stderr)
- """
- ssh_cmd = ["ssh"]
-
- # Common SSH options
- ssh_opts = [
- "-o", "StrictHostKeyChecking=no",
- "-o", "UserKnownHostsFile=/dev/null",
- "-o", "ConnectTimeout=10",
- "-o", "BatchMode=no" if ssh_password else "BatchMode=yes",
- ]
-
- # Without a password, fall back to the SSH key
- if not ssh_password:
- private_key = find_ssh_private_key()
- if private_key:
- ssh_opts.extend(["-i", private_key])
-
- ssh_cmd.extend(ssh_opts)
- ssh_cmd.append(f"{ssh_user}@{host}")
- ssh_cmd.append(command)
-
- try:
- if ssh_password:
- # Use sshpass for password authentication
- full_cmd = ["sshpass", "-p", ssh_password] + ssh_cmd
- else:
- full_cmd = ssh_cmd
-
- result = subprocess.run(
- full_cmd,
- capture_output=True,
- text=True,
- timeout=timeout
- )
- return result.returncode, result.stdout, result.stderr
- except subprocess.TimeoutExpired:
- return -1, "", f"Timeout après {timeout} secondes"
- except FileNotFoundError as e:
- if "sshpass" in str(e):
- return -1, "", "sshpass n'est pas installé. Installez-le avec: apt install sshpass"
- return -1, "", str(e)
- except Exception as e:
- return -1, "", str(e)
-
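-# Illustrative usage (hypothetical host and password): password auth for the
-# first contact, then key-based auth once bootstrap has installed the public key:
-#   rc, out, err = run_ssh_command("192.168.1.50", "uname -a", ssh_user="root", ssh_password="secret")
-#   rc, out, err = run_ssh_command("192.168.1.50", "echo ok", ssh_user="automation")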
-
-def bootstrap_host(host: str, root_password: str, automation_user: str = "automation") -> CommandResult:
- """Prépare un hôte pour Ansible (création user, clé SSH, sudo, python3) pour Debian/Alpine/FreeBSD.
-
- Utilise un script shell complet uploadé via heredoc pour éviter les problèmes de quoting.
- """
- import logging
- logger = logging.getLogger("bootstrap")
-
- # Look for the public key in several possible locations
- primary_dirs = [
- Path(SSH_KEY_PATH).parent,
- Path("/app/ssh_keys"),
- Path.home() / ".ssh",
- ]
- ssh_dir = primary_dirs[0]
- pub_paths = [
- SSH_KEY_PATH + ".pub",
- "/app/ssh_keys/id_rsa.pub",
- "/app/ssh_keys/id_ed25519.pub",
- "/app/ssh_keys/id_ecdsa.pub",
- "/app/ssh_keys/id_automation_ansible.pub",
- ]
-
- # Dynamically add every .pub key found in the SSH directories
- for directory in primary_dirs:
- if not directory.exists():
- continue
- for f in directory.iterdir():
- if f.is_file() and f.suffix == ".pub" and str(f) not in pub_paths:
- pub_paths.append(str(f))
-
- logger.info(f"SSH_KEY_PATH = {SSH_KEY_PATH}")
- logger.info(f"Recherche de clé publique dans: {pub_paths}")
-
- pub_key = None
- pub_path_used = None
-
- for pub_path in pub_paths:
- try:
- if Path(pub_path).exists():
- pub_key = Path(pub_path).read_text(encoding="utf-8").strip()
- if pub_key:
- pub_path_used = pub_path
- logger.info(f"Clé publique trouvée: {pub_path}")
- break
- except Exception as e:
- logger.warning(f"Erreur lecture {pub_path}: {e}")
- continue
-
- if not pub_key:
- # List the available files to help debugging
- ssh_dir = Path(SSH_KEY_PATH).parent
- available_files = []
- if ssh_dir.exists():
- available_files = [f.name for f in ssh_dir.iterdir()]
-
- raise HTTPException(
- status_code=500,
- detail=f"Clé publique SSH non trouvée. Chemins testés: {pub_paths}. Fichiers disponibles dans {ssh_dir}: {available_files}",
- )
-
- # Complete, robust shell script with detailed logging
- bootstrap_script = f"""#!/bin/sh
-set -e
-
-AUT_USER="{automation_user}"
-
-echo "=== Bootstrap Ansible Host ==="
-echo "User: $AUT_USER"
-echo ""
-
-# 1) OS detection
-if command -v apk >/dev/null 2>&1; then
- OS_TYPE="alpine"
- echo "[1/7] OS détecté: Alpine Linux"
-elif [ "$(uname -s 2>/dev/null)" = "FreeBSD" ] || \
- command -v pkg >/dev/null 2>&1 || \
- ( [ -f /etc/os-release ] && grep -qi 'ID=freebsd' /etc/os-release ); then
- OS_TYPE="freebsd"
- echo "[1/7] OS détecté: FreeBSD"
-else
- OS_TYPE="debian"
- echo "[1/7] OS détecté: Debian-like"
-fi
-
-# 2) Check / prepare the user
-echo "[2/7] Vérification utilisateur/groupe..."
-if id "$AUT_USER" >/dev/null 2>&1; then
- echo " - Utilisateur déjà existant: $AUT_USER (aucune suppression)"
-else
- echo " - Utilisateur inexistant, il sera créé"
-fi
-
-# 3) Create the user (idempotent)
-echo "[3/7] Création utilisateur $AUT_USER..."
-if id "$AUT_USER" >/dev/null 2>&1; then
- echo " - Utilisateur déjà présent, réutilisation"
-elif [ "$OS_TYPE" = "alpine" ]; then
- adduser -D "$AUT_USER"
- echo " - Utilisateur créé (Alpine: adduser -D)"
-elif [ "$OS_TYPE" = "freebsd" ]; then
- pw useradd "$AUT_USER" -m -s /bin/sh
- echo " - Utilisateur créé (FreeBSD: pw useradd)"
-else
- useradd -m -s /bin/bash "$AUT_USER" || useradd -m -s /bin/sh "$AUT_USER"
- echo " - Utilisateur créé (Debian: useradd -m)"
-fi
-
-# 3b) Make sure the account is not locked
-echo " - Vérification du verrouillage du compte..."
-if command -v passwd >/dev/null 2>&1; then
- passwd -u "$AUT_USER" 2>/dev/null || true
-fi
-if command -v usermod >/dev/null 2>&1; then
- usermod -U "$AUT_USER" 2>/dev/null || true
-fi
-
-# 4) SSH key setup
-echo "[4/7] Configuration clé SSH..."
-HOME_DIR=$(getent passwd "$AUT_USER" | cut -d: -f6)
-if [ -z "$HOME_DIR" ]; then
- HOME_DIR="/home/$AUT_USER"
-fi
-echo " - HOME_DIR: $HOME_DIR"
-
-mkdir -p "$HOME_DIR/.ssh"
-chown "$AUT_USER":"$AUT_USER" "$HOME_DIR/.ssh"
-chmod 700 "$HOME_DIR/.ssh"
-echo " - Répertoire .ssh créé et configuré"
-
-cat > "$HOME_DIR/.ssh/authorized_keys" << 'SSHKEY_EOF'
-{pub_key}
-SSHKEY_EOF
-
-chown "$AUT_USER":"$AUT_USER" "$HOME_DIR/.ssh/authorized_keys"
-chmod 600 "$HOME_DIR/.ssh/authorized_keys"
-echo " - Clé publique installée dans authorized_keys"
-
-if [ -s "$HOME_DIR/.ssh/authorized_keys" ]; then
- KEY_COUNT=$(wc -l < "$HOME_DIR/.ssh/authorized_keys")
- echo " - Vérification: $KEY_COUNT clé(s) dans authorized_keys"
-else
- echo " - ERREUR: authorized_keys vide ou absent!"
- exit 1
-fi
-
-# 5) Install sudo
-echo "[5/7] Installation sudo..."
-if command -v sudo >/dev/null 2>&1; then
- echo " - sudo déjà installé"
-else
- if [ "$OS_TYPE" = "alpine" ]; then
- apk add --no-cache sudo
- echo " - sudo installé (apk)"
- elif [ "$OS_TYPE" = "freebsd" ]; then
- pkg install -y sudo
- echo " - sudo installé (pkg)"
- else
- apt-get update -qq && apt-get install -y sudo
- echo " - sudo installé (apt)"
- fi
-fi
-
-# 6) Configure sudoers
-echo "[6/7] Configuration sudoers..."
-if [ ! -d /etc/sudoers.d ]; then
- mkdir -p /etc/sudoers.d
- chmod 750 /etc/sudoers.d 2>/dev/null || true
- echo " - Répertoire /etc/sudoers.d créé"
-fi
-echo "$AUT_USER ALL=(ALL) NOPASSWD: ALL" > /etc/sudoers.d/automation
-chmod 440 /etc/sudoers.d/automation
-echo " - Sudoers configuré: /etc/sudoers.d/automation"
-
-# 7) Install Python3
-echo "[7/7] Installation Python3..."
-if command -v python3 >/dev/null 2>&1; then
- PYTHON_VERSION=$(python3 --version 2>&1)
- echo " - Python3 déjà installé: $PYTHON_VERSION"
-else
- if [ "$OS_TYPE" = "alpine" ]; then
- apk add --no-cache python3
- echo " - Python3 installé (apk)"
- elif [ "$OS_TYPE" = "freebsd" ]; then
- pkg install -y python3
- echo " - Python3 installé (pkg)"
- else
- apt-get update -qq && apt-get install -y python3
- echo " - Python3 installé (apt)"
- fi
-fi
-
-echo ""
-echo "=== Bootstrap terminé avec succès ==="
-echo "Utilisateur: $AUT_USER"
-echo "HOME: $HOME_DIR"
-echo "SSH: $HOME_DIR/.ssh/authorized_keys"
-echo "Sudo: /etc/sudoers.d/automation"
-"""
-
- # Ship the script in a way that is compatible with all shells
- lines = bootstrap_script.splitlines()
-
- def _sh_single_quote(s: str) -> str:
- """Protège une chaîne pour un shell POSIX en simple quotes."""
- return "'" + s.replace("'", "'\"'\"'") + "'"
-
- quoted_lines = " ".join(_sh_single_quote(line) for line in lines)
- remote_cmd = f"printf '%s\\n' {quoted_lines} | sh"
-
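- # Example of the quoting performed above (illustrative): the script line
- #   echo "it's done"
- # becomes the printf argument
- #   'echo "it'"'"'s done"'
- # printf '%s\n' then reassembles the script verbatim on the remote side and
- # pipes it into sh, avoiding heredoc and quoting differences between shells.
-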
- rc, out, err = run_ssh_command(
- host,
- remote_cmd,
- ssh_user="root",
- ssh_password=root_password,
- )
-
- if rc != 0:
- raise HTTPException(
- status_code=500,
- detail={
- "status": "error",
- "return_code": rc,
- "stdout": out,
- "stderr": err,
- },
- )
-
- # Verification: test key-based SSH as the automation user
- verify_rc, verify_out, verify_err = run_ssh_command(
- host,
- "echo 'ssh_key_ok'",
- ssh_user=automation_user,
- ssh_password=None,
- )
-
- if verify_rc != 0:
- combined_stdout = (out or "") + f"\n\n[SSH VERIFY] Échec de la connexion par clé pour {automation_user}@{host}\n" + (verify_out or "")
- combined_stderr = (err or "") + f"\n\n[SSH VERIFY] " + (verify_err or "Aucune erreur détaillée")
- raise HTTPException(
- status_code=500,
- detail={
- "status": "error",
- "return_code": verify_rc,
- "stdout": combined_stdout,
- "stderr": combined_stderr,
- },
- )
-
- # Full success
- final_stdout = (out or "") + f"\n\n[SSH VERIFY] Connexion par clé OK pour {automation_user}@{host}"
- return CommandResult(
- status="ok",
- return_code=0,
- stdout=final_stdout,
- stderr=err,
- )
-
-
-# Hybrid database: hosts come from the Ansible inventory, tasks/logs live in memory
-class HybridDB:
- """Database that loads its hosts from the Ansible inventory"""
-
- def __init__(self, ansible_svc: AnsibleService):
- self.ansible_service = ansible_svc
- self._hosts_cache: Optional[List[Host]] = None
- self._hosts_cache_time: float = 0
- self._cache_ttl: float = 60 # 60-second cache
- # Runtime host statuses (in memory), seeded from the host status service
- self._host_runtime_status: Dict[str, Dict[str, Any]] = {}
- try:
- persisted_hosts = host_status_service.get_all_status()
- for host_name, info in persisted_hosts.items():
- last_seen_raw = info.get("last_seen")
- last_seen_dt: Optional[datetime] = None
- if isinstance(last_seen_raw, str):
- try:
- last_seen_dt = datetime.fromisoformat(last_seen_raw.replace("Z", "+00:00"))
- except Exception:
- last_seen_dt = None
- elif isinstance(last_seen_raw, datetime):
- last_seen_dt = last_seen_raw
-
- self._host_runtime_status[host_name] = {
- "status": info.get("status", "online"),
- "last_seen": last_seen_dt,
- "os": info.get("os"),
- }
- except Exception:
- # If reading fails, start over with a clean in-memory state
- self._host_runtime_status = {}
-
- # Tasks and logs kept in memory (persist for the lifetime of the process)
- self.tasks: List[Task] = []
-
- self.logs: List[LogEntry] = [
- LogEntry(id=1, timestamp=datetime.now(timezone.utc), level="INFO",
- message="Dashboard démarré - Inventaire Ansible chargé")
- ]
-
- self._id_counters = {"hosts": 100, "tasks": 1, "logs": 2}
-
- @property
- def hosts(self) -> List[Host]:
- """Charge les hôtes depuis l'inventaire Ansible avec cache"""
- current_time = time()
-
- # Return the cache while it is still valid
- if self._hosts_cache and (current_time - self._hosts_cache_time) < self._cache_ttl:
- return self._hosts_cache
-
- # Recharger depuis Ansible
- self._hosts_cache = self._load_hosts_from_ansible()
- self._hosts_cache_time = current_time
- return self._hosts_cache
-
- def _load_hosts_from_ansible(self) -> List[Host]:
- """Convertit l'inventaire Ansible en liste d'hôtes (sans doublons)"""
- hosts = []
- ansible_hosts = self.ansible_service.get_hosts_from_inventory()
-
-        # Load all bootstrap statuses
- all_bootstrap_status = bootstrap_status_service.get_all_status()
-
- for idx, ah in enumerate(ansible_hosts, start=1):
-            # Take the primary group from the host's groups
- primary_group = ah.groups[0] if ah.groups else "unknown"
-
-            # Fetch the bootstrap status for this host
- bootstrap_info = all_bootstrap_status.get(ah.name, {})
- bootstrap_ok = bootstrap_info.get("bootstrap_ok", False)
- bootstrap_date_str = bootstrap_info.get("bootstrap_date")
- bootstrap_date = None
- if bootstrap_date_str:
- try:
- bootstrap_date = datetime.fromisoformat(bootstrap_date_str.replace("Z", "+00:00"))
-                except Exception:
- pass
-
- runtime_status = self._host_runtime_status.get(ah.name, {})
- status = runtime_status.get("status", "online")
- last_seen = runtime_status.get("last_seen")
- os_label = runtime_status.get("os", f"Linux ({primary_group})")
-
- host = Host(
- id=str(idx),
- name=ah.name,
- ip=ah.ansible_host,
- status=status,
- os=os_label,
- last_seen=last_seen,
- groups=ah.groups, # Tous les groupes de l'hôte
- bootstrap_ok=bootstrap_ok,
- bootstrap_date=bootstrap_date
- )
- hosts.append(host)
-
- return hosts
-
- def refresh_hosts(self):
- """Force le rechargement des hôtes depuis Ansible"""
- self._hosts_cache = None
- return self.hosts
-
-    def update_host_status(self, host_name: str, status: str, os_info: Optional[str] = None):
-        """Updates a host's status after a health check"""
- for host in self.hosts:
- if host.name == host_name:
- host.status = status
- host.last_seen = datetime.now(timezone.utc)
- if os_info:
- host.os = os_info
- self._host_runtime_status[host_name] = {
- "status": host.status,
- "last_seen": host.last_seen,
- "os": host.os,
- }
-                # Persist to the JSON file shared with Ansible
- try:
- host_status_service.set_status(host_name, host.status, host.last_seen, host.os)
- except Exception:
-                    # Do not break execution if persistence fails
- pass
- break
-
- @property
- def metrics(self) -> SystemMetrics:
- """Calcule les métriques en temps réel basées sur les logs de tâches"""
- hosts = self.hosts
-
-        # Use the statistics from the task log files
- task_stats = task_log_service.get_stats()
- total_tasks = task_stats.get("total", 0)
- completed_tasks = task_stats.get("completed", 0)
- failed_tasks = task_stats.get("failed", 0)
- total_finished = completed_tasks + failed_tasks
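-        # Worked example: 8 completed + 2 failed finished tasks
-        # -> success_rate = round(8 / 10 * 100, 1) = 80.0;
-        # with no finished tasks the rate defaults to 100.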
-
- return SystemMetrics(
- online_hosts=len([h for h in hosts if h.status == "online"]),
- total_tasks=total_tasks,
- success_rate=round((completed_tasks / total_finished * 100) if total_finished > 0 else 100, 1),
- uptime=99.9,
- cpu_usage=0,
- memory_usage=0,
- disk_usage=0
- )
-
- def get_next_id(self, collection: str) -> int:
- self._id_counters[collection] += 1
- return self._id_counters[collection] - 1
-
-
-# Global instance of the hybrid database
-db = HybridDB(ansible_service)
-
-# FastAPI dependencies
-async def verify_api_key(api_key: str = Depends(api_key_header)) -> bool:
-    """Validates the provided API key"""
- if not api_key or api_key != API_KEY:
- raise HTTPException(status_code=401, detail="Clé API invalide ou manquante")
- return True
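-# Usage sketch for the dependency above (illustrative only; the actual header
-# name is whatever the api_key_header instance declares elsewhere in this
-# file, "X-API-Key" is an assumption):
-#   import requests
-#   requests.get("http://localhost:8008/api/hosts",
-#                headers={"X-API-Key": "<API_KEY>"})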
-
-@app.get("/", response_class=HTMLResponse)
-async def root(request: Request):
- """Page principale du dashboard"""
- return FileResponse(BASE_DIR / "index.html")
-
-
-@app.get("/api", response_class=HTMLResponse)
-async def api_home(request: Request):
- """Page d'accueil de l'API Homelab Dashboard"""
- return """
-
-
-
-
-
- Homelab Dashboard API
-
-
-
-
-
Homelab Dashboard API
-
- API REST moderne pour la gestion automatique d'homelab
-
-
-
-
Documentation API
-
Explorez les endpoints disponibles et testez les fonctionnalités
-
-
-
-
-
Endpoints Principaux
-
-
- GET
- /api/hosts
- - Liste des hôtes
-
-
- POST
- /api/tasks
- - Créer une tâche
-
-
- GET
- /api/metrics
- - Métriques système
-
-
- WS
- /ws
- - WebSocket temps réel
-
-
-
-
-
-
Version 1.0.0 | Développé avec FastAPI et technologies modernes
-
-
-
-
- """
-
-# ===== HOSTS ENDPOINTS - Static routes first =====
-
-@app.get("/api/hosts/groups")
-async def get_host_groups(api_key_valid: bool = Depends(verify_api_key)):
- """Récupère les groupes disponibles pour les hôtes (environnements et rôles)"""
- return {
- "env_groups": ansible_service.get_env_groups(),
- "role_groups": ansible_service.get_role_groups(),
- "all_groups": ansible_service.get_groups()
- }
-
-
-# ===== GROUPS ENDPOINTS - Management of environment and role groups =====
-
-@app.get("/api/groups")
-async def get_all_groups(api_key_valid: bool = Depends(verify_api_key)):
- """Récupère tous les groupes avec leurs détails"""
- env_groups = ansible_service.get_env_groups()
- role_groups = ansible_service.get_role_groups()
-
- groups = []
- for g in env_groups:
- hosts = ansible_service.get_group_hosts(g)
- groups.append({
- "name": g,
- "type": "env",
- "display_name": g.replace('env_', ''),
- "hosts_count": len(hosts),
- "hosts": hosts
- })
-
- for g in role_groups:
- hosts = ansible_service.get_group_hosts(g)
- groups.append({
- "name": g,
- "type": "role",
- "display_name": g.replace('role_', ''),
- "hosts_count": len(hosts),
- "hosts": hosts
- })
-
- return {
- "groups": groups,
- "env_count": len(env_groups),
- "role_count": len(role_groups)
- }
-
-
-@app.get("/api/groups/{group_name}")
-async def get_group_details(group_name: str, api_key_valid: bool = Depends(verify_api_key)):
- """Récupère les détails d'un groupe spécifique"""
- if not ansible_service.group_exists(group_name):
- raise HTTPException(status_code=404, detail=f"Groupe '{group_name}' non trouvé")
-
- hosts = ansible_service.get_group_hosts(group_name)
- group_type = "env" if group_name.startswith("env_") else "role" if group_name.startswith("role_") else "other"
-
- return {
- "name": group_name,
- "type": group_type,
- "display_name": group_name.replace('env_', '').replace('role_', ''),
- "hosts_count": len(hosts),
- "hosts": hosts
- }
-
-
-@app.post("/api/groups")
-async def create_group(group_request: GroupRequest, api_key_valid: bool = Depends(verify_api_key)):
- """Crée un nouveau groupe d'environnement ou de rôle"""
- # Construire le nom complet du groupe
- prefix = "env_" if group_request.type == "env" else "role_"
-
- # Si le nom ne commence pas déjà par le préfixe, l'ajouter
- if group_request.name.startswith(prefix):
- full_name = group_request.name
- else:
- full_name = f"{prefix}{group_request.name}"
-
-    # Check whether the group already exists
- if ansible_service.group_exists(full_name):
- raise HTTPException(status_code=400, detail=f"Le groupe '{full_name}' existe déjà")
-
-    # Create the group
- success = ansible_service.add_group(full_name)
-
- if not success:
- raise HTTPException(status_code=500, detail="Erreur lors de la création du groupe")
-
- return {
- "success": True,
- "message": f"Groupe '{full_name}' créé avec succès",
- "group": {
- "name": full_name,
- "type": group_request.type,
- "display_name": full_name.replace('env_', '').replace('role_', ''),
- "hosts_count": 0,
- "hosts": []
- }
- }
-
-
-@app.put("/api/groups/{group_name}")
-async def update_group(group_name: str, group_update: GroupUpdateRequest, api_key_valid: bool = Depends(verify_api_key)):
- """Renomme un groupe existant"""
- if not ansible_service.group_exists(group_name):
- raise HTTPException(status_code=404, detail=f"Groupe '{group_name}' non trouvé")
-
-    # Determine the group type
- if group_name.startswith("env_"):
- prefix = "env_"
- group_type = "env"
- elif group_name.startswith("role_"):
- prefix = "role_"
- group_type = "role"
- else:
- raise HTTPException(status_code=400, detail="Seuls les groupes env_ et role_ peuvent être modifiés")
-
-    # Build the new name
- if group_update.new_name.startswith(prefix):
- new_full_name = group_update.new_name
- else:
- new_full_name = f"{prefix}{group_update.new_name}"
-
-    # Check whether the new name already exists
- if ansible_service.group_exists(new_full_name):
- raise HTTPException(status_code=400, detail=f"Le groupe '{new_full_name}' existe déjà")
-
-    # Rename the group
- success = ansible_service.rename_group(group_name, new_full_name)
-
- if not success:
- raise HTTPException(status_code=500, detail="Erreur lors du renommage du groupe")
-
- hosts = ansible_service.get_group_hosts(new_full_name)
-
- return {
- "success": True,
- "message": f"Groupe renommé de '{group_name}' vers '{new_full_name}'",
- "group": {
- "name": new_full_name,
- "type": group_type,
- "display_name": new_full_name.replace('env_', '').replace('role_', ''),
- "hosts_count": len(hosts),
- "hosts": hosts
- }
- }
-
-
-@app.delete("/api/groups/{group_name}")
-async def delete_group(
- group_name: str,
- move_hosts_to: Optional[str] = None,
- api_key_valid: bool = Depends(verify_api_key)
-):
- """Supprime un groupe existant
-
- Args:
- group_name: Nom du groupe à supprimer
- move_hosts_to: Groupe vers lequel déplacer les hôtes (optionnel, query param)
- """
- if not ansible_service.group_exists(group_name):
- raise HTTPException(status_code=404, detail=f"Groupe '{group_name}' non trouvé")
-
-    # Check whether the group contains hosts
- hosts_in_group = ansible_service.get_group_hosts(group_name)
-
-    # If the group contains hosts and no destination group was given
-    if hosts_in_group and not move_hosts_to:
-        # For environment groups this is critical: hosts must belong to an env
- if group_name.startswith("env_"):
- raise HTTPException(
- status_code=400,
- detail=f"Le groupe contient {len(hosts_in_group)} hôte(s). Spécifiez 'move_hosts_to' pour les déplacer."
- )
-
-    # When moving hosts, make sure the target group is valid
-    if move_hosts_to:
-        # The target group must be of the same type
- if group_name.startswith("env_") and not move_hosts_to.startswith("env_"):
- raise HTTPException(status_code=400, detail="Les hôtes doivent être déplacés vers un groupe d'environnement")
- if group_name.startswith("role_") and not move_hosts_to.startswith("role_"):
- raise HTTPException(status_code=400, detail="Les hôtes doivent être déplacés vers un groupe de rôle")
-
-    # Delete the group
- result = ansible_service.delete_group(group_name, move_hosts_to)
-
- if not result.get("success"):
- raise HTTPException(status_code=500, detail=result.get("error", "Erreur lors de la suppression"))
-
- return {
- "success": True,
- "message": f"Groupe '{group_name}' supprimé avec succès",
- "hosts_affected": result.get("hosts_affected", []),
- "hosts_moved_to": result.get("hosts_moved_to")
- }
-
-
-def _host_to_response(host_obj, bootstrap_status: Optional["BootstrapStatus"] = None) -> Dict[str, Any]:
- """Map DB host + latest bootstrap to API-compatible payload."""
- return {
- "id": host_obj.id,
- "name": host_obj.name,
- "ip": getattr(host_obj, "ip_address", None),
- "status": host_obj.status,
- "os": "Linux", # valeur par défaut faute d'info stockée
- "last_seen": host_obj.last_seen,
- "created_at": host_obj.created_at,
- "groups": [g for g in [getattr(host_obj, "ansible_group", None)] if g],
- "bootstrap_ok": (bootstrap_status.status == "success") if bootstrap_status else False,
- "bootstrap_date": bootstrap_status.last_attempt if bootstrap_status else None,
- }
-
-
-@app.get("/api/hosts/by-name/{host_name}")
-async def get_host_by_name(
- host_name: str,
- api_key_valid: bool = Depends(verify_api_key),
- db_session: AsyncSession = Depends(get_db),
-):
- repo = HostRepository(db_session)
- bs_repo = BootstrapStatusRepository(db_session)
- host = await repo.get_by_ip(host_name) or await repo.get(host_name)
- if not host:
- raise HTTPException(status_code=404, detail="Hôte non trouvé")
- bootstrap = await bs_repo.latest_for_host(host.id)
- return _host_to_response(host, bootstrap)
-
-
-@app.get("/api/hosts")
-async def get_hosts(
- bootstrap_status: Optional[str] = None,
- limit: int = 100,
- offset: int = 0,
- api_key_valid: bool = Depends(verify_api_key),
- db_session: AsyncSession = Depends(get_db),
-):
- repo = HostRepository(db_session)
- bs_repo = BootstrapStatusRepository(db_session)
- hosts = await repo.list(limit=limit, offset=offset)
-    # If the database holds no hosts yet, fall back to the Ansible hosts via the hybrid DB
- if not hosts:
- hybrid_hosts = db.hosts
- fallback_results = []
- for h in hybrid_hosts:
-            # Apply the same bootstrap filters as the DB-backed version
- if bootstrap_status == "ready" and not h.bootstrap_ok:
- continue
- if bootstrap_status == "not_configured" and h.bootstrap_ok:
- continue
-
- fallback_results.append(
- {
- "id": h.id,
- "name": h.name,
- "ip": h.ip,
- "status": h.status,
- "os": h.os,
- "last_seen": h.last_seen,
-                # created_at is already handled by the Host Pydantic model (default_factory)
- "created_at": h.created_at,
- "groups": h.groups,
- "bootstrap_ok": h.bootstrap_ok,
- "bootstrap_date": h.bootstrap_date,
- }
- )
- return fallback_results
-
- results = []
- for host in hosts:
- bootstrap = await bs_repo.latest_for_host(host.id)
- if bootstrap_status == "ready" and not (bootstrap and bootstrap.status == "success"):
- continue
- if bootstrap_status == "not_configured" and bootstrap and bootstrap.status == "success":
- continue
- results.append(_host_to_response(host, bootstrap))
- return results
-
-
-@app.get("/api/hosts/{host_id}")
-async def get_host(
- host_id: str,
- api_key_valid: bool = Depends(verify_api_key),
- db_session: AsyncSession = Depends(get_db),
-):
- repo = HostRepository(db_session)
- bs_repo = BootstrapStatusRepository(db_session)
- host = await repo.get(host_id)
- if not host:
- raise HTTPException(status_code=404, detail="Hôte non trouvé")
- bootstrap = await bs_repo.latest_for_host(host.id)
- return _host_to_response(host, bootstrap)
-
-
-@app.post("/api/hosts")
-async def create_host(
- host_request: HostRequest,
- api_key_valid: bool = Depends(verify_api_key),
- db_session: AsyncSession = Depends(get_db),
-):
- repo = HostRepository(db_session)
- bs_repo = BootstrapStatusRepository(db_session)
-
-    # Check whether the host already exists
- existing = await repo.get_by_ip(host_request.name)
- if existing:
- raise HTTPException(status_code=400, detail=f"L'hôte '{host_request.name}' existe déjà")
-
-    # Validate the environment group
- env_groups = ansible_service.get_env_groups()
- if host_request.env_group not in env_groups and not host_request.env_group.startswith("env_"):
- raise HTTPException(status_code=400, detail=f"Le groupe d'environnement doit commencer par 'env_'. Groupes existants: {env_groups}")
-
-    # Validate the role groups
- role_groups = ansible_service.get_role_groups()
- for role in host_request.role_groups:
- if role not in role_groups and not role.startswith("role_"):
- raise HTTPException(status_code=400, detail=f"Le groupe de rôle '{role}' doit commencer par 'role_'. Groupes existants: {role_groups}")
-
- try:
-        # Add the host to the Ansible inventory
- ansible_service.add_host_to_inventory(
- hostname=host_request.name,
- env_group=host_request.env_group,
- role_groups=host_request.role_groups,
- ansible_host=host_request.ip,
- )
-
-        # Create the database record
- host = await repo.create(
- id=uuid.uuid4().hex,
- name=host_request.name,
- ip_address=host_request.ip or host_request.name,
- ansible_group=host_request.env_group,
- status="unknown",
- reachable=False,
- last_seen=None,
- )
- bootstrap = await bs_repo.latest_for_host(host.id)
-
- await db_session.commit()
-
-        # Notify the WebSocket clients
- await ws_manager.broadcast(
- {
- "type": "host_created",
- "data": _host_to_response(host, bootstrap),
- }
- )
-
- return {
- "message": f"Hôte '{host_request.name}' ajouté avec succès",
- "host": _host_to_response(host, bootstrap),
- "inventory_updated": True,
- }
-
- except HTTPException:
- raise
- except Exception as e:
- await db_session.rollback()
- raise HTTPException(status_code=500, detail=f"Erreur lors de l'ajout de l'hôte: {str(e)}")
-
-
-@app.put("/api/hosts/{host_name}")
-async def update_host(
- host_name: str,
- update_request: HostUpdateRequest,
- api_key_valid: bool = Depends(verify_api_key),
- db_session: AsyncSession = Depends(get_db),
-):
- repo = HostRepository(db_session)
- bs_repo = BootstrapStatusRepository(db_session)
- host = await repo.get_by_ip(host_name) or await repo.get(host_name)
- if not host:
- raise HTTPException(status_code=404, detail=f"Hôte '{host_name}' non trouvé")
-
-    # Validate the environment group if provided
- if update_request.env_group:
- env_groups = ansible_service.get_env_groups()
- if update_request.env_group not in env_groups and not update_request.env_group.startswith("env_"):
- raise HTTPException(status_code=400, detail=f"Le groupe d'environnement doit commencer par 'env_'")
-
-    # Validate the role groups if provided
- if update_request.role_groups:
- for role in update_request.role_groups:
- if not role.startswith("role_"):
- raise HTTPException(status_code=400, detail=f"Le groupe de rôle '{role}' doit commencer par 'role_'")
-
- try:
- ansible_service.update_host_groups(
- hostname=host_name,
- env_group=update_request.env_group,
- role_groups=update_request.role_groups,
- ansible_host=update_request.ansible_host,
- )
-
- await repo.update(
- host,
- ansible_group=update_request.env_group or host.ansible_group,
- )
- await db_session.commit()
-
- bootstrap = await bs_repo.latest_for_host(host.id)
-
- await ws_manager.broadcast(
- {
- "type": "host_updated",
- "data": _host_to_response(host, bootstrap),
- }
- )
-
- return {
- "message": f"Hôte '{host_name}' mis à jour avec succès",
- "host": _host_to_response(host, bootstrap),
- "inventory_updated": True,
- }
-
- except HTTPException:
- await db_session.rollback()
- raise
- except Exception as e:
- await db_session.rollback()
- raise HTTPException(status_code=500, detail=f"Erreur lors de la mise à jour: {str(e)}")
-
-
-@app.delete("/api/hosts/by-name/{host_name}")
-async def delete_host_by_name(
- host_name: str,
- api_key_valid: bool = Depends(verify_api_key),
- db_session: AsyncSession = Depends(get_db),
-):
- repo = HostRepository(db_session)
- host = await repo.get_by_ip(host_name) or await repo.get(host_name)
- if not host:
- raise HTTPException(status_code=404, detail=f"Hôte '{host_name}' non trouvé")
-
- try:
- ansible_service.remove_host_from_inventory(host_name)
- await repo.soft_delete(host.id)
- await db_session.commit()
-
- await ws_manager.broadcast(
- {
- "type": "host_deleted",
- "data": {"name": host_name},
- }
- )
-
- return {"message": f"Hôte '{host_name}' supprimé avec succès", "inventory_updated": True}
- except HTTPException:
- await db_session.rollback()
- raise
- except Exception as e:
- await db_session.rollback()
- raise HTTPException(status_code=500, detail=f"Erreur lors de la suppression: {str(e)}")
-
-
-@app.delete("/api/hosts/{host_id}")
-async def delete_host(
- host_id: str,
- api_key_valid: bool = Depends(verify_api_key),
- db_session: AsyncSession = Depends(get_db),
-):
- repo = HostRepository(db_session)
- host = await repo.get(host_id)
- if not host:
- raise HTTPException(status_code=404, detail="Hôte non trouvé")
-
- return await delete_host_by_name(host.name, api_key_valid, db_session)
-
-@app.get("/api/tasks")
-async def get_tasks(
- limit: int = 100,
- offset: int = 0,
- api_key_valid: bool = Depends(verify_api_key),
- db_session: AsyncSession = Depends(get_db),
-):
- """Récupère la liste de toutes les tâches"""
- repo = TaskRepository(db_session)
- tasks = await repo.list(limit=limit, offset=offset)
- return [
- {
- "id": t.id,
- "name": t.action,
- "host": t.target,
- "status": t.status,
- "progress": 100 if t.status == "completed" else (50 if t.status == "running" else 0),
- "start_time": t.started_at,
- "end_time": t.completed_at,
- "duration": None,
- "output": t.result_data.get("output") if t.result_data else None,
- "error": t.error_message,
- }
- for t in tasks
- ]
-
-
-@app.post("/api/tasks")
-async def create_task(
- task_request: TaskRequest,
- api_key_valid: bool = Depends(verify_api_key),
- db_session: AsyncSession = Depends(get_db),
-):
- """Crée une nouvelle tâche et exécute le playbook Ansible correspondant"""
- task_names = {
- 'upgrade': 'Mise à jour système',
- 'reboot': 'Redémarrage système',
- 'health-check': 'Vérification de santé',
- 'backup': 'Sauvegarde',
- 'deploy': 'Déploiement',
- 'rollback': 'Rollback',
- 'maintenance': 'Maintenance',
- 'bootstrap': 'Bootstrap Ansible'
- }
-
- repo = TaskRepository(db_session)
- task_id = uuid.uuid4().hex
- target = task_request.host or task_request.group or "all"
- playbook = ACTION_PLAYBOOK_MAP.get(task_request.action)
-
- task_obj = await repo.create(
- id=task_id,
- action=task_request.action,
- target=target,
- playbook=playbook,
- status="running",
- )
- await repo.update(task_obj, started_at=datetime.now(timezone.utc))
- await db_session.commit()
-
- task_name = task_names.get(task_request.action, f"Tâche {task_request.action}")
-
- response_data = {
- "id": task_obj.id,
- "name": task_name,
- "host": target,
- "status": "running",
- "progress": 0,
- "start_time": task_obj.started_at,
- "end_time": None,
- "duration": None,
- "output": None,
- "error": None,
- }
-
-    # Also add to db.tasks (in memory) for compatibility with execute_ansible_task
- mem_task = Task(
- id=task_obj.id,
- name=task_name,
- host=target,
- status="running",
- progress=0,
- start_time=task_obj.started_at
- )
- db.tasks.insert(0, mem_task)
-
-    # Notify the WebSocket clients
- await ws_manager.broadcast({
- "type": "task_created",
- "data": response_data
- })
-
-    # Run the Ansible playbook in the background and keep the handle
- if playbook:
- async_task = asyncio.create_task(execute_ansible_task(
- task_id=task_obj.id,
- playbook=playbook,
- target=target,
- extra_vars=task_request.extra_vars,
- check_mode=task_request.dry_run
- ))
- running_task_handles[task_obj.id] = {"asyncio_task": async_task, "process": None, "cancelled": False}
- else:
-        # No matching playbook: simulate the execution
- async_task = asyncio.create_task(simulate_task_execution(task_obj.id))
- running_task_handles[task_obj.id] = {"asyncio_task": async_task, "process": None, "cancelled": False}
-
- return response_data
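-# Illustrative request sketch for POST /api/tasks (field names follow their
-# use above; the exact TaskRequest schema is defined elsewhere in this file):
-#   {"action": "health-check", "host": "web-01", "dry_run": false}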
-
-
-# ===== TASK LOG ENDPOINTS (MARKDOWN) =====
-# IMPORTANT: These routes must be declared BEFORE /api/tasks/{task_id} to avoid conflicts
-
-@app.get("/api/tasks/logs")
-async def get_task_logs(
- status: Optional[str] = None,
- year: Optional[str] = None,
- month: Optional[str] = None,
- day: Optional[str] = None,
- hour_start: Optional[str] = None,
- hour_end: Optional[str] = None,
- target: Optional[str] = None,
- category: Optional[str] = None,
- source_type: Optional[str] = None,
- limit: int = 50,
- offset: int = 0,
- api_key_valid: bool = Depends(verify_api_key)
-):
- """Récupère les logs de tâches depuis les fichiers markdown avec filtrage et pagination"""
- logs, total_count = task_log_service.get_task_logs(
- year=year,
- month=month,
- day=day,
- status=status,
- target=target,
- category=category,
- source_type=source_type,
- hour_start=hour_start,
- hour_end=hour_end,
- limit=limit,
- offset=offset
- )
- return {
- "logs": [log.dict() for log in logs],
- "count": len(logs),
- "total_count": total_count,
- "has_more": offset + len(logs) < total_count,
- "filters": {
- "status": status,
- "year": year,
- "month": month,
- "day": day,
- "hour_start": hour_start,
- "hour_end": hour_end,
- "target": target,
- "source_type": source_type
- },
- "pagination": {
- "limit": limit,
- "offset": offset
- }
- }
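-# Illustrative query sketch (parameter names mirror the signature above; the
-# date value formats shown are assumptions):
-#   GET /api/tasks/logs?status=failed&year=2024&month=01&limit=20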
-
-
-@app.get("/api/tasks/logs/dates")
-async def get_task_logs_dates(api_key_valid: bool = Depends(verify_api_key)):
- """Récupère la structure des dates disponibles pour le filtrage"""
- return task_log_service.get_available_dates()
-
-
-@app.get("/api/tasks/logs/stats")
-async def get_task_logs_stats(api_key_valid: bool = Depends(verify_api_key)):
- """Récupère les statistiques des logs de tâches"""
- return task_log_service.get_stats()
-
-
-@app.get("/api/tasks/logs/{log_id}")
-async def get_task_log_content(log_id: str, api_key_valid: bool = Depends(verify_api_key)):
- """Récupère le contenu d'un log de tâche spécifique"""
- logs, _ = task_log_service.get_task_logs(limit=0)
- log = next((l for l in logs if l.id == log_id), None)
-
- if not log:
- raise HTTPException(status_code=404, detail="Log non trouvé")
-
- try:
- content = Path(log.path).read_text(encoding='utf-8')
- return {
- "log": log.dict(),
- "content": content
- }
- except Exception as e:
- raise HTTPException(status_code=500, detail=f"Erreur lecture du fichier: {str(e)}")
-
-
-@app.delete("/api/tasks/logs/{log_id}")
-async def delete_task_log(log_id: str, api_key_valid: bool = Depends(verify_api_key)):
- """Supprime un fichier markdown de log de tâche."""
- logs, _ = task_log_service.get_task_logs(limit=0)
- log = next((l for l in logs if l.id == log_id), None)
-
- if not log:
- raise HTTPException(status_code=404, detail="Log non trouvé")
-
- try:
- log_path = Path(log.path)
- if log_path.exists():
- log_path.unlink()
- return {"message": "Log supprimé", "id": log_id}
- except Exception as e:
- raise HTTPException(status_code=500, detail=f"Erreur suppression du fichier: {str(e)}")
-
-
-@app.get("/api/tasks/running")
-async def get_running_tasks(
- api_key_valid: bool = Depends(verify_api_key),
- db_session: AsyncSession = Depends(get_db),
-):
- """Récupère uniquement les tâches en cours d'exécution (running ou pending)"""
- repo = TaskRepository(db_session)
- tasks = await repo.list(limit=100, offset=0)
- running_tasks = [t for t in tasks if t.status in ("running", "pending")]
- return {
- "tasks": [
- {
- "id": t.id,
- "name": t.action,
- "host": t.target,
- "status": t.status,
- "progress": 50 if t.status == "running" else 0,
- "start_time": t.started_at,
- "end_time": t.completed_at,
- }
- for t in running_tasks
- ],
- "count": len(running_tasks)
- }
-
-
-@app.post("/api/tasks/{task_id}/cancel")
-async def cancel_task(
- task_id: str,
- api_key_valid: bool = Depends(verify_api_key),
- db_session: AsyncSession = Depends(get_db),
-):
- """Annule une tâche en cours d'exécution"""
- repo = TaskRepository(db_session)
- task = await repo.get(task_id)
-
- if not task:
- raise HTTPException(status_code=404, detail="Tâche non trouvée")
-
- if task.status not in ("running", "pending"):
- raise HTTPException(status_code=400, detail=f"La tâche n'est pas en cours (statut: {task.status})")
-
-    # Mark as cancelled in the handles dictionary and tear down the execution
-    if task_id in running_task_handles:
-        running_task_handles[task_id]["cancelled"] = True
-
-        # Cancel the asyncio task
-        async_task = running_task_handles[task_id].get("asyncio_task")
-        if async_task and not async_task.done():
-            async_task.cancel()
-
-        # Kill the Ansible process if present
-        process = running_task_handles[task_id].get("process")
-        if process:
-            try:
-                process.terminate()
-                # Wait briefly, then force-kill if needed
-                await asyncio.sleep(0.5)
-                if process.returncode is None:
-                    process.kill()
-            except Exception:
-                pass
-
-        # Clean up the handle (guarded by the membership check above,
-        # so tasks without a live handle no longer raise KeyError)
-        del running_task_handles[task_id]
-
-    # Update the status in the database
- await repo.update(
- task,
- status="cancelled",
- completed_at=datetime.now(timezone.utc),
- error_message="Tâche annulée par l'utilisateur"
- )
- await db_session.commit()
-
-    # Also update db.tasks (memory) if present
- for t in db.tasks:
- if str(t.id) == str(task_id):
- t.status = "cancelled"
- t.end_time = datetime.now(timezone.utc)
- t.error = "Tâche annulée par l'utilisateur"
- break
-
- # Log
- log_repo = LogRepository(db_session)
- await log_repo.create(
- level="WARNING",
- message=f"Tâche '{task.action}' annulée manuellement",
- source="task",
- task_id=task_id,
- )
- await db_session.commit()
-
-    # Notify the WebSocket clients
- await ws_manager.broadcast({
- "type": "task_cancelled",
- "data": {
- "id": task_id,
- "status": "cancelled",
- "message": "Tâche annulée par l'utilisateur"
- }
- })
-
- return {
- "success": True,
- "message": f"Tâche {task_id} annulée avec succès",
- "task_id": task_id
- }
-
-
-@app.get("/api/tasks/{task_id}")
-async def get_task(
- task_id: str,
- api_key_valid: bool = Depends(verify_api_key),
- db_session: AsyncSession = Depends(get_db),
-):
- """Récupère une tâche spécifique"""
- repo = TaskRepository(db_session)
- task = await repo.get(task_id)
- if not task:
- raise HTTPException(status_code=404, detail="Tâche non trouvée")
- return {
- "id": task.id,
- "name": task.action,
- "host": task.target,
- "status": task.status,
- "progress": 100 if task.status == "completed" else (50 if task.status == "running" else 0),
- "start_time": task.started_at,
- "end_time": task.completed_at,
- "duration": None,
- "output": task.result_data.get("output") if task.result_data else None,
- "error": task.error_message,
- }
-
-
-@app.delete("/api/tasks/{task_id}")
-async def delete_task(
- task_id: str,
- api_key_valid: bool = Depends(verify_api_key),
- db_session: AsyncSession = Depends(get_db),
-):
- """Supprime une tâche (soft delete non implémenté pour tasks, suppression directe)"""
- repo = TaskRepository(db_session)
- task = await repo.get(task_id)
- if not task:
- raise HTTPException(status_code=404, detail="Tâche non trouvée")
-
- await db_session.delete(task)
- await db_session.commit()
-
-    # Notify the WebSocket clients
- await ws_manager.broadcast({
- "type": "task_deleted",
- "data": {"id": task_id}
- })
-
- return {"message": "Tâche supprimée avec succès"}
-
-@app.get("/api/logs")
-async def get_logs(
- limit: int = 50,
- offset: int = 0,
- level: Optional[str] = None,
- source: Optional[str] = None,
- api_key_valid: bool = Depends(verify_api_key),
- db_session: AsyncSession = Depends(get_db),
-):
- """Récupère les logs récents avec filtrage optionnel"""
- repo = LogRepository(db_session)
- logs = await repo.list(limit=limit, offset=offset, level=level, source=source)
- return [
- {
- "id": log.id,
- "timestamp": log.created_at,
- "level": log.level,
- "message": log.message,
- "source": log.source,
- "host": log.host_id,
- }
- for log in logs
- ]
-
-
-@app.post("/api/logs")
-async def create_log(
- level: str,
- message: str,
- source: Optional[str] = None,
- host_id: Optional[str] = None,
- api_key_valid: bool = Depends(verify_api_key),
- db_session: AsyncSession = Depends(get_db),
-):
- """Ajoute une nouvelle entrée de log"""
- repo = LogRepository(db_session)
- log = await repo.create(
- level=level.upper(),
- message=message,
- source=source,
- host_id=host_id,
- )
- await db_session.commit()
-
- response_data = {
- "id": log.id,
- "timestamp": log.created_at,
- "level": log.level,
- "message": log.message,
- "source": log.source,
- "host": log.host_id,
- }
-
-    # Notify the WebSocket clients
- await ws_manager.broadcast({
- "type": "new_log",
- "data": response_data
- })
-
- return response_data
-
-
-@app.delete("/api/logs")
-async def clear_logs(
- api_key_valid: bool = Depends(verify_api_key),
- db_session: AsyncSession = Depends(get_db),
-):
- """Efface tous les logs (attention: opération destructive)"""
- from sqlalchemy import delete
- from models.log import Log as LogModel
- await db_session.execute(delete(LogModel))
- await db_session.commit()
- return {"message": "Tous les logs ont été supprimés"}
-
-@app.get("/api/metrics", response_model=SystemMetrics)
-async def get_metrics(api_key_valid: bool = Depends(verify_api_key)):
- """Récupère les métriques système calculées dynamiquement"""
- return db.metrics
-
-
-@app.post("/api/hosts/refresh")
-async def refresh_hosts(api_key_valid: bool = Depends(verify_api_key)):
- """Force le rechargement des hôtes depuis l'inventaire Ansible"""
- ansible_service.invalidate_cache() # Clear ansible inventory cache first
- hosts = db.refresh_hosts()
-
-    # Notify the WebSocket clients
- await ws_manager.broadcast({
- "type": "hosts_refreshed",
- "data": {"count": len(hosts)}
- })
-
- return {"message": f"{len(hosts)} hôtes rechargés depuis l'inventaire Ansible"}
-
-
-# ===== ANSIBLE ENDPOINTS =====
-
-@app.get("/api/ansible/playbooks")
-async def get_ansible_playbooks(
- target: Optional[str] = None,
- api_key_valid: bool = Depends(verify_api_key)
-):
- """Liste les playbooks Ansible disponibles avec leurs catégories.
-
- Args:
- target: Filtrer les playbooks compatibles avec cet hôte ou groupe (optionnel)
- """
- if target:
- playbooks = ansible_service.get_compatible_playbooks(target)
- else:
- playbooks = ansible_service.get_playbooks()
-
- return {
- "playbooks": playbooks,
- "categories": ansible_service.get_playbook_categories(),
- "ansible_dir": str(ANSIBLE_DIR),
- "filter": target
- }
-
-@app.get("/api/ansible/inventory")
-async def get_ansible_inventory(
- group: Optional[str] = None,
- api_key_valid: bool = Depends(verify_api_key)
-):
- """Récupère l'inventaire Ansible avec les hôtes et groupes.
-
- Args:
- group: Filtrer les hôtes par groupe (optionnel)
- """
- return {
- "hosts": [h.dict() for h in ansible_service.get_hosts_from_inventory(group_filter=group)],
- "groups": ansible_service.get_groups(),
- "inventory_path": str(ansible_service.inventory_path),
- "filter": group
- }
-
-@app.post("/api/ansible/execute")
-async def execute_ansible_playbook(
- request: AnsibleExecutionRequest,
- api_key_valid: bool = Depends(verify_api_key),
- db_session: AsyncSession = Depends(get_db)
-):
- """Exécute un playbook Ansible directement avec validation de compatibilité"""
- start_time_dt = datetime.now(timezone.utc)
-
-    # Validate playbook/target compatibility
- playbooks = ansible_service.get_playbooks()
- playbook_info = next((pb for pb in playbooks if pb['filename'] == request.playbook or pb['name'] == request.playbook.replace('.yml', '').replace('.yaml', '')), None)
-
- if playbook_info:
- playbook_hosts = playbook_info.get('hosts', 'all')
- if not ansible_service.is_target_compatible_with_playbook(request.target, playbook_hosts):
- raise HTTPException(
- status_code=400,
- detail=f"Le playbook '{request.playbook}' (hosts: {playbook_hosts}) n'est pas compatible avec la cible '{request.target}'. "
- f"Ce playbook ne peut être exécuté que sur: {playbook_hosts}"
- )
-
-    # Create a task in the database
- task_repo = TaskRepository(db_session)
- task_id = f"pb_{uuid.uuid4().hex[:12]}"
- playbook_name = request.playbook.replace('.yml', '').replace('-', ' ').title()
-
- db_task = await task_repo.create(
- id=task_id,
- action=f"playbook:{request.playbook}",
- target=request.target,
- playbook=request.playbook,
- status="running",
- )
- await task_repo.update(db_task, started_at=start_time_dt)
- await db_session.commit()
-
-    # Also create in memory for compatibility
- task = Task(
- id=task_id,
- name=f"Playbook: {playbook_name}",
- host=request.target,
- status="running",
- progress=0,
- start_time=start_time_dt
- )
- db.tasks.insert(0, task)
-
- try:
- result = await ansible_service.execute_playbook(
- playbook=request.playbook,
- target=request.target,
- extra_vars=request.extra_vars,
- check_mode=request.check_mode,
- verbose=request.verbose
- )
-
-        # Update the task
- task.status = "completed" if result["success"] else "failed"
- task.progress = 100
- task.end_time = datetime.now(timezone.utc)
- task.duration = f"{result.get('execution_time', 0):.1f}s"
- task.output = result.get("stdout", "")
- task.error = result.get("stderr", "") if not result["success"] else None
-
-        # Add a log entry
- log_entry = LogEntry(
- id=db.get_next_id("logs"),
- timestamp=datetime.now(timezone.utc),
- level="INFO" if result["success"] else "ERROR",
- message=f"Playbook {request.playbook} exécuté sur {request.target}: {'succès' if result['success'] else 'échec'}",
- source="ansible",
- host=request.target
- )
- db.logs.insert(0, log_entry)
-
-        # Save the markdown log
- try:
- task_log_service.save_task_log(
- task=task,
- output=result.get("stdout", ""),
- error=result.get("stderr", "")
- )
- except Exception as log_error:
- print(f"Erreur sauvegarde log markdown: {log_error}")
-
- await ws_manager.broadcast({
- "type": "ansible_execution",
- "data": result
- })
-
-        # Update the database
- await task_repo.update(
- db_task,
- status=task.status,
- completed_at=task.end_time,
- error_message=task.error,
- result_data={"output": result.get("stdout", "")[:5000]}
- )
- await db_session.commit()
-
-        # Send an ntfy notification (non-blocking)
- if result["success"]:
- asyncio.create_task(notification_service.notify_task_completed(
- task_name=task.name,
- target=request.target,
- duration=task.duration
- ))
- else:
- asyncio.create_task(notification_service.notify_task_failed(
- task_name=task.name,
- target=request.target,
- error=result.get("stderr", "Erreur inconnue")[:200]
- ))
-
-        # Add the task_id to the result
- result["task_id"] = task_id
-
- return result
- except FileNotFoundError as e:
- task.status = "failed"
- task.end_time = datetime.now(timezone.utc)
- task.error = str(e)
- task_log_service.save_task_log(task=task, error=str(e))
- await task_repo.update(db_task, status="failed", completed_at=task.end_time, error_message=str(e))
- await db_session.commit()
-        # Send an ntfy notification (non-blocking)
- asyncio.create_task(notification_service.notify_task_failed(
- task_name=task.name,
- target=request.target,
- error=str(e)[:200]
- ))
- raise HTTPException(status_code=404, detail=str(e))
- except Exception as e:
- task.status = "failed"
- task.end_time = datetime.now(timezone.utc)
- task.error = str(e)
- task_log_service.save_task_log(task=task, error=str(e))
- await task_repo.update(db_task, status="failed", completed_at=task.end_time, error_message=str(e))
- await db_session.commit()
-        # Send an ntfy notification (non-blocking)
- asyncio.create_task(notification_service.notify_task_failed(
- task_name=task.name,
- target=request.target,
- error=str(e)[:200]
- ))
- raise HTTPException(status_code=500, detail=str(e))
-
-@app.get("/api/ansible/groups")
-async def get_ansible_groups(api_key_valid: bool = Depends(verify_api_key)):
- """Récupère la liste des groupes Ansible"""
- return {"groups": ansible_service.get_groups()}
-
-
-# ===== PLAYBOOK CRUD ENDPOINTS =====
-
-class PlaybookContentRequest(BaseModel):
- """Requête pour sauvegarder le contenu d'un playbook"""
- content: str = Field(..., description="Contenu YAML du playbook")
-
-
-@app.get("/api/playbooks/{filename}/content")
-async def get_playbook_content(
- filename: str,
- api_key_valid: bool = Depends(verify_api_key)
-):
- """Récupère le contenu d'un playbook"""
- playbook_path = ansible_service.playbooks_dir / filename
-
-    # Check for a valid file extension
- if not filename.endswith(('.yml', '.yaml')):
- raise HTTPException(status_code=400, detail="Extension de fichier invalide. Utilisez .yml ou .yaml")
-
- if not playbook_path.exists():
- raise HTTPException(status_code=404, detail=f"Playbook non trouvé: {filename}")
-
-    # Make sure the file really is inside the playbooks directory (security)
- try:
- playbook_path.resolve().relative_to(ansible_service.playbooks_dir.resolve())
- except ValueError:
- raise HTTPException(status_code=403, detail="Accès non autorisé")
-
- try:
- content = playbook_path.read_text(encoding='utf-8')
- stat = playbook_path.stat()
- return {
- "filename": filename,
- "content": content,
- "size": stat.st_size,
- "modified": datetime.fromtimestamp(stat.st_mtime, tz=timezone.utc).isoformat()
- }
- except Exception as e:
- raise HTTPException(status_code=500, detail=f"Erreur lecture fichier: {str(e)}")
-
-
-@app.put("/api/playbooks/{filename}/content")
-async def save_playbook_content(
- filename: str,
- request: PlaybookContentRequest,
- api_key_valid: bool = Depends(verify_api_key)
-):
- """Sauvegarde le contenu d'un playbook (création ou modification)"""
- # Vérifier les extensions valides
- if not filename.endswith(('.yml', '.yaml')):
- raise HTTPException(status_code=400, detail="Extension de fichier invalide. Utilisez .yml ou .yaml")
-
- # Valider le nom de fichier (sécurité)
- import re
- if not re.match(r'^[a-zA-Z0-9_-]+\.(yml|yaml)$', filename):
- raise HTTPException(status_code=400, detail="Nom de fichier invalide")
-
- playbook_path = ansible_service.playbooks_dir / filename
-
- # S'assurer que le répertoire existe
- ansible_service.playbooks_dir.mkdir(parents=True, exist_ok=True)
-
-    # Validate the YAML content
- try:
- parsed = yaml.safe_load(request.content)
- if parsed is None:
- raise HTTPException(status_code=400, detail="Contenu YAML vide ou invalide")
- except yaml.YAMLError as e:
- raise HTTPException(status_code=400, detail=f"Erreur de syntaxe YAML: {str(e)}")
-
- is_new = not playbook_path.exists()
-
- try:
- playbook_path.write_text(request.content, encoding='utf-8')
- stat = playbook_path.stat()
-
-        # Log the action
- action = "créé" if is_new else "modifié"
- log_entry = LogEntry(
- id=db.get_next_id("logs"),
- timestamp=datetime.now(timezone.utc),
- level="INFO",
- message=f"Playbook {filename} {action}",
- source="playbook_editor"
- )
- db.logs.insert(0, log_entry)
-
- return {
- "success": True,
- "message": f"Playbook {filename} {'créé' if is_new else 'sauvegardé'} avec succès",
- "filename": filename,
- "size": stat.st_size,
- "modified": datetime.fromtimestamp(stat.st_mtime, tz=timezone.utc).isoformat(),
- "is_new": is_new
- }
- except Exception as e:
- raise HTTPException(status_code=500, detail=f"Erreur sauvegarde fichier: {str(e)}")
-
-
-@app.delete("/api/playbooks/{filename}")
-async def delete_playbook(
- filename: str,
- api_key_valid: bool = Depends(verify_api_key)
-):
- """Supprime un playbook"""
- # Vérifier les extensions valides
- if not filename.endswith(('.yml', '.yaml')):
- raise HTTPException(status_code=400, detail="Extension de fichier invalide")
-
- playbook_path = ansible_service.playbooks_dir / filename
-
- if not playbook_path.exists():
- raise HTTPException(status_code=404, detail=f"Playbook non trouvé: {filename}")
-
-    # Make sure the file really is inside the playbooks directory (security)
- try:
- playbook_path.resolve().relative_to(ansible_service.playbooks_dir.resolve())
- except ValueError:
- raise HTTPException(status_code=403, detail="Accès non autorisé")
-
- try:
- playbook_path.unlink()
-
-        # Log the action
- log_entry = LogEntry(
- id=db.get_next_id("logs"),
- timestamp=datetime.now(timezone.utc),
- level="WARN",
- message=f"Playbook {filename} supprimé",
- source="playbook_editor"
- )
- db.logs.insert(0, log_entry)
-
- return {
- "success": True,
- "message": f"Playbook {filename} supprimé avec succès"
- }
- except Exception as e:
- raise HTTPException(status_code=500, detail=f"Erreur suppression fichier: {str(e)}")
-
-
-@app.get("/api/ansible/ssh-config")
-async def get_ssh_config(api_key_valid: bool = Depends(verify_api_key)):
- """Diagnostic de la configuration SSH pour le bootstrap"""
- ssh_key_path = Path(SSH_KEY_PATH)
- ssh_dir = ssh_key_path.parent
-
-    # List the files in the SSH directory
- available_files = []
- if ssh_dir.exists():
- available_files = [f.name for f in ssh_dir.iterdir()]
-
-    # Check the keys
- private_key_exists = ssh_key_path.exists()
- public_key_exists = Path(SSH_KEY_PATH + ".pub").exists()
-
-    # Look for other public keys
-    pub_keys_found = []
-    for key_type in ["id_rsa", "id_ed25519", "id_ecdsa", "id_dsa"]:
-        key_path = ssh_dir / f"{key_type}.pub"
-        if key_path.exists():
-            pub_keys_found.append(str(key_path))
-
-    # Find the private key that will be used
- active_private_key = find_ssh_private_key()
-
- return {
- "ssh_key_path": SSH_KEY_PATH,
- "ssh_dir": str(ssh_dir),
- "ssh_dir_exists": ssh_dir.exists(),
- "private_key_exists": private_key_exists,
- "public_key_exists": public_key_exists,
- "available_files": available_files,
- "public_keys_found": pub_keys_found,
- "active_private_key": active_private_key,
- "ssh_user": SSH_USER,
- "sshpass_available": shutil.which("sshpass") is not None,
- }
-
-
-@app.post("/api/ansible/adhoc", response_model=AdHocCommandResult)
-async def execute_adhoc_command(
- request: AdHocCommandRequest,
- api_key_valid: bool = Depends(verify_api_key),
- db_session: AsyncSession = Depends(get_db)
-):
- """Exécute une commande ad-hoc Ansible sur un ou plusieurs hôtes.
-
- Exemples:
- - Lister les fichiers: {"target": "all", "command": "ls -la /tmp"}
- - Vérifier l'espace disque: {"target": "proxmox", "command": "df -h", "become": true}
- - Redémarrer un service: {"target": "web-servers", "command": "systemctl restart nginx", "become": true}
- """
- start_time_perf = perf_counter()
- start_time_dt = datetime.now(timezone.utc)
-
-    # Create a task in the database
- task_repo = TaskRepository(db_session)
- task_id = f"adhoc_{uuid.uuid4().hex[:12]}"
- task_name = f"Ad-hoc: {request.command[:40]}{'...' if len(request.command) > 40 else ''}"
-
- db_task = await task_repo.create(
- id=task_id,
- action=f"adhoc:{request.module}",
- target=request.target,
- playbook=None,
- status="running",
- )
- await task_repo.update(db_task, started_at=start_time_dt)
- await db_session.commit()
-
-    # Also create in memory for compatibility
- task = Task(
- id=task_id,
- name=task_name,
- host=request.target,
- status="running",
- progress=0,
- start_time=start_time_dt
- )
- db.tasks.insert(0, task)
-
-    # Build the ansible command
- ansible_cmd = [
- "ansible",
- request.target,
- "-i", str(ANSIBLE_DIR / "inventory" / "hosts.yml"),
- "-m", request.module,
- "-a", request.command,
- "--timeout", str(request.timeout),
- ]
-
-    # Add the options
- if request.become:
- ansible_cmd.append("--become")
-
- private_key = find_ssh_private_key()
- if private_key:
- ansible_cmd.extend(["--private-key", private_key])
-
- if SSH_USER:
- ansible_cmd.extend(["-u", SSH_USER])
-
- try:
- result = subprocess.run(
- ansible_cmd,
- capture_output=True,
- text=True,
- timeout=request.timeout + 10,
- cwd=str(ANSIBLE_DIR)
- )
-
- duration = perf_counter() - start_time_perf
- success = result.returncode == 0
-
-        # Update the task
- task.status = "completed" if success else "failed"
- task.progress = 100
- task.end_time = datetime.now(timezone.utc)
- task.duration = f"{round(duration, 2)}s"
- task.output = result.stdout
- task.error = result.stderr if result.stderr else None
-
-        # Save the task log as markdown (ad-hoc command)
- task_log_service.save_task_log(task, output=result.stdout, error=result.stderr or "", source_type='adhoc')
-
-        # Log the execution
- log_entry = LogEntry(
- id=db.get_next_id("logs"),
- timestamp=datetime.now(timezone.utc),
- level="INFO" if success else "WARN",
- message=f"Ad-hoc [{request.module}] sur {request.target}: {request.command[:50]}{'...' if len(request.command) > 50 else ''}",
- source="ansible-adhoc",
- host=request.target
- )
- db.logs.insert(0, log_entry)
-
-        # Notify via WebSocket
- await ws_manager.broadcast({
- "type": "adhoc_executed",
- "data": {
- "target": request.target,
- "command": request.command,
- "success": success,
- "task_id": task_id
- }
- })
-
-        # Save to the ad-hoc command history (for reuse)
- await adhoc_history_service.add_command(
- command=request.command,
- target=request.target,
- module=request.module,
- become=request.become,
- category=request.category or "default"
- )
-
-        # Update the database
- await task_repo.update(
- db_task,
- status=task.status,
- completed_at=task.end_time,
- error_message=task.error,
- result_data={"output": result.stdout[:5000] if result.stdout else None}
- )
- await db_session.commit()
-
-        # Send an ntfy notification (non-blocking)
- if success:
- asyncio.create_task(notification_service.notify_task_completed(
- task_name=task.name,
- target=request.target,
- duration=task.duration
- ))
- else:
- asyncio.create_task(notification_service.notify_task_failed(
- task_name=task.name,
- target=request.target,
- error=(result.stderr or "Erreur inconnue")[:200]
- ))
-
- return AdHocCommandResult(
- target=request.target,
- command=request.command,
- success=success,
- return_code=result.returncode,
- stdout=result.stdout,
- stderr=result.stderr if result.stderr else None,
- duration=round(duration, 2)
- )
-
- except subprocess.TimeoutExpired:
- duration = perf_counter() - start_time_perf
-        # Mark the task as failed
- task.status = "failed"
- task.progress = 100
- task.end_time = datetime.now(timezone.utc)
- task.duration = f"{round(duration, 2)}s"
- task.error = f"Timeout après {request.timeout} secondes"
-
-        # Save the task log (ad-hoc timeout)
- task_log_service.save_task_log(task, error=task.error, source_type='adhoc')
-
-        # Update the database
- await task_repo.update(db_task, status="failed", completed_at=task.end_time, error_message=task.error)
- await db_session.commit()
-
-        # Send an ntfy notification (non-blocking)
- asyncio.create_task(notification_service.notify_task_failed(
- task_name=task.name,
- target=request.target,
- error=task.error[:200]
- ))
-
- return AdHocCommandResult(
- target=request.target,
- command=request.command,
- success=False,
- return_code=-1,
- stdout="",
- stderr=f"Timeout après {request.timeout} secondes",
- duration=round(duration, 2)
- )
- except FileNotFoundError:
- duration = perf_counter() - start_time_perf
- error_msg = "ansible non trouvé. Vérifiez que Ansible est installé et accessible."
-        # Mark the task as failed
- task.status = "failed"
- task.progress = 100
- task.end_time = datetime.now(timezone.utc)
- task.duration = f"{round(duration, 2)}s"
- task.error = error_msg
-
-        # Save the task log (ad-hoc file not found)
- task_log_service.save_task_log(task, error=error_msg, source_type='adhoc')
-
-        # Update the database
- await task_repo.update(db_task, status="failed", completed_at=task.end_time, error_message=error_msg)
- await db_session.commit()
-
-        # Send an ntfy notification (non-blocking)
- asyncio.create_task(notification_service.notify_task_failed(
- task_name=task.name,
- target=request.target,
- error=error_msg[:200]
- ))
-
- return AdHocCommandResult(
- target=request.target,
- command=request.command,
- success=False,
- return_code=-1,
- stdout="",
- stderr=error_msg,
- duration=round(duration, 2)
- )
- except Exception as e:
- duration = perf_counter() - start_time_perf
- error_msg = f"Erreur interne: {str(e)}"
-        # Mark the task as failed
- task.status = "failed"
- task.progress = 100
- task.end_time = datetime.now(timezone.utc)
- task.duration = f"{round(duration, 2)}s"
- task.error = error_msg
-
-        # Save the task log (ad-hoc exception)
- task_log_service.save_task_log(task, error=error_msg, source_type='adhoc')
-
-        # Update the database
- await task_repo.update(db_task, status="failed", completed_at=task.end_time, error_message=error_msg)
- await db_session.commit()
-
-        # Send an ntfy notification (non-blocking)
- asyncio.create_task(notification_service.notify_task_failed(
- task_name=task.name,
- target=request.target,
- error=error_msg[:200]
- ))
-
- # Return a proper result instead of raising HTTP 500
- return AdHocCommandResult(
- target=request.target,
- command=request.command,
- success=False,
- return_code=-1,
- stdout="",
- stderr=error_msg,
- duration=round(duration, 2)
- )
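-# Illustrative request sketch for POST /api/ansible/adhoc (values are
-# placeholders; "shell" as the module value is an assumption):
-#   {"target": "all", "module": "shell", "command": "uptime",
-#    "become": false, "timeout": 30}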
-
-
-@app.post("/api/ansible/bootstrap", response_model=CommandResult)
-async def bootstrap_ansible_host(
- request: BootstrapRequest,
- api_key_valid: bool = Depends(verify_api_key)
-):
- """Bootstrap un hôte pour Ansible.
-
- Cette opération:
- 1. Se connecte à l'hôte via SSH avec le mot de passe root
- 2. Crée l'utilisateur d'automatisation (par défaut: automation)
- 3. Configure la clé SSH publique pour l'authentification sans mot de passe
- 4. Installe et configure sudo pour cet utilisateur
- 5. Installe Python3 (requis par Ansible)
- 6. Vérifie la connexion SSH par clé
-
- Supporte: Debian/Ubuntu, Alpine Linux, FreeBSD
- """
- import logging
- import traceback
- logger = logging.getLogger("bootstrap_endpoint")
-
- try:
- logger.info(f"Bootstrap request for host={request.host}, user={request.automation_user}")
- result = bootstrap_host(
- host=request.host,
- root_password=request.root_password,
- automation_user=request.automation_user
- )
- logger.info(f"Bootstrap result: status={result.status}, return_code={result.return_code}")
-
-        # If the bootstrap failed (return_code != 0), raise an exception with the details
- if result.return_code != 0:
- raise HTTPException(
- status_code=500,
- detail={
- "status": result.status,
- "return_code": result.return_code,
- "stdout": result.stdout,
- "stderr": result.stderr
- }
- )
-
-        # Resolve the host name (the request may carry an IP or a hostname)
- host_name = request.host
- for h in db.hosts:
- if h.ip == request.host or h.name == request.host:
- host_name = h.name
- break
-
-        # Record the successful bootstrap status
- bootstrap_status_service.set_bootstrap_status(
- host_name=host_name,
- success=True,
- details=f"Bootstrap réussi via API (user: {request.automation_user})"
- )
-
-        # Invalidate the hosts cache so it reloads with the new status
- db._hosts_cache = None
-
-        # Add a success log entry
- log_entry = LogEntry(
- id=db.get_next_id("logs"),
- timestamp=datetime.now(timezone.utc),
- level="INFO",
- message=f"Bootstrap réussi pour {host_name} (user: {request.automation_user})",
- source="bootstrap",
- host=host_name
- )
- db.logs.insert(0, log_entry)
-
-        # Notify via WebSocket
- await ws_manager.broadcast({
- "type": "bootstrap_success",
- "data": {
- "host": host_name,
- "user": request.automation_user,
- "status": "ok",
- "bootstrap_ok": True
- }
- })
-
-        # Send an ntfy notification (non-blocking)
- asyncio.create_task(notification_service.notify_bootstrap_success(host_name))
-
- return result
-
- except HTTPException as http_exc:
-        # Send an ntfy failure notification
- error_detail = str(http_exc.detail) if http_exc.detail else "Erreur inconnue"
- asyncio.create_task(notification_service.notify_bootstrap_failed(
- hostname=request.host,
- error=error_detail[:200]
- ))
- raise
- except Exception as e:
- logger.error(f"Bootstrap exception: {e}")
- logger.error(traceback.format_exc())
-        # Add an error log entry
- log_entry = LogEntry(
- id=db.get_next_id("logs"),
- timestamp=datetime.now(timezone.utc),
- level="ERROR",
- message=f"Échec bootstrap pour {request.host}: {str(e)}",
- source="bootstrap",
- host=request.host
- )
- db.logs.insert(0, log_entry)
-
-        # Send an ntfy failure notification
- asyncio.create_task(notification_service.notify_bootstrap_failed(
- hostname=request.host,
- error=str(e)[:200]
- ))
-
- raise HTTPException(status_code=500, detail=str(e))
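-# Illustrative request sketch for POST /api/ansible/bootstrap (placeholders
-# only; fields follow the BootstrapRequest usage above):
-#   {"host": "192.168.1.50", "root_password": "<root password>",
-#    "automation_user": "automation"}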
-
-
-@app.get("/api/health")
-async def global_health_check():
- """Endpoint de healthcheck global utilisé par Docker.
-
- Ne nécessite pas de clé API pour permettre aux orchestrateurs
- de vérifier l'état du service facilement.
- """
- return {
- "status": "ok",
- "service": "homelab-automation-api",
- "timestamp": datetime.now(timezone.utc).isoformat()
- }
-
-
-# ===== BOOTSTRAP STATUS ENDPOINTS =====
-
-@app.get("/api/bootstrap/status")
-async def get_all_bootstrap_status(api_key_valid: bool = Depends(verify_api_key)):
- """Récupère le statut de bootstrap de tous les hôtes"""
- return {
- "hosts": bootstrap_status_service.get_all_status()
- }
-
-
-@app.get("/api/bootstrap/status/{host_name}")
-async def get_host_bootstrap_status(
- host_name: str,
- api_key_valid: bool = Depends(verify_api_key)
-):
- """Récupère le statut de bootstrap d'un hôte spécifique"""
- status = bootstrap_status_service.get_bootstrap_status(host_name)
- return {
- "host": host_name,
- **status
- }
-
-
-@app.post("/api/bootstrap/status/{host_name}")
-async def set_host_bootstrap_status(
- host_name: str,
- success: bool = True,
- details: Optional[str] = None,
- api_key_valid: bool = Depends(verify_api_key)
-):
- """Définit manuellement le statut de bootstrap d'un hôte"""
- result = bootstrap_status_service.set_bootstrap_status(
- host_name=host_name,
- success=success,
- details=details or f"Status défini manuellement"
- )
-
-    # Invalidate the hosts cache
- db._hosts_cache = None
-
-    # Notify via WebSocket
- await ws_manager.broadcast({
- "type": "bootstrap_status_updated",
- "data": {
- "host": host_name,
- "bootstrap_ok": success
- }
- })
-
- return {
- "host": host_name,
- "status": "updated",
- **result
- }
-
-
-# ===== AD-HOC HISTORY ENDPOINTS =====
-
-@app.get("/api/adhoc/history")
-async def get_adhoc_history(
- category: Optional[str] = None,
- search: Optional[str] = None,
- limit: int = 50,
- api_key_valid: bool = Depends(verify_api_key)
-):
- """Récupère l'historique des commandes ad-hoc"""
- commands = await adhoc_history_service.get_commands(
- category=category,
- search=search,
- limit=limit,
- )
- return {
- "commands": [cmd.dict() for cmd in commands],
- "count": len(commands)
- }
-
-
-@app.get("/api/adhoc/categories")
-async def get_adhoc_categories(api_key_valid: bool = Depends(verify_api_key)):
- """Récupère la liste des catégories de commandes ad-hoc"""
- categories = await adhoc_history_service.get_categories()
- return {"categories": [cat.dict() for cat in categories]}
-
-
-@app.post("/api/adhoc/categories")
-async def create_adhoc_category(
- name: str,
- description: Optional[str] = None,
- color: str = "#7c3aed",
- icon: str = "fa-folder",
- api_key_valid: bool = Depends(verify_api_key)
-):
- """Crée une nouvelle catégorie de commandes ad-hoc"""
- category = await adhoc_history_service.add_category(name, description, color, icon)
- return {"category": category.dict(), "message": "Catégorie créée"}
-
-
-@app.put("/api/adhoc/categories/{category_name}")
-async def update_adhoc_category(
- category_name: str,
- request: Request,
- api_key_valid: bool = Depends(verify_api_key)
-):
- """Met à jour une catégorie existante"""
- try:
- data = await request.json()
- new_name = data.get("name", category_name)
- description = data.get("description", "")
- color = data.get("color", "#7c3aed")
- icon = data.get("icon", "fa-folder")
-
- success = await adhoc_history_service.update_category(category_name, new_name, description, color, icon)
- if not success:
- raise HTTPException(status_code=404, detail="Catégorie non trouvée")
- return {"message": "Catégorie mise à jour", "category": new_name}
- except Exception as e:
- raise HTTPException(status_code=400, detail=str(e))
-
-
-@app.delete("/api/adhoc/categories/{category_name}")
-async def delete_adhoc_category(
- category_name: str,
- api_key_valid: bool = Depends(verify_api_key)
-):
- """Supprime une catégorie et déplace ses commandes vers 'default'"""
- if category_name == "default":
- raise HTTPException(status_code=400, detail="La catégorie 'default' ne peut pas être supprimée")
-
- success = await adhoc_history_service.delete_category(category_name)
- if not success:
- raise HTTPException(status_code=404, detail="Catégorie non trouvée")
- return {"message": "Catégorie supprimée", "category": category_name}
-
-
-@app.put("/api/adhoc/history/{command_id}/category")
-async def update_adhoc_command_category(
- command_id: str,
- category: str,
- description: Optional[str] = None,
- api_key_valid: bool = Depends(verify_api_key)
-):
- """Met à jour la catégorie d'une commande dans l'historique"""
- success = await adhoc_history_service.update_command_category(command_id, category, description)
- if not success:
- raise HTTPException(status_code=404, detail="Commande non trouvée")
- return {"message": "Catégorie mise à jour", "command_id": command_id, "category": category}
-
-
-@app.delete("/api/adhoc/history/{command_id}")
-async def delete_adhoc_command(command_id: str, api_key_valid: bool = Depends(verify_api_key)):
- """Supprime une commande de l'historique"""
- success = await adhoc_history_service.delete_command(command_id)
- if not success:
- raise HTTPException(status_code=404, detail="Commande non trouvée")
- return {"message": "Commande supprimée", "command_id": command_id}
-
-
-@app.get("/api/health/{host_name}", response_model=HealthCheck)
-async def check_host_health(host_name: str, api_key_valid: bool = Depends(verify_api_key)):
- """Effectue un health check sur un hôte spécifique et met à jour son last_seen"""
- host = next((h for h in db.hosts if h.name == host_name), None)
- if not host:
- raise HTTPException(status_code=404, detail="Hôte non trouvé")
-
- # Simuler un health check à partir du statut actuel
- health_check = HealthCheck(
- host=host_name,
- ssh_ok=host.status == "online",
- ansible_ok=host.status == "online",
- sudo_ok=host.status == "online",
- reachable=host.status != "offline",
- response_time=0.123 if host.status == "online" else None,
- error_message=None if host.status != "offline" else "Hôte injoignable"
- )
-
- # Mettre à jour le statut runtime + persistant
- new_status = "online" if health_check.reachable else "offline"
- db.update_host_status(host_name, new_status, host.os)
-
- # Ajouter un log pour le health check
- log_entry = LogEntry(
- timestamp=datetime.now(timezone.utc),
- level="INFO" if health_check.reachable else "ERROR",
- message=f"Health check {'réussi' if health_check.reachable else 'échoué'} pour {host_name}",
- source="health_check",
- host=host_name
- )
-
- db.logs.insert(0, log_entry)
-
- # Notifier les clients WebSocket
- await ws_manager.broadcast({
- "type": "health_check",
- "data": health_check.dict()
- })
-
- return health_check
-
-# WebSocket pour les mises à jour en temps réel
-@app.websocket("/ws")
-async def websocket_endpoint(websocket: WebSocket):
- await ws_manager.connect(websocket)
- try:
- while True:
- # Garder la connexion ouverte
- data = await websocket.receive_text()
- # Traiter les messages entrants si nécessaire
- except WebSocketDisconnect:
- ws_manager.disconnect(websocket)
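Any WebSocket client can subscribe to the broadcasts pushed through `ws_manager`; a small consumer sketch using the third-party `websockets` package (the URL and port are assumptions based on the server defaults):

```python
# Sketch of a client consuming the /ws broadcast feed via the
# third-party `websockets` package; the URL is an assumption.
import asyncio
import json
import websockets

async def watch_events():
    async with websockets.connect("ws://localhost:8008/ws") as ws:
        while True:
            event = json.loads(await ws.recv())
            print(event["type"], event.get("data"))

# asyncio.run(watch_events())
```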
-
-# Fonctions utilitaires
-async def simulate_task_execution(task_id: str):
- """Simule l'exécution d'une tâche en arrière-plan"""
- task = next((t for t in db.tasks if str(t.id) == str(task_id)), None)
- if not task:
- return
-
- try:
- # Simuler la progression
- for progress in range(0, 101, 10):
- task.progress = progress
-
- # Notifier les clients WebSocket
- await ws_manager.broadcast({
- "type": "task_progress",
- "data": {
- "id": task_id,
- "progress": progress
- }
- })
-
- await asyncio.sleep(0.5) # Attendre 500ms entre chaque mise à jour
-
- # Marquer la tâche comme terminée
- task.status = "completed"
- task.end_time = datetime.now(timezone.utc)
- task.duration = "5s"
-
- # Ajouter un log
- log_entry = LogEntry(
- id=db.get_next_id("logs"),
- timestamp=datetime.now(timezone.utc),
- level="INFO",
- message=f"Tâche '{task.name}' terminée avec succès sur {task.host}",
- source="task_manager",
- host=task.host
- )
- db.logs.insert(0, log_entry)
-
- # Notifier les clients WebSocket
- await ws_manager.broadcast({
- "type": "task_completed",
- "data": {
- "id": task_id,
- "status": "completed",
- "progress": 100
- }
- })
-
- # Sauvegarder le log markdown
- try:
- task_log_service.save_task_log(task=task, output="Tâche simulée terminée avec succès")
- except Exception as log_error:
- print(f"Erreur sauvegarde log markdown: {log_error}")
-
- except asyncio.CancelledError:
- # Tâche annulée
- task.status = "cancelled"
- task.end_time = datetime.now(timezone.utc)
- task.error = "Tâche annulée par l'utilisateur"
-
- await ws_manager.broadcast({
- "type": "task_cancelled",
- "data": {
- "id": task_id,
- "status": "cancelled",
- "message": "Tâche annulée par l'utilisateur"
- }
- })
-
- finally:
- # Nettoyer le handle de la tâche
- if str(task_id) in running_task_handles:
- del running_task_handles[str(task_id)]
-
-
-async def execute_ansible_task(
- task_id: str,
- playbook: str,
- target: str,
- extra_vars: Optional[Dict[str, Any]] = None,
- check_mode: bool = False
-):
- """Exécute un playbook Ansible pour une tâche"""
- task = next((t for t in db.tasks if str(t.id) == str(task_id)), None)
- if not task:
- return
-
- # Notifier le début
- task.progress = 10
- await ws_manager.broadcast({
- "type": "task_progress",
- "data": {"id": task_id, "progress": 10, "message": "Démarrage du playbook Ansible..."}
- })
-
- start_time = perf_counter()
-
- try:
- # Exécuter le playbook
- result = await ansible_service.execute_playbook(
- playbook=playbook,
- target=target,
- extra_vars=extra_vars,
- check_mode=check_mode,
- verbose=True
- )
-
- execution_time = perf_counter() - start_time
-
- # Mettre à jour la tâche
- task.progress = 100
- task.status = "completed" if result["success"] else "failed"
- task.end_time = datetime.now(timezone.utc)
- task.duration = f"{execution_time:.1f}s"
- task.output = result.get("stdout", "")
- task.error = result.get("stderr", "") if not result["success"] else None
-
- # Si c'est un health-check ciblé, mettre à jour le statut/last_seen de l'hôte
- if "health-check" in playbook and target and target != "all":
- try:
- new_status = "online" if result["success"] else "offline"
- db.update_host_status(target, new_status)
- except Exception:
- # Ne pas interrompre la gestion de la tâche si la MAJ de statut échoue
- pass
-
- # Ajouter un log
- log_entry = LogEntry(
- id=db.get_next_id("logs"),
- timestamp=datetime.now(timezone.utc),
- level="INFO" if result["success"] else "ERROR",
- message=f"Tâche '{task.name}' {'terminée avec succès' if result['success'] else 'échouée'} sur {target}",
- source="ansible",
- host=target
- )
- db.logs.insert(0, log_entry)
-
- # Notifier les clients WebSocket
- await ws_manager.broadcast({
- "type": "task_completed",
- "data": {
- "id": task_id,
- "status": task.status,
- "progress": 100,
- "duration": task.duration,
- "success": result["success"],
- "output": result.get("stdout", "")[:500] # Limiter la taille
- }
- })
-
- # Envoyer notification ntfy (non-bloquant)
- if result["success"]:
- asyncio.create_task(notification_service.notify_task_completed(
- task_name=task.name,
- target=target,
- duration=task.duration
- ))
- else:
- asyncio.create_task(notification_service.notify_task_failed(
- task_name=task.name,
- target=target,
- error=result.get("stderr", "Erreur inconnue")[:200]
- ))
-
- # Sauvegarder le log markdown
- try:
- log_path = task_log_service.save_task_log(
- task=task,
- output=result.get("stdout", ""),
- error=result.get("stderr", "")
- )
- log_entry = LogEntry(
- id=db.get_next_id("logs"),
- timestamp=datetime.now(timezone.utc),
- level="DEBUG",
- message=f"Log de tâche sauvegardé: {log_path}",
- source="task_log",
- host=target
- )
- db.logs.insert(0, log_entry)
- except Exception as log_error:
- print(f"Erreur sauvegarde log markdown: {log_error}")
-
- except Exception as e:
- task.status = "failed"
- task.end_time = datetime.now(timezone.utc)
- task.error = str(e)
-
- log_entry = LogEntry(
- id=db.get_next_id("logs"),
- timestamp=datetime.now(timezone.utc),
- level="ERROR",
- message=f"Erreur lors de l'exécution de '{task.name}': {str(e)}",
- source="ansible",
- host=target
- )
- db.logs.insert(0, log_entry)
-
- # Sauvegarder le log markdown même en cas d'échec
- try:
- task_log_service.save_task_log(task=task, error=str(e))
- except Exception:
- pass
-
- await ws_manager.broadcast({
- "type": "task_failed",
- "data": {
- "id": task_id,
- "status": "failed",
- "error": str(e)
- }
- })
-
- except asyncio.CancelledError:
- # Tâche annulée par l'utilisateur
- task.status = "cancelled"
- task.end_time = datetime.now(timezone.utc)
- task.error = "Tâche annulée par l'utilisateur"
-
- log_entry = LogEntry(
- id=db.get_next_id("logs"),
- timestamp=datetime.now(timezone.utc),
- level="WARNING",
- message=f"Tâche '{task.name}' annulée par l'utilisateur",
- source="ansible",
- host=target
- )
- db.logs.insert(0, log_entry)
-
- await ws_manager.broadcast({
- "type": "task_cancelled",
- "data": {
- "id": task_id,
- "status": "cancelled",
- "message": "Tâche annulée par l'utilisateur"
- }
- })
-
- finally:
- # Nettoyer le handle de la tâche
- if str(task_id) in running_task_handles:
- del running_task_handles[str(task_id)]
-
- # Mettre à jour la BD avec le statut final
- try:
- async with async_session_maker() as session:
- from app.crud.task import TaskRepository
- repo = TaskRepository(session)
- db_task = await repo.get(task_id)
- if db_task:
- await repo.update(
- db_task,
- status=task.status if task else "failed",
- completed_at=datetime.now(timezone.utc),
- error_message=task.error if task else None,
- result_data={"output": task.output[:5000] if task and task.output else None}
- )
- await session.commit()
- except Exception as db_error:
- print(f"Erreur mise à jour BD pour tâche {task_id}: {db_error}")
-
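One caveat with the bare `asyncio.create_task(...)` notification calls above: the event loop keeps only a weak reference to tasks, so an otherwise unreferenced task can be garbage-collected before it completes (a pitfall called out in the asyncio documentation). A common hardening sketch:

```python
# Keep strong references to fire-and-forget tasks until they finish.
import asyncio
from typing import Coroutine, Set

_background_tasks: Set[asyncio.Task] = set()

def fire_and_forget(coro: Coroutine) -> asyncio.Task:
    task = asyncio.create_task(coro)
    _background_tasks.add(task)                        # strong reference
    task.add_done_callback(_background_tasks.discard)  # release when done
    return task
```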
-
-# ===== ENDPOINTS PLANIFICATEUR (SCHEDULER) =====
-
-@app.get("/api/schedules")
-async def get_schedules(
- enabled: Optional[bool] = None,
- playbook: Optional[str] = None,
- tag: Optional[str] = None,
- limit: int = 100,
- offset: int = 0,
- api_key_valid: bool = Depends(verify_api_key),
-):
- """Liste tous les schedules avec filtrage optionnel (via SchedulerService)."""
- # Utiliser le SchedulerService comme source de vérité pour next_run_at / last_run_at
- schedules = scheduler_service.get_all_schedules(
- enabled=enabled,
- playbook=playbook,
- tag=tag,
- )
-
- # Pagination simple côté API (les schedules sont déjà triés par next_run_at)
- paginated = schedules[offset : offset + limit]
-
- results = []
- for s in paginated:
- rec = s.recurrence
- results.append(
- {
- "id": s.id,
- "name": s.name,
- "playbook": s.playbook,
- "target": s.target,
- "schedule_type": s.schedule_type,
- "recurrence": rec.model_dump() if rec else None,
- "enabled": s.enabled,
- "notification_type": getattr(s, 'notification_type', 'all'),
- "tags": s.tags,
- # Champs utilisés par le frontend pour "Prochaine" et historique
- "next_run_at": s.next_run_at,
- "last_run_at": s.last_run_at,
- "last_status": s.last_status,
- "run_count": s.run_count,
- "success_count": s.success_count,
- "failure_count": s.failure_count,
- "created_at": s.created_at,
- "updated_at": s.updated_at,
- }
- )
-
- return {"schedules": results, "count": len(schedules)}
-
-
-@app.post("/api/schedules")
-async def create_schedule(
- request: ScheduleCreateRequest,
- api_key_valid: bool = Depends(verify_api_key),
- db_session: AsyncSession = Depends(get_db),
-):
- """Crée un nouveau schedule (stocké en DB) avec validation de compatibilité playbook-target"""
- # Vérifier que le playbook existe
- playbooks = ansible_service.get_playbooks()
- playbook_names = [p['filename'] for p in playbooks] + [p['name'] for p in playbooks]
-
- playbook_file = request.playbook
- if not playbook_file.endswith(('.yml', '.yaml')):
- playbook_file = f"{playbook_file}.yml"
-
- if playbook_file not in playbook_names and request.playbook not in playbook_names:
- raise HTTPException(status_code=400, detail=f"Playbook '{request.playbook}' non trouvé")
-
- # Récupérer les infos du playbook pour validation
- playbook_info = next((pb for pb in playbooks if pb['filename'] == playbook_file or pb['name'] == request.playbook), None)
-
- # Vérifier la cible
- if request.target_type == "group":
- groups = ansible_service.get_groups()
- if request.target not in groups and request.target != "all":
- raise HTTPException(status_code=400, detail=f"Groupe '{request.target}' non trouvé")
- else:
- if not ansible_service.host_exists(request.target):
- raise HTTPException(status_code=400, detail=f"Hôte '{request.target}' non trouvé")
-
- # Valider la compatibilité playbook-target
- if playbook_info:
- playbook_hosts = playbook_info.get('hosts', 'all')
- if not ansible_service.is_target_compatible_with_playbook(request.target, playbook_hosts):
- raise HTTPException(
- status_code=400,
- detail=f"Le playbook '{request.playbook}' (hosts: {playbook_hosts}) n'est pas compatible avec la cible '{request.target}'. "
- f"Ce playbook ne peut être exécuté que sur: {playbook_hosts}"
- )
-
- # Valider la récurrence
- if request.schedule_type == "recurring" and not request.recurrence:
- raise HTTPException(status_code=400, detail="La récurrence est requise pour un schedule récurrent")
-
- if request.recurrence and request.recurrence.type == "custom":
- if not request.recurrence.cron_expression:
- raise HTTPException(status_code=400, detail="Expression cron requise pour le type 'custom'")
- validation = scheduler_service.validate_cron_expression(request.recurrence.cron_expression)
- if not validation["valid"]:
- raise HTTPException(status_code=400, detail=f"Expression cron invalide: {validation.get('error')}")
-
- # Créer en DB
- repo = ScheduleRepository(db_session)
- schedule_id = f"sched_{uuid.uuid4().hex[:12]}"
-
- recurrence = request.recurrence
- schedule_obj = await repo.create(
- id=schedule_id,
- name=request.name,
- description=request.description,
- playbook=playbook_file,
- target_type=request.target_type,
- target=request.target,
- extra_vars=request.extra_vars,
- schedule_type=request.schedule_type,
- schedule_time=request.start_at,
- recurrence_type=recurrence.type if recurrence else None,
- recurrence_time=recurrence.time if recurrence else None,
- recurrence_days=json.dumps(recurrence.days) if recurrence and recurrence.days else None,
- cron_expression=recurrence.cron_expression if recurrence else None,
- timezone=request.timezone,
- start_at=request.start_at,
- end_at=request.end_at,
- enabled=request.enabled,
- retry_on_failure=request.retry_on_failure,
- timeout=request.timeout,
- notification_type=request.notification_type,
- tags=json.dumps(request.tags) if request.tags else None,
- )
- await db_session.commit()
-
- # Créer le schedule Pydantic et l'ajouter au cache du scheduler
- pydantic_schedule = Schedule(
- id=schedule_id,
- name=request.name,
- description=request.description,
- playbook=playbook_file,
- target_type=request.target_type,
- target=request.target,
- extra_vars=request.extra_vars,
- schedule_type=request.schedule_type,
- recurrence=request.recurrence,
- timezone=request.timezone,
- start_at=request.start_at,
- end_at=request.end_at,
- enabled=request.enabled,
- retry_on_failure=request.retry_on_failure,
- timeout=request.timeout,
- notification_type=request.notification_type,
- tags=request.tags or [],
- )
- scheduler_service.add_schedule_to_cache(pydantic_schedule)
-
- # Log en DB
- log_repo = LogRepository(db_session)
- await log_repo.create(
- level="INFO",
- message=f"Schedule '{request.name}' créé pour {playbook_file} sur {request.target}",
- source="scheduler",
- )
- await db_session.commit()
-
- # Notifier via WebSocket
- await ws_manager.broadcast({
- "type": "schedule_created",
- "data": {
- "id": schedule_obj.id,
- "name": schedule_obj.name,
- "playbook": schedule_obj.playbook,
- "target": schedule_obj.target,
- }
- })
-
- return {
- "success": True,
- "message": f"Schedule '{request.name}' créé avec succès",
- "schedule": {
- "id": schedule_obj.id,
- "name": schedule_obj.name,
- "playbook": schedule_obj.playbook,
- "target": schedule_obj.target,
- "enabled": schedule_obj.enabled,
- }
- }
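For reference, a request body this endpoint accepts could look like the following. This is a hypothetical payload: the field names mirror the `ScheduleCreateRequest` attributes used above, while the values, base URL, and API-key header name are made up.

```python
# Hypothetical schedule-creation call; values and header name are examples.
import httpx

payload = {
    "name": "Weekly updates",
    "playbook": "update-packages",   # ".yml" is appended server-side if missing
    "target_type": "group",
    "target": "all",
    "schedule_type": "recurring",
    "recurrence": {"type": "custom", "cron_expression": "0 4 * * 1"},
    "timezone": "America/Montreal",
    "enabled": True,
}
httpx.post("http://localhost:8008/api/schedules", json=payload,
           headers={"X-API-Key": "..."})  # header name is an assumption
```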
-
-
-@app.get("/api/schedules/stats")
-async def get_schedules_stats(api_key_valid: bool = Depends(verify_api_key)):
- """Récupère les statistiques globales des schedules"""
- stats = scheduler_service.get_stats()
- upcoming = scheduler_service.get_upcoming_executions(limit=5)
-
- return {
- "stats": stats.dict(),
- "upcoming": upcoming
- }
-
-
-@app.get("/api/schedules/upcoming")
-async def get_upcoming_schedules(
- limit: int = 10,
- api_key_valid: bool = Depends(verify_api_key)
-):
- """Récupère les prochaines exécutions planifiées"""
- upcoming = scheduler_service.get_upcoming_executions(limit=limit)
- return {
- "upcoming": upcoming,
- "count": len(upcoming)
- }
-
-
-@app.get("/api/schedules/validate-cron")
-async def validate_cron_expression(
- expression: str,
- api_key_valid: bool = Depends(verify_api_key)
-):
- """Valide une expression cron et retourne les 5 prochaines exécutions"""
- result = scheduler_service.validate_cron_expression(expression)
- return result
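Given the `croniter` import at the top of this module, the service-side validation presumably boils down to something like this standalone sketch:

```python
# Standalone cron validation/preview with croniter (an assumption about
# what scheduler_service.validate_cron_expression() does internally).
from datetime import datetime
from croniter import croniter

def preview_cron(expression: str, count: int = 5) -> dict:
    if not croniter.is_valid(expression):
        return {"valid": False, "error": "invalid cron expression"}
    itr = croniter(expression, datetime.now())
    return {
        "valid": True,
        "next_runs": [itr.get_next(datetime).isoformat() for _ in range(count)],
    }
```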
-
-
-@app.get("/api/schedules/{schedule_id}")
-async def get_schedule(
- schedule_id: str,
- api_key_valid: bool = Depends(verify_api_key),
- db_session: AsyncSession = Depends(get_db),
-):
- """Récupère les détails d'un schedule spécifique (depuis DB)"""
- repo = ScheduleRepository(db_session)
- schedule = await repo.get(schedule_id)
- if not schedule:
- raise HTTPException(status_code=404, detail=f"Schedule '{schedule_id}' non trouvé")
-
- return {
- "id": schedule.id,
- "name": schedule.name,
- "playbook": schedule.playbook,
- "target": schedule.target,
- "schedule_type": schedule.schedule_type,
- "recurrence_type": schedule.recurrence_type,
- "recurrence_time": schedule.recurrence_time,
- "recurrence_days": json.loads(schedule.recurrence_days) if schedule.recurrence_days else None,
- "cron_expression": schedule.cron_expression,
- "enabled": schedule.enabled,
- "notification_type": schedule.notification_type or "all",
- "tags": json.loads(schedule.tags) if schedule.tags else [],
- "next_run": schedule.next_run,
- "last_run": schedule.last_run,
- "created_at": schedule.created_at,
- "updated_at": schedule.updated_at,
- }
-
-
-@app.put("/api/schedules/{schedule_id}")
-async def update_schedule(
- schedule_id: str,
- request: ScheduleUpdateRequest,
- api_key_valid: bool = Depends(verify_api_key),
- db_session: AsyncSession = Depends(get_db),
-):
- """Met à jour un schedule existant (DB + scheduler_service)"""
- # Essayer d'abord via SchedulerService (source de vérité)
- sched = scheduler_service.get_schedule(schedule_id)
- repo = ScheduleRepository(db_session)
- schedule = await repo.get(schedule_id)
-
- if not sched and not schedule:
- raise HTTPException(status_code=404, detail=f"Schedule '{schedule_id}' non trouvé")
-
- schedule_name = sched.name if sched else schedule.name
-
- # Valider le playbook si modifié
- if request.playbook:
- playbooks = ansible_service.get_playbooks()
- playbook_names = [p['filename'] for p in playbooks] + [p['name'] for p in playbooks]
- playbook_file = request.playbook
- if not playbook_file.endswith(('.yml', '.yaml')):
- playbook_file = f"{playbook_file}.yml"
- if playbook_file not in playbook_names and request.playbook not in playbook_names:
- raise HTTPException(status_code=400, detail=f"Playbook '{request.playbook}' non trouvé")
-
- # Valider l'expression cron si modifiée
- if request.recurrence and request.recurrence.type == "custom":
- if request.recurrence.cron_expression:
- validation = scheduler_service.validate_cron_expression(request.recurrence.cron_expression)
- if not validation["valid"]:
- raise HTTPException(status_code=400, detail=f"Expression cron invalide: {validation.get('error')}")
-
- # Mettre à jour en DB
- update_fields = {}
- if request.name:
- update_fields["name"] = request.name
- if request.description:
- update_fields["description"] = request.description
- if request.playbook:
- update_fields["playbook"] = request.playbook
- if request.target:
- update_fields["target"] = request.target
- if request.schedule_type:
- update_fields["schedule_type"] = request.schedule_type
- if request.timezone:
- update_fields["timezone"] = request.timezone
- if request.enabled is not None:
- update_fields["enabled"] = request.enabled
- if request.retry_on_failure is not None:
- update_fields["retry_on_failure"] = request.retry_on_failure
- if request.timeout is not None:
- update_fields["timeout"] = request.timeout
- if request.notification_type:
- update_fields["notification_type"] = request.notification_type
- if request.tags:
- update_fields["tags"] = json.dumps(request.tags)
- if request.recurrence:
- update_fields["recurrence_type"] = request.recurrence.type
- update_fields["recurrence_time"] = request.recurrence.time
- update_fields["recurrence_days"] = json.dumps(request.recurrence.days) if request.recurrence.days else None
- update_fields["cron_expression"] = request.recurrence.cron_expression
-
- # Mettre à jour en DB si présent
- if schedule:
- await repo.update(schedule, **update_fields)
- await db_session.commit()
-
- # Aussi mettre à jour dans scheduler_service pour APScheduler
- scheduler_service.update_schedule(schedule_id, request)
-
- # Log en DB
- log_repo = LogRepository(db_session)
- await log_repo.create(
- level="INFO",
- message=f"Schedule '{schedule_name}' mis à jour",
- source="scheduler",
- )
- await db_session.commit()
-
- # Notifier via WebSocket
- await ws_manager.broadcast({
- "type": "schedule_updated",
- "data": {"id": schedule_id, "name": schedule_name}
- })
-
- return {
- "success": True,
- "message": f"Schedule '{schedule_name}' mis à jour",
- "schedule": {"id": schedule_id, "name": schedule_name}
- }
-
-
-@app.delete("/api/schedules/{schedule_id}")
-async def delete_schedule(
- schedule_id: str,
- api_key_valid: bool = Depends(verify_api_key),
- db_session: AsyncSession = Depends(get_db),
-):
- """Supprime un schedule (soft delete en DB + suppression scheduler_service)"""
- repo = ScheduleRepository(db_session)
- schedule = await repo.get(schedule_id)
- if not schedule:
- # Aucun enregistrement en DB, mais on tente tout de même de le supprimer
- # du SchedulerService (cas des anciens IDs internes du scheduler).
- try:
- scheduler_service.delete_schedule(schedule_id)
- except Exception:
- pass
- return {
- "success": True,
- "message": f"Schedule '{schedule_id}' déjà supprimé ou inexistant en base, nettoyage scheduler effectué."
- }
-
- schedule_name = schedule.name
-
- # Soft delete en DB
- await repo.soft_delete(schedule_id)
- await db_session.commit()
-
- # Supprimer du scheduler_service
- scheduler_service.delete_schedule(schedule_id)
-
- # Log en DB
- log_repo = LogRepository(db_session)
- await log_repo.create(
- level="WARN",
- message=f"Schedule '{schedule_name}' supprimé",
- source="scheduler",
- )
- await db_session.commit()
-
- # Notifier via WebSocket
- await ws_manager.broadcast({
- "type": "schedule_deleted",
- "data": {"id": schedule_id, "name": schedule_name}
- })
-
- return {
- "success": True,
- "message": f"Schedule '{schedule_name}' supprimé"
- }
-
-
-@app.post("/api/schedules/{schedule_id}/run")
-async def run_schedule_now(
- schedule_id: str,
- api_key_valid: bool = Depends(verify_api_key),
- db_session: AsyncSession = Depends(get_db),
-):
- """Exécute immédiatement un schedule (exécution forcée)"""
- # Essayer d'abord via SchedulerService (source de vérité)
- sched = scheduler_service.get_schedule(schedule_id)
- if not sched:
- # Fallback sur la DB
- repo = ScheduleRepository(db_session)
- schedule = await repo.get(schedule_id)
- if not schedule:
- raise HTTPException(status_code=404, detail=f"Schedule '{schedule_id}' non trouvé")
- schedule_name = schedule.name
- else:
- schedule_name = sched.name
-
- # Lancer l'exécution via scheduler_service
- run = await scheduler_service.run_now(schedule_id)
-
- return {
- "success": True,
- "message": f"Schedule '{schedule_name}' lancé",
- "run": run.dict() if run else None
- }
-
-
-@app.post("/api/schedules/{schedule_id}/pause")
-async def pause_schedule(
- schedule_id: str,
- api_key_valid: bool = Depends(verify_api_key),
- db_session: AsyncSession = Depends(get_db),
-):
- """Met en pause un schedule"""
- # Essayer d'abord via SchedulerService (source de vérité)
- sched = scheduler_service.get_schedule(schedule_id)
- repo = ScheduleRepository(db_session)
- schedule = await repo.get(schedule_id)
-
- if not sched and not schedule:
- raise HTTPException(status_code=404, detail=f"Schedule '{schedule_id}' non trouvé")
-
- schedule_name = sched.name if sched else schedule.name
-
- # Mettre à jour en DB si présent
- if schedule:
- await repo.update(schedule, enabled=False)
- await db_session.commit()
-
- # Mettre à jour dans scheduler_service
- scheduler_service.pause_schedule(schedule_id)
-
- # Log en DB
- log_repo = LogRepository(db_session)
- await log_repo.create(
- level="INFO",
- message=f"Schedule '{schedule_name}' mis en pause",
- source="scheduler",
- )
- await db_session.commit()
-
- # Notifier via WebSocket
- await ws_manager.broadcast({
- "type": "schedule_updated",
- "data": {"id": schedule_id, "name": schedule_name, "enabled": False}
- })
-
- return {
- "success": True,
- "message": f"Schedule '{schedule_name}' mis en pause",
- "schedule": {"id": schedule_id, "name": schedule_name, "enabled": False}
- }
-
-
-@app.post("/api/schedules/{schedule_id}/resume")
-async def resume_schedule(
- schedule_id: str,
- api_key_valid: bool = Depends(verify_api_key),
- db_session: AsyncSession = Depends(get_db),
-):
- """Reprend un schedule en pause"""
- # Essayer d'abord via SchedulerService (source de vérité)
- sched = scheduler_service.get_schedule(schedule_id)
- repo = ScheduleRepository(db_session)
- schedule = await repo.get(schedule_id)
-
- if not sched and not schedule:
- raise HTTPException(status_code=404, detail=f"Schedule '{schedule_id}' non trouvé")
-
- schedule_name = sched.name if sched else schedule.name
-
- # Mettre à jour en DB si présent
- if schedule:
- await repo.update(schedule, enabled=True)
- await db_session.commit()
-
- # Mettre à jour dans scheduler_service
- scheduler_service.resume_schedule(schedule_id)
-
- # Log en DB
- log_repo = LogRepository(db_session)
- await log_repo.create(
- level="INFO",
- message=f"Schedule '{schedule_name}' repris",
- source="scheduler",
- )
- await db_session.commit()
-
- # Notifier via WebSocket
- await ws_manager.broadcast({
- "type": "schedule_updated",
- "data": {"id": schedule_id, "name": schedule_name, "enabled": True}
- })
-
- return {
- "success": True,
- "message": f"Schedule '{schedule_name}' repris",
- "schedule": {"id": schedule_id, "name": schedule_name, "enabled": True}
- }
-
-
-@app.get("/api/schedules/{schedule_id}/runs")
-async def get_schedule_runs(
- schedule_id: str,
- limit: int = 50,
- offset: int = 0,
- api_key_valid: bool = Depends(verify_api_key),
- db_session: AsyncSession = Depends(get_db),
-):
- """Récupère l'historique des exécutions d'un schedule (depuis la base de données)"""
- # Vérifier que le schedule existe soit dans le SchedulerService, soit en BD
- sched = scheduler_service.get_schedule(schedule_id)
- repo = ScheduleRepository(db_session)
- schedule = await repo.get(schedule_id)
-
- if not sched and not schedule:
- raise HTTPException(status_code=404, detail=f"Schedule '{schedule_id}' non trouvé")
-
- schedule_name = sched.name if sched else schedule.name
-
- # Récupérer les runs depuis la BD
- run_repo = ScheduleRunRepository(db_session)
- runs = await run_repo.list_for_schedule(schedule_id, limit=limit, offset=offset)
-
- return {
- "schedule_id": schedule_id,
- "schedule_name": schedule_name,
- "runs": [
- {
- "id": r.id,
- "status": r.status,
- "started_at": r.started_at,
- "finished_at": r.completed_at,
- "duration_seconds": r.duration,
- "error_message": r.error_message,
- }
- for r in runs
- ],
- "count": len(runs)
- }
-
-
-# ===== ENDPOINTS NOTIFICATIONS NTFY =====
-
-@app.get("/api/notifications/config")
-async def get_notification_config(api_key_valid: bool = Depends(verify_api_key)):
- """Récupère la configuration actuelle des notifications ntfy."""
- config = notification_service.config
- return {
- "enabled": config.enabled,
- "base_url": config.base_url,
- "default_topic": config.default_topic,
- "timeout": config.timeout,
- "has_auth": config.has_auth,
- }
-
-
-@app.post("/api/notifications/test")
-async def test_notification(
- topic: Optional[str] = None,
- message: str = "🧪 Test de notification depuis Homelab Automation API",
- api_key_valid: bool = Depends(verify_api_key)
-):
- """Envoie une notification de test pour vérifier la configuration ntfy."""
- success = await notification_service.send(
- topic=topic,
- message=message,
- title="🔔 Test Notification",
- priority=3,
- tags=["test_tube", "robot"]
- )
-
- return {
- "success": success,
- "topic": topic or notification_service.config.default_topic,
- "message": "Notification envoyée" if success else "Échec de l'envoi (voir logs serveur)"
- }
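ntfy itself only needs a plain HTTP POST (message in the body, metadata in headers), so `notification_service.send()` is likely a thin wrapper over something like this sketch; the topic and server shown are example values:

```python
# Minimal ntfy publish; topic and server are example values.
import httpx

async def send_ntfy(message: str, topic: str = "homelab-events",
                    base_url: str = "https://ntfy.sh") -> bool:
    async with httpx.AsyncClient(timeout=5) as client:
        resp = await client.post(
            f"{base_url}/{topic}",
            content=message.encode("utf-8"),
            headers={"Title": "Test Notification", "Priority": "3",
                     "Tags": "test_tube,robot"},
        )
        return resp.status_code == 200
```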
-
-
-@app.post("/api/notifications/send", response_model=NotificationResponse)
-async def send_custom_notification(
- request: NotificationRequest,
- api_key_valid: bool = Depends(verify_api_key)
-):
- """Envoie une notification personnalisée via ntfy."""
- return await notification_service.send_request(request)
-
-
-@app.post("/api/notifications/toggle")
-async def toggle_notifications(
- enabled: bool,
- api_key_valid: bool = Depends(verify_api_key)
-):
- """Active ou désactive les notifications ntfy."""
- from schemas.notification import NtfyConfig
-
- # Reconfigurer le service avec le nouveau statut
- current_config = notification_service.config
- new_config = NtfyConfig(
- base_url=current_config.base_url,
- default_topic=current_config.default_topic,
- enabled=enabled,
- timeout=current_config.timeout,
- username=current_config.username,
- password=current_config.password,
- token=current_config.token,
- )
- notification_service.reconfigure(new_config)
-
- return {
- "enabled": enabled,
- "message": f"Notifications {'activées' if enabled else 'désactivées'}"
- }
-
-
-# ===== ÉVÉNEMENTS STARTUP/SHUTDOWN =====
-
-@app.on_event("startup")
-async def startup_event():
- """Événement de démarrage de l'application"""
- print("🚀 Homelab Automation Dashboard démarré")
-
- # Initialiser la base de données (créer les tables si nécessaire)
- await init_db()
- print("📦 Base de données SQLite initialisée")
-
- # Charger les statuts bootstrap depuis la BD
- await bootstrap_status_service.load_from_db()
-
- # Démarrer le scheduler et charger les schedules depuis la BD
- await scheduler_service.start_async()
-
- # Afficher l'état du service de notification
- ntfy_status = "activé" if notification_service.enabled else "désactivé"
- print(f"🔔 Service de notification ntfy: {ntfy_status} ({notification_service.config.base_url})")
-
- # Log de démarrage en base
- async with async_session_maker() as session:
- repo = LogRepository(session)
- await repo.create(
- level="INFO",
- message="Application démarrée - Services initialisés (BD)",
- source="system",
- )
- await session.commit()
-
- # Notification ntfy au démarrage de l'application
- startup_notif = notification_service.templates.app_started()
- await notification_service.send(
- message=startup_notif.message,
- topic=startup_notif.topic,
- title=startup_notif.title,
- priority=startup_notif.priority,
- tags=startup_notif.tags,
- )
-
-
-@app.on_event("shutdown")
-async def shutdown_event():
- """Événement d'arrêt de l'application"""
- print("👋 Arrêt de l'application...")
-
- # Arrêter le scheduler
- scheduler_service.shutdown()
-
- # Notification ntfy à l'arrêt de l'application
- shutdown_notif = notification_service.templates.app_stopped()
- await notification_service.send(
- message=shutdown_notif.message,
- topic=shutdown_notif.topic,
- title=shutdown_notif.title,
- priority=shutdown_notif.priority,
- tags=shutdown_notif.tags,
- )
-
- # Fermer le client HTTP du service de notification
- await notification_service.close()
- print("✅ Services arrêtés proprement")
-
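Note that `@app.on_event` is deprecated in current FastAPI releases in favor of a lifespan context manager; the same startup/shutdown pairing would translate to a sketch like this (handler bodies elided):

```python
# Lifespan equivalent of the startup/shutdown handlers above (sketch).
from contextlib import asynccontextmanager
from fastapi import FastAPI

@asynccontextmanager
async def lifespan(app: FastAPI):
    # startup: init_db(), scheduler_service.start_async(), startup ntfy send...
    yield
    # shutdown: scheduler_service.shutdown(), notification_service.close()...

app = FastAPI(lifespan=lifespan)
```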
-
-# Démarrer l'application
-if __name__ == "__main__":
- uvicorn.run(
- "app_optimized:app",
- host="0.0.0.0",
- port=8008,
- reload=True,
- log_level="info"
- )
\ No newline at end of file
diff --git a/app/containers_page.js b/app/containers_page.js
index 0bd89a5..a67943d 100644
--- a/app/containers_page.js
+++ b/app/containers_page.js
@@ -13,12 +13,12 @@ const containersPage = {
inspectData: null,
_initialized: false,
_initPromise: null,
-
+
// View settings
viewMode: 'comfortable', // 'comfortable', 'compact', 'grouped'
currentPage: 1,
perPage: 50,
-
+
// Filter state
filters: {
search: '',
@@ -30,10 +30,10 @@ const containersPage = {
favoritesOnly: false,
// ========== INITIALIZATION ==========
-
+
async init() {
if (this._initPromise) return await this._initPromise;
-
+
this._initPromise = (async () => {
this.setupEventListeners();
this.setupKeyboardShortcuts();
@@ -43,7 +43,7 @@ const containersPage = {
await this.loadData();
this._initialized = true;
})();
-
+
return await this._initPromise;
},
@@ -150,7 +150,7 @@ const containersPage = {
document.addEventListener('keydown', (e) => {
// Only active when on containers page
if (currentPage !== 'docker-containers') return;
-
+
// Ignore if in input
if (e.target.tagName === 'INPUT' || e.target.tagName === 'SELECT' || e.target.tagName === 'TEXTAREA') {
// Escape clears search
@@ -184,7 +184,7 @@ const containersPage = {
async loadData() {
this.showLoading();
-
+
try {
const [containersRes, hostsRes] = await Promise.all([
this.fetchAPI('/api/docker/containers'),
@@ -193,16 +193,16 @@ const containersPage = {
this.containers = containersRes.containers || [];
this.hosts = hostsRes.hosts || [];
-
+
// Update host filter dropdown
this.populateHostFilter();
-
+
// Update stats
this.updateStats(containersRes);
-
+
// Apply filters and render
this.applyFilters();
-
+
} catch (error) {
console.error('Error loading containers:', error);
this.showError(error.message);
@@ -212,9 +212,9 @@ const containersPage = {
async refresh() {
const icon = document.getElementById('containers-refresh-icon');
if (icon) icon.classList.add('fa-spin');
-
+
await this.loadData();
-
+
if (icon) icon.classList.remove('fa-spin');
this.showToast('Données actualisées', 'success');
},
@@ -225,16 +225,16 @@ const containersPage = {
'Content-Type': 'application/json',
...options.headers
};
-
+
if (token) {
headers['Authorization'] = `Bearer ${token}`;
}
-
+
const response = await fetch(`${window.location.origin}${endpoint}`, {
...options,
headers
});
-
+
if (!response.ok) {
if (response.status === 401) {
this.showToast('Session expirée', 'error');
@@ -243,7 +243,7 @@ const containersPage = {
}
throw new Error(`API Error: ${response.status}`);
}
-
+
return response.json();
},
@@ -251,22 +251,22 @@ const containersPage = {
applyFilters() {
let result = [...this.containers];
-
+
// Text search with smart tokens
if (this.filters.search) {
result = this.smartSearch(result, this.filters.search);
}
-
+
// Status filter
if (this.filters.status) {
result = result.filter(c => c.state === this.filters.status);
}
-
+
// Host filter
if (this.filters.host) {
result = result.filter(c => c.host_id === this.filters.host);
}
-
+
// Health filter
if (this.filters.health) {
if (this.filters.health === 'none') {
@@ -280,10 +280,10 @@ const containersPage = {
if (this.favoritesOnly && window.favoritesManager) {
result = result.filter(c => window.favoritesManager.isFavorite(c.host_id, c.container_id));
}
-
+
// Sort
result = this.sortContainers(result, this.sortBy);
-
+
this.filteredContainers = result;
this.currentPage = 1;
this.updateActiveFilters();
@@ -292,7 +292,7 @@ const containersPage = {
smartSearch(containers, query) {
const tokens = this.parseSearchTokens(query);
-
+
return containers.filter(c => {
// Check token filters
for (const token of tokens.filters) {
@@ -319,7 +319,7 @@ const containersPage = {
break;
}
}
-
+
// Free text search
if (tokens.freeText) {
const searchStr = tokens.freeText.toLowerCase();
@@ -330,10 +330,10 @@ const containersPage = {
c.compose_project,
c.container_id?.substring(0, 12)
].filter(Boolean).join(' ').toLowerCase();
-
+
if (!searchable.includes(searchStr)) return false;
}
-
+
return true;
});
},
@@ -341,16 +341,16 @@ const containersPage = {
parseSearchTokens(query) {
const filters = [];
let freeText = query;
-
+
// Match tokens like "host:value" or "status:running"
const tokenRegex = /(\w+):(\S+)/g;
let match;
-
+
while ((match = tokenRegex.exec(query)) !== null) {
filters.push({ key: match[1], value: match[2] });
freeText = freeText.replace(match[0], '').trim();
}
-
+
return { filters, freeText };
},
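The `key:value` token grammar above is compact enough to restate; for reference, the same parse expressed in Python (the dominant language of this PR):

```python
# Python restatement of parseSearchTokens(): extract `key:value` tokens,
# keep the remainder as free text.
import re

def parse_search_tokens(query: str) -> dict:
    filters = [{"key": k, "value": v}
               for k, v in re.findall(r"(\w+):(\S+)", query)]
    free_text = re.sub(r"\w+:\S+", "", query).strip()
    return {"filters": filters, "freeText": free_text}

# parse_search_tokens("host:nas status:running plex")
# -> {"filters": [{...host...}, {...status...}], "freeText": "plex"}
```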
@@ -363,10 +363,10 @@ const containersPage = {
sortContainers(containers, sortBy) {
const [field, direction] = sortBy.split('-');
const mult = direction === 'desc' ? -1 : 1;
-
+
return containers.sort((a, b) => {
let valA, valB;
-
+
switch (field) {
case 'name':
valA = a.name.toLowerCase();
@@ -389,7 +389,7 @@ const containersPage = {
valA = a.name.toLowerCase();
valB = b.name.toLowerCase();
}
-
+
if (valA < valB) return -1 * mult;
if (valA > valB) return 1 * mult;
return 0;
@@ -403,10 +403,10 @@ const containersPage = {
const empty = document.getElementById('containers-empty');
const error = document.getElementById('containers-error');
const pagination = document.getElementById('containers-pagination');
-
+
// Hide error
error?.classList.add('hidden');
-
+
// Check empty
if (this.filteredContainers.length === 0) {
list.innerHTML = '';
@@ -414,24 +414,24 @@ const containersPage = {
pagination?.classList.add('hidden');
return;
}
-
+
empty?.classList.add('hidden');
-
+
// Paginate
const start = (this.currentPage - 1) * this.perPage;
const end = Math.min(start + this.perPage, this.filteredContainers.length);
const pageContainers = this.filteredContainers.slice(start, end);
-
+
// Render based on view mode
if (this.viewMode === 'grouped') {
list.innerHTML = this.renderGroupedView(pageContainers);
} else {
list.innerHTML = pageContainers.map(c => this.renderContainerRow(c)).join('');
}
-
+
// Update pagination
this.updatePagination(start, end);
-
+
// Update search clear button visibility
const clearBtn = document.getElementById('containers-search-clear');
if (clearBtn) {
@@ -462,19 +462,19 @@ const containersPage = {
const iconHtml = iconKey
? ` `
: '';
-
+
const healthBadge = c.health && c.health !== 'none' ? `
${c.health}
` : '';
-
+
const projectBadge = c.compose_project ? `
${this.escapeHtml(c.compose_project)}
` : '';
-
+
const portLinks = this.renderPortLinks(c);
-
+
if (isCompact) {
return `
`;
}
-
+
return `
`
@@ -586,7 +586,7 @@ const containersPage = {
renderQuickActions(c) {
const isRunning = c.state === 'running';
-
+
return `
${!isRunning ? `
\d+\/tcp/g;
const links = [];
const seenPorts = new Set();
let match;
-
+
while ((match = portRegex.exec(portStr)) !== null) {
const bindIp = match[1] || '0.0.0.0';
const hostPort = match[2];
-
+
if (bindIp === '127.0.0.1' || bindIp === '::1') continue;
if (seenPorts.has(hostPort)) continue;
seenPorts.add(hostPort);
-
+
const protocol = ['443', '8443', '9443'].includes(hostPort) ? 'https' : 'http';
const url = `${protocol}://${c.host_ip}:${hostPort}`;
-
+
links.push(`
`);
}
-
+
return links.slice(0, 3).join(''); // Limit to 3 port links
},
@@ -675,17 +675,17 @@ const containersPage = {
const elem = document.getElementById(id);
if (elem) elem.textContent = val;
};
-
+
el('containers-total', data.total || 0);
el('containers-running', data.running || 0);
el('containers-stopped', data.stopped || 0);
el('containers-paused', data.paused || 0);
el('containers-hosts-count', data.hosts_count || 0);
-
+
// Update last update time
if (data.last_update) {
const date = new Date(data.last_update);
- document.getElementById('containers-last-update').textContent =
+ document.getElementById('containers-last-update').textContent =
`Mis à jour ${this.formatRelativeTime(date)}`;
}
},
@@ -693,12 +693,12 @@ const containersPage = {
updatePagination(start, end) {
const pagination = document.getElementById('containers-pagination');
if (!pagination) return;
-
+
if (this.filteredContainers.length <= this.perPage) {
pagination.classList.add('hidden');
return;
}
-
+
pagination.classList.remove('hidden');
document.getElementById('containers-showing-start').textContent = start + 1;
document.getElementById('containers-showing-end').textContent = end;
@@ -708,9 +708,9 @@ const containersPage = {
updateActiveFilters() {
const container = document.getElementById('containers-active-filters');
if (!container) return;
-
+
const activeFilters = [];
-
+
if (this.filters.status) {
activeFilters.push({ key: 'status', value: this.filters.status, label: `Status: ${this.filters.status}` });
}
@@ -721,12 +721,12 @@ const containersPage = {
if (this.filters.health) {
activeFilters.push({ key: 'health', value: this.filters.health, label: `Health: ${this.filters.health}` });
}
-
+
if (activeFilters.length === 0) {
container.classList.add('hidden');
return;
}
-
+
container.classList.remove('hidden');
container.innerHTML = activeFilters.map(f => `
@@ -745,10 +745,10 @@ const containersPage = {
populateHostFilter() {
const select = document.getElementById('containers-filter-host');
if (!select) return;
-
+
// Keep first option
select.innerHTML = 'Tous les hosts ';
-
+
this.hosts.forEach(host => {
const option = document.createElement('option');
option.value = host.host_id;
@@ -759,7 +759,7 @@ const containersPage = {
setViewMode(mode) {
this.viewMode = mode;
-
+
// Update button states
['comfortable', 'compact', 'grouped'].forEach(m => {
const btn = document.getElementById(`containers-view-${m}`);
@@ -769,7 +769,7 @@ const containersPage = {
btn.setAttribute('aria-pressed', m === mode ? 'true' : 'false');
}
});
-
+
this.render();
},
@@ -794,11 +794,11 @@ const containersPage = {
async containerAction(hostId, containerId, action) {
try {
this.showToast(`${action}...`, 'info');
-
+
const response = await this.fetchAPI(`/api/docker/containers/${hostId}/${containerId}/${action}`, {
method: 'POST'
});
-
+
if (response.success) {
this.showToast(`Container ${action} réussi`, 'success');
// Refresh after a short delay
@@ -816,12 +816,12 @@ const containersPage = {
this.showToast('Aucun container sélectionné', 'warning');
return;
}
-
+
const confirmMsg = `Êtes-vous sûr de vouloir ${action} ${this.selectedIds.size} container(s) ?`;
if (!confirm(confirmMsg)) return;
-
+
this.showToast(`${action} en cours...`, 'info');
-
+
const results = await Promise.allSettled(
Array.from(this.selectedIds).map(id => {
const [hostId, containerId] = id.split('::');
@@ -830,11 +830,11 @@ const containersPage = {
});
})
);
-
+
const successful = results.filter(r => r.status === 'fulfilled' && r.value.success).length;
- this.showToast(`${successful}/${this.selectedIds.size} container(s) ${action}`,
- successful === this.selectedIds.size ? 'success' : 'warning');
-
+ this.showToast(`${successful}/${this.selectedIds.size} container(s) ${action}`,
+ successful === this.selectedIds.size ? 'success' : 'warning');
+
this.selectedIds.clear();
this.updateBulkActionsBar();
setTimeout(() => this.refresh(), 1000);
@@ -865,7 +865,7 @@ const containersPage = {
const bar = document.getElementById('containers-bulk-actions');
const count = document.getElementById('containers-selected-count');
const selectAll = document.getElementById('containers-select-all');
-
+
if (bar) {
bar.classList.toggle('hidden', this.selectedIds.size === 0);
}
@@ -873,8 +873,8 @@ const containersPage = {
count.textContent = this.selectedIds.size;
}
if (selectAll) {
- selectAll.checked = this.selectedIds.size > 0 &&
- this.selectedIds.size === this.filteredContainers.length;
+ selectAll.checked = this.selectedIds.size > 0 &&
+ this.selectedIds.size === this.filteredContainers.length;
}
},
@@ -883,34 +883,34 @@ const containersPage = {
async openDrawer(hostId, containerId, tab = 'overview') {
const wantedHostId = String(hostId);
const wantedContainerId = String(containerId);
-
+
if (!this._initialized || !this.containers || this.containers.length === 0) {
await this.ensureInit();
}
-
+
this.currentContainer = this.containers.find(
c => String(c.host_id) === wantedHostId && String(c.container_id) === wantedContainerId
);
-
+
if (!this.currentContainer) {
this.showToast('Container non trouvé', 'error');
return;
}
-
+
const drawer = document.getElementById('container-drawer');
const backdrop = document.getElementById('container-drawer-backdrop');
-
+
// Populate drawer
this.populateDrawer();
-
+
// Show drawer
drawer.classList.remove('translate-x-full');
backdrop.classList.remove('hidden');
document.body.style.overflow = 'hidden';
-
+
// Switch to requested tab
this.switchDrawerTab(tab);
-
+
// Load tab-specific data
if (tab === 'logs') {
await this.loadLogs();
@@ -922,11 +922,11 @@ const containersPage = {
closeDrawer() {
const drawer = document.getElementById('container-drawer');
const backdrop = document.getElementById('container-drawer-backdrop');
-
+
drawer.classList.add('translate-x-full');
backdrop.classList.add('hidden');
document.body.style.overflow = '';
-
+
this.currentContainer = null;
this.inspectData = null;
},
@@ -939,7 +939,7 @@ const containersPage = {
populateDrawer() {
const c = this.currentContainer;
if (!c) return;
-
+
const stateColors = {
running: 'bg-green-500',
exited: 'bg-red-500',
@@ -948,10 +948,10 @@ const containersPage = {
restarting: 'bg-orange-500',
dead: 'bg-red-500'
};
-
+
// Header
document.getElementById('drawer-container-name').textContent = c.name;
- document.getElementById('drawer-container-state').className =
+ document.getElementById('drawer-container-state').className =
`w-3 h-3 rounded-full ${stateColors[c.state] || 'bg-gray-500'}`;
const custom = c.customization || window.containerCustomizationsManager?.get(c.host_id, c.container_id);
@@ -965,7 +965,7 @@ const containersPage = {
? ` `
: '';
}
-
+
// Overview
document.getElementById('drawer-host-name').textContent = c.host_name;
document.getElementById('drawer-host-ip').textContent = c.host_ip;
@@ -973,7 +973,7 @@ const containersPage = {
document.getElementById('drawer-health').textContent = c.health || 'No healthcheck';
document.getElementById('drawer-image').textContent = c.image || '—';
document.getElementById('drawer-container-id').textContent = c.container_id;
-
+
// Ports
const portsEl = document.getElementById('drawer-ports');
if (c.ports) {
@@ -981,7 +981,7 @@ const containersPage = {
} else {
portsEl.innerHTML = 'Aucun port exposé ';
}
-
+
// Labels
const labelsEl = document.getElementById('drawer-labels');
if (c.labels && Object.keys(c.labels).length > 0) {
@@ -995,7 +995,7 @@ const containersPage = {
} else {
labelsEl.innerHTML = 'Aucun label ';
}
-
+
// Update action buttons based on state
const isRunning = c.state === 'running';
document.getElementById('drawer-btn-start')?.classList.toggle('hidden', isRunning);
@@ -1008,12 +1008,12 @@ const containersPage = {
tab.classList.toggle('active', tab.dataset.tab === tabName);
tab.classList.toggle('text-gray-400', tab.dataset.tab !== tabName);
});
-
+
// Update tab content
document.querySelectorAll('.drawer-tab-content').forEach(content => {
content.classList.toggle('hidden', !content.id.endsWith(tabName));
});
-
+
// Load content if needed
if (tabName === 'logs' && this.currentContainer) {
this.loadLogs();
@@ -1024,18 +1024,18 @@ const containersPage = {
async loadLogs() {
if (!this.currentContainer) return;
-
+
const logsEl = document.getElementById('drawer-logs-content');
logsEl.textContent = 'Chargement...';
-
+
try {
const tail = document.getElementById('drawer-logs-tail')?.value || 200;
const timestamps = document.getElementById('drawer-logs-timestamps')?.checked || false;
-
+
const response = await this.fetchAPI(
                `/api/docker/containers/${this.currentContainer.host_id}/${this.currentContainer.container_id}/logs?tail=${tail}&timestamps=${timestamps}`

);
-
+
logsEl.textContent = response.logs || 'Aucun log disponible';
} catch (error) {
logsEl.textContent = `Erreur: ${error.message}`;
@@ -1044,15 +1044,15 @@ const containersPage = {
async loadInspect() {
if (!this.currentContainer) return;
-
+
const inspectEl = document.getElementById('drawer-inspect-content');
inspectEl.textContent = 'Chargement...';
-
+
try {
const response = await this.fetchAPI(
`/api/docker/containers/${this.currentContainer.host_id}/${this.currentContainer.container_id}/inspect`
);
-
+
this.inspectData = response.inspect_data || {};
inspectEl.textContent = JSON.stringify(this.inspectData, null, 2);
} catch (error) {
@@ -1065,7 +1065,7 @@ const containersPage = {
this.showToast('Aucune donnée à copier', 'warning');
return;
}
-
+
try {
await navigator.clipboard.writeText(JSON.stringify(this.inspectData, null, 2));
this.showToast('JSON copié !', 'success');
@@ -1105,8 +1105,8 @@ const containersPage = {
const now = new Date();
const d = new Date(date);
const diff = Math.floor((now - d) / 1000);
-
- if (diff < 60) return 'À l\'instant';
+
+        if (diff < 60) return 'À l\'instant';
if (diff < 3600) return `il y a ${Math.floor(diff / 60)} min`;
if (diff < 86400) return `il y a ${Math.floor(diff / 3600)} h`;
return `il y a ${Math.floor(diff / 86400)} j`;
@@ -1129,7 +1129,7 @@ document.addEventListener('DOMContentLoaded', () => {
// Listen for page navigation
const originalNavigateTo = window.navigateTo;
if (originalNavigateTo) {
- window.navigateTo = function(pageName) {
+ window.navigateTo = function (pageName) {
originalNavigateTo(pageName);
if (pageName === 'docker-containers') {
containersPage.init();
diff --git a/app/core/config.py b/app/core/config.py
index 86ba716..3a6bc42 100644
--- a/app/core/config.py
+++ b/app/core/config.py
@@ -60,14 +60,14 @@ class Settings(BaseSettings):
ssh_remote_user: str = Field(default_factory=lambda: os.environ.get("SSH_REMOTE_USER", "root"))
# === API ===
- api_key: str = Field(default_factory=lambda: os.environ.get("API_KEY", "dev-key-12345"))
+ api_key: str = Field(default_factory=lambda: os.environ.get("API_KEY", ""))
api_title: str = "Homelab Automation Dashboard API"
api_version: str = "1.0.0"
api_description: str = "API REST moderne pour la gestion automatique d'homelab"
# === JWT Authentication ===
jwt_secret_key: str = Field(
- default_factory=lambda: os.environ.get("JWT_SECRET_KEY", "dev-secret-key-change-in-production")
+ default_factory=lambda: os.environ.get("JWT_SECRET_KEY", "")
)
jwt_expire_minutes: int = Field(
default_factory=lambda: int(os.environ.get("JWT_EXPIRE_MINUTES", "1440"))
@@ -85,7 +85,13 @@ class Settings(BaseSettings):
return f"sqlite+aiosqlite:///{self.db_path}"
# === CORS ===
- cors_origins: list = Field(default=["*"])
+ cors_origins: list = Field(
+ default_factory=lambda: [
+ o.strip() for o in os.environ.get(
+ "CORS_ORIGINS", "http://localhost:3000,http://localhost:8008"
+ ).split(",") if o.strip()
+ ]
+ )
cors_allow_credentials: bool = True
cors_allow_methods: list = Field(default=["*"])
cors_allow_headers: list = Field(default=["*"])
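With this change, a single comma-separated CORS_ORIGINS variable replaces the wildcard default; its effect, shown with example values:

```python
# Effect of the new CORS_ORIGINS parsing (example values).
import os

os.environ["CORS_ORIGINS"] = "https://dash.example.lan, http://localhost:3000"
origins = [o.strip()
           for o in os.environ["CORS_ORIGINS"].split(",") if o.strip()]
# -> ["https://dash.example.lan", "http://localhost:3000"]  (no "*" wildcard)
```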
diff --git a/app/core/dependencies.py b/app/core/dependencies.py
index 6387d9b..2cc4ad0 100644
--- a/app/core/dependencies.py
+++ b/app/core/dependencies.py
@@ -157,9 +157,8 @@ async def require_admin(
if user.get("type") == "api_key":
return user
- # Vérifier le rôle dans le payload JWT
- payload = user.get("payload", {})
- role = payload.get("role", "viewer")
+ # Vérifier le rôle directement dans le dict utilisateur (pas dans "payload")
+ role = user.get("role", "viewer")
if role != "admin":
raise HTTPException(
diff --git a/app/factory.py b/app/factory.py
index b78c51c..62824d7 100644
--- a/app/factory.py
+++ b/app/factory.py
@@ -5,11 +5,13 @@ Ce module contient la fonction create_app() qui configure et retourne
une instance FastAPI prête à l'emploi.
"""
-from fastapi import FastAPI
+from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
+from fastapi.responses import HTMLResponse, FileResponse
from fastapi.staticfiles import StaticFiles
-from fastapi.responses import HTMLResponse
-from fastapi.responses import FileResponse
+from slowapi import Limiter, _rate_limit_exceeded_handler
+from slowapi.errors import RateLimitExceeded
+from slowapi.util import get_remote_address
from app.core.config import settings
from app.models.database import init_db, async_session_maker
@@ -40,6 +42,11 @@ def create_app() -> FastAPI:
allow_headers=settings.cors_allow_headers,
)
+ # Rate limiting
+ limiter = Limiter(key_func=get_remote_address)
+ app.state.limiter = limiter
+ app.add_exception_handler(RateLimitExceeded, _rate_limit_exceeded_handler)
+
# Monter les fichiers statiques (main.js, etc.)
app.mount("/static", StaticFiles(directory=settings.base_dir, html=False), name="static")
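The limiter registered on `app.state` does nothing by itself; routes opt in with the decorator. A hypothetical sketch (slowapi requires decorated endpoints to accept a `request: Request` parameter):

```python
# Hypothetical throttled route using the limiter created in create_app();
# the path and rate are examples, not part of this PR.
from fastapi import Request

@app.post("/api/auth/login")
@limiter.limit("5/minute")   # keyed per client IP via get_remote_address
async def login(request: Request):
    ...
```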
diff --git a/app/main.js b/app/main.js
index 7b213a8..6915c57 100644
--- a/app/main.js
+++ b/app/main.js
@@ -7,7 +7,7 @@ class DashboardManager {
this.accessToken = localStorage.getItem('accessToken') || null;
this.currentUser = null;
this.setupRequired = false;
-
+
// Données locales (seront remplies par l'API)
this.hosts = [];
this.tasks = [];
@@ -22,12 +22,12 @@ class DashboardManager {
// Alertes (centre de messages)
this.alerts = [];
this.alertsUnread = 0;
-
+
// Logs de tâches depuis les fichiers markdown
this.taskLogs = [];
this.taskLogsStats = { total: 0, completed: 0, failed: 0, running: 0, pending: 0 };
this.taskLogsDates = { years: {} };
-
+
// Filtres actifs
this.currentStatusFilter = 'all';
this.currentDateFilter = { year: '', month: '', day: '' };
@@ -47,22 +47,22 @@ class DashboardManager {
this.currentTargetFilter = 'all';
this.expandedHostDiskDetails = new Set();
-
+
// Pagination côté serveur
this.tasksTotalCount = 0;
this.tasksHasMore = false;
-
+
// Groupes pour la gestion des hôtes
this.envGroups = [];
this.roleGroups = [];
-
+
// Catégories de playbooks
this.playbookCategories = {};
-
+
// Filtres playbooks
this.currentPlaybookCategoryFilter = 'all';
this.currentPlaybookSearch = '';
-
+
// Historique des commandes ad-hoc
this.adhocHistory = [];
this.adhocCategories = [];
@@ -71,12 +71,12 @@ class DashboardManager {
this.adhocWidgetLogs = [];
this.adhocWidgetTotalCount = 0;
this.adhocWidgetHasMore = false;
-
+
// Métriques des hôtes (collectées par les builtin playbooks)
this.hostMetrics = {}; // Map host_id -> HostMetricsSummary
this.builtinPlaybooks = [];
this.metricsLoading = false;
-
+
// Schedules (Planificateur)
this.schedules = [];
this.schedulesStats = { total: 0, active: 0, paused: 0, failures_24h: 0 };
@@ -86,12 +86,12 @@ class DashboardManager {
this.scheduleCalendarMonth = new Date();
this.editingScheduleId = null;
this.scheduleModalStep = 1;
-
+
// WebSocket
this.ws = null;
this.debugModeEnabled = false;
-
+
// Terminal SSH
this.terminalSession = null;
this.terminalDrawerOpen = false;
@@ -102,7 +102,7 @@ class DashboardManager {
this.terminalHeartbeatInterval = null;
this.terminalHeartbeatIntervalMs = 15000; // 15 seconds
this.terminalOpeningPromise = null; // Prevent double-click
-
+
// Terminal History Enhanced State
this.terminalHistoryPanelOpen = false;
this.terminalHistorySelectedIndex = -1;
@@ -112,15 +112,15 @@ class DashboardManager {
// Configuration: collecte métriques
this.metricsCollectionInterval = 'off';
-
+
// Polling des tâches en cours
this.runningTasksPollingInterval = null;
this.pollingIntervalMs = 2000; // Polling toutes les 2 secondes
-
+
// Pagination des tâches
this.tasksDisplayedCount = 20;
this.tasksPerPage = 20;
-
+
}
renderContainerCustomizationColorPicker(initialColor) {
@@ -164,10 +164,10 @@ class DashboardManager {
${palette.map(c => {
- const label = c || 'Aucune';
- const style = c ? `background:${c}` : 'background:transparent';
- return ` `;
- }).join('')}
+ const label = c || 'Aucune';
+ const style = c ? `background:${c}` : 'background:transparent';
+ return ` `;
+ }).join('')}
`;
@@ -435,7 +435,7 @@ class DashboardManager {
renderFavoriteGroupIconPicker(initialIconKey) {
const iconKey = initialIconKey || '';
const color = document.getElementById('fav-group-color-text')?.value || '#7c3aed';
-
+
return `
@@ -463,7 +463,7 @@ class DashboardManager {
const input = document.getElementById('fav-group-icon-key');
const preview = document.getElementById('fav-group-icon-preview');
const color = document.getElementById('fav-group-color-text')?.value || '#7c3aed';
-
+
if (input) input.value = iconKey;
if (preview) {
preview.innerHTML = `
`;
@@ -473,29 +473,29 @@ class DashboardManager {
clearFavGroupIcon() {
const input = document.getElementById('fav-group-icon-key');
const preview = document.getElementById('fav-group-icon-preview');
-
+
if (input) input.value = '';
if (preview) {
preview.innerHTML = '';
}
}
-
+
async init() {
this.setupEventListeners();
this.setupScrollAnimations();
this.startAnimations();
this.loadThemePreference();
this.setupTerminalCleanupHandlers();
-
+
// Check authentication status first
const authOk = await this.checkAuthStatus();
-
+
if (!authOk) {
// Show login screen
this.showLoginScreen();
return;
}
-
+
// Hide login screen if visible
this.hideLoginScreen();
@@ -520,7 +520,7 @@ class DashboardManager {
} catch (e) {
}
}
-
+
await this.loadAppConfig();
this.setDebugBadgeVisible(this.isDebugEnabled());
@@ -528,10 +528,10 @@ class DashboardManager {
await this.loadAllData();
this.renderFavoriteContainersWidget();
-
+
// Connecter WebSocket pour les mises à jour temps réel
this.connectWebSocket();
-
+
// Rafraîchir périodiquement les métriques
setInterval(() => this.loadMetrics(), 30000);
@@ -540,7 +540,7 @@ class DashboardManager {
window.favoritesManager.load().catch(() => null);
}, 30000);
}
-
+
// Démarrer le polling des tâches en cours
this.startRunningTasksPolling();
}
@@ -610,7 +610,7 @@ class DashboardManager {
desktopNav.appendChild(badge);
}
}
-
+
setActiveNav(pageName) {
if (typeof navigateTo === 'function') {
navigateTo(pageName);
@@ -624,33 +624,33 @@ class DashboardManager {
const target = document.getElementById(`page-${pageName}`);
if (target) target.classList.add('active');
}
-
+
// ===== AUTHENTICATION =====
-
+
async checkAuthStatus() {
try {
const response = await fetch(`${this.apiBase}/api/auth/status`, {
headers: this.getAuthHeaders()
});
-
+
if (!response.ok) {
return false;
}
-
+
const data = await response.json();
this.setupRequired = data.setup_required;
-
+
if (data.setup_required) {
this.showSetupScreen();
return false;
}
-
+
if (data.authenticated && data.user) {
this.currentUser = data.user;
this.updateUserDisplay();
return true;
}
-
+
return false;
} catch (error) {
console.error('Auth status check failed:', error);
@@ -680,20 +680,20 @@ class DashboardManager {
this.taskLogs = [log, ...current];
this.renderTasks();
}
-
+
getAuthHeaders() {
const headers = {
'Content-Type': 'application/json'
};
-
+
if (this.accessToken) {
headers['Authorization'] = `Bearer ${this.accessToken}`;
}
// No fallback - require JWT authentication
-
+
return headers;
}
-
+
async login(username, password) {
try {
const response = await fetch(`${this.apiBase}/api/auth/login/json`, {
@@ -701,19 +701,19 @@ class DashboardManager {
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ username, password })
});
-
+
if (!response.ok) {
const error = await response.json();
throw new Error(error.detail || 'Échec de connexion');
}
-
+
const data = await response.json();
this.accessToken = data.access_token;
localStorage.setItem('accessToken', data.access_token);
-
+
// Get user info
await this.checkAuthStatus();
-
+
// Re-initialize dashboard
this.hideLoginScreen();
await this.loadAppConfig();
@@ -721,7 +721,7 @@ class DashboardManager {
await this.loadAllData();
this.connectWebSocket();
this.startRunningTasksPolling();
-
+
this.showNotification('Connexion réussie', 'success');
return true;
} catch (error) {
@@ -730,7 +730,7 @@ class DashboardManager {
return false;
}
}
-
+
async setupAdmin(username, password, email = null, displayName = null) {
try {
const response = await fetch(`${this.apiBase}/api/auth/setup`, {
@@ -743,12 +743,12 @@ class DashboardManager {
display_name: displayName || null
})
});
-
+
if (!response.ok) {
const error = await response.json();
throw new Error(error.detail || 'Échec de configuration');
}
-
+
// Auto-login after setup
return await this.login(username, password);
} catch (error) {
@@ -757,30 +757,30 @@ class DashboardManager {
return false;
}
}
-
+
logout() {
this.accessToken = null;
this.currentUser = null;
localStorage.removeItem('accessToken');
-
+
// Stop polling
if (this.runningTasksPollingInterval) {
clearInterval(this.runningTasksPollingInterval);
}
-
+
// Close WebSocket
if (this.ws) {
this.ws.close();
}
-
+
this.showLoginScreen();
this.showNotification('Déconnexion réussie', 'success');
}
-
+
showLoginScreen() {
const loginScreen = document.getElementById('login-screen');
const mainContent = document.getElementById('main-content');
-
+
if (loginScreen) {
loginScreen.classList.remove('hidden');
if (this.setupRequired) {
@@ -795,16 +795,16 @@ class DashboardManager {
mainContent.classList.add('hidden');
}
}
-
+
showSetupScreen() {
this.setupRequired = true;
this.showLoginScreen();
}
-
+
hideLoginScreen() {
const loginScreen = document.getElementById('login-screen');
const mainContent = document.getElementById('main-content');
-
+
if (loginScreen) {
loginScreen.classList.add('hidden');
}
@@ -812,12 +812,12 @@ class DashboardManager {
mainContent.classList.remove('hidden');
}
}
-
+
updateUserDisplay() {
const userNameEl = document.getElementById('current-user-name');
const userMenuNameEl = document.getElementById('user-menu-name');
const userRoleEl = document.getElementById('current-user-role');
-
+
if (this.currentUser) {
const displayName = this.currentUser.display_name || this.currentUser.username;
if (userNameEl) {
@@ -836,15 +836,15 @@ class DashboardManager {
}
}
}
-
+
// ===== API CALLS =====
-
+
async apiCall(endpoint, options = {}) {
const url = `${this.apiBase}${endpoint}`;
const defaultOptions = {
headers: this.getAuthHeaders()
};
-
+
try {
const response = await fetch(url, { ...defaultOptions, ...options });
if (!response.ok) {
@@ -856,7 +856,7 @@ class DashboardManager {
err.status = 401;
throw err;
}
-
+
let errorDetail = null;
try {
const contentType = response.headers.get('content-type') || '';
@@ -872,8 +872,8 @@ class DashboardManager {
const serverMessage =
(errorDetail && (errorDetail.detail || errorDetail.message || errorDetail.error))
- ? (errorDetail.detail || errorDetail.message || errorDetail.error)
- : response.statusText;
+ ? (errorDetail.detail || errorDetail.message || errorDetail.error)
+ : response.statusText;
const err = new Error(`HTTP ${response.status}: ${serverMessage || 'Erreur inconnue'}`);
err.status = response.status;
@@ -886,7 +886,7 @@ class DashboardManager {
throw error;
}
}
-
+
async loadAllData() {
try {
// Charger en parallèle
@@ -910,7 +910,7 @@ class DashboardManager {
this.apiCall('/api/server/logs?limit=500&offset=0').catch(() => ({ logs: [] })),
this.apiCall('/api/alerts/unread-count').catch(() => ({ unread: 0 }))
]);
-
+
this.hosts = hostsData;
this.tasks = tasksData;
this.logs = logsData;
@@ -922,12 +922,12 @@ class DashboardManager {
this.alertsUnread = alertsUnreadData.unread || 0;
this.updateAlertsBadge();
-
+
// Logs de tâches markdown
this.taskLogs = taskLogsData.logs || [];
this.taskLogsStats = taskStatsData;
this.taskLogsDates = taskDatesData;
-
+
// Historique ad-hoc
this.adhocHistory = adhocHistoryData.commands || [];
this.adhocCategories = adhocCategoriesData.categories || [];
@@ -936,16 +936,16 @@ class DashboardManager {
this.adhocWidgetLogs = adhocTaskLogsData.logs || [];
this.adhocWidgetTotalCount = Number(adhocTaskLogsData.total_count || this.adhocWidgetLogs.length || 0);
this.adhocWidgetHasMore = Boolean(adhocTaskLogsData.has_more);
-
+
// Schedules (Planificateur)
this.schedules = schedulesData.schedules || [];
this.schedulesStats = schedulesStatsData.stats || { total: 0, active: 0, paused: 0, failures_24h: 0 };
this.schedulesUpcoming = schedulesStatsData.upcoming || [];
-
+
// Host metrics (builtin playbooks data)
this.hostMetrics = hostMetricsData || {};
this.builtinPlaybooks = builtinPlaybooksData || [];
-
+
console.log('Data loaded:', {
taskLogs: this.taskLogs.length,
taskLogsStats: this.taskLogsStats,
@@ -953,10 +953,10 @@ class DashboardManager {
adhocCategories: this.adhocCategories.length,
schedules: this.schedules.length
});
-
+
// Charger les résultats de lint depuis l'API
await this.loadPlaybookLintResults();
-
+
// Mettre à jour l'affichage
this.renderHosts();
this.renderTasks();
@@ -968,13 +968,13 @@ class DashboardManager {
this.updateMetricsDisplay(metricsData);
this.updateDateFilters();
this.updateTaskCounts();
-
+
} catch (error) {
console.error('Erreur chargement données:', error);
this.showNotification('Erreur de connexion à l\'API', 'error');
}
}
-
+
async loadMetrics() {
try {
const metrics = await this.apiCall('/api/metrics');
@@ -983,17 +983,17 @@ class DashboardManager {
console.error('Erreur chargement métriques:', error);
}
}
-
+
updateMetricsDisplay(metrics) {
if (!metrics) return;
-
+
const elements = {
'online-hosts': metrics.online_hosts,
'total-tasks': metrics.total_tasks,
'success-rate': `${metrics.success_rate}%`,
'uptime': `${metrics.uptime}%`
};
-
+
Object.entries(elements).forEach(([id, value]) => {
const el = document.getElementById(id);
if (el && value !== undefined) {
@@ -1001,30 +1001,30 @@ class DashboardManager {
}
});
}
-
+
// ===== WEBSOCKET =====
-
+
connectWebSocket() {
const wsProtocol = window.location.protocol === 'https:' ? 'wss:' : 'ws:';
const wsUrl = `${wsProtocol}//${window.location.host}/ws`;
-
+
try {
this.ws = new WebSocket(wsUrl);
-
+
this.ws.onopen = () => {
console.log('WebSocket connecté');
};
-
+
this.ws.onmessage = (event) => {
const data = JSON.parse(event.data);
this.handleWebSocketMessage(data);
};
-
+
this.ws.onclose = () => {
console.log('WebSocket déconnecté, reconnexion dans 5s...');
setTimeout(() => this.connectWebSocket(), 5000);
};
-
+
this.ws.onerror = (error) => {
console.error('WebSocket erreur:', error);
};
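The client above opens ws(s)://<host>/ws, dispatches each JSON message through handleWebSocketMessage by its "type" field (task_created, task_progress, and so on), and reconnects 5 s after any close. A minimal sketch of a compatible server-side endpoint, assuming FastAPI on the backend; the broadcast helper and the example payload are illustrative, not taken from this patch:

    import json
    from fastapi import FastAPI, WebSocket, WebSocketDisconnect

    app = FastAPI()
    clients: set[WebSocket] = set()

    @app.websocket("/ws")
    async def ws_endpoint(ws: WebSocket):
        await ws.accept()
        clients.add(ws)
        try:
            while True:
                await ws.receive_text()  # keep the connection open
        except WebSocketDisconnect:
            clients.discard(ws)

    async def broadcast(message: dict) -> None:
        # e.g. await broadcast({"type": "task_created", "id": "42", "name": "demo"})
        dead = set()
        for ws in clients:
            try:
                await ws.send_text(json.dumps(message))
            except Exception:
                dead.add(ws)
        clients.difference_update(dead)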
@@ -1032,7 +1032,7 @@ class DashboardManager {
console.error('Erreur WebSocket:', error);
}
}
-
+
handleWebSocketMessage(data) {
switch (data.type) {
case 'task_created':
@@ -1120,15 +1120,15 @@ class DashboardManager {
break;
}
}
-
+
// ===== HANDLERS WEBSOCKET SCHEDULES =====
-
+
handleScheduleCreated(schedule) {
this.schedules.unshift(schedule);
this.renderSchedules();
this.showNotification(`Schedule "${schedule.name}" créé`, 'success');
}
-
+
handleScheduleUpdated(schedule) {
const index = this.schedules.findIndex(s => s.id === schedule.id);
if (index !== -1) {
@@ -1136,13 +1136,13 @@ class DashboardManager {
}
this.renderSchedules();
}
-
+
handleScheduleDeleted(data) {
this.schedules = this.schedules.filter(s => s.id !== data.id);
this.renderSchedules();
this.showNotification(`Schedule "${data.name}" supprimé`, 'warning');
}
-
+
handleScheduleRunStarted(data) {
this.showNotification(`Schedule "${data.schedule_name}" démarré`, 'info');
// Mettre à jour le statut du schedule
@@ -1152,12 +1152,12 @@ class DashboardManager {
this.renderSchedules();
}
}
-
+
handleScheduleRunFinished(data) {
const statusMsg = data.success ? 'terminé avec succès' : 'échoué';
const notifType = data.success ? 'success' : 'error';
this.showNotification(`Schedule "${data.schedule_name}" ${statusMsg}`, notifType);
-
+
// Mettre à jour le schedule
const schedule = this.schedules.find(s => s.id === data.schedule_id);
if (schedule && data.run) {
@@ -1168,24 +1168,24 @@ class DashboardManager {
// Rafraîchir les stats
this.refreshSchedulesStats();
}
-
+
// ===== POLLING DES TÂCHES EN COURS =====
-
+
startRunningTasksPolling() {
// Arrêter le polling existant si présent
this.stopRunningTasksPolling();
-
+
// Démarrer le polling
this.runningTasksPollingInterval = setInterval(() => {
this.pollRunningTasks();
}, this.pollingIntervalMs);
-
+
// Exécuter immédiatement une première fois
this.pollRunningTasks();
-
+
console.log('Polling des tâches en cours démarré');
}
-
+
stopRunningTasksPolling() {
if (this.runningTasksPollingInterval) {
clearInterval(this.runningTasksPollingInterval);
@@ -1193,52 +1193,52 @@ class DashboardManager {
console.log('Polling des tâches en cours arrêté');
}
}
-
+
async pollRunningTasks() {
try {
const result = await this.apiCall('/api/tasks/running');
const runningTasks = result.tasks || [];
-
+
// Vérifier si des tâches ont changé de statut
const previousRunningIds = this.tasks
.filter(t => t.status === 'running' || t.status === 'pending')
.map(t => t.id);
const currentRunningIds = runningTasks.map(t => t.id);
-
+
// Détecter les tâches terminées
const completedTaskIds = previousRunningIds.filter(id => !currentRunningIds.includes(id));
-
+
if (completedTaskIds.length > 0) {
// Des tâches ont été terminées - rafraîchir les logs
console.log('Tâches terminées détectées:', completedTaskIds);
await this.refreshTaskLogs();
}
-
+
// Mettre à jour les tâches en cours
this.updateRunningTasks(runningTasks);
-
+
} catch (error) {
console.error('Erreur polling tâches:', error);
}
}
-
+
updateRunningTasks(runningTasks) {
// Mettre à jour la liste des tâches en mémoire
const nonRunningTasks = this.tasks.filter(t => t.status !== 'running' && t.status !== 'pending');
this.tasks = [...runningTasks, ...nonRunningTasks];
-
+
// Mettre à jour l'affichage dynamiquement
this.updateRunningTasksUI(runningTasks);
this.updateTaskCounts();
}
-
+
updateRunningTasksUI(runningTasks) {
const container = document.getElementById('tasks-list');
if (!container) return;
-
+
// Trouver ou créer la section des tâches en cours
let runningSection = container.querySelector('.running-tasks-section');
-
+
if (runningTasks.length === 0) {
// Supprimer la section si plus de tâches en cours
if (runningSection) {
@@ -1246,13 +1246,13 @@ class DashboardManager {
}
return;
}
-
+
// Créer la section si elle n'existe pas
if (!runningSection) {
runningSection = document.createElement('div');
runningSection.className = 'running-tasks-section mb-4';
runningSection.innerHTML = 'En cours';
-
+
// Insérer au début du container (après le header)
const header = container.querySelector('.flex.flex-col');
if (header && header.nextSibling) {
@@ -1261,32 +1261,32 @@ class DashboardManager {
container.prepend(runningSection);
}
}
-
+
// Mettre à jour le contenu des tâches en cours
const tasksContainer = runningSection.querySelector('.running-tasks-list') || document.createElement('div');
tasksContainer.className = 'running-tasks-list space-y-2';
-
+
tasksContainer.innerHTML = runningTasks.map(task => this.createRunningTaskHTML(task)).join('');
-
+
if (!runningSection.querySelector('.running-tasks-list')) {
runningSection.appendChild(tasksContainer);
}
-
+
// Mettre à jour le badge "en cours" dans le header
const runningBadge = container.querySelector('.running-badge');
if (runningBadge) {
runningBadge.textContent = `${runningTasks.length} en cours`;
}
}
-
+
createRunningTaskHTML(task) {
- const startTime = task.start_time
+ const startTime = task.start_time
? new Date(task.start_time).toLocaleTimeString('fr-FR', { hour: '2-digit', minute: '2-digit', second: '2-digit' })
: '--';
-
+
const duration = task.duration || this.calculateDuration(task.start_time);
const progress = task.progress || 0;
-
+
return `
@@ -1318,14 +1318,14 @@ class DashboardManager {
`;
}
-
+
calculateDuration(startTime) {
if (!startTime) return '--';
const start = new Date(startTime);
const now = new Date();
const diffMs = now - start;
const diffSec = Math.floor(diffMs / 1000);
-
+
if (diffSec < 60) return `${diffSec}s`;
const diffMin = Math.floor(diffSec / 60);
const remainingSec = diffSec % 60;
@@ -1334,12 +1334,12 @@ class DashboardManager {
const remainingMin = diffMin % 60;
return `${diffHour}h ${remainingMin}m`;
}
-
+
// ===== HANDLERS WEBSOCKET POUR LES TÂCHES =====
-
+
handleTaskCreated(taskData) {
console.log('Nouvelle tâche créée:', taskData);
-
+
// Ajouter la tâche à la liste
const existingIndex = this.tasks.findIndex(t => t.id === taskData.id);
if (existingIndex === -1) {
@@ -1347,26 +1347,26 @@ class DashboardManager {
} else {
this.tasks[existingIndex] = taskData;
}
-
+
// Mettre à jour l'UI immédiatement
this.updateRunningTasksUI(this.tasks.filter(t => t.status === 'running' || t.status === 'pending'));
this.updateTaskCounts();
-
+
// Notification
this.showNotification(`Tâche "${taskData.name}" démarrée`, 'info');
}
-
+
handleTaskProgress(progressData) {
console.log('Progression tâche:', progressData);
const taskId = progressData && (progressData.task_id || progressData.id);
if (!taskId) return;
-
+
// Mettre à jour la tâche dans la liste
const task = this.tasks.find(t => String(t.id) === String(taskId));
if (task) {
task.progress = progressData.progress;
-
+
// Mettre à jour l'UI de cette tâche spécifique
const taskCard = document.querySelector(`.task-card-${taskId}`);
if (taskCard) {
@@ -1381,19 +1381,19 @@ class DashboardManager {
}
}
}
-
+
handleTaskCompleted(taskData) {
console.log('Tâche terminée:', taskData);
const taskId = taskData && (taskData.task_id || taskData.id);
if (!taskId) return;
-
+
// Retirer la tâche de la liste des tâches en cours
this.tasks = this.tasks.filter(t => String(t.id) !== String(taskId));
-
+
// Mettre à jour l'UI
this.updateRunningTasksUI(this.tasks.filter(t => t.status === 'running' || t.status === 'pending'));
-
+
// Rafraîchir les logs de tâches pour voir la tâche terminée
this.refreshTaskLogs();
@@ -1402,7 +1402,7 @@ class DashboardManager {
this.loadAllData().catch((e) => {
console.error('Erreur rafraîchissement données après fin de tâche:', e);
});
-
+
// Notification
const status = taskData.status || 'completed';
const isSuccess = status === 'completed';
@@ -1411,23 +1411,23 @@ class DashboardManager {
isSuccess ? 'success' : 'error'
);
}
-
+
handleTaskCancelled(taskData) {
console.log('Tâche annulée:', taskData);
-
+
// Retirer la tâche de la liste des tâches en cours
this.tasks = this.tasks.filter(t => String(t.id) !== String(taskData.id));
-
+
// Mettre à jour l'UI
this.updateRunningTasksUI(this.tasks.filter(t => t.status === 'running' || t.status === 'pending'));
-
+
// Rafraîchir les logs de tâches
this.refreshTaskLogs();
-
+
// Notification
this.showNotification('Tâche annulée', 'warning');
}
-
+
async loadLogs() {
try {
const logsData = await this.apiCall('/api/logs');
@@ -1477,7 +1477,7 @@ class DashboardManager {
if (!container) return;
container.scrollTop = 0;
}
-
+
setupEventListeners() {
// Theme toggle (desktop + mobile)
const onToggleTheme = () => {
@@ -1536,28 +1536,28 @@ class DashboardManager {
});
obs.observe(alertsPage, { attributes: true, attributeFilter: ['class'] });
}
-
+
// Initialiser le calendrier de filtrage des tâches
this.setupTaskDateCalendar();
-
+
// Event delegation for terminal buttons (avoids inline onclick issues)
document.addEventListener('click', (e) => {
const btn = e.target.closest('[data-action="terminal"], [data-action="terminal-popout"]');
if (!btn || btn.disabled) return;
-
+
e.stopPropagation();
const action = btn.dataset.action;
const hostId = btn.dataset.hostId;
const hostName = btn.dataset.hostName;
const hostIp = btn.dataset.hostIp;
-
+
if (action === 'terminal') {
this.openTerminal(hostId, hostName, hostIp);
} else if (action === 'terminal-popout') {
this.openTerminalPopout(hostId, hostName, hostIp);
}
});
-
+
// Navigation est gérée par le script de navigation des pages dans index.html
}
@@ -1721,8 +1721,8 @@ class DashboardManager {
const today = new Date();
today.setHours(0, 0, 0, 0);
const isToday = date.getFullYear() === today.getFullYear() &&
- date.getMonth() === today.getMonth() &&
- date.getDate() === today.getDate();
+ date.getMonth() === today.getMonth() &&
+ date.getDate() === today.getDate();
let classes = 'w-9 h-9 flex items-center justify-center rounded-full text-xs transition-colors duration-150 ';
@@ -1776,7 +1776,7 @@ class DashboardManager {
const [y, m, d] = key.split('-').map(v => parseInt(v, 10));
return new Date(y, (m || 1) - 1, d || 1);
}
-
+
toggleTheme() {
const body = document.body;
const isLight = body.classList.toggle('light-theme');
@@ -1793,7 +1793,7 @@ class DashboardManager {
localStorage.setItem('theme', isLight ? 'light' : 'dark');
}
-
+
loadThemePreference() {
const savedTheme = localStorage.getItem('theme');
if (savedTheme === 'light') {
@@ -1810,7 +1810,7 @@ class DashboardManager {
}
}
}
-
+
renderHosts() {
const container = document.getElementById('hosts-list');
const hostsPageContainer = document.getElementById('hosts-page-list');
@@ -1824,15 +1824,15 @@ class DashboardManager {
const focusedContainer = wasSearchFocused
? ([container, hostsPageContainer].filter(c => c).find(c => c.contains(activeEl)) || null)
: null;
-
+
// Filtrer les hôtes par groupe si un filtre est actif
let filteredHosts = this.hosts;
if (this.currentGroupFilter && this.currentGroupFilter !== 'all') {
- filteredHosts = this.hosts.filter(h =>
+ filteredHosts = this.hosts.filter(h =>
h.groups && h.groups.includes(this.currentGroupFilter)
);
}
-
+
// Filtrer par statut bootstrap si un filtre est actif
if (this.currentBootstrapFilter && this.currentBootstrapFilter !== 'all') {
if (this.currentBootstrapFilter === 'ready') {
@@ -1853,16 +1853,16 @@ class DashboardManager {
return name.includes(q) || ip.includes(q) || os.includes(q) || groups.includes(q);
});
}
-
+
// Compter les hôtes par statut bootstrap
const readyCount = this.hosts.filter(h => h.bootstrap_ok).length;
const notConfiguredCount = this.hosts.filter(h => !h.bootstrap_ok).length;
-
+
// Options des groupes pour le filtre
- const groupOptions = this.ansibleGroups.map(g =>
+ const groupOptions = this.ansibleGroups.map(g =>
`
${g} `
).join('');
-
+
// Header avec filtres et boutons - Design professionnel
const headerHtml = `
@@ -1943,7 +1943,7 @@ class DashboardManager {
`;
-
+
// Apply to both containers
const containers = [container, hostsPageContainer].filter(c => c);
containers.forEach(c => c.innerHTML = headerHtml);
@@ -1972,7 +1972,7 @@ class DashboardManager {
}
}
}
-
+
if (filteredHosts.length === 0) {
const emptyHtml = `
@@ -1988,18 +1988,18 @@ class DashboardManager {
containers.forEach(c => c.innerHTML += emptyHtml);
return;
}
-
+
filteredHosts.forEach(host => {
const statusClass = `status-${host.status}`;
-
+
// Formater last_seen
- const lastSeen = host.last_seen
+ const lastSeen = host.last_seen
? new Date(host.last_seen).toLocaleString('fr-FR')
: 'Jamais vérifié';
-
+
// Indicateur de bootstrap
const bootstrapOk = host.bootstrap_ok || false;
- const bootstrapDate = host.bootstrap_date
+ const bootstrapDate = host.bootstrap_date
? new Date(host.bootstrap_date).toLocaleDateString('fr-FR')
: null;
const bootstrapIndicator = bootstrapOk
@@ -2009,13 +2009,13 @@ class DashboardManager {
: `
Non configuré
`;
-
+
// Indicateur de qualité de communication
const commQuality = this.getHostCommunicationQuality(host);
const commIndicator = `
- ${[1,2,3,4,5].map(i => `
+ ${[1, 2, 3, 4, 5].map(i => `
`).join('')}
@@ -2023,26 +2023,26 @@ class DashboardManager {
${commQuality.label}
`;
-
+
const hostCard = document.createElement('div');
hostCard.className = 'host-card group';
-
+
// Séparer les groupes env et role
const hostGroups = host.groups || [];
const envGroup = hostGroups.find(g => g.startsWith('env_'));
const roleGroups = hostGroups.filter(g => g.startsWith('role_'));
-
- const envBadge = envGroup
+
+ const envBadge = envGroup
? `
${envGroup.replace('env_', '')} `
: '';
- const roleBadges = roleGroups.map(g =>
+ const roleBadges = roleGroups.map(g =>
`
${g.replace('role_', '')} `
).join('');
-
+
// Get metrics for this host
const hostMetrics = this.hostMetrics[host.id] || null;
const metricsHtml = this.renderHostMetricsSection(hostMetrics, host.id);
-
+
hostCard.innerHTML = `
@@ -2126,7 +2126,7 @@ class DashboardManager {
});
});
}
-
+
filterHostsByBootstrap(status) {
this.currentBootstrapFilter = status;
this.renderHosts();
@@ -2146,7 +2146,7 @@ class DashboardManager {
}
this.renderHosts();
}
-
+
// Render metrics section for a host card
renderHostMetricsSection(metrics, hostId) {
if (!metrics || metrics.collection_status === 'unknown') {
@@ -2159,14 +2159,14 @@ class DashboardManager {
`;
}
-
+
// Format the last collected time
- const lastCollected = metrics.last_collected
- ? new Date(metrics.last_collected).toLocaleString('fr-FR', {
- day: '2-digit', month: '2-digit', hour: '2-digit', minute: '2-digit'
- })
+ const lastCollected = metrics.last_collected
+ ? new Date(metrics.last_collected).toLocaleString('fr-FR', {
+ day: '2-digit', month: '2-digit', hour: '2-digit', minute: '2-digit'
+ })
: 'N/A';
-
+
// CPU gauge
const cpuPercent = metrics.cpu_load_1m ? Math.min(100, metrics.cpu_load_1m * 25) : 0;
const cpuColor = cpuPercent > 80 ? 'bg-red-500' : cpuPercent > 50 ? 'bg-yellow-500' : 'bg-green-500';
@@ -2182,20 +2182,20 @@ class DashboardManager {
if (cpuFreqText) cpuDetailParts.push(cpuFreqText);
const cpuDetailLine = cpuDetailParts.length ? cpuDetailParts.join(' • ') : '';
const cpuTitle = metrics.cpu_model ? this.escapeHtml(metrics.cpu_model) : '';
-
+
// Memory gauge
const memPercent = metrics.memory_usage_percent || 0;
const memColor = memPercent > 80 ? 'bg-red-500' : memPercent > 50 ? 'bg-yellow-500' : 'bg-green-500';
const memText = memPercent ? `${memPercent.toFixed(0)}%` : 'N/A';
- const memDetail = metrics.memory_total_mb
+ const memDetail = metrics.memory_total_mb
? `${Math.round(metrics.memory_used_mb / 1024 * 10) / 10}/${Math.round(metrics.memory_total_mb / 1024 * 10) / 10} GB`
: '';
-
+
// Disk gauge
const diskPercent = metrics.disk_root_usage_percent || 0;
const diskColor = diskPercent > 90 ? 'bg-red-500' : diskPercent > 70 ? 'bg-yellow-500' : 'bg-green-500';
const diskText = diskPercent ? `${diskPercent.toFixed(0)}%` : 'N/A';
- const diskDetail = metrics.disk_root_total_gb
+ const diskDetail = metrics.disk_root_total_gb
? `${metrics.disk_root_used_gb?.toFixed(0) || 0}/${metrics.disk_root_total_gb?.toFixed(0) || 0} GB`
: '';
@@ -2621,27 +2621,27 @@ class DashboardManager {
Used / Free / Total
${(diskCards || mountCards)
- ? `
${diskCards || mountCards}
`
- : `
Aucun détail disponible
`}
+ ? `
${diskCards || mountCards}
`
+ : `
Aucun détail disponible
`}
${renderLvmSection()}
${renderZfsSection()}
`;
};
-
+
// Temperature (if available)
- const tempHtml = metrics.cpu_temperature
+ const tempHtml = metrics.cpu_temperature
? `
${metrics.cpu_temperature}°C
`
: '';
-
+
// Uptime
- const uptimeHtml = metrics.uptime_human
+ const uptimeHtml = metrics.uptime_human
? `
${metrics.uptime_human}`
: '';
-
+
return `
@@ -2694,19 +2694,19 @@ class DashboardManager {
`;
}
-
+
// Render detailed storage section (accordion)
renderStorageDetailsSection(metrics, hostId) {
const storageDetails = metrics?.storage_details;
const isExpanded = this.expandedStorageDetails?.has(hostId) || false;
-
+
// Use storage_details if available, otherwise fallback to existing metrics fields
const hasStorageDetails = !!storageDetails;
const status = storageDetails?.status || 'unknown';
const osType = storageDetails?.os_type || metrics?.os_name || 'unknown';
const flags = storageDetails?.feature_flags || {};
const summary = storageDetails?.summary || {};
-
+
// Filesystems: from storage_details or build from disk_info
let filesystems = storageDetails?.filesystems || [];
if (!filesystems.length && metrics?.disk_info?.length) {
@@ -2720,43 +2720,43 @@ class DashboardManager {
use_pct: d.usage_percent || 0
}));
}
-
+
// Block devices from storage_details or disk_devices
const blockDevices = storageDetails?.block_devices || metrics?.disk_devices || [];
-
+
// ZFS from storage_details or zfs_info
const zfsPools = storageDetails?.zfs?.pools || metrics?.zfs_info?.pools || [];
const zfsDatasets = storageDetails?.zfs?.datasets || metrics?.zfs_info?.datasets || [];
-
+
// LVM from storage_details or lvm_info
const lvmVgs = storageDetails?.lvm?.vgs || metrics?.lvm_info?.vgs || [];
-
+
const commandsRun = storageDetails?.commands_run || [];
const partialFailures = storageDetails?.partial_failures || [];
const collectedAt = storageDetails?.collected_at || '';
-
+
// Check if we have any data to show
const hasData = filesystems.length > 0 || blockDevices.length > 0 || zfsPools.length > 0 || lvmVgs.length > 0;
-
+
// Build summary chips
const chips = [];
if (filesystems.length) chips.push(`${filesystems.length} FS`);
if (blockDevices.length) chips.push(`${blockDevices.length} disques`);
if (zfsPools.length || flags.has_zfs) chips.push('ZFS');
if (lvmVgs.length || flags.has_lvm) chips.push('LVM');
-
+
// Calculate usage from summary or from filesystems
let usedPct = summary.used_pct ? Number(summary.used_pct).toFixed(0) : null;
let totalBytes = summary.total_bytes || 0;
let usedBytes = summary.used_bytes || 0;
-
+
// If no summary, calculate from filesystems (exclude virtual fs)
if (!totalBytes && filesystems.length) {
const realFs = filesystems.filter(fs => {
const mp = (fs.mountpoint || '').toLowerCase();
const dev = (fs.device || '').toLowerCase();
- return !mp.startsWith('/run') && !mp.startsWith('/sys') && !mp.startsWith('/proc') &&
- !dev.includes('tmpfs') && !dev.includes('devtmpfs');
+ return !mp.startsWith('/run') && !mp.startsWith('/sys') && !mp.startsWith('/proc') &&
+ !dev.includes('tmpfs') && !dev.includes('devtmpfs');
});
totalBytes = realFs.reduce((sum, fs) => sum + (fs.size_bytes || 0), 0);
usedBytes = realFs.reduce((sum, fs) => sum + (fs.used_bytes || 0), 0);
@@ -2764,7 +2764,7 @@ class DashboardManager {
usedPct = ((usedBytes / totalBytes) * 100).toFixed(0);
}
}
-
+
const formatBytes = (bytes) => {
if (!bytes || bytes <= 0) return '';
const gb = bytes / (1024 * 1024 * 1024);
@@ -2772,31 +2772,31 @@ class DashboardManager {
if (gb >= 100) return `${gb.toFixed(0)} GB`;
return `${gb.toFixed(1)} GB`;
};
-
+
const summaryLine = chips.length ? chips.join(' • ') : 'Aucune donnée';
const usageLine = usedPct !== null ? `${usedPct}% utilisé` : '';
const sizeLine = totalBytes > 0 ? `${formatBytes(usedBytes)} / ${formatBytes(totalBytes)}` : '';
-
- const statusBadge = status === 'ok' ? 'bg-green-500/20 text-green-400' :
- status === 'partial' ? 'bg-yellow-500/20 text-yellow-400' : 'bg-red-500/20 text-red-400';
+
+ const statusBadge = status === 'ok' ? 'bg-green-500/20 text-green-400' :
+ status === 'partial' ? 'bg-yellow-500/20 text-yellow-400' : 'bg-red-500/20 text-red-400';
const statusText = status === 'ok' ? 'OK' : status === 'partial' ? 'Partiel' : 'Erreur';
-
+
const getPctColor = (pct) => {
if (pct === null || pct === undefined) return 'bg-gray-600';
return pct >= 90 ? 'bg-red-500' : pct >= 75 ? 'bg-yellow-500' : 'bg-green-500';
};
-
+
const renderFilesystemsTable = () => {
if (!filesystems.length) return 'Aucun filesystem détecté';
-
+
// Filter out virtual filesystems
const filtered = filesystems.filter(fs => {
const mp = (fs.mountpoint || '').toLowerCase();
const dev = (fs.device || '').toLowerCase();
- return !mp.startsWith('/run') && !mp.startsWith('/sys') && !mp.startsWith('/proc') &&
- !dev.includes('tmpfs') && !dev.includes('devtmpfs');
+ return !mp.startsWith('/run') && !mp.startsWith('/sys') && !mp.startsWith('/proc') &&
+ !dev.includes('tmpfs') && !dev.includes('devtmpfs');
});
-
+
return `
@@ -2812,14 +2812,14 @@ class DashboardManager {
${filtered.slice(0, 20).map(fs => {
- const pct = fs.use_pct !== undefined ? Number(fs.use_pct) : null;
- const pctColor = pct >= 90 ? 'text-red-400' : pct >= 75 ? 'text-yellow-400' : 'text-green-400';
- const rowClass = pct >= 85 ? 'bg-red-500/10' : '';
- const rawDevice = (fs.device || '-');
- const deviceDisplay = (typeof rawDevice === 'string' && rawDevice.startsWith('/dev/'))
- ? rawDevice.slice('/dev/'.length)
- : rawDevice;
- return `
+ const pct = fs.use_pct !== undefined ? Number(fs.use_pct) : null;
+ const pctColor = pct >= 90 ? 'text-red-400' : pct >= 75 ? 'text-yellow-400' : 'text-green-400';
+ const rowClass = pct >= 85 ? 'bg-red-500/10' : '';
+ const rawDevice = (fs.device || '-');
+ const deviceDisplay = (typeof rawDevice === 'string' && rawDevice.startsWith('/dev/'))
+ ? rawDevice.slice('/dev/'.length)
+ : rawDevice;
+ return `
${this.escapeHtml(fs.mountpoint || '-')}
${this.escapeHtml(deviceDisplay || '-')}
@@ -2829,21 +2829,21 @@ class DashboardManager {
${pct !== null ? pct + '%' : '-'}
`;
- }).join('')}
+ }).join('')}
`;
};
-
+
const renderZfsSection = () => {
if (!zfsPools.length && !zfsDatasets.length) return '';
-
+
const poolCards = zfsPools.map(pool => {
const pct = pool.cap_pct !== undefined ? Number(pool.cap_pct) : null;
const color = getPctColor(pct);
- const healthColor = pool.health === 'ONLINE' ? 'text-green-400' :
- pool.health === 'DEGRADED' ? 'text-yellow-400' : 'text-red-400';
+ const healthColor = pool.health === 'ONLINE' ? 'text-green-400' :
+ pool.health === 'DEGRADED' ? 'text-yellow-400' : 'text-red-400';
return `
@@ -2860,14 +2860,14 @@ class DashboardManager {
`;
}).join('');
-
+
const datasetsList = zfsDatasets.slice(0, 15).map(ds => `
${this.escapeHtml(ds.name || '')}
${formatBytes(ds.used_bytes)} used
`).join('');
-
+
return `
@@ -2879,10 +2879,10 @@ class DashboardManager {
`;
};
-
+
const renderLvmSection = () => {
if (!lvmVgs.length) return '';
-
+
const vgCards = lvmVgs.map(vg => {
const name = vg.vg_name || vg.name || 'VG';
const size = vg.vg_size || vg.size || '';
@@ -2894,7 +2894,7 @@ class DashboardManager {
`;
}).join('');
-
+
return `
@@ -2905,21 +2905,21 @@ class DashboardManager {
`;
};
-
+
const renderInspectorDrawer = () => {
if (!this.storageInspectorOpen?.has(hostId)) return '';
-
+
const cmdList = commandsRun.map(cmd => `
${this.escapeHtml(cmd.cmd || '')}
${cmd.status || ''}
`).join('') || 'Aucune commande';
-
+
const failuresList = partialFailures.length ? partialFailures.map(f => `
${this.escapeHtml(f)}
`).join('') : '';
-
+
return `
@@ -2955,16 +2955,16 @@ class DashboardManager {
`;
};
-
+
// Don't show section if no data at all
if (!hasData) {
return '';
}
-
+
// Status badge - show "Données existantes" if using fallback data
const displayStatusBadge = hasStorageDetails ? statusBadge : 'bg-blue-500/20 text-blue-400';
const displayStatusText = hasStorageDetails ? statusText : 'Données';
-
+
return `
@@ -2995,7 +2995,7 @@ class DashboardManager {
`;
}
-
+
// Toggle storage details accordion
toggleStorageDetails(hostId) {
if (!this.expandedStorageDetails) this.expandedStorageDetails = new Set();
@@ -3006,7 +3006,7 @@ class DashboardManager {
}
this.renderHosts();
}
-
+
// Toggle storage inspector drawer
toggleStorageInspector(hostId) {
if (!this.storageInspectorOpen) this.storageInspectorOpen = new Set();
@@ -3017,22 +3017,22 @@ class DashboardManager {
}
this.renderHosts();
}
-
+
// Collect metrics for all hosts
async collectAllHostMetrics() {
if (this.metricsLoading) {
this.showNotification('Collecte déjà en cours...', 'warning');
return;
}
-
+
this.metricsLoading = true;
this.showNotification('Collecte des métriques en cours...', 'info');
-
+
try {
const result = await this.apiCall('/api/builtin-playbooks/collect-all', {
method: 'POST'
});
-
+
if (result.success) {
const message = result.message || `Collecte des métriques lancée pour ${result.hosts_count || 0} hôte(s)`;
this.showNotification(message, 'success');
@@ -3078,7 +3078,7 @@ class DashboardManager {
this.showNotification(`Erreur lors de l'installation: ${error.detail || error.message || 'Erreur inconnue'}`, 'error');
}
}
-
+
// Load host metrics from API
async loadHostMetrics() {
try {
@@ -3088,11 +3088,11 @@ class DashboardManager {
this.hostMetrics = {};
}
}
-
+
// Collect metrics for a single host
async collectHostMetrics(hostName) {
this.showNotification(`Collecte des métriques pour ${hostName}...`, 'info');
-
+
try {
const result = await this.apiCall('/api/builtin-playbooks/execute', {
method: 'POST',
@@ -3101,7 +3101,7 @@ class DashboardManager {
target: hostName
})
});
-
+
if (result.success) {
this.showNotification(`Métriques collectées pour ${hostName}`, 'success');
await this.loadHostMetrics();
@@ -3114,7 +3114,7 @@ class DashboardManager {
this.showNotification('Erreur lors de la collecte des métriques', 'error');
}
}
-
+
// Calcul de la qualité de communication d'un hôte
getHostCommunicationQuality(host) {
// Facteurs de qualité:
@@ -3122,10 +3122,10 @@ class DashboardManager {
// - Dernière vérification (last_seen)
// - Bootstrap configuré
// - Historique des tâches récentes (si disponible)
-
+
let score = 0;
let factors = [];
-
+
// Statut online = +2 points
if (host.status === 'online') {
score += 2;
@@ -3133,19 +3133,19 @@ class DashboardManager {
} else if (host.status === 'offline') {
factors.push('Hors ligne');
}
-
+
// Bootstrap OK = +1 point
if (host.bootstrap_ok) {
score += 1;
factors.push('Ansible configuré');
}
-
+
// Last seen récent = +2 points (moins de 1h), +1 point (moins de 24h)
if (host.last_seen) {
const lastSeenDate = new Date(host.last_seen);
const now = new Date();
const hoursDiff = (now - lastSeenDate) / (1000 * 60 * 60);
-
+
if (hoursDiff < 1) {
score += 2;
factors.push('Vérifié récemment');
@@ -3158,10 +3158,10 @@ class DashboardManager {
} else {
factors.push('Jamais vérifié');
}
-
+
// Convertir le score en niveau (1-5)
const level = Math.min(5, Math.max(1, Math.round(score)));
-
+
// Déterminer couleur et label selon le niveau
let colorClass, textClass, label;
if (level >= 4) {
@@ -3181,7 +3181,7 @@ class DashboardManager {
textClass = 'text-red-400';
label = 'Faible';
}
-
+
return {
level,
colorClass,
@@ -3190,18 +3190,18 @@ class DashboardManager {
tooltip: factors.join(' • ')
};
}
-
+
// Modal pour exécuter un playbook sur un hôte spécifique
async showPlaybookModalForHost(hostName) {
// Récupérer la liste des playbooks compatibles avec cet hôte
try {
const pbResult = await this.apiCall(`/api/ansible/playbooks?target=${encodeURIComponent(hostName)}`);
const playbooks = (pbResult && pbResult.playbooks) ? pbResult.playbooks : [];
-
+
const playbookOptions = playbooks.map(p => `
${p.name}${p.description ? ` - ${p.description}` : ''}
`).join('');
-
+
const modalContent = `
@@ -3254,24 +3254,24 @@ class DashboardManager {
`;
-
+
this.showModal('Exécuter un Playbook', modalContent);
} catch (error) {
this.showNotification(`Erreur chargement playbooks: ${error.message}`, 'error');
}
}
-
+
async executePlaybookOnHost(hostName) {
const playbookSelect = document.getElementById('playbook-select');
const extraVarsInput = document.getElementById('playbook-extra-vars');
const checkModeInput = document.getElementById('playbook-check-mode');
-
+
const playbook = playbookSelect?.value;
if (!playbook) {
this.showNotification('Veuillez sélectionner un playbook', 'warning');
return;
}
-
+
let extraVars = {};
if (extraVarsInput?.value.trim()) {
try {
@@ -3281,9 +3281,9 @@ class DashboardManager {
return;
}
}
-
+
const checkMode = checkModeInput?.checked || false;
-
+
this.closeModal();
if (this._playbookLaunchInFlight) {
this.showNotification('Une exécution de playbook est déjà en cours de lancement', 'info');
@@ -3291,7 +3291,7 @@ class DashboardManager {
}
this._playbookLaunchInFlight = true;
this.showNotification('Lancement du playbook en arrière-plan...', 'info');
-
+
try {
const result = await this.apiCall('/api/ansible/execute', {
method: 'POST',
@@ -3302,16 +3302,16 @@ class DashboardManager {
check_mode: checkMode
})
});
-
+
this.showNotification(`Playbook "${playbook}" lancé sur ${hostName} (tâche ${result.task_id})`, 'success');
-
+
} catch (error) {
this.showNotification(`Erreur: ${error.message}`, 'error');
} finally {
this._playbookLaunchInFlight = false;
}
}
-
+
async refreshHosts() {
this.showLoading();
try {
@@ -3324,19 +3324,19 @@ class DashboardManager {
this.showNotification(`Erreur: ${error.message}`, 'error');
}
}
-
+
async syncHostsFromAnsible() {
this.showLoading();
try {
const result = await this.apiCall('/api/hosts/sync', { method: 'POST' });
await this.loadAllData();
this.hideLoading();
-
+
// Afficher un résumé détaillé
const created = result.created?.length || 0;
const skipped = result.skipped?.length || 0;
const errors = result.errors?.length || 0;
-
+
if (created > 0) {
this.showNotification(
`Import réussi: ${created} hôte(s) importé(s), ${skipped} déjà existant(s)`,
@@ -3350,7 +3350,7 @@ class DashboardManager {
} else {
this.showNotification('Aucun hôte trouvé dans l\'inventaire Ansible', 'warning');
}
-
+
if (errors > 0) {
console.error('Erreurs lors de l\'import:', result.errors);
this.showNotification(`${errors} erreur(s) lors de l'import`, 'error');
@@ -3360,14 +3360,14 @@ class DashboardManager {
this.showNotification(`Erreur: ${error.message}`, 'error');
}
}
-
+
filterHostsByGroup(group) {
this.currentGroupFilter = group;
this.renderHosts();
}
-
+
// ===== GESTION DES HÔTES (CRUD) =====
-
+
async loadHostGroups() {
try {
const result = await this.apiCall('/api/hosts/groups');
@@ -3379,22 +3379,22 @@ class DashboardManager {
return { env_groups: [], role_groups: [] };
}
}
-
+
async showAddHostModal() {
// Charger les groupes disponibles
await this.loadHostGroups();
-
- const envOptions = this.envGroups.map(g =>
+
+ const envOptions = this.envGroups.map(g =>
`
${g.replace('env_', '')} `
).join('');
-
+
const roleCheckboxes = this.roleGroups.map(g => `
${g.replace('role_', '')}
`).join('');
-
+
this.showModal('Ajouter un Host', `