"""
|
|
Homelab Automation Dashboard - Backend Optimisé
|
|
API REST moderne avec FastAPI pour la gestion d'homelab
|
|
"""

from datetime import datetime, timezone, timedelta
from pathlib import Path
from time import perf_counter, time
import os
import re
import shutil
import subprocess
import sqlite3
import yaml
from abc import ABC, abstractmethod
from typing import Literal, Any, List, Dict, Optional
from threading import Lock
import asyncio
import json
import uuid

# APScheduler imports
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from apscheduler.triggers.cron import CronTrigger
from apscheduler.triggers.date import DateTrigger
from apscheduler.jobstores.memory import MemoryJobStore
from apscheduler.executors.asyncio import AsyncIOExecutor
from croniter import croniter
import pytz

from fastapi import FastAPI, HTTPException, Depends, Request, Form, WebSocket, WebSocketDisconnect
from fastapi.responses import HTMLResponse, JSONResponse, FileResponse
from fastapi.security import APIKeyHeader
from fastapi.templating import Jinja2Templates
from fastapi.middleware.cors import CORSMiddleware
from fastapi.staticfiles import StaticFiles
from pydantic import BaseModel, Field, field_validator, ConfigDict
from sqlalchemy.ext.asyncio import AsyncSession
import uvicorn

# Import DB layer (async SQLAlchemy)
from models.database import get_db, init_db  # type: ignore
from crud.host import HostRepository  # type: ignore
from crud.bootstrap_status import BootstrapStatusRepository  # type: ignore
from crud.log import LogRepository  # type: ignore
from crud.task import TaskRepository  # type: ignore
from crud.schedule import ScheduleRepository  # type: ignore
from crud.schedule_run import ScheduleRunRepository  # type: ignore

BASE_DIR = Path(__file__).resolve().parent

# Application configuration
app = FastAPI(
    title="Homelab Automation Dashboard API",
    version="1.0.0",
    description="API REST moderne pour la gestion automatique d'homelab",
    docs_url="/api/docs",
    redoc_url="/api/redoc"
)

# CORS middleware for development
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # Restrict in production
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

app.mount("/static", StaticFiles(directory=BASE_DIR, html=False), name="static")

# Paths and environment variables
LOGS_DIR = Path(os.environ.get("LOGS_DIR", "/logs"))
ANSIBLE_DIR = BASE_DIR.parent / "ansible"
SSH_KEY_PATH = os.environ.get("SSH_KEY_PATH", str(Path.home() / ".ssh" / "id_rsa"))
SSH_USER = os.environ.get("SSH_USER", "automation")
SSH_REMOTE_USER = os.environ.get("SSH_REMOTE_USER", "root")
DB_PATH = LOGS_DIR / "homelab.db"
API_KEY = os.environ.get("API_KEY", "dev-key-12345")
# Directory for markdown task logs (YYYY/MM/DD layout)
DIR_LOGS_TASKS = Path(os.environ.get("DIR_LOGS_TASKS", str(BASE_DIR.parent / "tasks_logs")))
# JSON file for the ad-hoc command history
ADHOC_HISTORY_FILE = DIR_LOGS_TASKS / ".adhoc_history.json"
# JSON files for persisted statuses
BOOTSTRAP_STATUS_FILE = DIR_LOGS_TASKS / ".bootstrap_status.json"
HOST_STATUS_FILE = ANSIBLE_DIR / ".host_status.json"

# Mapping from actions to playbooks
ACTION_PLAYBOOK_MAP = {
    'upgrade': 'vm-upgrade.yml',
    'reboot': 'vm-reboot.yml',
    'health-check': 'health-check.yml',
    'backup': 'backup-config.yml',
    'bootstrap': 'bootstrap-host.yml',
}

# API key handler
api_key_header = APIKeyHeader(name="X-API-Key", auto_error=False)

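# Illustrative sketch (not part of the original module): how an action name is
# expected to resolve to a playbook under ANSIBLE_DIR. The 'playbooks'
# subdirectory below is an assumption for illustration only.
#
#   action = "health-check"
#   playbook = ACTION_PLAYBOOK_MAP[action]              # -> "health-check.yml"
#   playbook_path = ANSIBLE_DIR / "playbooks" / playbook
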
# Pydantic models
class CommandResult(BaseModel):
    status: str
    return_code: int
    stdout: str
    stderr: Optional[str] = None
    execution_time: Optional[float] = None
    timestamp: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))


class Host(BaseModel):
    id: str
    name: str
    ip: str
    status: Literal["online", "offline", "warning"]
    os: str
    last_seen: Optional[datetime] = None
    created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
    groups: List[str] = []  # Ansible groups this host belongs to
    bootstrap_ok: bool = False  # Whether bootstrap completed successfully
    bootstrap_date: Optional[datetime] = None  # Date of the last successful bootstrap

    class Config:
        json_encoders = {
            datetime: lambda v: v.isoformat()
        }


class Task(BaseModel):
    id: str
    name: str
    host: str
    status: Literal["pending", "running", "completed", "failed", "cancelled"]
    progress: int = Field(ge=0, le=100, default=0)
    start_time: Optional[datetime] = None
    end_time: Optional[datetime] = None
    duration: Optional[str] = None
    output: Optional[str] = None
    error: Optional[str] = None

    class Config:
        json_encoders = {
            datetime: lambda v: v.isoformat() if v else None
        }


class LogEntry(BaseModel):
    id: int
    timestamp: datetime
    level: Literal["DEBUG", "INFO", "WARN", "ERROR"]
    message: str
    source: Optional[str] = None
    host: Optional[str] = None

    class Config:
        json_encoders = {
            datetime: lambda v: v.isoformat()
        }


class SystemMetrics(BaseModel):
    online_hosts: int
    total_tasks: int
    success_rate: float
    uptime: float
    cpu_usage: float
    memory_usage: float
    disk_usage: float


class HealthCheck(BaseModel):
    host: str
    ssh_ok: bool = False
    ansible_ok: bool = False
    sudo_ok: bool = False
    reachable: bool = False
    error_message: Optional[str] = None
    response_time: Optional[float] = None
    cached: bool = False
    cache_age: int = 0

class AnsibleExecutionRequest(BaseModel):
    playbook: str = Field(..., description="Nom du playbook à exécuter")
    target: str = Field(default="all", description="Hôte ou groupe cible")
    extra_vars: Optional[Dict[str, Any]] = Field(default=None, description="Variables supplémentaires")
    check_mode: bool = Field(default=False, description="Mode dry-run (--check)")
    verbose: bool = Field(default=False, description="Mode verbeux")


class AnsibleInventoryHost(BaseModel):
    name: str
    ansible_host: str
    group: str
    groups: List[str] = []  # All groups this host belongs to
    vars: Dict[str, Any] = {}


class TaskRequest(BaseModel):
    host: Optional[str] = Field(default=None, description="Hôte cible")
    group: Optional[str] = Field(default=None, description="Groupe cible")
    action: str = Field(..., description="Action à exécuter")
    cmd: Optional[str] = Field(default=None, description="Commande personnalisée")
    extra_vars: Optional[Dict[str, Any]] = Field(default=None, description="Variables Ansible")
    tags: Optional[List[str]] = Field(default=None, description="Tags Ansible")
    dry_run: bool = Field(default=False, description="Mode simulation")
    ssh_user: Optional[str] = Field(default=None, description="Utilisateur SSH")
    ssh_password: Optional[str] = Field(default=None, description="Mot de passe SSH")

    @field_validator('action')
    @classmethod
    def validate_action(cls, v: str) -> str:
        valid_actions = ['upgrade', 'reboot', 'health-check', 'backup', 'deploy', 'rollback', 'maintenance', 'bootstrap']
        if v not in valid_actions:
            raise ValueError(f'Action doit être l\'une de: {", ".join(valid_actions)}')
        return v

class HostRequest(BaseModel):
    name: str = Field(..., min_length=3, max_length=100, description="Hostname (ex: server.domain.home)")
    # ansible_host may be either an IPv4 address or a resolvable hostname, so no pattern constraint is applied
    ip: Optional[str] = Field(default=None, description="Adresse IP ou hostname (optionnel si hostname résolvable)")
    os: str = Field(default="Linux", min_length=3, max_length=50)
    ssh_user: Optional[str] = Field(default="root", min_length=1, max_length=50)
    ssh_port: int = Field(default=22, ge=1, le=65535)
    description: Optional[str] = Field(default=None, max_length=200)
    env_group: str = Field(..., description="Groupe d'environnement (ex: env_homelab, env_prod)")
    role_groups: List[str] = Field(default=[], description="Groupes de rôles (ex: role_proxmox, role_sbc)")


class HostUpdateRequest(BaseModel):
    """Host update request"""
    env_group: Optional[str] = Field(default=None, description="Nouveau groupe d'environnement")
    role_groups: Optional[List[str]] = Field(default=None, description="Nouveaux groupes de rôles")
    ansible_host: Optional[str] = Field(default=None, description="Nouvelle adresse ansible_host")


class GroupRequest(BaseModel):
    """Request to create a group"""
    name: str = Field(..., min_length=3, max_length=50, description="Nom du groupe (ex: env_prod, role_web)")
    type: str = Field(..., description="Type de groupe: 'env' ou 'role'")

    @field_validator('name')
    @classmethod
    def validate_name(cls, v: str) -> str:
        if not re.match(r'^[a-zA-Z0-9_-]+$', v):
            raise ValueError('Le nom du groupe ne peut contenir que des lettres, chiffres, tirets et underscores')
        return v

    @field_validator('type')
    @classmethod
    def validate_type(cls, v: str) -> str:
        if v not in ['env', 'role']:
            raise ValueError("Le type doit être 'env' ou 'role'")
        return v


class GroupUpdateRequest(BaseModel):
    """Request to rename a group"""
    new_name: str = Field(..., min_length=3, max_length=50, description="Nouveau nom du groupe")

    @field_validator('new_name')
    @classmethod
    def validate_new_name(cls, v: str) -> str:
        if not re.match(r'^[a-zA-Z0-9_-]+$', v):
            raise ValueError('Le nom du groupe ne peut contenir que des lettres, chiffres, tirets et underscores')
        return v

class GroupDeleteRequest(BaseModel):
    """Request to delete a group"""
    move_hosts_to: Optional[str] = Field(default=None, description="Groupe vers lequel déplacer les hôtes")


class AdHocCommandRequest(BaseModel):
    """Request to run an Ansible ad-hoc command"""
    target: str = Field(..., description="Hôte ou groupe cible")
    command: str = Field(..., description="Commande shell à exécuter")
    module: str = Field(default="shell", description="Module Ansible (shell, command, raw)")
    become: bool = Field(default=False, description="Exécuter avec sudo")
    timeout: int = Field(default=60, ge=5, le=600, description="Timeout en secondes")


class AdHocCommandResult(BaseModel):
    """Result of an ad-hoc command"""
    target: str
    command: str
    success: bool
    return_code: int
    stdout: str
    stderr: Optional[str] = None
    duration: float
    hosts_results: Optional[Dict[str, Any]] = None


class AdHocHistoryEntry(BaseModel):
    """Entry in the ad-hoc command history"""
    id: str
    command: str
    target: str
    module: str
    become: bool
    category: str = "default"
    description: Optional[str] = None
    created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
    last_used: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
    use_count: int = 1


class AdHocHistoryCategory(BaseModel):
    """Category used to organize ad-hoc commands"""
    name: str
    description: Optional[str] = None
    color: str = "#7c3aed"
    icon: str = "fa-folder"


class TaskLogFile(BaseModel):
    """Representation of a task log file"""
    id: str
    filename: str
    path: str
    task_name: str
    target: str
    status: str
    date: str  # YYYY-MM-DD format
    year: str
    month: str
    day: str
    created_at: datetime
    size_bytes: int
    # Extra fields for enriched display
    start_time: Optional[str] = None  # ISO or HH:MM:SS format
    end_time: Optional[str] = None  # ISO or HH:MM:SS format
    duration: Optional[str] = None  # Formatted duration
    duration_seconds: Optional[int] = None  # Duration in seconds
    hosts: List[str] = []  # Hosts involved
    category: Optional[str] = None  # Category (Playbook, Ad-hoc, etc.)
    subcategory: Optional[str] = None  # Subcategory
    target_type: Optional[str] = None  # Target type: 'host', 'group', 'role'


class TasksFilterParams(BaseModel):
    """Task filtering parameters"""
    status: Optional[str] = None  # pending, running, completed, failed, all
    year: Optional[str] = None
    month: Optional[str] = None
    day: Optional[str] = None
    target: Optional[str] = None
    search: Optional[str] = None

# ===== SCHEDULER MODELS =====

class ScheduleRecurrence(BaseModel):
    """Recurrence configuration for a schedule"""
    type: Literal["daily", "weekly", "monthly", "custom"] = "daily"
    time: str = Field(default="02:00", description="Heure d'exécution HH:MM")
    days: Optional[List[int]] = Field(default=None, description="Jours de la semaine (1-7, lundi=1) pour weekly")
    day_of_month: Optional[int] = Field(default=None, ge=1, le=31, description="Jour du mois (1-31) pour monthly")
    cron_expression: Optional[str] = Field(default=None, description="Expression cron pour custom")


class Schedule(BaseModel):
    """Model of a playbook schedule"""
    id: str = Field(default_factory=lambda: f"sched_{uuid.uuid4().hex[:12]}")
    name: str = Field(..., min_length=3, max_length=100, description="Nom du schedule")
    description: Optional[str] = Field(default=None, max_length=500)
    playbook: str = Field(..., description="Nom du playbook à exécuter")
    target_type: Literal["group", "host"] = Field(default="group", description="Type de cible")
    target: str = Field(default="all", description="Nom du groupe ou hôte cible")
    extra_vars: Optional[Dict[str, Any]] = Field(default=None, description="Variables supplémentaires")
    schedule_type: Literal["once", "recurring"] = Field(default="recurring")
    recurrence: Optional[ScheduleRecurrence] = Field(default=None)
    timezone: str = Field(default="America/Montreal", description="Fuseau horaire")
    start_at: Optional[datetime] = Field(default=None, description="Date de début (optionnel)")
    end_at: Optional[datetime] = Field(default=None, description="Date de fin (optionnel)")
    next_run_at: Optional[datetime] = Field(default=None, description="Prochaine exécution calculée")
    last_run_at: Optional[datetime] = Field(default=None, description="Dernière exécution")
    last_status: Literal["success", "failed", "running", "never"] = Field(default="never")
    enabled: bool = Field(default=True, description="Schedule actif ou en pause")
    retry_on_failure: int = Field(default=0, ge=0, le=3, description="Nombre de tentatives en cas d'échec")
    timeout: int = Field(default=3600, ge=60, le=86400, description="Timeout en secondes")
    tags: List[str] = Field(default=[], description="Tags pour catégorisation")
    run_count: int = Field(default=0, description="Nombre total d'exécutions")
    success_count: int = Field(default=0, description="Nombre de succès")
    failure_count: int = Field(default=0, description="Nombre d'échecs")
    created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
    updated_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))

    class Config:
        json_encoders = {
            datetime: lambda v: v.isoformat() if v else None
        }

    @field_validator('recurrence', mode='before')
    @classmethod
    def validate_recurrence(cls, v, info):
        # When schedule_type is 'once', recurrence is optional
        return v


class ScheduleRun(BaseModel):
    """History entry for one schedule execution"""
    id: str = Field(default_factory=lambda: f"run_{uuid.uuid4().hex[:12]}")
    schedule_id: str = Field(..., description="ID du schedule parent")
    task_id: Optional[str] = Field(default=None, description="ID de la tâche créée")
    started_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
    finished_at: Optional[datetime] = Field(default=None)
    status: Literal["running", "success", "failed", "canceled"] = Field(default="running")
    duration_seconds: Optional[float] = Field(default=None)
    hosts_impacted: int = Field(default=0)
    error_message: Optional[str] = Field(default=None)
    retry_attempt: int = Field(default=0, description="Numéro de la tentative (0 = première)")

    class Config:
        json_encoders = {
            datetime: lambda v: v.isoformat() if v else None
        }

class ScheduleCreateRequest(BaseModel):
    """Schedule creation request"""
    name: str = Field(..., min_length=3, max_length=100)
    description: Optional[str] = Field(default=None, max_length=500)
    playbook: str = Field(...)
    target_type: Literal["group", "host"] = Field(default="group")
    target: str = Field(default="all")
    extra_vars: Optional[Dict[str, Any]] = Field(default=None)
    schedule_type: Literal["once", "recurring"] = Field(default="recurring")
    recurrence: Optional[ScheduleRecurrence] = Field(default=None)
    timezone: str = Field(default="America/Montreal")
    start_at: Optional[datetime] = Field(default=None)
    end_at: Optional[datetime] = Field(default=None)
    enabled: bool = Field(default=True)
    retry_on_failure: int = Field(default=0, ge=0, le=3)
    timeout: int = Field(default=3600, ge=60, le=86400)
    tags: List[str] = Field(default=[])

    @field_validator('timezone')
    @classmethod
    def validate_timezone(cls, v: str) -> str:
        try:
            pytz.timezone(v)
            return v
        except pytz.exceptions.UnknownTimeZoneError:
            raise ValueError(f"Fuseau horaire invalide: {v}")


class ScheduleUpdateRequest(BaseModel):
    """Schedule update request"""
    name: Optional[str] = Field(default=None, min_length=3, max_length=100)
    description: Optional[str] = Field(default=None, max_length=500)
    playbook: Optional[str] = Field(default=None)
    target_type: Optional[Literal["group", "host"]] = Field(default=None)
    target: Optional[str] = Field(default=None)
    extra_vars: Optional[Dict[str, Any]] = Field(default=None)
    schedule_type: Optional[Literal["once", "recurring"]] = Field(default=None)
    recurrence: Optional[ScheduleRecurrence] = Field(default=None)
    timezone: Optional[str] = Field(default=None)
    start_at: Optional[datetime] = Field(default=None)
    end_at: Optional[datetime] = Field(default=None)
    enabled: Optional[bool] = Field(default=None)
    retry_on_failure: Optional[int] = Field(default=None, ge=0, le=3)
    timeout: Optional[int] = Field(default=None, ge=60, le=86400)
    tags: Optional[List[str]] = Field(default=None)


class ScheduleStats(BaseModel):
    """Global schedule statistics"""
    total: int = 0
    active: int = 0
    paused: int = 0
    expired: int = 0
    next_execution: Optional[datetime] = None
    next_schedule_name: Optional[str] = None
    failures_24h: int = 0
    executions_24h: int = 0
    success_rate_7d: float = 0.0

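# Illustrative sketch (values are assumptions, not taken from this module): a
# weekly ScheduleCreateRequest payload as a client might POST it, and how it
# parses into the models above.
#
#   payload = {
#       "name": "Weekly upgrade",
#       "playbook": "vm-upgrade.yml",
#       "target_type": "group",
#       "target": "env_homelab",
#       "schedule_type": "recurring",
#       "recurrence": {"type": "weekly", "time": "02:00", "days": [1, 4]},
#       "timezone": "America/Montreal",
#   }
#   request = ScheduleCreateRequest(**payload)  # recurrence becomes a ScheduleRecurrence
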
# ===== MARKDOWN LOGGING SERVICE =====

class TaskLogService:
    """Service that stores task logs as markdown files"""

    def __init__(self, base_dir: Path):
        self.base_dir = base_dir
        self._ensure_base_dir()

    def _ensure_base_dir(self):
        """Create the base directory if it does not exist"""
        self.base_dir.mkdir(parents=True, exist_ok=True)

    def _get_date_path(self, dt: datetime = None) -> Path:
        """Return the directory path for a given date (YYYY/MM/DD)"""
        if dt is None:
            dt = datetime.now(timezone.utc)
        year = dt.strftime("%Y")
        month = dt.strftime("%m")
        day = dt.strftime("%d")
        return self.base_dir / year / month / day

    def _generate_task_id(self) -> str:
        """Generate a unique task ID"""
        return f"task_{datetime.now(timezone.utc).strftime('%H%M%S')}_{uuid.uuid4().hex[:6]}"

    def save_task_log(self, task: 'Task', output: str = "", error: str = "") -> str:
        """Save a task log as markdown and return the file path"""
        dt = task.start_time or datetime.now(timezone.utc)
        date_path = self._get_date_path(dt)
        date_path.mkdir(parents=True, exist_ok=True)

        # Build the file name
        task_id = self._generate_task_id()
        status_emoji = {
            "completed": "✅",
            "failed": "❌",
            "running": "🔄",
            "pending": "⏳",
            "cancelled": "🚫"
        }.get(task.status, "❓")

        # Sanitize task name and host for the filename
        safe_name = task.name.replace(' ', '_').replace(':', '').replace('/', '-')[:50]
        safe_host = task.host.replace(' ', '_').replace(':', '').replace('/', '-')[:30] if task.host else 'unknown'
        filename = f"{task_id}_{safe_host}_{safe_name}_{task.status}.md"
        filepath = date_path / filename

        # Build the markdown content
        md_content = f"""# {status_emoji} {task.name}

## Informations

| Propriété | Valeur |
|-----------|--------|
| **ID** | `{task.id}` |
| **Nom** | {task.name} |
| **Cible** | `{task.host}` |
| **Statut** | {task.status} |
| **Progression** | {task.progress}% |
| **Début** | {task.start_time.isoformat() if task.start_time else 'N/A'} |
| **Fin** | {task.end_time.isoformat() if task.end_time else 'N/A'} |
| **Durée** | {task.duration or 'N/A'} |

## Sortie

```
{output or task.output or '(Aucune sortie)'}
```

"""
        if error or task.error:
            md_content += f"""## Erreurs

```
{error or task.error}
```

"""

        md_content += f"""---
*Généré automatiquement par Homelab Automation Dashboard*
*Date: {datetime.now(timezone.utc).isoformat()}*
"""

        # Write the file
        filepath.write_text(md_content, encoding='utf-8')

        return str(filepath)

    def _parse_markdown_metadata(self, content: str) -> Dict[str, Any]:
        """Parse markdown content and extract enriched metadata"""
        metadata = {
            'start_time': None,
            'end_time': None,
            'duration': None,
            'duration_seconds': None,
            'hosts': [],
            'category': None,
            'subcategory': None,
            'target_type': None
        }

        # Extract the start and end times
        start_match = re.search(r'\|\s*\*\*Début\*\*\s*\|\s*([^|]+)', content)
        if start_match:
            start_val = start_match.group(1).strip()
            if start_val and start_val != 'N/A':
                metadata['start_time'] = start_val

        end_match = re.search(r'\|\s*\*\*Fin\*\*\s*\|\s*([^|]+)', content)
        if end_match:
            end_val = end_match.group(1).strip()
            if end_val and end_val != 'N/A':
                metadata['end_time'] = end_val

        duration_match = re.search(r'\|\s*\*\*Durée\*\*\s*\|\s*([^|]+)', content)
        if duration_match:
            dur_val = duration_match.group(1).strip()
            if dur_val and dur_val != 'N/A':
                metadata['duration'] = dur_val
                # Convert to seconds when possible
                metadata['duration_seconds'] = self._parse_duration_to_seconds(dur_val)

        # Extract the hosts from the Ansible output
        # Patterns for hosts in PLAY RECAP or in ad-hoc task results
        host_patterns = [
            r'^([a-zA-Z0-9][a-zA-Z0-9._-]+)\s*:\s*ok=',  # PLAY RECAP format
            r'^\s*([a-zA-Z0-9][a-zA-Z0-9._-]+)\s*\|\s*(SUCCESS|CHANGED|FAILED|UNREACHABLE)',  # Ad-hoc format
        ]
        hosts_found = set()
        for pattern in host_patterns:
            for match in re.finditer(pattern, content, re.MULTILINE):
                host = match.group(1).strip()
                if host and len(host) > 2 and ('.' in host or len(host) > 5):
                    hosts_found.add(host)
        metadata['hosts'] = sorted(hosts_found)

        # Detect the category
        task_name_match = re.search(r'^#\s*[✅❌🔄⏳🚫❓]?\s*(.+)$', content, re.MULTILINE)
        if task_name_match:
            task_name = task_name_match.group(1).strip().lower()
            if 'playbook' in task_name:
                metadata['category'] = 'Playbook'
                # Derive the subcategory from the name
                if 'health' in task_name:
                    metadata['subcategory'] = 'Health Check'
                elif 'backup' in task_name:
                    metadata['subcategory'] = 'Backup'
                elif 'upgrade' in task_name or 'update' in task_name:
                    metadata['subcategory'] = 'Upgrade'
                elif 'bootstrap' in task_name:
                    metadata['subcategory'] = 'Bootstrap'
                elif 'reboot' in task_name:
                    metadata['subcategory'] = 'Reboot'
            elif 'ad-hoc' in task_name or 'adhoc' in task_name:
                metadata['category'] = 'Ad-hoc'
            else:
                metadata['category'] = 'Autre'

        # Detect the target type
        target_match = re.search(r'\|\s*\*\*Cible\*\*\s*\|\s*`([^`]+)`', content)
        if target_match:
            target_val = target_match.group(1).strip()
            if target_val == 'all':
                metadata['target_type'] = 'group'
            elif target_val.startswith('env_') or target_val.startswith('role_'):
                metadata['target_type'] = 'group'
            elif '.' in target_val:
                metadata['target_type'] = 'host'
            else:
                metadata['target_type'] = 'group'

        return metadata

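    # Illustrative sketch (the sample lines below are assumptions, not real output):
    # the two host patterns above are meant to match lines such as
    #
    #   node01.lab.home      : ok=5    changed=1    unreachable=0    failed=0   (PLAY RECAP)
    #   node01.lab.home | SUCCESS => {"changed": false, "rc": 0}                (ad-hoc)
    #
    # and capture "node01.lab.home" as an impacted host in both cases.
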
    def _parse_duration_to_seconds(self, duration_str: str) -> Optional[int]:
        """Convert a duration string into seconds"""
        if not duration_str:
            return None

        total_seconds = 0
        # Supported patterns: Xh Xm Xs, H:MM:SS, or Xs

        # HH:MM:SS format
        hms_match = re.match(r'(\d+):(\d+):(\d+)', duration_str)
        if hms_match:
            h, m, s = map(int, hms_match.groups())
            return h * 3600 + m * 60 + s

        # Format with h, m, s units
        hours = re.search(r'(\d+)\s*h', duration_str)
        minutes = re.search(r'(\d+)\s*m', duration_str)
        seconds = re.search(r'(\d+)\s*s', duration_str)

        if hours:
            total_seconds += int(hours.group(1)) * 3600
        if minutes:
            total_seconds += int(minutes.group(1)) * 60
        if seconds:
            total_seconds += int(seconds.group(1))

        return total_seconds if total_seconds > 0 else None

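    # Illustrative sketch (inputs are assumptions): expected conversions for the
    # parser above.
    #
    #   "1:02:03"  -> 1*3600 + 2*60 + 3 = 3723
    #   "1h 5m"    -> 3900
    #   "45s"      -> 45
    #   "n/a"      -> None (no recognizable unit)
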
    def get_task_logs(self,
                      year: Optional[str] = None,
                      month: Optional[str] = None,
                      day: Optional[str] = None,
                      status: Optional[str] = None,
                      target: Optional[str] = None,
                      category: Optional[str] = None) -> List[TaskLogFile]:
        """Return the list of task logs, with optional filtering"""
        logs = []

        # Determine the search paths
        if year and month and day:
            search_paths = [self.base_dir / year / month / day]
        elif year and month:
            month_path = self.base_dir / year / month
            search_paths = list(month_path.glob("*")) if month_path.exists() else []
        elif year:
            year_path = self.base_dir / year
            search_paths = []
            if year_path.exists():
                for m in year_path.iterdir():
                    if m.is_dir():
                        search_paths.extend(m.glob("*"))
        else:
            search_paths = []
            if self.base_dir.exists():
                for y in self.base_dir.iterdir():
                    if y.is_dir() and y.name.isdigit():
                        for m in y.iterdir():
                            if m.is_dir():
                                search_paths.extend(m.glob("*"))

        # Walk the directories
        for path in search_paths:
            if not path.is_dir():
                continue

            for md_file in path.glob("*.md"):
                try:
                    # Extract information from the file name
                    # Format: task_HHMMSS_XXXXXX_TARGET_TASKNAME_STATUS.md
                    parts = md_file.stem.split("_")
                    if len(parts) >= 4:
                        file_status = parts[-1]
                        # New format: task_HHMMSS_XXXXXX_target_taskname_status
                        # parts[0] = task, parts[1] = HHMMSS, parts[2] = XXXXXX (id)
                        # parts[3] = target, parts[4:-1] = task_name, parts[-1] = status
                        if len(parts) >= 5:
                            file_target = parts[3]
                            task_name_from_file = "_".join(parts[4:-1]) if len(parts) > 5 else parts[4] if len(parts) > 4 else "unknown"
                        else:
                            file_target = ""
                            task_name_from_file = "_".join(parts[3:-1]) if len(parts) > 4 else parts[3] if len(parts) > 3 else "unknown"

                        # Filter by status if requested
                        if status and status != "all" and file_status != status:
                            continue

                        # Extract the date from the path
                        rel_path = md_file.relative_to(self.base_dir)
                        path_parts = rel_path.parts
                        if len(path_parts) >= 3:
                            log_year, log_month, log_day = path_parts[0], path_parts[1], path_parts[2]
                        else:
                            continue

                        stat = md_file.stat()

                        # Read the content to extract enriched metadata
                        try:
                            content = md_file.read_text(encoding='utf-8')
                            metadata = self._parse_markdown_metadata(content)
                            # Extract the task name and target from the markdown content
                            task_name_match = re.search(r'^#\s*[✅❌🔄⏳🚫❓]?\s*(.+)$', content, re.MULTILINE)
                            if task_name_match:
                                task_name = task_name_match.group(1).strip()
                            else:
                                task_name = task_name_from_file.replace("_", " ")

                            # Extract the target from the content
                            target_match = re.search(r'\|\s*\*\*Cible\*\*\s*\|\s*`([^`]+)`', content)
                            if target_match:
                                file_target = target_match.group(1).strip()
                        except Exception:
                            metadata = {}
                            task_name = task_name_from_file.replace("_", " ")

                        # Filter by target if requested
                        if target and target != "all" and file_target:
                            if target.lower() not in file_target.lower():
                                continue

                        # Filter by category if requested
                        if category and category != "all":
                            file_category = metadata.get('category', '')
                            if file_category and category.lower() not in file_category.lower():
                                continue

                        logs.append(TaskLogFile(
                            id=parts[0] + "_" + parts[1] + "_" + parts[2] if len(parts) > 2 else parts[0],
                            filename=md_file.name,
                            path=str(md_file),
                            task_name=task_name,
                            target=file_target,
                            status=file_status,
                            date=f"{log_year}-{log_month}-{log_day}",
                            year=log_year,
                            month=log_month,
                            day=log_day,
                            created_at=datetime.fromtimestamp(stat.st_ctime, tz=timezone.utc),
                            size_bytes=stat.st_size,
                            start_time=metadata.get('start_time'),
                            end_time=metadata.get('end_time'),
                            duration=metadata.get('duration'),
                            duration_seconds=metadata.get('duration_seconds'),
                            hosts=metadata.get('hosts', []),
                            category=metadata.get('category'),
                            subcategory=metadata.get('subcategory'),
                            target_type=metadata.get('target_type')
                        ))
                except Exception:
                    continue

        # Sort by creation date, newest first
        logs.sort(key=lambda x: x.created_at, reverse=True)
        return logs

    def get_available_dates(self) -> Dict[str, Any]:
        """Return the structure of available dates for filtering"""
        dates = {"years": {}}

        if not self.base_dir.exists():
            return dates

        for year_dir in sorted(self.base_dir.iterdir(), reverse=True):
            if year_dir.is_dir() and year_dir.name.isdigit():
                year = year_dir.name
                dates["years"][year] = {"months": {}}

                for month_dir in sorted(year_dir.iterdir(), reverse=True):
                    if month_dir.is_dir() and month_dir.name.isdigit():
                        month = month_dir.name
                        dates["years"][year]["months"][month] = {"days": []}

                        for day_dir in sorted(month_dir.iterdir(), reverse=True):
                            if day_dir.is_dir() and day_dir.name.isdigit():
                                day = day_dir.name
                                count = len(list(day_dir.glob("*.md")))
                                dates["years"][year]["months"][month]["days"].append({
                                    "day": day,
                                    "count": count
                                })

        return dates

    def get_stats(self) -> Dict[str, int]:
        """Return task statistics"""
        stats = {"total": 0, "completed": 0, "failed": 0, "running": 0, "pending": 0}

        for log in self.get_task_logs():
            stats["total"] += 1
            if log.status in stats:
                stats[log.status] += 1

        return stats

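# Illustrative sketch (paths and values are assumptions): typical use of
# TaskLogService for a given day.
#
#   service = TaskLogService(DIR_LOGS_TASKS)
#   failed_logs = service.get_task_logs(year="2024", month="05", day="17", status="failed")
#   dates = service.get_available_dates()   # {"years": {"2024": {"months": {...}}}}
#   stats = service.get_stats()             # {"total": 42, "completed": 39, ...}
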
# ===== AD-HOC COMMAND HISTORY SERVICE =====

class AdHocHistoryService:
    """Service that manages the ad-hoc command history, with categories"""

    def __init__(self, history_file: Path):
        self.history_file = history_file
        self._ensure_file()

    def _ensure_file(self):
        """Create the history file if it does not exist"""
        self.history_file.parent.mkdir(parents=True, exist_ok=True)
        if not self.history_file.exists():
            self._save_data({"commands": [], "categories": [
                {"name": "default", "description": "Commandes générales", "color": "#7c3aed", "icon": "fa-terminal"},
                {"name": "diagnostic", "description": "Commandes de diagnostic", "color": "#10b981", "icon": "fa-stethoscope"},
                {"name": "maintenance", "description": "Commandes de maintenance", "color": "#f59e0b", "icon": "fa-wrench"},
                {"name": "deployment", "description": "Commandes de déploiement", "color": "#3b82f6", "icon": "fa-rocket"},
            ]})

    def _load_data(self) -> Dict:
        """Load data from the file"""
        try:
            with open(self.history_file, 'r', encoding='utf-8') as f:
                return json.load(f)
        except Exception:
            return {"commands": [], "categories": []}

    def _save_data(self, data: Dict):
        """Save data to the file"""
        with open(self.history_file, 'w', encoding='utf-8') as f:
            json.dump(data, f, indent=2, default=str, ensure_ascii=False)

    def add_command(self, command: str, target: str, module: str, become: bool,
                    category: str = "default", description: Optional[str] = None) -> AdHocHistoryEntry:
        """Add a command to the history, or update it if it already exists"""
        data = self._load_data()

        # Look for an existing entry with the same command and target
        existing = None
        for cmd in data["commands"]:
            if cmd["command"] == command and cmd["target"] == target:
                existing = cmd
                break

        if existing:
            existing["last_used"] = datetime.now(timezone.utc).isoformat()
            existing["use_count"] = existing.get("use_count", 1) + 1
            if category != "default":
                existing["category"] = category
            if description:
                existing["description"] = description
            entry = AdHocHistoryEntry(**existing)
        else:
            entry = AdHocHistoryEntry(
                id=f"adhoc_{uuid.uuid4().hex[:8]}",
                command=command,
                target=target,
                module=module,
                become=become,
                category=category,
                description=description
            )
            data["commands"].append(entry.dict())

        self._save_data(data)
        return entry

    def get_commands(self, category: Optional[str] = None, search: Optional[str] = None, limit: int = 50) -> List[AdHocHistoryEntry]:
        """Return commands from the history"""
        data = self._load_data()
        commands = []

        for cmd in data.get("commands", []):
            if category and cmd.get("category") != category:
                continue
            if search and search.lower() not in cmd.get("command", "").lower():
                continue

            try:
                # Convert string dates to datetime when needed
                if isinstance(cmd.get("created_at"), str):
                    cmd["created_at"] = datetime.fromisoformat(cmd["created_at"].replace("Z", "+00:00"))
                if isinstance(cmd.get("last_used"), str):
                    cmd["last_used"] = datetime.fromisoformat(cmd["last_used"].replace("Z", "+00:00"))
                commands.append(AdHocHistoryEntry(**cmd))
            except Exception:
                continue

        # Sort by most recently used
        commands.sort(key=lambda x: x.last_used, reverse=True)
        return commands[:limit]

    def get_categories(self) -> List[AdHocHistoryCategory]:
        """Return the list of categories"""
        data = self._load_data()
        return [AdHocHistoryCategory(**cat) for cat in data.get("categories", [])]

    def add_category(self, name: str, description: Optional[str] = None, color: str = "#7c3aed", icon: str = "fa-folder") -> AdHocHistoryCategory:
        """Add a new category"""
        data = self._load_data()

        # Return the existing category if the name is already taken
        for cat in data["categories"]:
            if cat["name"] == name:
                return AdHocHistoryCategory(**cat)

        new_cat = AdHocHistoryCategory(name=name, description=description, color=color, icon=icon)
        data["categories"].append(new_cat.dict())
        self._save_data(data)
        return new_cat

    def delete_command(self, command_id: str) -> bool:
        """Delete a command from the history"""
        data = self._load_data()
        original_len = len(data["commands"])
        data["commands"] = [c for c in data["commands"] if c.get("id") != command_id]

        if len(data["commands"]) < original_len:
            self._save_data(data)
            return True
        return False

    def update_command_category(self, command_id: str, category: str, description: Optional[str] = None) -> bool:
        """Update the category of a command"""
        data = self._load_data()

        for cmd in data["commands"]:
            if cmd.get("id") == command_id:
                cmd["category"] = category
                if description:
                    cmd["description"] = description
                self._save_data(data)
                return True
        return False

    def update_category(self, category_name: str, new_name: str, description: str, color: str, icon: str) -> bool:
        """Update an existing category"""
        data = self._load_data()

        for cat in data["categories"]:
            if cat["name"] == category_name:
                # If the name changes, update the commands that reference it
                if new_name != category_name:
                    for cmd in data["commands"]:
                        if cmd.get("category") == category_name:
                            cmd["category"] = new_name

                cat["name"] = new_name
                cat["description"] = description
                cat["color"] = color
                cat["icon"] = icon
                self._save_data(data)
                return True
        return False

    def delete_category(self, category_name: str) -> bool:
        """Delete a category and move its commands to 'default'"""
        if category_name == "default":
            return False

        data = self._load_data()

        # Check that the category exists
        cat_exists = any(cat["name"] == category_name for cat in data["categories"])
        if not cat_exists:
            return False

        # Move its commands to 'default'
        for cmd in data["commands"]:
            if cmd.get("category") == category_name:
                cmd["category"] = "default"

        # Remove the category
        data["categories"] = [cat for cat in data["categories"] if cat["name"] != category_name]

        self._save_data(data)
        return True

# ===== BOOTSTRAP STATUS SERVICE =====

class BootstrapStatusService:
    """Service that tracks the bootstrap status of hosts"""

    def __init__(self, status_file: Path):
        self.status_file = status_file
        self._ensure_file()

    def _ensure_file(self):
        """Create the status file if it does not exist"""
        self.status_file.parent.mkdir(parents=True, exist_ok=True)
        if not self.status_file.exists():
            self._save_data({"hosts": {}})

    def _load_data(self) -> Dict:
        """Load data from the file"""
        try:
            with open(self.status_file, 'r', encoding='utf-8') as f:
                return json.load(f)
        except Exception:
            return {"hosts": {}}

    def _save_data(self, data: Dict):
        """Save data to the file"""
        with open(self.status_file, 'w', encoding='utf-8') as f:
            json.dump(data, f, indent=2, default=str, ensure_ascii=False)

    def set_bootstrap_status(self, host_name: str, success: bool, details: Optional[str] = None) -> Dict:
        """Record the bootstrap status of a host"""
        data = self._load_data()

        data["hosts"][host_name] = {
            "bootstrap_ok": success,
            "bootstrap_date": datetime.now(timezone.utc).isoformat(),
            "details": details
        }

        self._save_data(data)
        return data["hosts"][host_name]

    def get_bootstrap_status(self, host_name: str) -> Dict:
        """Return the bootstrap status of a host"""
        data = self._load_data()
        return data.get("hosts", {}).get(host_name, {
            "bootstrap_ok": False,
            "bootstrap_date": None,
            "details": None
        })

    def get_all_status(self) -> Dict[str, Dict]:
        """Return the status of every host"""
        data = self._load_data()
        return data.get("hosts", {})

    def remove_host(self, host_name: str) -> bool:
        """Remove the status of a host"""
        data = self._load_data()
        if host_name in data.get("hosts", {}):
            del data["hosts"][host_name]
            self._save_data(data)
            return True
        return False

# ===== HOST STATUS SERVICE =====

class HostStatusService:
    """Service that persists the last known status of hosts"""

    def __init__(self, status_file: Path):
        self.status_file = status_file
        self._ensure_file()

    def _ensure_file(self):
        self.status_file.parent.mkdir(parents=True, exist_ok=True)
        if not self.status_file.exists():
            self._save_data({"hosts": {}})

    def _load_data(self) -> Dict:
        try:
            with open(self.status_file, 'r', encoding='utf-8') as f:
                return json.load(f)
        except Exception:
            return {"hosts": {}}

    def _save_data(self, data: Dict):
        with open(self.status_file, 'w', encoding='utf-8') as f:
            json.dump(data, f, indent=2, default=str, ensure_ascii=False)

    def set_status(self, host_name: str, status: str, last_seen: Optional[datetime], os_info: Optional[str]) -> Dict:
        data = self._load_data()
        data.setdefault("hosts", {})
        data["hosts"][host_name] = {
            "status": status,
            "last_seen": last_seen.isoformat() if isinstance(last_seen, datetime) else last_seen,
            "os": os_info,
        }
        self._save_data(data)
        return data["hosts"][host_name]

    def get_status(self, host_name: str) -> Dict:
        data = self._load_data()
        hosts = data.get("hosts", {})
        return hosts.get(host_name, {"status": "online", "last_seen": None, "os": None})

    def get_all_status(self) -> Dict[str, Dict]:
        data = self._load_data()
        return data.get("hosts", {})

    def remove_host(self, host_name: str) -> bool:
        data = self._load_data()
        hosts = data.get("hosts", {})
        if host_name in hosts:
            del hosts[host_name]
            data["hosts"] = hosts
            self._save_data(data)
            return True
        return False

# ===== SCHEDULER SERVICE =====

SCHEDULES_FILE = DIR_LOGS_TASKS / ".schedules.json"
SCHEDULE_RUNS_FILE = DIR_LOGS_TASKS / ".schedule_runs.json"


class SchedulerService:
    """Service that manages playbook schedules using APScheduler"""

    def __init__(self, schedules_file: Path, runs_file: Path):
        self.schedules_file = schedules_file
        self.runs_file = runs_file
        self._ensure_files()

        # Configure APScheduler
        jobstores = {'default': MemoryJobStore()}
        executors = {'default': AsyncIOExecutor()}
        job_defaults = {'coalesce': True, 'max_instances': 1, 'misfire_grace_time': 300}

        self.scheduler = AsyncIOScheduler(
            jobstores=jobstores,
            executors=executors,
            job_defaults=job_defaults,
            timezone=pytz.UTC
        )
        self._started = False

    def _ensure_files(self):
        """Create the data files if they do not exist"""
        self.schedules_file.parent.mkdir(parents=True, exist_ok=True)
        if not self.schedules_file.exists():
            self._save_schedules([])
        if not self.runs_file.exists():
            self._save_runs([])

    def _load_schedules(self) -> List[Dict]:
        """Load the schedules from the file"""
        try:
            with open(self.schedules_file, 'r', encoding='utf-8') as f:
                data = json.load(f)
                return data.get("schedules", []) if isinstance(data, dict) else data
        except Exception:
            return []

    def _save_schedules(self, schedules: List[Dict]):
        """Save the schedules to the file"""
        with open(self.schedules_file, 'w', encoding='utf-8') as f:
            json.dump({"schedules": schedules}, f, indent=2, default=str, ensure_ascii=False)

    def _load_runs(self) -> List[Dict]:
        """Load the execution history"""
        try:
            with open(self.runs_file, 'r', encoding='utf-8') as f:
                data = json.load(f)
                return data.get("runs", []) if isinstance(data, dict) else data
        except Exception:
            return []

    def _save_runs(self, runs: List[Dict]):
        """Save the execution history"""
        # Keep only the most recent 1000 runs
        runs = runs[:1000]
        with open(self.runs_file, 'w', encoding='utf-8') as f:
            json.dump({"runs": runs}, f, indent=2, default=str, ensure_ascii=False)

    def start(self):
        """Start the scheduler and load all active schedules"""
        if not self._started:
            self.scheduler.start()
            self._started = True
            # Load the active schedules
            self._load_active_schedules()
            print("📅 Scheduler démarré avec succès")

    def shutdown(self):
        """Shut the scheduler down cleanly"""
        if self._started:
            self.scheduler.shutdown(wait=False)
            self._started = False

    def _load_active_schedules(self):
        """Register every active schedule with APScheduler"""
        schedules = self._load_schedules()
        for sched_data in schedules:
            if sched_data.get('enabled', True):
                try:
                    schedule = Schedule(**sched_data)
                    self._add_job_for_schedule(schedule)
                except Exception as e:
                    print(f"Erreur chargement schedule {sched_data.get('id')}: {e}")

    def _build_cron_trigger(self, schedule: Schedule) -> Optional[CronTrigger]:
        """Build a cron trigger from the schedule's recurrence configuration"""
        if schedule.schedule_type == "once":
            return None

        recurrence = schedule.recurrence
        if not recurrence:
            return None

        tz = pytz.timezone(schedule.timezone)
        hour, minute = recurrence.time.split(':') if recurrence.time else ("2", "0")

        try:
            if recurrence.type == "daily":
                return CronTrigger(hour=int(hour), minute=int(minute), timezone=tz)

            elif recurrence.type == "weekly":
                # Convert days (1-7, Monday=1) to cron format (0-6, Monday=0)
                days = recurrence.days or [1]
                day_of_week = ','.join(str(d - 1) for d in days)
                return CronTrigger(day_of_week=day_of_week, hour=int(hour), minute=int(minute), timezone=tz)

            elif recurrence.type == "monthly":
                day = recurrence.day_of_month or 1
                return CronTrigger(day=day, hour=int(hour), minute=int(minute), timezone=tz)

            elif recurrence.type == "custom" and recurrence.cron_expression:
                # Parse the cron expression
                parts = recurrence.cron_expression.split()
                if len(parts) == 5:
                    return CronTrigger.from_crontab(recurrence.cron_expression, timezone=tz)
                else:
                    # Extended cron expression (6 fields, with seconds)
                    return CronTrigger(
                        second=parts[0],
                        minute=parts[1],
                        hour=parts[2],
                        day=parts[3],
                        month=parts[4],
                        day_of_week=parts[5],
                        timezone=tz
                    )
        except Exception as e:
            print(f"Erreur construction trigger cron: {e}")
            return None

        return None

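    # Illustrative sketch (values are assumptions): a weekly recurrence of
    # {"type": "weekly", "time": "02:00", "days": [1, 4]} (Monday and Thursday)
    # is translated above into
    #
    #   CronTrigger(day_of_week="0,3", hour=2, minute=0, timezone=<schedule tz>)
    #
    # i.e. the UI's 1-7 (Monday=1) convention is shifted to cron's 0-6 (Monday=0).
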
    def _add_job_for_schedule(self, schedule: Schedule):
        """Register an APScheduler job for a schedule"""
        job_id = f"schedule_{schedule.id}"

        # Remove any existing job with the same id
        try:
            self.scheduler.remove_job(job_id)
        except Exception:
            pass

        if schedule.schedule_type == "once":
            # One-shot execution
            if schedule.start_at and schedule.start_at > datetime.now(timezone.utc):
                trigger = DateTrigger(run_date=schedule.start_at, timezone=pytz.UTC)
                self.scheduler.add_job(
                    self._execute_schedule,
                    trigger,
                    id=job_id,
                    args=[schedule.id],
                    replace_existing=True
                )
        else:
            # Recurring execution
            trigger = self._build_cron_trigger(schedule)
            if trigger:
                self.scheduler.add_job(
                    self._execute_schedule,
                    trigger,
                    id=job_id,
                    args=[schedule.id],
                    replace_existing=True
                )

        # Compute and persist next_run_at
        self._update_next_run(schedule.id)

    def _update_next_run(self, schedule_id: str):
        """Refresh the next_run_at field of a schedule"""
        job_id = f"schedule_{schedule_id}"
        try:
            job = self.scheduler.get_job(job_id)
            if job and job.next_run_time:
                schedules = self._load_schedules()
                for s in schedules:
                    if s['id'] == schedule_id:
                        s['next_run_at'] = job.next_run_time.isoformat()
                        break
                self._save_schedules(schedules)
        except Exception:
            pass

    async def _execute_schedule(self, schedule_id: str):
        """Execute a schedule (called by APScheduler)"""
        # Use module-level globals to avoid a circular import
        global ws_manager, ansible_service, db, task_log_service

        schedules = self._load_schedules()
        sched_data = next((s for s in schedules if s['id'] == schedule_id), None)

        if not sched_data:
            print(f"Schedule {schedule_id} non trouvé")
            return

        schedule = Schedule(**sched_data)

        # Make sure the schedule is still active
        if not schedule.enabled:
            return

        # Check the time window
        now = datetime.now(timezone.utc)
        if schedule.end_at and now > schedule.end_at:
            # The schedule has expired; disable it
            schedule.enabled = False
            self._update_schedule_in_storage(schedule)
            return

        # Create a ScheduleRun
        run = ScheduleRun(schedule_id=schedule_id)
        runs = self._load_runs()
        runs.insert(0, run.dict())
        self._save_runs(runs)

        # Update the schedule
        schedule.last_run_at = now
        schedule.last_status = "running"
        schedule.run_count += 1
        self._update_schedule_in_storage(schedule)

        # Notify via WebSocket
        try:
            await ws_manager.broadcast({
                "type": "schedule_run_started",
                "data": {
                    "schedule_id": schedule_id,
                    "schedule_name": schedule.name,
                    "run": run.dict(),
                    "status": "running"
                }
            })
        except Exception:
            pass

        # Create a task
        task_id = str(db.get_next_id("tasks"))
        playbook_name = schedule.playbook.replace('.yml', '').replace('-', ' ').title()
        task = Task(
            id=task_id,
            name=f"[Planifié] {playbook_name}",
            host=schedule.target,
            status="running",
            progress=0,
            start_time=now
        )
        db.tasks.insert(0, task)

        # Attach the task_id to the run
        run.task_id = task_id
        runs = self._load_runs()
        for r in runs:
            if r['id'] == run.id:
                r['task_id'] = task_id
                break
        self._save_runs(runs)

        # Notify about the new task
        try:
            await ws_manager.broadcast({
                "type": "task_created",
                "data": task.dict()
            })
        except Exception:
            pass

        # Run the playbook
        start_time = perf_counter()
        try:
            result = await ansible_service.execute_playbook(
                playbook=schedule.playbook,
                target=schedule.target,
                extra_vars=schedule.extra_vars,
                check_mode=False,
                verbose=True
            )

            execution_time = perf_counter() - start_time
            success = result.get("success", False)

            # Update the task
            task.status = "completed" if success else "failed"
            task.progress = 100
            task.end_time = datetime.now(timezone.utc)
            task.duration = f"{execution_time:.1f}s"
            task.output = result.get("stdout", "")
            task.error = result.get("stderr", "") if not success else None

            # Update the run
            run.status = "success" if success else "failed"
            run.finished_at = datetime.now(timezone.utc)
            run.duration_seconds = execution_time
            run.error_message = result.get("stderr", "") if not success else None

            # Count the impacted hosts
            stdout = result.get("stdout", "")
            host_count = len(re.findall(r'^[a-zA-Z0-9][a-zA-Z0-9._-]+\s*:\s*ok=', stdout, re.MULTILINE))
            run.hosts_impacted = host_count

            # Update the schedule
            schedule.last_status = "success" if success else "failed"
            if success:
                schedule.success_count += 1
            else:
                schedule.failure_count += 1

            # Persist everything
            self._update_schedule_in_storage(schedule)
            runs = self._load_runs()
            for r in runs:
                if r['id'] == run.id:
                    r.update(run.dict())
                    break
            self._save_runs(runs)

            # Save the markdown log
            try:
                task_log_service.save_task_log(
                    task=task,
                    output=result.get("stdout", ""),
                    error=result.get("stderr", "")
                )
            except Exception:
                pass

            # Notify
            await ws_manager.broadcast({
                "type": "schedule_run_finished",
                "data": {
                    "schedule_id": schedule_id,
                    "schedule_name": schedule.name,
                    "run": run.dict(),
                    "status": run.status,
                    "success": success
                }
            })

            await ws_manager.broadcast({
                "type": "task_completed",
                "data": {
                    "id": task_id,
                    "status": task.status,
                    "progress": 100,
                    "duration": task.duration,
                    "success": success
                }
            })

            # Log entry
            log_entry = LogEntry(
                id=db.get_next_id("logs"),
                timestamp=datetime.now(timezone.utc),
                level="INFO" if success else "ERROR",
                message=f"Schedule '{schedule.name}' exécuté: {'succès' if success else 'échec'}",
                source="scheduler",
                host=schedule.target
            )
            db.logs.insert(0, log_entry)

        except Exception as e:
            # Execution failed
            execution_time = perf_counter() - start_time

            task.status = "failed"
            task.end_time = datetime.now(timezone.utc)
            task.error = str(e)

            run.status = "failed"
            run.finished_at = datetime.now(timezone.utc)
            run.duration_seconds = execution_time
            run.error_message = str(e)

            schedule.last_status = "failed"
            schedule.failure_count += 1

            self._update_schedule_in_storage(schedule)
            runs = self._load_runs()
            for r in runs:
                if r['id'] == run.id:
                    r.update(run.dict())
                    break
            self._save_runs(runs)

            try:
                task_log_service.save_task_log(task=task, error=str(e))
            except Exception:
                pass

            try:
                await ws_manager.broadcast({
                    "type": "schedule_run_finished",
                    "data": {
                        "schedule_id": schedule_id,
                        "run": run.dict(),
                        "status": "failed",
                        "error": str(e)
                    }
                })

                await ws_manager.broadcast({
                    "type": "task_failed",
                    "data": {"id": task_id, "status": "failed", "error": str(e)}
                })
            except Exception:
                pass

            log_entry = LogEntry(
                id=db.get_next_id("logs"),
                timestamp=datetime.now(timezone.utc),
                level="ERROR",
                message=f"Erreur schedule '{schedule.name}': {str(e)}",
                source="scheduler",
                host=schedule.target
            )
            db.logs.insert(0, log_entry)

        # Refresh next_run_at
        self._update_next_run(schedule_id)

    def _update_schedule_in_storage(self, schedule: Schedule):
        """Persist an updated schedule"""
        schedule.updated_at = datetime.now(timezone.utc)
        schedules = self._load_schedules()
        for i, s in enumerate(schedules):
            if s['id'] == schedule.id:
                schedules[i] = schedule.dict()
                break
        self._save_schedules(schedules)

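    # Illustrative sketch (field values are assumptions): shape of the persisted
    # .schedules.json file that the load/save helpers above read and write.
    #
    #   {
    #     "schedules": [
    #       {
    #         "id": "sched_ab12cd34ef56",
    #         "name": "Weekly upgrade",
    #         "playbook": "vm-upgrade.yml",
    #         "target": "env_homelab",
    #         "enabled": true,
    #         "next_run_at": "2024-05-20T06:00:00+00:00",
    #         ...
    #       }
    #     ]
    #   }
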
    # ===== PUBLIC CRUD METHODS =====

    def get_all_schedules(self,
                          enabled: Optional[bool] = None,
                          playbook: Optional[str] = None,
                          tag: Optional[str] = None) -> List[Schedule]:
        """Return every schedule, with optional filtering"""
        schedules_data = self._load_schedules()
        schedules = []

        for s in schedules_data:
            try:
                schedule = Schedule(**s)

                # Filters
                if enabled is not None and schedule.enabled != enabled:
                    continue
                if playbook and playbook.lower() not in schedule.playbook.lower():
                    continue
                if tag and tag not in schedule.tags:
                    continue

                schedules.append(schedule)
            except Exception:
                continue

        # Sort by next execution time
        schedules.sort(key=lambda x: x.next_run_at or datetime.max.replace(tzinfo=timezone.utc))
        return schedules

    def get_schedule(self, schedule_id: str) -> Optional[Schedule]:
        """Return a schedule by ID"""
        schedules = self._load_schedules()
        for s in schedules:
            if s['id'] == schedule_id:
                return Schedule(**s)
        return None

    def create_schedule(self, request: ScheduleCreateRequest) -> Schedule:
        """Create a new schedule"""
        schedule = Schedule(
            name=request.name,
            description=request.description,
            playbook=request.playbook,
            target_type=request.target_type,
            target=request.target,
            extra_vars=request.extra_vars,
            schedule_type=request.schedule_type,
            recurrence=request.recurrence,
            timezone=request.timezone,
            start_at=request.start_at,
            end_at=request.end_at,
            enabled=request.enabled,
            retry_on_failure=request.retry_on_failure,
            timeout=request.timeout,
            tags=request.tags
        )

        # Persist the schedule
        schedules = self._load_schedules()
        schedules.append(schedule.dict())
        self._save_schedules(schedules)

        # Register the job if the schedule is active
        if schedule.enabled and self._started:
            self._add_job_for_schedule(schedule)

        return schedule

def update_schedule(self, schedule_id: str, request: ScheduleUpdateRequest) -> Optional[Schedule]:
|
|
"""Met à jour un schedule existant"""
|
|
schedule = self.get_schedule(schedule_id)
|
|
if not schedule:
|
|
return None
|
|
|
|
# Appliquer les modifications
|
|
update_data = request.dict(exclude_unset=True, exclude_none=True)
|
|
for key, value in update_data.items():
|
|
# La récurrence arrive du frontend comme un dict, il faut la retransformer
|
|
# en objet ScheduleRecurrence pour que _build_cron_trigger fonctionne.
|
|
if key == "recurrence" and isinstance(value, dict):
|
|
try:
|
|
value = ScheduleRecurrence(**value)
|
|
except Exception:
|
|
# Si la récurrence est invalide, on laisse passer pour que la
|
|
# validation côté endpoint remonte une erreur explicite.
|
|
pass
|
|
|
|
if hasattr(schedule, key):
|
|
setattr(schedule, key, value)
|
|
|
|
schedule.updated_at = datetime.now(timezone.utc)
|
|
|
|
# Sauvegarder
|
|
self._update_schedule_in_storage(schedule)
|
|
|
|
# Mettre à jour le job
|
|
if self._started:
|
|
job_id = f"schedule_{schedule_id}"
|
|
try:
|
|
self.scheduler.remove_job(job_id)
|
|
except Exception:
|
|
pass
|
|
|
|
if schedule.enabled:
|
|
self._add_job_for_schedule(schedule)
|
|
|
|
return schedule
|
|
|
|
def delete_schedule(self, schedule_id: str) -> bool:
|
|
"""Supprime un schedule"""
|
|
schedules = self._load_schedules()
|
|
original_len = len(schedules)
|
|
schedules = [s for s in schedules if s['id'] != schedule_id]
|
|
|
|
if len(schedules) < original_len:
|
|
self._save_schedules(schedules)
|
|
|
|
# Supprimer le job
|
|
job_id = f"schedule_{schedule_id}"
|
|
try:
|
|
self.scheduler.remove_job(job_id)
|
|
except Exception:
|
|
pass
|
|
|
|
return True
|
|
return False
|
|
|
|
def pause_schedule(self, schedule_id: str) -> Optional[Schedule]:
|
|
"""Met en pause un schedule"""
|
|
schedule = self.get_schedule(schedule_id)
|
|
if not schedule:
|
|
return None
|
|
|
|
schedule.enabled = False
|
|
self._update_schedule_in_storage(schedule)
|
|
|
|
# Supprimer le job
|
|
job_id = f"schedule_{schedule_id}"
|
|
try:
|
|
self.scheduler.remove_job(job_id)
|
|
except Exception:
|
|
pass
|
|
|
|
return schedule
|
|
|
|
def resume_schedule(self, schedule_id: str) -> Optional[Schedule]:
|
|
"""Reprend un schedule en pause"""
|
|
schedule = self.get_schedule(schedule_id)
|
|
if not schedule:
|
|
return None
|
|
|
|
schedule.enabled = True
|
|
self._update_schedule_in_storage(schedule)
|
|
|
|
# Ajouter le job
|
|
if self._started:
|
|
self._add_job_for_schedule(schedule)
|
|
|
|
return schedule
|
|
|
|
async def run_now(self, schedule_id: str) -> Optional[ScheduleRun]:
|
|
"""Exécute immédiatement un schedule"""
|
|
schedule = self.get_schedule(schedule_id)
|
|
if not schedule:
|
|
return None
|
|
|
|
# Exécuter de manière asynchrone
|
|
await self._execute_schedule(schedule_id)
|
|
|
|
# Retourner le dernier run
|
|
runs = self._load_runs()
|
|
for r in runs:
|
|
if r['schedule_id'] == schedule_id:
|
|
return ScheduleRun(**r)
|
|
return None
|
|
|
|
def get_schedule_runs(self, schedule_id: str, limit: int = 50) -> List[ScheduleRun]:
|
|
"""Récupère l'historique des exécutions d'un schedule"""
|
|
runs = self._load_runs()
|
|
schedule_runs = []
|
|
|
|
for r in runs:
|
|
if r['schedule_id'] == schedule_id:
|
|
try:
|
|
schedule_runs.append(ScheduleRun(**r))
|
|
except Exception:
|
|
continue
|
|
|
|
return schedule_runs[:limit]
|
|
|
|
def get_stats(self) -> ScheduleStats:
|
|
"""Calcule les statistiques globales des schedules"""
|
|
schedules = self.get_all_schedules()
|
|
runs = self._load_runs()
|
|
|
|
now = datetime.now(timezone.utc)
|
|
yesterday = now - timedelta(days=1)
|
|
week_ago = now - timedelta(days=7)
|
|
|
|
stats = ScheduleStats()
|
|
stats.total = len(schedules)
|
|
stats.active = len([s for s in schedules if s.enabled])
|
|
stats.paused = len([s for s in schedules if not s.enabled])
|
|
|
|
# Schedules expirés
|
|
stats.expired = len([s for s in schedules if s.end_at and s.end_at < now])
|
|
|
|
# Prochaine exécution
|
|
active_schedules = [s for s in schedules if s.enabled and s.next_run_at]
|
|
if active_schedules:
|
|
next_schedule = min(active_schedules, key=lambda x: x.next_run_at)
|
|
stats.next_execution = next_schedule.next_run_at
|
|
stats.next_schedule_name = next_schedule.name
|
|
|
|
# Stats 24h
|
|
runs_24h = []
|
|
for r in runs:
|
|
try:
|
|
started = datetime.fromisoformat(r['started_at'].replace('Z', '+00:00')) if isinstance(r['started_at'], str) else r['started_at']
|
|
if started >= yesterday:
|
|
runs_24h.append(r)
|
|
except Exception:
|
|
continue
|
|
|
|
stats.executions_24h = len(runs_24h)
|
|
stats.failures_24h = len([r for r in runs_24h if r.get('status') == 'failed'])
|
|
|
|
# Taux de succès 7j
|
|
runs_7d = []
|
|
for r in runs:
|
|
try:
|
|
started = datetime.fromisoformat(r['started_at'].replace('Z', '+00:00')) if isinstance(r['started_at'], str) else r['started_at']
|
|
if started >= week_ago:
|
|
runs_7d.append(r)
|
|
except Exception:
|
|
continue
|
|
|
|
if runs_7d:
|
|
success_count = len([r for r in runs_7d if r.get('status') == 'success'])
|
|
stats.success_rate_7d = round((success_count / len(runs_7d)) * 100, 1)
|
|
|
|
return stats
|
|
|
|
def get_upcoming_executions(self, limit: int = 5) -> List[Dict]:
|
|
"""Retourne les prochaines exécutions planifiées"""
|
|
schedules = self.get_all_schedules(enabled=True)
|
|
upcoming = []
|
|
|
|
for s in schedules:
|
|
if s.next_run_at:
|
|
upcoming.append({
|
|
"schedule_id": s.id,
|
|
"schedule_name": s.name,
|
|
"playbook": s.playbook,
|
|
"target": s.target,
|
|
"next_run_at": s.next_run_at.isoformat() if s.next_run_at else None,
|
|
"tags": s.tags
|
|
})
|
|
|
|
upcoming.sort(key=lambda x: x['next_run_at'] or '')
|
|
return upcoming[:limit]
|
|
|
|
def validate_cron_expression(self, expression: str) -> Dict:
|
|
"""Valide une expression cron et retourne les prochaines exécutions"""
|
|
try:
|
|
cron = croniter(expression, datetime.now())
|
|
next_runs = [cron.get_next(datetime).isoformat() for _ in range(5)]
|
|
return {
|
|
"valid": True,
|
|
"next_runs": next_runs,
|
|
"expression": expression
|
|
}
|
|
except Exception as e:
|
|
return {
|
|
"valid": False,
|
|
"error": str(e),
|
|
"expression": expression
|
|
}
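    # Illustrative sketch (comment only, not executed at import time): an endpoint
    # can pre-validate a user-supplied cron expression before persisting a schedule.
    # "0 3 * * *" is just an example value.
    #
    #   preview = scheduler_service.validate_cron_expression("0 3 * * *")
    #   if preview["valid"]:
    #       print(preview["next_runs"])   # five upcoming executions as ISO strings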
|
|
|
|
def get_runs_for_schedule(self, schedule_id: str, limit: int = 50) -> List[Dict]:
|
|
"""Récupère l'historique des exécutions d'un schedule (retourne des dicts)"""
|
|
runs = self._load_runs()
|
|
schedule_runs = [r for r in runs if r.get('schedule_id') == schedule_id]
|
|
return schedule_runs[:limit]
|
|
|
|
def cleanup_old_runs(self, days: int = 90):
|
|
"""Nettoie les exécutions plus anciennes que X jours"""
|
|
cutoff = datetime.now(timezone.utc) - timedelta(days=days)
|
|
runs = self._load_runs()
|
|
|
|
new_runs = []
|
|
for r in runs:
|
|
try:
|
|
started = datetime.fromisoformat(r['started_at'].replace('Z', '+00:00')) if isinstance(r['started_at'], str) else r['started_at']
|
|
if started >= cutoff:
|
|
new_runs.append(r)
|
|
except Exception:
|
|
new_runs.append(r) # Garder si on ne peut pas parser la date
|
|
|
|
self._save_runs(new_runs)
|
|
return len(runs) - len(new_runs)
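    # Maintenance sketch (assumption: called from a startup hook or a recurring job
    # defined elsewhere; nothing in this module schedules it automatically):
    #
    #   removed = scheduler_service.cleanup_old_runs(days=90)
    #   print(f"{removed} old schedule runs pruned")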
|
|
|
|
|
|
# Instances globales des services
|
|
task_log_service = TaskLogService(DIR_LOGS_TASKS)
|
|
adhoc_history_service = AdHocHistoryService(ADHOC_HISTORY_FILE)
|
|
bootstrap_status_service = BootstrapStatusService(BOOTSTRAP_STATUS_FILE)
|
|
host_status_service = HostStatusService(HOST_STATUS_FILE)
|
|
scheduler_service = SchedulerService(SCHEDULES_FILE, SCHEDULE_RUNS_FILE)
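# Usage sketch for the scheduler service (illustrative; values are made up and the
# exact recurrence payload depends on the ScheduleRecurrence model defined earlier
# in this module):
#
#   req = ScheduleCreateRequest(
#       name="Nightly health check",
#       playbook="health-check",
#       target="all",
#       ...  # schedule_type / recurrence / timezone as required by the model
#   )
#   schedule = scheduler_service.create_schedule(req)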
|
|
|
|
|
|
class WebSocketManager:
    """Gère les connexions WebSocket actives et la diffusion d'événements."""

    def __init__(self):
        self.active_connections: List[WebSocket] = []
        self.lock = Lock()

    async def connect(self, websocket: WebSocket):
        await websocket.accept()
        with self.lock:
            self.active_connections.append(websocket)

    def disconnect(self, websocket: WebSocket):
        with self.lock:
            if websocket in self.active_connections:
                self.active_connections.remove(websocket)

    async def broadcast(self, message: dict):
        # Copier la liste sous verrou puis envoyer hors verrou : garder un
        # threading.Lock pendant un await bloquerait la boucle d'événements
        # si une autre coroutine tentait de l'acquérir pendant l'envoi.
        with self.lock:
            connections = list(self.active_connections)

        disconnected = []
        for connection in connections:
            try:
                await connection.send_json(message)
            except Exception:
                disconnected.append(connection)

        # Nettoyer les connexions déconnectées
        if disconnected:
            with self.lock:
                for conn in disconnected:
                    if conn in self.active_connections:
                        self.active_connections.remove(conn)
|
|
|
|
# Instance globale du gestionnaire WebSocket
|
|
ws_manager = WebSocketManager()
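# Broadcast sketch: any coroutine in this module can push an event to every
# connected dashboard client. The "type"/"data" envelope mirrors the scheduler
# messages above; the event name here is only an example.
#
#   await ws_manager.broadcast({"type": "log_created", "data": {"level": "INFO"}})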
|
|
|
|
|
|
# Service Ansible
|
|
class AnsibleService:
|
|
"""Service pour exécuter les playbooks Ansible"""
|
|
|
|
def __init__(self, ansible_dir: Path):
|
|
self.ansible_dir = ansible_dir
|
|
self.playbooks_dir = ansible_dir / "playbooks"
|
|
self.inventory_path = ansible_dir / "inventory" / "hosts.yml"
|
|
self._inventory_cache: Optional[Dict] = None
|
|
|
|
def get_playbooks(self) -> List[Dict[str, Any]]:
|
|
"""Liste les playbooks disponibles avec leurs métadonnées (category/subcategory).
|
|
|
|
Les métadonnées sont lues en priorité dans play['vars'] pour être compatibles
|
|
avec la syntaxe Ansible (category/subcategory ne sont pas des clés de Play).
|
|
"""
|
|
playbooks = []
|
|
if self.playbooks_dir.exists():
|
|
for pb in self.playbooks_dir.glob("*.yml"):
|
|
# Récupérer les infos du fichier
|
|
stat = pb.stat()
|
|
playbook_info = {
|
|
"name": pb.stem,
|
|
"filename": pb.name,
|
|
"path": str(pb),
|
|
"category": "general",
|
|
"subcategory": "other",
|
|
"size": stat.st_size,
|
|
"modified": datetime.fromtimestamp(stat.st_mtime, tz=timezone.utc).isoformat()
|
|
}
|
|
# Extract category/subcategory from playbook
|
|
try:
|
|
with open(pb, 'r', encoding='utf-8') as f:
|
|
content = yaml.safe_load(f)
|
|
if content and isinstance(content, list) and len(content) > 0:
|
|
play = content[0]
|
|
vars_ = play.get('vars', {}) or {}
|
|
|
|
# Lecture de category avec fallback: play puis vars
|
|
if 'category' in play:
|
|
playbook_info['category'] = play['category']
|
|
elif 'category' in vars_:
|
|
playbook_info['category'] = vars_['category']
|
|
|
|
# Lecture de subcategory avec fallback
|
|
if 'subcategory' in play:
|
|
playbook_info['subcategory'] = play['subcategory']
|
|
elif 'subcategory' in vars_:
|
|
playbook_info['subcategory'] = vars_['subcategory']
|
|
|
|
if 'name' in play:
|
|
playbook_info['description'] = play['name']
|
|
except Exception:
|
|
# On ignore les erreurs de parsing individuelles pour ne pas
|
|
# casser l'ensemble de la liste de playbooks.
|
|
pass
|
|
playbooks.append(playbook_info)
|
|
return playbooks
|
|
|
|
def get_playbook_categories(self) -> Dict[str, List[str]]:
|
|
"""Retourne les catégories et sous-catégories des playbooks"""
|
|
categories = {}
|
|
for pb in self.get_playbooks():
|
|
cat = pb.get('category', 'general')
|
|
subcat = pb.get('subcategory', 'other')
|
|
if cat not in categories:
|
|
categories[cat] = []
|
|
if subcat not in categories[cat]:
|
|
categories[cat].append(subcat)
|
|
return categories
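    # Example of the structure returned above (category/subcategory names are
    # hypothetical, they come from each playbook's vars; "general"/"other" are the
    # defaults used when nothing is declared):
    #
    #   {"maintenance": ["updates", "cleanup"], "general": ["other"]}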
|
|
|
|
def load_inventory(self) -> Dict:
|
|
"""Charge l'inventaire Ansible depuis le fichier YAML"""
|
|
if self._inventory_cache:
|
|
return self._inventory_cache
|
|
|
|
if not self.inventory_path.exists():
|
|
return {}
|
|
|
|
with open(self.inventory_path, 'r') as f:
|
|
self._inventory_cache = yaml.safe_load(f)
|
|
return self._inventory_cache
|
|
|
|
def get_hosts_from_inventory(self, group_filter: Optional[str] = None) -> List[AnsibleInventoryHost]:
|
|
"""Extrait la liste des hôtes de l'inventaire sans doublons.
|
|
|
|
Args:
|
|
group_filter: Si spécifié, filtre les hôtes par ce groupe
|
|
"""
|
|
inventory = self.load_inventory()
|
|
# Use dict to track unique hosts and accumulate their groups
|
|
hosts_dict: Dict[str, AnsibleInventoryHost] = {}
|
|
|
|
def extract_hosts(data: Dict, current_group: str = ""):
|
|
if not isinstance(data, dict):
|
|
return
|
|
|
|
# Extraire les hôtes directs
|
|
if 'hosts' in data:
|
|
for host_name, host_data in data['hosts'].items():
|
|
host_data = host_data or {}
|
|
|
|
if host_name in hosts_dict:
|
|
# Host already exists, add group to its groups list
|
|
if current_group and current_group not in hosts_dict[host_name].groups:
|
|
hosts_dict[host_name].groups.append(current_group)
|
|
else:
|
|
# New host
|
|
hosts_dict[host_name] = AnsibleInventoryHost(
|
|
name=host_name,
|
|
ansible_host=host_data.get('ansible_host', host_name),
|
|
group=current_group,
|
|
groups=[current_group] if current_group else [],
|
|
vars=host_data
|
|
)
|
|
|
|
# Parcourir les enfants (sous-groupes)
|
|
if 'children' in data:
|
|
for child_name, child_data in data['children'].items():
|
|
extract_hosts(child_data, child_name)
|
|
|
|
extract_hosts(inventory.get('all', {}))
|
|
|
|
# Convert to list
|
|
hosts = list(hosts_dict.values())
|
|
|
|
# Apply group filter if specified
|
|
if group_filter and group_filter != 'all':
|
|
hosts = [h for h in hosts if group_filter in h.groups]
|
|
|
|
return hosts
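    # Usage sketch: list only the hosts of one environment group. "env_homelab" is
    # the example group name used elsewhere in this module's docstrings.
    #
    #   homelab_hosts = ansible_service.get_hosts_from_inventory(group_filter="env_homelab")
    #   names = [h.name for h in homelab_hosts]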
|
|
|
|
def invalidate_cache(self):
|
|
"""Invalide le cache de l'inventaire pour forcer un rechargement"""
|
|
self._inventory_cache = None
|
|
|
|
def get_groups(self) -> List[str]:
|
|
"""Extrait la liste des groupes de l'inventaire"""
|
|
inventory = self.load_inventory()
|
|
groups = set()
|
|
|
|
def extract_groups(data: Dict, parent: str = ""):
|
|
if not isinstance(data, dict):
|
|
return
|
|
if 'children' in data:
|
|
for child_name in data['children'].keys():
|
|
groups.add(child_name)
|
|
extract_groups(data['children'][child_name], child_name)
|
|
|
|
extract_groups(inventory.get('all', {}))
|
|
return sorted(list(groups))
|
|
|
|
def get_env_groups(self) -> List[str]:
|
|
"""Retourne uniquement les groupes d'environnement (préfixés par env_)"""
|
|
return [g for g in self.get_groups() if g.startswith('env_')]
|
|
|
|
def get_role_groups(self) -> List[str]:
|
|
"""Retourne uniquement les groupes de rôles (préfixés par role_)"""
|
|
return [g for g in self.get_groups() if g.startswith('role_')]
|
|
|
|
def _save_inventory(self, inventory: Dict):
|
|
"""Sauvegarde l'inventaire dans le fichier YAML"""
|
|
# Créer une sauvegarde avant modification
|
|
backup_path = self.inventory_path.with_suffix('.yml.bak')
|
|
if self.inventory_path.exists():
    shutil.copy2(self.inventory_path, backup_path)
|
|
|
|
with open(self.inventory_path, 'w', encoding='utf-8') as f:
|
|
yaml.dump(inventory, f, default_flow_style=False, allow_unicode=True, sort_keys=False)
|
|
|
|
# Invalider le cache
|
|
self.invalidate_cache()
|
|
|
|
def add_host_to_inventory(self, hostname: str, env_group: str, role_groups: List[str], ansible_host: Optional[str] = None) -> bool:
|
|
"""Ajoute un hôte à l'inventaire Ansible
|
|
|
|
Args:
|
|
hostname: Nom de l'hôte (ex: server.domain.home)
|
|
env_group: Groupe d'environnement (ex: env_homelab)
|
|
role_groups: Liste des groupes de rôles (ex: ['role_proxmox', 'role_sbc'])
|
|
ansible_host: Adresse IP ou hostname pour ansible_host (optionnel)
|
|
|
|
Returns:
|
|
True si l'ajout a réussi
|
|
"""
|
|
inventory = self.load_inventory()
|
|
|
|
# S'assurer que la structure existe
|
|
if 'all' not in inventory:
|
|
inventory['all'] = {}
|
|
if 'children' not in inventory['all']:
|
|
inventory['all']['children'] = {}
|
|
|
|
children = inventory['all']['children']
|
|
|
|
# Ajouter au groupe d'environnement
|
|
if env_group not in children:
|
|
children[env_group] = {'hosts': {}}
|
|
if 'hosts' not in children[env_group]:
|
|
children[env_group]['hosts'] = {}
|
|
|
|
# Définir les variables de l'hôte
|
|
host_vars = None
|
|
if ansible_host and ansible_host != hostname:
|
|
host_vars = {'ansible_host': ansible_host}
|
|
|
|
children[env_group]['hosts'][hostname] = host_vars
|
|
|
|
# Ajouter aux groupes de rôles
|
|
for role_group in role_groups:
|
|
if role_group not in children:
|
|
children[role_group] = {'hosts': {}}
|
|
if 'hosts' not in children[role_group]:
|
|
children[role_group]['hosts'] = {}
|
|
children[role_group]['hosts'][hostname] = None
|
|
|
|
self._save_inventory(inventory)
|
|
return True
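    # Resulting inventory shape (sketch) after adding "server.domain.home" to
    # env_homelab + role_proxmox with an explicit ansible_host (example address):
    #
    #   all:
    #     children:
    #       env_homelab:
    #         hosts:
    #           server.domain.home:
    #             ansible_host: 192.168.1.10
    #       role_proxmox:
    #         hosts:
    #           server.domain.home: null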
|
|
|
|
def remove_host_from_inventory(self, hostname: str) -> bool:
|
|
"""Supprime un hôte de tous les groupes de l'inventaire
|
|
|
|
Args:
|
|
hostname: Nom de l'hôte à supprimer
|
|
|
|
Returns:
|
|
True si la suppression a réussi
|
|
"""
|
|
inventory = self.load_inventory()
|
|
|
|
if 'all' not in inventory or 'children' not in inventory['all']:
|
|
return False
|
|
|
|
children = inventory['all']['children']
|
|
removed = False
|
|
|
|
# Parcourir tous les groupes et supprimer l'hôte
|
|
for group_name, group_data in children.items():
|
|
if isinstance(group_data, dict) and 'hosts' in group_data:
|
|
if hostname in group_data['hosts']:
|
|
del group_data['hosts'][hostname]
|
|
removed = True
|
|
|
|
if removed:
|
|
self._save_inventory(inventory)
|
|
|
|
# Supprimer aussi les statuts persistés (bootstrap + health)
|
|
bootstrap_status_service.remove_host(hostname)
|
|
try:
|
|
host_status_service.remove_host(hostname)
|
|
except Exception:
|
|
pass
|
|
|
|
return removed
|
|
|
|
def update_host_groups(self, hostname: str, env_group: Optional[str] = None, role_groups: Optional[List[str]] = None, ansible_host: Optional[str] = None) -> bool:
|
|
"""Met à jour les groupes d'un hôte existant
|
|
|
|
Args:
|
|
hostname: Nom de l'hôte à modifier
|
|
env_group: Nouveau groupe d'environnement (None = pas de changement)
|
|
role_groups: Nouvelle liste de groupes de rôles (None = pas de changement)
|
|
ansible_host: Nouvelle adresse ansible_host (None = pas de changement)
|
|
|
|
Returns:
|
|
True si la mise à jour a réussi
|
|
"""
|
|
inventory = self.load_inventory()
|
|
|
|
if 'all' not in inventory or 'children' not in inventory['all']:
|
|
return False
|
|
|
|
children = inventory['all']['children']
|
|
|
|
# Trouver le groupe d'environnement actuel
|
|
current_env_group = None
|
|
current_role_groups = []
|
|
current_ansible_host = None
|
|
|
|
for group_name, group_data in children.items():
|
|
if isinstance(group_data, dict) and 'hosts' in group_data:
|
|
if hostname in group_data['hosts']:
|
|
if group_name.startswith('env_'):
|
|
current_env_group = group_name
|
|
# Récupérer ansible_host si défini
|
|
host_vars = group_data['hosts'][hostname]
|
|
if isinstance(host_vars, dict) and 'ansible_host' in host_vars:
|
|
current_ansible_host = host_vars['ansible_host']
|
|
elif group_name.startswith('role_'):
|
|
current_role_groups.append(group_name)
|
|
|
|
if not current_env_group:
|
|
return False # Hôte non trouvé
|
|
|
|
# Appliquer les changements
|
|
new_env_group = env_group if env_group else current_env_group
|
|
new_role_groups = role_groups if role_groups is not None else current_role_groups
|
|
new_ansible_host = ansible_host if ansible_host else current_ansible_host
|
|
|
|
# Supprimer l'hôte de tous les groupes actuels
|
|
for group_name, group_data in children.items():
|
|
if isinstance(group_data, dict) and 'hosts' in group_data:
|
|
if hostname in group_data['hosts']:
|
|
del group_data['hosts'][hostname]
|
|
|
|
# Ajouter au nouveau groupe d'environnement
|
|
if new_env_group not in children:
|
|
children[new_env_group] = {'hosts': {}}
|
|
if 'hosts' not in children[new_env_group]:
|
|
children[new_env_group]['hosts'] = {}
|
|
|
|
host_vars = None
|
|
if new_ansible_host and new_ansible_host != hostname:
|
|
host_vars = {'ansible_host': new_ansible_host}
|
|
children[new_env_group]['hosts'][hostname] = host_vars
|
|
|
|
# Ajouter aux nouveaux groupes de rôles
|
|
for role_group in new_role_groups:
|
|
if role_group not in children:
|
|
children[role_group] = {'hosts': {}}
|
|
if 'hosts' not in children[role_group]:
|
|
children[role_group]['hosts'] = {}
|
|
children[role_group]['hosts'][hostname] = None
|
|
|
|
self._save_inventory(inventory)
|
|
return True
|
|
|
|
def host_exists(self, hostname: str) -> bool:
|
|
"""Vérifie si un hôte existe dans l'inventaire"""
|
|
hosts = self.get_hosts_from_inventory()
|
|
return any(h.name == hostname for h in hosts)
|
|
|
|
def group_exists(self, group_name: str) -> bool:
|
|
"""Vérifie si un groupe existe dans l'inventaire"""
|
|
return group_name in self.get_groups()
|
|
|
|
def add_group(self, group_name: str) -> bool:
|
|
"""Ajoute un nouveau groupe à l'inventaire
|
|
|
|
Args:
|
|
group_name: Nom du groupe (doit commencer par env_ ou role_)
|
|
|
|
Returns:
|
|
True si l'ajout a réussi
|
|
"""
|
|
if self.group_exists(group_name):
|
|
return False # Groupe existe déjà
|
|
|
|
inventory = self.load_inventory()
|
|
|
|
# S'assurer que la structure existe
|
|
if 'all' not in inventory:
|
|
inventory['all'] = {}
|
|
if 'children' not in inventory['all']:
|
|
inventory['all']['children'] = {}
|
|
|
|
# Ajouter le groupe vide
|
|
inventory['all']['children'][group_name] = {'hosts': {}}
|
|
|
|
self._save_inventory(inventory)
|
|
return True
|
|
|
|
def rename_group(self, old_name: str, new_name: str) -> bool:
|
|
"""Renomme un groupe dans l'inventaire
|
|
|
|
Args:
|
|
old_name: Nom actuel du groupe
|
|
new_name: Nouveau nom du groupe
|
|
|
|
Returns:
|
|
True si le renommage a réussi
|
|
"""
|
|
if not self.group_exists(old_name):
|
|
return False # Groupe source n'existe pas
|
|
|
|
if self.group_exists(new_name):
|
|
return False # Groupe cible existe déjà
|
|
|
|
inventory = self.load_inventory()
|
|
children = inventory.get('all', {}).get('children', {})
|
|
|
|
if old_name not in children:
|
|
return False
|
|
|
|
# Copier les données du groupe vers le nouveau nom
|
|
children[new_name] = children[old_name]
|
|
del children[old_name]
|
|
|
|
self._save_inventory(inventory)
|
|
return True
|
|
|
|
def delete_group(self, group_name: str, move_hosts_to: Optional[str] = None) -> Dict[str, Any]:
|
|
"""Supprime un groupe de l'inventaire
|
|
|
|
Args:
|
|
group_name: Nom du groupe à supprimer
|
|
move_hosts_to: Groupe vers lequel déplacer les hôtes (optionnel)
|
|
|
|
Returns:
|
|
Dict avec le résultat de l'opération
|
|
"""
|
|
if not self.group_exists(group_name):
|
|
return {"success": False, "error": "Groupe non trouvé"}
|
|
|
|
inventory = self.load_inventory()
|
|
children = inventory.get('all', {}).get('children', {})
|
|
|
|
if group_name not in children:
|
|
return {"success": False, "error": "Groupe non trouvé dans children"}
|
|
|
|
group_data = children[group_name]
|
|
hosts_in_group = list(group_data.get('hosts', {}).keys()) if group_data else []
|
|
|
|
# Si des hôtes sont dans le groupe et qu'on veut les déplacer
|
|
if hosts_in_group and move_hosts_to:
|
|
if not self.group_exists(move_hosts_to) and move_hosts_to != group_name:
|
|
# Créer le groupe cible s'il n'existe pas
|
|
children[move_hosts_to] = {'hosts': {}}
|
|
|
|
if move_hosts_to in children:
|
|
if 'hosts' not in children[move_hosts_to]:
|
|
children[move_hosts_to]['hosts'] = {}
|
|
|
|
# Déplacer les hôtes
|
|
for hostname in hosts_in_group:
|
|
host_vars = group_data['hosts'].get(hostname)
|
|
children[move_hosts_to]['hosts'][hostname] = host_vars
|
|
|
|
# Supprimer le groupe
|
|
del children[group_name]
|
|
|
|
self._save_inventory(inventory)
|
|
return {
|
|
"success": True,
|
|
"hosts_affected": hosts_in_group,
|
|
"hosts_moved_to": move_hosts_to if hosts_in_group and move_hosts_to else None
|
|
}
|
|
|
|
def get_group_hosts(self, group_name: str) -> List[str]:
|
|
"""Retourne la liste des hôtes dans un groupe
|
|
|
|
Args:
|
|
group_name: Nom du groupe
|
|
|
|
Returns:
|
|
Liste des noms d'hôtes
|
|
"""
|
|
inventory = self.load_inventory()
|
|
children = inventory.get('all', {}).get('children', {})
|
|
|
|
if group_name not in children:
|
|
return []
|
|
|
|
group_data = children[group_name]
|
|
if not group_data or 'hosts' not in group_data:
|
|
return []
|
|
|
|
return list(group_data['hosts'].keys())
|
|
|
|
async def execute_playbook(
|
|
self,
|
|
playbook: str,
|
|
target: str = "all",
|
|
extra_vars: Optional[Dict[str, Any]] = None,
|
|
check_mode: bool = False,
|
|
verbose: bool = False
|
|
) -> Dict[str, Any]:
|
|
"""Exécute un playbook Ansible"""
|
|
# Résoudre le chemin du playbook
|
|
# On accepte soit un nom avec extension, soit un nom sans extension (ex: "health-check")
|
|
playbook_path = self.playbooks_dir / playbook
|
|
|
|
# Si le fichier n'existe pas tel quel, essayer avec des extensions courantes
|
|
if not playbook_path.exists():
    pb_name = Path(playbook).name  # enlever d'éventuels chemins
|
|
# Si aucune extension n'est fournie, tester .yml puis .yaml
|
|
if not Path(pb_name).suffix:
|
|
for ext in (".yml", ".yaml"):
|
|
candidate = self.playbooks_dir / f"{pb_name}{ext}"
|
|
if candidate.exists():
|
|
playbook_path = candidate
|
|
break
|
|
|
|
if not playbook_path.exists():
|
|
# À ce stade, on n'a trouvé aucun fichier correspondant
|
|
raise FileNotFoundError(f"Playbook introuvable: {playbook}")
|
|
|
|
# Construire la commande ansible-playbook
|
|
cmd = [
|
|
"ansible-playbook",
|
|
str(playbook_path),
|
|
"-i", str(self.inventory_path),
|
|
"--limit", target
|
|
]
|
|
|
|
if check_mode:
|
|
cmd.append("--check")
|
|
|
|
if verbose:
|
|
cmd.append("-v")
|
|
|
|
if extra_vars:
|
|
cmd.extend(["--extra-vars", json.dumps(extra_vars)])
|
|
|
|
private_key = find_ssh_private_key()
|
|
if private_key:
|
|
cmd.extend(["--private-key", private_key])
|
|
|
|
if SSH_USER:
|
|
cmd.extend(["-u", SSH_USER])
|
|
|
|
start_time = perf_counter()
|
|
|
|
try:
|
|
# Exécuter la commande
|
|
process = await asyncio.create_subprocess_exec(
|
|
*cmd,
|
|
stdout=asyncio.subprocess.PIPE,
|
|
stderr=asyncio.subprocess.PIPE,
|
|
cwd=str(self.ansible_dir)
|
|
)
|
|
|
|
stdout, stderr = await process.communicate()
|
|
execution_time = perf_counter() - start_time
|
|
|
|
return {
|
|
"success": process.returncode == 0,
|
|
"return_code": process.returncode,
|
|
"stdout": stdout.decode('utf-8', errors='replace'),
|
|
"stderr": stderr.decode('utf-8', errors='replace'),
|
|
"execution_time": round(execution_time, 2),
|
|
"command": " ".join(cmd)
|
|
}
|
|
except FileNotFoundError:
|
|
return {
|
|
"success": False,
|
|
"return_code": -1,
|
|
"stdout": "",
|
|
"stderr": "ansible-playbook non trouvé. Vérifiez que Ansible est installé.",
|
|
"execution_time": 0,
|
|
"command": " ".join(cmd)
|
|
}
|
|
except Exception as e:
|
|
return {
|
|
"success": False,
|
|
"return_code": -1,
|
|
"stdout": "",
|
|
"stderr": str(e),
|
|
"execution_time": perf_counter() - start_time,
|
|
"command": " ".join(cmd)
|
|
}
|
|
|
|
|
|
# Instance globale du service Ansible
|
|
ansible_service = AnsibleService(ANSIBLE_DIR)
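# Execution sketch (assumes an ansible-playbook binary on PATH and the
# "health-check" playbook mentioned above; --check avoids any real change):
#
#   result = await ansible_service.execute_playbook("health-check", target="all", check_mode=True)
#   if not result["success"]:
#       print(result["stderr"])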
|
|
|
|
|
|
# ===== SERVICE BOOTSTRAP SSH =====
|
|
|
|
class BootstrapRequest(BaseModel):
|
|
"""Requête de bootstrap pour un hôte"""
|
|
host: str = Field(..., description="Adresse IP ou hostname de l'hôte")
|
|
root_password: str = Field(..., description="Mot de passe root pour la connexion initiale")
|
|
automation_user: str = Field(default="automation", description="Nom de l'utilisateur d'automatisation à créer")
|
|
|
|
|
|
class CommandResult(BaseModel):
|
|
"""Résultat d'une commande SSH"""
|
|
status: str
|
|
return_code: int
|
|
stdout: str
|
|
stderr: Optional[str] = None
|
|
|
|
|
|
def find_ssh_private_key() -> Optional[str]:
|
|
"""Trouve une clé privée SSH disponible en inspectant plusieurs répertoires."""
|
|
candidate_dirs = []
|
|
env_path = Path(SSH_KEY_PATH)
|
|
candidate_dirs.append(env_path.parent)
|
|
candidate_dirs.append(Path("/app/ssh_keys"))
|
|
candidate_dirs.append(Path.home() / ".ssh")
|
|
|
|
seen = set()
|
|
key_paths: List[str] = []
|
|
|
|
for directory in candidate_dirs:
|
|
if not directory or not directory.exists():
|
|
continue
|
|
for name in [
|
|
env_path.name,
|
|
"id_automation_ansible",
|
|
"id_rsa",
|
|
"id_ed25519",
|
|
"id_ecdsa",
|
|
]:
|
|
path = directory / name
|
|
if str(path) not in seen:
|
|
seen.add(str(path))
|
|
key_paths.append(str(path))
|
|
# Ajouter dynamiquement toutes les clés sans extension .pub
|
|
for file in directory.iterdir():
|
|
if file.is_file() and not file.suffix and not file.name.startswith("known_hosts"):
|
|
if str(file) not in seen:
|
|
seen.add(str(file))
|
|
key_paths.append(str(file))
|
|
|
|
for key_path in key_paths:
|
|
if key_path and Path(key_path).exists():
|
|
return key_path
|
|
|
|
return None
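# Search order recap (from the code above): the directory of SSH_KEY_PATH first,
# then /app/ssh_keys, then ~/.ssh; well-known key names are tried before any other
# extension-less file in each directory. Typical call:
#
#   key = find_ssh_private_key()
#   if key is None:
#       pass  # no usable private key mounted; password-based bootstrap is still possible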
|
|
|
|
|
|
def run_ssh_command(
|
|
host: str,
|
|
command: str,
|
|
ssh_user: str = "root",
|
|
ssh_password: Optional[str] = None,
|
|
timeout: int = 60
|
|
) -> tuple:
|
|
"""Exécute une commande SSH sur un hôte distant.
|
|
|
|
Returns:
|
|
tuple: (return_code, stdout, stderr)
|
|
"""
|
|
ssh_cmd = ["ssh"]
|
|
|
|
# Options SSH communes
|
|
ssh_opts = [
|
|
"-o", "StrictHostKeyChecking=no",
|
|
"-o", "UserKnownHostsFile=/dev/null",
|
|
"-o", "ConnectTimeout=10",
|
|
"-o", "BatchMode=no" if ssh_password else "BatchMode=yes",
|
|
]
|
|
|
|
# Si pas de mot de passe, utiliser la clé SSH
|
|
if not ssh_password:
|
|
private_key = find_ssh_private_key()
|
|
if private_key:
|
|
ssh_opts.extend(["-i", private_key])
|
|
|
|
ssh_cmd.extend(ssh_opts)
|
|
ssh_cmd.append(f"{ssh_user}@{host}")
|
|
ssh_cmd.append(command)
|
|
|
|
try:
|
|
if ssh_password:
|
|
# Utiliser sshpass pour l'authentification par mot de passe
|
|
full_cmd = ["sshpass", "-p", ssh_password] + ssh_cmd
|
|
else:
|
|
full_cmd = ssh_cmd
|
|
|
|
result = subprocess.run(
|
|
full_cmd,
|
|
capture_output=True,
|
|
text=True,
|
|
timeout=timeout
|
|
)
|
|
return result.returncode, result.stdout, result.stderr
|
|
except subprocess.TimeoutExpired:
|
|
return -1, "", f"Timeout après {timeout} secondes"
|
|
except FileNotFoundError as e:
|
|
if "sshpass" in str(e):
|
|
return -1, "", "sshpass n'est pas installé. Installez-le avec: apt install sshpass"
|
|
return -1, "", str(e)
|
|
except Exception as e:
|
|
return -1, "", str(e)
|
|
|
|
|
|
def bootstrap_host(host: str, root_password: str, automation_user: str = "automation") -> CommandResult:
|
|
"""Prépare un hôte pour Ansible (création user, clé SSH, sudo, python3) pour Debian/Alpine/FreeBSD.
|
|
|
|
Utilise un script shell complet uploadé via heredoc pour éviter les problèmes de quoting.
|
|
"""
|
|
import logging
|
|
logger = logging.getLogger("bootstrap")
|
|
|
|
# Chercher la clé publique dans plusieurs emplacements possibles
|
|
primary_dirs = [
|
|
Path(SSH_KEY_PATH).parent,
|
|
Path("/app/ssh_keys"),
|
|
Path.home() / ".ssh",
|
|
]
|
|
ssh_dir = primary_dirs[0]
|
|
pub_paths = [
|
|
SSH_KEY_PATH + ".pub",
|
|
"/app/ssh_keys/id_rsa.pub",
|
|
"/app/ssh_keys/id_ed25519.pub",
|
|
"/app/ssh_keys/id_ecdsa.pub",
|
|
"/app/ssh_keys/id_automation_ansible.pub",
|
|
]
|
|
|
|
# Ajouter dynamiquement toutes les clés .pub trouvées dans le répertoire SSH
|
|
for directory in primary_dirs:
|
|
if not directory.exists():
|
|
continue
|
|
for f in directory.iterdir():
|
|
if f.is_file() and f.suffix == ".pub" and str(f) not in pub_paths:
|
|
pub_paths.append(str(f))
|
|
|
|
logger.info(f"SSH_KEY_PATH = {SSH_KEY_PATH}")
|
|
logger.info(f"Recherche de clé publique dans: {pub_paths}")
|
|
|
|
pub_key = None
|
|
pub_path_used = None
|
|
|
|
for pub_path in pub_paths:
|
|
try:
|
|
if Path(pub_path).exists():
|
|
pub_key = Path(pub_path).read_text(encoding="utf-8").strip()
|
|
if pub_key:
|
|
pub_path_used = pub_path
|
|
logger.info(f"Clé publique trouvée: {pub_path}")
|
|
break
|
|
except Exception as e:
|
|
logger.warning(f"Erreur lecture {pub_path}: {e}")
|
|
continue
|
|
|
|
if not pub_key:
|
|
# Lister les fichiers disponibles pour le debug
|
|
ssh_dir = Path(SSH_KEY_PATH).parent
|
|
available_files = []
|
|
if ssh_dir.exists():
|
|
available_files = [f.name for f in ssh_dir.iterdir()]
|
|
|
|
raise HTTPException(
|
|
status_code=500,
|
|
detail=f"Clé publique SSH non trouvée. Chemins testés: {pub_paths}. Fichiers disponibles dans {ssh_dir}: {available_files}",
|
|
)
|
|
|
|
# Script shell complet, robuste, avec logs détaillés
|
|
bootstrap_script = f"""#!/bin/sh
|
|
set -e
|
|
|
|
AUT_USER="{automation_user}"
|
|
|
|
echo "=== Bootstrap Ansible Host ==="
|
|
echo "User: $AUT_USER"
|
|
echo ""
|
|
|
|
# 1) Détection OS
|
|
if command -v apk >/dev/null 2>&1; then
|
|
OS_TYPE="alpine"
|
|
echo "[1/7] OS détecté: Alpine Linux"
|
|
elif [ "$(uname -s 2>/dev/null)" = "FreeBSD" ] || \
|
|
command -v pkg >/dev/null 2>&1 || \
|
|
( [ -f /etc/os-release ] && grep -qi 'ID=freebsd' /etc/os-release ); then
|
|
OS_TYPE="freebsd"
|
|
echo "[1/7] OS détecté: FreeBSD"
|
|
else
|
|
OS_TYPE="debian"
|
|
echo "[1/7] OS détecté: Debian-like"
|
|
fi
|
|
|
|
# 2) Vérification / préparation utilisateur
|
|
echo "[2/7] Vérification utilisateur/groupe..."
|
|
if id "$AUT_USER" >/dev/null 2>&1; then
|
|
echo " - Utilisateur déjà existant: $AUT_USER (aucune suppression)"
|
|
else
|
|
echo " - Utilisateur inexistant, il sera créé"
|
|
fi
|
|
|
|
# 3) Création utilisateur (idempotent)
|
|
echo "[3/7] Création utilisateur $AUT_USER..."
|
|
if id "$AUT_USER" >/dev/null 2>&1; then
|
|
echo " - Utilisateur déjà présent, réutilisation"
|
|
elif [ "$OS_TYPE" = "alpine" ]; then
|
|
adduser -D "$AUT_USER"
|
|
echo " - Utilisateur créé (Alpine: adduser -D)"
|
|
elif [ "$OS_TYPE" = "freebsd" ]; then
|
|
pw useradd "$AUT_USER" -m -s /bin/sh
|
|
echo " - Utilisateur créé (FreeBSD: pw useradd)"
|
|
else
|
|
useradd -m -s /bin/bash "$AUT_USER" || useradd -m -s /bin/sh "$AUT_USER"
|
|
echo " - Utilisateur créé (Debian: useradd -m)"
|
|
fi
|
|
|
|
# 3b) S'assurer que le compte n'est pas verrouillé
|
|
echo " - Vérification du verrouillage du compte..."
|
|
if command -v passwd >/dev/null 2>&1; then
|
|
passwd -u "$AUT_USER" 2>/dev/null || true
|
|
fi
|
|
if command -v usermod >/dev/null 2>&1; then
|
|
usermod -U "$AUT_USER" 2>/dev/null || true
|
|
fi
|
|
|
|
# 4) Configuration clé SSH
|
|
echo "[4/7] Configuration clé SSH..."
|
|
HOME_DIR=$(getent passwd "$AUT_USER" | cut -d: -f6)
|
|
if [ -z "$HOME_DIR" ]; then
|
|
HOME_DIR="/home/$AUT_USER"
|
|
fi
|
|
echo " - HOME_DIR: $HOME_DIR"
|
|
|
|
mkdir -p "$HOME_DIR/.ssh"
|
|
chown "$AUT_USER":"$AUT_USER" "$HOME_DIR/.ssh"
|
|
chmod 700 "$HOME_DIR/.ssh"
|
|
echo " - Répertoire .ssh créé et configuré"
|
|
|
|
cat > "$HOME_DIR/.ssh/authorized_keys" << 'SSHKEY_EOF'
|
|
{pub_key}
|
|
SSHKEY_EOF
|
|
|
|
chown "$AUT_USER":"$AUT_USER" "$HOME_DIR/.ssh/authorized_keys"
|
|
chmod 600 "$HOME_DIR/.ssh/authorized_keys"
|
|
echo " - Clé publique installée dans authorized_keys"
|
|
|
|
if [ -s "$HOME_DIR/.ssh/authorized_keys" ]; then
|
|
KEY_COUNT=$(wc -l < "$HOME_DIR/.ssh/authorized_keys")
|
|
echo " - Vérification: $KEY_COUNT clé(s) dans authorized_keys"
|
|
else
|
|
echo " - ERREUR: authorized_keys vide ou absent!"
|
|
exit 1
|
|
fi
|
|
|
|
# 5) Installation sudo
|
|
echo "[5/7] Installation sudo..."
|
|
if command -v sudo >/dev/null 2>&1; then
|
|
echo " - sudo déjà installé"
|
|
else
|
|
if [ "$OS_TYPE" = "alpine" ]; then
|
|
apk add --no-cache sudo
|
|
echo " - sudo installé (apk)"
|
|
elif [ "$OS_TYPE" = "freebsd" ]; then
|
|
pkg install -y sudo
|
|
echo " - sudo installé (pkg)"
|
|
else
|
|
apt-get update -qq && apt-get install -y sudo
|
|
echo " - sudo installé (apt)"
|
|
fi
|
|
fi
|
|
|
|
# 6) Configuration sudoers
|
|
echo "[6/7] Configuration sudoers..."
|
|
if [ ! -d /etc/sudoers.d ]; then
|
|
mkdir -p /etc/sudoers.d
|
|
chmod 750 /etc/sudoers.d 2>/dev/null || true
|
|
echo " - Répertoire /etc/sudoers.d créé"
|
|
fi
|
|
echo "$AUT_USER ALL=(ALL) NOPASSWD: ALL" > /etc/sudoers.d/automation
|
|
chmod 440 /etc/sudoers.d/automation
|
|
echo " - Sudoers configuré: /etc/sudoers.d/automation"
|
|
|
|
# 7) Installation Python3
|
|
echo "[7/7] Installation Python3..."
|
|
if command -v python3 >/dev/null 2>&1; then
|
|
PYTHON_VERSION=$(python3 --version 2>&1)
|
|
echo " - Python3 déjà installé: $PYTHON_VERSION"
|
|
else
|
|
if [ "$OS_TYPE" = "alpine" ]; then
|
|
apk add --no-cache python3
|
|
echo " - Python3 installé (apk)"
|
|
elif [ "$OS_TYPE" = "freebsd" ]; then
|
|
pkg install -y python3
|
|
echo " - Python3 installé (pkg)"
|
|
else
|
|
apt-get update -qq && apt-get install -y python3
|
|
echo " - Python3 installé (apt)"
|
|
fi
|
|
fi
|
|
|
|
echo ""
|
|
echo "=== Bootstrap terminé avec succès ==="
|
|
echo "Utilisateur: $AUT_USER"
|
|
echo "HOME: $HOME_DIR"
|
|
echo "SSH: $HOME_DIR/.ssh/authorized_keys"
|
|
echo "Sudo: /etc/sudoers.d/automation"
|
|
"""
|
|
|
|
# Envoyer le script de manière compatible avec tous les shells
|
|
lines = bootstrap_script.splitlines()
|
|
|
|
def _sh_single_quote(s: str) -> str:
|
|
"""Protège une chaîne pour un shell POSIX en simple quotes."""
|
|
return "'" + s.replace("'", "'\"'\"'") + "'"
|
|
|
|
quoted_lines = " ".join(_sh_single_quote(line) for line in lines)
|
|
remote_cmd = f"printf '%s\\n' {quoted_lines} | sh"
|
|
|
|
rc, out, err = run_ssh_command(
|
|
host,
|
|
remote_cmd,
|
|
ssh_user="root",
|
|
ssh_password=root_password,
|
|
)
|
|
|
|
if rc != 0:
|
|
raise HTTPException(
|
|
status_code=500,
|
|
detail={
|
|
"status": "error",
|
|
"return_code": rc,
|
|
"stdout": out,
|
|
"stderr": err,
|
|
},
|
|
)
|
|
|
|
# Vérification: tester la connexion SSH par clé avec l'utilisateur d'automatisation
|
|
verify_rc, verify_out, verify_err = run_ssh_command(
|
|
host,
|
|
"echo 'ssh_key_ok'",
|
|
ssh_user=automation_user,
|
|
ssh_password=None,
|
|
)
|
|
|
|
if verify_rc != 0:
|
|
combined_stdout = (out or "") + f"\n\n[SSH VERIFY] Échec de la connexion par clé pour {automation_user}@{host}\n" + (verify_out or "")
|
|
combined_stderr = (err or "") + f"\n\n[SSH VERIFY] " + (verify_err or "Aucune erreur détaillée")
|
|
raise HTTPException(
|
|
status_code=500,
|
|
detail={
|
|
"status": "error",
|
|
"return_code": verify_rc,
|
|
"stdout": combined_stdout,
|
|
"stderr": combined_stderr,
|
|
},
|
|
)
|
|
|
|
# Succès complet
|
|
final_stdout = (out or "") + f"\n\n[SSH VERIFY] Connexion par clé OK pour {automation_user}@{host}"
|
|
return CommandResult(
|
|
status="ok",
|
|
return_code=0,
|
|
stdout=final_stdout,
|
|
stderr=err,
|
|
)
|
|
|
|
|
|
# Base de données hybride : hôtes depuis Ansible, tâches/logs en mémoire
|
|
class HybridDB:
|
|
"""Base de données qui charge les hôtes depuis l'inventaire Ansible"""
|
|
|
|
def __init__(self, ansible_svc: AnsibleService):
|
|
self.ansible_service = ansible_svc
|
|
self._hosts_cache: Optional[List[Host]] = None
|
|
self._hosts_cache_time: float = 0
|
|
self._cache_ttl: float = 60 # Cache de 60 secondes
|
|
# Statuts runtime des hôtes (en mémoire) rechargés depuis le fichier JSON persistant
|
|
self._host_runtime_status: Dict[str, Dict[str, Any]] = {}
|
|
try:
|
|
persisted_hosts = host_status_service.get_all_status()
|
|
for host_name, info in persisted_hosts.items():
|
|
last_seen_raw = info.get("last_seen")
|
|
last_seen_dt: Optional[datetime] = None
|
|
if isinstance(last_seen_raw, str):
|
|
try:
|
|
last_seen_dt = datetime.fromisoformat(last_seen_raw.replace("Z", "+00:00"))
|
|
except Exception:
|
|
last_seen_dt = None
|
|
elif isinstance(last_seen_raw, datetime):
|
|
last_seen_dt = last_seen_raw
|
|
|
|
self._host_runtime_status[host_name] = {
|
|
"status": info.get("status", "online"),
|
|
"last_seen": last_seen_dt,
|
|
"os": info.get("os"),
|
|
}
|
|
except Exception:
|
|
# En cas de problème de lecture, on repartira d'un état en mémoire vierge
|
|
self._host_runtime_status = {}
|
|
|
|
# Tâches et logs en mémoire (persistés pendant l'exécution)
|
|
self.tasks: List[Task] = []
|
|
|
|
self.logs: List[LogEntry] = [
|
|
LogEntry(id=1, timestamp=datetime.now(timezone.utc), level="INFO",
|
|
message="Dashboard démarré - Inventaire Ansible chargé")
|
|
]
|
|
|
|
self._id_counters = {"hosts": 100, "tasks": 1, "logs": 2}
|
|
|
|
@property
|
|
def hosts(self) -> List[Host]:
|
|
"""Charge les hôtes depuis l'inventaire Ansible avec cache"""
|
|
current_time = time()
|
|
|
|
# Retourner le cache si valide
|
|
if self._hosts_cache and (current_time - self._hosts_cache_time) < self._cache_ttl:
|
|
return self._hosts_cache
|
|
|
|
# Recharger depuis Ansible
|
|
self._hosts_cache = self._load_hosts_from_ansible()
|
|
self._hosts_cache_time = current_time
|
|
return self._hosts_cache
|
|
|
|
def _load_hosts_from_ansible(self) -> List[Host]:
|
|
"""Convertit l'inventaire Ansible en liste d'hôtes (sans doublons)"""
|
|
hosts = []
|
|
ansible_hosts = self.ansible_service.get_hosts_from_inventory()
|
|
|
|
# Charger tous les statuts de bootstrap
|
|
all_bootstrap_status = bootstrap_status_service.get_all_status()
|
|
|
|
for idx, ah in enumerate(ansible_hosts, start=1):
|
|
# Extraire le groupe principal depuis les groupes
|
|
primary_group = ah.groups[0] if ah.groups else "unknown"
|
|
|
|
# Récupérer le statut bootstrap pour cet hôte
|
|
bootstrap_info = all_bootstrap_status.get(ah.name, {})
|
|
bootstrap_ok = bootstrap_info.get("bootstrap_ok", False)
|
|
bootstrap_date_str = bootstrap_info.get("bootstrap_date")
|
|
bootstrap_date = None
|
|
if bootstrap_date_str:
|
|
try:
|
|
bootstrap_date = datetime.fromisoformat(bootstrap_date_str.replace("Z", "+00:00"))
|
|
except Exception:
|
|
pass
|
|
|
|
runtime_status = self._host_runtime_status.get(ah.name, {})
|
|
status = runtime_status.get("status", "online")
|
|
last_seen = runtime_status.get("last_seen")
|
|
os_label = runtime_status.get("os", f"Linux ({primary_group})")
|
|
|
|
host = Host(
|
|
id=str(idx),
|
|
name=ah.name,
|
|
ip=ah.ansible_host,
|
|
status=status,
|
|
os=os_label,
|
|
last_seen=last_seen,
|
|
groups=ah.groups, # Tous les groupes de l'hôte
|
|
bootstrap_ok=bootstrap_ok,
|
|
bootstrap_date=bootstrap_date
|
|
)
|
|
hosts.append(host)
|
|
|
|
return hosts
|
|
|
|
def refresh_hosts(self):
|
|
"""Force le rechargement des hôtes depuis Ansible"""
|
|
self._hosts_cache = None
|
|
return self.hosts
|
|
|
|
def update_host_status(self, host_name: str, status: str, os_info: Optional[str] = None):
|
|
"""Met à jour le statut d'un hôte après un health-check"""
|
|
for host in self.hosts:
|
|
if host.name == host_name:
|
|
host.status = status
|
|
host.last_seen = datetime.now(timezone.utc)
|
|
if os_info:
|
|
host.os = os_info
|
|
self._host_runtime_status[host_name] = {
|
|
"status": host.status,
|
|
"last_seen": host.last_seen,
|
|
"os": host.os,
|
|
}
|
|
# Persister dans le fichier JSON partagé avec Ansible
|
|
try:
|
|
host_status_service.set_status(host_name, host.status, host.last_seen, host.os)
|
|
except Exception:
|
|
# Ne pas casser l'exécution si la persistance échoue
|
|
pass
|
|
break
|
|
|
|
@property
|
|
def metrics(self) -> SystemMetrics:
|
|
"""Calcule les métriques en temps réel basées sur les logs de tâches"""
|
|
hosts = self.hosts
|
|
|
|
# Utiliser les statistiques des fichiers de logs de tâches
|
|
task_stats = task_log_service.get_stats()
|
|
total_tasks = task_stats.get("total", 0)
|
|
completed_tasks = task_stats.get("completed", 0)
|
|
failed_tasks = task_stats.get("failed", 0)
|
|
total_finished = completed_tasks + failed_tasks
|
|
|
|
return SystemMetrics(
|
|
online_hosts=len([h for h in hosts if h.status == "online"]),
|
|
total_tasks=total_tasks,
|
|
success_rate=round((completed_tasks / total_finished * 100) if total_finished > 0 else 100, 1),
|
|
uptime=99.9,
|
|
cpu_usage=0,
|
|
memory_usage=0,
|
|
disk_usage=0
|
|
)
|
|
|
|
    def get_next_id(self, collection: str) -> int:
        """Retourne l'identifiant courant de la collection puis incrémente le compteur."""
        next_id = self._id_counters[collection]
        self._id_counters[collection] += 1
        return next_id
|
|
|
|
|
|
# Instance globale de la base de données hybride
|
|
db = HybridDB(ansible_service)
|
|
|
|
# Dépendances FastAPI
|
|
async def verify_api_key(api_key: str = Depends(api_key_header)) -> bool:
|
|
"""Vérifie la clé API fournie"""
|
|
if not api_key or api_key != API_KEY:
|
|
raise HTTPException(status_code=401, detail="Clé API invalide ou manquante")
|
|
return True
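# Client-side sketch: protected endpoints expect the API key in the header declared
# by `api_key_header` earlier in this module (assumed here to be "X-API-Key"; adjust
# if the header is named differently). Port 8000 is the usual uvicorn default.
#
#   curl -H "X-API-Key: $API_KEY" http://localhost:8000/api/hosts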
|
|
|
|
# Routes API
|
|
@app.get("/", response_class=HTMLResponse)
|
|
async def root(request: Request):
|
|
"""Page principale du dashboard"""
|
|
return FileResponse(BASE_DIR / "index.html")
|
|
|
|
|
|
@app.get("/api", response_class=HTMLResponse)
|
|
async def api_home(request: Request):
|
|
"""Page d'accueil de l'API Homelab Dashboard"""
|
|
return """
|
|
<!DOCTYPE html>
|
|
<html lang="fr">
|
|
<head>
|
|
<meta charset="UTF-8">
|
|
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
|
<title>Homelab Dashboard API</title>
|
|
<style>
|
|
body { font-family: 'Inter', sans-serif; background: #0a0a0a; color: white; margin: 0; padding: 40px; }
|
|
.container { max-width: 800px; margin: 0 auto; text-align: center; }
|
|
.gradient-text { background: linear-gradient(135deg, #7c3aed 0%, #3b82f6 100%); -webkit-background-clip: text; -webkit-text-fill-color: transparent; }
|
|
.card { background: rgba(42, 42, 42, 0.8); border: 1px solid rgba(255, 255, 255, 0.1); border-radius: 16px; padding: 24px; margin: 20px 0; }
|
|
.btn { background: linear-gradient(135deg, #7c3aed 0%, #8b5cf6 100%); color: white; padding: 12px 24px; border: none; border-radius: 8px; text-decoration: none; display: inline-block; margin: 10px; transition: all 0.3s ease; }
|
|
.btn:hover { transform: translateY(-2px); box-shadow: 0 10px 25px rgba(124, 58, 237, 0.3); }
|
|
</style>
|
|
</head>
|
|
<body>
|
|
<div class="container">
|
|
<h1 class="gradient-text" style="font-size: 3rem; margin-bottom: 1rem;">Homelab Dashboard API</h1>
|
|
<p style="font-size: 1.2rem; color: #a1a1aa; margin-bottom: 2rem;">
|
|
API REST moderne pour la gestion automatique d'homelab
|
|
</p>
|
|
|
|
<div class="card">
|
|
<h2 style="color: #7c3aed; margin-bottom: 1rem;">Documentation API</h2>
|
|
<p style="margin-bottom: 1.5rem;">Explorez les endpoints disponibles et testez les fonctionnalités</p>
|
|
<div>
|
|
<a href="/api/docs" class="btn">
|
|
<i class="fas fa-book"></i> Documentation Interactive
|
|
</a>
|
|
<a href="/api/redoc" class="btn">
|
|
<i class="fas fa-file-alt"></i> Documentation Alternative
|
|
</a>
|
|
</div>
|
|
</div>
|
|
|
|
<div class="card">
|
|
<h2 style="color: #7c3aed; margin-bottom: 1rem;">Endpoints Principaux</h2>
|
|
<div style="text-align: left; max-width: 600px; margin: 0 auto;">
|
|
<div style="margin-bottom: 1rem;">
|
|
<strong style="color: #10b981;">GET</strong>
|
|
<code style="background: #1f2937; padding: 4px 8px; border-radius: 4px;">/api/hosts</code>
|
|
<span style="color: #a1a1aa;"> - Liste des hôtes</span>
|
|
</div>
|
|
<div style="margin-bottom: 1rem;">
|
|
<strong style="color: #3b82f6;">POST</strong>
|
|
<code style="background: #1f2937; padding: 4px 8px; border-radius: 4px;">/api/tasks</code>
|
|
<span style="color: #a1a1aa;"> - Créer une tâche</span>
|
|
</div>
|
|
<div style="margin-bottom: 1rem;">
|
|
<strong style="color: #10b981;">GET</strong>
|
|
<code style="background: #1f2937; padding: 4px 8px; border-radius: 4px;">/api/metrics</code>
|
|
<span style="color: #a1a1aa;"> - Métriques système</span>
|
|
</div>
|
|
<div style="margin-bottom: 1rem;">
|
|
<strong style="color: #f59e0b;">WS</strong>
|
|
<code style="background: #1f2937; padding: 4px 8px; border-radius: 4px;">/ws</code>
|
|
<span style="color: #a1a1aa;"> - WebSocket temps réel</span>
|
|
</div>
|
|
</div>
|
|
</div>
|
|
|
|
<div style="margin-top: 2rem; color: #6b7280; font-size: 0.9rem;">
|
|
<p>Version 1.0.0 | Développé avec FastAPI et technologies modernes</p>
|
|
</div>
|
|
</div>
|
|
</body>
|
|
</html>
|
|
"""
|
|
|
|
# ===== ENDPOINTS HOSTS - Routes statiques d'abord =====
|
|
|
|
@app.get("/api/hosts/groups")
|
|
async def get_host_groups(api_key_valid: bool = Depends(verify_api_key)):
|
|
"""Récupère les groupes disponibles pour les hôtes (environnements et rôles)"""
|
|
return {
|
|
"env_groups": ansible_service.get_env_groups(),
|
|
"role_groups": ansible_service.get_role_groups(),
|
|
"all_groups": ansible_service.get_groups()
|
|
}
|
|
|
|
|
|
# ===== ENDPOINTS GROUPS - Gestion des groupes d'environnement et de rôles =====
|
|
|
|
@app.get("/api/groups")
|
|
async def get_all_groups(api_key_valid: bool = Depends(verify_api_key)):
|
|
"""Récupère tous les groupes avec leurs détails"""
|
|
env_groups = ansible_service.get_env_groups()
|
|
role_groups = ansible_service.get_role_groups()
|
|
|
|
groups = []
|
|
for g in env_groups:
|
|
hosts = ansible_service.get_group_hosts(g)
|
|
groups.append({
|
|
"name": g,
|
|
"type": "env",
|
|
"display_name": g.replace('env_', ''),
|
|
"hosts_count": len(hosts),
|
|
"hosts": hosts
|
|
})
|
|
|
|
for g in role_groups:
|
|
hosts = ansible_service.get_group_hosts(g)
|
|
groups.append({
|
|
"name": g,
|
|
"type": "role",
|
|
"display_name": g.replace('role_', ''),
|
|
"hosts_count": len(hosts),
|
|
"hosts": hosts
|
|
})
|
|
|
|
return {
|
|
"groups": groups,
|
|
"env_count": len(env_groups),
|
|
"role_count": len(role_groups)
|
|
}
|
|
|
|
|
|
@app.get("/api/groups/{group_name}")
|
|
async def get_group_details(group_name: str, api_key_valid: bool = Depends(verify_api_key)):
|
|
"""Récupère les détails d'un groupe spécifique"""
|
|
if not ansible_service.group_exists(group_name):
|
|
raise HTTPException(status_code=404, detail=f"Groupe '{group_name}' non trouvé")
|
|
|
|
hosts = ansible_service.get_group_hosts(group_name)
|
|
group_type = "env" if group_name.startswith("env_") else "role" if group_name.startswith("role_") else "other"
|
|
|
|
return {
|
|
"name": group_name,
|
|
"type": group_type,
|
|
"display_name": group_name.replace('env_', '').replace('role_', ''),
|
|
"hosts_count": len(hosts),
|
|
"hosts": hosts
|
|
}
|
|
|
|
|
|
@app.post("/api/groups")
|
|
async def create_group(group_request: GroupRequest, api_key_valid: bool = Depends(verify_api_key)):
|
|
"""Crée un nouveau groupe d'environnement ou de rôle"""
|
|
# Construire le nom complet du groupe
|
|
prefix = "env_" if group_request.type == "env" else "role_"
|
|
|
|
# Si le nom ne commence pas déjà par le préfixe, l'ajouter
|
|
if group_request.name.startswith(prefix):
|
|
full_name = group_request.name
|
|
else:
|
|
full_name = f"{prefix}{group_request.name}"
|
|
|
|
# Vérifier si le groupe existe déjà
|
|
if ansible_service.group_exists(full_name):
|
|
raise HTTPException(status_code=400, detail=f"Le groupe '{full_name}' existe déjà")
|
|
|
|
# Créer le groupe
|
|
success = ansible_service.add_group(full_name)
|
|
|
|
if not success:
|
|
raise HTTPException(status_code=500, detail="Erreur lors de la création du groupe")
|
|
|
|
return {
|
|
"success": True,
|
|
"message": f"Groupe '{full_name}' créé avec succès",
|
|
"group": {
|
|
"name": full_name,
|
|
"type": group_request.type,
|
|
"display_name": full_name.replace('env_', '').replace('role_', ''),
|
|
"hosts_count": 0,
|
|
"hosts": []
|
|
}
|
|
}
|
|
|
|
|
|
@app.put("/api/groups/{group_name}")
|
|
async def update_group(group_name: str, group_update: GroupUpdateRequest, api_key_valid: bool = Depends(verify_api_key)):
|
|
"""Renomme un groupe existant"""
|
|
if not ansible_service.group_exists(group_name):
|
|
raise HTTPException(status_code=404, detail=f"Groupe '{group_name}' non trouvé")
|
|
|
|
# Déterminer le type du groupe
|
|
if group_name.startswith("env_"):
|
|
prefix = "env_"
|
|
group_type = "env"
|
|
elif group_name.startswith("role_"):
|
|
prefix = "role_"
|
|
group_type = "role"
|
|
else:
|
|
raise HTTPException(status_code=400, detail="Seuls les groupes env_ et role_ peuvent être modifiés")
|
|
|
|
# Construire le nouveau nom
|
|
if group_update.new_name.startswith(prefix):
|
|
new_full_name = group_update.new_name
|
|
else:
|
|
new_full_name = f"{prefix}{group_update.new_name}"
|
|
|
|
# Vérifier si le nouveau nom existe déjà
|
|
if ansible_service.group_exists(new_full_name):
|
|
raise HTTPException(status_code=400, detail=f"Le groupe '{new_full_name}' existe déjà")
|
|
|
|
# Renommer le groupe
|
|
success = ansible_service.rename_group(group_name, new_full_name)
|
|
|
|
if not success:
|
|
raise HTTPException(status_code=500, detail="Erreur lors du renommage du groupe")
|
|
|
|
hosts = ansible_service.get_group_hosts(new_full_name)
|
|
|
|
return {
|
|
"success": True,
|
|
"message": f"Groupe renommé de '{group_name}' vers '{new_full_name}'",
|
|
"group": {
|
|
"name": new_full_name,
|
|
"type": group_type,
|
|
"display_name": new_full_name.replace('env_', '').replace('role_', ''),
|
|
"hosts_count": len(hosts),
|
|
"hosts": hosts
|
|
}
|
|
}
|
|
|
|
|
|
@app.delete("/api/groups/{group_name}")
|
|
async def delete_group(
|
|
group_name: str,
|
|
move_hosts_to: Optional[str] = None,
|
|
api_key_valid: bool = Depends(verify_api_key)
|
|
):
|
|
"""Supprime un groupe existant
|
|
|
|
Args:
|
|
group_name: Nom du groupe à supprimer
|
|
move_hosts_to: Groupe vers lequel déplacer les hôtes (optionnel, query param)
|
|
"""
|
|
if not ansible_service.group_exists(group_name):
|
|
raise HTTPException(status_code=404, detail=f"Groupe '{group_name}' non trouvé")
|
|
|
|
# Vérifier si le groupe contient des hôtes
|
|
hosts_in_group = ansible_service.get_group_hosts(group_name)
|
|
|
|
# Si le groupe contient des hôtes et qu'on ne spécifie pas où les déplacer
|
|
if hosts_in_group and not move_hosts_to:
|
|
# Pour les groupes d'environnement, c'est critique car les hôtes doivent avoir un env
|
|
if group_name.startswith("env_"):
|
|
raise HTTPException(
|
|
status_code=400,
|
|
detail=f"Le groupe contient {len(hosts_in_group)} hôte(s). Spécifiez 'move_hosts_to' pour les déplacer."
|
|
)
|
|
|
|
# Si on veut déplacer les hôtes, vérifier que le groupe cible est valide
|
|
if move_hosts_to:
|
|
# Vérifier que le groupe cible est du même type
|
|
if group_name.startswith("env_") and not move_hosts_to.startswith("env_"):
|
|
raise HTTPException(status_code=400, detail="Les hôtes doivent être déplacés vers un groupe d'environnement")
|
|
if group_name.startswith("role_") and not move_hosts_to.startswith("role_"):
|
|
raise HTTPException(status_code=400, detail="Les hôtes doivent être déplacés vers un groupe de rôle")
|
|
|
|
# Supprimer le groupe
|
|
result = ansible_service.delete_group(group_name, move_hosts_to)
|
|
|
|
if not result.get("success"):
|
|
raise HTTPException(status_code=500, detail=result.get("error", "Erreur lors de la suppression"))
|
|
|
|
return {
|
|
"success": True,
|
|
"message": f"Groupe '{group_name}' supprimé avec succès",
|
|
"hosts_affected": result.get("hosts_affected", []),
|
|
"hosts_moved_to": result.get("hosts_moved_to")
|
|
}
|
|
|
|
|
|
def _host_to_response(host_obj, bootstrap_status: Optional["BootstrapStatus"] = None) -> Dict[str, Any]:
|
|
"""Map DB host + latest bootstrap to API-compatible payload."""
|
|
return {
|
|
"id": host_obj.id,
|
|
"name": host_obj.name,
|
|
"ip": getattr(host_obj, "ip_address", None),
|
|
"status": host_obj.status,
|
|
"os": "Linux", # valeur par défaut faute d'info stockée
|
|
"last_seen": host_obj.last_seen,
|
|
"created_at": host_obj.created_at,
|
|
"groups": [g for g in [getattr(host_obj, "ansible_group", None)] if g],
|
|
"bootstrap_ok": (bootstrap_status.status == "success") if bootstrap_status else False,
|
|
"bootstrap_date": bootstrap_status.last_attempt if bootstrap_status else None,
|
|
}
|
|
|
|
|
|
@app.get("/api/hosts/by-name/{host_name}")
|
|
async def get_host_by_name(
|
|
host_name: str,
|
|
api_key_valid: bool = Depends(verify_api_key),
|
|
db_session: AsyncSession = Depends(get_db),
|
|
):
|
|
repo = HostRepository(db_session)
|
|
bs_repo = BootstrapStatusRepository(db_session)
|
|
host = await repo.get_by_ip(host_name) or await repo.get(host_name)
|
|
if not host:
|
|
raise HTTPException(status_code=404, detail="Hôte non trouvé")
|
|
bootstrap = await bs_repo.latest_for_host(host.id)
|
|
return _host_to_response(host, bootstrap)
|
|
|
|
|
|
@app.get("/api/hosts")
|
|
async def get_hosts(
|
|
bootstrap_status: Optional[str] = None,
|
|
limit: int = 100,
|
|
offset: int = 0,
|
|
api_key_valid: bool = Depends(verify_api_key),
|
|
db_session: AsyncSession = Depends(get_db),
|
|
):
|
|
repo = HostRepository(db_session)
|
|
bs_repo = BootstrapStatusRepository(db_session)
|
|
hosts = await repo.list(limit=limit, offset=offset)
|
|
# Si la base ne contient encore aucun hôte, on retombe sur les hôtes Ansible via la DB hybride
|
|
if not hosts:
|
|
hybrid_hosts = db.hosts
|
|
fallback_results = []
|
|
for h in hybrid_hosts:
|
|
# Appliquer les mêmes filtres de bootstrap que pour la version DB
|
|
if bootstrap_status == "ready" and not h.bootstrap_ok:
|
|
continue
|
|
if bootstrap_status == "not_configured" and h.bootstrap_ok:
|
|
continue
|
|
|
|
fallback_results.append(
|
|
{
|
|
"id": h.id,
|
|
"name": h.name,
|
|
"ip": h.ip,
|
|
"status": h.status,
|
|
"os": h.os,
|
|
"last_seen": h.last_seen,
|
|
# created_at est déjà géré par le modèle Pydantic Host (default_factory)
|
|
"created_at": h.created_at,
|
|
"groups": h.groups,
|
|
"bootstrap_ok": h.bootstrap_ok,
|
|
"bootstrap_date": h.bootstrap_date,
|
|
}
|
|
)
|
|
return fallback_results
|
|
|
|
results = []
|
|
for host in hosts:
|
|
bootstrap = await bs_repo.latest_for_host(host.id)
|
|
if bootstrap_status == "ready" and not (bootstrap and bootstrap.status == "success"):
|
|
continue
|
|
if bootstrap_status == "not_configured" and bootstrap and bootstrap.status == "success":
|
|
continue
|
|
results.append(_host_to_response(host, bootstrap))
|
|
return results
|
|
|
|
|
|
@app.get("/api/hosts/{host_id}")
|
|
async def get_host(
|
|
host_id: str,
|
|
api_key_valid: bool = Depends(verify_api_key),
|
|
db_session: AsyncSession = Depends(get_db),
|
|
):
|
|
repo = HostRepository(db_session)
|
|
bs_repo = BootstrapStatusRepository(db_session)
|
|
host = await repo.get(host_id)
|
|
if not host:
|
|
raise HTTPException(status_code=404, detail="Hôte non trouvé")
|
|
bootstrap = await bs_repo.latest_for_host(host.id)
|
|
return _host_to_response(host, bootstrap)
|
|
|
|
|
|
@app.post("/api/hosts")
|
|
async def create_host(
|
|
host_request: HostRequest,
|
|
api_key_valid: bool = Depends(verify_api_key),
|
|
db_session: AsyncSession = Depends(get_db),
|
|
):
|
|
repo = HostRepository(db_session)
|
|
bs_repo = BootstrapStatusRepository(db_session)
|
|
|
|
    # Check whether the host already exists
|
|
existing = await repo.get_by_ip(host_request.name)
|
|
if existing:
|
|
raise HTTPException(status_code=400, detail=f"L'hôte '{host_request.name}' existe déjà")
|
|
|
|
# Valider le groupe d'environnement
|
|
env_groups = ansible_service.get_env_groups()
|
|
if host_request.env_group not in env_groups and not host_request.env_group.startswith("env_"):
|
|
raise HTTPException(status_code=400, detail=f"Le groupe d'environnement doit commencer par 'env_'. Groupes existants: {env_groups}")
|
|
|
|
# Valider les groupes de rôles
|
|
role_groups = ansible_service.get_role_groups()
|
|
for role in host_request.role_groups:
|
|
if role not in role_groups and not role.startswith("role_"):
|
|
raise HTTPException(status_code=400, detail=f"Le groupe de rôle '{role}' doit commencer par 'role_'. Groupes existants: {role_groups}")
|
|
|
|
try:
|
|
# Ajouter l'hôte à l'inventaire Ansible
|
|
ansible_service.add_host_to_inventory(
|
|
hostname=host_request.name,
|
|
env_group=host_request.env_group,
|
|
role_groups=host_request.role_groups,
|
|
ansible_host=host_request.ip,
|
|
)
|
|
|
|
# Créer en base
|
|
host = await repo.create(
|
|
id=uuid.uuid4().hex,
|
|
name=host_request.name,
|
|
ip_address=host_request.ip or host_request.name,
|
|
ansible_group=host_request.env_group,
|
|
status="unknown",
|
|
reachable=False,
|
|
last_seen=None,
|
|
)
|
|
bootstrap = await bs_repo.latest_for_host(host.id)
|
|
|
|
await db_session.commit()
|
|
|
|
# Notifier les clients WebSocket
|
|
await ws_manager.broadcast(
|
|
{
|
|
"type": "host_created",
|
|
"data": _host_to_response(host, bootstrap),
|
|
}
|
|
)
|
|
|
|
return {
|
|
"message": f"Hôte '{host_request.name}' ajouté avec succès",
|
|
"host": _host_to_response(host, bootstrap),
|
|
"inventory_updated": True,
|
|
}
|
|
|
|
except HTTPException:
|
|
raise
|
|
except Exception as e:
|
|
await db_session.rollback()
|
|
raise HTTPException(status_code=500, detail=f"Erreur lors de l'ajout de l'hôte: {str(e)}")
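# Illustrative request body for POST /api/hosts (field names taken from the HostRequest usage above):
#   {"name": "web01", "ip": "192.168.1.50", "env_group": "env_prod", "role_groups": ["role_web"]}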
|
|
|
|
|
|
@app.put("/api/hosts/{host_name}")
|
|
async def update_host(
|
|
host_name: str,
|
|
update_request: HostUpdateRequest,
|
|
api_key_valid: bool = Depends(verify_api_key),
|
|
db_session: AsyncSession = Depends(get_db),
|
|
):
|
|
repo = HostRepository(db_session)
|
|
bs_repo = BootstrapStatusRepository(db_session)
|
|
host = await repo.get_by_ip(host_name) or await repo.get(host_name)
|
|
if not host:
|
|
raise HTTPException(status_code=404, detail=f"Hôte '{host_name}' non trouvé")
|
|
|
|
# Valider le groupe d'environnement si fourni
|
|
if update_request.env_group:
|
|
env_groups = ansible_service.get_env_groups()
|
|
if update_request.env_group not in env_groups and not update_request.env_group.startswith("env_"):
|
|
raise HTTPException(status_code=400, detail=f"Le groupe d'environnement doit commencer par 'env_'")
|
|
|
|
# Valider les groupes de rôles si fournis
|
|
if update_request.role_groups:
|
|
for role in update_request.role_groups:
|
|
if not role.startswith("role_"):
|
|
raise HTTPException(status_code=400, detail=f"Le groupe de rôle '{role}' doit commencer par 'role_'")
|
|
|
|
try:
|
|
ansible_service.update_host_groups(
|
|
hostname=host_name,
|
|
env_group=update_request.env_group,
|
|
role_groups=update_request.role_groups,
|
|
ansible_host=update_request.ansible_host,
|
|
)
|
|
|
|
await repo.update(
|
|
host,
|
|
ansible_group=update_request.env_group or host.ansible_group,
|
|
)
|
|
await db_session.commit()
|
|
|
|
bootstrap = await bs_repo.latest_for_host(host.id)
|
|
|
|
await ws_manager.broadcast(
|
|
{
|
|
"type": "host_updated",
|
|
"data": _host_to_response(host, bootstrap),
|
|
}
|
|
)
|
|
|
|
return {
|
|
"message": f"Hôte '{host_name}' mis à jour avec succès",
|
|
"host": _host_to_response(host, bootstrap),
|
|
"inventory_updated": True,
|
|
}
|
|
|
|
except HTTPException:
|
|
await db_session.rollback()
|
|
raise
|
|
except Exception as e:
|
|
await db_session.rollback()
|
|
raise HTTPException(status_code=500, detail=f"Erreur lors de la mise à jour: {str(e)}")
|
|
|
|
|
|
@app.delete("/api/hosts/by-name/{host_name}")
|
|
async def delete_host_by_name(
|
|
host_name: str,
|
|
api_key_valid: bool = Depends(verify_api_key),
|
|
db_session: AsyncSession = Depends(get_db),
|
|
):
|
|
repo = HostRepository(db_session)
|
|
host = await repo.get_by_ip(host_name) or await repo.get(host_name)
|
|
if not host:
|
|
raise HTTPException(status_code=404, detail=f"Hôte '{host_name}' non trouvé")
|
|
|
|
try:
|
|
ansible_service.remove_host_from_inventory(host_name)
|
|
await repo.soft_delete(host.id)
|
|
await db_session.commit()
|
|
|
|
await ws_manager.broadcast(
|
|
{
|
|
"type": "host_deleted",
|
|
"data": {"name": host_name},
|
|
}
|
|
)
|
|
|
|
return {"message": f"Hôte '{host_name}' supprimé avec succès", "inventory_updated": True}
|
|
except HTTPException:
|
|
await db_session.rollback()
|
|
raise
|
|
except Exception as e:
|
|
await db_session.rollback()
|
|
raise HTTPException(status_code=500, detail=f"Erreur lors de la suppression: {str(e)}")
|
|
|
|
|
|
@app.delete("/api/hosts/{host_id}")
|
|
async def delete_host(
|
|
host_id: str,
|
|
api_key_valid: bool = Depends(verify_api_key),
|
|
db_session: AsyncSession = Depends(get_db),
|
|
):
|
|
repo = HostRepository(db_session)
|
|
host = await repo.get(host_id)
|
|
if not host:
|
|
raise HTTPException(status_code=404, detail="Hôte non trouvé")
|
|
|
|
return await delete_host_by_name(host.name, api_key_valid, db_session)
|
|
|
|
@app.get("/api/tasks")
|
|
async def get_tasks(
|
|
limit: int = 100,
|
|
offset: int = 0,
|
|
api_key_valid: bool = Depends(verify_api_key),
|
|
db_session: AsyncSession = Depends(get_db),
|
|
):
|
|
"""Récupère la liste de toutes les tâches"""
|
|
repo = TaskRepository(db_session)
|
|
tasks = await repo.list(limit=limit, offset=offset)
|
|
return [
|
|
{
|
|
"id": t.id,
|
|
"name": t.action,
|
|
"host": t.target,
|
|
"status": t.status,
|
|
"progress": 100 if t.status == "completed" else (50 if t.status == "running" else 0),
|
|
"start_time": t.started_at,
|
|
"end_time": t.completed_at,
|
|
"duration": None,
|
|
"output": t.result_data.get("output") if t.result_data else None,
|
|
"error": t.error_message,
|
|
}
|
|
for t in tasks
|
|
]
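# Note: "progress" above is a coarse heuristic derived from the task status
# (completed -> 100, running -> 50, otherwise 0); no real-time percentage is tracked.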
|
|
|
|
|
|
@app.post("/api/tasks")
|
|
async def create_task(
|
|
task_request: TaskRequest,
|
|
api_key_valid: bool = Depends(verify_api_key),
|
|
db_session: AsyncSession = Depends(get_db),
|
|
):
|
|
"""Crée une nouvelle tâche et exécute le playbook Ansible correspondant"""
|
|
task_names = {
|
|
'upgrade': 'Mise à jour système',
|
|
'reboot': 'Redémarrage système',
|
|
'health-check': 'Vérification de santé',
|
|
'backup': 'Sauvegarde',
|
|
'deploy': 'Déploiement',
|
|
'rollback': 'Rollback',
|
|
'maintenance': 'Maintenance',
|
|
'bootstrap': 'Bootstrap Ansible'
|
|
}
|
|
|
|
repo = TaskRepository(db_session)
|
|
task_id = uuid.uuid4().hex
|
|
target = task_request.host or task_request.group or "all"
|
|
playbook = ACTION_PLAYBOOK_MAP.get(task_request.action)
|
|
|
|
task_obj = await repo.create(
|
|
id=task_id,
|
|
action=task_request.action,
|
|
target=target,
|
|
playbook=playbook,
|
|
status="running",
|
|
)
|
|
await repo.update(task_obj, started_at=datetime.now(timezone.utc))
|
|
await db_session.commit()
|
|
|
|
response_data = {
|
|
"id": task_obj.id,
|
|
"name": task_names.get(task_request.action, f"Tâche {task_request.action}"),
|
|
"host": target,
|
|
"status": "running",
|
|
"progress": 0,
|
|
"start_time": task_obj.started_at,
|
|
"end_time": None,
|
|
"duration": None,
|
|
"output": None,
|
|
"error": None,
|
|
}
|
|
|
|
# Notifier les clients WebSocket
|
|
await ws_manager.broadcast({
|
|
"type": "task_created",
|
|
"data": response_data
|
|
})
|
|
|
|
# Exécuter le playbook Ansible en arrière-plan
|
|
if playbook:
|
|
asyncio.create_task(execute_ansible_task(
|
|
task_id=task_obj.id,
|
|
playbook=playbook,
|
|
target=target,
|
|
extra_vars=task_request.extra_vars,
|
|
check_mode=task_request.dry_run
|
|
))
|
|
else:
|
|
# Pas de playbook correspondant, simuler
|
|
asyncio.create_task(simulate_task_execution(task_obj.id))
|
|
|
|
return response_data
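# Illustrative request body for POST /api/tasks (fields taken from the TaskRequest usage above):
#   {"action": "health-check", "group": "env_prod", "dry_run": false}
# "action" is mapped to a playbook through ACTION_PLAYBOOK_MAP; unknown actions fall back to a
# simulated execution.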
|
|
|
|
|
|
# ===== TASK LOG ENDPOINTS (MARKDOWN) =====
# IMPORTANT: these routes must be declared BEFORE /api/tasks/{task_id} to avoid route conflicts
|
|
|
|
@app.get("/api/tasks/logs")
|
|
async def get_task_logs(
|
|
status: Optional[str] = None,
|
|
year: Optional[str] = None,
|
|
month: Optional[str] = None,
|
|
day: Optional[str] = None,
|
|
target: Optional[str] = None,
|
|
category: Optional[str] = None,
|
|
api_key_valid: bool = Depends(verify_api_key)
|
|
):
|
|
"""Récupère les logs de tâches depuis les fichiers markdown avec filtrage"""
|
|
logs = task_log_service.get_task_logs(
|
|
year=year,
|
|
month=month,
|
|
day=day,
|
|
status=status,
|
|
target=target,
|
|
category=category
|
|
)
|
|
return {
|
|
"logs": [log.dict() for log in logs],
|
|
"count": len(logs),
|
|
"filters": {
|
|
"status": status,
|
|
"year": year,
|
|
"month": month,
|
|
"day": day,
|
|
"target": target
|
|
}
|
|
}
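# Illustrative filtered call: GET /api/tasks/logs?year=2024&month=05&status=failed
# (task logs live under DIR_LOGS_TASKS in YYYY/MM/DD sub-folders, per the configuration at the top
# of this file).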
|
|
|
|
|
|
@app.get("/api/tasks/logs/dates")
|
|
async def get_task_logs_dates(api_key_valid: bool = Depends(verify_api_key)):
|
|
"""Récupère la structure des dates disponibles pour le filtrage"""
|
|
return task_log_service.get_available_dates()
|
|
|
|
|
|
@app.get("/api/tasks/logs/stats")
|
|
async def get_task_logs_stats(api_key_valid: bool = Depends(verify_api_key)):
|
|
"""Récupère les statistiques des logs de tâches"""
|
|
return task_log_service.get_stats()
|
|
|
|
|
|
@app.get("/api/tasks/logs/{log_id}")
|
|
async def get_task_log_content(log_id: str, api_key_valid: bool = Depends(verify_api_key)):
|
|
"""Récupère le contenu d'un log de tâche spécifique"""
|
|
logs = task_log_service.get_task_logs()
|
|
log = next((l for l in logs if l.id == log_id), None)
|
|
|
|
if not log:
|
|
raise HTTPException(status_code=404, detail="Log non trouvé")
|
|
|
|
try:
|
|
content = Path(log.path).read_text(encoding='utf-8')
|
|
return {
|
|
"log": log.dict(),
|
|
"content": content
|
|
}
|
|
except Exception as e:
|
|
raise HTTPException(status_code=500, detail=f"Erreur lecture du fichier: {str(e)}")
|
|
|
|
|
|
@app.delete("/api/tasks/logs/{log_id}")
|
|
async def delete_task_log(log_id: str, api_key_valid: bool = Depends(verify_api_key)):
|
|
"""Supprime un fichier markdown de log de tâche."""
|
|
logs = task_log_service.get_task_logs()
|
|
log = next((l for l in logs if l.id == log_id), None)
|
|
|
|
if not log:
|
|
raise HTTPException(status_code=404, detail="Log non trouvé")
|
|
|
|
try:
|
|
log_path = Path(log.path)
|
|
if log_path.exists():
|
|
log_path.unlink()
|
|
return {"message": "Log supprimé", "id": log_id}
|
|
except Exception as e:
|
|
raise HTTPException(status_code=500, detail=f"Erreur suppression du fichier: {str(e)}")
|
|
|
|
|
|
@app.get("/api/tasks/running")
|
|
async def get_running_tasks(
|
|
api_key_valid: bool = Depends(verify_api_key),
|
|
db_session: AsyncSession = Depends(get_db),
|
|
):
|
|
"""Récupère uniquement les tâches en cours d'exécution (running ou pending)"""
|
|
repo = TaskRepository(db_session)
|
|
tasks = await repo.list(limit=100, offset=0)
|
|
running_tasks = [t for t in tasks if t.status in ("running", "pending")]
|
|
return {
|
|
"tasks": [
|
|
{
|
|
"id": t.id,
|
|
"name": t.action,
|
|
"host": t.target,
|
|
"status": t.status,
|
|
"progress": 50 if t.status == "running" else 0,
|
|
"start_time": t.started_at,
|
|
"end_time": t.completed_at,
|
|
}
|
|
for t in running_tasks
|
|
],
|
|
"count": len(running_tasks)
|
|
}
|
|
|
|
|
|
@app.get("/api/tasks/{task_id}")
|
|
async def get_task(
|
|
task_id: str,
|
|
api_key_valid: bool = Depends(verify_api_key),
|
|
db_session: AsyncSession = Depends(get_db),
|
|
):
|
|
"""Récupère une tâche spécifique"""
|
|
repo = TaskRepository(db_session)
|
|
task = await repo.get(task_id)
|
|
if not task:
|
|
raise HTTPException(status_code=404, detail="Tâche non trouvée")
|
|
return {
|
|
"id": task.id,
|
|
"name": task.action,
|
|
"host": task.target,
|
|
"status": task.status,
|
|
"progress": 100 if task.status == "completed" else (50 if task.status == "running" else 0),
|
|
"start_time": task.started_at,
|
|
"end_time": task.completed_at,
|
|
"duration": None,
|
|
"output": task.result_data.get("output") if task.result_data else None,
|
|
"error": task.error_message,
|
|
}
|
|
|
|
|
|
@app.delete("/api/tasks/{task_id}")
|
|
async def delete_task(
|
|
task_id: str,
|
|
api_key_valid: bool = Depends(verify_api_key),
|
|
db_session: AsyncSession = Depends(get_db),
|
|
):
|
|
"""Supprime une tâche (soft delete non implémenté pour tasks, suppression directe)"""
|
|
repo = TaskRepository(db_session)
|
|
task = await repo.get(task_id)
|
|
if not task:
|
|
raise HTTPException(status_code=404, detail="Tâche non trouvée")
|
|
|
|
await db_session.delete(task)
|
|
await db_session.commit()
|
|
|
|
# Notifier les clients WebSocket
|
|
await ws_manager.broadcast({
|
|
"type": "task_deleted",
|
|
"data": {"id": task_id}
|
|
})
|
|
|
|
return {"message": "Tâche supprimée avec succès"}
|
|
|
|
@app.get("/api/logs")
|
|
async def get_logs(
|
|
limit: int = 50,
|
|
offset: int = 0,
|
|
level: Optional[str] = None,
|
|
source: Optional[str] = None,
|
|
api_key_valid: bool = Depends(verify_api_key),
|
|
db_session: AsyncSession = Depends(get_db),
|
|
):
|
|
"""Récupère les logs récents avec filtrage optionnel"""
|
|
repo = LogRepository(db_session)
|
|
logs = await repo.list(limit=limit, offset=offset, level=level, source=source)
|
|
return [
|
|
{
|
|
"id": log.id,
|
|
"timestamp": log.created_at,
|
|
"level": log.level,
|
|
"message": log.message,
|
|
"source": log.source,
|
|
"host": log.host_id,
|
|
}
|
|
for log in logs
|
|
]
|
|
|
|
|
|
@app.post("/api/logs")
|
|
async def create_log(
|
|
level: str,
|
|
message: str,
|
|
source: Optional[str] = None,
|
|
host_id: Optional[str] = None,
|
|
api_key_valid: bool = Depends(verify_api_key),
|
|
db_session: AsyncSession = Depends(get_db),
|
|
):
|
|
"""Ajoute une nouvelle entrée de log"""
|
|
repo = LogRepository(db_session)
|
|
log = await repo.create(
|
|
level=level.upper(),
|
|
message=message,
|
|
source=source,
|
|
host_id=host_id,
|
|
)
|
|
await db_session.commit()
|
|
|
|
response_data = {
|
|
"id": log.id,
|
|
"timestamp": log.created_at,
|
|
"level": log.level,
|
|
"message": log.message,
|
|
"source": log.source,
|
|
"host": log.host_id,
|
|
}
|
|
|
|
# Notifier les clients WebSocket
|
|
await ws_manager.broadcast({
|
|
"type": "new_log",
|
|
"data": response_data
|
|
})
|
|
|
|
return response_data
|
|
|
|
|
|
@app.delete("/api/logs")
|
|
async def clear_logs(
|
|
api_key_valid: bool = Depends(verify_api_key),
|
|
db_session: AsyncSession = Depends(get_db),
|
|
):
|
|
"""Efface tous les logs (attention: opération destructive)"""
|
|
from sqlalchemy import delete
|
|
from models.log import Log as LogModel
|
|
await db_session.execute(delete(LogModel))
|
|
await db_session.commit()
|
|
return {"message": "Tous les logs ont été supprimés"}
|
|
|
|
@app.get("/api/metrics", response_model=SystemMetrics)
|
|
async def get_metrics(api_key_valid: bool = Depends(verify_api_key)):
|
|
"""Récupère les métriques système calculées dynamiquement"""
|
|
return db.metrics
|
|
|
|
|
|
@app.post("/api/hosts/refresh")
|
|
async def refresh_hosts(api_key_valid: bool = Depends(verify_api_key)):
|
|
"""Force le rechargement des hôtes depuis l'inventaire Ansible"""
|
|
ansible_service.invalidate_cache() # Clear ansible inventory cache first
|
|
hosts = db.refresh_hosts()
|
|
|
|
# Notifier les clients WebSocket
|
|
await ws_manager.broadcast({
|
|
"type": "hosts_refreshed",
|
|
"data": {"count": len(hosts)}
|
|
})
|
|
|
|
return {"message": f"{len(hosts)} hôtes rechargés depuis l'inventaire Ansible"}
|
|
|
|
|
|
# ===== ENDPOINTS ANSIBLE =====
|
|
|
|
@app.get("/api/ansible/playbooks")
|
|
async def get_ansible_playbooks(api_key_valid: bool = Depends(verify_api_key)):
|
|
"""Liste les playbooks Ansible disponibles avec leurs catégories"""
|
|
return {
|
|
"playbooks": ansible_service.get_playbooks(),
|
|
"categories": ansible_service.get_playbook_categories(),
|
|
"ansible_dir": str(ANSIBLE_DIR)
|
|
}
|
|
|
|
@app.get("/api/ansible/inventory")
|
|
async def get_ansible_inventory(
|
|
group: Optional[str] = None,
|
|
api_key_valid: bool = Depends(verify_api_key)
|
|
):
|
|
"""Récupère l'inventaire Ansible avec les hôtes et groupes.
|
|
|
|
Args:
|
|
group: Filtrer les hôtes par groupe (optionnel)
|
|
"""
|
|
return {
|
|
"hosts": [h.dict() for h in ansible_service.get_hosts_from_inventory(group_filter=group)],
|
|
"groups": ansible_service.get_groups(),
|
|
"inventory_path": str(ansible_service.inventory_path),
|
|
"filter": group
|
|
}
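# Illustrative call: GET /api/ansible/inventory?group=env_prod
# returns only the hosts belonging to that group, plus the full group list and the inventory path.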
|
|
|
|
@app.post("/api/ansible/execute")
|
|
async def execute_ansible_playbook(
|
|
request: AnsibleExecutionRequest,
|
|
api_key_valid: bool = Depends(verify_api_key)
|
|
):
|
|
"""Exécute un playbook Ansible directement"""
|
|
start_time_dt = datetime.now(timezone.utc)
|
|
|
|
# Créer une tâche pour l'historique
|
|
task_id = db.get_next_id("tasks")
|
|
playbook_name = request.playbook.replace('.yml', '').replace('-', ' ').title()
|
|
task = Task(
|
|
id=task_id,
|
|
name=f"Playbook: {playbook_name}",
|
|
host=request.target,
|
|
status="running",
|
|
progress=0,
|
|
start_time=start_time_dt
|
|
)
|
|
db.tasks.insert(0, task)
|
|
|
|
try:
|
|
result = await ansible_service.execute_playbook(
|
|
playbook=request.playbook,
|
|
target=request.target,
|
|
extra_vars=request.extra_vars,
|
|
check_mode=request.check_mode,
|
|
verbose=request.verbose
|
|
)
|
|
|
|
# Mettre à jour la tâche
|
|
task.status = "completed" if result["success"] else "failed"
|
|
task.progress = 100
|
|
task.end_time = datetime.now(timezone.utc)
|
|
task.duration = f"{result.get('execution_time', 0):.1f}s"
|
|
task.output = result.get("stdout", "")
|
|
task.error = result.get("stderr", "") if not result["success"] else None
|
|
|
|
# Ajouter un log
|
|
log_entry = LogEntry(
|
|
id=db.get_next_id("logs"),
|
|
timestamp=datetime.now(timezone.utc),
|
|
level="INFO" if result["success"] else "ERROR",
|
|
message=f"Playbook {request.playbook} exécuté sur {request.target}: {'succès' if result['success'] else 'échec'}",
|
|
source="ansible",
|
|
host=request.target
|
|
)
|
|
db.logs.insert(0, log_entry)
|
|
|
|
# Sauvegarder le log markdown
|
|
try:
|
|
task_log_service.save_task_log(
|
|
task=task,
|
|
output=result.get("stdout", ""),
|
|
error=result.get("stderr", "")
|
|
)
|
|
except Exception as log_error:
|
|
print(f"Erreur sauvegarde log markdown: {log_error}")
|
|
|
|
await ws_manager.broadcast({
|
|
"type": "ansible_execution",
|
|
"data": result
|
|
})
|
|
|
|
# Ajouter task_id au résultat
|
|
result["task_id"] = task_id
|
|
|
|
return result
|
|
except FileNotFoundError as e:
|
|
task.status = "failed"
|
|
task.end_time = datetime.now(timezone.utc)
|
|
task.error = str(e)
|
|
task_log_service.save_task_log(task=task, error=str(e))
|
|
raise HTTPException(status_code=404, detail=str(e))
|
|
except Exception as e:
|
|
task.status = "failed"
|
|
task.end_time = datetime.now(timezone.utc)
|
|
task.error = str(e)
|
|
task_log_service.save_task_log(task=task, error=str(e))
|
|
raise HTTPException(status_code=500, detail=str(e))
|
|
|
|
@app.get("/api/ansible/groups")
|
|
async def get_ansible_groups(api_key_valid: bool = Depends(verify_api_key)):
|
|
"""Récupère la liste des groupes Ansible"""
|
|
return {"groups": ansible_service.get_groups()}
|
|
|
|
|
|
# ===== ENDPOINTS PLAYBOOKS CRUD =====
|
|
|
|
class PlaybookContentRequest(BaseModel):
|
|
"""Requête pour sauvegarder le contenu d'un playbook"""
|
|
content: str = Field(..., description="Contenu YAML du playbook")
|
|
|
|
|
|
@app.get("/api/playbooks/{filename}/content")
|
|
async def get_playbook_content(
|
|
filename: str,
|
|
api_key_valid: bool = Depends(verify_api_key)
|
|
):
|
|
"""Récupère le contenu d'un playbook"""
|
|
playbook_path = ansible_service.playbooks_dir / filename
|
|
|
|
# Vérifier les extensions valides
|
|
if not filename.endswith(('.yml', '.yaml')):
|
|
raise HTTPException(status_code=400, detail="Extension de fichier invalide. Utilisez .yml ou .yaml")
|
|
|
|
if not playbook_path.exists():
|
|
raise HTTPException(status_code=404, detail=f"Playbook non trouvé: {filename}")
|
|
|
|
# Vérifier que le fichier est bien dans le répertoire playbooks (sécurité)
|
|
try:
|
|
playbook_path.resolve().relative_to(ansible_service.playbooks_dir.resolve())
|
|
except ValueError:
|
|
raise HTTPException(status_code=403, detail="Accès non autorisé")
|
|
|
|
try:
|
|
content = playbook_path.read_text(encoding='utf-8')
|
|
stat = playbook_path.stat()
|
|
return {
|
|
"filename": filename,
|
|
"content": content,
|
|
"size": stat.st_size,
|
|
"modified": datetime.fromtimestamp(stat.st_mtime, tz=timezone.utc).isoformat()
|
|
}
|
|
except Exception as e:
|
|
raise HTTPException(status_code=500, detail=f"Erreur lecture fichier: {str(e)}")
|
|
|
|
|
|
@app.put("/api/playbooks/{filename}/content")
|
|
async def save_playbook_content(
|
|
filename: str,
|
|
request: PlaybookContentRequest,
|
|
api_key_valid: bool = Depends(verify_api_key)
|
|
):
|
|
"""Sauvegarde le contenu d'un playbook (création ou modification)"""
|
|
# Vérifier les extensions valides
|
|
if not filename.endswith(('.yml', '.yaml')):
|
|
raise HTTPException(status_code=400, detail="Extension de fichier invalide. Utilisez .yml ou .yaml")
|
|
|
|
    # Validate the filename (security); `re` is already imported at module level
    if not re.match(r'^[a-zA-Z0-9_-]+\.(yml|yaml)$', filename):
        raise HTTPException(status_code=400, detail="Nom de fichier invalide")
|
|
|
|
playbook_path = ansible_service.playbooks_dir / filename
|
|
|
|
# S'assurer que le répertoire existe
|
|
ansible_service.playbooks_dir.mkdir(parents=True, exist_ok=True)
|
|
|
|
    # Validate the YAML content
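    # yaml.safe_load only constructs plain YAML types, so a regular playbook such as
    #   - hosts: all
    #     tasks: []
    # parses fine, while python-specific tags (e.g. "!!python/object/apply:...") are rejected.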
|
|
try:
|
|
parsed = yaml.safe_load(request.content)
|
|
if parsed is None:
|
|
raise HTTPException(status_code=400, detail="Contenu YAML vide ou invalide")
|
|
except yaml.YAMLError as e:
|
|
raise HTTPException(status_code=400, detail=f"Erreur de syntaxe YAML: {str(e)}")
|
|
|
|
is_new = not playbook_path.exists()
|
|
|
|
try:
|
|
playbook_path.write_text(request.content, encoding='utf-8')
|
|
stat = playbook_path.stat()
|
|
|
|
# Log l'action
|
|
action = "créé" if is_new else "modifié"
|
|
log_entry = LogEntry(
|
|
id=db.get_next_id("logs"),
|
|
timestamp=datetime.now(timezone.utc),
|
|
level="INFO",
|
|
message=f"Playbook {filename} {action}",
|
|
source="playbook_editor"
|
|
)
|
|
db.logs.insert(0, log_entry)
|
|
|
|
return {
|
|
"success": True,
|
|
"message": f"Playbook {filename} {'créé' if is_new else 'sauvegardé'} avec succès",
|
|
"filename": filename,
|
|
"size": stat.st_size,
|
|
"modified": datetime.fromtimestamp(stat.st_mtime, tz=timezone.utc).isoformat(),
|
|
"is_new": is_new
|
|
}
|
|
except Exception as e:
|
|
raise HTTPException(status_code=500, detail=f"Erreur sauvegarde fichier: {str(e)}")
|
|
|
|
|
|
@app.delete("/api/playbooks/{filename}")
|
|
async def delete_playbook(
|
|
filename: str,
|
|
api_key_valid: bool = Depends(verify_api_key)
|
|
):
|
|
"""Supprime un playbook"""
|
|
# Vérifier les extensions valides
|
|
if not filename.endswith(('.yml', '.yaml')):
|
|
raise HTTPException(status_code=400, detail="Extension de fichier invalide")
|
|
|
|
playbook_path = ansible_service.playbooks_dir / filename
|
|
|
|
if not playbook_path.exists():
|
|
raise HTTPException(status_code=404, detail=f"Playbook non trouvé: {filename}")
|
|
|
|
# Vérifier que le fichier est bien dans le répertoire playbooks (sécurité)
|
|
try:
|
|
playbook_path.resolve().relative_to(ansible_service.playbooks_dir.resolve())
|
|
except ValueError:
|
|
raise HTTPException(status_code=403, detail="Accès non autorisé")
|
|
|
|
try:
|
|
playbook_path.unlink()
|
|
|
|
# Log l'action
|
|
log_entry = LogEntry(
|
|
id=db.get_next_id("logs"),
|
|
timestamp=datetime.now(timezone.utc),
|
|
level="WARN",
|
|
message=f"Playbook {filename} supprimé",
|
|
source="playbook_editor"
|
|
)
|
|
db.logs.insert(0, log_entry)
|
|
|
|
return {
|
|
"success": True,
|
|
"message": f"Playbook {filename} supprimé avec succès"
|
|
}
|
|
except Exception as e:
|
|
raise HTTPException(status_code=500, detail=f"Erreur suppression fichier: {str(e)}")
|
|
|
|
|
|
@app.get("/api/ansible/ssh-config")
|
|
async def get_ssh_config(api_key_valid: bool = Depends(verify_api_key)):
|
|
"""Diagnostic de la configuration SSH pour le bootstrap"""
|
|
ssh_key_path = Path(SSH_KEY_PATH)
|
|
ssh_dir = ssh_key_path.parent
|
|
|
|
# Lister les fichiers dans le répertoire SSH
|
|
available_files = []
|
|
if ssh_dir.exists():
|
|
available_files = [f.name for f in ssh_dir.iterdir()]
|
|
|
|
# Vérifier les clés
|
|
private_key_exists = ssh_key_path.exists()
|
|
public_key_exists = Path(SSH_KEY_PATH + ".pub").exists()
|
|
|
|
# Chercher d'autres clés publiques
|
|
pub_keys_found = []
|
|
for ext in [".pub"]:
|
|
for key_type in ["id_rsa", "id_ed25519", "id_ecdsa", "id_dsa"]:
|
|
key_path = ssh_dir / f"{key_type}{ext}"
|
|
if key_path.exists():
|
|
pub_keys_found.append(str(key_path))
|
|
|
|
# Trouver la clé privée qui sera utilisée
|
|
active_private_key = find_ssh_private_key()
|
|
|
|
return {
|
|
"ssh_key_path": SSH_KEY_PATH,
|
|
"ssh_dir": str(ssh_dir),
|
|
"ssh_dir_exists": ssh_dir.exists(),
|
|
"private_key_exists": private_key_exists,
|
|
"public_key_exists": public_key_exists,
|
|
"available_files": available_files,
|
|
"public_keys_found": pub_keys_found,
|
|
"active_private_key": active_private_key,
|
|
"ssh_user": SSH_USER,
|
|
"sshpass_available": shutil.which("sshpass") is not None,
|
|
}
|
|
|
|
|
|
@app.post("/api/ansible/adhoc", response_model=AdHocCommandResult)
|
|
async def execute_adhoc_command(
|
|
request: AdHocCommandRequest,
|
|
api_key_valid: bool = Depends(verify_api_key)
|
|
):
|
|
"""Exécute une commande ad-hoc Ansible sur un ou plusieurs hôtes.
|
|
|
|
Exemples:
|
|
- Lister les fichiers: {"target": "all", "command": "ls -la /tmp"}
|
|
- Vérifier l'espace disque: {"target": "proxmox", "command": "df -h", "become": true}
|
|
- Redémarrer un service: {"target": "web-servers", "command": "systemctl restart nginx", "become": true}
|
|
"""
|
|
start_time_perf = perf_counter()
|
|
start_time_dt = datetime.now(timezone.utc)
|
|
|
|
# Créer une tâche pour l'historique
|
|
task_id = db.get_next_id("tasks")
|
|
task_name = f"Ad-hoc: {request.command[:40]}{'...' if len(request.command) > 40 else ''}"
|
|
task = Task(
|
|
id=task_id,
|
|
name=task_name,
|
|
host=request.target,
|
|
status="running",
|
|
progress=0,
|
|
start_time=start_time_dt
|
|
)
|
|
db.tasks.insert(0, task)
|
|
|
|
# Construire la commande ansible
|
|
ansible_cmd = [
|
|
"ansible",
|
|
request.target,
|
|
"-i", str(ANSIBLE_DIR / "inventory" / "hosts.yml"),
|
|
"-m", request.module,
|
|
"-a", request.command,
|
|
"--timeout", str(request.timeout),
|
|
]
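    # Illustrative resulting command line (actual values depend on the request and environment):
    #   ansible web01 -i <ANSIBLE_DIR>/inventory/hosts.yml -m shell -a "df -h" --timeout 30 \
    #       --become --private-key ~/.ssh/id_rsa -u automation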
|
|
|
|
# Ajouter les options
|
|
if request.become:
|
|
ansible_cmd.append("--become")
|
|
|
|
private_key = find_ssh_private_key()
|
|
if private_key:
|
|
ansible_cmd.extend(["--private-key", private_key])
|
|
|
|
if SSH_USER:
|
|
ansible_cmd.extend(["-u", SSH_USER])
|
|
|
|
try:
|
|
result = subprocess.run(
|
|
ansible_cmd,
|
|
capture_output=True,
|
|
text=True,
|
|
timeout=request.timeout + 10,
|
|
cwd=str(ANSIBLE_DIR)
|
|
)
|
|
|
|
duration = perf_counter() - start_time_perf
|
|
success = result.returncode == 0
|
|
|
|
# Mettre à jour la tâche
|
|
task.status = "completed" if success else "failed"
|
|
task.progress = 100
|
|
task.end_time = datetime.now(timezone.utc)
|
|
task.duration = f"{round(duration, 2)}s"
|
|
task.output = result.stdout
|
|
task.error = result.stderr if result.stderr else None
|
|
|
|
# Sauvegarder le log de tâche en markdown
|
|
task_log_service.save_task_log(task, output=result.stdout, error=result.stderr or "")
|
|
|
|
# Log de l'exécution
|
|
log_entry = LogEntry(
|
|
id=db.get_next_id("logs"),
|
|
timestamp=datetime.now(timezone.utc),
|
|
level="INFO" if success else "WARN",
|
|
message=f"Ad-hoc [{request.module}] sur {request.target}: {request.command[:50]}{'...' if len(request.command) > 50 else ''}",
|
|
source="ansible-adhoc",
|
|
host=request.target
|
|
)
|
|
db.logs.insert(0, log_entry)
|
|
|
|
# Notifier via WebSocket
|
|
await ws_manager.broadcast({
|
|
"type": "adhoc_executed",
|
|
"data": {
|
|
"target": request.target,
|
|
"command": request.command,
|
|
"success": success,
|
|
"task_id": task_id
|
|
}
|
|
})
|
|
|
|
# Sauvegarder dans l'historique des commandes ad-hoc (pour réutilisation)
|
|
adhoc_history_service.add_command(
|
|
command=request.command,
|
|
target=request.target,
|
|
module=request.module,
|
|
become=request.become
|
|
)
|
|
|
|
return AdHocCommandResult(
|
|
target=request.target,
|
|
command=request.command,
|
|
success=success,
|
|
return_code=result.returncode,
|
|
stdout=result.stdout,
|
|
stderr=result.stderr if result.stderr else None,
|
|
duration=round(duration, 2)
|
|
)
|
|
|
|
except subprocess.TimeoutExpired:
|
|
duration = perf_counter() - start_time_perf
|
|
# Mettre à jour la tâche en échec
|
|
task.status = "failed"
|
|
task.progress = 100
|
|
task.end_time = datetime.now(timezone.utc)
|
|
task.duration = f"{round(duration, 2)}s"
|
|
task.error = f"Timeout après {request.timeout} secondes"
|
|
|
|
# Sauvegarder le log de tâche
|
|
task_log_service.save_task_log(task, error=task.error)
|
|
|
|
return AdHocCommandResult(
|
|
target=request.target,
|
|
command=request.command,
|
|
success=False,
|
|
return_code=-1,
|
|
stdout="",
|
|
stderr=f"Timeout après {request.timeout} secondes",
|
|
duration=round(duration, 2)
|
|
)
|
|
except FileNotFoundError:
|
|
duration = perf_counter() - start_time_perf
|
|
error_msg = "ansible non trouvé. Vérifiez que Ansible est installé et accessible."
|
|
# Mettre à jour la tâche en échec
|
|
task.status = "failed"
|
|
task.progress = 100
|
|
task.end_time = datetime.now(timezone.utc)
|
|
task.duration = f"{round(duration, 2)}s"
|
|
task.error = error_msg
|
|
|
|
# Sauvegarder le log de tâche
|
|
task_log_service.save_task_log(task, error=error_msg)
|
|
|
|
return AdHocCommandResult(
|
|
target=request.target,
|
|
command=request.command,
|
|
success=False,
|
|
return_code=-1,
|
|
stdout="",
|
|
stderr=error_msg,
|
|
duration=round(duration, 2)
|
|
)
|
|
except Exception as e:
|
|
duration = perf_counter() - start_time_perf
|
|
error_msg = f"Erreur interne: {str(e)}"
|
|
# Mettre à jour la tâche en échec
|
|
task.status = "failed"
|
|
task.progress = 100
|
|
task.end_time = datetime.now(timezone.utc)
|
|
task.duration = f"{round(duration, 2)}s"
|
|
task.error = error_msg
|
|
|
|
# Sauvegarder le log de tâche
|
|
task_log_service.save_task_log(task, error=error_msg)
|
|
|
|
# Return a proper result instead of raising HTTP 500
|
|
return AdHocCommandResult(
|
|
target=request.target,
|
|
command=request.command,
|
|
success=False,
|
|
return_code=-1,
|
|
stdout="",
|
|
stderr=error_msg,
|
|
duration=round(duration, 2)
|
|
)
|
|
|
|
|
|
@app.post("/api/ansible/bootstrap", response_model=CommandResult)
|
|
async def bootstrap_ansible_host(
|
|
request: BootstrapRequest,
|
|
api_key_valid: bool = Depends(verify_api_key)
|
|
):
|
|
"""Bootstrap un hôte pour Ansible.
|
|
|
|
Cette opération:
|
|
1. Se connecte à l'hôte via SSH avec le mot de passe root
|
|
2. Crée l'utilisateur d'automatisation (par défaut: automation)
|
|
3. Configure la clé SSH publique pour l'authentification sans mot de passe
|
|
4. Installe et configure sudo pour cet utilisateur
|
|
5. Installe Python3 (requis par Ansible)
|
|
6. Vérifie la connexion SSH par clé
|
|
|
|
Supporte: Debian/Ubuntu, Alpine Linux, FreeBSD
|
|
"""
|
|
import logging
|
|
import traceback
|
|
logger = logging.getLogger("bootstrap_endpoint")
|
|
|
|
try:
|
|
logger.info(f"Bootstrap request for host={request.host}, user={request.automation_user}")
|
|
result = bootstrap_host(
|
|
host=request.host,
|
|
root_password=request.root_password,
|
|
automation_user=request.automation_user
|
|
)
|
|
logger.info(f"Bootstrap result: status={result.status}, return_code={result.return_code}")
|
|
|
|
# Si le bootstrap a échoué (return_code != 0), lever une exception avec les détails
|
|
if result.return_code != 0:
|
|
raise HTTPException(
|
|
status_code=500,
|
|
detail={
|
|
"status": result.status,
|
|
"return_code": result.return_code,
|
|
"stdout": result.stdout,
|
|
"stderr": result.stderr
|
|
}
|
|
)
|
|
|
|
# Trouver le nom de l'hôte (peut être IP ou hostname)
|
|
host_name = request.host
|
|
for h in db.hosts:
|
|
if h.ip == request.host or h.name == request.host:
|
|
host_name = h.name
|
|
break
|
|
|
|
# Enregistrer le statut de bootstrap réussi
|
|
bootstrap_status_service.set_bootstrap_status(
|
|
host_name=host_name,
|
|
success=True,
|
|
details=f"Bootstrap réussi via API (user: {request.automation_user})"
|
|
)
|
|
|
|
# Invalider le cache des hôtes pour recharger avec le nouveau statut
|
|
db._hosts_cache = None
|
|
|
|
# Ajouter un log de succès
|
|
log_entry = LogEntry(
|
|
id=db.get_next_id("logs"),
|
|
timestamp=datetime.now(timezone.utc),
|
|
level="INFO",
|
|
message=f"Bootstrap réussi pour {host_name} (user: {request.automation_user})",
|
|
source="bootstrap",
|
|
host=host_name
|
|
)
|
|
db.logs.insert(0, log_entry)
|
|
|
|
# Notifier via WebSocket
|
|
await ws_manager.broadcast({
|
|
"type": "bootstrap_success",
|
|
"data": {
|
|
"host": host_name,
|
|
"user": request.automation_user,
|
|
"status": "ok",
|
|
"bootstrap_ok": True
|
|
}
|
|
})
|
|
|
|
return result
|
|
|
|
except HTTPException:
|
|
raise
|
|
except Exception as e:
|
|
logger.error(f"Bootstrap exception: {e}")
|
|
logger.error(traceback.format_exc())
|
|
# Ajouter un log d'erreur
|
|
log_entry = LogEntry(
|
|
id=db.get_next_id("logs"),
|
|
timestamp=datetime.now(timezone.utc),
|
|
level="ERROR",
|
|
message=f"Échec bootstrap pour {request.host}: {str(e)}",
|
|
source="bootstrap",
|
|
host=request.host
|
|
)
|
|
db.logs.insert(0, log_entry)
|
|
|
|
raise HTTPException(status_code=500, detail=str(e))
|
|
|
|
|
|
@app.get("/api/health")
|
|
async def global_health_check():
|
|
"""Endpoint de healthcheck global utilisé par Docker.
|
|
|
|
Ne nécessite pas de clé API pour permettre aux orchestrateurs
|
|
de vérifier l'état du service facilement.
|
|
"""
|
|
return {
|
|
"status": "ok",
|
|
"service": "homelab-automation-api",
|
|
"timestamp": datetime.now(timezone.utc).isoformat()
|
|
}
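# Illustrative Docker Compose healthcheck for this endpoint (port is an assumption, adjust to your
# deployment):
#   healthcheck:
#     test: ["CMD", "curl", "-f", "http://localhost:8000/api/health"]
#     interval: 30s
#     timeout: 5s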
# ===== ENDPOINTS BOOTSTRAP STATUS =====
|
|
|
|
@app.get("/api/bootstrap/status")
|
|
async def get_all_bootstrap_status(api_key_valid: bool = Depends(verify_api_key)):
|
|
"""Récupère le statut de bootstrap de tous les hôtes"""
|
|
return {
|
|
"hosts": bootstrap_status_service.get_all_status()
|
|
}
|
|
|
|
|
|
@app.get("/api/bootstrap/status/{host_name}")
|
|
async def get_host_bootstrap_status(
|
|
host_name: str,
|
|
api_key_valid: bool = Depends(verify_api_key)
|
|
):
|
|
"""Récupère le statut de bootstrap d'un hôte spécifique"""
|
|
status = bootstrap_status_service.get_bootstrap_status(host_name)
|
|
return {
|
|
"host": host_name,
|
|
**status
|
|
}
|
|
|
|
|
|
@app.post("/api/bootstrap/status/{host_name}")
|
|
async def set_host_bootstrap_status(
|
|
host_name: str,
|
|
success: bool = True,
|
|
details: Optional[str] = None,
|
|
api_key_valid: bool = Depends(verify_api_key)
|
|
):
|
|
"""Définit manuellement le statut de bootstrap d'un hôte"""
|
|
result = bootstrap_status_service.set_bootstrap_status(
|
|
host_name=host_name,
|
|
success=success,
|
|
details=details or f"Status défini manuellement"
|
|
)
|
|
|
|
# Invalider le cache des hôtes
|
|
db._hosts_cache = None
|
|
|
|
# Notifier via WebSocket
|
|
await ws_manager.broadcast({
|
|
"type": "bootstrap_status_updated",
|
|
"data": {
|
|
"host": host_name,
|
|
"bootstrap_ok": success
|
|
}
|
|
})
|
|
|
|
return {
|
|
"host": host_name,
|
|
"status": "updated",
|
|
**result
|
|
}
|
|
|
|
|
|
# ===== ENDPOINTS HISTORIQUE AD-HOC =====
|
|
|
|
@app.get("/api/adhoc/history")
|
|
async def get_adhoc_history(
|
|
category: Optional[str] = None,
|
|
search: Optional[str] = None,
|
|
limit: int = 50,
|
|
api_key_valid: bool = Depends(verify_api_key)
|
|
):
|
|
"""Récupère l'historique des commandes ad-hoc"""
|
|
commands = adhoc_history_service.get_commands(
|
|
category=category,
|
|
search=search,
|
|
limit=limit
|
|
)
|
|
return {
|
|
"commands": [cmd.dict() for cmd in commands],
|
|
"count": len(commands)
|
|
}
|
|
|
|
|
|
@app.get("/api/adhoc/categories")
|
|
async def get_adhoc_categories(api_key_valid: bool = Depends(verify_api_key)):
|
|
"""Récupère la liste des catégories de commandes ad-hoc"""
|
|
categories = adhoc_history_service.get_categories()
|
|
return {"categories": [cat.dict() for cat in categories]}
|
|
|
|
|
|
@app.post("/api/adhoc/categories")
|
|
async def create_adhoc_category(
|
|
name: str,
|
|
description: Optional[str] = None,
|
|
color: str = "#7c3aed",
|
|
icon: str = "fa-folder",
|
|
api_key_valid: bool = Depends(verify_api_key)
|
|
):
|
|
"""Crée une nouvelle catégorie de commandes ad-hoc"""
|
|
category = adhoc_history_service.add_category(name, description, color, icon)
|
|
return {"category": category.dict(), "message": "Catégorie créée"}
|
|
|
|
|
|
@app.put("/api/adhoc/categories/{category_name}")
|
|
async def update_adhoc_category(
|
|
category_name: str,
|
|
request: Request,
|
|
api_key_valid: bool = Depends(verify_api_key)
|
|
):
|
|
"""Met à jour une catégorie existante"""
|
|
try:
|
|
data = await request.json()
|
|
new_name = data.get("name", category_name)
|
|
description = data.get("description", "")
|
|
color = data.get("color", "#7c3aed")
|
|
icon = data.get("icon", "fa-folder")
|
|
|
|
success = adhoc_history_service.update_category(category_name, new_name, description, color, icon)
|
|
if not success:
|
|
raise HTTPException(status_code=404, detail="Catégorie non trouvée")
|
|
return {"message": "Catégorie mise à jour", "category": new_name}
|
|
except Exception as e:
|
|
raise HTTPException(status_code=400, detail=str(e))
|
|
|
|
|
|
@app.delete("/api/adhoc/categories/{category_name}")
|
|
async def delete_adhoc_category(
|
|
category_name: str,
|
|
api_key_valid: bool = Depends(verify_api_key)
|
|
):
|
|
"""Supprime une catégorie et déplace ses commandes vers 'default'"""
|
|
if category_name == "default":
|
|
raise HTTPException(status_code=400, detail="La catégorie 'default' ne peut pas être supprimée")
|
|
|
|
success = adhoc_history_service.delete_category(category_name)
|
|
if not success:
|
|
raise HTTPException(status_code=404, detail="Catégorie non trouvée")
|
|
return {"message": "Catégorie supprimée", "category": category_name}
|
|
|
|
|
|
@app.put("/api/adhoc/history/{command_id}/category")
|
|
async def update_adhoc_command_category(
|
|
command_id: str,
|
|
category: str,
|
|
description: Optional[str] = None,
|
|
api_key_valid: bool = Depends(verify_api_key)
|
|
):
|
|
"""Met à jour la catégorie d'une commande dans l'historique"""
|
|
success = adhoc_history_service.update_command_category(command_id, category, description)
|
|
if not success:
|
|
raise HTTPException(status_code=404, detail="Commande non trouvée")
|
|
return {"message": "Catégorie mise à jour", "command_id": command_id, "category": category}
|
|
|
|
|
|
@app.delete("/api/adhoc/history/{command_id}")
|
|
async def delete_adhoc_command(command_id: str, api_key_valid: bool = Depends(verify_api_key)):
|
|
"""Supprime une commande de l'historique"""
|
|
success = adhoc_history_service.delete_command(command_id)
|
|
if not success:
|
|
raise HTTPException(status_code=404, detail="Commande non trouvée")
|
|
return {"message": "Commande supprimée", "command_id": command_id}
|
|
|
|
|
|
@app.get("/api/health/{host_name}", response_model=HealthCheck)
|
|
async def check_host_health(host_name: str, api_key_valid: bool = Depends(verify_api_key)):
|
|
"""Effectue un health check sur un hôte spécifique et met à jour son last_seen"""
|
|
host = next((h for h in db.hosts if h.name == host_name), None)
|
|
if not host:
|
|
raise HTTPException(status_code=404, detail="Hôte non trouvé")
|
|
|
|
    # Simulate a health check based on the current stored status
|
|
health_check = HealthCheck(
|
|
host=host_name,
|
|
ssh_ok=host.status == "online",
|
|
ansible_ok=host.status == "online",
|
|
sudo_ok=host.status == "online",
|
|
reachable=host.status != "offline",
|
|
response_time=0.123 if host.status == "online" else None,
|
|
error_message=None if host.status != "offline" else "Hôte injoignable"
|
|
)
|
|
|
|
# Mettre à jour le statut runtime + persistant
|
|
new_status = "online" if health_check.reachable else "offline"
|
|
db.update_host_status(host_name, new_status, host.os)
|
|
|
|
# Ajouter un log pour le health check
|
|
log_entry = LogEntry(
|
|
timestamp=datetime.now(timezone.utc),
|
|
level="INFO" if health_check.reachable else "ERROR",
|
|
message=f"Health check {'réussi' if health_check.reachable else 'échoué'} pour {host_name}",
|
|
source="health_check",
|
|
host=host_name
|
|
)
|
|
|
|
db.logs.insert(0, log_entry)
|
|
|
|
# Notifier les clients WebSocket
|
|
await ws_manager.broadcast({
|
|
"type": "health_check",
|
|
"data": health_check.dict()
|
|
})
|
|
|
|
return health_check
|
|
|
|
# WebSocket pour les mises à jour en temps réel
|
|
@app.websocket("/ws")
|
|
async def websocket_endpoint(websocket: WebSocket):
|
|
await ws_manager.connect(websocket)
|
|
try:
|
|
while True:
|
|
            # Keep the connection open; incoming messages are ignored for now
            await websocket.receive_text()
|
|
except WebSocketDisconnect:
|
|
ws_manager.disconnect(websocket)
|
|
|
|
# Fonctions utilitaires
|
|
async def simulate_task_execution(task_id: str):
    """Simulate background execution of a task (used when no playbook matches the requested action)."""
    # Task IDs created via TaskRepository are uuid hex strings; the lookup below only finds
    # tasks held in the in-memory hybrid db and returns silently otherwise.
    task = next((t for t in db.tasks if t.id == task_id), None)
    if not task:
        return
|
|
|
|
# Simuler la progression
|
|
for progress in range(0, 101, 10):
|
|
task.progress = progress
|
|
|
|
# Notifier les clients WebSocket
|
|
await ws_manager.broadcast({
|
|
"type": "task_progress",
|
|
"data": {
|
|
"id": task_id,
|
|
"progress": progress
|
|
}
|
|
})
|
|
|
|
await asyncio.sleep(0.5) # Attendre 500ms entre chaque mise à jour
|
|
|
|
# Marquer la tâche comme terminée
|
|
task.status = "completed"
|
|
task.end_time = datetime.now(timezone.utc)
|
|
task.duration = "2m 30s"
|
|
|
|
# Ajouter un log
|
|
log_entry = LogEntry(
|
|
timestamp=datetime.now(timezone.utc),
|
|
level="INFO",
|
|
message=f"Tâche '{task.name}' terminée avec succès sur {task.host}",
|
|
source="task_manager",
|
|
host=task.host
|
|
)
|
|
db.logs.insert(0, log_entry)
|
|
|
|
# Notifier les clients WebSocket
|
|
await ws_manager.broadcast({
|
|
"type": "task_completed",
|
|
"data": {
|
|
"id": task_id,
|
|
"status": "completed",
|
|
"progress": 100
|
|
}
|
|
})
|
|
|
|
# Sauvegarder le log markdown
|
|
try:
|
|
task_log_service.save_task_log(task=task, output="Tâche simulée terminée avec succès")
|
|
except Exception as log_error:
|
|
print(f"Erreur sauvegarde log markdown: {log_error}")
|
|
|
|
|
|
async def execute_ansible_task(
    task_id: str,
    playbook: str,
    target: str,
    extra_vars: Optional[Dict[str, Any]] = None,
    check_mode: bool = False
):
    """Run an Ansible playbook for a task created via /api/tasks."""
    # Same caveat as simulate_task_execution: the task is looked up in the in-memory hybrid db.
    task = next((t for t in db.tasks if t.id == task_id), None)
    if not task:
        return
|
|
|
|
# Notifier le début
|
|
task.progress = 10
|
|
await ws_manager.broadcast({
|
|
"type": "task_progress",
|
|
"data": {"id": task_id, "progress": 10, "message": "Démarrage du playbook Ansible..."}
|
|
})
|
|
|
|
start_time = perf_counter()
|
|
|
|
try:
|
|
# Exécuter le playbook
|
|
result = await ansible_service.execute_playbook(
|
|
playbook=playbook,
|
|
target=target,
|
|
extra_vars=extra_vars,
|
|
check_mode=check_mode,
|
|
verbose=True
|
|
)
|
|
|
|
execution_time = perf_counter() - start_time
|
|
|
|
# Mettre à jour la tâche
|
|
task.progress = 100
|
|
task.status = "completed" if result["success"] else "failed"
|
|
task.end_time = datetime.now(timezone.utc)
|
|
task.duration = f"{execution_time:.1f}s"
|
|
task.output = result.get("stdout", "")
|
|
task.error = result.get("stderr", "") if not result["success"] else None
|
|
|
|
# Si c'est un health-check ciblé, mettre à jour le statut/last_seen de l'hôte
|
|
if "health-check" in playbook and target and target != "all":
|
|
try:
|
|
new_status = "online" if result["success"] else "offline"
|
|
db.update_host_status(target, new_status)
|
|
except Exception:
|
|
# Ne pas interrompre la gestion de la tâche si la MAJ de statut échoue
|
|
pass
|
|
|
|
# Ajouter un log
|
|
log_entry = LogEntry(
|
|
id=db.get_next_id("logs"),
|
|
timestamp=datetime.now(timezone.utc),
|
|
level="INFO" if result["success"] else "ERROR",
|
|
message=f"Tâche '{task.name}' {'terminée avec succès' if result['success'] else 'échouée'} sur {target}",
|
|
source="ansible",
|
|
host=target
|
|
)
|
|
db.logs.insert(0, log_entry)
|
|
|
|
# Notifier les clients WebSocket
|
|
await ws_manager.broadcast({
|
|
"type": "task_completed",
|
|
"data": {
|
|
"id": task_id,
|
|
"status": task.status,
|
|
"progress": 100,
|
|
"duration": task.duration,
|
|
"success": result["success"],
|
|
"output": result.get("stdout", "")[:500] # Limiter la taille
|
|
}
|
|
})
|
|
|
|
# Sauvegarder le log markdown
|
|
try:
|
|
log_path = task_log_service.save_task_log(
|
|
task=task,
|
|
output=result.get("stdout", ""),
|
|
error=result.get("stderr", "")
|
|
)
|
|
log_entry = LogEntry(
|
|
id=db.get_next_id("logs"),
|
|
timestamp=datetime.now(timezone.utc),
|
|
level="DEBUG",
|
|
message=f"Log de tâche sauvegardé: {log_path}",
|
|
source="task_log",
|
|
host=target
|
|
)
|
|
db.logs.insert(0, log_entry)
|
|
except Exception as log_error:
|
|
print(f"Erreur sauvegarde log markdown: {log_error}")
|
|
|
|
except Exception as e:
|
|
task.status = "failed"
|
|
task.end_time = datetime.now(timezone.utc)
|
|
task.error = str(e)
|
|
|
|
log_entry = LogEntry(
|
|
id=db.get_next_id("logs"),
|
|
timestamp=datetime.now(timezone.utc),
|
|
level="ERROR",
|
|
message=f"Erreur lors de l'exécution de '{task.name}': {str(e)}",
|
|
source="ansible",
|
|
host=target
|
|
)
|
|
db.logs.insert(0, log_entry)
|
|
|
|
# Sauvegarder le log markdown même en cas d'échec
|
|
try:
|
|
task_log_service.save_task_log(task=task, error=str(e))
|
|
except Exception:
|
|
pass
|
|
|
|
await ws_manager.broadcast({
|
|
"type": "task_failed",
|
|
"data": {
|
|
"id": task_id,
|
|
"status": "failed",
|
|
"error": str(e)
|
|
}
|
|
})
|
|
|
|
|
|
# ===== ENDPOINTS PLANIFICATEUR (SCHEDULER) =====
|
|
|
|
@app.get("/api/schedules")
|
|
async def get_schedules(
|
|
enabled: Optional[bool] = None,
|
|
playbook: Optional[str] = None,
|
|
tag: Optional[str] = None,
|
|
limit: int = 100,
|
|
offset: int = 0,
|
|
api_key_valid: bool = Depends(verify_api_key),
|
|
):
|
|
"""Liste tous les schedules avec filtrage optionnel (via SchedulerService)."""
|
|
# Utiliser le SchedulerService comme source de vérité pour next_run_at / last_run_at
|
|
schedules = scheduler_service.get_all_schedules(
|
|
enabled=enabled,
|
|
playbook=playbook,
|
|
tag=tag,
|
|
)
|
|
|
|
# Pagination simple côté API (les schedules sont déjà triés par next_run_at)
|
|
paginated = schedules[offset : offset + limit]
|
|
|
|
results = []
|
|
for s in paginated:
|
|
rec = s.recurrence
|
|
results.append(
|
|
{
|
|
"id": s.id,
|
|
"name": s.name,
|
|
"playbook": s.playbook,
|
|
"target": s.target,
|
|
"schedule_type": s.schedule_type,
|
|
"recurrence": rec.model_dump() if rec else None,
|
|
"enabled": s.enabled,
|
|
"tags": s.tags,
|
|
# Champs utilisés par le frontend pour "Prochaine" et historique
|
|
"next_run_at": s.next_run_at,
|
|
"last_run_at": s.last_run_at,
|
|
"last_status": s.last_status,
|
|
"run_count": s.run_count,
|
|
"success_count": s.success_count,
|
|
"failure_count": s.failure_count,
|
|
"created_at": s.created_at,
|
|
"updated_at": s.updated_at,
|
|
}
|
|
)
|
|
|
|
return {"schedules": results, "count": len(schedules)}
|
|
|
|
|
|
@app.post("/api/schedules")
|
|
async def create_schedule(
|
|
request: ScheduleCreateRequest,
|
|
api_key_valid: bool = Depends(verify_api_key),
|
|
db_session: AsyncSession = Depends(get_db),
|
|
):
|
|
"""Crée un nouveau schedule (stocké en DB)"""
|
|
# Vérifier que le playbook existe
|
|
playbooks = ansible_service.get_playbooks()
|
|
playbook_names = [p['filename'] for p in playbooks] + [p['name'] for p in playbooks]
|
|
|
|
playbook_file = request.playbook
|
|
if not playbook_file.endswith(('.yml', '.yaml')):
|
|
playbook_file = f"{playbook_file}.yml"
|
|
|
|
if playbook_file not in playbook_names and request.playbook not in playbook_names:
|
|
raise HTTPException(status_code=400, detail=f"Playbook '{request.playbook}' non trouvé")
|
|
|
|
# Vérifier la cible
|
|
if request.target_type == "group":
|
|
groups = ansible_service.get_groups()
|
|
if request.target not in groups and request.target != "all":
|
|
raise HTTPException(status_code=400, detail=f"Groupe '{request.target}' non trouvé")
|
|
else:
|
|
if not ansible_service.host_exists(request.target):
|
|
raise HTTPException(status_code=400, detail=f"Hôte '{request.target}' non trouvé")
|
|
|
|
# Valider la récurrence
|
|
if request.schedule_type == "recurring" and not request.recurrence:
|
|
raise HTTPException(status_code=400, detail="La récurrence est requise pour un schedule récurrent")
|
|
|
|
if request.recurrence and request.recurrence.type == "custom":
|
|
if not request.recurrence.cron_expression:
|
|
raise HTTPException(status_code=400, detail="Expression cron requise pour le type 'custom'")
|
|
validation = scheduler_service.validate_cron_expression(request.recurrence.cron_expression)
|
|
if not validation["valid"]:
|
|
raise HTTPException(status_code=400, detail=f"Expression cron invalide: {validation.get('error')}")
|
|
|
|
# Créer en DB
|
|
repo = ScheduleRepository(db_session)
|
|
schedule_id = uuid.uuid4().hex
|
|
|
|
recurrence = request.recurrence
|
|
schedule_obj = await repo.create(
|
|
id=schedule_id,
|
|
name=request.name,
|
|
playbook=playbook_file,
|
|
target=request.target,
|
|
schedule_type=request.schedule_type,
|
|
schedule_time=request.start_at,
|
|
recurrence_type=recurrence.type if recurrence else None,
|
|
recurrence_time=recurrence.time if recurrence else None,
|
|
recurrence_days=json.dumps(recurrence.days) if recurrence and recurrence.days else None,
|
|
cron_expression=recurrence.cron_expression if recurrence else None,
|
|
enabled=request.enabled,
|
|
tags=json.dumps(request.tags) if request.tags else None,
|
|
)
|
|
await db_session.commit()
|
|
|
|
# Aussi créer dans le scheduler_service pour APScheduler
|
|
scheduler_service.create_schedule(request)
|
|
|
|
# Log en DB
|
|
log_repo = LogRepository(db_session)
|
|
await log_repo.create(
|
|
level="INFO",
|
|
message=f"Schedule '{request.name}' créé pour {playbook_file} sur {request.target}",
|
|
source="scheduler",
|
|
)
|
|
await db_session.commit()
|
|
|
|
# Notifier via WebSocket
|
|
await ws_manager.broadcast({
|
|
"type": "schedule_created",
|
|
"data": {
|
|
"id": schedule_obj.id,
|
|
"name": schedule_obj.name,
|
|
"playbook": schedule_obj.playbook,
|
|
"target": schedule_obj.target,
|
|
}
|
|
})
|
|
|
|
return {
|
|
"success": True,
|
|
"message": f"Schedule '{request.name}' créé avec succès",
|
|
"schedule": {
|
|
"id": schedule_obj.id,
|
|
"name": schedule_obj.name,
|
|
"playbook": schedule_obj.playbook,
|
|
"target": schedule_obj.target,
|
|
"enabled": schedule_obj.enabled,
|
|
}
|
|
}
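# Illustrative request body for POST /api/schedules (field names taken from the
# ScheduleCreateRequest usage above):
#   {"name": "Nightly health check", "playbook": "health-check", "target": "all",
#    "target_type": "group", "schedule_type": "recurring",
#    "recurrence": {"type": "custom", "cron_expression": "0 3 * * *"}, "enabled": true}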
|
|
|
|
|
|
@app.get("/api/schedules/stats")
|
|
async def get_schedules_stats(api_key_valid: bool = Depends(verify_api_key)):
|
|
"""Récupère les statistiques globales des schedules"""
|
|
stats = scheduler_service.get_stats()
|
|
upcoming = scheduler_service.get_upcoming_executions(limit=5)
|
|
|
|
return {
|
|
"stats": stats.dict(),
|
|
"upcoming": upcoming
|
|
}
|
|
|
|
|
|
@app.get("/api/schedules/upcoming")
|
|
async def get_upcoming_schedules(
|
|
limit: int = 10,
|
|
api_key_valid: bool = Depends(verify_api_key)
|
|
):
|
|
"""Récupère les prochaines exécutions planifiées"""
|
|
upcoming = scheduler_service.get_upcoming_executions(limit=limit)
|
|
return {
|
|
"upcoming": upcoming,
|
|
"count": len(upcoming)
|
|
}
|
|
|
|
|
|
@app.get("/api/schedules/validate-cron")
|
|
async def validate_cron_expression(
|
|
expression: str,
|
|
api_key_valid: bool = Depends(verify_api_key)
|
|
):
|
|
"""Valide une expression cron et retourne les 5 prochaines exécutions"""
|
|
result = scheduler_service.validate_cron_expression(expression)
|
|
return result
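# Illustrative call: GET /api/schedules/validate-cron?expression=0%203%20*%20*%201-5
# The exact response shape is defined by scheduler_service.validate_cron_expression; callers above
# only rely on the "valid" and "error" keys.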
|
|
|
|
|
|
@app.get("/api/schedules/{schedule_id}")
|
|
async def get_schedule(
|
|
schedule_id: str,
|
|
api_key_valid: bool = Depends(verify_api_key),
|
|
db_session: AsyncSession = Depends(get_db),
|
|
):
|
|
"""Récupère les détails d'un schedule spécifique (depuis DB)"""
|
|
repo = ScheduleRepository(db_session)
|
|
schedule = await repo.get(schedule_id)
|
|
if not schedule:
|
|
raise HTTPException(status_code=404, detail=f"Schedule '{schedule_id}' non trouvé")
|
|
|
|
return {
|
|
"id": schedule.id,
|
|
"name": schedule.name,
|
|
"playbook": schedule.playbook,
|
|
"target": schedule.target,
|
|
"schedule_type": schedule.schedule_type,
|
|
"recurrence_type": schedule.recurrence_type,
|
|
"recurrence_time": schedule.recurrence_time,
|
|
"recurrence_days": json.loads(schedule.recurrence_days) if schedule.recurrence_days else None,
|
|
"cron_expression": schedule.cron_expression,
|
|
"enabled": schedule.enabled,
|
|
"tags": json.loads(schedule.tags) if schedule.tags else [],
|
|
"next_run": schedule.next_run,
|
|
"last_run": schedule.last_run,
|
|
"created_at": schedule.created_at,
|
|
"updated_at": schedule.updated_at,
|
|
}
|
|
|
|
|
|
@app.put("/api/schedules/{schedule_id}")
async def update_schedule(
    schedule_id: str,
    request: ScheduleUpdateRequest,
    api_key_valid: bool = Depends(verify_api_key),
    db_session: AsyncSession = Depends(get_db),
):
    """Update an existing schedule (DB + scheduler_service)"""
    # Try the SchedulerService first (source of truth)
    sched = scheduler_service.get_schedule(schedule_id)
    repo = ScheduleRepository(db_session)
    schedule = await repo.get(schedule_id)

    if not sched and not schedule:
        raise HTTPException(status_code=404, detail=f"Schedule '{schedule_id}' non trouvé")

    schedule_name = sched.name if sched else schedule.name

    # Validate the playbook if it was changed
    if request.playbook:
        playbooks = ansible_service.get_playbooks()
        playbook_names = [p['filename'] for p in playbooks] + [p['name'] for p in playbooks]
        playbook_file = request.playbook
        if not playbook_file.endswith(('.yml', '.yaml')):
            playbook_file = f"{playbook_file}.yml"
        if playbook_file not in playbook_names and request.playbook not in playbook_names:
            raise HTTPException(status_code=400, detail=f"Playbook '{request.playbook}' non trouvé")

    # Validate the cron expression if it was changed
    if request.recurrence and request.recurrence.type == "custom":
        if request.recurrence.cron_expression:
            validation = scheduler_service.validate_cron_expression(request.recurrence.cron_expression)
            if not validation["valid"]:
                raise HTTPException(status_code=400, detail=f"Expression cron invalide: {validation.get('error')}")

    # Build the DB update payload from the fields that were provided
    update_fields = {}
    if request.name:
        update_fields["name"] = request.name
    if request.playbook:
        update_fields["playbook"] = request.playbook
    if request.target:
        update_fields["target"] = request.target
    if request.enabled is not None:
        update_fields["enabled"] = request.enabled
    if request.tags:
        update_fields["tags"] = json.dumps(request.tags)
    if request.recurrence:
        update_fields["recurrence_type"] = request.recurrence.type
        update_fields["recurrence_time"] = request.recurrence.time
        update_fields["recurrence_days"] = json.dumps(request.recurrence.days) if request.recurrence.days else None
        update_fields["cron_expression"] = request.recurrence.cron_expression

    # Apply the update in the DB when a row exists
    if schedule:
        await repo.update(schedule, **update_fields)
        await db_session.commit()

    # Also update scheduler_service so APScheduler picks up the change
    scheduler_service.update_schedule(schedule_id, request)

    # Log to the DB
    log_repo = LogRepository(db_session)
    await log_repo.create(
        level="INFO",
        message=f"Schedule '{schedule_name}' mis à jour",
        source="scheduler",
    )
    await db_session.commit()

    # Notify via WebSocket
    await ws_manager.broadcast({
        "type": "schedule_updated",
        "data": {"id": schedule_id, "name": schedule_name}
    })

    return {
        "success": True,
        "message": f"Schedule '{schedule_name}' mis à jour",
        "schedule": {"id": schedule_id, "name": schedule_name}
    }


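# Illustrative only: a hypothetical update payload for the endpoint above. The
# field names mirror how this handler reads ScheduleUpdateRequest (the model
# itself is defined elsewhere in this file); header name, schedule id and API
# key are assumptions for the example.
#
#   import httpx
#   httpx.put(
#       "http://localhost:8008/api/schedules/<schedule-id>",
#       headers={"X-API-Key": "<api-key>"},
#       json={
#           "name": "Nightly backup",
#           "enabled": True,
#           "recurrence": {
#               "type": "custom",
#               "cron_expression": "30 2 * * *",  # checked by validate_cron_expression
#           },
#       },
#   )
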
@app.delete("/api/schedules/{schedule_id}")
async def delete_schedule(
    schedule_id: str,
    api_key_valid: bool = Depends(verify_api_key),
    db_session: AsyncSession = Depends(get_db),
):
    """Delete a schedule (soft delete in the DB + removal from scheduler_service)"""
    repo = ScheduleRepository(db_session)
    schedule = await repo.get(schedule_id)
    if not schedule:
        # No record in the DB, but still try to remove it from the
        # SchedulerService (handles legacy IDs internal to the scheduler).
        try:
            scheduler_service.delete_schedule(schedule_id)
        except Exception:
            pass
        return {
            "success": True,
            "message": f"Schedule '{schedule_id}' déjà supprimé ou inexistant en base, nettoyage scheduler effectué."
        }

    schedule_name = schedule.name

    # Soft delete in the DB
    await repo.soft_delete(schedule_id)
    await db_session.commit()

    # Remove it from scheduler_service
    scheduler_service.delete_schedule(schedule_id)

    # Log to the DB
    log_repo = LogRepository(db_session)
    await log_repo.create(
        level="WARN",
        message=f"Schedule '{schedule_name}' supprimé",
        source="scheduler",
    )
    await db_session.commit()

    # Notify via WebSocket
    await ws_manager.broadcast({
        "type": "schedule_deleted",
        "data": {"id": schedule_id, "name": schedule_name}
    })

    return {
        "success": True,
        "message": f"Schedule '{schedule_name}' supprimé"
    }


@app.post("/api/schedules/{schedule_id}/run")
async def run_schedule_now(
    schedule_id: str,
    api_key_valid: bool = Depends(verify_api_key),
    db_session: AsyncSession = Depends(get_db),
):
    """Run a schedule immediately (forced execution)"""
    # Try the SchedulerService first (source of truth)
    sched = scheduler_service.get_schedule(schedule_id)
    if not sched:
        # Fall back to the DB
        repo = ScheduleRepository(db_session)
        schedule = await repo.get(schedule_id)
        if not schedule:
            raise HTTPException(status_code=404, detail=f"Schedule '{schedule_id}' non trouvé")
        schedule_name = schedule.name
    else:
        schedule_name = sched.name

    # Launch the execution through scheduler_service
    run = await scheduler_service.run_now(schedule_id)

    return {
        "success": True,
        "message": f"Schedule '{schedule_name}' lancé",
        "run": run.dict() if run else None
    }


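# Illustrative only: forcing a run from a client. The "run" entry in the
# response is whatever scheduler_service.run_now() returned, or None. Header
# name, schedule id, API key and port are assumptions for the example.
#
#   import httpx
#   result = httpx.post(
#       "http://localhost:8008/api/schedules/<schedule-id>/run",
#       headers={"X-API-Key": "<api-key>"},
#   ).json()
#   print(result["run"])  # None when the scheduler returned no run object
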
@app.post("/api/schedules/{schedule_id}/pause")
async def pause_schedule(
    schedule_id: str,
    api_key_valid: bool = Depends(verify_api_key),
    db_session: AsyncSession = Depends(get_db),
):
    """Pause a schedule"""
    # Try the SchedulerService first (source of truth)
    sched = scheduler_service.get_schedule(schedule_id)
    repo = ScheduleRepository(db_session)
    schedule = await repo.get(schedule_id)

    if not sched and not schedule:
        raise HTTPException(status_code=404, detail=f"Schedule '{schedule_id}' non trouvé")

    schedule_name = sched.name if sched else schedule.name

    # Update the DB row when it exists
    if schedule:
        await repo.update(schedule, enabled=False)
        await db_session.commit()

    # Update scheduler_service
    scheduler_service.pause_schedule(schedule_id)

    # Log to the DB
    log_repo = LogRepository(db_session)
    await log_repo.create(
        level="INFO",
        message=f"Schedule '{schedule_name}' mis en pause",
        source="scheduler",
    )
    await db_session.commit()

    # Notify via WebSocket
    await ws_manager.broadcast({
        "type": "schedule_updated",
        "data": {"id": schedule_id, "name": schedule_name, "enabled": False}
    })

    return {
        "success": True,
        "message": f"Schedule '{schedule_name}' mis en pause",
        "schedule": {"id": schedule_id, "name": schedule_name, "enabled": False}
    }


@app.post("/api/schedules/{schedule_id}/resume")
async def resume_schedule(
    schedule_id: str,
    api_key_valid: bool = Depends(verify_api_key),
    db_session: AsyncSession = Depends(get_db),
):
    """Resume a paused schedule"""
    # Try the SchedulerService first (source of truth)
    sched = scheduler_service.get_schedule(schedule_id)
    repo = ScheduleRepository(db_session)
    schedule = await repo.get(schedule_id)

    if not sched and not schedule:
        raise HTTPException(status_code=404, detail=f"Schedule '{schedule_id}' non trouvé")

    schedule_name = sched.name if sched else schedule.name

    # Update the DB row when it exists
    if schedule:
        await repo.update(schedule, enabled=True)
        await db_session.commit()

    # Update scheduler_service
    scheduler_service.resume_schedule(schedule_id)

    # Log to the DB
    log_repo = LogRepository(db_session)
    await log_repo.create(
        level="INFO",
        message=f"Schedule '{schedule_name}' repris",
        source="scheduler",
    )
    await db_session.commit()

    # Notify via WebSocket
    await ws_manager.broadcast({
        "type": "schedule_updated",
        "data": {"id": schedule_id, "name": schedule_name, "enabled": True}
    })

    return {
        "success": True,
        "message": f"Schedule '{schedule_name}' repris",
        "schedule": {"id": schedule_id, "name": schedule_name, "enabled": True}
    }


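# Illustrative only: pause and resume are symmetric toggles of `enabled`, so a
# client can flip a schedule with two calls. Header name, schedule id, API key
# and port are assumptions for the example.
#
#   import httpx
#   base = "http://localhost:8008/api/schedules/<schedule-id>"
#   headers = {"X-API-Key": "<api-key>"}
#   httpx.post(f"{base}/pause", headers=headers)    # enabled -> False
#   httpx.post(f"{base}/resume", headers=headers)   # enabled -> True
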
@app.get("/api/schedules/{schedule_id}/runs")
async def get_schedule_runs(
    schedule_id: str,
    limit: int = 50,
    offset: int = 0,
    api_key_valid: bool = Depends(verify_api_key),
    db_session: AsyncSession = Depends(get_db),
):
    """Fetch a schedule's execution history (from the DB or the SchedulerService)"""
    # Try the SchedulerService first (source of truth)
    sched = scheduler_service.get_schedule(schedule_id)
    repo = ScheduleRepository(db_session)
    schedule = await repo.get(schedule_id)

    if not sched and not schedule:
        raise HTTPException(status_code=404, detail=f"Schedule '{schedule_id}' non trouvé")

    schedule_name = sched.name if sched else schedule.name

    # Fetch the runs from the SchedulerService (JSON) when they are not in the DB
    runs_from_service = scheduler_service.get_runs_for_schedule(schedule_id, limit=limit)

    return {
        "schedule_id": schedule_id,
        "schedule_name": schedule_name,
        "runs": [
            {
                "id": r.get("id"),
                "status": r.get("status"),
                "started_at": r.get("started_at"),
                "finished_at": r.get("finished_at"),
                "duration_seconds": r.get("duration_seconds"),
                "error_message": r.get("error_message"),
            }
            for r in runs_from_service
        ],
        "count": len(runs_from_service)
    }


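# Illustrative only: reading the run history with a smaller page. Note that the
# handler above only applies `limit`; `offset` is accepted but not used when the
# runs come from the SchedulerService. Header name, schedule id, API key and
# port are assumptions for the example.
#
#   import httpx
#   history = httpx.get(
#       "http://localhost:8008/api/schedules/<schedule-id>/runs",
#       params={"limit": 10},
#       headers={"X-API-Key": "<api-key>"},
#   ).json()
#   print(history["count"], "runs returned")
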
# ===== STARTUP/SHUTDOWN EVENTS =====

@app.on_event("startup")
async def startup_event():
    """Application startup event"""
    print("🚀 Homelab Automation Dashboard démarré")

    # Initialise the database (create the tables if needed)
    await init_db()
    print("📦 Base de données SQLite initialisée")

    # Start the scheduler
    scheduler_service.start()

    # Log the startup to the database
    from models.database import async_session_maker
    async with async_session_maker() as session:
        repo = LogRepository(session)
        await repo.create(
            level="INFO",
            message="Application démarrée - Scheduler initialisé",
            source="system",
        )
        await session.commit()

    # Clean up old runs (>90 days)
    cleaned = scheduler_service.cleanup_old_runs(days=90)
    if cleaned > 0:
        print(f"🧹 {cleaned} anciennes exécutions nettoyées")


@app.on_event("shutdown")
async def shutdown_event():
    """Application shutdown event"""
    print("👋 Arrêt de l'application...")

    # Stop the scheduler
    scheduler_service.shutdown()

    print("✅ Scheduler arrêté proprement")


# Start the application
if __name__ == "__main__":
    uvicorn.run(
        "app_optimized:app",
        host="0.0.0.0",
        port=8008,
        reload=True,
        log_level="info"
    )
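# Illustrative only: the equivalent launch from the command line, assuming this
# module is saved as app_optimized.py in the current working directory:
#
#   uvicorn app_optimized:app --host 0.0.0.0 --port 8008 --reload --log-level info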