"""
|
|
Homelab Automation Dashboard - Backend Optimisé
|
|
API REST moderne avec FastAPI pour la gestion d'homelab
|
|
"""
|
|
|
|
from datetime import datetime, timezone
|
|
from pathlib import Path
|
|
from time import perf_counter, time
|
|
import os
|
|
import re
|
|
import shutil
|
|
import subprocess
|
|
import sqlite3
|
|
import yaml
|
|
from abc import ABC, abstractmethod
|
|
from typing import Literal, Any, List, Dict, Optional
|
|
from threading import Lock
|
|
import asyncio
|
|
import json
|
|
|
|
from fastapi import FastAPI, HTTPException, Depends, Request, Form, WebSocket, WebSocketDisconnect
|
|
from fastapi.responses import HTMLResponse, JSONResponse, FileResponse
|
|
from fastapi.security import APIKeyHeader
|
|
from fastapi.templating import Jinja2Templates
|
|
from fastapi.middleware.cors import CORSMiddleware
|
|
from fastapi.staticfiles import StaticFiles
|
|
from pydantic import BaseModel, Field, field_validator
|
|
import uvicorn

BASE_DIR = Path(__file__).resolve().parent

# Application configuration
app = FastAPI(
    title="Homelab Automation Dashboard API",
    version="1.0.0",
    description="Modern REST API for automated homelab management",
    docs_url="/api/docs",
    redoc_url="/api/redoc"
)

# CORS middleware for development
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # Restrict in production
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

app.mount("/static", StaticFiles(directory=BASE_DIR, html=False), name="static")

# Paths and environment variable configuration
LOGS_DIR = Path(os.environ.get("LOGS_DIR", "/logs"))
ANSIBLE_DIR = BASE_DIR.parent / "ansible"
SSH_KEY_PATH = os.environ.get("SSH_KEY_PATH", str(Path.home() / ".ssh" / "id_rsa"))
SSH_USER = os.environ.get("SSH_USER", "automation")
SSH_REMOTE_USER = os.environ.get("SSH_REMOTE_USER", "root")
DB_PATH = LOGS_DIR / "homelab.db"
API_KEY = os.environ.get("API_KEY", "dev-key-12345")
# Directory for markdown task logs (laid out as YYYY/MM/DD)
DIR_LOGS_TASKS = Path(os.environ.get("DIR_LOGS_TASKS", str(BASE_DIR.parent / "tasks_logs")))
# JSON file for the ad-hoc command history
ADHOC_HISTORY_FILE = DIR_LOGS_TASKS / ".adhoc_history.json"
# JSON files for persisted statuses
BOOTSTRAP_STATUS_FILE = DIR_LOGS_TASKS / ".bootstrap_status.json"
HOST_STATUS_FILE = ANSIBLE_DIR / ".host_status.json"

# Mapping from actions to playbooks
ACTION_PLAYBOOK_MAP = {
    'upgrade': 'vm-upgrade.yml',
    'reboot': 'vm-reboot.yml',
    'health-check': 'health-check.yml',
    'backup': 'backup-config.yml',
    'bootstrap': 'bootstrap-host.yml',
}

# API key header handler
api_key_header = APIKeyHeader(name="X-API-Key", auto_error=False)
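

# Illustrative sketch (an assumption, not a dependency wired up here): how a
# route could validate the X-API-Key header declared above against API_KEY.
async def _example_require_api_key(api_key: Optional[str] = Depends(api_key_header)) -> str:
    """Hypothetical dependency: reject requests without a valid X-API-Key."""
    if api_key != API_KEY:
        raise HTTPException(status_code=401, detail="Invalid or missing API key")
    return api_key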


# Enhanced Pydantic models
class CommandResult(BaseModel):
    status: str
    return_code: int
    stdout: str
    stderr: Optional[str] = None
    execution_time: Optional[float] = None
    timestamp: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))


class Host(BaseModel):
    id: int
    name: str
    ip: str
    status: Literal["online", "offline", "warning"]
    os: str
    last_seen: Optional[datetime] = None
    created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
    groups: List[str] = []  # Ansible groups the host belongs to
    bootstrap_ok: bool = False  # Whether the bootstrap completed successfully
    bootstrap_date: Optional[datetime] = None  # Date of the last successful bootstrap

    class Config:
        json_encoders = {
            datetime: lambda v: v.isoformat()
        }


class Task(BaseModel):
    id: int
    name: str
    host: str
    status: Literal["pending", "running", "completed", "failed", "cancelled"]
    progress: int = Field(ge=0, le=100, default=0)
    start_time: Optional[datetime] = None
    end_time: Optional[datetime] = None
    duration: Optional[str] = None
    output: Optional[str] = None
    error: Optional[str] = None

    class Config:
        json_encoders = {
            datetime: lambda v: v.isoformat() if v else None
        }


class LogEntry(BaseModel):
    id: int
    timestamp: datetime
    level: Literal["DEBUG", "INFO", "WARN", "ERROR"]
    message: str
    source: Optional[str] = None
    host: Optional[str] = None

    class Config:
        json_encoders = {
            datetime: lambda v: v.isoformat()
        }


class SystemMetrics(BaseModel):
    online_hosts: int
    total_tasks: int
    success_rate: float
    uptime: float
    cpu_usage: float
    memory_usage: float
    disk_usage: float


class HealthCheck(BaseModel):
    host: str
    ssh_ok: bool = False
    ansible_ok: bool = False
    sudo_ok: bool = False
    reachable: bool = False
    error_message: Optional[str] = None
    response_time: Optional[float] = None
    cached: bool = False
    cache_age: int = 0


class AnsibleExecutionRequest(BaseModel):
    playbook: str = Field(..., description="Name of the playbook to run")
    target: str = Field(default="all", description="Target host or group")
    extra_vars: Optional[Dict[str, Any]] = Field(default=None, description="Extra variables")
    check_mode: bool = Field(default=False, description="Dry-run mode (--check)")
    verbose: bool = Field(default=False, description="Verbose mode")


class AnsibleInventoryHost(BaseModel):
    name: str
    ansible_host: str
    group: str
    groups: List[str] = []  # All groups this host belongs to
    vars: Dict[str, Any] = {}


class TaskRequest(BaseModel):
    host: Optional[str] = Field(default=None, description="Target host")
    group: Optional[str] = Field(default=None, description="Target group")
    action: str = Field(..., description="Action to run")
    cmd: Optional[str] = Field(default=None, description="Custom command")
    extra_vars: Optional[Dict[str, Any]] = Field(default=None, description="Ansible variables")
    tags: Optional[List[str]] = Field(default=None, description="Ansible tags")
    dry_run: bool = Field(default=False, description="Simulation mode")
    ssh_user: Optional[str] = Field(default=None, description="SSH user")
    ssh_password: Optional[str] = Field(default=None, description="SSH password")

    @field_validator('action')
    @classmethod
    def validate_action(cls, v: str) -> str:
        valid_actions = ['upgrade', 'reboot', 'health-check', 'backup', 'deploy', 'rollback', 'maintenance', 'bootstrap']
        if v not in valid_actions:
            raise ValueError(f'Action must be one of: {", ".join(valid_actions)}')
        return v
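

# Example (illustrative): the validator above accepts the known actions and
# rejects anything else when the model is built.
def _example_task_request_validation() -> None:
    req = TaskRequest(host="nas.domain.home", action="upgrade", dry_run=True)
    assert req.action == "upgrade"
    try:
        TaskRequest(action="format-disk")  # not in valid_actions
    except ValueError:
        # pydantic raises a ValidationError, which subclasses ValueError
        pass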


class HostRequest(BaseModel):
    name: str = Field(..., min_length=3, max_length=100, description="Hostname (e.g. server.domain.home)")
    # ansible_host can be either an IPv4 address or a resolvable hostname, so no pattern constraint is applied
    ip: Optional[str] = Field(default=None, description="IP address or hostname (optional if the hostname is resolvable)")
    os: str = Field(default="Linux", min_length=3, max_length=50)
    ssh_user: Optional[str] = Field(default="root", min_length=1, max_length=50)
    ssh_port: int = Field(default=22, ge=1, le=65535)
    description: Optional[str] = Field(default=None, max_length=200)
    env_group: str = Field(..., description="Environment group (e.g. env_homelab, env_prod)")
    role_groups: List[str] = Field(default=[], description="Role groups (e.g. role_proxmox, role_sbc)")


class HostUpdateRequest(BaseModel):
    """Host update request"""
    env_group: Optional[str] = Field(default=None, description="New environment group")
    role_groups: Optional[List[str]] = Field(default=None, description="New role groups")
    ansible_host: Optional[str] = Field(default=None, description="New ansible_host address")


class GroupRequest(BaseModel):
    """Request to create a group"""
    name: str = Field(..., min_length=3, max_length=50, description="Group name (e.g. env_prod, role_web)")
    type: str = Field(..., description="Group type: 'env' or 'role'")

    @field_validator('name')
    @classmethod
    def validate_name(cls, v: str) -> str:
        if not re.match(r'^[a-zA-Z0-9_-]+$', v):
            raise ValueError('Group names may only contain letters, digits, hyphens and underscores')
        return v

    @field_validator('type')
    @classmethod
    def validate_type(cls, v: str) -> str:
        if v not in ['env', 'role']:
            raise ValueError("Type must be 'env' or 'role'")
        return v


class GroupUpdateRequest(BaseModel):
    """Request to rename a group"""
    new_name: str = Field(..., min_length=3, max_length=50, description="New group name")

    @field_validator('new_name')
    @classmethod
    def validate_new_name(cls, v: str) -> str:
        if not re.match(r'^[a-zA-Z0-9_-]+$', v):
            raise ValueError('Group names may only contain letters, digits, hyphens and underscores')
        return v


class GroupDeleteRequest(BaseModel):
    """Request to delete a group"""
    move_hosts_to: Optional[str] = Field(default=None, description="Group to move the hosts to")


class AdHocCommandRequest(BaseModel):
    """Request to run an Ansible ad-hoc command"""
    target: str = Field(..., description="Target host or group")
    command: str = Field(..., description="Shell command to run")
    module: str = Field(default="shell", description="Ansible module (shell, command, raw)")
    become: bool = Field(default=False, description="Run with sudo")
    timeout: int = Field(default=60, ge=5, le=600, description="Timeout in seconds")


class AdHocCommandResult(BaseModel):
    """Result of an ad-hoc command"""
    target: str
    command: str
    success: bool
    return_code: int
    stdout: str
    stderr: Optional[str] = None
    duration: float
    hosts_results: Optional[Dict[str, Any]] = None


class AdHocHistoryEntry(BaseModel):
    """Entry in the ad-hoc command history"""
    id: str
    command: str
    target: str
    module: str
    become: bool
    category: str = "default"
    description: Optional[str] = None
    created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
    last_used: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
    use_count: int = 1


class AdHocHistoryCategory(BaseModel):
    """Category used to organise ad-hoc commands"""
    name: str
    description: Optional[str] = None
    color: str = "#7c3aed"
    icon: str = "fa-folder"


class TaskLogFile(BaseModel):
    """Representation of a task log file"""
    id: str
    filename: str
    path: str
    task_name: str
    target: str
    status: str
    date: str  # Format YYYY-MM-DD
    year: str
    month: str
    day: str
    created_at: datetime
    size_bytes: int
    # Extra fields for richer display
    start_time: Optional[str] = None  # ISO format or HH:MM:SS
    end_time: Optional[str] = None  # ISO format or HH:MM:SS
    duration: Optional[str] = None  # Formatted duration
    duration_seconds: Optional[int] = None  # Duration in seconds
    hosts: List[str] = []  # Hosts involved
    category: Optional[str] = None  # Category (Playbook, Ad-hoc, ...)
    subcategory: Optional[str] = None  # Subcategory
    target_type: Optional[str] = None  # Target type: 'host', 'group', 'role'


class TasksFilterParams(BaseModel):
    """Task filtering parameters"""
    status: Optional[str] = None  # pending, running, completed, failed, all
    year: Optional[str] = None
    month: Optional[str] = None
    day: Optional[str] = None
    target: Optional[str] = None
    search: Optional[str] = None


# ===== MARKDOWN LOGGING SERVICE =====

class TaskLogService:
    """Service that stores task logs as markdown files"""

    def __init__(self, base_dir: Path):
        self.base_dir = base_dir
        self._ensure_base_dir()

    def _ensure_base_dir(self):
        """Create the base directory if it does not exist"""
        self.base_dir.mkdir(parents=True, exist_ok=True)

    def _get_date_path(self, dt: Optional[datetime] = None) -> Path:
        """Return the directory path for a given date (YYYY/MM/DD)"""
        if dt is None:
            dt = datetime.now(timezone.utc)
        year = dt.strftime("%Y")
        month = dt.strftime("%m")
        day = dt.strftime("%d")
        return self.base_dir / year / month / day

    def _generate_task_id(self) -> str:
        """Generate a unique task ID"""
        import uuid
        return f"task_{datetime.now(timezone.utc).strftime('%H%M%S')}_{uuid.uuid4().hex[:6]}"

    def save_task_log(self, task: 'Task', output: str = "", error: str = "") -> str:
        """Save a task log as markdown and return the file path"""
        dt = task.start_time or datetime.now(timezone.utc)
        date_path = self._get_date_path(dt)
        date_path.mkdir(parents=True, exist_ok=True)

        # Build the file name
        task_id = self._generate_task_id()
        status_emoji = {
            "completed": "✅",
            "failed": "❌",
            "running": "🔄",
            "pending": "⏳",
            "cancelled": "🚫"
        }.get(task.status, "❓")

        # Sanitize the task name and host for the filename
        safe_name = task.name.replace(' ', '_').replace(':', '').replace('/', '-')[:50]
        safe_host = task.host.replace(' ', '_').replace(':', '').replace('/', '-')[:30] if task.host else 'unknown'
        filename = f"{task_id}_{safe_host}_{safe_name}_{task.status}.md"
        filepath = date_path / filename

        # Build the markdown content
        md_content = f"""# {status_emoji} {task.name}

## Information

| Property | Value |
|-----------|--------|
| **ID** | `{task.id}` |
| **Name** | {task.name} |
| **Target** | `{task.host}` |
| **Status** | {task.status} |
| **Progress** | {task.progress}% |
| **Start** | {task.start_time.isoformat() if task.start_time else 'N/A'} |
| **End** | {task.end_time.isoformat() if task.end_time else 'N/A'} |
| **Duration** | {task.duration or 'N/A'} |

## Output

```
{output or task.output or '(No output)'}
```

"""
        if error or task.error:
            md_content += f"""## Errors

```
{error or task.error}
```

"""

        md_content += f"""---
*Automatically generated by Homelab Automation Dashboard*
*Date: {datetime.now(timezone.utc).isoformat()}*
"""

        # Write the file
        filepath.write_text(md_content, encoding='utf-8')

        return str(filepath)

    def _parse_markdown_metadata(self, content: str) -> Dict[str, Any]:
        """Parse markdown content to extract the enriched metadata"""
        metadata = {
            'start_time': None,
            'end_time': None,
            'duration': None,
            'duration_seconds': None,
            'hosts': [],
            'category': None,
            'subcategory': None,
            'target_type': None
        }

        # Extract the start and end times
        start_match = re.search(r'\|\s*\*\*Start\*\*\s*\|\s*([^|]+)', content)
        if start_match:
            start_val = start_match.group(1).strip()
            if start_val and start_val != 'N/A':
                metadata['start_time'] = start_val

        end_match = re.search(r'\|\s*\*\*End\*\*\s*\|\s*([^|]+)', content)
        if end_match:
            end_val = end_match.group(1).strip()
            if end_val and end_val != 'N/A':
                metadata['end_time'] = end_val

        duration_match = re.search(r'\|\s*\*\*Duration\*\*\s*\|\s*([^|]+)', content)
        if duration_match:
            dur_val = duration_match.group(1).strip()
            if dur_val and dur_val != 'N/A':
                metadata['duration'] = dur_val
                # Convert to seconds when possible
                metadata['duration_seconds'] = self._parse_duration_to_seconds(dur_val)

        # Extract the hosts from the Ansible output
        # Patterns for hosts in PLAY RECAP or in task results
        host_patterns = [
            r'^([a-zA-Z0-9][a-zA-Z0-9._-]+)\s*:\s*ok=',  # PLAY RECAP format
            r'^\s*([a-zA-Z0-9][a-zA-Z0-9._-]+)\s*\|\s*(SUCCESS|CHANGED|FAILED|UNREACHABLE)',  # Ad-hoc format
        ]
        hosts_found = set()
        for pattern in host_patterns:
            for match in re.finditer(pattern, content, re.MULTILINE):
                host = match.group(1).strip()
                # Keep dotted names longer than 2 chars, or any name longer than 5
                if host and ((len(host) > 2 and '.' in host) or len(host) > 5):
                    hosts_found.add(host)
        metadata['hosts'] = sorted(list(hosts_found))

        # Detect the category
        task_name_match = re.search(r'^#\s*[✅❌🔄⏳🚫❓]?\s*(.+)$', content, re.MULTILINE)
        if task_name_match:
            task_name = task_name_match.group(1).strip().lower()
            if 'playbook' in task_name:
                metadata['category'] = 'Playbook'
                # Extract the subcategory from the name
                if 'health' in task_name:
                    metadata['subcategory'] = 'Health Check'
                elif 'backup' in task_name:
                    metadata['subcategory'] = 'Backup'
                elif 'upgrade' in task_name or 'update' in task_name:
                    metadata['subcategory'] = 'Upgrade'
                elif 'bootstrap' in task_name:
                    metadata['subcategory'] = 'Bootstrap'
                elif 'reboot' in task_name:
                    metadata['subcategory'] = 'Reboot'
            elif 'ad-hoc' in task_name or 'adhoc' in task_name:
                metadata['category'] = 'Ad-hoc'
            else:
                metadata['category'] = 'Other'

        # Detect the target type
        target_match = re.search(r'\|\s*\*\*Target\*\*\s*\|\s*`([^`]+)`', content)
        if target_match:
            target_val = target_match.group(1).strip()
            if target_val == 'all':
                metadata['target_type'] = 'group'
            elif target_val.startswith('env_') or target_val.startswith('role_'):
                metadata['target_type'] = 'group'
            elif '.' in target_val:
                metadata['target_type'] = 'host'
            else:
                metadata['target_type'] = 'group'

        return metadata
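
    # Example (illustrative): for a log whose information table contains
    #   | **Start** | 2025-01-01T10:00:00+00:00 |
    #   | **Duration** | 1m 30s |
    # and whose output ends with "nas.domain.home : ok=3 changed=1",
    # this returns start_time and duration filled in, duration_seconds == 90,
    # and hosts == ["nas.domain.home"].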

    def _parse_duration_to_seconds(self, duration_str: str) -> Optional[int]:
        """Convert a duration string to seconds"""
        if not duration_str:
            return None

        total_seconds = 0
        # Patterns: Xh Xm Xs, X:XX:XX, or Xs

        # HH:MM:SS format
        hms_match = re.match(r'(\d+):(\d+):(\d+)', duration_str)
        if hms_match:
            h, m, s = map(int, hms_match.groups())
            return h * 3600 + m * 60 + s

        # Format with h, m, s units
        hours = re.search(r'(\d+)\s*h', duration_str)
        minutes = re.search(r'(\d+)\s*m', duration_str)
        seconds = re.search(r'(\d+)\s*s', duration_str)

        if hours:
            total_seconds += int(hours.group(1)) * 3600
        if minutes:
            total_seconds += int(minutes.group(1)) * 60
        if seconds:
            total_seconds += int(seconds.group(1))

        return total_seconds if total_seconds > 0 else None
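
    # Examples (illustrative) of values _parse_duration_to_seconds() accepts:
    #   "0:02:15"   -> 135
    #   "1h 5m 20s" -> 3920
    #   "45s"       -> 45
    #   "instant"   -> None (no digits with a recognised unit)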

    def get_task_logs(self,
                      year: Optional[str] = None,
                      month: Optional[str] = None,
                      day: Optional[str] = None,
                      status: Optional[str] = None,
                      target: Optional[str] = None,
                      category: Optional[str] = None) -> List[TaskLogFile]:
        """Return the list of task logs, with filtering"""
        logs = []

        # Work out the search paths
        if year and month and day:
            search_paths = [self.base_dir / year / month / day]
        elif year and month:
            month_path = self.base_dir / year / month
            search_paths = list(month_path.glob("*")) if month_path.exists() else []
        elif year:
            year_path = self.base_dir / year
            search_paths = []
            if year_path.exists():
                for m in year_path.iterdir():
                    if m.is_dir():
                        search_paths.extend(m.glob("*"))
        else:
            search_paths = []
            if self.base_dir.exists():
                for y in self.base_dir.iterdir():
                    if y.is_dir() and y.name.isdigit():
                        for m in y.iterdir():
                            if m.is_dir():
                                search_paths.extend(m.glob("*"))

        # Walk the directories
        for path in search_paths:
            if not path.is_dir():
                continue

            for md_file in path.glob("*.md"):
                try:
                    # Extract information from the file name
                    # Format: task_HHMMSS_XXXXXX_TARGET_TASKNAME_STATUS.md
                    parts = md_file.stem.split("_")
                    if len(parts) >= 4:
                        file_status = parts[-1]
                        # New format: task_HHMMSS_XXXXXX_target_taskname_status
                        # parts[0] = task, parts[1] = HHMMSS, parts[2] = XXXXXX (id)
                        # parts[3] = target, parts[4:-1] = task_name, parts[-1] = status
                        if len(parts) >= 5:
                            file_target = parts[3]
                            task_name_from_file = "_".join(parts[4:-1]) if len(parts) > 5 else (parts[4] if len(parts) > 4 else "unknown")
                        else:
                            file_target = ""
                            task_name_from_file = "_".join(parts[3:-1]) if len(parts) > 4 else (parts[3] if len(parts) > 3 else "unknown")

                        # Filter by status if requested
                        if status and status != "all" and file_status != status:
                            continue

                        # Extract the date from the path
                        rel_path = md_file.relative_to(self.base_dir)
                        path_parts = rel_path.parts
                        if len(path_parts) >= 3:
                            log_year, log_month, log_day = path_parts[0], path_parts[1], path_parts[2]
                        else:
                            continue

                        stat = md_file.stat()

                        # Read the content to extract the enriched metadata
                        try:
                            content = md_file.read_text(encoding='utf-8')
                            metadata = self._parse_markdown_metadata(content)
                            # Extract the task name and target from the markdown content
                            task_name_match = re.search(r'^#\s*[✅❌🔄⏳🚫❓]?\s*(.+)$', content, re.MULTILINE)
                            if task_name_match:
                                task_name = task_name_match.group(1).strip()
                            else:
                                task_name = task_name_from_file.replace("_", " ")

                            # Extract the target from the content
                            target_match = re.search(r'\|\s*\*\*Target\*\*\s*\|\s*`([^`]+)`', content)
                            if target_match:
                                file_target = target_match.group(1).strip()
                        except Exception:
                            metadata = {}
                            task_name = task_name_from_file.replace("_", " ")

                        # Filter by target if requested
                        if target and target != "all" and file_target:
                            if target.lower() not in file_target.lower():
                                continue

                        # Filter by category if requested
                        if category and category != "all":
                            file_category = metadata.get('category', '')
                            if file_category and category.lower() not in file_category.lower():
                                continue

                        logs.append(TaskLogFile(
                            id=(parts[0] + "_" + parts[1] + "_" + parts[2]) if len(parts) > 2 else parts[0],
                            filename=md_file.name,
                            path=str(md_file),
                            task_name=task_name,
                            target=file_target,
                            status=file_status,
                            date=f"{log_year}-{log_month}-{log_day}",
                            year=log_year,
                            month=log_month,
                            day=log_day,
                            created_at=datetime.fromtimestamp(stat.st_ctime, tz=timezone.utc),
                            size_bytes=stat.st_size,
                            start_time=metadata.get('start_time'),
                            end_time=metadata.get('end_time'),
                            duration=metadata.get('duration'),
                            duration_seconds=metadata.get('duration_seconds'),
                            hosts=metadata.get('hosts', []),
                            category=metadata.get('category'),
                            subcategory=metadata.get('subcategory'),
                            target_type=metadata.get('target_type')
                        ))
                except Exception:
                    continue

        # Sort by descending date
        logs.sort(key=lambda x: x.created_at, reverse=True)
        return logs

    def get_available_dates(self) -> Dict[str, Any]:
        """Return the tree of available dates, for filtering"""
        dates = {"years": {}}

        if not self.base_dir.exists():
            return dates

        for year_dir in sorted(self.base_dir.iterdir(), reverse=True):
            if year_dir.is_dir() and year_dir.name.isdigit():
                year = year_dir.name
                dates["years"][year] = {"months": {}}

                for month_dir in sorted(year_dir.iterdir(), reverse=True):
                    if month_dir.is_dir() and month_dir.name.isdigit():
                        month = month_dir.name
                        dates["years"][year]["months"][month] = {"days": []}

                        for day_dir in sorted(month_dir.iterdir(), reverse=True):
                            if day_dir.is_dir() and day_dir.name.isdigit():
                                day = day_dir.name
                                count = len(list(day_dir.glob("*.md")))
                                dates["years"][year]["months"][month]["days"].append({
                                    "day": day,
                                    "count": count
                                })

        return dates

    def get_stats(self) -> Dict[str, int]:
        """Return task statistics"""
        stats = {"total": 0, "completed": 0, "failed": 0, "running": 0, "pending": 0}

        for log in self.get_task_logs():
            stats["total"] += 1
            if log.status in stats:
                stats[log.status] += 1

        return stats
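

# Illustrative sketch (the task values are made up): persisting a finished task
# with TaskLogService, then listing today's completed logs back.
def _example_task_log_roundtrip(service: TaskLogService) -> List[TaskLogFile]:
    now = datetime.now(timezone.utc)
    task = Task(id=1, name="Playbook health-check", host="nas.domain.home",
                status="completed", progress=100, start_time=now, end_time=now,
                duration="4s")
    service.save_task_log(task, output="PLAY RECAP\nnas.domain.home : ok=3")
    return service.get_task_logs(year=now.strftime("%Y"), month=now.strftime("%m"),
                                 day=now.strftime("%d"), status="completed")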


# ===== AD-HOC COMMAND HISTORY SERVICE =====

class AdHocHistoryService:
    """Service that manages the ad-hoc command history, with categories"""

    def __init__(self, history_file: Path):
        self.history_file = history_file
        self._ensure_file()

    def _ensure_file(self):
        """Create the history file if it does not exist"""
        self.history_file.parent.mkdir(parents=True, exist_ok=True)
        if not self.history_file.exists():
            self._save_data({"commands": [], "categories": [
                {"name": "default", "description": "General commands", "color": "#7c3aed", "icon": "fa-terminal"},
                {"name": "diagnostic", "description": "Diagnostic commands", "color": "#10b981", "icon": "fa-stethoscope"},
                {"name": "maintenance", "description": "Maintenance commands", "color": "#f59e0b", "icon": "fa-wrench"},
                {"name": "deployment", "description": "Deployment commands", "color": "#3b82f6", "icon": "fa-rocket"},
            ]})

    def _load_data(self) -> Dict:
        """Load the data from the file"""
        try:
            with open(self.history_file, 'r', encoding='utf-8') as f:
                return json.load(f)
        except Exception:
            return {"commands": [], "categories": []}

    def _save_data(self, data: Dict):
        """Save the data to the file"""
        with open(self.history_file, 'w', encoding='utf-8') as f:
            json.dump(data, f, indent=2, default=str, ensure_ascii=False)

    def add_command(self, command: str, target: str, module: str, become: bool,
                    category: str = "default", description: Optional[str] = None) -> AdHocHistoryEntry:
        """Add a command to the history, or update it if it already exists"""
        data = self._load_data()

        # Look for an existing entry for this command
        existing = None
        for cmd in data["commands"]:
            if cmd["command"] == command and cmd["target"] == target:
                existing = cmd
                break

        if existing:
            existing["last_used"] = datetime.now(timezone.utc).isoformat()
            existing["use_count"] = existing.get("use_count", 1) + 1
            if category != "default":
                existing["category"] = category
            if description:
                existing["description"] = description
            entry = AdHocHistoryEntry(**existing)
        else:
            import uuid
            entry = AdHocHistoryEntry(
                id=f"adhoc_{uuid.uuid4().hex[:8]}",
                command=command,
                target=target,
                module=module,
                become=become,
                category=category,
                description=description
            )
            data["commands"].append(entry.model_dump())

        self._save_data(data)
        return entry

    def get_commands(self, category: Optional[str] = None, search: Optional[str] = None, limit: int = 50) -> List[AdHocHistoryEntry]:
        """Return commands from the history"""
        data = self._load_data()
        commands = []

        for cmd in data.get("commands", []):
            if category and cmd.get("category") != category:
                continue
            if search and search.lower() not in cmd.get("command", "").lower():
                continue

            try:
                # Convert string dates to datetime where needed
                if isinstance(cmd.get("created_at"), str):
                    cmd["created_at"] = datetime.fromisoformat(cmd["created_at"].replace("Z", "+00:00"))
                if isinstance(cmd.get("last_used"), str):
                    cmd["last_used"] = datetime.fromisoformat(cmd["last_used"].replace("Z", "+00:00"))
                commands.append(AdHocHistoryEntry(**cmd))
            except Exception:
                continue

        # Sort by most recently used
        commands.sort(key=lambda x: x.last_used, reverse=True)
        return commands[:limit]

    def get_categories(self) -> List[AdHocHistoryCategory]:
        """Return the list of categories"""
        data = self._load_data()
        return [AdHocHistoryCategory(**cat) for cat in data.get("categories", [])]

    def add_category(self, name: str, description: Optional[str] = None, color: str = "#7c3aed", icon: str = "fa-folder") -> AdHocHistoryCategory:
        """Add a new category"""
        data = self._load_data()

        # Return the existing category if one has this name already
        for cat in data["categories"]:
            if cat["name"] == name:
                return AdHocHistoryCategory(**cat)

        new_cat = AdHocHistoryCategory(name=name, description=description, color=color, icon=icon)
        data["categories"].append(new_cat.model_dump())
        self._save_data(data)
        return new_cat

    def delete_command(self, command_id: str) -> bool:
        """Delete a command from the history"""
        data = self._load_data()
        original_len = len(data["commands"])
        data["commands"] = [c for c in data["commands"] if c.get("id") != command_id]

        if len(data["commands"]) < original_len:
            self._save_data(data)
            return True
        return False

    def update_command_category(self, command_id: str, category: str, description: Optional[str] = None) -> bool:
        """Update the category of a command"""
        data = self._load_data()

        for cmd in data["commands"]:
            if cmd.get("id") == command_id:
                cmd["category"] = category
                if description:
                    cmd["description"] = description
                self._save_data(data)
                return True
        return False

    def update_category(self, category_name: str, new_name: str, description: str, color: str, icon: str) -> bool:
        """Update an existing category"""
        data = self._load_data()

        for cat in data["categories"]:
            if cat["name"] == category_name:
                # Update the commands if the name changes
                if new_name != category_name:
                    for cmd in data["commands"]:
                        if cmd.get("category") == category_name:
                            cmd["category"] = new_name

                cat["name"] = new_name
                cat["description"] = description
                cat["color"] = color
                cat["icon"] = icon
                self._save_data(data)
                return True
        return False

    def delete_category(self, category_name: str) -> bool:
        """Delete a category and move its commands to 'default'"""
        if category_name == "default":
            return False

        data = self._load_data()

        # Check that the category exists
        cat_exists = any(cat["name"] == category_name for cat in data["categories"])
        if not cat_exists:
            return False

        # Move the commands to 'default'
        for cmd in data["commands"]:
            if cmd.get("category") == category_name:
                cmd["category"] = "default"

        # Remove the category
        data["categories"] = [cat for cat in data["categories"] if cat["name"] != category_name]

        self._save_data(data)
        return True
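

# Illustrative usage sketch for AdHocHistoryService (the command, target, and
# category values here are made up):
def _example_adhoc_history(service: AdHocHistoryService) -> List[AdHocHistoryEntry]:
    service.add_command("uptime", target="env_homelab", module="shell",
                        become=False, category="diagnostic")
    return service.get_commands(category="diagnostic", search="uptime")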


# ===== BOOTSTRAP STATUS SERVICE =====

class BootstrapStatusService:
    """Service that tracks the bootstrap status of hosts"""

    def __init__(self, status_file: Path):
        self.status_file = status_file
        self._ensure_file()

    def _ensure_file(self):
        """Create the status file if it does not exist"""
        self.status_file.parent.mkdir(parents=True, exist_ok=True)
        if not self.status_file.exists():
            self._save_data({"hosts": {}})

    def _load_data(self) -> Dict:
        """Load the data from the file"""
        try:
            with open(self.status_file, 'r', encoding='utf-8') as f:
                return json.load(f)
        except Exception:
            return {"hosts": {}}

    def _save_data(self, data: Dict):
        """Save the data to the file"""
        with open(self.status_file, 'w', encoding='utf-8') as f:
            json.dump(data, f, indent=2, default=str, ensure_ascii=False)

    def set_bootstrap_status(self, host_name: str, success: bool, details: Optional[str] = None) -> Dict:
        """Record the bootstrap status of a host"""
        data = self._load_data()

        data["hosts"][host_name] = {
            "bootstrap_ok": success,
            "bootstrap_date": datetime.now(timezone.utc).isoformat(),
            "details": details
        }

        self._save_data(data)
        return data["hosts"][host_name]

    def get_bootstrap_status(self, host_name: str) -> Dict:
        """Return the bootstrap status of a host"""
        data = self._load_data()
        return data.get("hosts", {}).get(host_name, {
            "bootstrap_ok": False,
            "bootstrap_date": None,
            "details": None
        })

    def get_all_status(self) -> Dict[str, Dict]:
        """Return the status of every host"""
        data = self._load_data()
        return data.get("hosts", {})

    def remove_host(self, host_name: str) -> bool:
        """Remove the status of a host"""
        data = self._load_data()
        if host_name in data.get("hosts", {}):
            del data["hosts"][host_name]
            self._save_data(data)
            return True
        return False


# ===== HOST STATUS SERVICE =====

class HostStatusService:
    def __init__(self, status_file: Path):
        self.status_file = status_file
        self._ensure_file()

    def _ensure_file(self):
        self.status_file.parent.mkdir(parents=True, exist_ok=True)
        if not self.status_file.exists():
            self._save_data({"hosts": {}})

    def _load_data(self) -> Dict:
        try:
            with open(self.status_file, 'r', encoding='utf-8') as f:
                return json.load(f)
        except Exception:
            return {"hosts": {}}

    def _save_data(self, data: Dict):
        with open(self.status_file, 'w', encoding='utf-8') as f:
            json.dump(data, f, indent=2, default=str, ensure_ascii=False)

    def set_status(self, host_name: str, status: str, last_seen: Optional[datetime], os_info: Optional[str]) -> Dict:
        data = self._load_data()
        data.setdefault("hosts", {})
        data["hosts"][host_name] = {
            "status": status,
            "last_seen": last_seen.isoformat() if isinstance(last_seen, datetime) else last_seen,
            "os": os_info,
        }
        self._save_data(data)
        return data["hosts"][host_name]

    def get_status(self, host_name: str) -> Dict:
        data = self._load_data()
        hosts = data.get("hosts", {})
        return hosts.get(host_name, {"status": "online", "last_seen": None, "os": None})

    def get_all_status(self) -> Dict[str, Dict]:
        data = self._load_data()
        return data.get("hosts", {})

    def remove_host(self, host_name: str) -> bool:
        data = self._load_data()
        hosts = data.get("hosts", {})
        if host_name in hosts:
            del hosts[host_name]
            data["hosts"] = hosts
            self._save_data(data)
            return True
        return False


# Global service instances
task_log_service = TaskLogService(DIR_LOGS_TASKS)
adhoc_history_service = AdHocHistoryService(ADHOC_HISTORY_FILE)
bootstrap_status_service = BootstrapStatusService(BOOTSTRAP_STATUS_FILE)
host_status_service = HostStatusService(HOST_STATUS_FILE)


class WebSocketManager:
    def __init__(self):
        self.active_connections: List[WebSocket] = []
        self.lock = Lock()

    async def connect(self, websocket: WebSocket):
        await websocket.accept()
        with self.lock:
            self.active_connections.append(websocket)

    def disconnect(self, websocket: WebSocket):
        with self.lock:
            if websocket in self.active_connections:
                self.active_connections.remove(websocket)

    async def broadcast(self, message: dict):
        # Snapshot the list under the lock, but never hold a threading.Lock
        # across an await: that would block the event loop.
        with self.lock:
            connections = list(self.active_connections)

        disconnected = []
        for connection in connections:
            try:
                await connection.send_json(message)
            except Exception:
                disconnected.append(connection)

        # Clean up the dead connections
        with self.lock:
            for conn in disconnected:
                if conn in self.active_connections:
                    self.active_connections.remove(conn)


# Global WebSocket manager instance
ws_manager = WebSocketManager()
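
# Illustrative sketch (assumption: the actual /ws route is defined later in
# this file; this only shows how ws_manager is meant to be used):
#
#     @app.websocket("/ws")
#     async def websocket_endpoint(websocket: WebSocket):
#         await ws_manager.connect(websocket)
#         try:
#             while True:
#                 await websocket.receive_text()  # keep the connection open
#         except WebSocketDisconnect:
#             ws_manager.disconnect(websocket)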


# Ansible service
class AnsibleService:
    """Service that runs Ansible playbooks"""

    def __init__(self, ansible_dir: Path):
        self.ansible_dir = ansible_dir
        self.playbooks_dir = ansible_dir / "playbooks"
        self.inventory_path = ansible_dir / "inventory" / "hosts.yml"
        self._inventory_cache: Optional[Dict] = None

    def get_playbooks(self) -> List[Dict[str, Any]]:
        """List the available playbooks with their metadata (category/subcategory).

        The metadata is read preferably from play['vars'] to stay compatible
        with Ansible syntax (category/subcategory are not Play keywords).
        """
        playbooks = []
        if self.playbooks_dir.exists():
            for pb in self.playbooks_dir.glob("*.yml"):
                # Collect file information
                stat = pb.stat()
                playbook_info = {
                    "name": pb.stem,
                    "filename": pb.name,
                    "path": str(pb),
                    "category": "general",
                    "subcategory": "other",
                    "size": stat.st_size,
                    "modified": datetime.fromtimestamp(stat.st_mtime, tz=timezone.utc).isoformat()
                }
                # Extract category/subcategory from the playbook
                try:
                    with open(pb, 'r', encoding='utf-8') as f:
                        content = yaml.safe_load(f)
                    if content and isinstance(content, list) and len(content) > 0:
                        play = content[0]
                        vars_ = play.get('vars', {}) or {}

                        # Read category with fallback: play first, then vars
                        if 'category' in play:
                            playbook_info['category'] = play['category']
                        elif 'category' in vars_:
                            playbook_info['category'] = vars_['category']

                        # Read subcategory with the same fallback
                        if 'subcategory' in play:
                            playbook_info['subcategory'] = play['subcategory']
                        elif 'subcategory' in vars_:
                            playbook_info['subcategory'] = vars_['subcategory']

                        if 'name' in play:
                            playbook_info['description'] = play['name']
                except Exception:
                    # Ignore per-file parsing errors so one bad playbook
                    # does not break the whole list.
                    pass
                playbooks.append(playbook_info)
        return playbooks

    def get_playbook_categories(self) -> Dict[str, List[str]]:
        """Return the playbook categories and subcategories"""
        categories = {}
        for pb in self.get_playbooks():
            cat = pb.get('category', 'general')
            subcat = pb.get('subcategory', 'other')
            if cat not in categories:
                categories[cat] = []
            if subcat not in categories[cat]:
                categories[cat].append(subcat)
        return categories

    def load_inventory(self) -> Dict:
        """Load the Ansible inventory from the YAML file"""
        if self._inventory_cache:
            return self._inventory_cache

        if not self.inventory_path.exists():
            return {}

        with open(self.inventory_path, 'r') as f:
            self._inventory_cache = yaml.safe_load(f) or {}
        return self._inventory_cache

    def get_hosts_from_inventory(self, group_filter: Optional[str] = None) -> List[AnsibleInventoryHost]:
        """Extract the list of hosts from the inventory, without duplicates.

        Args:
            group_filter: If given, only keep hosts that belong to this group
        """
        inventory = self.load_inventory()
        # Use a dict to track unique hosts and accumulate their groups
        hosts_dict: Dict[str, AnsibleInventoryHost] = {}

        def extract_hosts(data: Dict, current_group: str = ""):
            if not isinstance(data, dict):
                return

            # Collect the direct hosts
            if 'hosts' in data:
                for host_name, host_data in data['hosts'].items():
                    host_data = host_data or {}

                    if host_name in hosts_dict:
                        # Host already known: record the extra group
                        if current_group and current_group not in hosts_dict[host_name].groups:
                            hosts_dict[host_name].groups.append(current_group)
                    else:
                        # New host
                        hosts_dict[host_name] = AnsibleInventoryHost(
                            name=host_name,
                            ansible_host=host_data.get('ansible_host', host_name),
                            group=current_group,
                            groups=[current_group] if current_group else [],
                            vars=host_data
                        )

            # Recurse into the children (sub-groups)
            if 'children' in data:
                for child_name, child_data in data['children'].items():
                    extract_hosts(child_data, child_name)

        extract_hosts(inventory.get('all', {}))

        # Convert to a list
        hosts = list(hosts_dict.values())

        # Apply the group filter if one was given
        if group_filter and group_filter != 'all':
            hosts = [h for h in hosts if group_filter in h.groups]

        return hosts
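
    # Example (illustrative): given an inventory such as
    #
    #   all:
    #     children:
    #       env_homelab:
    #         hosts:
    #           nas.domain.home: {ansible_host: 192.168.1.10}
    #       role_proxmox:
    #         hosts:
    #           nas.domain.home:
    #
    # get_hosts_from_inventory() returns a single AnsibleInventoryHost named
    # "nas.domain.home" whose groups list is ["env_homelab", "role_proxmox"].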

    def invalidate_cache(self):
        """Invalidate the inventory cache to force a reload"""
        self._inventory_cache = None

    def get_groups(self) -> List[str]:
        """Extract the list of groups from the inventory"""
        inventory = self.load_inventory()
        groups = set()

        def extract_groups(data: Dict, parent: str = ""):
            if not isinstance(data, dict):
                return
            if 'children' in data:
                for child_name in data['children'].keys():
                    groups.add(child_name)
                    extract_groups(data['children'][child_name], child_name)

        extract_groups(inventory.get('all', {}))
        return sorted(list(groups))

    def get_env_groups(self) -> List[str]:
        """Return only the environment groups (prefixed with env_)"""
        return [g for g in self.get_groups() if g.startswith('env_')]

    def get_role_groups(self) -> List[str]:
        """Return only the role groups (prefixed with role_)"""
        return [g for g in self.get_groups() if g.startswith('role_')]

    def _save_inventory(self, inventory: Dict):
        """Save the inventory to the YAML file"""
        # Make a backup before changing anything
        backup_path = self.inventory_path.with_suffix('.yml.bak')
        if self.inventory_path.exists():
            shutil.copy2(self.inventory_path, backup_path)

        with open(self.inventory_path, 'w', encoding='utf-8') as f:
            yaml.dump(inventory, f, default_flow_style=False, allow_unicode=True, sort_keys=False)

        # Invalidate the cache
        self.invalidate_cache()

    def add_host_to_inventory(self, hostname: str, env_group: str, role_groups: List[str], ansible_host: Optional[str] = None) -> bool:
        """Add a host to the Ansible inventory

        Args:
            hostname: Host name (e.g. server.domain.home)
            env_group: Environment group (e.g. env_homelab)
            role_groups: List of role groups (e.g. ['role_proxmox', 'role_sbc'])
            ansible_host: IP address or hostname for ansible_host (optional)

        Returns:
            True if the host was added
        """
        inventory = self.load_inventory()

        # Make sure the structure exists
        if 'all' not in inventory:
            inventory['all'] = {}
        if 'children' not in inventory['all']:
            inventory['all']['children'] = {}

        children = inventory['all']['children']

        # Add to the environment group
        if env_group not in children:
            children[env_group] = {'hosts': {}}
        if 'hosts' not in children[env_group]:
            children[env_group]['hosts'] = {}

        # Define the host variables
        host_vars = None
        if ansible_host and ansible_host != hostname:
            host_vars = {'ansible_host': ansible_host}

        children[env_group]['hosts'][hostname] = host_vars

        # Add to the role groups
        for role_group in role_groups:
            if role_group not in children:
                children[role_group] = {'hosts': {}}
            if 'hosts' not in children[role_group]:
                children[role_group]['hosts'] = {}
            children[role_group]['hosts'][hostname] = None

        self._save_inventory(inventory)
        return True

    def remove_host_from_inventory(self, hostname: str) -> bool:
        """Remove a host from every group in the inventory

        Args:
            hostname: Name of the host to remove

        Returns:
            True if the host was removed
        """
        inventory = self.load_inventory()

        if 'all' not in inventory or 'children' not in inventory['all']:
            return False

        children = inventory['all']['children']
        removed = False

        # Walk every group and remove the host
        for group_name, group_data in children.items():
            if isinstance(group_data, dict) and 'hosts' in group_data:
                if hostname in group_data['hosts']:
                    del group_data['hosts'][hostname]
                    removed = True

        if removed:
            self._save_inventory(inventory)

            # Also drop the persisted statuses (bootstrap + health)
            bootstrap_status_service.remove_host(hostname)
            try:
                host_status_service.remove_host(hostname)
            except Exception:
                pass

        return removed

    def update_host_groups(self, hostname: str, env_group: Optional[str] = None, role_groups: Optional[List[str]] = None, ansible_host: Optional[str] = None) -> bool:
        """Update the groups of an existing host

        Args:
            hostname: Name of the host to update
            env_group: New environment group (None = no change)
            role_groups: New list of role groups (None = no change)
            ansible_host: New ansible_host address (None = no change)

        Returns:
            True if the update succeeded
        """
        inventory = self.load_inventory()

        if 'all' not in inventory or 'children' not in inventory['all']:
            return False

        children = inventory['all']['children']

        # Find the current environment group
        current_env_group = None
        current_role_groups = []
        current_ansible_host = None

        for group_name, group_data in children.items():
            if isinstance(group_data, dict) and 'hosts' in group_data:
                if hostname in group_data['hosts']:
                    if group_name.startswith('env_'):
                        current_env_group = group_name
                        # Fetch ansible_host if it is set
                        host_vars = group_data['hosts'][hostname]
                        if isinstance(host_vars, dict) and 'ansible_host' in host_vars:
                            current_ansible_host = host_vars['ansible_host']
                    elif group_name.startswith('role_'):
                        current_role_groups.append(group_name)

        if not current_env_group:
            return False  # Host not found

        # Apply the changes
        new_env_group = env_group if env_group else current_env_group
        new_role_groups = role_groups if role_groups is not None else current_role_groups
        new_ansible_host = ansible_host if ansible_host else current_ansible_host

        # Remove the host from all of its current groups
        for group_name, group_data in children.items():
            if isinstance(group_data, dict) and 'hosts' in group_data:
                if hostname in group_data['hosts']:
                    del group_data['hosts'][hostname]

        # Add to the new environment group
        if new_env_group not in children:
            children[new_env_group] = {'hosts': {}}
        if 'hosts' not in children[new_env_group]:
            children[new_env_group]['hosts'] = {}

        host_vars = None
        if new_ansible_host and new_ansible_host != hostname:
            host_vars = {'ansible_host': new_ansible_host}
        children[new_env_group]['hosts'][hostname] = host_vars

        # Add to the new role groups
        for role_group in new_role_groups:
            if role_group not in children:
                children[role_group] = {'hosts': {}}
            if 'hosts' not in children[role_group]:
                children[role_group]['hosts'] = {}
            children[role_group]['hosts'][hostname] = None

        self._save_inventory(inventory)
        return True

    def host_exists(self, hostname: str) -> bool:
        """Check whether a host exists in the inventory"""
        hosts = self.get_hosts_from_inventory()
        return any(h.name == hostname for h in hosts)

    def group_exists(self, group_name: str) -> bool:
        """Check whether a group exists in the inventory"""
        return group_name in self.get_groups()

    def add_group(self, group_name: str) -> bool:
        """Add a new group to the inventory

        Args:
            group_name: Group name (must start with env_ or role_)

        Returns:
            True if the group was added
        """
        if self.group_exists(group_name):
            return False  # Group already exists

        inventory = self.load_inventory()

        # Make sure the structure exists
        if 'all' not in inventory:
            inventory['all'] = {}
        if 'children' not in inventory['all']:
            inventory['all']['children'] = {}

        # Add the empty group
        inventory['all']['children'][group_name] = {'hosts': {}}

        self._save_inventory(inventory)
        return True

    def rename_group(self, old_name: str, new_name: str) -> bool:
        """Rename a group in the inventory

        Args:
            old_name: Current group name
            new_name: New group name

        Returns:
            True if the rename succeeded
        """
        if not self.group_exists(old_name):
            return False  # Source group does not exist

        if self.group_exists(new_name):
            return False  # Target group already exists

        inventory = self.load_inventory()
        children = inventory.get('all', {}).get('children', {})

        if old_name not in children:
            return False

        # Move the group data to the new name
        children[new_name] = children[old_name]
        del children[old_name]

        self._save_inventory(inventory)
        return True

    def delete_group(self, group_name: str, move_hosts_to: Optional[str] = None) -> Dict[str, Any]:
        """Delete a group from the inventory

        Args:
            group_name: Name of the group to delete
            move_hosts_to: Group to move the hosts to (optional)

        Returns:
            Dict describing the outcome of the operation
        """
        if not self.group_exists(group_name):
            return {"success": False, "error": "Group not found"}

        inventory = self.load_inventory()
        children = inventory.get('all', {}).get('children', {})

        if group_name not in children:
            return {"success": False, "error": "Group not found in children"}

        group_data = children[group_name]
        hosts_in_group = list(group_data.get('hosts', {}).keys()) if group_data else []

        # If the group has hosts and they should be moved
        if hosts_in_group and move_hosts_to:
            if not self.group_exists(move_hosts_to) and move_hosts_to != group_name:
                # Create the target group if it does not exist
                children[move_hosts_to] = {'hosts': {}}

            if move_hosts_to in children:
                if 'hosts' not in children[move_hosts_to]:
                    children[move_hosts_to]['hosts'] = {}

                # Move the hosts
                for hostname in hosts_in_group:
                    host_vars = group_data['hosts'].get(hostname)
                    children[move_hosts_to]['hosts'][hostname] = host_vars

        # Delete the group
        del children[group_name]

        self._save_inventory(inventory)
        return {
            "success": True,
            "hosts_affected": hosts_in_group,
            "hosts_moved_to": move_hosts_to if hosts_in_group and move_hosts_to else None
        }

    def get_group_hosts(self, group_name: str) -> List[str]:
        """Return the list of hosts in a group

        Args:
            group_name: Group name

        Returns:
            List of host names
        """
        inventory = self.load_inventory()
        children = inventory.get('all', {}).get('children', {})

        if group_name not in children:
            return []

        group_data = children[group_name]
        if not group_data or 'hosts' not in group_data:
            return []

        return list(group_data['hosts'].keys())

    async def execute_playbook(
        self,
        playbook: str,
        target: str = "all",
        extra_vars: Optional[Dict[str, Any]] = None,
        check_mode: bool = False,
        verbose: bool = False
    ) -> Dict[str, Any]:
        """Run an Ansible playbook"""
        # Resolve the playbook path.
        # Accept a name with an extension or one without (e.g. "health-check")
        playbook_path = self.playbooks_dir / playbook

        # If the file does not exist as given, try the common extensions
        if not playbook_path.exists():
            pb_name = Path(playbook).name  # strip any leading path components
            # If no extension was given, try .yml then .yaml
            if not Path(pb_name).suffix:
                for ext in (".yml", ".yaml"):
                    candidate = self.playbooks_dir / f"{pb_name}{ext}"
                    if candidate.exists():
                        playbook_path = candidate
                        break

        if not playbook_path.exists():
            # At this point no matching file was found
            raise FileNotFoundError(f"Playbook not found: {playbook}")

        # Build the ansible-playbook command
        cmd = [
            "ansible-playbook",
            str(playbook_path),
            "-i", str(self.inventory_path),
            "--limit", target
        ]

        if check_mode:
            cmd.append("--check")

        if verbose:
            cmd.append("-v")

        if extra_vars:
            cmd.extend(["--extra-vars", json.dumps(extra_vars)])

        private_key = find_ssh_private_key()
        if private_key:
            cmd.extend(["--private-key", private_key])

        if SSH_USER:
            cmd.extend(["-u", SSH_USER])

        start_time = perf_counter()

        try:
            # Run the command
            process = await asyncio.create_subprocess_exec(
                *cmd,
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE,
                cwd=str(self.ansible_dir)
            )

            stdout, stderr = await process.communicate()
            execution_time = perf_counter() - start_time

            return {
                "success": process.returncode == 0,
                "return_code": process.returncode,
                "stdout": stdout.decode('utf-8', errors='replace'),
                "stderr": stderr.decode('utf-8', errors='replace'),
                "execution_time": round(execution_time, 2),
                "command": " ".join(cmd)
            }
        except FileNotFoundError:
            return {
                "success": False,
                "return_code": -1,
                "stdout": "",
                "stderr": "ansible-playbook not found. Check that Ansible is installed.",
                "execution_time": 0,
                "command": " ".join(cmd)
            }
        except Exception as e:
            return {
                "success": False,
                "return_code": -1,
                "stdout": "",
                "stderr": str(e),
                "execution_time": perf_counter() - start_time,
                "command": " ".join(cmd)
            }


# Global Ansible service instance
ansible_service = AnsibleService(ANSIBLE_DIR)
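

# Illustrative sketch (the playbook and group names are assumptions): running
# a playbook through the service above from async code.
async def _example_run_health_check() -> Dict[str, Any]:
    return await ansible_service.execute_playbook(
        "health-check",        # resolved to playbooks/health-check.yml
        target="env_homelab",  # hypothetical group name
        check_mode=True,       # dry run (--check)
    )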


# ===== SSH BOOTSTRAP SERVICE =====

class BootstrapRequest(BaseModel):
    """Bootstrap request for a host"""
    host: str = Field(..., description="IP address or hostname of the host")
    root_password: str = Field(..., description="Root password for the initial connection")
    automation_user: str = Field(default="automation", description="Name of the automation user to create")


def find_ssh_private_key() -> Optional[str]:
    """Find an available SSH private key by inspecting several directories."""
    candidate_dirs = []
    env_path = Path(SSH_KEY_PATH)
    candidate_dirs.append(env_path.parent)
    candidate_dirs.append(Path("/app/ssh_keys"))
    candidate_dirs.append(Path.home() / ".ssh")

    seen = set()
    key_paths: List[str] = []

    for directory in candidate_dirs:
        if not directory or not directory.exists():
            continue
        for name in [
            env_path.name,
            "id_automation_ansible",
            "id_rsa",
            "id_ed25519",
            "id_ecdsa",
        ]:
            path = directory / name
            if str(path) not in seen:
                seen.add(str(path))
                key_paths.append(str(path))
        # Also pick up every extensionless key file dynamically
        for file in directory.iterdir():
            if file.is_file() and not file.suffix and not file.name.startswith("known_hosts"):
                if str(file) not in seen:
                    seen.add(str(file))
                    key_paths.append(str(file))

    for key_path in key_paths:
        if key_path and Path(key_path).exists():
            return key_path

    return None


def run_ssh_command(
    host: str,
    command: str,
    ssh_user: str = "root",
    ssh_password: Optional[str] = None,
    timeout: int = 60
) -> tuple:
    """Run an SSH command on a remote host.

    Returns:
        tuple: (return_code, stdout, stderr)
    """
    ssh_cmd = ["ssh"]

    # Common SSH options
    ssh_opts = [
        "-o", "StrictHostKeyChecking=no",
        "-o", "UserKnownHostsFile=/dev/null",
        "-o", "ConnectTimeout=10",
        "-o", "BatchMode=no" if ssh_password else "BatchMode=yes",
    ]

    # Without a password, fall back to the SSH key
    if not ssh_password:
        private_key = find_ssh_private_key()
        if private_key:
            ssh_opts.extend(["-i", private_key])

    ssh_cmd.extend(ssh_opts)
    ssh_cmd.append(f"{ssh_user}@{host}")
    ssh_cmd.append(command)

    try:
        if ssh_password:
            # Use sshpass for password authentication
            full_cmd = ["sshpass", "-p", ssh_password] + ssh_cmd
        else:
            full_cmd = ssh_cmd

        result = subprocess.run(
            full_cmd,
            capture_output=True,
            text=True,
            timeout=timeout
        )
        return result.returncode, result.stdout, result.stderr
    except subprocess.TimeoutExpired:
        return -1, "", f"Timed out after {timeout} seconds"
    except FileNotFoundError as e:
        if "sshpass" in str(e):
            return -1, "", "sshpass is not installed. Install it with: apt install sshpass"
        return -1, "", str(e)
    except Exception as e:
        return -1, "", str(e)
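

# Illustrative usage sketch for run_ssh_command (the host address is made up):
def _example_uptime_over_ssh() -> str:
    rc, out, err = run_ssh_command("192.168.1.10", "uptime", ssh_user="root", timeout=15)
    return out if rc == 0 else f"ssh failed ({rc}): {err}"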


def bootstrap_host(host: str, root_password: str, automation_user: str = "automation") -> CommandResult:
    """Prepare a host for Ansible (user creation, SSH key, sudo, python3) on Debian/Alpine/FreeBSD.

    Streams a complete shell script to the remote shell (the authorized_keys
    file itself is written via a heredoc) to avoid quoting problems.
    """
    import logging
    logger = logging.getLogger("bootstrap")

    # Look for the public key in several possible locations
    primary_dirs = [
        Path(SSH_KEY_PATH).parent,
        Path("/app/ssh_keys"),
        Path.home() / ".ssh",
    ]
    ssh_dir = primary_dirs[0]
    pub_paths = [
        SSH_KEY_PATH + ".pub",
        "/app/ssh_keys/id_rsa.pub",
        "/app/ssh_keys/id_ed25519.pub",
        "/app/ssh_keys/id_ecdsa.pub",
        "/app/ssh_keys/id_automation_ansible.pub",
    ]

    # Dynamically add every .pub key found in the SSH directories
    for directory in primary_dirs:
        if not directory.exists():
            continue
        for f in directory.iterdir():
            if f.is_file() and f.suffix == ".pub" and str(f) not in pub_paths:
                pub_paths.append(str(f))

    logger.info(f"SSH_KEY_PATH = {SSH_KEY_PATH}")
    logger.info(f"Recherche de clé publique dans: {pub_paths}")

    pub_key = None
    pub_path_used = None

    for pub_path in pub_paths:
        try:
            if Path(pub_path).exists():
                pub_key = Path(pub_path).read_text(encoding="utf-8").strip()
                if pub_key:
                    pub_path_used = pub_path
                    logger.info(f"Clé publique trouvée: {pub_path}")
                    break
        except Exception as e:
            logger.warning(f"Erreur lecture {pub_path}: {e}")
            continue

    if not pub_key:
        # List the available files to help debugging
        ssh_dir = Path(SSH_KEY_PATH).parent
        available_files = []
        if ssh_dir.exists():
            available_files = [f.name for f in ssh_dir.iterdir()]

        raise HTTPException(
            status_code=500,
            detail=f"Clé publique SSH non trouvée. Chemins testés: {pub_paths}. Fichiers disponibles dans {ssh_dir}: {available_files}",
        )

    # Complete, robust shell script with detailed logging
    bootstrap_script = f"""#!/bin/sh
set -e

AUT_USER="{automation_user}"

echo "=== Bootstrap Ansible Host ==="
echo "User: $AUT_USER"
echo ""

# 1) OS detection
if command -v apk >/dev/null 2>&1; then
    OS_TYPE="alpine"
    echo "[1/7] OS détecté: Alpine Linux"
elif [ "$(uname -s 2>/dev/null)" = "FreeBSD" ] || \
     command -v pkg >/dev/null 2>&1 || \
     ( [ -f /etc/os-release ] && grep -qi 'ID=freebsd' /etc/os-release ); then
    OS_TYPE="freebsd"
    echo "[1/7] OS détecté: FreeBSD"
else
    OS_TYPE="debian"
    echo "[1/7] OS détecté: Debian-like"
fi

# 2) Check / prepare the user
echo "[2/7] Vérification utilisateur/groupe..."
if id "$AUT_USER" >/dev/null 2>&1; then
    echo " - Utilisateur déjà existant: $AUT_USER (aucune suppression)"
else
    echo " - Utilisateur inexistant, il sera créé"
fi

# 3) Create the user (idempotent)
echo "[3/7] Création utilisateur $AUT_USER..."
if id "$AUT_USER" >/dev/null 2>&1; then
    echo " - Utilisateur déjà présent, réutilisation"
elif [ "$OS_TYPE" = "alpine" ]; then
    adduser -D "$AUT_USER"
    echo " - Utilisateur créé (Alpine: adduser -D)"
elif [ "$OS_TYPE" = "freebsd" ]; then
    pw useradd "$AUT_USER" -m -s /bin/sh
    echo " - Utilisateur créé (FreeBSD: pw useradd)"
else
    useradd -m -s /bin/bash "$AUT_USER" || useradd -m -s /bin/sh "$AUT_USER"
    echo " - Utilisateur créé (Debian: useradd -m)"
fi

# 3b) Make sure the account is not locked
echo " - Vérification du verrouillage du compte..."
if command -v passwd >/dev/null 2>&1; then
    passwd -u "$AUT_USER" 2>/dev/null || true
fi
if command -v usermod >/dev/null 2>&1; then
    usermod -U "$AUT_USER" 2>/dev/null || true
fi

# 4) Install the SSH key
echo "[4/7] Configuration clé SSH..."
HOME_DIR=$(getent passwd "$AUT_USER" | cut -d: -f6)
if [ -z "$HOME_DIR" ]; then
    HOME_DIR="/home/$AUT_USER"
fi
echo " - HOME_DIR: $HOME_DIR"

mkdir -p "$HOME_DIR/.ssh"
chown "$AUT_USER":"$AUT_USER" "$HOME_DIR/.ssh"
chmod 700 "$HOME_DIR/.ssh"
echo " - Répertoire .ssh créé et configuré"

cat > "$HOME_DIR/.ssh/authorized_keys" << 'SSHKEY_EOF'
{pub_key}
SSHKEY_EOF

chown "$AUT_USER":"$AUT_USER" "$HOME_DIR/.ssh/authorized_keys"
chmod 600 "$HOME_DIR/.ssh/authorized_keys"
echo " - Clé publique installée dans authorized_keys"

if [ -s "$HOME_DIR/.ssh/authorized_keys" ]; then
    KEY_COUNT=$(wc -l < "$HOME_DIR/.ssh/authorized_keys")
    echo " - Vérification: $KEY_COUNT clé(s) dans authorized_keys"
else
    echo " - ERREUR: authorized_keys vide ou absent!"
    exit 1
fi

# 5) Install sudo
echo "[5/7] Installation sudo..."
if command -v sudo >/dev/null 2>&1; then
    echo " - sudo déjà installé"
else
    if [ "$OS_TYPE" = "alpine" ]; then
        apk add --no-cache sudo
        echo " - sudo installé (apk)"
    elif [ "$OS_TYPE" = "freebsd" ]; then
        pkg install -y sudo
        echo " - sudo installé (pkg)"
    else
        apt-get update -qq && apt-get install -y sudo
        echo " - sudo installé (apt)"
    fi
fi

# 6) Configure sudoers
echo "[6/7] Configuration sudoers..."
if [ ! -d /etc/sudoers.d ]; then
    mkdir -p /etc/sudoers.d
    chmod 750 /etc/sudoers.d 2>/dev/null || true
    echo " - Répertoire /etc/sudoers.d créé"
fi
echo "$AUT_USER ALL=(ALL) NOPASSWD: ALL" > /etc/sudoers.d/automation
chmod 440 /etc/sudoers.d/automation
echo " - Sudoers configuré: /etc/sudoers.d/automation"

# 7) Install Python3
echo "[7/7] Installation Python3..."
if command -v python3 >/dev/null 2>&1; then
    PYTHON_VERSION=$(python3 --version 2>&1)
    echo " - Python3 déjà installé: $PYTHON_VERSION"
else
    if [ "$OS_TYPE" = "alpine" ]; then
        apk add --no-cache python3
        echo " - Python3 installé (apk)"
    elif [ "$OS_TYPE" = "freebsd" ]; then
        pkg install -y python3
        echo " - Python3 installé (pkg)"
    else
        apt-get update -qq && apt-get install -y python3
        echo " - Python3 installé (apt)"
    fi
fi

echo ""
echo "=== Bootstrap terminé avec succès ==="
echo "Utilisateur: $AUT_USER"
echo "HOME: $HOME_DIR"
echo "SSH: $HOME_DIR/.ssh/authorized_keys"
echo "Sudo: /etc/sudoers.d/automation"
"""

    # Send the script in a way that is compatible with every shell
    lines = bootstrap_script.splitlines()

    def _sh_single_quote(s: str) -> str:
        """Quote a string for a POSIX shell using single quotes."""
        return "'" + s.replace("'", "'\"'\"'") + "'"

    quoted_lines = " ".join(_sh_single_quote(line) for line in lines)
    remote_cmd = f"printf '%s\\n' {quoted_lines} | sh"
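
    # Sketch of the quoting above (illustrative): _sh_single_quote("echo 'hi'")
    # yields 'echo '"'"'hi'"'"'' so the assembled remote command looks like:
    #   printf '%s\n' '<line1>' '<line2>' ... | sh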

    rc, out, err = run_ssh_command(
        host,
        remote_cmd,
        ssh_user="root",
        ssh_password=root_password,
    )

    if rc != 0:
        raise HTTPException(
            status_code=500,
            detail={
                "status": "error",
                "return_code": rc,
                "stdout": out,
                "stderr": err,
            },
        )

    # Verification: test key-based SSH login with the automation user
    verify_rc, verify_out, verify_err = run_ssh_command(
        host,
        "echo 'ssh_key_ok'",
        ssh_user=automation_user,
        ssh_password=None,
    )

    if verify_rc != 0:
        combined_stdout = (out or "") + f"\n\n[SSH VERIFY] Échec de la connexion par clé pour {automation_user}@{host}\n" + (verify_out or "")
        combined_stderr = (err or "") + "\n\n[SSH VERIFY] " + (verify_err or "Aucune erreur détaillée")
        raise HTTPException(
            status_code=500,
            detail={
                "status": "error",
                "return_code": verify_rc,
                "stdout": combined_stdout,
                "stderr": combined_stderr,
            },
        )

    # Full success
    final_stdout = (out or "") + f"\n\n[SSH VERIFY] Connexion par clé OK pour {automation_user}@{host}"
    return CommandResult(
        status="ok",
        return_code=0,
        stdout=final_stdout,
        stderr=err,
    )


# Hybrid database: hosts come from Ansible, tasks/logs live in memory
class HybridDB:
    """Database that loads hosts from the Ansible inventory."""

    def __init__(self, ansible_svc: AnsibleService):
        self.ansible_service = ansible_svc
        self._hosts_cache: Optional[List[Host]] = None
        self._hosts_cache_time: float = 0
        self._cache_ttl: float = 60  # 60-second cache
        # Runtime host status (in memory), reloaded from the persistent JSON file
        self._host_runtime_status: Dict[str, Dict[str, Any]] = {}
        try:
            persisted_hosts = host_status_service.get_all_status()
            for host_name, info in persisted_hosts.items():
                last_seen_raw = info.get("last_seen")
                last_seen_dt: Optional[datetime] = None
                if isinstance(last_seen_raw, str):
                    try:
                        last_seen_dt = datetime.fromisoformat(last_seen_raw.replace("Z", "+00:00"))
                    except Exception:
                        last_seen_dt = None
                elif isinstance(last_seen_raw, datetime):
                    last_seen_dt = last_seen_raw

                self._host_runtime_status[host_name] = {
                    "status": info.get("status", "online"),
                    "last_seen": last_seen_dt,
                    "os": info.get("os"),
                }
        except Exception:
            # If the file cannot be read, start from a clean in-memory state
            self._host_runtime_status = {}

        # Tasks and logs kept in memory (for the lifetime of the process)
        self.tasks: List[Task] = []

        self.logs: List[LogEntry] = [
            LogEntry(id=1, timestamp=datetime.now(timezone.utc), level="INFO",
                     message="Dashboard démarré - Inventaire Ansible chargé")
        ]

        self._id_counters = {"hosts": 100, "tasks": 1, "logs": 2}

    @property
    def hosts(self) -> List[Host]:
        """Load hosts from the Ansible inventory, with caching."""
        current_time = time()

        # Return the cache while it is still valid
        if self._hosts_cache and (current_time - self._hosts_cache_time) < self._cache_ttl:
            return self._hosts_cache

        # Reload from Ansible
        self._hosts_cache = self._load_hosts_from_ansible()
        self._hosts_cache_time = current_time
        return self._hosts_cache

    def _load_hosts_from_ansible(self) -> List[Host]:
        """Convert the Ansible inventory into a list of hosts (without duplicates)."""
        hosts = []
        ansible_hosts = self.ansible_service.get_hosts_from_inventory()

        # Load every bootstrap status
        all_bootstrap_status = bootstrap_status_service.get_all_status()

        for idx, ah in enumerate(ansible_hosts, start=1):
            # Derive the primary group from the host's groups
            primary_group = ah.groups[0] if ah.groups else "unknown"

            # Fetch the bootstrap status for this host
            bootstrap_info = all_bootstrap_status.get(ah.name, {})
            bootstrap_ok = bootstrap_info.get("bootstrap_ok", False)
            bootstrap_date_str = bootstrap_info.get("bootstrap_date")
            bootstrap_date = None
            if bootstrap_date_str:
                try:
                    bootstrap_date = datetime.fromisoformat(bootstrap_date_str.replace("Z", "+00:00"))
                except Exception:
                    pass

            runtime_status = self._host_runtime_status.get(ah.name, {})
            status = runtime_status.get("status", "online")
            last_seen = runtime_status.get("last_seen")
            os_label = runtime_status.get("os", f"Linux ({primary_group})")

            host = Host(
                id=idx,
                name=ah.name,
                ip=ah.ansible_host,
                status=status,
                os=os_label,
                last_seen=last_seen,
                groups=ah.groups,  # All of the host's groups
                bootstrap_ok=bootstrap_ok,
                bootstrap_date=bootstrap_date
            )
            hosts.append(host)

        return hosts

    def refresh_hosts(self):
        """Force a reload of the hosts from Ansible."""
        self._hosts_cache = None
        return self.hosts

    def update_host_status(self, host_name: str, status: str, os_info: Optional[str] = None):
        """Update a host's status after a health check."""
        for host in self.hosts:
            if host.name == host_name:
                host.status = status
                host.last_seen = datetime.now(timezone.utc)
                if os_info:
                    host.os = os_info
                self._host_runtime_status[host_name] = {
                    "status": host.status,
                    "last_seen": host.last_seen,
                    "os": host.os,
                }
                # Persist to the JSON file shared with Ansible
                try:
                    host_status_service.set_status(host_name, host.status, host.last_seen, host.os)
                except Exception:
                    # Do not break execution if persistence fails
                    pass
                break

    @property
    def metrics(self) -> SystemMetrics:
        """Compute real-time metrics based on the task log files."""
        hosts = self.hosts

        # Use the statistics from the task log files
        task_stats = task_log_service.get_stats()
        total_tasks = task_stats.get("total", 0)
        completed_tasks = task_stats.get("completed", 0)
        failed_tasks = task_stats.get("failed", 0)
        total_finished = completed_tasks + failed_tasks

        return SystemMetrics(
            online_hosts=len([h for h in hosts if h.status == "online"]),
            total_tasks=total_tasks,
            success_rate=round((completed_tasks / total_finished * 100) if total_finished > 0 else 100, 1),
            uptime=99.9,
            cpu_usage=0,
            memory_usage=0,
            disk_usage=0
        )

    def get_next_id(self, collection: str) -> int:
        self._id_counters[collection] += 1
        return self._id_counters[collection] - 1


# Global instance of the hybrid database
db = HybridDB(ansible_service)

# FastAPI dependencies
async def verify_api_key(api_key: str = Depends(api_key_header)) -> bool:
    """Validate the provided API key."""
    if not api_key or api_key != API_KEY:
        raise HTTPException(status_code=401, detail="Clé API invalide ou manquante")
    return True
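
# Hedged usage sketch (the port and key are assumptions: uvicorn's default
# port and the development API key; adjust both to your deployment):
#   curl -H "X-API-Key: dev-key-12345" http://localhost:8000/api/hosts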

# API routes
@app.get("/", response_class=HTMLResponse)
async def root(request: Request):
    """Main dashboard page."""
    return FileResponse(BASE_DIR / "index.html")


@app.get("/api", response_class=HTMLResponse)
async def api_home(request: Request):
    """Landing page of the Homelab Dashboard API."""
    return """
    <!DOCTYPE html>
    <html lang="fr">
    <head>
        <meta charset="UTF-8">
        <meta name="viewport" content="width=device-width, initial-scale=1.0">
        <title>Homelab Dashboard API</title>
        <style>
            body { font-family: 'Inter', sans-serif; background: #0a0a0a; color: white; margin: 0; padding: 40px; }
            .container { max-width: 800px; margin: 0 auto; text-align: center; }
            .gradient-text { background: linear-gradient(135deg, #7c3aed 0%, #3b82f6 100%); -webkit-background-clip: text; -webkit-text-fill-color: transparent; }
            .card { background: rgba(42, 42, 42, 0.8); border: 1px solid rgba(255, 255, 255, 0.1); border-radius: 16px; padding: 24px; margin: 20px 0; }
            .btn { background: linear-gradient(135deg, #7c3aed 0%, #8b5cf6 100%); color: white; padding: 12px 24px; border: none; border-radius: 8px; text-decoration: none; display: inline-block; margin: 10px; transition: all 0.3s ease; }
            .btn:hover { transform: translateY(-2px); box-shadow: 0 10px 25px rgba(124, 58, 237, 0.3); }
        </style>
    </head>
    <body>
        <div class="container">
            <h1 class="gradient-text" style="font-size: 3rem; margin-bottom: 1rem;">Homelab Dashboard API</h1>
            <p style="font-size: 1.2rem; color: #a1a1aa; margin-bottom: 2rem;">
                API REST moderne pour la gestion automatique d'homelab
            </p>

            <div class="card">
                <h2 style="color: #7c3aed; margin-bottom: 1rem;">Documentation API</h2>
                <p style="margin-bottom: 1.5rem;">Explorez les endpoints disponibles et testez les fonctionnalités</p>
                <div>
                    <a href="/api/docs" class="btn">
                        <i class="fas fa-book"></i> Documentation Interactive
                    </a>
                    <a href="/api/redoc" class="btn">
                        <i class="fas fa-file-alt"></i> Documentation Alternative
                    </a>
                </div>
            </div>

            <div class="card">
                <h2 style="color: #7c3aed; margin-bottom: 1rem;">Endpoints Principaux</h2>
                <div style="text-align: left; max-width: 600px; margin: 0 auto;">
                    <div style="margin-bottom: 1rem;">
                        <strong style="color: #10b981;">GET</strong>
                        <code style="background: #1f2937; padding: 4px 8px; border-radius: 4px;">/api/hosts</code>
                        <span style="color: #a1a1aa;"> - Liste des hôtes</span>
                    </div>
                    <div style="margin-bottom: 1rem;">
                        <strong style="color: #3b82f6;">POST</strong>
                        <code style="background: #1f2937; padding: 4px 8px; border-radius: 4px;">/api/tasks</code>
                        <span style="color: #a1a1aa;"> - Créer une tâche</span>
                    </div>
                    <div style="margin-bottom: 1rem;">
                        <strong style="color: #10b981;">GET</strong>
                        <code style="background: #1f2937; padding: 4px 8px; border-radius: 4px;">/api/metrics</code>
                        <span style="color: #a1a1aa;"> - Métriques système</span>
                    </div>
                    <div style="margin-bottom: 1rem;">
                        <strong style="color: #f59e0b;">WS</strong>
                        <code style="background: #1f2937; padding: 4px 8px; border-radius: 4px;">/ws</code>
                        <span style="color: #a1a1aa;"> - WebSocket temps réel</span>
                    </div>
                </div>
            </div>

            <div style="margin-top: 2rem; color: #6b7280; font-size: 0.9rem;">
                <p>Version 1.0.0 | Développé avec FastAPI et technologies modernes</p>
            </div>
        </div>
    </body>
    </html>
    """

# ===== HOSTS ENDPOINTS - static routes first =====

@app.get("/api/hosts/groups")
async def get_host_groups(api_key_valid: bool = Depends(verify_api_key)):
    """Return the groups available for hosts (environments and roles)."""
    return {
        "env_groups": ansible_service.get_env_groups(),
        "role_groups": ansible_service.get_role_groups(),
        "all_groups": ansible_service.get_groups()
    }


# ===== GROUPS ENDPOINTS - manage environment and role groups =====

@app.get("/api/groups")
async def get_all_groups(api_key_valid: bool = Depends(verify_api_key)):
    """Return every group with its details."""
    env_groups = ansible_service.get_env_groups()
    role_groups = ansible_service.get_role_groups()

    groups = []
    for g in env_groups:
        hosts = ansible_service.get_group_hosts(g)
        groups.append({
            "name": g,
            "type": "env",
            "display_name": g.replace('env_', ''),
            "hosts_count": len(hosts),
            "hosts": hosts
        })

    for g in role_groups:
        hosts = ansible_service.get_group_hosts(g)
        groups.append({
            "name": g,
            "type": "role",
            "display_name": g.replace('role_', ''),
            "hosts_count": len(hosts),
            "hosts": hosts
        })

    return {
        "groups": groups,
        "env_count": len(env_groups),
        "role_count": len(role_groups)
    }


@app.get("/api/groups/{group_name}")
async def get_group_details(group_name: str, api_key_valid: bool = Depends(verify_api_key)):
    """Return the details of a specific group."""
    if not ansible_service.group_exists(group_name):
        raise HTTPException(status_code=404, detail=f"Groupe '{group_name}' non trouvé")

    hosts = ansible_service.get_group_hosts(group_name)
    group_type = "env" if group_name.startswith("env_") else "role" if group_name.startswith("role_") else "other"

    return {
        "name": group_name,
        "type": group_type,
        "display_name": group_name.replace('env_', '').replace('role_', ''),
        "hosts_count": len(hosts),
        "hosts": hosts
    }


@app.post("/api/groups")
|
|
async def create_group(group_request: GroupRequest, api_key_valid: bool = Depends(verify_api_key)):
|
|
"""Crée un nouveau groupe d'environnement ou de rôle"""
|
|
# Construire le nom complet du groupe
|
|
prefix = "env_" if group_request.type == "env" else "role_"
|
|
|
|
# Si le nom ne commence pas déjà par le préfixe, l'ajouter
|
|
if group_request.name.startswith(prefix):
|
|
full_name = group_request.name
|
|
else:
|
|
full_name = f"{prefix}{group_request.name}"
|
|
|
|
# Vérifier si le groupe existe déjà
|
|
if ansible_service.group_exists(full_name):
|
|
raise HTTPException(status_code=400, detail=f"Le groupe '{full_name}' existe déjà")
|
|
|
|
# Créer le groupe
|
|
success = ansible_service.add_group(full_name)
|
|
|
|
if not success:
|
|
raise HTTPException(status_code=500, detail="Erreur lors de la création du groupe")
|
|
|
|
return {
|
|
"success": True,
|
|
"message": f"Groupe '{full_name}' créé avec succès",
|
|
"group": {
|
|
"name": full_name,
|
|
"type": group_request.type,
|
|
"display_name": full_name.replace('env_', '').replace('role_', ''),
|
|
"hosts_count": 0,
|
|
"hosts": []
|
|
}
|
|
}
|
|
|
|
|
|
@app.put("/api/groups/{group_name}")
|
|
async def update_group(group_name: str, group_update: GroupUpdateRequest, api_key_valid: bool = Depends(verify_api_key)):
|
|
"""Renomme un groupe existant"""
|
|
if not ansible_service.group_exists(group_name):
|
|
raise HTTPException(status_code=404, detail=f"Groupe '{group_name}' non trouvé")
|
|
|
|
# Déterminer le type du groupe
|
|
if group_name.startswith("env_"):
|
|
prefix = "env_"
|
|
group_type = "env"
|
|
elif group_name.startswith("role_"):
|
|
prefix = "role_"
|
|
group_type = "role"
|
|
else:
|
|
raise HTTPException(status_code=400, detail="Seuls les groupes env_ et role_ peuvent être modifiés")
|
|
|
|
# Construire le nouveau nom
|
|
if group_update.new_name.startswith(prefix):
|
|
new_full_name = group_update.new_name
|
|
else:
|
|
new_full_name = f"{prefix}{group_update.new_name}"
|
|
|
|
# Vérifier si le nouveau nom existe déjà
|
|
if ansible_service.group_exists(new_full_name):
|
|
raise HTTPException(status_code=400, detail=f"Le groupe '{new_full_name}' existe déjà")
|
|
|
|
# Renommer le groupe
|
|
success = ansible_service.rename_group(group_name, new_full_name)
|
|
|
|
if not success:
|
|
raise HTTPException(status_code=500, detail="Erreur lors du renommage du groupe")
|
|
|
|
hosts = ansible_service.get_group_hosts(new_full_name)
|
|
|
|
return {
|
|
"success": True,
|
|
"message": f"Groupe renommé de '{group_name}' vers '{new_full_name}'",
|
|
"group": {
|
|
"name": new_full_name,
|
|
"type": group_type,
|
|
"display_name": new_full_name.replace('env_', '').replace('role_', ''),
|
|
"hosts_count": len(hosts),
|
|
"hosts": hosts
|
|
}
|
|
}
|
|
|
|
|
|
@app.delete("/api/groups/{group_name}")
|
|
async def delete_group(
|
|
group_name: str,
|
|
move_hosts_to: Optional[str] = None,
|
|
api_key_valid: bool = Depends(verify_api_key)
|
|
):
|
|
"""Supprime un groupe existant
|
|
|
|
Args:
|
|
group_name: Nom du groupe à supprimer
|
|
move_hosts_to: Groupe vers lequel déplacer les hôtes (optionnel, query param)
|
|
"""
|
|
if not ansible_service.group_exists(group_name):
|
|
raise HTTPException(status_code=404, detail=f"Groupe '{group_name}' non trouvé")
|
|
|
|
# Vérifier si le groupe contient des hôtes
|
|
hosts_in_group = ansible_service.get_group_hosts(group_name)
|
|
|
|
# Si le groupe contient des hôtes et qu'on ne spécifie pas où les déplacer
|
|
if hosts_in_group and not move_hosts_to:
|
|
# Pour les groupes d'environnement, c'est critique car les hôtes doivent avoir un env
|
|
if group_name.startswith("env_"):
|
|
raise HTTPException(
|
|
status_code=400,
|
|
detail=f"Le groupe contient {len(hosts_in_group)} hôte(s). Spécifiez 'move_hosts_to' pour les déplacer."
|
|
)
|
|
|
|
# Si on veut déplacer les hôtes, vérifier que le groupe cible est valide
|
|
if move_hosts_to:
|
|
# Vérifier que le groupe cible est du même type
|
|
if group_name.startswith("env_") and not move_hosts_to.startswith("env_"):
|
|
raise HTTPException(status_code=400, detail="Les hôtes doivent être déplacés vers un groupe d'environnement")
|
|
if group_name.startswith("role_") and not move_hosts_to.startswith("role_"):
|
|
raise HTTPException(status_code=400, detail="Les hôtes doivent être déplacés vers un groupe de rôle")
|
|
|
|
# Supprimer le groupe
|
|
result = ansible_service.delete_group(group_name, move_hosts_to)
|
|
|
|
if not result.get("success"):
|
|
raise HTTPException(status_code=500, detail=result.get("error", "Erreur lors de la suppression"))
|
|
|
|
return {
|
|
"success": True,
|
|
"message": f"Groupe '{group_name}' supprimé avec succès",
|
|
"hosts_affected": result.get("hosts_affected", []),
|
|
"hosts_moved_to": result.get("hosts_moved_to")
|
|
}
|
|
|
|
|
|
@app.get("/api/hosts/by-name/{host_name}")
|
|
async def get_host_by_name(host_name: str, api_key_valid: bool = Depends(verify_api_key)):
|
|
"""Récupère un hôte spécifique par son nom"""
|
|
host = next((h for h in db.hosts if h.name == host_name), None)
|
|
if not host:
|
|
raise HTTPException(status_code=404, detail="Hôte non trouvé")
|
|
return host
|
|
|
|
@app.get("/api/hosts", response_model=List[Host])
|
|
async def get_hosts(
|
|
bootstrap_status: Optional[str] = None,
|
|
api_key_valid: bool = Depends(verify_api_key)
|
|
):
|
|
"""Récupère la liste de tous les hôtes
|
|
|
|
Args:
|
|
bootstrap_status: Filtrer par statut bootstrap ('ready', 'not_configured', ou None pour tous)
|
|
"""
|
|
hosts = db.hosts
|
|
|
|
# Filtrer par statut bootstrap si spécifié
|
|
if bootstrap_status == 'ready':
|
|
hosts = [h for h in hosts if h.bootstrap_ok]
|
|
elif bootstrap_status == 'not_configured':
|
|
hosts = [h for h in hosts if not h.bootstrap_ok]
|
|
|
|
return hosts
|
|
|
|
@app.get("/api/hosts/{host_id}", response_model=Host)
|
|
async def get_host(host_id: int, api_key_valid: bool = Depends(verify_api_key)):
|
|
"""Récupère un hôte spécifique par ID"""
|
|
host = next((h for h in db.hosts if h.id == host_id), None)
|
|
if not host:
|
|
raise HTTPException(status_code=404, detail="Hôte non trouvé")
|
|
return host
|
|
|
|
@app.post("/api/hosts")
|
|
async def create_host(host_request: HostRequest, api_key_valid: bool = Depends(verify_api_key)):
|
|
"""Crée un nouvel hôte dans l'inventaire Ansible (hosts.yml)
|
|
|
|
L'hôte sera ajouté au groupe d'environnement spécifié et aux groupes de rôles.
|
|
"""
|
|
# Vérifier si l'hôte existe déjà
|
|
if ansible_service.host_exists(host_request.name):
|
|
raise HTTPException(status_code=400, detail=f"L'hôte '{host_request.name}' existe déjà dans l'inventaire")
|
|
|
|
# Valider le groupe d'environnement
|
|
env_groups = ansible_service.get_env_groups()
|
|
if host_request.env_group not in env_groups:
|
|
# Créer le groupe s'il n'existe pas mais commence par env_
|
|
if not host_request.env_group.startswith('env_'):
|
|
raise HTTPException(status_code=400, detail=f"Le groupe d'environnement doit commencer par 'env_'. Groupes existants: {env_groups}")
|
|
|
|
# Valider les groupes de rôles
|
|
role_groups = ansible_service.get_role_groups()
|
|
for role in host_request.role_groups:
|
|
if role not in role_groups and not role.startswith('role_'):
|
|
raise HTTPException(status_code=400, detail=f"Le groupe de rôle '{role}' doit commencer par 'role_'. Groupes existants: {role_groups}")
|
|
|
|
try:
|
|
# Ajouter l'hôte à l'inventaire
|
|
ansible_service.add_host_to_inventory(
|
|
hostname=host_request.name,
|
|
env_group=host_request.env_group,
|
|
role_groups=host_request.role_groups,
|
|
ansible_host=host_request.ip
|
|
)
|
|
|
|
# Invalider le cache pour recharger les hôtes
|
|
db._hosts_cache = None
|
|
|
|
# Récupérer le nouvel hôte
|
|
new_host = next((h for h in db.hosts if h.name == host_request.name), None)
|
|
|
|
# Ajouter un log
|
|
log_entry = LogEntry(
|
|
id=db.get_next_id("logs"),
|
|
timestamp=datetime.now(timezone.utc),
|
|
level="INFO",
|
|
message=f"Hôte '{host_request.name}' ajouté à l'inventaire (env: {host_request.env_group}, roles: {host_request.role_groups})",
|
|
source="inventory",
|
|
host=host_request.name
|
|
)
|
|
db.logs.insert(0, log_entry)
|
|
|
|
# Notifier les clients WebSocket
|
|
await ws_manager.broadcast({
|
|
"type": "host_created",
|
|
"data": new_host.dict() if new_host else {"name": host_request.name}
|
|
})
|
|
|
|
return {
|
|
"message": f"Hôte '{host_request.name}' ajouté avec succès",
|
|
"host": new_host.dict() if new_host else None,
|
|
"inventory_updated": True
|
|
}
|
|
|
|
except Exception as e:
|
|
raise HTTPException(status_code=500, detail=f"Erreur lors de l'ajout de l'hôte: {str(e)}")
|
|
|
|
@app.put("/api/hosts/{host_name}")
|
|
async def update_host(
|
|
host_name: str,
|
|
update_request: HostUpdateRequest,
|
|
api_key_valid: bool = Depends(verify_api_key)
|
|
):
|
|
"""Met à jour les groupes d'un hôte existant dans l'inventaire Ansible"""
|
|
# Vérifier que l'hôte existe
|
|
if not ansible_service.host_exists(host_name):
|
|
raise HTTPException(status_code=404, detail=f"Hôte '{host_name}' non trouvé dans l'inventaire")
|
|
|
|
# Valider le groupe d'environnement si fourni
|
|
if update_request.env_group:
|
|
env_groups = ansible_service.get_env_groups()
|
|
if update_request.env_group not in env_groups and not update_request.env_group.startswith('env_'):
|
|
raise HTTPException(status_code=400, detail=f"Le groupe d'environnement doit commencer par 'env_'")
|
|
|
|
# Valider les groupes de rôles si fournis
|
|
if update_request.role_groups:
|
|
for role in update_request.role_groups:
|
|
if not role.startswith('role_'):
|
|
raise HTTPException(status_code=400, detail=f"Le groupe de rôle '{role}' doit commencer par 'role_'")
|
|
|
|
try:
|
|
success = ansible_service.update_host_groups(
|
|
hostname=host_name,
|
|
env_group=update_request.env_group,
|
|
role_groups=update_request.role_groups,
|
|
ansible_host=update_request.ansible_host
|
|
)
|
|
|
|
if not success:
|
|
raise HTTPException(status_code=500, detail="Échec de la mise à jour de l'hôte")
|
|
|
|
# Invalider le cache
|
|
db._hosts_cache = None
|
|
|
|
# Récupérer l'hôte mis à jour
|
|
updated_host = next((h for h in db.hosts if h.name == host_name), None)
|
|
|
|
# Ajouter un log
|
|
log_entry = LogEntry(
|
|
id=db.get_next_id("logs"),
|
|
timestamp=datetime.now(timezone.utc),
|
|
level="INFO",
|
|
message=f"Hôte '{host_name}' mis à jour (env: {update_request.env_group}, roles: {update_request.role_groups})",
|
|
source="inventory",
|
|
host=host_name
|
|
)
|
|
db.logs.insert(0, log_entry)
|
|
|
|
# Notifier les clients WebSocket
|
|
await ws_manager.broadcast({
|
|
"type": "host_updated",
|
|
"data": updated_host.dict() if updated_host else {"name": host_name}
|
|
})
|
|
|
|
return {
|
|
"message": f"Hôte '{host_name}' mis à jour avec succès",
|
|
"host": updated_host.dict() if updated_host else None,
|
|
"inventory_updated": True
|
|
}
|
|
|
|
except HTTPException:
|
|
raise
|
|
except Exception as e:
|
|
raise HTTPException(status_code=500, detail=f"Erreur lors de la mise à jour: {str(e)}")
|
|
|
|
@app.delete("/api/hosts/by-name/{host_name}")
|
|
async def delete_host_by_name(host_name: str, api_key_valid: bool = Depends(verify_api_key)):
|
|
"""Supprime un hôte de l'inventaire Ansible par son nom"""
|
|
# Vérifier que l'hôte existe
|
|
if not ansible_service.host_exists(host_name):
|
|
raise HTTPException(status_code=404, detail=f"Hôte '{host_name}' non trouvé dans l'inventaire")
|
|
|
|
try:
|
|
success = ansible_service.remove_host_from_inventory(host_name)
|
|
|
|
if not success:
|
|
raise HTTPException(status_code=500, detail="Échec de la suppression de l'hôte")
|
|
|
|
# Invalider le cache
|
|
db._hosts_cache = None
|
|
|
|
# Ajouter un log
|
|
log_entry = LogEntry(
|
|
id=db.get_next_id("logs"),
|
|
timestamp=datetime.now(timezone.utc),
|
|
level="WARN",
|
|
message=f"Hôte '{host_name}' supprimé de l'inventaire",
|
|
source="inventory",
|
|
host=host_name
|
|
)
|
|
db.logs.insert(0, log_entry)
|
|
|
|
# Notifier les clients WebSocket
|
|
await ws_manager.broadcast({
|
|
"type": "host_deleted",
|
|
"data": {"name": host_name}
|
|
})
|
|
|
|
return {
|
|
"message": f"Hôte '{host_name}' supprimé avec succès",
|
|
"inventory_updated": True
|
|
}
|
|
|
|
except HTTPException:
|
|
raise
|
|
except Exception as e:
|
|
raise HTTPException(status_code=500, detail=f"Erreur lors de la suppression: {str(e)}")
|
|
|
|
@app.delete("/api/hosts/{host_id}")
|
|
async def delete_host(host_id: int, api_key_valid: bool = Depends(verify_api_key)):
|
|
"""Supprime un hôte par ID"""
|
|
host = next((h for h in db.hosts if h.id == host_id), None)
|
|
if not host:
|
|
raise HTTPException(status_code=404, detail="Hôte non trouvé")
|
|
|
|
return await delete_host_by_name(host.name, api_key_valid)
|
|
|
|
@app.get("/api/tasks", response_model=List[Task])
|
|
async def get_tasks(api_key_valid: bool = Depends(verify_api_key)):
|
|
"""Récupère la liste de toutes les tâches"""
|
|
return db.tasks
|
|
|
|
@app.post("/api/tasks", response_model=Task)
|
|
async def create_task(task_request: TaskRequest, api_key_valid: bool = Depends(verify_api_key)):
|
|
"""Crée une nouvelle tâche et exécute le playbook Ansible correspondant"""
|
|
task_names = {
|
|
'upgrade': 'Mise à jour système',
|
|
'reboot': 'Redémarrage système',
|
|
'health-check': 'Vérification de santé',
|
|
'backup': 'Sauvegarde',
|
|
'deploy': 'Déploiement',
|
|
'rollback': 'Rollback',
|
|
'maintenance': 'Maintenance',
|
|
'bootstrap': 'Bootstrap Ansible'
|
|
}
|
|
|
|
new_task = Task(
|
|
id=db.get_next_id("tasks"),
|
|
name=task_names.get(task_request.action, f"Tâche {task_request.action}"),
|
|
host=task_request.host or task_request.group or "all",
|
|
status="running",
|
|
progress=0,
|
|
start_time=datetime.now(timezone.utc)
|
|
)
|
|
|
|
db.tasks.append(new_task)
|
|
|
|
# Notifier les clients WebSocket
|
|
await ws_manager.broadcast({
|
|
"type": "task_created",
|
|
"data": new_task.dict()
|
|
})
|
|
|
|
# Exécuter le playbook Ansible en arrière-plan
|
|
playbook = ACTION_PLAYBOOK_MAP.get(task_request.action)
|
|
if playbook:
|
|
asyncio.create_task(execute_ansible_task(
|
|
task_id=new_task.id,
|
|
playbook=playbook,
|
|
target=new_task.host,
|
|
extra_vars=task_request.extra_vars,
|
|
check_mode=task_request.dry_run
|
|
))
|
|
else:
|
|
# Pas de playbook correspondant, simuler
|
|
asyncio.create_task(simulate_task_execution(new_task.id))
|
|
|
|
return new_task
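
# Hedged example call for POST /api/tasks (field names follow TaskRequest as
# used above; port, key, and values are illustrative):
#   curl -X POST http://localhost:8000/api/tasks \
#        -H "X-API-Key: dev-key-12345" -H "Content-Type: application/json" \
#        -d '{"action": "health-check", "host": "all"}'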


# ===== TASK LOG ENDPOINTS (MARKDOWN) =====
# IMPORTANT: these routes must come BEFORE /api/tasks/{task_id} to avoid conflicts

@app.get("/api/tasks/logs")
async def get_task_logs(
    status: Optional[str] = None,
    year: Optional[str] = None,
    month: Optional[str] = None,
    day: Optional[str] = None,
    target: Optional[str] = None,
    category: Optional[str] = None,
    api_key_valid: bool = Depends(verify_api_key)
):
    """Return task logs from the markdown files, with filtering."""
    logs = task_log_service.get_task_logs(
        year=year,
        month=month,
        day=day,
        status=status,
        target=target,
        category=category
    )
    return {
        "logs": [log.dict() for log in logs],
        "count": len(logs),
        "filters": {
            "status": status,
            "year": year,
            "month": month,
            "day": day,
            "target": target
        }
    }
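
# Example filtered query (illustrative values):
#   GET /api/tasks/logs?status=failed&year=2025&month=01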


@app.get("/api/tasks/logs/dates")
async def get_task_logs_dates(api_key_valid: bool = Depends(verify_api_key)):
    """Return the structure of available dates for filtering."""
    return task_log_service.get_available_dates()


@app.get("/api/tasks/logs/stats")
async def get_task_logs_stats(api_key_valid: bool = Depends(verify_api_key)):
    """Return statistics about the task logs."""
    return task_log_service.get_stats()


@app.get("/api/tasks/logs/{log_id}")
async def get_task_log_content(log_id: str, api_key_valid: bool = Depends(verify_api_key)):
    """Return the content of a specific task log."""
    logs = task_log_service.get_task_logs()
    log = next((l for l in logs if l.id == log_id), None)

    if not log:
        raise HTTPException(status_code=404, detail="Log non trouvé")

    try:
        content = Path(log.path).read_text(encoding='utf-8')
        return {
            "log": log.dict(),
            "content": content
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Erreur lecture du fichier: {str(e)}")


@app.delete("/api/tasks/logs/{log_id}")
async def delete_task_log(log_id: str, api_key_valid: bool = Depends(verify_api_key)):
    """Delete a task log markdown file."""
    logs = task_log_service.get_task_logs()
    log = next((l for l in logs if l.id == log_id), None)

    if not log:
        raise HTTPException(status_code=404, detail="Log non trouvé")

    try:
        log_path = Path(log.path)
        if log_path.exists():
            log_path.unlink()
        # Report success even if the file was already gone (idempotent delete)
        return {"message": "Log supprimé", "id": log_id}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Erreur suppression du fichier: {str(e)}")


@app.get("/api/tasks/running")
async def get_running_tasks(api_key_valid: bool = Depends(verify_api_key)):
    """Return only the tasks currently executing (running or pending)."""
    running_tasks = [t for t in db.tasks if t.status in ("running", "pending")]
    return {
        "tasks": [t.dict() for t in running_tasks],
        "count": len(running_tasks)
    }


@app.get("/api/tasks/{task_id}", response_model=Task)
async def get_task(task_id: int, api_key_valid: bool = Depends(verify_api_key)):
    """Return a specific task."""
    task = next((t for t in db.tasks if t.id == task_id), None)
    if not task:
        raise HTTPException(status_code=404, detail="Tâche non trouvée")
    return task

@app.delete("/api/tasks/{task_id}")
async def delete_task(task_id: int, api_key_valid: bool = Depends(verify_api_key)):
    """Delete a task."""
    task = next((t for t in db.tasks if t.id == task_id), None)
    if not task:
        raise HTTPException(status_code=404, detail="Tâche non trouvée")

    db.tasks = [t for t in db.tasks if t.id != task_id]

    # Notify WebSocket clients
    await ws_manager.broadcast({
        "type": "task_deleted",
        "data": {"id": task_id}
    })

    return {"message": "Tâche supprimée avec succès"}

@app.get("/api/logs", response_model=List[LogEntry])
|
|
async def get_logs(limit: int = 50, api_key_valid: bool = Depends(verify_api_key)):
|
|
"""Récupère les logs récents"""
|
|
return db.logs[:limit]
|
|
|
|
@app.post("/api/logs")
|
|
async def create_log(log_entry: LogEntry, api_key_valid: bool = Depends(verify_api_key)):
|
|
"""Ajoute un nouvel entrée de log"""
|
|
log_entry.id = db.get_next_id("logs")
|
|
db.logs.insert(0, log_entry)
|
|
|
|
# Garder seulement les 100 derniers logs
|
|
if len(db.logs) > 100:
|
|
db.logs = db.logs[:100]
|
|
|
|
# Notifier les clients WebSocket
|
|
await ws_manager.broadcast({
|
|
"type": "new_log",
|
|
"data": log_entry.dict()
|
|
})
|
|
|
|
return log_entry
|
|
|
|
@app.delete("/api/logs")
|
|
async def clear_logs(api_key_valid: bool = Depends(verify_api_key)):
|
|
"""Efface tous les logs"""
|
|
db.logs = []
|
|
|
|
# Notifier les clients WebSocket
|
|
await ws_manager.broadcast({
|
|
"type": "logs_cleared",
|
|
"data": {}
|
|
})
|
|
|
|
return {"message": "Logs effacés avec succès"}
|
|
|
|
@app.get("/api/metrics", response_model=SystemMetrics)
|
|
async def get_metrics(api_key_valid: bool = Depends(verify_api_key)):
|
|
"""Récupère les métriques système calculées dynamiquement"""
|
|
return db.metrics
|
|
|
|
|
|
@app.post("/api/hosts/refresh")
|
|
async def refresh_hosts(api_key_valid: bool = Depends(verify_api_key)):
|
|
"""Force le rechargement des hôtes depuis l'inventaire Ansible"""
|
|
ansible_service.invalidate_cache() # Clear ansible inventory cache first
|
|
hosts = db.refresh_hosts()
|
|
|
|
# Notifier les clients WebSocket
|
|
await ws_manager.broadcast({
|
|
"type": "hosts_refreshed",
|
|
"data": {"count": len(hosts)}
|
|
})
|
|
|
|
return {"message": f"{len(hosts)} hôtes rechargés depuis l'inventaire Ansible"}
|
|
|
|
|
|
# ===== ENDPOINTS ANSIBLE =====
|
|
|
|
@app.get("/api/ansible/playbooks")
|
|
async def get_ansible_playbooks(api_key_valid: bool = Depends(verify_api_key)):
|
|
"""Liste les playbooks Ansible disponibles avec leurs catégories"""
|
|
return {
|
|
"playbooks": ansible_service.get_playbooks(),
|
|
"categories": ansible_service.get_playbook_categories(),
|
|
"ansible_dir": str(ANSIBLE_DIR)
|
|
}
|
|
|
|
@app.get("/api/ansible/inventory")
|
|
async def get_ansible_inventory(
|
|
group: Optional[str] = None,
|
|
api_key_valid: bool = Depends(verify_api_key)
|
|
):
|
|
"""Récupère l'inventaire Ansible avec les hôtes et groupes.
|
|
|
|
Args:
|
|
group: Filtrer les hôtes par groupe (optionnel)
|
|
"""
|
|
return {
|
|
"hosts": [h.dict() for h in ansible_service.get_hosts_from_inventory(group_filter=group)],
|
|
"groups": ansible_service.get_groups(),
|
|
"inventory_path": str(ansible_service.inventory_path),
|
|
"filter": group
|
|
}
|
|
|
|
@app.post("/api/ansible/execute")
|
|
async def execute_ansible_playbook(
|
|
request: AnsibleExecutionRequest,
|
|
api_key_valid: bool = Depends(verify_api_key)
|
|
):
|
|
"""Exécute un playbook Ansible directement"""
|
|
start_time_dt = datetime.now(timezone.utc)
|
|
|
|
# Créer une tâche pour l'historique
|
|
task_id = db.get_next_id("tasks")
|
|
playbook_name = request.playbook.replace('.yml', '').replace('-', ' ').title()
|
|
task = Task(
|
|
id=task_id,
|
|
name=f"Playbook: {playbook_name}",
|
|
host=request.target,
|
|
status="running",
|
|
progress=0,
|
|
start_time=start_time_dt
|
|
)
|
|
db.tasks.insert(0, task)
|
|
|
|
try:
|
|
result = await ansible_service.execute_playbook(
|
|
playbook=request.playbook,
|
|
target=request.target,
|
|
extra_vars=request.extra_vars,
|
|
check_mode=request.check_mode,
|
|
verbose=request.verbose
|
|
)
|
|
|
|
# Mettre à jour la tâche
|
|
task.status = "completed" if result["success"] else "failed"
|
|
task.progress = 100
|
|
task.end_time = datetime.now(timezone.utc)
|
|
task.duration = f"{result.get('execution_time', 0):.1f}s"
|
|
task.output = result.get("stdout", "")
|
|
task.error = result.get("stderr", "") if not result["success"] else None
|
|
|
|
# Ajouter un log
|
|
log_entry = LogEntry(
|
|
id=db.get_next_id("logs"),
|
|
timestamp=datetime.now(timezone.utc),
|
|
level="INFO" if result["success"] else "ERROR",
|
|
message=f"Playbook {request.playbook} exécuté sur {request.target}: {'succès' if result['success'] else 'échec'}",
|
|
source="ansible",
|
|
host=request.target
|
|
)
|
|
db.logs.insert(0, log_entry)
|
|
|
|
# Sauvegarder le log markdown
|
|
try:
|
|
task_log_service.save_task_log(
|
|
task=task,
|
|
output=result.get("stdout", ""),
|
|
error=result.get("stderr", "")
|
|
)
|
|
except Exception as log_error:
|
|
print(f"Erreur sauvegarde log markdown: {log_error}")
|
|
|
|
await ws_manager.broadcast({
|
|
"type": "ansible_execution",
|
|
"data": result
|
|
})
|
|
|
|
# Ajouter task_id au résultat
|
|
result["task_id"] = task_id
|
|
|
|
return result
|
|
except FileNotFoundError as e:
|
|
task.status = "failed"
|
|
task.end_time = datetime.now(timezone.utc)
|
|
task.error = str(e)
|
|
task_log_service.save_task_log(task=task, error=str(e))
|
|
raise HTTPException(status_code=404, detail=str(e))
|
|
except Exception as e:
|
|
task.status = "failed"
|
|
task.end_time = datetime.now(timezone.utc)
|
|
task.error = str(e)
|
|
task_log_service.save_task_log(task=task, error=str(e))
|
|
raise HTTPException(status_code=500, detail=str(e))
|
|
|
|
@app.get("/api/ansible/groups")
|
|
async def get_ansible_groups(api_key_valid: bool = Depends(verify_api_key)):
|
|
"""Récupère la liste des groupes Ansible"""
|
|
return {"groups": ansible_service.get_groups()}
|
|
|
|
|
|
# ===== PLAYBOOK CRUD ENDPOINTS =====

class PlaybookContentRequest(BaseModel):
    """Request to save the content of a playbook."""
    content: str = Field(..., description="Contenu YAML du playbook")


@app.get("/api/playbooks/{filename}/content")
async def get_playbook_content(
    filename: str,
    api_key_valid: bool = Depends(verify_api_key)
):
    """Return the content of a playbook."""
    playbook_path = ansible_service.playbooks_dir / filename

    # Check for a valid extension
    if not filename.endswith(('.yml', '.yaml')):
        raise HTTPException(status_code=400, detail="Extension de fichier invalide. Utilisez .yml ou .yaml")

    if not playbook_path.exists():
        raise HTTPException(status_code=404, detail=f"Playbook non trouvé: {filename}")

    # Make sure the file really lives inside the playbooks directory (security)
    try:
        playbook_path.resolve().relative_to(ansible_service.playbooks_dir.resolve())
    except ValueError:
        raise HTTPException(status_code=403, detail="Accès non autorisé")

    try:
        content = playbook_path.read_text(encoding='utf-8')
        stat = playbook_path.stat()
        return {
            "filename": filename,
            "content": content,
            "size": stat.st_size,
            "modified": datetime.fromtimestamp(stat.st_mtime, tz=timezone.utc).isoformat()
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Erreur lecture fichier: {str(e)}")


@app.put("/api/playbooks/{filename}/content")
async def save_playbook_content(
    filename: str,
    request: PlaybookContentRequest,
    api_key_valid: bool = Depends(verify_api_key)
):
    """Save the content of a playbook (create or update)."""
    # Check for a valid extension
    if not filename.endswith(('.yml', '.yaml')):
        raise HTTPException(status_code=400, detail="Extension de fichier invalide. Utilisez .yml ou .yaml")

    # Validate the file name (security); `re` is imported at module level
    if not re.match(r'^[a-zA-Z0-9_-]+\.(yml|yaml)$', filename):
        raise HTTPException(status_code=400, detail="Nom de fichier invalide")

    playbook_path = ansible_service.playbooks_dir / filename

    # Make sure the directory exists
    ansible_service.playbooks_dir.mkdir(parents=True, exist_ok=True)

    # Validate the YAML content
    try:
        parsed = yaml.safe_load(request.content)
        if parsed is None:
            raise HTTPException(status_code=400, detail="Contenu YAML vide ou invalide")
    except yaml.YAMLError as e:
        raise HTTPException(status_code=400, detail=f"Erreur de syntaxe YAML: {str(e)}")

    is_new = not playbook_path.exists()

    try:
        playbook_path.write_text(request.content, encoding='utf-8')
        stat = playbook_path.stat()

        # Log the action
        action = "créé" if is_new else "modifié"
        log_entry = LogEntry(
            id=db.get_next_id("logs"),
            timestamp=datetime.now(timezone.utc),
            level="INFO",
            message=f"Playbook {filename} {action}",
            source="playbook_editor"
        )
        db.logs.insert(0, log_entry)

        return {
            "success": True,
            "message": f"Playbook {filename} {'créé' if is_new else 'sauvegardé'} avec succès",
            "filename": filename,
            "size": stat.st_size,
            "modified": datetime.fromtimestamp(stat.st_mtime, tz=timezone.utc).isoformat(),
            "is_new": is_new
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Erreur sauvegarde fichier: {str(e)}")


@app.delete("/api/playbooks/{filename}")
|
|
async def delete_playbook(
|
|
filename: str,
|
|
api_key_valid: bool = Depends(verify_api_key)
|
|
):
|
|
"""Supprime un playbook"""
|
|
# Vérifier les extensions valides
|
|
if not filename.endswith(('.yml', '.yaml')):
|
|
raise HTTPException(status_code=400, detail="Extension de fichier invalide")
|
|
|
|
playbook_path = ansible_service.playbooks_dir / filename
|
|
|
|
if not playbook_path.exists():
|
|
raise HTTPException(status_code=404, detail=f"Playbook non trouvé: {filename}")
|
|
|
|
# Vérifier que le fichier est bien dans le répertoire playbooks (sécurité)
|
|
try:
|
|
playbook_path.resolve().relative_to(ansible_service.playbooks_dir.resolve())
|
|
except ValueError:
|
|
raise HTTPException(status_code=403, detail="Accès non autorisé")
|
|
|
|
try:
|
|
playbook_path.unlink()
|
|
|
|
# Log l'action
|
|
log_entry = LogEntry(
|
|
id=db.get_next_id("logs"),
|
|
timestamp=datetime.now(timezone.utc),
|
|
level="WARN",
|
|
message=f"Playbook {filename} supprimé",
|
|
source="playbook_editor"
|
|
)
|
|
db.logs.insert(0, log_entry)
|
|
|
|
return {
|
|
"success": True,
|
|
"message": f"Playbook {filename} supprimé avec succès"
|
|
}
|
|
except Exception as e:
|
|
raise HTTPException(status_code=500, detail=f"Erreur suppression fichier: {str(e)}")
|
|
|
|
|
|
@app.get("/api/ansible/ssh-config")
|
|
async def get_ssh_config(api_key_valid: bool = Depends(verify_api_key)):
|
|
"""Diagnostic de la configuration SSH pour le bootstrap"""
|
|
ssh_key_path = Path(SSH_KEY_PATH)
|
|
ssh_dir = ssh_key_path.parent
|
|
|
|
# Lister les fichiers dans le répertoire SSH
|
|
available_files = []
|
|
if ssh_dir.exists():
|
|
available_files = [f.name for f in ssh_dir.iterdir()]
|
|
|
|
# Vérifier les clés
|
|
private_key_exists = ssh_key_path.exists()
|
|
public_key_exists = Path(SSH_KEY_PATH + ".pub").exists()
|
|
|
|
# Chercher d'autres clés publiques
|
|
pub_keys_found = []
|
|
for ext in [".pub"]:
|
|
for key_type in ["id_rsa", "id_ed25519", "id_ecdsa", "id_dsa"]:
|
|
key_path = ssh_dir / f"{key_type}{ext}"
|
|
if key_path.exists():
|
|
pub_keys_found.append(str(key_path))
|
|
|
|
# Trouver la clé privée qui sera utilisée
|
|
active_private_key = find_ssh_private_key()
|
|
|
|
return {
|
|
"ssh_key_path": SSH_KEY_PATH,
|
|
"ssh_dir": str(ssh_dir),
|
|
"ssh_dir_exists": ssh_dir.exists(),
|
|
"private_key_exists": private_key_exists,
|
|
"public_key_exists": public_key_exists,
|
|
"available_files": available_files,
|
|
"public_keys_found": pub_keys_found,
|
|
"active_private_key": active_private_key,
|
|
"ssh_user": SSH_USER,
|
|
"sshpass_available": shutil.which("sshpass") is not None,
|
|
}
|
|
|
|
|
|
@app.post("/api/ansible/adhoc", response_model=AdHocCommandResult)
|
|
async def execute_adhoc_command(
|
|
request: AdHocCommandRequest,
|
|
api_key_valid: bool = Depends(verify_api_key)
|
|
):
|
|
"""Exécute une commande ad-hoc Ansible sur un ou plusieurs hôtes.
|
|
|
|
Exemples:
|
|
- Lister les fichiers: {"target": "all", "command": "ls -la /tmp"}
|
|
- Vérifier l'espace disque: {"target": "proxmox", "command": "df -h", "become": true}
|
|
- Redémarrer un service: {"target": "web-servers", "command": "systemctl restart nginx", "become": true}
|
|
"""
|
|
start_time_perf = perf_counter()
|
|
start_time_dt = datetime.now(timezone.utc)
|
|
|
|
# Créer une tâche pour l'historique
|
|
task_id = db.get_next_id("tasks")
|
|
task_name = f"Ad-hoc: {request.command[:40]}{'...' if len(request.command) > 40 else ''}"
|
|
task = Task(
|
|
id=task_id,
|
|
name=task_name,
|
|
host=request.target,
|
|
status="running",
|
|
progress=0,
|
|
start_time=start_time_dt
|
|
)
|
|
db.tasks.insert(0, task)
|
|
|
|
# Construire la commande ansible
|
|
ansible_cmd = [
|
|
"ansible",
|
|
request.target,
|
|
"-i", str(ANSIBLE_DIR / "inventory" / "hosts.yml"),
|
|
"-m", request.module,
|
|
"-a", request.command,
|
|
"--timeout", str(request.timeout),
|
|
]
|
|
|
|
# Ajouter les options
|
|
if request.become:
|
|
ansible_cmd.append("--become")
|
|
|
|
private_key = find_ssh_private_key()
|
|
if private_key:
|
|
ansible_cmd.extend(["--private-key", private_key])
|
|
|
|
if SSH_USER:
|
|
ansible_cmd.extend(["-u", SSH_USER])
|
|
|
|
try:
|
|
result = subprocess.run(
|
|
ansible_cmd,
|
|
capture_output=True,
|
|
text=True,
|
|
timeout=request.timeout + 10,
|
|
cwd=str(ANSIBLE_DIR)
|
|
)
|
|
|
|
duration = perf_counter() - start_time_perf
|
|
success = result.returncode == 0
|
|
|
|
# Mettre à jour la tâche
|
|
task.status = "completed" if success else "failed"
|
|
task.progress = 100
|
|
task.end_time = datetime.now(timezone.utc)
|
|
task.duration = f"{round(duration, 2)}s"
|
|
task.output = result.stdout
|
|
task.error = result.stderr if result.stderr else None
|
|
|
|
# Sauvegarder le log de tâche en markdown
|
|
task_log_service.save_task_log(task, output=result.stdout, error=result.stderr or "")
|
|
|
|
# Log de l'exécution
|
|
log_entry = LogEntry(
|
|
id=db.get_next_id("logs"),
|
|
timestamp=datetime.now(timezone.utc),
|
|
level="INFO" if success else "WARN",
|
|
message=f"Ad-hoc [{request.module}] sur {request.target}: {request.command[:50]}{'...' if len(request.command) > 50 else ''}",
|
|
source="ansible-adhoc",
|
|
host=request.target
|
|
)
|
|
db.logs.insert(0, log_entry)
|
|
|
|
# Notifier via WebSocket
|
|
await ws_manager.broadcast({
|
|
"type": "adhoc_executed",
|
|
"data": {
|
|
"target": request.target,
|
|
"command": request.command,
|
|
"success": success,
|
|
"task_id": task_id
|
|
}
|
|
})
|
|
|
|
# Sauvegarder dans l'historique des commandes ad-hoc (pour réutilisation)
|
|
adhoc_history_service.add_command(
|
|
command=request.command,
|
|
target=request.target,
|
|
module=request.module,
|
|
become=request.become
|
|
)
|
|
|
|
return AdHocCommandResult(
|
|
target=request.target,
|
|
command=request.command,
|
|
success=success,
|
|
return_code=result.returncode,
|
|
stdout=result.stdout,
|
|
stderr=result.stderr if result.stderr else None,
|
|
duration=round(duration, 2)
|
|
)
|
|
|
|
except subprocess.TimeoutExpired:
|
|
duration = perf_counter() - start_time_perf
|
|
# Mettre à jour la tâche en échec
|
|
task.status = "failed"
|
|
task.progress = 100
|
|
task.end_time = datetime.now(timezone.utc)
|
|
task.duration = f"{round(duration, 2)}s"
|
|
task.error = f"Timeout après {request.timeout} secondes"
|
|
|
|
# Sauvegarder le log de tâche
|
|
task_log_service.save_task_log(task, error=task.error)
|
|
|
|
return AdHocCommandResult(
|
|
target=request.target,
|
|
command=request.command,
|
|
success=False,
|
|
return_code=-1,
|
|
stdout="",
|
|
stderr=f"Timeout après {request.timeout} secondes",
|
|
duration=round(duration, 2)
|
|
)
|
|
except FileNotFoundError:
|
|
duration = perf_counter() - start_time_perf
|
|
error_msg = "ansible non trouvé. Vérifiez que Ansible est installé et accessible."
|
|
# Mettre à jour la tâche en échec
|
|
task.status = "failed"
|
|
task.progress = 100
|
|
task.end_time = datetime.now(timezone.utc)
|
|
task.duration = f"{round(duration, 2)}s"
|
|
task.error = error_msg
|
|
|
|
# Sauvegarder le log de tâche
|
|
task_log_service.save_task_log(task, error=error_msg)
|
|
|
|
return AdHocCommandResult(
|
|
target=request.target,
|
|
command=request.command,
|
|
success=False,
|
|
return_code=-1,
|
|
stdout="",
|
|
stderr=error_msg,
|
|
duration=round(duration, 2)
|
|
)
|
|
except Exception as e:
|
|
duration = perf_counter() - start_time_perf
|
|
error_msg = f"Erreur interne: {str(e)}"
|
|
# Mettre à jour la tâche en échec
|
|
task.status = "failed"
|
|
task.progress = 100
|
|
task.end_time = datetime.now(timezone.utc)
|
|
task.duration = f"{round(duration, 2)}s"
|
|
task.error = error_msg
|
|
|
|
# Sauvegarder le log de tâche
|
|
task_log_service.save_task_log(task, error=error_msg)
|
|
|
|
# Return a proper result instead of raising HTTP 500
|
|
return AdHocCommandResult(
|
|
target=request.target,
|
|
command=request.command,
|
|
success=False,
|
|
return_code=-1,
|
|
stdout="",
|
|
stderr=error_msg,
|
|
duration=round(duration, 2)
|
|
)
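
# For reference, the command line assembled above is roughly equivalent to
# this sketch (placeholders in angle brackets, other values illustrative):
#   ansible all -i <ANSIBLE_DIR>/inventory/hosts.yml -m <module> -a "df -h" \
#       --timeout 60 --become --private-key <key> -u automation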


@app.post("/api/ansible/bootstrap", response_model=CommandResult)
async def bootstrap_ansible_host(
    request: BootstrapRequest,
    api_key_valid: bool = Depends(verify_api_key)
):
    """Bootstrap a host for Ansible.

    This operation:
    1. Connects to the host over SSH with the root password
    2. Creates the automation user (default: automation)
    3. Installs the public SSH key for passwordless authentication
    4. Installs and configures sudo for that user
    5. Installs Python3 (required by Ansible)
    6. Verifies key-based SSH login

    Supported: Debian/Ubuntu, Alpine Linux, FreeBSD
    """
    import logging
    import traceback
    logger = logging.getLogger("bootstrap_endpoint")

    try:
        logger.info(f"Bootstrap request for host={request.host}, user={request.automation_user}")
        result = bootstrap_host(
            host=request.host,
            root_password=request.root_password,
            automation_user=request.automation_user
        )
        logger.info(f"Bootstrap result: status={result.status}, return_code={result.return_code}")

        # If the bootstrap failed (return_code != 0), raise with the details
        if result.return_code != 0:
            raise HTTPException(
                status_code=500,
                detail={
                    "status": result.status,
                    "return_code": result.return_code,
                    "stdout": result.stdout,
                    "stderr": result.stderr
                }
            )

        # Resolve the host name (the request may carry an IP or a hostname)
        host_name = request.host
        for h in db.hosts:
            if h.ip == request.host or h.name == request.host:
                host_name = h.name
                break

        # Record the successful bootstrap status
        bootstrap_status_service.set_bootstrap_status(
            host_name=host_name,
            success=True,
            details=f"Bootstrap réussi via API (user: {request.automation_user})"
        )

        # Invalidate the hosts cache so the new status is reloaded
        db._hosts_cache = None

        # Add a success log entry
        log_entry = LogEntry(
            id=db.get_next_id("logs"),
            timestamp=datetime.now(timezone.utc),
            level="INFO",
            message=f"Bootstrap réussi pour {host_name} (user: {request.automation_user})",
            source="bootstrap",
            host=host_name
        )
        db.logs.insert(0, log_entry)

        # Notify over WebSocket
        await ws_manager.broadcast({
            "type": "bootstrap_success",
            "data": {
                "host": host_name,
                "user": request.automation_user,
                "status": "ok",
                "bootstrap_ok": True
            }
        })

        return result

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Bootstrap exception: {e}")
        logger.error(traceback.format_exc())
        # Add an error log entry
        log_entry = LogEntry(
            id=db.get_next_id("logs"),
            timestamp=datetime.now(timezone.utc),
            level="ERROR",
            message=f"Échec bootstrap pour {request.host}: {str(e)}",
            source="bootstrap",
            host=request.host
        )
        db.logs.insert(0, log_entry)

        raise HTTPException(status_code=500, detail=str(e))
|
|
|
|
|
|
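
# A minimal client-side sketch of calling this endpoint (hypothetical host and
# credentials; the port and API key match the defaults configured above). Kept
# as comments so the module stays importable:
#
#   import httpx
#   resp = httpx.post(
#       "http://localhost:8008/api/ansible/bootstrap",
#       headers={"X-API-Key": "dev-key-12345"},
#       json={
#           "host": "192.168.1.50",
#           "root_password": "s3cret",
#           "automation_user": "automation",
#       },
#       timeout=300.0,  # bootstrapping can take a while
#   )
#   resp.raise_for_status()  # a 500 carries status/return_code/stdout/stderr
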
@app.get("/api/health")
|
|
async def global_health_check():
|
|
"""Endpoint de healthcheck global utilisé par Docker.
|
|
|
|
Ne nécessite pas de clé API pour permettre aux orchestrateurs
|
|
de vérifier l'état du service facilement.
|
|
"""
|
|
return {
|
|
"status": "ok",
|
|
"service": "homelab-automation-api",
|
|
"timestamp": datetime.now(timezone.utc).isoformat()
|
|
}
|
|
|
|
|
|
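
# A sketch of the kind of Docker healthcheck this endpoint is meant to back
# (illustrative Dockerfile directive; the real deployment config may differ):
#
#   HEALTHCHECK --interval=30s --timeout=5s --retries=3 \
#     CMD curl -fsS http://localhost:8008/api/health || exit 1
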
# ===== BOOTSTRAP STATUS ENDPOINTS =====

@app.get("/api/bootstrap/status")
async def get_all_bootstrap_status(api_key_valid: bool = Depends(verify_api_key)):
    """Return the bootstrap status of every host"""
    return {
        "hosts": bootstrap_status_service.get_all_status()
    }


@app.get("/api/bootstrap/status/{host_name}")
async def get_host_bootstrap_status(
    host_name: str,
    api_key_valid: bool = Depends(verify_api_key)
):
    """Return the bootstrap status of a specific host"""
    status = bootstrap_status_service.get_bootstrap_status(host_name)
    return {
        "host": host_name,
        **status
    }


@app.post("/api/bootstrap/status/{host_name}")
async def set_host_bootstrap_status(
    host_name: str,
    success: bool = True,
    details: Optional[str] = None,
    api_key_valid: bool = Depends(verify_api_key)
):
    """Manually set the bootstrap status of a host"""
    result = bootstrap_status_service.set_bootstrap_status(
        host_name=host_name,
        success=success,
        details=details or "Status défini manuellement"
    )

    # Invalidate the host cache
    db._hosts_cache = None

    # Notify WebSocket clients
    await ws_manager.broadcast({
        "type": "bootstrap_status_updated",
        "data": {
            "host": host_name,
            "bootstrap_ok": success
        }
    })

    return {
        "host": host_name,
        "status": "updated",
        **result
    }
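
# Because `success` and `details` are plain scalar parameters, FastAPI reads
# them from the query string. A hedged sketch with a hypothetical host name:
#
#   curl -X POST -H "X-API-Key: dev-key-12345" \
#     "http://localhost:8008/api/bootstrap/status/web-01?success=true&details=manual"
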
# ===== AD-HOC HISTORY ENDPOINTS =====

@app.get("/api/adhoc/history")
async def get_adhoc_history(
    category: Optional[str] = None,
    search: Optional[str] = None,
    limit: int = 50,
    api_key_valid: bool = Depends(verify_api_key)
):
    """Return the ad-hoc command history"""
    commands = adhoc_history_service.get_commands(
        category=category,
        search=search,
        limit=limit
    )
    return {
        "commands": [cmd.dict() for cmd in commands],
        "count": len(commands)
    }
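
# Example request/response shape (hypothetical query values):
#
#   GET /api/adhoc/history?category=maintenance&search=uptime&limit=20
#   -> {"commands": [...], "count": <number of matches, capped at limit>}
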
@app.get("/api/adhoc/categories")
|
|
async def get_adhoc_categories(api_key_valid: bool = Depends(verify_api_key)):
|
|
"""Récupère la liste des catégories de commandes ad-hoc"""
|
|
categories = adhoc_history_service.get_categories()
|
|
return {"categories": [cat.dict() for cat in categories]}
|
|
|
|
|
|
@app.post("/api/adhoc/categories")
|
|
async def create_adhoc_category(
|
|
name: str,
|
|
description: Optional[str] = None,
|
|
color: str = "#7c3aed",
|
|
icon: str = "fa-folder",
|
|
api_key_valid: bool = Depends(verify_api_key)
|
|
):
|
|
"""Crée une nouvelle catégorie de commandes ad-hoc"""
|
|
category = adhoc_history_service.add_category(name, description, color, icon)
|
|
return {"category": category.dict(), "message": "Catégorie créée"}
|
|
|
|
|
|
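
# As with the status endpoint above, these scalar parameters arrive via the
# query string, so the `#` in a color value must be URL-encoded. A hedged
# sketch with hypothetical values:
#
#   curl -X POST -H "X-API-Key: dev-key-12345" \
#     "http://localhost:8008/api/adhoc/categories?name=maintenance&color=%23d97706&icon=fa-wrench"
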
@app.put("/api/adhoc/categories/{category_name}")
|
|
async def update_adhoc_category(
|
|
category_name: str,
|
|
request: Request,
|
|
api_key_valid: bool = Depends(verify_api_key)
|
|
):
|
|
"""Met à jour une catégorie existante"""
|
|
try:
|
|
data = await request.json()
|
|
new_name = data.get("name", category_name)
|
|
description = data.get("description", "")
|
|
color = data.get("color", "#7c3aed")
|
|
icon = data.get("icon", "fa-folder")
|
|
|
|
success = adhoc_history_service.update_category(category_name, new_name, description, color, icon)
|
|
if not success:
|
|
raise HTTPException(status_code=404, detail="Catégorie non trouvée")
|
|
return {"message": "Catégorie mise à jour", "category": new_name}
|
|
except Exception as e:
|
|
raise HTTPException(status_code=400, detail=str(e))
|
|
|
|
|
|
@app.delete("/api/adhoc/categories/{category_name}")
|
|
async def delete_adhoc_category(
|
|
category_name: str,
|
|
api_key_valid: bool = Depends(verify_api_key)
|
|
):
|
|
"""Supprime une catégorie et déplace ses commandes vers 'default'"""
|
|
if category_name == "default":
|
|
raise HTTPException(status_code=400, detail="La catégorie 'default' ne peut pas être supprimée")
|
|
|
|
success = adhoc_history_service.delete_category(category_name)
|
|
if not success:
|
|
raise HTTPException(status_code=404, detail="Catégorie non trouvée")
|
|
return {"message": "Catégorie supprimée", "category": category_name}
|
|
|
|
|
|
@app.put("/api/adhoc/history/{command_id}/category")
|
|
async def update_adhoc_command_category(
|
|
command_id: str,
|
|
category: str,
|
|
description: Optional[str] = None,
|
|
api_key_valid: bool = Depends(verify_api_key)
|
|
):
|
|
"""Met à jour la catégorie d'une commande dans l'historique"""
|
|
success = adhoc_history_service.update_command_category(command_id, category, description)
|
|
if not success:
|
|
raise HTTPException(status_code=404, detail="Commande non trouvée")
|
|
return {"message": "Catégorie mise à jour", "command_id": command_id, "category": category}
|
|
|
|
|
|
@app.delete("/api/adhoc/history/{command_id}")
|
|
async def delete_adhoc_command(command_id: str, api_key_valid: bool = Depends(verify_api_key)):
|
|
"""Supprime une commande de l'historique"""
|
|
success = adhoc_history_service.delete_command(command_id)
|
|
if not success:
|
|
raise HTTPException(status_code=404, detail="Commande non trouvée")
|
|
return {"message": "Commande supprimée", "command_id": command_id}
|
|
|
|
|
|
@app.get("/api/health/{host_name}", response_model=HealthCheck)
|
|
async def check_host_health(host_name: str, api_key_valid: bool = Depends(verify_api_key)):
|
|
"""Effectue un health check sur un hôte spécifique et met à jour son last_seen"""
|
|
host = next((h for h in db.hosts if h.name == host_name), None)
|
|
if not host:
|
|
raise HTTPException(status_code=404, detail="Hôte non trouvé")
|
|
|
|
# Simuler un health check à partir du statut actuel
|
|
health_check = HealthCheck(
|
|
host=host_name,
|
|
ssh_ok=host.status == "online",
|
|
ansible_ok=host.status == "online",
|
|
sudo_ok=host.status == "online",
|
|
reachable=host.status != "offline",
|
|
response_time=0.123 if host.status == "online" else None,
|
|
error_message=None if host.status != "offline" else "Hôte injoignable"
|
|
)
|
|
|
|
# Mettre à jour le statut runtime + persistant
|
|
new_status = "online" if health_check.reachable else "offline"
|
|
db.update_host_status(host_name, new_status, host.os)
|
|
|
|
# Ajouter un log pour le health check
|
|
log_entry = LogEntry(
|
|
timestamp=datetime.now(timezone.utc),
|
|
level="INFO" if health_check.reachable else "ERROR",
|
|
message=f"Health check {'réussi' if health_check.reachable else 'échoué'} pour {host_name}",
|
|
source="health_check",
|
|
host=host_name
|
|
)
|
|
|
|
db.logs.insert(0, log_entry)
|
|
|
|
# Notifier les clients WebSocket
|
|
await ws_manager.broadcast({
|
|
"type": "health_check",
|
|
"data": health_check.dict()
|
|
})
|
|
|
|
return health_check
|
|
|
|
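
# The simulated response mirrors the host's cached status; for an online host
# it looks roughly like this (illustrative host name):
#
#   {"host": "web-01", "ssh_ok": true, "ansible_ok": true, "sudo_ok": true,
#    "reachable": true, "response_time": 0.123, "error_message": null}
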
# WebSocket for real-time updates
@app.websocket("/ws")
async def websocket_endpoint(websocket: WebSocket):
    await ws_manager.connect(websocket)
    try:
        while True:
            # Keep the connection open
            data = await websocket.receive_text()
            # Process incoming messages here if needed
    except WebSocketDisconnect:
        ws_manager.disconnect(websocket)
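
# A minimal listener sketch using the third-party `websockets` package (an
# assumption -- any WebSocket client works). Kept as comments so the module
# stays importable:
#
#   import asyncio, json, websockets
#
#   async def listen():
#       async with websockets.connect("ws://localhost:8008/ws") as ws:
#           while True:
#               event = json.loads(await ws.recv())
#               print(event["type"], event["data"])
#
#   asyncio.run(listen())
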
# Utility functions
async def simulate_task_execution(task_id: int):
    """Simulate background execution of a task"""
    task = next((t for t in db.tasks if t.id == task_id), None)
    if not task:
        return

    # Simulate progress
    for progress in range(0, 101, 10):
        task.progress = progress

        # Notify WebSocket clients
        await ws_manager.broadcast({
            "type": "task_progress",
            "data": {
                "id": task_id,
                "progress": progress
            }
        })

        await asyncio.sleep(0.5)  # Wait 500 ms between updates

    # Mark the task as completed
    task.status = "completed"
    task.end_time = datetime.now(timezone.utc)
    task.duration = "2m 30s"

    # Add a log entry
    log_entry = LogEntry(
        id=db.get_next_id("logs"),
        timestamp=datetime.now(timezone.utc),
        level="INFO",
        message=f"Tâche '{task.name}' terminée avec succès sur {task.host}",
        source="task_manager",
        host=task.host
    )
    db.logs.insert(0, log_entry)

    # Notify WebSocket clients
    await ws_manager.broadcast({
        "type": "task_completed",
        "data": {
            "id": task_id,
            "status": "completed",
            "progress": 100
        }
    })

    # Save the markdown log
    try:
        task_log_service.save_task_log(task=task, output="Tâche simulée terminée avec succès")
    except Exception as log_error:
        print(f"Erreur sauvegarde log markdown: {log_error}")
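
# The loop above emits eleven task_progress events (0, 10, ..., 100) roughly
# 500 ms apart, so a client sees frames shaped like (illustrative id):
#
#   {"type": "task_progress", "data": {"id": 7, "progress": 40}}
#   ...
#   {"type": "task_completed", "data": {"id": 7, "status": "completed", "progress": 100}}
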
async def execute_ansible_task(
    task_id: int,
    playbook: str,
    target: str,
    extra_vars: Optional[Dict[str, Any]] = None,
    check_mode: bool = False
):
    """Run an Ansible playbook for a task"""
    task = next((t for t in db.tasks if t.id == task_id), None)
    if not task:
        return

    # Announce the start
    task.progress = 10
    await ws_manager.broadcast({
        "type": "task_progress",
        "data": {"id": task_id, "progress": 10, "message": "Démarrage du playbook Ansible..."}
    })

    start_time = perf_counter()

    try:
        # Run the playbook
        result = await ansible_service.execute_playbook(
            playbook=playbook,
            target=target,
            extra_vars=extra_vars,
            check_mode=check_mode,
            verbose=True
        )

        execution_time = perf_counter() - start_time

        # Update the task
        task.progress = 100
        task.status = "completed" if result["success"] else "failed"
        task.end_time = datetime.now(timezone.utc)
        task.duration = f"{execution_time:.1f}s"
        task.output = result.get("stdout", "")
        task.error = result.get("stderr", "") if not result["success"] else None

        # For a targeted health check, refresh the host's status/last_seen
        if "health-check" in playbook and target and target != "all":
            try:
                new_status = "online" if result["success"] else "offline"
                db.update_host_status(target, new_status)
            except Exception:
                # Don't interrupt task handling if the status update fails
                pass

        # Add a log entry
        log_entry = LogEntry(
            id=db.get_next_id("logs"),
            timestamp=datetime.now(timezone.utc),
            level="INFO" if result["success"] else "ERROR",
            message=f"Tâche '{task.name}' {'terminée avec succès' if result['success'] else 'échouée'} sur {target}",
            source="ansible",
            host=target
        )
        db.logs.insert(0, log_entry)

        # Notify WebSocket clients
        await ws_manager.broadcast({
            "type": "task_completed",
            "data": {
                "id": task_id,
                "status": task.status,
                "progress": 100,
                "duration": task.duration,
                "success": result["success"],
                "output": result.get("stdout", "")[:500]  # Cap the payload size
            }
        })

        # Save the markdown log
        try:
            log_path = task_log_service.save_task_log(
                task=task,
                output=result.get("stdout", ""),
                error=result.get("stderr", "")
            )
            log_entry = LogEntry(
                id=db.get_next_id("logs"),
                timestamp=datetime.now(timezone.utc),
                level="DEBUG",
                message=f"Log de tâche sauvegardé: {log_path}",
                source="task_log",
                host=target
            )
            db.logs.insert(0, log_entry)
        except Exception as log_error:
            print(f"Erreur sauvegarde log markdown: {log_error}")

    except Exception as e:
        task.status = "failed"
        task.end_time = datetime.now(timezone.utc)
        task.error = str(e)

        log_entry = LogEntry(
            id=db.get_next_id("logs"),
            timestamp=datetime.now(timezone.utc),
            level="ERROR",
            message=f"Erreur lors de l'exécution de '{task.name}': {str(e)}",
            source="ansible",
            host=target
        )
        db.logs.insert(0, log_entry)

        # Save the markdown log even on failure
        try:
            task_log_service.save_task_log(task=task, error=str(e))
        except Exception:
            pass

        await ws_manager.broadcast({
            "type": "task_failed",
            "data": {
                "id": task_id,
                "status": "failed",
                "error": str(e)
            }
        })
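
# This coroutine is typically fired from an endpoint as a background job, e.g.
# (a sketch with a hypothetical task id and target; the real call sites live
# elsewhere in this file):
#
#   asyncio.create_task(execute_ansible_task(
#       task_id=42,
#       playbook=ACTION_PLAYBOOK_MAP["health-check"],
#       target="web-01",
#       check_mode=True,  # dry-run: report changes without applying them
#   ))
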
# Start the application (development settings: auto-reload enabled)
if __name__ == "__main__":
    uvicorn.run(
        "app_optimized:app",
        host="0.0.0.0",
        port=8008,
        reload=True,
        log_level="info"
    )