diff --git a/Dockerfile b/Dockerfile
index 93461dc..28bd7d4 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -39,11 +39,16 @@ COPY app/ ./
 COPY ansible/ /ansible/
 
 # Création du répertoire pour les clés SSH (sera monté en volume)
-RUN mkdir -p /app/ssh_keys
+RUN mkdir -p /app/ssh_keys /app/data
 
 # Configuration Ansible pour utiliser le bon répertoire
 ENV ANSIBLE_CONFIG=/ansible/ansible.cfg
 
+# Variables par défaut pour la base SQLite dans le conteneur
+ENV HOMELAB_DATA_DIR=/app/data
+ENV DB_PATH=/app/data/homelab.db
+ENV DATABASE_URL=sqlite+aiosqlite:////app/data/homelab.db
+
 # Exposition du port
 EXPOSE 8000
 
diff --git a/README.md b/README.md
index 07070de..760a5ed 100644
--- a/README.md
+++ b/README.md
@@ -365,16 +365,56 @@ ANSIBLE_DIR=/etc/ansible
 
 ### Base de Données
 
-Par défaut, l'application utilise une base de données en mémoire. Pour une utilisation en production, configurez PostgreSQL ou SQLite en modifiant la classe `InMemoryDB`.
+L'application utilise **SQLite** avec **SQLAlchemy 2.x async** pour le stockage persistant des données (hôtes, tâches, schedules, logs). La base est créée automatiquement au démarrage dans `data/homelab.db`.
+
+#### Migration depuis les fichiers JSON
+
+Si vous migrez depuis une version antérieure utilisant des fichiers JSON :
+
+```bash
+# 1. Installer les nouvelles dépendances
+pip install -r app/requirements.txt
+
+# 2. Exécuter le script de migration
+python migrate_json_to_sqlite.py
+```
+
+Le script :
+- Importe les données depuis `ansible/.host_status.json`, `tasks_logs/.bootstrap_status.json`, etc.
+- Crée des sauvegardes `.bak` des fichiers JSON originaux
+- Génère un rapport de migration
+
+#### Structure de la base de données
+
+```
+data/homelab.db
+├── hosts              # Inventaire des hôtes
+├── bootstrap_status   # Statut bootstrap SSH par hôte
+├── tasks              # Historique des tâches exécutées
+├── schedules          # Planifications récurrentes
+├── schedule_runs      # Historique des exécutions de schedules
+└── logs               # Logs système
+```
+
+#### Migrations Alembic
+
+Pour les évolutions de schéma :
+
+```bash
+# Appliquer les migrations
+alembic upgrade head
+
+# Créer une nouvelle migration
+alembic revision --autogenerate -m "description"
+```
 
 ## 🚀 Déploiement
 
 ### Production
 
 1. **Configuration de la base de données**
-   ```python
-   # Remplacer InMemoryDB par une vraie base de données
-   ```
+   - Par défaut : SQLite dans `data/homelab.db`
+   - Variable d'environnement : `DATABASE_URL=sqlite+aiosqlite:///./data/homelab.db`
 
 2. **Sécurité**
    - Utilisez une clé API forte
diff --git a/alembic.ini b/alembic.ini
new file mode 100644
index 0000000..8b326ce
--- /dev/null
+++ b/alembic.ini
@@ -0,0 +1,35 @@
+[alembic]
+script_location = alembic
+sqlalchemy.url = sqlite+aiosqlite:///./data/homelab.db
+
+[loggers]
+keys = root,sqlalchemy,alembic
+
+[handlers]
+keys = console
+
+[formatters]
+keys = generic
+
+[logger_root]
+level = WARN
+handlers = console
+
+[logger_sqlalchemy]
+level = WARN
+handlers = console
+qualname = sqlalchemy.engine
+
+[logger_alembic]
+level = INFO
+handlers = console
+qualname = alembic
+
+[handler_console]
+class = StreamHandler
+args = (sys.stderr,)
+level = NOTSET
+formatter = generic
+
+[formatter_generic]
+format = %(levelname)-5.5s [%(name)s] %(message)s
diff --git a/alembic/env.py b/alembic/env.py
new file mode 100644
index 0000000..a9a6c4f
--- /dev/null
+++ b/alembic/env.py
@@ -0,0 +1,91 @@
+from __future__ import annotations
+
+import asyncio
+from logging.config import fileConfig
+import os
+from pathlib import Path
+
+from sqlalchemy import pool
+from sqlalchemy.engine import Connection
+from sqlalchemy.ext.asyncio import async_engine_from_config
+
+from alembic import context
+
+# Add project root to sys.path for module imports
+import sys
+ROOT_DIR = Path(__file__).resolve().parents[1]
+if str(ROOT_DIR) not in sys.path:
+    sys.path.insert(0, str(ROOT_DIR))
+
+from app.models.database import Base, metadata_obj, DATABASE_URL  # noqa: E402
+
+# this is the Alembic Config object, which provides
+# access to the values within the .ini file in use.
+config = context.config
+
+# Interpret the config file for Python logging.
+if config.config_file_name is not None:
+    fileConfig(config.config_file_name)
+
+# Override sqlalchemy.url from environment if provided
+url_from_env = os.environ.get("DATABASE_URL")
+if url_from_env:
+    config.set_main_option("sqlalchemy.url", url_from_env)
+else:
+    config.set_main_option("sqlalchemy.url", str(DATABASE_URL))
+
+# add your model's MetaData object here
+# for 'autogenerate' support
+# from myapp import mymodel
+# target_metadata = mymodel.Base.metadata
+target_metadata = metadata_obj
+
+
+def run_migrations_offline() -> None:
+    """Run migrations in 'offline' mode."""
+    url = config.get_main_option("sqlalchemy.url")
+    context.configure(
+        url=url,
+        target_metadata=target_metadata,
+        literal_binds=True,
+        dialect_opts={"paramstyle": "named"},
+        compare_type=True,
+    )
+
+    with context.begin_transaction():
+        context.run_migrations()
+
+
+def do_run_migrations(connection: Connection) -> None:
+    context.configure(
+        connection=connection,
+        target_metadata=target_metadata,
+        compare_type=True,
+    )
+
+    with context.begin_transaction():
+        context.run_migrations()
+
+
+def run_migrations_online() -> None:
+    """Run migrations in 'online' mode."""
+
+    connectable = async_engine_from_config(
+        config.get_section(config.config_ini_section, {}),
+        prefix="sqlalchemy.",
+        poolclass=pool.NullPool,
+        future=True,
+    )
+
+    async def async_main() -> None:
+        async with connectable.connect() as connection:
+            await connection.run_sync(do_run_migrations)
+        await connectable.dispose()
+
+    asyncio.run(async_main())
+
+
+if context.is_offline_mode():
+    run_migrations_offline()
+else:
+    run_migrations_online()
diff --git a/alembic/versions/0001_initial_schema.py b/alembic/versions/0001_initial_schema.py
new file mode 100644
index 0000000..2982d37
--- /dev/null
+++ b/alembic/versions/0001_initial_schema.py
@@ -0,0 +1,122 @@
+"""Initial database schema for Homelab Automation
+
+Revision ID: 0001_initial
+Revises:
+Create Date: 2025-12-04
+"""
+
+from __future__ import annotations
+
+from alembic import op
+import sqlalchemy as sa
+
+# revision identifiers, used by Alembic.
+revision = "0001_initial"
+down_revision = None
+branch_labels = None
+depends_on = None
+
+
+def upgrade() -> None:
+    op.create_table(
+        "hosts",
+        sa.Column("id", sa.String(), primary_key=True),
+        sa.Column("name", sa.String(), nullable=False),
+        sa.Column("ip_address", sa.String(), nullable=False, unique=True),
+        sa.Column("status", sa.String(), nullable=False, server_default=sa.text("'unknown'")),
+        sa.Column("ansible_group", sa.String(), nullable=True),
+        sa.Column("last_seen", sa.DateTime(timezone=True), nullable=True),
+        sa.Column("reachable", sa.Boolean(), nullable=False, server_default=sa.text("0")),
+        sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.func.now(), nullable=False),
+        sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.func.now(), nullable=False),
+        sa.Column("deleted_at", sa.DateTime(timezone=True), nullable=True),
+    )
+
+    op.create_table(
+        "bootstrap_status",
+        sa.Column("id", sa.Integer(), primary_key=True, autoincrement=True),
+        sa.Column("host_id", sa.String(), sa.ForeignKey("hosts.id", ondelete="CASCADE"), nullable=False),
+        sa.Column("status", sa.String(), nullable=False),
+        sa.Column("automation_user", sa.String(), nullable=True),
+        sa.Column("last_attempt", sa.DateTime(timezone=True), nullable=True),
+        sa.Column("error_message", sa.Text(), nullable=True),
+        sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.func.now(), nullable=False),
+    )
+
+    op.create_table(
+        "tasks",
+        sa.Column("id", sa.String(), primary_key=True),
+        sa.Column("action", sa.String(), nullable=False),
+        sa.Column("target", sa.String(), nullable=False),
+        sa.Column("status", sa.String(), nullable=False, server_default=sa.text("'pending'")),
+        sa.Column("playbook", sa.String(), nullable=True),
+        sa.Column("started_at", sa.DateTime(timezone=True), nullable=True),
+        sa.Column("completed_at", sa.DateTime(timezone=True), nullable=True),
+        sa.Column("error_message", sa.Text(), nullable=True),
+        sa.Column("result_data", sa.JSON(), nullable=True),
+        sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.func.now(), nullable=False),
+    )
+
+    op.create_table(
+        "schedules",
+        sa.Column("id", sa.String(), primary_key=True),
+        sa.Column("name", sa.String(), nullable=False),
+        sa.Column("playbook", sa.String(), nullable=False),
+        sa.Column("target", sa.String(), nullable=False),
+        sa.Column("schedule_type", sa.String(), nullable=False),
+        sa.Column("schedule_time", sa.DateTime(timezone=True), nullable=True),
+        sa.Column("recurrence_type", sa.String(), nullable=True),
+        sa.Column("recurrence_time", sa.String(), nullable=True),
+        sa.Column("recurrence_days", sa.Text(), nullable=True),
+        sa.Column("cron_expression", sa.String(), nullable=True),
+        sa.Column("enabled", sa.Boolean(), nullable=False, server_default=sa.text("1")),
+        sa.Column("tags", sa.Text(), nullable=True),
+        sa.Column("next_run", sa.DateTime(timezone=True), nullable=True),
+        sa.Column("last_run", sa.DateTime(timezone=True), nullable=True),
+        sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.func.now(), nullable=False),
+        sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.func.now(), nullable=False),
+        sa.Column("deleted_at", sa.DateTime(timezone=True), nullable=True),
+    )
+
+    op.create_table(
+        "schedule_runs",
+        sa.Column("id", sa.Integer(), primary_key=True, autoincrement=True),
+        sa.Column("schedule_id", sa.String(), sa.ForeignKey("schedules.id", ondelete="CASCADE"), nullable=False),
+        sa.Column("task_id", sa.String(), sa.ForeignKey("tasks.id", ondelete="SET NULL"), nullable=True),
+        sa.Column("status", sa.String(), nullable=False),
+        sa.Column("started_at", sa.DateTime(timezone=True), nullable=False),
+        sa.Column("completed_at", sa.DateTime(timezone=True), nullable=True),
+        sa.Column("duration", sa.Float(), nullable=True),
+        sa.Column("error_message", sa.Text(), nullable=True),
+        sa.Column("output", sa.Text(), nullable=True),
+        sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.func.now(), nullable=False),
+    )
+
+    op.create_table(
+        "logs",
+        sa.Column("id", sa.Integer(), primary_key=True, autoincrement=True),
+        sa.Column("level", sa.String(), nullable=False),
+        sa.Column("source", sa.String(), nullable=True),
+        sa.Column("message", sa.Text(), nullable=False),
+        sa.Column("details", sa.JSON(), nullable=True),
+        sa.Column("host_id", sa.String(), sa.ForeignKey("hosts.id", ondelete="SET NULL"), nullable=True),
+        sa.Column("task_id", sa.String(), sa.ForeignKey("tasks.id", ondelete="SET NULL"), nullable=True),
+        sa.Column("schedule_id", sa.String(), sa.ForeignKey("schedules.id", ondelete="SET NULL"), nullable=True),
+        sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.func.now(), nullable=False),
+    )
+
+    op.create_index("idx_logs_created_at", "logs", ["created_at"])
+    op.create_index("idx_logs_level", "logs", ["level"])
+    op.create_index("idx_logs_source", "logs", ["source"])
+
+
+def downgrade() -> None:
+    op.drop_index("idx_logs_source", table_name="logs")
+    op.drop_index("idx_logs_level", table_name="logs")
+    op.drop_index("idx_logs_created_at", table_name="logs")
+    op.drop_table("logs")
+    op.drop_table("schedule_runs")
+    op.drop_table("schedules")
+    op.drop_table("tasks")
+    op.drop_table("bootstrap_status")
+    op.drop_table("hosts")
diff --git a/app/app_optimized.py b/app/app_optimized.py
index 11506f7..34c099e 100644
--- a/app/app_optimized.py
+++ b/app/app_optimized.py
@@ -34,9 +34,20 @@ from fastapi.security import APIKeyHeader
 from fastapi.templating import Jinja2Templates
 from fastapi.middleware.cors import CORSMiddleware
 from fastapi.staticfiles import StaticFiles
-from pydantic import BaseModel, Field, field_validator
+from pydantic import BaseModel, Field, field_validator, ConfigDict
+from sqlalchemy.ext.asyncio import AsyncSession
 import uvicorn
 
+# Import DB layer (async SQLAlchemy)
+from models.database import get_db  # type: ignore
+from crud.host import HostRepository  # type: ignore
+from crud.bootstrap_status import BootstrapStatusRepository  # type: ignore
+from crud.log import LogRepository  # type: ignore
+from crud.task import TaskRepository  # type: ignore
+from crud.schedule import ScheduleRepository  # type: ignore
+from crud.schedule_run import ScheduleRunRepository  # type: ignore
+from models.database import init_db  # type: ignore
+
 BASE_DIR = Path(__file__).resolve().parent
 
 # Configuration avancée de l'application
@@ -97,7 +108,7 @@ class CommandResult(BaseModel):
     timestamp: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
 
 class Host(BaseModel):
-    id: int
+    id: str
     name: str
     ip: str
     status: Literal["online", "offline", "warning"]
@@ -114,7 +125,7 @@ class Host(BaseModel):
     }
 
 class Task(BaseModel):
-    id: int
+    id: str
     name: str
     host: str
     status: Literal["pending", "running", "completed", "failed", "cancelled"]
@@ -386,7 +397,7 @@ class
ScheduleRun(BaseModel): """Historique d'une exécution de schedule""" id: str = Field(default_factory=lambda: f"run_{uuid.uuid4().hex[:12]}") schedule_id: str = Field(..., description="ID du schedule parent") - task_id: Optional[int] = Field(default=None, description="ID de la tâche créée") + task_id: Optional[str] = Field(default=None, description="ID de la tâche créée") started_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc)) finished_at: Optional[datetime] = Field(default=None) status: Literal["running", "success", "failed", "canceled"] = Field(default="running") @@ -1375,7 +1386,7 @@ class SchedulerService: pass # Créer une tâche - task_id = db.get_next_id("tasks") + task_id = str(db.get_next_id("tasks")) playbook_name = schedule.playbook.replace('.yml', '').replace('-', ' ').title() task = Task( id=task_id, @@ -1646,6 +1657,16 @@ class SchedulerService: # Appliquer les modifications update_data = request.dict(exclude_unset=True, exclude_none=True) for key, value in update_data.items(): + # La récurrence arrive du frontend comme un dict, il faut la retransformer + # en objet ScheduleRecurrence pour que _build_cron_trigger fonctionne. + if key == "recurrence" and isinstance(value, dict): + try: + value = ScheduleRecurrence(**value) + except Exception: + # Si la récurrence est invalide, on laisse passer pour que la + # validation côté endpoint remonte une erreur explicite. + pass + if hasattr(schedule, key): setattr(schedule, key, value) @@ -1838,6 +1859,12 @@ class SchedulerService: "expression": expression } + def get_runs_for_schedule(self, schedule_id: str, limit: int = 50) -> List[Dict]: + """Récupère l'historique des exécutions d'un schedule (retourne des dicts)""" + runs = self._load_runs() + schedule_runs = [r for r in runs if r.get('schedule_id') == schedule_id] + return schedule_runs[:limit] + def cleanup_old_runs(self, days: int = 90): """Nettoie les exécutions plus anciennes que X jours""" cutoff = datetime.now(timezone.utc) - timedelta(days=days) @@ -2913,7 +2940,7 @@ class HybridDB: os_label = runtime_status.get("os", f"Linux ({primary_group})") host = Host( - id=idx, + id=str(idx), name=ah.name, ip=ah.ansible_host, status=status, @@ -3267,233 +3294,311 @@ async def delete_group( } +def _host_to_response(host_obj, bootstrap_status: Optional["BootstrapStatus"] = None) -> Dict[str, Any]: + """Map DB host + latest bootstrap to API-compatible payload.""" + return { + "id": host_obj.id, + "name": host_obj.name, + "ip": getattr(host_obj, "ip_address", None), + "status": host_obj.status, + "os": "Linux", # valeur par défaut faute d'info stockée + "last_seen": host_obj.last_seen, + "created_at": host_obj.created_at, + "groups": [g for g in [getattr(host_obj, "ansible_group", None)] if g], + "bootstrap_ok": (bootstrap_status.status == "success") if bootstrap_status else False, + "bootstrap_date": bootstrap_status.last_attempt if bootstrap_status else None, + } + + @app.get("/api/hosts/by-name/{host_name}") -async def get_host_by_name(host_name: str, api_key_valid: bool = Depends(verify_api_key)): - """Récupère un hôte spécifique par son nom""" - host = next((h for h in db.hosts if h.name == host_name), None) +async def get_host_by_name( + host_name: str, + api_key_valid: bool = Depends(verify_api_key), + db_session: AsyncSession = Depends(get_db), +): + repo = HostRepository(db_session) + bs_repo = BootstrapStatusRepository(db_session) + host = await repo.get_by_ip(host_name) or await repo.get(host_name) if not host: raise HTTPException(status_code=404, 
detail="Hôte non trouvé") - return host + bootstrap = await bs_repo.latest_for_host(host.id) + return _host_to_response(host, bootstrap) -@app.get("/api/hosts", response_model=List[Host]) + +@app.get("/api/hosts") async def get_hosts( bootstrap_status: Optional[str] = None, - api_key_valid: bool = Depends(verify_api_key) + limit: int = 100, + offset: int = 0, + api_key_valid: bool = Depends(verify_api_key), + db_session: AsyncSession = Depends(get_db), ): - """Récupère la liste de tous les hôtes - - Args: - bootstrap_status: Filtrer par statut bootstrap ('ready', 'not_configured', ou None pour tous) - """ - hosts = db.hosts - - # Filtrer par statut bootstrap si spécifié - if bootstrap_status == 'ready': - hosts = [h for h in hosts if h.bootstrap_ok] - elif bootstrap_status == 'not_configured': - hosts = [h for h in hosts if not h.bootstrap_ok] - - return hosts + repo = HostRepository(db_session) + bs_repo = BootstrapStatusRepository(db_session) + hosts = await repo.list(limit=limit, offset=offset) + # Si la base ne contient encore aucun hôte, on retombe sur les hôtes Ansible via la DB hybride + if not hosts: + hybrid_hosts = db.hosts + fallback_results = [] + for h in hybrid_hosts: + # Appliquer les mêmes filtres de bootstrap que pour la version DB + if bootstrap_status == "ready" and not h.bootstrap_ok: + continue + if bootstrap_status == "not_configured" and h.bootstrap_ok: + continue -@app.get("/api/hosts/{host_id}", response_model=Host) -async def get_host(host_id: int, api_key_valid: bool = Depends(verify_api_key)): - """Récupère un hôte spécifique par ID""" - host = next((h for h in db.hosts if h.id == host_id), None) + fallback_results.append( + { + "id": h.id, + "name": h.name, + "ip": h.ip, + "status": h.status, + "os": h.os, + "last_seen": h.last_seen, + # created_at est déjà géré par le modèle Pydantic Host (default_factory) + "created_at": h.created_at, + "groups": h.groups, + "bootstrap_ok": h.bootstrap_ok, + "bootstrap_date": h.bootstrap_date, + } + ) + return fallback_results + + results = [] + for host in hosts: + bootstrap = await bs_repo.latest_for_host(host.id) + if bootstrap_status == "ready" and not (bootstrap and bootstrap.status == "success"): + continue + if bootstrap_status == "not_configured" and bootstrap and bootstrap.status == "success": + continue + results.append(_host_to_response(host, bootstrap)) + return results + + +@app.get("/api/hosts/{host_id}") +async def get_host( + host_id: str, + api_key_valid: bool = Depends(verify_api_key), + db_session: AsyncSession = Depends(get_db), +): + repo = HostRepository(db_session) + bs_repo = BootstrapStatusRepository(db_session) + host = await repo.get(host_id) if not host: raise HTTPException(status_code=404, detail="Hôte non trouvé") - return host + bootstrap = await bs_repo.latest_for_host(host.id) + return _host_to_response(host, bootstrap) + @app.post("/api/hosts") -async def create_host(host_request: HostRequest, api_key_valid: bool = Depends(verify_api_key)): - """Crée un nouvel hôte dans l'inventaire Ansible (hosts.yml) - - L'hôte sera ajouté au groupe d'environnement spécifié et aux groupes de rôles. 
- """ +async def create_host( + host_request: HostRequest, + api_key_valid: bool = Depends(verify_api_key), + db_session: AsyncSession = Depends(get_db), +): + repo = HostRepository(db_session) + bs_repo = BootstrapStatusRepository(db_session) + # Vérifier si l'hôte existe déjà - if ansible_service.host_exists(host_request.name): - raise HTTPException(status_code=400, detail=f"L'hôte '{host_request.name}' existe déjà dans l'inventaire") - + existing = await repo.get_by_ip(host_request.name) + if existing: + raise HTTPException(status_code=400, detail=f"L'hôte '{host_request.name}' existe déjà") + # Valider le groupe d'environnement env_groups = ansible_service.get_env_groups() - if host_request.env_group not in env_groups: - # Créer le groupe s'il n'existe pas mais commence par env_ - if not host_request.env_group.startswith('env_'): - raise HTTPException(status_code=400, detail=f"Le groupe d'environnement doit commencer par 'env_'. Groupes existants: {env_groups}") - + if host_request.env_group not in env_groups and not host_request.env_group.startswith("env_"): + raise HTTPException(status_code=400, detail=f"Le groupe d'environnement doit commencer par 'env_'. Groupes existants: {env_groups}") + # Valider les groupes de rôles role_groups = ansible_service.get_role_groups() for role in host_request.role_groups: - if role not in role_groups and not role.startswith('role_'): + if role not in role_groups and not role.startswith("role_"): raise HTTPException(status_code=400, detail=f"Le groupe de rôle '{role}' doit commencer par 'role_'. Groupes existants: {role_groups}") - + try: - # Ajouter l'hôte à l'inventaire + # Ajouter l'hôte à l'inventaire Ansible ansible_service.add_host_to_inventory( hostname=host_request.name, env_group=host_request.env_group, role_groups=host_request.role_groups, - ansible_host=host_request.ip + ansible_host=host_request.ip, ) - - # Invalider le cache pour recharger les hôtes - db._hosts_cache = None - - # Récupérer le nouvel hôte - new_host = next((h for h in db.hosts if h.name == host_request.name), None) - - # Ajouter un log - log_entry = LogEntry( - id=db.get_next_id("logs"), - timestamp=datetime.now(timezone.utc), - level="INFO", - message=f"Hôte '{host_request.name}' ajouté à l'inventaire (env: {host_request.env_group}, roles: {host_request.role_groups})", - source="inventory", - host=host_request.name + + # Créer en base + host = await repo.create( + id=uuid.uuid4().hex, + name=host_request.name, + ip_address=host_request.ip or host_request.name, + ansible_group=host_request.env_group, + status="unknown", + reachable=False, + last_seen=None, ) - db.logs.insert(0, log_entry) - + bootstrap = await bs_repo.latest_for_host(host.id) + + await db_session.commit() + # Notifier les clients WebSocket - await ws_manager.broadcast({ - "type": "host_created", - "data": new_host.dict() if new_host else {"name": host_request.name} - }) - + await ws_manager.broadcast( + { + "type": "host_created", + "data": _host_to_response(host, bootstrap), + } + ) + return { "message": f"Hôte '{host_request.name}' ajouté avec succès", - "host": new_host.dict() if new_host else None, - "inventory_updated": True + "host": _host_to_response(host, bootstrap), + "inventory_updated": True, } - + + except HTTPException: + raise except Exception as e: + await db_session.rollback() raise HTTPException(status_code=500, detail=f"Erreur lors de l'ajout de l'hôte: {str(e)}") + @app.put("/api/hosts/{host_name}") async def update_host( host_name: str, update_request: HostUpdateRequest, - 
api_key_valid: bool = Depends(verify_api_key) + api_key_valid: bool = Depends(verify_api_key), + db_session: AsyncSession = Depends(get_db), ): - """Met à jour les groupes d'un hôte existant dans l'inventaire Ansible""" - # Vérifier que l'hôte existe - if not ansible_service.host_exists(host_name): - raise HTTPException(status_code=404, detail=f"Hôte '{host_name}' non trouvé dans l'inventaire") - + repo = HostRepository(db_session) + bs_repo = BootstrapStatusRepository(db_session) + host = await repo.get_by_ip(host_name) or await repo.get(host_name) + if not host: + raise HTTPException(status_code=404, detail=f"Hôte '{host_name}' non trouvé") + # Valider le groupe d'environnement si fourni if update_request.env_group: env_groups = ansible_service.get_env_groups() - if update_request.env_group not in env_groups and not update_request.env_group.startswith('env_'): + if update_request.env_group not in env_groups and not update_request.env_group.startswith("env_"): raise HTTPException(status_code=400, detail=f"Le groupe d'environnement doit commencer par 'env_'") - + # Valider les groupes de rôles si fournis if update_request.role_groups: for role in update_request.role_groups: - if not role.startswith('role_'): + if not role.startswith("role_"): raise HTTPException(status_code=400, detail=f"Le groupe de rôle '{role}' doit commencer par 'role_'") - + try: - success = ansible_service.update_host_groups( + ansible_service.update_host_groups( hostname=host_name, env_group=update_request.env_group, role_groups=update_request.role_groups, - ansible_host=update_request.ansible_host + ansible_host=update_request.ansible_host, ) - - if not success: - raise HTTPException(status_code=500, detail="Échec de la mise à jour de l'hôte") - - # Invalider le cache - db._hosts_cache = None - - # Récupérer l'hôte mis à jour - updated_host = next((h for h in db.hosts if h.name == host_name), None) - - # Ajouter un log - log_entry = LogEntry( - id=db.get_next_id("logs"), - timestamp=datetime.now(timezone.utc), - level="INFO", - message=f"Hôte '{host_name}' mis à jour (env: {update_request.env_group}, roles: {update_request.role_groups})", - source="inventory", - host=host_name + + await repo.update( + host, + ansible_group=update_request.env_group or host.ansible_group, ) - db.logs.insert(0, log_entry) - - # Notifier les clients WebSocket - await ws_manager.broadcast({ - "type": "host_updated", - "data": updated_host.dict() if updated_host else {"name": host_name} - }) - + await db_session.commit() + + bootstrap = await bs_repo.latest_for_host(host.id) + + await ws_manager.broadcast( + { + "type": "host_updated", + "data": _host_to_response(host, bootstrap), + } + ) + return { "message": f"Hôte '{host_name}' mis à jour avec succès", - "host": updated_host.dict() if updated_host else None, - "inventory_updated": True + "host": _host_to_response(host, bootstrap), + "inventory_updated": True, } - + except HTTPException: + await db_session.rollback() raise except Exception as e: + await db_session.rollback() raise HTTPException(status_code=500, detail=f"Erreur lors de la mise à jour: {str(e)}") + @app.delete("/api/hosts/by-name/{host_name}") -async def delete_host_by_name(host_name: str, api_key_valid: bool = Depends(verify_api_key)): - """Supprime un hôte de l'inventaire Ansible par son nom""" - # Vérifier que l'hôte existe - if not ansible_service.host_exists(host_name): - raise HTTPException(status_code=404, detail=f"Hôte '{host_name}' non trouvé dans l'inventaire") - +async def delete_host_by_name( + host_name: 
str, + api_key_valid: bool = Depends(verify_api_key), + db_session: AsyncSession = Depends(get_db), +): + repo = HostRepository(db_session) + host = await repo.get_by_ip(host_name) or await repo.get(host_name) + if not host: + raise HTTPException(status_code=404, detail=f"Hôte '{host_name}' non trouvé") + try: - success = ansible_service.remove_host_from_inventory(host_name) - - if not success: - raise HTTPException(status_code=500, detail="Échec de la suppression de l'hôte") - - # Invalider le cache - db._hosts_cache = None - - # Ajouter un log - log_entry = LogEntry( - id=db.get_next_id("logs"), - timestamp=datetime.now(timezone.utc), - level="WARN", - message=f"Hôte '{host_name}' supprimé de l'inventaire", - source="inventory", - host=host_name + ansible_service.remove_host_from_inventory(host_name) + await repo.soft_delete(host.id) + await db_session.commit() + + await ws_manager.broadcast( + { + "type": "host_deleted", + "data": {"name": host_name}, + } ) - db.logs.insert(0, log_entry) - - # Notifier les clients WebSocket - await ws_manager.broadcast({ - "type": "host_deleted", - "data": {"name": host_name} - }) - - return { - "message": f"Hôte '{host_name}' supprimé avec succès", - "inventory_updated": True - } - + + return {"message": f"Hôte '{host_name}' supprimé avec succès", "inventory_updated": True} except HTTPException: + await db_session.rollback() raise except Exception as e: + await db_session.rollback() raise HTTPException(status_code=500, detail=f"Erreur lors de la suppression: {str(e)}") + @app.delete("/api/hosts/{host_id}") -async def delete_host(host_id: int, api_key_valid: bool = Depends(verify_api_key)): - """Supprime un hôte par ID""" - host = next((h for h in db.hosts if h.id == host_id), None) +async def delete_host( + host_id: str, + api_key_valid: bool = Depends(verify_api_key), + db_session: AsyncSession = Depends(get_db), +): + repo = HostRepository(db_session) + host = await repo.get(host_id) if not host: raise HTTPException(status_code=404, detail="Hôte non trouvé") - - return await delete_host_by_name(host.name, api_key_valid) -@app.get("/api/tasks", response_model=List[Task]) -async def get_tasks(api_key_valid: bool = Depends(verify_api_key)): + return await delete_host_by_name(host.name, api_key_valid, db_session) + +@app.get("/api/tasks") +async def get_tasks( + limit: int = 100, + offset: int = 0, + api_key_valid: bool = Depends(verify_api_key), + db_session: AsyncSession = Depends(get_db), +): """Récupère la liste de toutes les tâches""" - return db.tasks + repo = TaskRepository(db_session) + tasks = await repo.list(limit=limit, offset=offset) + return [ + { + "id": t.id, + "name": t.action, + "host": t.target, + "status": t.status, + "progress": 100 if t.status == "completed" else (50 if t.status == "running" else 0), + "start_time": t.started_at, + "end_time": t.completed_at, + "duration": None, + "output": t.result_data.get("output") if t.result_data else None, + "error": t.error_message, + } + for t in tasks + ] -@app.post("/api/tasks", response_model=Task) -async def create_task(task_request: TaskRequest, api_key_valid: bool = Depends(verify_api_key)): + +@app.post("/api/tasks") +async def create_task( + task_request: TaskRequest, + api_key_valid: bool = Depends(verify_api_key), + db_session: AsyncSession = Depends(get_db), +): """Crée une nouvelle tâche et exécute le playbook Ansible correspondant""" task_names = { 'upgrade': 'Mise à jour système', @@ -3505,39 +3610,55 @@ async def create_task(task_request: TaskRequest, api_key_valid: bool = 
Depends(v 'maintenance': 'Maintenance', 'bootstrap': 'Bootstrap Ansible' } - - new_task = Task( - id=db.get_next_id("tasks"), - name=task_names.get(task_request.action, f"Tâche {task_request.action}"), - host=task_request.host or task_request.group or "all", + + repo = TaskRepository(db_session) + task_id = uuid.uuid4().hex + target = task_request.host or task_request.group or "all" + playbook = ACTION_PLAYBOOK_MAP.get(task_request.action) + + task_obj = await repo.create( + id=task_id, + action=task_request.action, + target=target, + playbook=playbook, status="running", - progress=0, - start_time=datetime.now(timezone.utc) ) - - db.tasks.append(new_task) - + await repo.update(task_obj, started_at=datetime.now(timezone.utc)) + await db_session.commit() + + response_data = { + "id": task_obj.id, + "name": task_names.get(task_request.action, f"Tâche {task_request.action}"), + "host": target, + "status": "running", + "progress": 0, + "start_time": task_obj.started_at, + "end_time": None, + "duration": None, + "output": None, + "error": None, + } + # Notifier les clients WebSocket await ws_manager.broadcast({ "type": "task_created", - "data": new_task.dict() + "data": response_data }) - + # Exécuter le playbook Ansible en arrière-plan - playbook = ACTION_PLAYBOOK_MAP.get(task_request.action) if playbook: asyncio.create_task(execute_ansible_task( - task_id=new_task.id, + task_id=task_obj.id, playbook=playbook, - target=new_task.host, + target=target, extra_vars=task_request.extra_vars, check_mode=task_request.dry_run )) else: # Pas de playbook correspondant, simuler - asyncio.create_task(simulate_task_execution(new_task.id)) - - return new_task + asyncio.create_task(simulate_task_execution(task_obj.id)) + + return response_data # ===== ENDPOINTS LOGS DE TÂCHES (MARKDOWN) ===== @@ -3625,75 +3746,152 @@ async def delete_task_log(log_id: str, api_key_valid: bool = Depends(verify_api_ @app.get("/api/tasks/running") -async def get_running_tasks(api_key_valid: bool = Depends(verify_api_key)): +async def get_running_tasks( + api_key_valid: bool = Depends(verify_api_key), + db_session: AsyncSession = Depends(get_db), +): """Récupère uniquement les tâches en cours d'exécution (running ou pending)""" - running_tasks = [t for t in db.tasks if t.status in ("running", "pending")] + repo = TaskRepository(db_session) + tasks = await repo.list(limit=100, offset=0) + running_tasks = [t for t in tasks if t.status in ("running", "pending")] return { - "tasks": [t.dict() for t in running_tasks], + "tasks": [ + { + "id": t.id, + "name": t.action, + "host": t.target, + "status": t.status, + "progress": 50 if t.status == "running" else 0, + "start_time": t.started_at, + "end_time": t.completed_at, + } + for t in running_tasks + ], "count": len(running_tasks) } -@app.get("/api/tasks/{task_id}", response_model=Task) -async def get_task(task_id: int, api_key_valid: bool = Depends(verify_api_key)): +@app.get("/api/tasks/{task_id}") +async def get_task( + task_id: str, + api_key_valid: bool = Depends(verify_api_key), + db_session: AsyncSession = Depends(get_db), +): """Récupère une tâche spécifique""" - task = next((t for t in db.tasks if t.id == task_id), None) + repo = TaskRepository(db_session) + task = await repo.get(task_id) if not task: raise HTTPException(status_code=404, detail="Tâche non trouvée") - return task + return { + "id": task.id, + "name": task.action, + "host": task.target, + "status": task.status, + "progress": 100 if task.status == "completed" else (50 if task.status == "running" else 0), + 
"start_time": task.started_at, + "end_time": task.completed_at, + "duration": None, + "output": task.result_data.get("output") if task.result_data else None, + "error": task.error_message, + } + @app.delete("/api/tasks/{task_id}") -async def delete_task(task_id: int, api_key_valid: bool = Depends(verify_api_key)): - """Supprime une tâche""" - task = next((t for t in db.tasks if t.id == task_id), None) +async def delete_task( + task_id: str, + api_key_valid: bool = Depends(verify_api_key), + db_session: AsyncSession = Depends(get_db), +): + """Supprime une tâche (soft delete non implémenté pour tasks, suppression directe)""" + repo = TaskRepository(db_session) + task = await repo.get(task_id) if not task: raise HTTPException(status_code=404, detail="Tâche non trouvée") - - db.tasks = [t for t in db.tasks if t.id != task_id] - + + await db_session.delete(task) + await db_session.commit() + # Notifier les clients WebSocket await ws_manager.broadcast({ "type": "task_deleted", "data": {"id": task_id} }) - + return {"message": "Tâche supprimée avec succès"} -@app.get("/api/logs", response_model=List[LogEntry]) -async def get_logs(limit: int = 50, api_key_valid: bool = Depends(verify_api_key)): - """Récupère les logs récents""" - return db.logs[:limit] +@app.get("/api/logs") +async def get_logs( + limit: int = 50, + offset: int = 0, + level: Optional[str] = None, + source: Optional[str] = None, + api_key_valid: bool = Depends(verify_api_key), + db_session: AsyncSession = Depends(get_db), +): + """Récupère les logs récents avec filtrage optionnel""" + repo = LogRepository(db_session) + logs = await repo.list(limit=limit, offset=offset, level=level, source=source) + return [ + { + "id": log.id, + "timestamp": log.created_at, + "level": log.level, + "message": log.message, + "source": log.source, + "host": log.host_id, + } + for log in logs + ] + @app.post("/api/logs") -async def create_log(log_entry: LogEntry, api_key_valid: bool = Depends(verify_api_key)): - """Ajoute un nouvel entrée de log""" - log_entry.id = db.get_next_id("logs") - db.logs.insert(0, log_entry) - - # Garder seulement les 100 derniers logs - if len(db.logs) > 100: - db.logs = db.logs[:100] - +async def create_log( + level: str, + message: str, + source: Optional[str] = None, + host_id: Optional[str] = None, + api_key_valid: bool = Depends(verify_api_key), + db_session: AsyncSession = Depends(get_db), +): + """Ajoute une nouvelle entrée de log""" + repo = LogRepository(db_session) + log = await repo.create( + level=level.upper(), + message=message, + source=source, + host_id=host_id, + ) + await db_session.commit() + + response_data = { + "id": log.id, + "timestamp": log.created_at, + "level": log.level, + "message": log.message, + "source": log.source, + "host": log.host_id, + } + # Notifier les clients WebSocket await ws_manager.broadcast({ "type": "new_log", - "data": log_entry.dict() + "data": response_data }) - - return log_entry + + return response_data + @app.delete("/api/logs") -async def clear_logs(api_key_valid: bool = Depends(verify_api_key)): - """Efface tous les logs""" - db.logs = [] - - # Notifier les clients WebSocket - await ws_manager.broadcast({ - "type": "logs_cleared", - "data": {} - }) - - return {"message": "Logs effacés avec succès"} +async def clear_logs( + api_key_valid: bool = Depends(verify_api_key), + db_session: AsyncSession = Depends(get_db), +): + """Efface tous les logs (attention: opération destructive)""" + from sqlalchemy import delete + from models.log import Log as LogModel + await 
db_session.execute(delete(LogModel)) + await db_session.commit() + return {"message": "Tous les logs ont été supprimés"} @app.get("/api/metrics", response_model=SystemMetrics) async def get_metrics(api_key_valid: bool = Depends(verify_api_key)): @@ -4703,47 +4901,60 @@ async def get_schedules( enabled: Optional[bool] = None, playbook: Optional[str] = None, tag: Optional[str] = None, - api_key_valid: bool = Depends(verify_api_key) + limit: int = 100, + offset: int = 0, + api_key_valid: bool = Depends(verify_api_key), ): - """Liste tous les schedules avec filtrage optionnel - - Args: - enabled: Filtrer par statut (true = actifs, false = en pause) - playbook: Filtrer par nom de playbook (recherche partielle) - tag: Filtrer par tag - """ - schedules = scheduler_service.get_all_schedules(enabled=enabled, playbook=playbook, tag=tag) - return { - "schedules": [s.dict() for s in schedules], - "count": len(schedules) - } + """Liste tous les schedules avec filtrage optionnel (via SchedulerService).""" + # Utiliser le SchedulerService comme source de vérité pour next_run_at / last_run_at + schedules = scheduler_service.get_all_schedules( + enabled=enabled, + playbook=playbook, + tag=tag, + ) + + # Pagination simple côté API (les schedules sont déjà triés par next_run_at) + paginated = schedules[offset : offset + limit] + + results = [] + for s in paginated: + rec = s.recurrence + results.append( + { + "id": s.id, + "name": s.name, + "playbook": s.playbook, + "target": s.target, + "schedule_type": s.schedule_type, + "recurrence": rec.model_dump() if rec else None, + "enabled": s.enabled, + "tags": s.tags, + # Champs utilisés par le frontend pour "Prochaine" et historique + "next_run_at": s.next_run_at, + "last_run_at": s.last_run_at, + "last_status": s.last_status, + "run_count": s.run_count, + "success_count": s.success_count, + "failure_count": s.failure_count, + "created_at": s.created_at, + "updated_at": s.updated_at, + } + ) + + return {"schedules": results, "count": len(schedules)} @app.post("/api/schedules") async def create_schedule( request: ScheduleCreateRequest, - api_key_valid: bool = Depends(verify_api_key) + api_key_valid: bool = Depends(verify_api_key), + db_session: AsyncSession = Depends(get_db), ): - """Crée un nouveau schedule - - Exemple de body: - { - "name": "Backup quotidien", - "playbook": "backup-config.yml", - "target": "all", - "schedule_type": "recurring", - "recurrence": { - "type": "daily", - "time": "02:00" - }, - "tags": ["Backup", "Production"] - } - """ + """Crée un nouveau schedule (stocké en DB)""" # Vérifier que le playbook existe playbooks = ansible_service.get_playbooks() playbook_names = [p['filename'] for p in playbooks] + [p['name'] for p in playbooks] - # Normaliser le nom du playbook playbook_file = request.playbook if not playbook_file.endswith(('.yml', '.yaml')): playbook_file = f"{playbook_file}.yml" @@ -4760,40 +4971,71 @@ async def create_schedule( if not ansible_service.host_exists(request.target): raise HTTPException(status_code=400, detail=f"Hôte '{request.target}' non trouvé") - # Valider la récurrence si nécessaire + # Valider la récurrence if request.schedule_type == "recurring" and not request.recurrence: raise HTTPException(status_code=400, detail="La récurrence est requise pour un schedule récurrent") - # Valider l'expression cron si custom if request.recurrence and request.recurrence.type == "custom": if not request.recurrence.cron_expression: raise HTTPException(status_code=400, detail="Expression cron requise pour le type 'custom'") 
validation = scheduler_service.validate_cron_expression(request.recurrence.cron_expression) if not validation["valid"]: raise HTTPException(status_code=400, detail=f"Expression cron invalide: {validation.get('error')}") + + # Créer en DB + repo = ScheduleRepository(db_session) + schedule_id = uuid.uuid4().hex - schedule = scheduler_service.create_schedule(request) - - # Log - log_entry = LogEntry( - id=db.get_next_id("logs"), - timestamp=datetime.now(timezone.utc), - level="INFO", - message=f"Schedule '{schedule.name}' créé pour {schedule.playbook} sur {schedule.target}", - source="scheduler" + recurrence = request.recurrence + schedule_obj = await repo.create( + id=schedule_id, + name=request.name, + playbook=playbook_file, + target=request.target, + schedule_type=request.schedule_type, + schedule_time=request.start_at, + recurrence_type=recurrence.type if recurrence else None, + recurrence_time=recurrence.time if recurrence else None, + recurrence_days=json.dumps(recurrence.days) if recurrence and recurrence.days else None, + cron_expression=recurrence.cron_expression if recurrence else None, + enabled=request.enabled, + tags=json.dumps(request.tags) if request.tags else None, ) - db.logs.insert(0, log_entry) - + await db_session.commit() + + # Aussi créer dans le scheduler_service pour APScheduler + scheduler_service.create_schedule(request) + + # Log en DB + log_repo = LogRepository(db_session) + await log_repo.create( + level="INFO", + message=f"Schedule '{request.name}' créé pour {playbook_file} sur {request.target}", + source="scheduler", + ) + await db_session.commit() + # Notifier via WebSocket await ws_manager.broadcast({ "type": "schedule_created", - "data": schedule.dict() + "data": { + "id": schedule_obj.id, + "name": schedule_obj.name, + "playbook": schedule_obj.playbook, + "target": schedule_obj.target, + } }) - + return { "success": True, - "message": f"Schedule '{schedule.name}' créé avec succès", - "schedule": schedule.dict() + "message": f"Schedule '{request.name}' créé avec succès", + "schedule": { + "id": schedule_obj.id, + "name": schedule_obj.name, + "playbook": schedule_obj.playbook, + "target": schedule_obj.target, + "enabled": schedule_obj.enabled, + } } @@ -4835,27 +5077,52 @@ async def validate_cron_expression( @app.get("/api/schedules/{schedule_id}") async def get_schedule( schedule_id: str, - api_key_valid: bool = Depends(verify_api_key) + api_key_valid: bool = Depends(verify_api_key), + db_session: AsyncSession = Depends(get_db), ): - """Récupère les détails d'un schedule spécifique""" - schedule = scheduler_service.get_schedule(schedule_id) + """Récupère les détails d'un schedule spécifique (depuis DB)""" + repo = ScheduleRepository(db_session) + schedule = await repo.get(schedule_id) if not schedule: raise HTTPException(status_code=404, detail=f"Schedule '{schedule_id}' non trouvé") - return schedule.dict() + return { + "id": schedule.id, + "name": schedule.name, + "playbook": schedule.playbook, + "target": schedule.target, + "schedule_type": schedule.schedule_type, + "recurrence_type": schedule.recurrence_type, + "recurrence_time": schedule.recurrence_time, + "recurrence_days": json.loads(schedule.recurrence_days) if schedule.recurrence_days else None, + "cron_expression": schedule.cron_expression, + "enabled": schedule.enabled, + "tags": json.loads(schedule.tags) if schedule.tags else [], + "next_run": schedule.next_run, + "last_run": schedule.last_run, + "created_at": schedule.created_at, + "updated_at": schedule.updated_at, + } 
@app.put("/api/schedules/{schedule_id}") async def update_schedule( schedule_id: str, request: ScheduleUpdateRequest, - api_key_valid: bool = Depends(verify_api_key) + api_key_valid: bool = Depends(verify_api_key), + db_session: AsyncSession = Depends(get_db), ): - """Met à jour un schedule existant""" - schedule = scheduler_service.get_schedule(schedule_id) - if not schedule: + """Met à jour un schedule existant (DB + scheduler_service)""" + # Essayer d'abord via SchedulerService (source de vérité) + sched = scheduler_service.get_schedule(schedule_id) + repo = ScheduleRepository(db_session) + schedule = await repo.get(schedule_id) + + if not sched and not schedule: raise HTTPException(status_code=404, detail=f"Schedule '{schedule_id}' non trouvé") - + + schedule_name = sched.name if sched else schedule.name + # Valider le playbook si modifié if request.playbook: playbooks = ansible_service.get_playbooks() @@ -4865,71 +5132,107 @@ async def update_schedule( playbook_file = f"{playbook_file}.yml" if playbook_file not in playbook_names and request.playbook not in playbook_names: raise HTTPException(status_code=400, detail=f"Playbook '{request.playbook}' non trouvé") - + # Valider l'expression cron si modifiée if request.recurrence and request.recurrence.type == "custom": if request.recurrence.cron_expression: validation = scheduler_service.validate_cron_expression(request.recurrence.cron_expression) if not validation["valid"]: raise HTTPException(status_code=400, detail=f"Expression cron invalide: {validation.get('error')}") - - updated = scheduler_service.update_schedule(schedule_id, request) - - # Log - log_entry = LogEntry( - id=db.get_next_id("logs"), - timestamp=datetime.now(timezone.utc), + + # Mettre à jour en DB + update_fields = {} + if request.name: + update_fields["name"] = request.name + if request.playbook: + update_fields["playbook"] = request.playbook + if request.target: + update_fields["target"] = request.target + if request.enabled is not None: + update_fields["enabled"] = request.enabled + if request.tags: + update_fields["tags"] = json.dumps(request.tags) + if request.recurrence: + update_fields["recurrence_type"] = request.recurrence.type + update_fields["recurrence_time"] = request.recurrence.time + update_fields["recurrence_days"] = json.dumps(request.recurrence.days) if request.recurrence.days else None + update_fields["cron_expression"] = request.recurrence.cron_expression + + # Mettre à jour en DB si présent + if schedule: + await repo.update(schedule, **update_fields) + await db_session.commit() + + # Aussi mettre à jour dans scheduler_service pour APScheduler + scheduler_service.update_schedule(schedule_id, request) + + # Log en DB + log_repo = LogRepository(db_session) + await log_repo.create( level="INFO", - message=f"Schedule '{updated.name}' mis à jour", - source="scheduler" + message=f"Schedule '{schedule_name}' mis à jour", + source="scheduler", ) - db.logs.insert(0, log_entry) - + await db_session.commit() + # Notifier via WebSocket await ws_manager.broadcast({ "type": "schedule_updated", - "data": updated.dict() + "data": {"id": schedule_id, "name": schedule_name} }) - + return { "success": True, - "message": f"Schedule '{updated.name}' mis à jour", - "schedule": updated.dict() + "message": f"Schedule '{schedule_name}' mis à jour", + "schedule": {"id": schedule_id, "name": schedule_name} } @app.delete("/api/schedules/{schedule_id}") async def delete_schedule( schedule_id: str, - api_key_valid: bool = Depends(verify_api_key) + api_key_valid: bool = 
Depends(verify_api_key), + db_session: AsyncSession = Depends(get_db), ): - """Supprime un schedule""" - schedule = scheduler_service.get_schedule(schedule_id) + """Supprime un schedule (soft delete en DB + suppression scheduler_service)""" + repo = ScheduleRepository(db_session) + schedule = await repo.get(schedule_id) if not schedule: - raise HTTPException(status_code=404, detail=f"Schedule '{schedule_id}' non trouvé") - + # Aucun enregistrement en DB, mais on tente tout de même de le supprimer + # du SchedulerService (cas des anciens IDs internes du scheduler). + try: + scheduler_service.delete_schedule(schedule_id) + except Exception: + pass + return { + "success": True, + "message": f"Schedule '{schedule_id}' déjà supprimé ou inexistant en base, nettoyage scheduler effectué." + } + schedule_name = schedule.name - success = scheduler_service.delete_schedule(schedule_id) - - if not success: - raise HTTPException(status_code=500, detail="Erreur lors de la suppression") - - # Log - log_entry = LogEntry( - id=db.get_next_id("logs"), - timestamp=datetime.now(timezone.utc), + + # Soft delete en DB + await repo.soft_delete(schedule_id) + await db_session.commit() + + # Supprimer du scheduler_service + scheduler_service.delete_schedule(schedule_id) + + # Log en DB + log_repo = LogRepository(db_session) + await log_repo.create( level="WARN", message=f"Schedule '{schedule_name}' supprimé", - source="scheduler" + source="scheduler", ) - db.logs.insert(0, log_entry) - + await db_session.commit() + # Notifier via WebSocket await ws_manager.broadcast({ "type": "schedule_deleted", "data": {"id": schedule_id, "name": schedule_name} }) - + return { "success": True, "message": f"Schedule '{schedule_name}' supprimé" @@ -4939,19 +5242,28 @@ async def delete_schedule( @app.post("/api/schedules/{schedule_id}/run") async def run_schedule_now( schedule_id: str, - api_key_valid: bool = Depends(verify_api_key) + api_key_valid: bool = Depends(verify_api_key), + db_session: AsyncSession = Depends(get_db), ): """Exécute immédiatement un schedule (exécution forcée)""" - schedule = scheduler_service.get_schedule(schedule_id) - if not schedule: - raise HTTPException(status_code=404, detail=f"Schedule '{schedule_id}' non trouvé") - - # Lancer l'exécution en arrière-plan + # Essayer d'abord via SchedulerService (source de vérité) + sched = scheduler_service.get_schedule(schedule_id) + if not sched: + # Fallback sur la DB + repo = ScheduleRepository(db_session) + schedule = await repo.get(schedule_id) + if not schedule: + raise HTTPException(status_code=404, detail=f"Schedule '{schedule_id}' non trouvé") + schedule_name = schedule.name + else: + schedule_name = sched.name + + # Lancer l'exécution via scheduler_service run = await scheduler_service.run_now(schedule_id) - + return { "success": True, - "message": f"Schedule '{schedule.name}' lancé", + "message": f"Schedule '{schedule_name}' lancé", "run": run.dict() if run else None } @@ -4959,66 +5271,94 @@ async def run_schedule_now( @app.post("/api/schedules/{schedule_id}/pause") async def pause_schedule( schedule_id: str, - api_key_valid: bool = Depends(verify_api_key) + api_key_valid: bool = Depends(verify_api_key), + db_session: AsyncSession = Depends(get_db), ): """Met en pause un schedule""" - schedule = scheduler_service.pause_schedule(schedule_id) - if not schedule: + # Essayer d'abord via SchedulerService (source de vérité) + sched = scheduler_service.get_schedule(schedule_id) + repo = ScheduleRepository(db_session) + schedule = await repo.get(schedule_id) + + 
if not sched and not schedule: raise HTTPException(status_code=404, detail=f"Schedule '{schedule_id}' non trouvé") - - # Log - log_entry = LogEntry( - id=db.get_next_id("logs"), - timestamp=datetime.now(timezone.utc), + + schedule_name = sched.name if sched else schedule.name + + # Mettre à jour en DB si présent + if schedule: + await repo.update(schedule, enabled=False) + await db_session.commit() + + # Mettre à jour dans scheduler_service + scheduler_service.pause_schedule(schedule_id) + + # Log en DB + log_repo = LogRepository(db_session) + await log_repo.create( level="INFO", - message=f"Schedule '{schedule.name}' mis en pause", - source="scheduler" + message=f"Schedule '{schedule_name}' mis en pause", + source="scheduler", ) - db.logs.insert(0, log_entry) - + await db_session.commit() + # Notifier via WebSocket await ws_manager.broadcast({ "type": "schedule_updated", - "data": schedule.dict() + "data": {"id": schedule_id, "name": schedule_name, "enabled": False} }) - + return { "success": True, - "message": f"Schedule '{schedule.name}' mis en pause", - "schedule": schedule.dict() + "message": f"Schedule '{schedule_name}' mis en pause", + "schedule": {"id": schedule_id, "name": schedule_name, "enabled": False} } @app.post("/api/schedules/{schedule_id}/resume") async def resume_schedule( schedule_id: str, - api_key_valid: bool = Depends(verify_api_key) + api_key_valid: bool = Depends(verify_api_key), + db_session: AsyncSession = Depends(get_db), ): """Reprend un schedule en pause""" - schedule = scheduler_service.resume_schedule(schedule_id) - if not schedule: + # Essayer d'abord via SchedulerService (source de vérité) + sched = scheduler_service.get_schedule(schedule_id) + repo = ScheduleRepository(db_session) + schedule = await repo.get(schedule_id) + + if not sched and not schedule: raise HTTPException(status_code=404, detail=f"Schedule '{schedule_id}' non trouvé") - - # Log - log_entry = LogEntry( - id=db.get_next_id("logs"), - timestamp=datetime.now(timezone.utc), + + schedule_name = sched.name if sched else schedule.name + + # Mettre à jour en DB si présent + if schedule: + await repo.update(schedule, enabled=True) + await db_session.commit() + + # Mettre à jour dans scheduler_service + scheduler_service.resume_schedule(schedule_id) + + # Log en DB + log_repo = LogRepository(db_session) + await log_repo.create( level="INFO", - message=f"Schedule '{schedule.name}' repris", - source="scheduler" + message=f"Schedule '{schedule_name}' repris", + source="scheduler", ) - db.logs.insert(0, log_entry) - + await db_session.commit() + # Notifier via WebSocket await ws_manager.broadcast({ "type": "schedule_updated", - "data": schedule.dict() + "data": {"id": schedule_id, "name": schedule_name, "enabled": True} }) - + return { "success": True, - "message": f"Schedule '{schedule.name}' repris", - "schedule": schedule.dict() + "message": f"Schedule '{schedule_name}' repris", + "schedule": {"id": schedule_id, "name": schedule_name, "enabled": True} } @@ -5026,20 +5366,39 @@ async def resume_schedule( async def get_schedule_runs( schedule_id: str, limit: int = 50, - api_key_valid: bool = Depends(verify_api_key) + offset: int = 0, + api_key_valid: bool = Depends(verify_api_key), + db_session: AsyncSession = Depends(get_db), ): - """Récupère l'historique des exécutions d'un schedule""" - schedule = scheduler_service.get_schedule(schedule_id) - if not schedule: + """Récupère l'historique des exécutions d'un schedule (depuis DB ou SchedulerService)""" + # Essayer d'abord via SchedulerService (source 
de vérité) + sched = scheduler_service.get_schedule(schedule_id) + repo = ScheduleRepository(db_session) + schedule = await repo.get(schedule_id) + + if not sched and not schedule: raise HTTPException(status_code=404, detail=f"Schedule '{schedule_id}' non trouvé") - - runs = scheduler_service.get_schedule_runs(schedule_id, limit=limit) - + + schedule_name = sched.name if sched else schedule.name + + # Récupérer les runs depuis le SchedulerService (JSON) si pas en DB + runs_from_service = scheduler_service.get_runs_for_schedule(schedule_id, limit=limit) + return { "schedule_id": schedule_id, - "schedule_name": schedule.name, - "runs": [r.dict() for r in runs], - "count": len(runs) + "schedule_name": schedule_name, + "runs": [ + { + "id": r.get("id"), + "status": r.get("status"), + "started_at": r.get("started_at"), + "finished_at": r.get("finished_at"), + "duration_seconds": r.get("duration_seconds"), + "error_message": r.get("error_message"), + } + for r in runs_from_service + ], + "count": len(runs_from_service) } @@ -5049,20 +5408,25 @@ async def get_schedule_runs( async def startup_event(): """Événement de démarrage de l'application""" print("🚀 Homelab Automation Dashboard démarré") - + + # Initialiser la base de données (créer les tables si nécessaire) + await init_db() + print("📦 Base de données SQLite initialisée") + # Démarrer le scheduler scheduler_service.start() - - # Log de démarrage - log_entry = LogEntry( - id=db.get_next_id("logs"), - timestamp=datetime.now(timezone.utc), - level="INFO", - message="Application démarrée - Scheduler initialisé", - source="system" - ) - db.logs.insert(0, log_entry) - + + # Log de démarrage en base + from models.database import async_session_maker + async with async_session_maker() as session: + repo = LogRepository(session) + await repo.create( + level="INFO", + message="Application démarrée - Scheduler initialisé", + source="system", + ) + await session.commit() + # Nettoyer les anciennes exécutions (>90 jours) cleaned = scheduler_service.cleanup_old_runs(days=90) if cleaned > 0: @@ -5073,10 +5437,10 @@ async def startup_event(): async def shutdown_event(): """Événement d'arrêt de l'application""" print("👋 Arrêt de l'application...") - + # Arrêter le scheduler scheduler_service.shutdown() - + print("✅ Scheduler arrêté proprement") diff --git a/app/crud/__init__.py b/app/crud/__init__.py new file mode 100644 index 0000000..7935f69 --- /dev/null +++ b/app/crud/__init__.py @@ -0,0 +1,13 @@ +from .host import HostRepository +from .bootstrap_status import BootstrapStatusRepository +from .task import TaskRepository +from .schedule import ScheduleRepository +from .log import LogRepository + +__all__ = [ + "HostRepository", + "BootstrapStatusRepository", + "TaskRepository", + "ScheduleRepository", + "LogRepository", +] diff --git a/app/crud/bootstrap_status.py b/app/crud/bootstrap_status.py new file mode 100644 index 0000000..d75e798 --- /dev/null +++ b/app/crud/bootstrap_status.py @@ -0,0 +1,41 @@ +from __future__ import annotations + +from typing import Optional + +from sqlalchemy import select +from sqlalchemy.ext.asyncio import AsyncSession + +from models.bootstrap_status import BootstrapStatus + + +class BootstrapStatusRepository: + def __init__(self, session: AsyncSession): + self.session = session + + async def list_for_host(self, host_id: str) -> list[BootstrapStatus]: + stmt = select(BootstrapStatus).where(BootstrapStatus.host_id == host_id).order_by(BootstrapStatus.created_at.desc()) + result = await self.session.execute(stmt) + return 
result.scalars().all() + + async def latest_for_host(self, host_id: str) -> Optional[BootstrapStatus]: + stmt = ( + select(BootstrapStatus) + .where(BootstrapStatus.host_id == host_id) + .order_by(BootstrapStatus.created_at.desc()) + .limit(1) + ) + result = await self.session.execute(stmt) + return result.scalar_one_or_none() + + async def create(self, *, host_id: str, status: str, automation_user: Optional[str] = None, + last_attempt=None, error_message: Optional[str] = None) -> BootstrapStatus: + record = BootstrapStatus( + host_id=host_id, + status=status, + automation_user=automation_user, + last_attempt=last_attempt, + error_message=error_message, + ) + self.session.add(record) + await self.session.flush() + return record diff --git a/app/crud/host.py b/app/crud/host.py new file mode 100644 index 0000000..7002e20 --- /dev/null +++ b/app/crud/host.py @@ -0,0 +1,67 @@ +from __future__ import annotations + +from datetime import datetime, timezone +from typing import Iterable, Optional + +from sqlalchemy import select, update +from sqlalchemy.ext.asyncio import AsyncSession +from sqlalchemy.orm import selectinload + +from models.host import Host + + +class HostRepository: + def __init__(self, session: AsyncSession): + self.session = session + + async def list(self, limit: int = 100, offset: int = 0, include_deleted: bool = False) -> list[Host]: + stmt = select(Host).order_by(Host.created_at.desc()).offset(offset).limit(limit) + if not include_deleted: + stmt = stmt.where(Host.deleted_at.is_(None)) + result = await self.session.execute(stmt) + return result.scalars().all() + + async def get(self, host_id: str, include_deleted: bool = False) -> Optional[Host]: + stmt = select(Host).where(Host.id == host_id).options(selectinload(Host.bootstrap_statuses)) + if not include_deleted: + stmt = stmt.where(Host.deleted_at.is_(None)) + result = await self.session.execute(stmt) + return result.scalar_one_or_none() + + async def get_by_ip(self, ip_address: str, include_deleted: bool = False) -> Optional[Host]: + stmt = select(Host).where(Host.ip_address == ip_address) + if not include_deleted: + stmt = stmt.where(Host.deleted_at.is_(None)) + result = await self.session.execute(stmt) + return result.scalar_one_or_none() + + async def create(self, *, id: str, name: str, ip_address: str, ansible_group: Optional[str] = None, + status: str = "unknown", reachable: bool = False, last_seen: Optional[datetime] = None) -> Host: + host = Host( + id=id, + name=name, + ip_address=ip_address, + ansible_group=ansible_group, + status=status, + reachable=reachable, + last_seen=last_seen, + ) + self.session.add(host) + await self.session.flush() + return host + + async def update(self, host: Host, **fields) -> Host: + for key, value in fields.items(): + if value is not None: + setattr(host, key, value) + await self.session.flush() + return host + + async def soft_delete(self, host_id: str) -> bool: + stmt = ( + update(Host) + .where(Host.id == host_id, Host.deleted_at.is_(None)) + .values(deleted_at=datetime.now(timezone.utc)) + ) + result = await self.session.execute(stmt) + return result.rowcount > 0 diff --git a/app/crud/log.py b/app/crud/log.py new file mode 100644 index 0000000..781ccee --- /dev/null +++ b/app/crud/log.py @@ -0,0 +1,28 @@ +from __future__ import annotations + +from typing import Optional + +from sqlalchemy import select +from sqlalchemy.ext.asyncio import AsyncSession + +from models.log import Log + + +class LogRepository: + def __init__(self, session: AsyncSession): + self.session = session + + 
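
An aside on the CRUD layer introduced here (HostRepository, LogRepository, and friends): each repository wraps an AsyncSession and only flush()es, so the caller owns the commit. Below is a minimal sketch of how such a repository might be wired into a FastAPI route through the `get_db` dependency — the route paths and returned fields are illustrative, not part of this diff, and the imports assume `app/` is the import root, as in the crud modules themselves:

```python
from fastapi import APIRouter, Depends, HTTPException
from sqlalchemy.ext.asyncio import AsyncSession

from models.database import get_db          # dependency defined in app/models/database.py
from crud.host import HostRepository
from crud.log import LogRepository

router = APIRouter()


@router.get("/api/hosts/{host_id}")
async def read_host(host_id: str, db_session: AsyncSession = Depends(get_db)):
    repo = HostRepository(db_session)
    host = await repo.get(host_id)           # soft-deleted hosts are filtered out by default
    if host is None:
        raise HTTPException(status_code=404, detail=f"Host '{host_id}' non trouvé")
    return {"id": host.id, "name": host.name, "ip_address": host.ip_address, "status": host.status}


@router.post("/api/hosts/{host_id}/mark-online")
async def mark_host_online(host_id: str, db_session: AsyncSession = Depends(get_db)):
    repo = HostRepository(db_session)
    host = await repo.get(host_id)
    if host is None:
        raise HTTPException(status_code=404, detail=f"Host '{host_id}' non trouvé")
    await repo.update(host, status="online", reachable=True)
    await LogRepository(db_session).create(level="INFO", message=f"Host {host.name} marqué online", source="api")
    await db_session.commit()                # repositories only flush; the endpoint commits
    return {"success": True, "host_id": host.id}
```
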
async def list(self, limit: int = 100, offset: int = 0, level: Optional[str] = None, source: Optional[str] = None) -> list[Log]: + stmt = select(Log).order_by(Log.created_at.desc()).offset(offset).limit(limit) + if level: + stmt = stmt.where(Log.level == level) + if source: + stmt = stmt.where(Log.source == source) + result = await self.session.execute(stmt) + return result.scalars().all() + + async def create(self, **fields) -> Log: + log = Log(**fields) + self.session.add(log) + await self.session.flush() + return log diff --git a/app/crud/schedule.py b/app/crud/schedule.py new file mode 100644 index 0000000..155c8ce --- /dev/null +++ b/app/crud/schedule.py @@ -0,0 +1,60 @@ +from __future__ import annotations + +from datetime import datetime, timezone +from typing import Optional + +from sqlalchemy import select, update +from sqlalchemy.ext.asyncio import AsyncSession +from sqlalchemy.orm import selectinload + +from models.schedule import Schedule +from models.schedule_run import ScheduleRun + + +class ScheduleRepository: + def __init__(self, session: AsyncSession): + self.session = session + + async def list(self, limit: int = 100, offset: int = 0, include_deleted: bool = False) -> list[Schedule]: + stmt = select(Schedule).order_by(Schedule.created_at.desc()).offset(offset).limit(limit) + if not include_deleted: + stmt = stmt.where(Schedule.deleted_at.is_(None)) + result = await self.session.execute(stmt) + return result.scalars().all() + + async def get(self, schedule_id: str, include_deleted: bool = False) -> Optional[Schedule]: + stmt = select(Schedule).where(Schedule.id == schedule_id).options( + selectinload(Schedule.runs) + ) + if not include_deleted: + stmt = stmt.where(Schedule.deleted_at.is_(None)) + result = await self.session.execute(stmt) + return result.scalar_one_or_none() + + async def create(self, **fields) -> Schedule: + schedule = Schedule(**fields) + self.session.add(schedule) + await self.session.flush() + return schedule + + async def update(self, schedule: Schedule, **fields) -> Schedule: + for key, value in fields.items(): + if value is not None: + setattr(schedule, key, value) + await self.session.flush() + return schedule + + async def soft_delete(self, schedule_id: str) -> bool: + stmt = ( + update(Schedule) + .where(Schedule.id == schedule_id, Schedule.deleted_at.is_(None)) + .values(deleted_at=datetime.now(timezone.utc)) + ) + result = await self.session.execute(stmt) + return result.rowcount > 0 + + async def add_run(self, **fields) -> ScheduleRun: + run = ScheduleRun(**fields) + self.session.add(run) + await self.session.flush() + return run diff --git a/app/crud/schedule_run.py b/app/crud/schedule_run.py new file mode 100644 index 0000000..8a6da8f --- /dev/null +++ b/app/crud/schedule_run.py @@ -0,0 +1,35 @@ +from __future__ import annotations + +from typing import Optional + +from sqlalchemy import select +from sqlalchemy.ext.asyncio import AsyncSession + +from models.schedule_run import ScheduleRun + + +class ScheduleRunRepository: + def __init__(self, session: AsyncSession): + self.session = session + + async def get(self, run_id: int) -> Optional[ScheduleRun]: + stmt = select(ScheduleRun).where(ScheduleRun.id == run_id) + result = await self.session.execute(stmt) + return result.scalar_one_or_none() + + async def list_for_schedule(self, schedule_id: str, limit: int = 100, offset: int = 0) -> list[ScheduleRun]: + stmt = ( + select(ScheduleRun) + .where(ScheduleRun.schedule_id == schedule_id) + .order_by(ScheduleRun.started_at.desc()) + .offset(offset) + 
.limit(limit) + ) + result = await self.session.execute(stmt) + return result.scalars().all() + + async def create(self, **fields) -> ScheduleRun: + run = ScheduleRun(**fields) + self.session.add(run) + await self.session.flush() + return run diff --git a/app/crud/task.py b/app/crud/task.py new file mode 100644 index 0000000..083b462 --- /dev/null +++ b/app/crud/task.py @@ -0,0 +1,44 @@ +from __future__ import annotations + +from typing import Optional + +from sqlalchemy import select +from sqlalchemy.ext.asyncio import AsyncSession +from sqlalchemy.orm import selectinload + +from models.task import Task + + +class TaskRepository: + def __init__(self, session: AsyncSession): + self.session = session + + async def list(self, limit: int = 100, offset: int = 0) -> list[Task]: + stmt = select(Task).order_by(Task.created_at.desc()).offset(offset).limit(limit) + result = await self.session.execute(stmt) + return result.scalars().all() + + async def get(self, task_id: str) -> Optional[Task]: + stmt = select(Task).where(Task.id == task_id).options(selectinload(Task.schedule_runs)) + result = await self.session.execute(stmt) + return result.scalar_one_or_none() + + async def create(self, *, id: str, action: str, target: str, playbook: Optional[str] = None, + status: str = "pending") -> Task: + task = Task( + id=id, + action=action, + target=target, + playbook=playbook, + status=status, + ) + self.session.add(task) + await self.session.flush() + return task + + async def update(self, task: Task, **fields) -> Task: + for key, value in fields.items(): + if value is not None: + setattr(task, key, value) + await self.session.flush() + return task diff --git a/app/models/__init__.py b/app/models/__init__.py new file mode 100644 index 0000000..6b2b789 --- /dev/null +++ b/app/models/__init__.py @@ -0,0 +1,17 @@ +from .database import Base +from .host import Host +from .bootstrap_status import BootstrapStatus +from .task import Task +from .schedule import Schedule +from .schedule_run import ScheduleRun +from .log import Log + +__all__ = [ + "Base", + "Host", + "BootstrapStatus", + "Task", + "Schedule", + "ScheduleRun", + "Log", +] diff --git a/app/models/bootstrap_status.py b/app/models/bootstrap_status.py new file mode 100644 index 0000000..63a6b7c --- /dev/null +++ b/app/models/bootstrap_status.py @@ -0,0 +1,27 @@ +from __future__ import annotations + +from datetime import datetime +from typing import Optional + +from sqlalchemy import DateTime, ForeignKey, Integer, String, Text +from sqlalchemy.orm import Mapped, mapped_column, relationship +from sqlalchemy.sql import func + +from .database import Base + + +class BootstrapStatus(Base): + __tablename__ = "bootstrap_status" + + id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True) + host_id: Mapped[str] = mapped_column(String, ForeignKey("hosts.id", ondelete="CASCADE"), nullable=False) + status: Mapped[str] = mapped_column(String, nullable=False) + automation_user: Mapped[Optional[str]] = mapped_column(String, nullable=True) + last_attempt: Mapped[Optional[datetime]] = mapped_column(DateTime(timezone=True)) + error_message: Mapped[Optional[str]] = mapped_column(Text, nullable=True) + created_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), nullable=False, server_default=func.now()) + + host: Mapped["Host"] = relationship("Host", back_populates="bootstrap_statuses") + + def __repr__(self) -> str: # pragma: no cover - debug helper + return f"" diff --git a/app/models/database.py b/app/models/database.py new file mode 
100644 index 0000000..303f7a1 --- /dev/null +++ b/app/models/database.py @@ -0,0 +1,106 @@ +"""Database configuration and session management for Homelab Automation. +Uses SQLAlchemy 2.x async engine with SQLite + aiosqlite driver. +""" +from __future__ import annotations + +import os +from pathlib import Path +from typing import AsyncGenerator +from urllib.parse import urlparse + +from sqlalchemy import event, MetaData +from sqlalchemy.ext.asyncio import AsyncEngine, AsyncSession, async_sessionmaker, create_async_engine +from sqlalchemy.orm import declarative_base + +# Naming convention to keep Alembic happy with constraints +NAMING_CONVENTION = { + "ix": "ix_%(column_0_label)s", + "uq": "uq_%(table_name)s_%(column_0_name)s", + "ck": "ck_%(table_name)s_%(constraint_name)s", + "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s", + "pk": "pk_%(table_name)s", +} + +metadata_obj = MetaData(naming_convention=NAMING_CONVENTION) +Base = declarative_base(metadata=metadata_obj) + +# Resolve base path (project root) +ROOT_DIR = Path(__file__).resolve().parents[2] +DEFAULT_DB_PATH = Path(os.environ.get("DB_PATH") or (ROOT_DIR / "data" / "homelab.db")) +DATABASE_URL = os.environ.get("DATABASE_URL", f"sqlite+aiosqlite:///{DEFAULT_DB_PATH}") + +# Ensure SQLite directory exists even if DATABASE_URL overrides DB_PATH +def _ensure_sqlite_dir(db_url: str) -> None: + if not db_url.startswith("sqlite"): + return + parsed = urlparse(db_url.replace("sqlite+aiosqlite", "sqlite")) + if parsed.scheme != "sqlite": + return + db_path = Path(parsed.path) + if db_path.parent: + db_path.parent.mkdir(parents=True, exist_ok=True) + +DEFAULT_DB_PATH.parent.mkdir(parents=True, exist_ok=True) +_ensure_sqlite_dir(DATABASE_URL) + + +def _debug_db_paths() -> None: + try: + print( + "[DB] DATABASE_URL=%s, DEFAULT_DB_PATH=%s, parent_exists=%s, parent=%s" + % ( + DATABASE_URL, + DEFAULT_DB_PATH, + DEFAULT_DB_PATH.parent.exists(), + DEFAULT_DB_PATH.parent, + ) + ) + except Exception: + # Debug logging should never break startup + pass + + +_debug_db_paths() + +engine: AsyncEngine = create_async_engine( + DATABASE_URL, + echo=False, + pool_pre_ping=True, + future=True, +) + +# Ensure SQLite pragmas (WAL + FK) when using SQLite +if DATABASE_URL.startswith("sqlite"): + @event.listens_for(engine.sync_engine, "connect") + def _set_sqlite_pragmas(dbapi_connection, connection_record): # type: ignore[override] + cursor = dbapi_connection.cursor() + cursor.execute("PRAGMA foreign_keys=ON") + cursor.execute("PRAGMA journal_mode=WAL") + cursor.close() + +async_session_maker = async_sessionmaker( + bind=engine, + autoflush=False, + expire_on_commit=False, + class_=AsyncSession, +) + + +async def get_db() -> AsyncGenerator[AsyncSession, None]: + """FastAPI dependency that yields an AsyncSession with automatic rollback on error.""" + async with async_session_maker() as session: # type: AsyncSession + try: + yield session + except Exception: + await session.rollback() + raise + finally: + await session.close() + + +async def init_db() -> None: + """Create all tables (mostly for dev/tests; migrations should be handled by Alembic).""" + from . 
import host, task, schedule, schedule_run, log # noqa: F401 + + async with engine.begin() as conn: + await conn.run_sync(Base.metadata.create_all) diff --git a/app/models/host.py b/app/models/host.py new file mode 100644 index 0000000..bdd77a8 --- /dev/null +++ b/app/models/host.py @@ -0,0 +1,33 @@ +from __future__ import annotations + +from datetime import datetime +from typing import List, Optional + +from sqlalchemy import Boolean, DateTime, String, text +from sqlalchemy.orm import Mapped, mapped_column, relationship +from sqlalchemy.sql import func + +from .database import Base + + +class Host(Base): + __tablename__ = "hosts" + + id: Mapped[str] = mapped_column(String, primary_key=True) + name: Mapped[str] = mapped_column(String, nullable=False) + ip_address: Mapped[str] = mapped_column(String, nullable=False, unique=True) + status: Mapped[str] = mapped_column(String, nullable=False, server_default=text("'unknown'")) + ansible_group: Mapped[Optional[str]] = mapped_column(String, nullable=True) + last_seen: Mapped[Optional[datetime]] = mapped_column(DateTime(timezone=True)) + reachable: Mapped[bool] = mapped_column(Boolean, nullable=False, server_default=text("0")) + created_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), nullable=False, server_default=func.now()) + updated_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), nullable=False, server_default=func.now(), onupdate=func.now()) + deleted_at: Mapped[Optional[datetime]] = mapped_column(DateTime(timezone=True)) + + bootstrap_statuses: Mapped[List["BootstrapStatus"]] = relationship( + "BootstrapStatus", back_populates="host", cascade="all, delete-orphan" + ) + logs: Mapped[List["Log"]] = relationship("Log", back_populates="host") + + def __repr__(self) -> str: # pragma: no cover - debug helper + return f"" diff --git a/app/models/log.py b/app/models/log.py new file mode 100644 index 0000000..511f76c --- /dev/null +++ b/app/models/log.py @@ -0,0 +1,36 @@ +from __future__ import annotations + +from datetime import datetime +from typing import Optional + +from sqlalchemy import DateTime, ForeignKey, Integer, JSON, String, Text, Index +from sqlalchemy.orm import Mapped, mapped_column, relationship +from sqlalchemy.sql import func + +from .database import Base + + +class Log(Base): + __tablename__ = "logs" + __table_args__ = ( + Index("idx_logs_created_at", "created_at"), + Index("idx_logs_level", "level"), + Index("idx_logs_source", "source"), + ) + + id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True) + level: Mapped[str] = mapped_column(String, nullable=False) + source: Mapped[Optional[str]] = mapped_column(String) + message: Mapped[str] = mapped_column(Text, nullable=False) + details: Mapped[Optional[dict]] = mapped_column(JSON) + host_id: Mapped[Optional[str]] = mapped_column(String, ForeignKey("hosts.id", ondelete="SET NULL")) + task_id: Mapped[Optional[str]] = mapped_column(String, ForeignKey("tasks.id", ondelete="SET NULL")) + schedule_id: Mapped[Optional[str]] = mapped_column(String, ForeignKey("schedules.id", ondelete="SET NULL")) + created_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), nullable=False, server_default=func.now()) + + host: Mapped[Optional["Host"]] = relationship("Host", back_populates="logs") + task: Mapped[Optional["Task"]] = relationship("Task", back_populates="logs") + schedule: Mapped[Optional["Schedule"]] = relationship("Schedule", back_populates="logs") + + def __repr__(self) -> str: # pragma: no cover - debug helper + return f"" diff 
--git a/app/models/schedule.py b/app/models/schedule.py new file mode 100644 index 0000000..8af7007 --- /dev/null +++ b/app/models/schedule.py @@ -0,0 +1,40 @@ +from __future__ import annotations + +from datetime import datetime +from typing import List, Optional + +from sqlalchemy import Boolean, DateTime, String, Text +from sqlalchemy.orm import Mapped, mapped_column, relationship +from sqlalchemy.sql import func + +from .database import Base + + +class Schedule(Base): + __tablename__ = "schedules" + + id: Mapped[str] = mapped_column(String, primary_key=True) + name: Mapped[str] = mapped_column(String, nullable=False) + playbook: Mapped[str] = mapped_column(String, nullable=False) + target: Mapped[str] = mapped_column(String, nullable=False) + schedule_type: Mapped[str] = mapped_column(String, nullable=False) + schedule_time: Mapped[Optional[datetime]] = mapped_column(DateTime(timezone=True)) + recurrence_type: Mapped[Optional[str]] = mapped_column(String) + recurrence_time: Mapped[Optional[str]] = mapped_column(String) + recurrence_days: Mapped[Optional[str]] = mapped_column(Text) + cron_expression: Mapped[Optional[str]] = mapped_column(String) + enabled: Mapped[bool] = mapped_column(Boolean, nullable=False, default=True) + tags: Mapped[Optional[str]] = mapped_column(Text) + next_run: Mapped[Optional[datetime]] = mapped_column(DateTime(timezone=True)) + last_run: Mapped[Optional[datetime]] = mapped_column(DateTime(timezone=True)) + created_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), nullable=False, server_default=func.now()) + updated_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), nullable=False, server_default=func.now(), onupdate=func.now()) + deleted_at: Mapped[Optional[datetime]] = mapped_column(DateTime(timezone=True)) + + runs: Mapped[List["ScheduleRun"]] = relationship( + "ScheduleRun", back_populates="schedule", cascade="all, delete-orphan" + ) + logs: Mapped[List["Log"]] = relationship("Log", back_populates="schedule") + + def __repr__(self) -> str: # pragma: no cover - debug helper + return f"" diff --git a/app/models/schedule_run.py b/app/models/schedule_run.py new file mode 100644 index 0000000..431875f --- /dev/null +++ b/app/models/schedule_run.py @@ -0,0 +1,31 @@ +from __future__ import annotations + +from datetime import datetime +from typing import Optional + +from sqlalchemy import DateTime, ForeignKey, Integer, String, Text, Float +from sqlalchemy.orm import Mapped, mapped_column, relationship +from sqlalchemy.sql import func + +from .database import Base + + +class ScheduleRun(Base): + __tablename__ = "schedule_runs" + + id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True) + schedule_id: Mapped[str] = mapped_column(String, ForeignKey("schedules.id", ondelete="CASCADE"), nullable=False) + task_id: Mapped[Optional[str]] = mapped_column(String, ForeignKey("tasks.id", ondelete="SET NULL")) + status: Mapped[str] = mapped_column(String, nullable=False) + started_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), nullable=False) + completed_at: Mapped[Optional[datetime]] = mapped_column(DateTime(timezone=True)) + duration: Mapped[Optional[float]] = mapped_column(Float) + error_message: Mapped[Optional[str]] = mapped_column(Text) + output: Mapped[Optional[str]] = mapped_column(Text) + created_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), nullable=False, server_default=func.now()) + + schedule: Mapped["Schedule"] = relationship("Schedule", back_populates="runs") + task: 
Mapped[Optional["Task"]] = relationship("Task", back_populates="schedule_runs") + + def __repr__(self) -> str: # pragma: no cover - debug helper + return f"" diff --git a/app/models/task.py b/app/models/task.py new file mode 100644 index 0000000..339dcc1 --- /dev/null +++ b/app/models/task.py @@ -0,0 +1,31 @@ +from __future__ import annotations + +from datetime import datetime +from typing import Optional + +from sqlalchemy import DateTime, ForeignKey, String, Text, JSON, text +from sqlalchemy.orm import Mapped, mapped_column, relationship +from sqlalchemy.sql import func + +from .database import Base + + +class Task(Base): + __tablename__ = "tasks" + + id: Mapped[str] = mapped_column(String, primary_key=True) + action: Mapped[str] = mapped_column(String, nullable=False) + target: Mapped[str] = mapped_column(String, nullable=False) + status: Mapped[str] = mapped_column(String, nullable=False, server_default=text("'pending'")) + playbook: Mapped[Optional[str]] = mapped_column(String) + started_at: Mapped[Optional[datetime]] = mapped_column(DateTime(timezone=True)) + completed_at: Mapped[Optional[datetime]] = mapped_column(DateTime(timezone=True)) + error_message: Mapped[Optional[str]] = mapped_column(Text) + result_data: Mapped[Optional[dict]] = mapped_column(JSON) + created_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), nullable=False, server_default=func.now()) + + schedule_runs: Mapped[list["ScheduleRun"]] = relationship("ScheduleRun", back_populates="task") + logs: Mapped[list["Log"]] = relationship("Log", back_populates="task") + + def __repr__(self) -> str: # pragma: no cover - debug helper + return f"" diff --git a/app/requirements.txt b/app/requirements.txt index 5bb4219..d9cd604 100644 --- a/app/requirements.txt +++ b/app/requirements.txt @@ -10,4 +10,9 @@ requests>=2.32.0 httpx>=0.28.0 apscheduler>=3.10.0 croniter>=2.0.0 -pytz>=2024.1 \ No newline at end of file +pytz>=2024.1 +sqlalchemy>=2.0.0 +alembic>=1.12.0 +aiosqlite>=0.19.0 +pytest>=7.0.0 +pytest-asyncio>=0.21.0 \ No newline at end of file diff --git a/app/schemas/__init__.py b/app/schemas/__init__.py new file mode 100644 index 0000000..d4a9ee5 --- /dev/null +++ b/app/schemas/__init__.py @@ -0,0 +1,21 @@ +from .host import HostCreate, HostUpdate, HostOut +from .bootstrap_status import BootstrapStatusOut +from .task import TaskCreate, TaskUpdate, TaskOut +from .schedule import ScheduleCreate, ScheduleUpdate, ScheduleOut, ScheduleRunOut +from .log import LogCreate, LogOut + +__all__ = [ + "HostCreate", + "HostUpdate", + "HostOut", + "BootstrapStatusOut", + "TaskCreate", + "TaskUpdate", + "TaskOut", + "ScheduleCreate", + "ScheduleUpdate", + "ScheduleOut", + "ScheduleRunOut", + "LogCreate", + "LogOut", +] diff --git a/app/schemas/bootstrap_status.py b/app/schemas/bootstrap_status.py new file mode 100644 index 0000000..4e59972 --- /dev/null +++ b/app/schemas/bootstrap_status.py @@ -0,0 +1,18 @@ +from __future__ import annotations + +from datetime import datetime +from typing import Optional + +from pydantic import BaseModel, ConfigDict + + +class BootstrapStatusOut(BaseModel): + id: int + host_id: str + status: str + automation_user: Optional[str] = None + last_attempt: Optional[datetime] = None + error_message: Optional[str] = None + created_at: datetime + + model_config = ConfigDict(from_attributes=True) diff --git a/app/schemas/host.py b/app/schemas/host.py new file mode 100644 index 0000000..16625f3 --- /dev/null +++ b/app/schemas/host.py @@ -0,0 +1,36 @@ +from __future__ import annotations + +from datetime 
import datetime +from typing import Optional + +from pydantic import BaseModel, Field, ConfigDict + + +class HostBase(BaseModel): + name: str = Field(..., min_length=1) + ip_address: str = Field(..., min_length=3) + ansible_group: Optional[str] = None + status: Optional[str] = Field(default="unknown") + reachable: Optional[bool] = False + last_seen: Optional[datetime] = None + + +class HostCreate(HostBase): + pass + + +class HostUpdate(BaseModel): + ansible_group: Optional[str] = None + status: Optional[str] = None + reachable: Optional[bool] = None + last_seen: Optional[datetime] = None + deleted_at: Optional[datetime] = None + + +class HostOut(HostBase): + id: str + created_at: datetime + updated_at: datetime + deleted_at: Optional[datetime] = None + + model_config = ConfigDict(from_attributes=True) diff --git a/app/schemas/log.py b/app/schemas/log.py new file mode 100644 index 0000000..1058dcd --- /dev/null +++ b/app/schemas/log.py @@ -0,0 +1,23 @@ +from __future__ import annotations + +from datetime import datetime +from typing import Optional, Dict, Any + +from pydantic import BaseModel, Field, ConfigDict + + +class LogCreate(BaseModel): + level: str = Field(..., description="Log level: info/warning/error/debug") + source: Optional[str] = None + message: str + details: Optional[Dict[str, Any]] = None + host_id: Optional[str] = None + task_id: Optional[str] = None + schedule_id: Optional[str] = None + + +class LogOut(LogCreate): + id: int + created_at: datetime + + model_config = ConfigDict(from_attributes=True) diff --git a/app/schemas/schedule.py b/app/schemas/schedule.py new file mode 100644 index 0000000..081913d --- /dev/null +++ b/app/schemas/schedule.py @@ -0,0 +1,67 @@ +from __future__ import annotations + +from datetime import datetime +from typing import List, Optional + +from pydantic import BaseModel, Field, ConfigDict + + +class ScheduleBase(BaseModel): + name: str + playbook: str + target: str + schedule_type: str + schedule_time: Optional[datetime] = None + recurrence_type: Optional[str] = None + recurrence_time: Optional[str] = None + recurrence_days: Optional[List[int]] = None + cron_expression: Optional[str] = None + enabled: bool = True + tags: Optional[List[str]] = None + next_run: Optional[datetime] = None + last_run: Optional[datetime] = None + + +class ScheduleCreate(ScheduleBase): + pass + + +class ScheduleUpdate(BaseModel): + name: Optional[str] = None + playbook: Optional[str] = None + target: Optional[str] = None + schedule_type: Optional[str] = None + schedule_time: Optional[datetime] = None + recurrence_type: Optional[str] = None + recurrence_time: Optional[str] = None + recurrence_days: Optional[List[int]] = None + cron_expression: Optional[str] = None + enabled: Optional[bool] = None + tags: Optional[List[str]] = None + next_run: Optional[datetime] = None + last_run: Optional[datetime] = None + deleted_at: Optional[datetime] = None + + +class ScheduleOut(ScheduleBase): + id: str + created_at: datetime + updated_at: datetime + deleted_at: Optional[datetime] = None + + model_config = ConfigDict(from_attributes=True) + + +class ScheduleRunOut(BaseModel): + id: int + schedule_id: str + task_id: Optional[str] = None + status: str + started_at: datetime + completed_at: Optional[datetime] = None + duration: Optional[float] = None + error_message: Optional[str] = None + output: Optional[str] = None + created_at: datetime + + model_config = ConfigDict(from_attributes=True) diff --git a/app/schemas/task.py b/app/schemas/task.py new file mode 100644 index 
0000000..d3ae93c --- /dev/null +++ b/app/schemas/task.py @@ -0,0 +1,36 @@ +from __future__ import annotations + +from datetime import datetime +from typing import Optional, Dict, Any + +from pydantic import BaseModel, Field, ConfigDict + + +class TaskBase(BaseModel): + action: str + target: str + playbook: Optional[str] = None + status: str = Field(default="pending") + result_data: Optional[Dict[str, Any]] = None + error_message: Optional[str] = None + + +class TaskCreate(TaskBase): + pass + + +class TaskUpdate(BaseModel): + status: Optional[str] = None + started_at: Optional[datetime] = None + completed_at: Optional[datetime] = None + error_message: Optional[str] = None + result_data: Optional[Dict[str, Any]] = None + + +class TaskOut(TaskBase): + id: str + started_at: Optional[datetime] = None + completed_at: Optional[datetime] = None + created_at: datetime + + model_config = ConfigDict(from_attributes=True) diff --git a/data/homelab.db b/data/homelab.db new file mode 100644 index 0000000..4410bda Binary files /dev/null and b/data/homelab.db differ diff --git a/data/homelab.db-shm b/data/homelab.db-shm new file mode 100644 index 0000000..cd304e9 Binary files /dev/null and b/data/homelab.db-shm differ diff --git a/data/homelab.db-wal b/data/homelab.db-wal new file mode 100644 index 0000000..92d60dc Binary files /dev/null and b/data/homelab.db-wal differ diff --git a/docker-compose.yml b/docker-compose.yml index d26e518..4d67b11 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -19,6 +19,11 @@ services: - ANSIBLE_HOST_KEY_CHECKING=False # Timeout SSH - ANSIBLE_TIMEOUT=30 + # Homelab Data folder (monté sur /app/data) + - HOMELAB_DATA_DIR=${HOMELAB_DATA_DIR:-./data} + # SQLite DB path et URL (doivent correspondre à app/models/database.py) + - DB_PATH=/app/data/homelab.db + - DATABASE_URL=sqlite+aiosqlite:////app/data/homelab.db # Répertoire des logs de tâches (format YYYY/MM/JJ) - DIR_LOGS_TASKS=/app/tasks_logs # Ansible inventory @@ -28,6 +33,8 @@ services: # Ansible group_vars - ANSIBLE_GROUP_VARS=./ansible/inventory/group_vars volumes: + # Monter le dossier des données + - ${HOMELAB_DATA_DIR:-./data}:/app/data # Monter l'inventaire Ansible (permet de modifier sans rebuild) - ${ANSIBLE_INVENTORY:-./ansible/inventory}:/ansible/inventory # Monter les playbooks (permet de modifier sans rebuild) diff --git a/migrate_json_to_sqlite.py b/migrate_json_to_sqlite.py new file mode 100644 index 0000000..e6d3c3b --- /dev/null +++ b/migrate_json_to_sqlite.py @@ -0,0 +1,259 @@ +"""Migration des fichiers JSON vers SQLite (SQLAlchemy async). + +- Importe host_status, bootstrap_status, schedule_runs, et logs JSON journaliers si présents. +- Sauvegarde les fichiers JSON source en .bak après succès. +- Génère un rapport de migration en sortie standard. 
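
(Side note on this script, a sketch rather than part of the diff: because it reuses the application's engine configuration from `app.models.database`, the target database can presumably be redirected with the same `DATABASE_URL` environment variable, provided it is set before that module is imported. The staging file name below is an assumption.)

```python
# Hypothetical wrapper: point the migration at an alternate SQLite file.
# DATABASE_URL is read once, at import time of app.models.database.
import asyncio
import os

os.environ["DATABASE_URL"] = "sqlite+aiosqlite:///./data/homelab-staging.db"  # assumed path

from migrate_json_to_sqlite import main  # import only after the env var is set

asyncio.run(main())
```
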
+ +Usage : + python migrate_json_to_sqlite.py +""" +from __future__ import annotations + +import asyncio +import json +import uuid +from datetime import datetime +from pathlib import Path +from typing import Any, Dict, List, Optional + +import yaml + +from app.models.database import async_session_maker, init_db +from app.crud.host import HostRepository +from app.crud.bootstrap_status import BootstrapStatusRepository +from app.crud.schedule import ScheduleRepository +from app.crud.schedule_run import ScheduleRunRepository +from app.crud.task import TaskRepository +from app.crud.log import LogRepository + +BASE_DIR = Path(__file__).resolve().parent +ANSIBLE_DIR = BASE_DIR / "ansible" +TASKS_LOGS_DIR = BASE_DIR / "tasks_logs" +HOST_STATUS_FILE = ANSIBLE_DIR / ".host_status.json" +BOOTSTRAP_STATUS_FILE = TASKS_LOGS_DIR / ".bootstrap_status.json" +SCHEDULE_RUNS_FILE = TASKS_LOGS_DIR / ".schedule_runs.json" + + +def load_json(path: Path, default: Any) -> Any: + if not path.exists(): + return default + try: + with path.open("r", encoding="utf-8") as f: + return json.load(f) + except Exception: + return default + + +def backup_file(path: Path) -> None: + if path.exists(): + bak = path.with_suffix(path.suffix + ".bak") + path.rename(bak) + + +def parse_datetime(value: Optional[str]) -> Optional[datetime]: + if not value: + return None + try: + return datetime.fromisoformat(value) + except Exception: + return None + + +async def migrate_hosts(session, report: Dict[str, Any]) -> Dict[str, str]: + repo = HostRepository(session) + inventory_hosts: Dict[str, Dict[str, Any]] = {} + + # Lire inventaire Ansible pour récupérer les groupes + inventory_file = ANSIBLE_DIR / "inventory" / "hosts.yml" + if inventory_file.exists(): + data = yaml.safe_load(inventory_file.read_text(encoding="utf-8")) or {} + all_children = data.get("all", {}).get("children", {}) + for group_name, group_data in all_children.items(): + hosts = group_data.get("hosts", {}) or {} + for host_name in hosts.keys(): + entry = inventory_hosts.setdefault(host_name, {"groups": set()}) + entry["groups"].add(group_name) + else: + report.setdefault("warnings", []).append("Inventaire Ansible introuvable, ip=hostname et groupe vide") + + host_status_data = load_json(HOST_STATUS_FILE, {"hosts": {}}).get("hosts", {}) + + created = 0 + host_map: Dict[str, str] = {} # hostname -> host_id + for host_name, meta in inventory_hosts.items(): + status_entry = host_status_data.get(host_name, {}) + status = status_entry.get("status", "unknown") + last_seen = parse_datetime(status_entry.get("last_seen")) + ansible_group = next(iter(g for g in meta.get("groups", []) if g.startswith("env_")), None) + reachable = status != "offline" + host_id = uuid.uuid4().hex + + host = await repo.create( + id=host_id, + name=host_name, + ip_address=host_name, + ansible_group=ansible_group, + status=status, + reachable=reachable, + last_seen=last_seen, + ) + created += 1 + host_map[host_name] = host.id + + report["hosts"] = created + backup_file(HOST_STATUS_FILE) + return host_map + + +async def migrate_bootstrap_status(session, report: Dict[str, Any], host_map: Dict[str, str]) -> None: + repo = BootstrapStatusRepository(session) + data = load_json(BOOTSTRAP_STATUS_FILE, {"hosts": {}}).get("hosts", {}) + created = 0 + for host_name, meta in data.items(): + host_id = host_map.get(host_name) + if not host_id: + report.setdefault("warnings", []).append(f"Bootstrap ignoré: host inconnu {host_name}") + continue + bootstrap_ok = meta.get("bootstrap_ok", False) + status = "success" if 
bootstrap_ok else "failed" + last_attempt = parse_datetime(meta.get("bootstrap_date")) + error_message = None if bootstrap_ok else meta.get("details") + await repo.create( + host_id=host_id, + status=status, + automation_user="automation", + last_attempt=last_attempt, + error_message=error_message, + ) + created += 1 + report["bootstrap_status"] = created + backup_file(BOOTSTRAP_STATUS_FILE) + + +async def migrate_schedule_runs(session, report: Dict[str, Any]) -> None: + repo = ScheduleRunRepository(session) + task_repo = TaskRepository(session) + schedule_repo = ScheduleRepository(session) + + data = load_json(SCHEDULE_RUNS_FILE, {"runs": []}).get("runs", []) + if not data: + report["schedule_runs"] = 0 + return + + created = 0 + for run in data: + schedule_id = run.get("schedule_id") or uuid.uuid4().hex + task_id = run.get("task_id") + status = run.get("status") or "unknown" + started_at = parse_datetime(run.get("started_at")) or datetime.utcnow() + completed_at = parse_datetime(run.get("completed_at")) + duration = run.get("duration") + error_message = run.get("error_message") + output = run.get("output") + + # Assure une entrée schedule/task minimaliste si absente + existing_schedule = await schedule_repo.get(schedule_id, include_deleted=True) + if existing_schedule is None: + await schedule_repo.create( + id=schedule_id, + name=run.get("name", "Imported schedule"), + playbook=run.get("playbook", "unknown"), + target=run.get("target", "all"), + schedule_type=run.get("schedule_type", "once"), + enabled=True, + ) + if task_id: + existing_task = await task_repo.get(task_id) + if existing_task is None: + await task_repo.create( + id=task_id, + action=run.get("action", "unknown"), + target=run.get("target", "all"), + playbook=run.get("playbook"), + status=status, + ) + + await repo.create( + schedule_id=schedule_id, + task_id=task_id, + status=status, + started_at=started_at, + completed_at=completed_at, + duration=duration, + error_message=error_message, + output=output, + ) + created += 1 + + report["schedule_runs"] = created + backup_file(SCHEDULE_RUNS_FILE) + + +async def migrate_logs(session, report: Dict[str, Any], host_map: Dict[str, str]) -> None: + repo = LogRepository(session) + created = 0 + + json_files: List[Path] = [] + if TASKS_LOGS_DIR.exists(): + json_files = [p for p in TASKS_LOGS_DIR.rglob("*.json") if not p.name.startswith(".")] + + for path in json_files: + data = load_json(path, {}) + if isinstance(data, dict): + # Tentative de mapping simple + level = data.get("level") or "info" + message = data.get("message") or json.dumps(data, ensure_ascii=False) + source = data.get("source") + details = data.get("details") + host_val = data.get("host_id") + task_id = data.get("task_id") + schedule_id = data.get("schedule_id") + else: + level = "info" + message = str(data) + source = None + details = None + host_val = None + task_id = None + schedule_id = None + + host_id = host_map.get(host_val) if isinstance(host_val, str) else host_val + await repo.create( + level=level, + source=source, + message=message, + details=details, + host_id=host_id, + task_id=task_id, + schedule_id=schedule_id, + ) + created += 1 + backup_file(path) + + report["logs"] = created + + +async def main() -> None: + report: Dict[str, Any] = {"warnings": []} + await init_db() + + async with async_session_maker() as session: + async with session.begin(): + host_map = await migrate_hosts(session, report) + await migrate_bootstrap_status(session, report, host_map) + await migrate_schedule_runs(session, report) 
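# Note: runs stored with 'finished_at'/'duration_seconds' keys, as in tasks_logs/.schedule_runs.json,
# are imported with completed_at/duration left at None, since only 'completed_at'/'duration' are read above.
# DB writes in this block share one transaction (session.begin()); .bak renames already done are not rolled back on failure.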
+ await migrate_logs(session, report, host_map) + + print("Migration terminée") + for key, value in report.items(): + if key == "warnings": + if value: + print("Warnings:") + for w in value: + print(f" - {w}") + continue + print(f" - {key}: {value}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/pytest.ini b/pytest.ini new file mode 100644 index 0000000..c1203e5 --- /dev/null +++ b/pytest.ini @@ -0,0 +1,3 @@ +[pytest] +asyncio_mode = auto +asyncio_default_fixture_loop_scope = module diff --git a/tasks_logs/.schedule_runs.json b/tasks_logs/.schedule_runs.json index 2f33e44..6787dcb 100644 --- a/tasks_logs/.schedule_runs.json +++ b/tasks_logs/.schedule_runs.json @@ -1,5 +1,173 @@ { "runs": [ + { + "id": "run_e16db5ac6f5c", + "schedule_id": "sched_110c001afe0c", + "task_id": "2", + "started_at": "2025-12-05 02:35:00.012993+00:00", + "finished_at": "2025-12-05 02:35:32.549542+00:00", + "status": "success", + "duration_seconds": 32.45821054699991, + "hosts_impacted": 15, + "error_message": null, + "retry_attempt": 0 + }, + { + "id": "run_6c434a169263", + "schedule_id": "sched_110c001afe0c", + "task_id": "1", + "started_at": "2025-12-05 02:30:00.004595+00:00", + "finished_at": "2025-12-05 02:30:30.003032+00:00", + "status": "success", + "duration_seconds": 29.95905439800117, + "hosts_impacted": 15, + "error_message": null, + "retry_attempt": 0 + }, + { + "id": "run_debf96da90dd", + "schedule_id": "sched_110c001afe0c", + "task_id": "2", + "started_at": "2025-12-05 02:25:00.016354+00:00", + "finished_at": "2025-12-05 02:25:27.580495+00:00", + "status": "success", + "duration_seconds": 27.521959419998893, + "hosts_impacted": 15, + "error_message": null, + "retry_attempt": 0 + }, + { + "id": "run_bda871b98a7c", + "schedule_id": "sched_31a7ffb99bfd", + "task_id": "1", + "started_at": "2025-12-05 02:20:00.004169+00:00", + "finished_at": "2025-12-05 02:20:28.118352+00:00", + "status": "success", + "duration_seconds": 28.0753927859987, + "hosts_impacted": 15, + "error_message": null, + "retry_attempt": 0 + }, + { + "id": "run_9acaf3ee6040", + "schedule_id": "sched_d5370726086b", + "task_id": "4", + "started_at": "2025-12-05 02:05:01.066895+00:00", + "finished_at": null, + "status": "running", + "duration_seconds": null, + "hosts_impacted": 0, + "error_message": null, + "retry_attempt": 0 + }, + { + "id": "run_25dee59d8f54", + "schedule_id": "sched_178b8e511908", + "task_id": "3", + "started_at": "2025-12-05 02:05:00.942939+00:00", + "finished_at": null, + "status": "running", + "duration_seconds": null, + "hosts_impacted": 0, + "error_message": null, + "retry_attempt": 0 + }, + { + "id": "run_06b2fe4c75f9", + "schedule_id": "sched_d5370726086b", + "task_id": "2", + "started_at": "2025-12-05 02:00:00.048675+00:00", + "finished_at": "2025-12-05 02:00:31.174698+00:00", + "status": "success", + "duration_seconds": 31.10493237799892, + "hosts_impacted": 15, + "error_message": null, + "retry_attempt": 0 + }, + { + "id": "run_5a3ada10451e", + "schedule_id": "sched_178b8e511908", + "task_id": "1", + "started_at": "2025-12-05 02:00:00.004396+00:00", + "finished_at": "2025-12-05 02:00:30.956215+00:00", + "status": "success", + "duration_seconds": 30.92840002899902, + "hosts_impacted": 15, + "error_message": null, + "retry_attempt": 0 + }, + { + "id": "run_484f67657ee4", + "schedule_id": "sched_d5370726086b", + "task_id": "3", + "started_at": "2025-12-05 01:55:00.084088+00:00", + "finished_at": "2025-12-05 01:55:32.096250+00:00", + "status": "success", + "duration_seconds": 31.975180113000533, 
+ "hosts_impacted": 15, + "error_message": null, + "retry_attempt": 0 + }, + { + "id": "run_7c9cbee2fe69", + "schedule_id": "sched_178b8e511908", + "task_id": "2", + "started_at": "2025-12-05 01:55:00.018967+00:00", + "finished_at": "2025-12-05 01:55:32.306141+00:00", + "status": "success", + "duration_seconds": 32.26106233700193, + "hosts_impacted": 15, + "error_message": null, + "retry_attempt": 0 + }, + { + "id": "run_a45e3d80323d", + "schedule_id": "sched_d5370726086b", + "task_id": "1", + "started_at": "2025-12-05 01:50:00.003670+00:00", + "finished_at": "2025-12-05 01:50:27.635237+00:00", + "status": "success", + "duration_seconds": 27.58177596600217, + "hosts_impacted": 15, + "error_message": null, + "retry_attempt": 0 + }, + { + "id": "run_6ebb5bb47219", + "schedule_id": "sched_d5370726086b", + "task_id": "2", + "started_at": "2025-12-05 01:45:00.003641+00:00", + "finished_at": "2025-12-05 01:45:26.015984+00:00", + "status": "success", + "duration_seconds": 25.9568110279979, + "hosts_impacted": 15, + "error_message": null, + "retry_attempt": 0 + }, + { + "id": "run_f07c8820abcf", + "schedule_id": "sched_d5370726086b", + "task_id": "1", + "started_at": "2025-12-05 01:40:00.003609+00:00", + "finished_at": "2025-12-05 01:40:27.800302+00:00", + "status": "success", + "duration_seconds": 27.77215807200264, + "hosts_impacted": 15, + "error_message": null, + "retry_attempt": 0 + }, + { + "id": "run_c831165b16d9", + "schedule_id": "sched_d5370726086b", + "task_id": null, + "started_at": "2025-12-05 01:35:00.003976+00:00", + "finished_at": null, + "status": "running", + "duration_seconds": null, + "hosts_impacted": 0, + "error_message": null, + "retry_attempt": 0 + }, { "id": "run_9eaff32da049", "schedule_id": "sched_31a7ffb99bfd", diff --git a/tasks_logs/.schedules.json b/tasks_logs/.schedules.json index 4debd97..fbdc8f7 100644 --- a/tasks_logs/.schedules.json +++ b/tasks_logs/.schedules.json @@ -1,9 +1,9 @@ { "schedules": [ { - "id": "sched_31a7ffb99bfd", + "id": "sched_110c001afe0c", "name": "Health-check-5min", - "description": "Health-check-5min", + "description": null, "playbook": "health-check.yml", "target_type": "group", "target": "all", @@ -19,20 +19,20 @@ "timezone": "America/Montreal", "start_at": null, "end_at": null, - "next_run_at": "2025-12-04 15:35:00-05:00", - "last_run_at": "2025-12-04 20:30:00.003138+00:00", + "next_run_at": "2025-12-04T21:40:00-05:00", + "last_run_at": "2025-12-05 02:35:00.012919+00:00", "last_status": "success", - "enabled": false, + "enabled": true, "retry_on_failure": 0, "timeout": 3600, "tags": [ "Test" ], - "run_count": 22, - "success_count": 19, - "failure_count": 3, - "created_at": "2025-12-04 18:45:18.318152+00:00", - "updated_at": "2025-12-04 20:30:29.181594+00:00" + "run_count": 3, + "success_count": 3, + "failure_count": 0, + "created_at": "2025-12-05 02:24:06.110100+00:00", + "updated_at": "2025-12-05 02:35:32.549928+00:00" } ] } \ No newline at end of file diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/test_db.py b/tests/test_db.py new file mode 100644 index 0000000..bf9c7bf --- /dev/null +++ b/tests/test_db.py @@ -0,0 +1,206 @@ +"""Tests basiques pour valider la couche DB SQLAlchemy async.""" +from __future__ import annotations + +import asyncio +import sys +from pathlib import Path + +# Add project root to path +sys.path.insert(0, str(Path(__file__).resolve().parents[1])) + +import pytest +import pytest_asyncio + +from app.models.database import async_session_maker, 
init_db, engine +from app.crud.host import HostRepository +from app.crud.bootstrap_status import BootstrapStatusRepository +from app.crud.task import TaskRepository +from app.crud.schedule import ScheduleRepository +from app.crud.log import LogRepository + +# Configure pytest-asyncio +pytestmark = pytest.mark.asyncio + + +@pytest_asyncio.fixture(scope="module") +async def setup_db(): + await init_db() + yield + # Cleanup: drop all tables + from app.models.database import Base + async with engine.begin() as conn: + await conn.run_sync(Base.metadata.drop_all) + + +@pytest.mark.asyncio +async def test_create_host(setup_db): + async with async_session_maker() as session: + repo = HostRepository(session) + host = await repo.create( + id="test-host-001", + name="test.host.local", + ip_address="192.168.1.100", + ansible_group="env_test", + status="online", + reachable=True, + ) + await session.commit() + + assert host.id == "test-host-001" + assert host.name == "test.host.local" + assert host.ip_address == "192.168.1.100" + + +@pytest.mark.asyncio +async def test_list_hosts(setup_db): + async with async_session_maker() as session: + repo = HostRepository(session) + hosts = await repo.list(limit=10, offset=0) + assert isinstance(hosts, list) + + +@pytest.mark.asyncio +async def test_create_task(setup_db): + async with async_session_maker() as session: + repo = TaskRepository(session) + task = await repo.create( + id="task-001", + action="health-check", + target="all", + playbook="health-check.yml", + status="pending", + ) + await session.commit() + + assert task.id == "task-001" + assert task.action == "health-check" + + +@pytest.mark.asyncio +async def test_create_log(setup_db): + async with async_session_maker() as session: + repo = LogRepository(session) + log = await repo.create( + level="INFO", + message="Test log entry", + source="test", + ) + await session.commit() + + assert log.id is not None + assert log.level == "INFO" + + +@pytest.mark.asyncio +async def test_soft_delete_host(setup_db): + async with async_session_maker() as session: + repo = HostRepository(session) + # Create a host to delete + host = await repo.create( + id="host-to-delete", + name="delete.me.local", + ip_address="192.168.1.200", + status="unknown", + ) + await session.commit() + + # Soft delete + deleted = await repo.soft_delete("host-to-delete") + await session.commit() + assert deleted is True + + # Should not appear in normal list + hosts = await repo.list() + host_ids = [h.id for h in hosts] + assert "host-to-delete" not in host_ids + + # But should appear with include_deleted=True + host_with_deleted = await repo.get("host-to-delete", include_deleted=True) + assert host_with_deleted is not None + assert host_with_deleted.deleted_at is not None + + +@pytest.mark.asyncio +async def test_create_schedule(setup_db): + from app.crud.schedule import ScheduleRepository + async with async_session_maker() as session: + repo = ScheduleRepository(session) + schedule = await repo.create( + id="schedule-001", + name="Daily Backup", + playbook="backup.yml", + target="all", + schedule_type="recurring", + recurrence_type="daily", + recurrence_time="02:00", + enabled=True, + ) + await session.commit() + + assert schedule.id == "schedule-001" + assert schedule.name == "Daily Backup" + assert schedule.enabled is True + + +@pytest.mark.asyncio +async def test_schedule_soft_delete(setup_db): + from app.crud.schedule import ScheduleRepository + async with async_session_maker() as session: + repo = ScheduleRepository(session) + # 
Create + schedule = await repo.create( + id="schedule-to-delete", + name="To Delete", + playbook="test.yml", + target="all", + schedule_type="once", + enabled=True, + ) + await session.commit() + + # Soft delete + deleted = await repo.soft_delete("schedule-to-delete") + await session.commit() + assert deleted is True + + # Should not appear in normal list + schedules = await repo.list() + schedule_ids = [s.id for s in schedules] + assert "schedule-to-delete" not in schedule_ids + + +@pytest.mark.asyncio +async def test_create_schedule_run(setup_db): + from app.crud.schedule import ScheduleRepository + from app.crud.schedule_run import ScheduleRunRepository + from datetime import datetime, timezone + + async with async_session_maker() as session: + # Create schedule first + sched_repo = ScheduleRepository(session) + schedule = await sched_repo.create( + id="schedule-for-run", + name="Run Test", + playbook="test.yml", + target="all", + schedule_type="once", + enabled=True, + ) + await session.commit() + + # Create run + run_repo = ScheduleRunRepository(session) + run = await run_repo.create( + schedule_id="schedule-for-run", + status="running", + started_at=datetime.now(timezone.utc), + ) + await session.commit() + + assert run.id is not None + assert run.schedule_id == "schedule-for-run" + assert run.status == "running" + + +if __name__ == "__main__": + pytest.main([__file__, "-v"])
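
One practical note on these tests: they import the application's real `engine`, so tables are created in whatever `DATABASE_URL` points to and dropped again by the module-scoped fixture. A minimal sketch of running them against a throwaway database instead — the temporary-file layout here is an assumption, not something the diff provides:

```python
# Run the DB tests against a temporary SQLite file so data/homelab.db is untouched.
# DATABASE_URL must be set before app.models.database is imported, hence a subprocess.
import os
import subprocess
import sys
import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as tmp:
    env = dict(os.environ)
    env["DATABASE_URL"] = f"sqlite+aiosqlite:///{(Path(tmp) / 'test.db').as_posix()}"
    subprocess.run(
        [sys.executable, "-m", "pytest", "tests/test_db.py", "-v"],
        env=env,
        check=False,  # let pytest's own output report failures
    )
```
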