From ad3a8a56390d041834e2ea615a82de50625806b4 Mon Sep 17 00:00:00 2001 From: Bruno Charest Date: Thu, 4 Dec 2025 21:37:55 -0500 Subject: [PATCH] Add SQLite database integration with SQLAlchemy async, migration tooling, and comprehensive CRUD operations for hosts, tasks, schedules, and logs --- Dockerfile | 7 +- README.md | 48 +- alembic.ini | 35 + alembic/env.py | 91 ++ alembic/versions/0001_initial_schema.py | 122 +++ app/app_optimized.py | 1108 +++++++++++++++-------- app/crud/__init__.py | 13 + app/crud/bootstrap_status.py | 41 + app/crud/host.py | 67 ++ app/crud/log.py | 28 + app/crud/schedule.py | 60 ++ app/crud/schedule_run.py | 35 + app/crud/task.py | 44 + app/models/__init__.py | 17 + app/models/bootstrap_status.py | 27 + app/models/database.py | 106 +++ app/models/host.py | 33 + app/models/log.py | 36 + app/models/schedule.py | 40 + app/models/schedule_run.py | 31 + app/models/task.py | 31 + app/requirements.txt | 7 +- app/schemas/__init__.py | 21 + app/schemas/bootstrap_status.py | 18 + app/schemas/host.py | 36 + app/schemas/log.py | 23 + app/schemas/schedule.py | 67 ++ app/schemas/task.py | 36 + data/homelab.db | Bin 0 -> 4096 bytes data/homelab.db-shm | Bin 0 -> 32768 bytes data/homelab.db-wal | Bin 0 -> 1042392 bytes docker-compose.yml | 7 + migrate_json_to_sqlite.py | 259 ++++++ pytest.ini | 3 + tasks_logs/.schedule_runs.json | 168 ++++ tasks_logs/.schedules.json | 20 +- tests/__init__.py | 0 tests/test_db.py | 206 +++++ 38 files changed, 2503 insertions(+), 388 deletions(-) create mode 100644 alembic.ini create mode 100644 alembic/env.py create mode 100644 alembic/versions/0001_initial_schema.py create mode 100644 app/crud/__init__.py create mode 100644 app/crud/bootstrap_status.py create mode 100644 app/crud/host.py create mode 100644 app/crud/log.py create mode 100644 app/crud/schedule.py create mode 100644 app/crud/schedule_run.py create mode 100644 app/crud/task.py create mode 100644 app/models/__init__.py create mode 100644 app/models/bootstrap_status.py create mode 100644 app/models/database.py create mode 100644 app/models/host.py create mode 100644 app/models/log.py create mode 100644 app/models/schedule.py create mode 100644 app/models/schedule_run.py create mode 100644 app/models/task.py create mode 100644 app/schemas/__init__.py create mode 100644 app/schemas/bootstrap_status.py create mode 100644 app/schemas/host.py create mode 100644 app/schemas/log.py create mode 100644 app/schemas/schedule.py create mode 100644 app/schemas/task.py create mode 100644 data/homelab.db create mode 100644 data/homelab.db-shm create mode 100644 data/homelab.db-wal create mode 100644 migrate_json_to_sqlite.py create mode 100644 pytest.ini create mode 100644 tests/__init__.py create mode 100644 tests/test_db.py diff --git a/Dockerfile b/Dockerfile index 93461dc..28bd7d4 100644 --- a/Dockerfile +++ b/Dockerfile @@ -39,11 +39,16 @@ COPY app/ ./ COPY ansible/ /ansible/ # Création du répertoire pour les clés SSH (sera monté en volume) -RUN mkdir -p /app/ssh_keys +RUN mkdir -p /app/ssh_keys /app/data # Configuration Ansible pour utiliser le bon répertoire ENV ANSIBLE_CONFIG=/ansible/ansible.cfg +# Variables par défaut pour la base SQLite dans le conteneur +ENV HOMELAB_DATA_DIR=/app/data +ENV DB_PATH=/app/data/homelab.db +ENV DATABASE_URL=sqlite+aiosqlite:////app/data/homelab.db + # Exposition du port EXPOSE 8000 diff --git a/README.md b/README.md index 07070de..760a5ed 100644 --- a/README.md +++ b/README.md @@ -365,16 +365,56 @@ ANSIBLE_DIR=/etc/ansible ### Base de Données -Par défaut, 
l'application utilise une base de données en mémoire. Pour une utilisation en production, configurez PostgreSQL ou SQLite en modifiant la classe `InMemoryDB`. +L'application utilise **SQLite** avec **SQLAlchemy 2.x async** pour le stockage persistant des données (hôtes, tâches, schedules, logs). La base est créée automatiquement au démarrage dans `data/homelab.db`. + +#### Migration depuis les fichiers JSON + +Si vous migrez depuis une version antérieure utilisant des fichiers JSON : + +```bash +# 1. Installer les nouvelles dépendances +pip install -r app/requirements.txt + +# 2. Exécuter le script de migration +python migrate_json_to_sqlite.py +``` + +Le script : +- Importe les données depuis `ansible/.host_status.json`, `tasks_logs/.bootstrap_status.json`, etc. +- Crée des sauvegardes `.bak` des fichiers JSON originaux +- Génère un rapport de migration + +#### Structure de la base de données + +``` +data/homelab.db +├── hosts # Inventaire des hôtes +├── bootstrap_status # Statut bootstrap SSH par hôte +├── tasks # Historique des tâches exécutées +├── schedules # Planifications récurrentes +├── schedule_runs # Historique des exécutions de schedules +└── logs # Logs système +``` + +#### Migrations Alembic + +Pour les évolutions de schéma : + +```bash +# Appliquer les migrations +alembic upgrade head + +# Créer une nouvelle migration +alembic revision --autogenerate -m "description" +``` ## 🚀 Déploiement ### Production 1. **Configuration de la base de données** - ```python - # Remplacer InMemoryDB par une vraie base de données - ``` + - Par défaut : SQLite dans `data/homelab.db` + - Variable d'environnement : `DATABASE_URL=sqlite+aiosqlite:///./data/homelab.db` 2. **Sécurité** - Utilisez une clé API forte diff --git a/alembic.ini b/alembic.ini new file mode 100644 index 0000000..8b326ce --- /dev/null +++ b/alembic.ini @@ -0,0 +1,35 @@ +[alembic] +script_location = alembic +sqlalchemy.url = sqlite+aiosqlite:///./data/homelab.db + +[loggers] +keys = root,sqlalchemy,alembic + +[handlers] +keys = console + +[formatters] +keys = generic + +[logger_root] +level = WARN +handlers = console + +[logger_sqlalchemy] +level = WARN +handlers = console +qualname = sqlalchemy.engine + +[logger_alembic] +level = INFO +handlers = console +qualname = alembic + +[handler_console] +class = StreamHandler +args = (sys.stderr,) +level = NOTSET +formatter = generic + +[formatter_generic] +format = %(levelname)-5.5s [%(name)s] %(message)s diff --git a/alembic/env.py b/alembic/env.py new file mode 100644 index 0000000..a9a6c4f --- /dev/null +++ b/alembic/env.py @@ -0,0 +1,91 @@ +from __future__ import annotations + +import asyncio +from logging.config import fileConfig +import os +from pathlib import Path + +from sqlalchemy import pool +from sqlalchemy.engine import Connection +from sqlalchemy.ext.asyncio import async_engine_from_config + +from alembic import context + +# Add project root to sys.path for module imports +import sys +ROOT_DIR = Path(__file__).resolve().parents[1] +if str(ROOT_DIR) not in sys.path: + sys.path.insert(0, str(ROOT_DIR)) + +from app.models.database import Base, metadata_obj, DATABASE_URL # noqa: E402 + +# this is the Alembic Config object, which provides +# access to the values within the .ini file in use. +config = context.config + +# Interpret the config file for Python logging. 
+if config.config_file_name is not None: + fileConfig(config.config_file_name) + +# Override sqlalchemy.url from environment if provided +url_from_env = os.environ.get("DATABASE_URL") +if url_from_env: + config.set_main_option("sqlalchemy.url", url_from_env) +else: + config.set_main_option("sqlalchemy.url", str(DATABASE_URL)) + +# add your model's MetaData object here +# for 'autogenerate' support +# from myapp import mymodel +# target_metadata = mymodel.Base.metadata +target_metadata = metadata_obj + + +def run_migrations_offline() -> None: + """Run migrations in 'offline' mode.""" + url = config.get_main_option("sqlalchemy.url") + context.configure( + url=url, + target_metadata=target_metadata, + literal_binds=True, + dialect_opts={"paramstyle": "named"}, + compare_type=True, + ) + + with context.begin_transaction(): + context.run_migrations() + + +def do_run_migrations(connection: Connection) -> None: + context.configure( + connection=connection, + target_metadata=target_metadata, + compare_type=True, + ) + + with context.begin_transaction(): + context.run_migrations() + + +def run_migrations_online() -> None: + """Run migrations in 'online' mode.""" + + connectable = async_engine_from_config( + config.get_section(config.config_ini_section, {}), + prefix="sqlalchemy.", + poolclass=pool.NullPool, + future=True, + ) + + async def async_main() -> None: + async with connectable.connect() as connection: + await connection.run_sync(do_run_migrations) + await connectable.dispose() + + asyncio.run(async_main()) + + +if context.is_offline_mode(): + run_migrations_offline() +else: + run_migrations_online() diff --git a/alembic/versions/0001_initial_schema.py b/alembic/versions/0001_initial_schema.py new file mode 100644 index 0000000..2982d37 --- /dev/null +++ b/alembic/versions/0001_initial_schema.py @@ -0,0 +1,122 @@ +"""Initial database schema for Homelab Automation + +Revision ID: 0001_initial +Revises: +Create Date: 2025-12-04 +""" + +from __future__ import annotations + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. 
+revision = "0001_initial" +down_revision = None +branch_labels = None +depends_on = None + + +def upgrade() -> None: + op.create_table( + "hosts", + sa.Column("id", sa.String(), primary_key=True), + sa.Column("name", sa.String(), nullable=False), + sa.Column("ip_address", sa.String(), nullable=False, unique=True), + sa.Column("status", sa.String(), nullable=False, server_default=sa.text("'unknown'")), + sa.Column("ansible_group", sa.String(), nullable=True), + sa.Column("last_seen", sa.DateTime(timezone=True), nullable=True), + sa.Column("reachable", sa.Boolean(), nullable=False, server_default=sa.text("0")), + sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.func.now(), nullable=False), + sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.func.now(), nullable=False), + sa.Column("deleted_at", sa.DateTime(timezone=True), nullable=True), + ) + + op.create_table( + "bootstrap_status", + sa.Column("id", sa.Integer(), primary_key=True, autoincrement=True), + sa.Column("host_id", sa.String(), sa.ForeignKey("hosts.id", ondelete="CASCADE"), nullable=False), + sa.Column("status", sa.String(), nullable=False), + sa.Column("automation_user", sa.String(), nullable=True), + sa.Column("last_attempt", sa.DateTime(timezone=True), nullable=True), + sa.Column("error_message", sa.Text(), nullable=True), + sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.func.now(), nullable=False), + ) + + op.create_table( + "tasks", + sa.Column("id", sa.String(), primary_key=True), + sa.Column("action", sa.String(), nullable=False), + sa.Column("target", sa.String(), nullable=False), + sa.Column("status", sa.String(), nullable=False, server_default=sa.text("'pending'")), + sa.Column("playbook", sa.String(), nullable=True), + sa.Column("started_at", sa.DateTime(timezone=True), nullable=True), + sa.Column("completed_at", sa.DateTime(timezone=True), nullable=True), + sa.Column("error_message", sa.Text(), nullable=True), + sa.Column("result_data", sa.JSON(), nullable=True), + sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.func.now(), nullable=False), + ) + + op.create_table( + "schedules", + sa.Column("id", sa.String(), primary_key=True), + sa.Column("name", sa.String(), nullable=False), + sa.Column("playbook", sa.String(), nullable=False), + sa.Column("target", sa.String(), nullable=False), + sa.Column("schedule_type", sa.String(), nullable=False), + sa.Column("schedule_time", sa.DateTime(timezone=True), nullable=True), + sa.Column("recurrence_type", sa.String(), nullable=True), + sa.Column("recurrence_time", sa.String(), nullable=True), + sa.Column("recurrence_days", sa.Text(), nullable=True), + sa.Column("cron_expression", sa.String(), nullable=True), + sa.Column("enabled", sa.Boolean(), nullable=False, server_default=sa.text("1")), + sa.Column("tags", sa.Text(), nullable=True), + sa.Column("next_run", sa.DateTime(timezone=True), nullable=True), + sa.Column("last_run", sa.DateTime(timezone=True), nullable=True), + sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.func.now(), nullable=False), + sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.func.now(), nullable=False), + sa.Column("deleted_at", sa.DateTime(timezone=True), nullable=True), + ) + + op.create_table( + "schedule_runs", + sa.Column("id", sa.Integer(), primary_key=True, autoincrement=True), + sa.Column("schedule_id", sa.String(), sa.ForeignKey("schedules.id", ondelete="CASCADE"), nullable=False), + sa.Column("task_id", sa.String(), 
sa.ForeignKey("tasks.id", ondelete="SET NULL"), nullable=True), + sa.Column("status", sa.String(), nullable=False), + sa.Column("started_at", sa.DateTime(timezone=True), nullable=False), + sa.Column("completed_at", sa.DateTime(timezone=True), nullable=True), + sa.Column("duration", sa.Float(), nullable=True), + sa.Column("error_message", sa.Text(), nullable=True), + sa.Column("output", sa.Text(), nullable=True), + sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.func.now(), nullable=False), + ) + + op.create_table( + "logs", + sa.Column("id", sa.Integer(), primary_key=True, autoincrement=True), + sa.Column("level", sa.String(), nullable=False), + sa.Column("source", sa.String(), nullable=True), + sa.Column("message", sa.Text(), nullable=False), + sa.Column("details", sa.JSON(), nullable=True), + sa.Column("host_id", sa.String(), sa.ForeignKey("hosts.id", ondelete="SET NULL"), nullable=True), + sa.Column("task_id", sa.String(), sa.ForeignKey("tasks.id", ondelete="SET NULL"), nullable=True), + sa.Column("schedule_id", sa.String(), sa.ForeignKey("schedules.id", ondelete="SET NULL"), nullable=True), + sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.func.now(), nullable=False), + ) + + op.create_index("idx_logs_created_at", "logs", ["created_at"]) + op.create_index("idx_logs_level", "logs", ["level"]) + op.create_index("idx_logs_source", "logs", ["source"]) + + +def downgrade() -> None: + op.drop_index("idx_logs_source", table_name="logs") + op.drop_index("idx_logs_level", table_name="logs") + op.drop_index("idx_logs_created_at", table_name="logs") + op.drop_table("logs") + op.drop_table("schedule_runs") + op.drop_table("schedules") + op.drop_table("tasks") + op.drop_table("bootstrap_status") + op.drop_table("hosts") diff --git a/app/app_optimized.py b/app/app_optimized.py index 11506f7..34c099e 100644 --- a/app/app_optimized.py +++ b/app/app_optimized.py @@ -34,9 +34,20 @@ from fastapi.security import APIKeyHeader from fastapi.templating import Jinja2Templates from fastapi.middleware.cors import CORSMiddleware from fastapi.staticfiles import StaticFiles -from pydantic import BaseModel, Field, field_validator +from pydantic import BaseModel, Field, field_validator, ConfigDict +from sqlalchemy.ext.asyncio import AsyncSession import uvicorn +# Import DB layer (async SQLAlchemy) +from models.database import get_db # type: ignore +from crud.host import HostRepository # type: ignore +from crud.bootstrap_status import BootstrapStatusRepository # type: ignore +from crud.log import LogRepository # type: ignore +from crud.task import TaskRepository # type: ignore +from crud.schedule import ScheduleRepository # type: ignore +from crud.schedule_run import ScheduleRunRepository # type: ignore +from models.database import init_db # type: ignore + BASE_DIR = Path(__file__).resolve().parent # Configuration avancée de l'application @@ -97,7 +108,7 @@ class CommandResult(BaseModel): timestamp: datetime = Field(default_factory=lambda: datetime.now(timezone.utc)) class Host(BaseModel): - id: int + id: str name: str ip: str status: Literal["online", "offline", "warning"] @@ -114,7 +125,7 @@ class Host(BaseModel): } class Task(BaseModel): - id: int + id: str name: str host: str status: Literal["pending", "running", "completed", "failed", "cancelled"] @@ -386,7 +397,7 @@ class ScheduleRun(BaseModel): """Historique d'une exécution de schedule""" id: str = Field(default_factory=lambda: f"run_{uuid.uuid4().hex[:12]}") schedule_id: str = Field(..., description="ID du schedule 
parent") - task_id: Optional[int] = Field(default=None, description="ID de la tâche créée") + task_id: Optional[str] = Field(default=None, description="ID de la tâche créée") started_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc)) finished_at: Optional[datetime] = Field(default=None) status: Literal["running", "success", "failed", "canceled"] = Field(default="running") @@ -1375,7 +1386,7 @@ class SchedulerService: pass # Créer une tâche - task_id = db.get_next_id("tasks") + task_id = str(db.get_next_id("tasks")) playbook_name = schedule.playbook.replace('.yml', '').replace('-', ' ').title() task = Task( id=task_id, @@ -1646,6 +1657,16 @@ class SchedulerService: # Appliquer les modifications update_data = request.dict(exclude_unset=True, exclude_none=True) for key, value in update_data.items(): + # La récurrence arrive du frontend comme un dict, il faut la retransformer + # en objet ScheduleRecurrence pour que _build_cron_trigger fonctionne. + if key == "recurrence" and isinstance(value, dict): + try: + value = ScheduleRecurrence(**value) + except Exception: + # Si la récurrence est invalide, on laisse passer pour que la + # validation côté endpoint remonte une erreur explicite. + pass + if hasattr(schedule, key): setattr(schedule, key, value) @@ -1838,6 +1859,12 @@ class SchedulerService: "expression": expression } + def get_runs_for_schedule(self, schedule_id: str, limit: int = 50) -> List[Dict]: + """Récupère l'historique des exécutions d'un schedule (retourne des dicts)""" + runs = self._load_runs() + schedule_runs = [r for r in runs if r.get('schedule_id') == schedule_id] + return schedule_runs[:limit] + def cleanup_old_runs(self, days: int = 90): """Nettoie les exécutions plus anciennes que X jours""" cutoff = datetime.now(timezone.utc) - timedelta(days=days) @@ -2913,7 +2940,7 @@ class HybridDB: os_label = runtime_status.get("os", f"Linux ({primary_group})") host = Host( - id=idx, + id=str(idx), name=ah.name, ip=ah.ansible_host, status=status, @@ -3267,233 +3294,311 @@ async def delete_group( } +def _host_to_response(host_obj, bootstrap_status: Optional["BootstrapStatus"] = None) -> Dict[str, Any]: + """Map DB host + latest bootstrap to API-compatible payload.""" + return { + "id": host_obj.id, + "name": host_obj.name, + "ip": getattr(host_obj, "ip_address", None), + "status": host_obj.status, + "os": "Linux", # valeur par défaut faute d'info stockée + "last_seen": host_obj.last_seen, + "created_at": host_obj.created_at, + "groups": [g for g in [getattr(host_obj, "ansible_group", None)] if g], + "bootstrap_ok": (bootstrap_status.status == "success") if bootstrap_status else False, + "bootstrap_date": bootstrap_status.last_attempt if bootstrap_status else None, + } + + @app.get("/api/hosts/by-name/{host_name}") -async def get_host_by_name(host_name: str, api_key_valid: bool = Depends(verify_api_key)): - """Récupère un hôte spécifique par son nom""" - host = next((h for h in db.hosts if h.name == host_name), None) +async def get_host_by_name( + host_name: str, + api_key_valid: bool = Depends(verify_api_key), + db_session: AsyncSession = Depends(get_db), +): + repo = HostRepository(db_session) + bs_repo = BootstrapStatusRepository(db_session) + host = await repo.get_by_ip(host_name) or await repo.get(host_name) if not host: raise HTTPException(status_code=404, detail="Hôte non trouvé") - return host + bootstrap = await bs_repo.latest_for_host(host.id) + return _host_to_response(host, bootstrap) -@app.get("/api/hosts", response_model=List[Host]) + 
+@app.get("/api/hosts") async def get_hosts( bootstrap_status: Optional[str] = None, - api_key_valid: bool = Depends(verify_api_key) + limit: int = 100, + offset: int = 0, + api_key_valid: bool = Depends(verify_api_key), + db_session: AsyncSession = Depends(get_db), ): - """Récupère la liste de tous les hôtes - - Args: - bootstrap_status: Filtrer par statut bootstrap ('ready', 'not_configured', ou None pour tous) - """ - hosts = db.hosts - - # Filtrer par statut bootstrap si spécifié - if bootstrap_status == 'ready': - hosts = [h for h in hosts if h.bootstrap_ok] - elif bootstrap_status == 'not_configured': - hosts = [h for h in hosts if not h.bootstrap_ok] - - return hosts + repo = HostRepository(db_session) + bs_repo = BootstrapStatusRepository(db_session) + hosts = await repo.list(limit=limit, offset=offset) + # Si la base ne contient encore aucun hôte, on retombe sur les hôtes Ansible via la DB hybride + if not hosts: + hybrid_hosts = db.hosts + fallback_results = [] + for h in hybrid_hosts: + # Appliquer les mêmes filtres de bootstrap que pour la version DB + if bootstrap_status == "ready" and not h.bootstrap_ok: + continue + if bootstrap_status == "not_configured" and h.bootstrap_ok: + continue -@app.get("/api/hosts/{host_id}", response_model=Host) -async def get_host(host_id: int, api_key_valid: bool = Depends(verify_api_key)): - """Récupère un hôte spécifique par ID""" - host = next((h for h in db.hosts if h.id == host_id), None) + fallback_results.append( + { + "id": h.id, + "name": h.name, + "ip": h.ip, + "status": h.status, + "os": h.os, + "last_seen": h.last_seen, + # created_at est déjà géré par le modèle Pydantic Host (default_factory) + "created_at": h.created_at, + "groups": h.groups, + "bootstrap_ok": h.bootstrap_ok, + "bootstrap_date": h.bootstrap_date, + } + ) + return fallback_results + + results = [] + for host in hosts: + bootstrap = await bs_repo.latest_for_host(host.id) + if bootstrap_status == "ready" and not (bootstrap and bootstrap.status == "success"): + continue + if bootstrap_status == "not_configured" and bootstrap and bootstrap.status == "success": + continue + results.append(_host_to_response(host, bootstrap)) + return results + + +@app.get("/api/hosts/{host_id}") +async def get_host( + host_id: str, + api_key_valid: bool = Depends(verify_api_key), + db_session: AsyncSession = Depends(get_db), +): + repo = HostRepository(db_session) + bs_repo = BootstrapStatusRepository(db_session) + host = await repo.get(host_id) if not host: raise HTTPException(status_code=404, detail="Hôte non trouvé") - return host + bootstrap = await bs_repo.latest_for_host(host.id) + return _host_to_response(host, bootstrap) + @app.post("/api/hosts") -async def create_host(host_request: HostRequest, api_key_valid: bool = Depends(verify_api_key)): - """Crée un nouvel hôte dans l'inventaire Ansible (hosts.yml) - - L'hôte sera ajouté au groupe d'environnement spécifié et aux groupes de rôles. 
- """ +async def create_host( + host_request: HostRequest, + api_key_valid: bool = Depends(verify_api_key), + db_session: AsyncSession = Depends(get_db), +): + repo = HostRepository(db_session) + bs_repo = BootstrapStatusRepository(db_session) + # Vérifier si l'hôte existe déjà - if ansible_service.host_exists(host_request.name): - raise HTTPException(status_code=400, detail=f"L'hôte '{host_request.name}' existe déjà dans l'inventaire") - + existing = await repo.get_by_ip(host_request.name) + if existing: + raise HTTPException(status_code=400, detail=f"L'hôte '{host_request.name}' existe déjà") + # Valider le groupe d'environnement env_groups = ansible_service.get_env_groups() - if host_request.env_group not in env_groups: - # Créer le groupe s'il n'existe pas mais commence par env_ - if not host_request.env_group.startswith('env_'): - raise HTTPException(status_code=400, detail=f"Le groupe d'environnement doit commencer par 'env_'. Groupes existants: {env_groups}") - + if host_request.env_group not in env_groups and not host_request.env_group.startswith("env_"): + raise HTTPException(status_code=400, detail=f"Le groupe d'environnement doit commencer par 'env_'. Groupes existants: {env_groups}") + # Valider les groupes de rôles role_groups = ansible_service.get_role_groups() for role in host_request.role_groups: - if role not in role_groups and not role.startswith('role_'): + if role not in role_groups and not role.startswith("role_"): raise HTTPException(status_code=400, detail=f"Le groupe de rôle '{role}' doit commencer par 'role_'. Groupes existants: {role_groups}") - + try: - # Ajouter l'hôte à l'inventaire + # Ajouter l'hôte à l'inventaire Ansible ansible_service.add_host_to_inventory( hostname=host_request.name, env_group=host_request.env_group, role_groups=host_request.role_groups, - ansible_host=host_request.ip + ansible_host=host_request.ip, ) - - # Invalider le cache pour recharger les hôtes - db._hosts_cache = None - - # Récupérer le nouvel hôte - new_host = next((h for h in db.hosts if h.name == host_request.name), None) - - # Ajouter un log - log_entry = LogEntry( - id=db.get_next_id("logs"), - timestamp=datetime.now(timezone.utc), - level="INFO", - message=f"Hôte '{host_request.name}' ajouté à l'inventaire (env: {host_request.env_group}, roles: {host_request.role_groups})", - source="inventory", - host=host_request.name + + # Créer en base + host = await repo.create( + id=uuid.uuid4().hex, + name=host_request.name, + ip_address=host_request.ip or host_request.name, + ansible_group=host_request.env_group, + status="unknown", + reachable=False, + last_seen=None, ) - db.logs.insert(0, log_entry) - + bootstrap = await bs_repo.latest_for_host(host.id) + + await db_session.commit() + # Notifier les clients WebSocket - await ws_manager.broadcast({ - "type": "host_created", - "data": new_host.dict() if new_host else {"name": host_request.name} - }) - + await ws_manager.broadcast( + { + "type": "host_created", + "data": _host_to_response(host, bootstrap), + } + ) + return { "message": f"Hôte '{host_request.name}' ajouté avec succès", - "host": new_host.dict() if new_host else None, - "inventory_updated": True + "host": _host_to_response(host, bootstrap), + "inventory_updated": True, } - + + except HTTPException: + raise except Exception as e: + await db_session.rollback() raise HTTPException(status_code=500, detail=f"Erreur lors de l'ajout de l'hôte: {str(e)}") + @app.put("/api/hosts/{host_name}") async def update_host( host_name: str, update_request: HostUpdateRequest, - 
api_key_valid: bool = Depends(verify_api_key) + api_key_valid: bool = Depends(verify_api_key), + db_session: AsyncSession = Depends(get_db), ): - """Met à jour les groupes d'un hôte existant dans l'inventaire Ansible""" - # Vérifier que l'hôte existe - if not ansible_service.host_exists(host_name): - raise HTTPException(status_code=404, detail=f"Hôte '{host_name}' non trouvé dans l'inventaire") - + repo = HostRepository(db_session) + bs_repo = BootstrapStatusRepository(db_session) + host = await repo.get_by_ip(host_name) or await repo.get(host_name) + if not host: + raise HTTPException(status_code=404, detail=f"Hôte '{host_name}' non trouvé") + # Valider le groupe d'environnement si fourni if update_request.env_group: env_groups = ansible_service.get_env_groups() - if update_request.env_group not in env_groups and not update_request.env_group.startswith('env_'): + if update_request.env_group not in env_groups and not update_request.env_group.startswith("env_"): raise HTTPException(status_code=400, detail=f"Le groupe d'environnement doit commencer par 'env_'") - + # Valider les groupes de rôles si fournis if update_request.role_groups: for role in update_request.role_groups: - if not role.startswith('role_'): + if not role.startswith("role_"): raise HTTPException(status_code=400, detail=f"Le groupe de rôle '{role}' doit commencer par 'role_'") - + try: - success = ansible_service.update_host_groups( + ansible_service.update_host_groups( hostname=host_name, env_group=update_request.env_group, role_groups=update_request.role_groups, - ansible_host=update_request.ansible_host + ansible_host=update_request.ansible_host, ) - - if not success: - raise HTTPException(status_code=500, detail="Échec de la mise à jour de l'hôte") - - # Invalider le cache - db._hosts_cache = None - - # Récupérer l'hôte mis à jour - updated_host = next((h for h in db.hosts if h.name == host_name), None) - - # Ajouter un log - log_entry = LogEntry( - id=db.get_next_id("logs"), - timestamp=datetime.now(timezone.utc), - level="INFO", - message=f"Hôte '{host_name}' mis à jour (env: {update_request.env_group}, roles: {update_request.role_groups})", - source="inventory", - host=host_name + + await repo.update( + host, + ansible_group=update_request.env_group or host.ansible_group, ) - db.logs.insert(0, log_entry) - - # Notifier les clients WebSocket - await ws_manager.broadcast({ - "type": "host_updated", - "data": updated_host.dict() if updated_host else {"name": host_name} - }) - + await db_session.commit() + + bootstrap = await bs_repo.latest_for_host(host.id) + + await ws_manager.broadcast( + { + "type": "host_updated", + "data": _host_to_response(host, bootstrap), + } + ) + return { "message": f"Hôte '{host_name}' mis à jour avec succès", - "host": updated_host.dict() if updated_host else None, - "inventory_updated": True + "host": _host_to_response(host, bootstrap), + "inventory_updated": True, } - + except HTTPException: + await db_session.rollback() raise except Exception as e: + await db_session.rollback() raise HTTPException(status_code=500, detail=f"Erreur lors de la mise à jour: {str(e)}") + @app.delete("/api/hosts/by-name/{host_name}") -async def delete_host_by_name(host_name: str, api_key_valid: bool = Depends(verify_api_key)): - """Supprime un hôte de l'inventaire Ansible par son nom""" - # Vérifier que l'hôte existe - if not ansible_service.host_exists(host_name): - raise HTTPException(status_code=404, detail=f"Hôte '{host_name}' non trouvé dans l'inventaire") - +async def delete_host_by_name( + host_name: 
str, + api_key_valid: bool = Depends(verify_api_key), + db_session: AsyncSession = Depends(get_db), +): + repo = HostRepository(db_session) + host = await repo.get_by_ip(host_name) or await repo.get(host_name) + if not host: + raise HTTPException(status_code=404, detail=f"Hôte '{host_name}' non trouvé") + try: - success = ansible_service.remove_host_from_inventory(host_name) - - if not success: - raise HTTPException(status_code=500, detail="Échec de la suppression de l'hôte") - - # Invalider le cache - db._hosts_cache = None - - # Ajouter un log - log_entry = LogEntry( - id=db.get_next_id("logs"), - timestamp=datetime.now(timezone.utc), - level="WARN", - message=f"Hôte '{host_name}' supprimé de l'inventaire", - source="inventory", - host=host_name + ansible_service.remove_host_from_inventory(host_name) + await repo.soft_delete(host.id) + await db_session.commit() + + await ws_manager.broadcast( + { + "type": "host_deleted", + "data": {"name": host_name}, + } ) - db.logs.insert(0, log_entry) - - # Notifier les clients WebSocket - await ws_manager.broadcast({ - "type": "host_deleted", - "data": {"name": host_name} - }) - - return { - "message": f"Hôte '{host_name}' supprimé avec succès", - "inventory_updated": True - } - + + return {"message": f"Hôte '{host_name}' supprimé avec succès", "inventory_updated": True} except HTTPException: + await db_session.rollback() raise except Exception as e: + await db_session.rollback() raise HTTPException(status_code=500, detail=f"Erreur lors de la suppression: {str(e)}") + @app.delete("/api/hosts/{host_id}") -async def delete_host(host_id: int, api_key_valid: bool = Depends(verify_api_key)): - """Supprime un hôte par ID""" - host = next((h for h in db.hosts if h.id == host_id), None) +async def delete_host( + host_id: str, + api_key_valid: bool = Depends(verify_api_key), + db_session: AsyncSession = Depends(get_db), +): + repo = HostRepository(db_session) + host = await repo.get(host_id) if not host: raise HTTPException(status_code=404, detail="Hôte non trouvé") - - return await delete_host_by_name(host.name, api_key_valid) -@app.get("/api/tasks", response_model=List[Task]) -async def get_tasks(api_key_valid: bool = Depends(verify_api_key)): + return await delete_host_by_name(host.name, api_key_valid, db_session) + +@app.get("/api/tasks") +async def get_tasks( + limit: int = 100, + offset: int = 0, + api_key_valid: bool = Depends(verify_api_key), + db_session: AsyncSession = Depends(get_db), +): """Récupère la liste de toutes les tâches""" - return db.tasks + repo = TaskRepository(db_session) + tasks = await repo.list(limit=limit, offset=offset) + return [ + { + "id": t.id, + "name": t.action, + "host": t.target, + "status": t.status, + "progress": 100 if t.status == "completed" else (50 if t.status == "running" else 0), + "start_time": t.started_at, + "end_time": t.completed_at, + "duration": None, + "output": t.result_data.get("output") if t.result_data else None, + "error": t.error_message, + } + for t in tasks + ] -@app.post("/api/tasks", response_model=Task) -async def create_task(task_request: TaskRequest, api_key_valid: bool = Depends(verify_api_key)): + +@app.post("/api/tasks") +async def create_task( + task_request: TaskRequest, + api_key_valid: bool = Depends(verify_api_key), + db_session: AsyncSession = Depends(get_db), +): """Crée une nouvelle tâche et exécute le playbook Ansible correspondant""" task_names = { 'upgrade': 'Mise à jour système', @@ -3505,39 +3610,55 @@ async def create_task(task_request: TaskRequest, api_key_valid: bool = 
Depends(v 'maintenance': 'Maintenance', 'bootstrap': 'Bootstrap Ansible' } - - new_task = Task( - id=db.get_next_id("tasks"), - name=task_names.get(task_request.action, f"Tâche {task_request.action}"), - host=task_request.host or task_request.group or "all", + + repo = TaskRepository(db_session) + task_id = uuid.uuid4().hex + target = task_request.host or task_request.group or "all" + playbook = ACTION_PLAYBOOK_MAP.get(task_request.action) + + task_obj = await repo.create( + id=task_id, + action=task_request.action, + target=target, + playbook=playbook, status="running", - progress=0, - start_time=datetime.now(timezone.utc) ) - - db.tasks.append(new_task) - + await repo.update(task_obj, started_at=datetime.now(timezone.utc)) + await db_session.commit() + + response_data = { + "id": task_obj.id, + "name": task_names.get(task_request.action, f"Tâche {task_request.action}"), + "host": target, + "status": "running", + "progress": 0, + "start_time": task_obj.started_at, + "end_time": None, + "duration": None, + "output": None, + "error": None, + } + # Notifier les clients WebSocket await ws_manager.broadcast({ "type": "task_created", - "data": new_task.dict() + "data": response_data }) - + # Exécuter le playbook Ansible en arrière-plan - playbook = ACTION_PLAYBOOK_MAP.get(task_request.action) if playbook: asyncio.create_task(execute_ansible_task( - task_id=new_task.id, + task_id=task_obj.id, playbook=playbook, - target=new_task.host, + target=target, extra_vars=task_request.extra_vars, check_mode=task_request.dry_run )) else: # Pas de playbook correspondant, simuler - asyncio.create_task(simulate_task_execution(new_task.id)) - - return new_task + asyncio.create_task(simulate_task_execution(task_obj.id)) + + return response_data # ===== ENDPOINTS LOGS DE TÂCHES (MARKDOWN) ===== @@ -3625,75 +3746,152 @@ async def delete_task_log(log_id: str, api_key_valid: bool = Depends(verify_api_ @app.get("/api/tasks/running") -async def get_running_tasks(api_key_valid: bool = Depends(verify_api_key)): +async def get_running_tasks( + api_key_valid: bool = Depends(verify_api_key), + db_session: AsyncSession = Depends(get_db), +): """Récupère uniquement les tâches en cours d'exécution (running ou pending)""" - running_tasks = [t for t in db.tasks if t.status in ("running", "pending")] + repo = TaskRepository(db_session) + tasks = await repo.list(limit=100, offset=0) + running_tasks = [t for t in tasks if t.status in ("running", "pending")] return { - "tasks": [t.dict() for t in running_tasks], + "tasks": [ + { + "id": t.id, + "name": t.action, + "host": t.target, + "status": t.status, + "progress": 50 if t.status == "running" else 0, + "start_time": t.started_at, + "end_time": t.completed_at, + } + for t in running_tasks + ], "count": len(running_tasks) } -@app.get("/api/tasks/{task_id}", response_model=Task) -async def get_task(task_id: int, api_key_valid: bool = Depends(verify_api_key)): +@app.get("/api/tasks/{task_id}") +async def get_task( + task_id: str, + api_key_valid: bool = Depends(verify_api_key), + db_session: AsyncSession = Depends(get_db), +): """Récupère une tâche spécifique""" - task = next((t for t in db.tasks if t.id == task_id), None) + repo = TaskRepository(db_session) + task = await repo.get(task_id) if not task: raise HTTPException(status_code=404, detail="Tâche non trouvée") - return task + return { + "id": task.id, + "name": task.action, + "host": task.target, + "status": task.status, + "progress": 100 if task.status == "completed" else (50 if task.status == "running" else 0), + 
"start_time": task.started_at, + "end_time": task.completed_at, + "duration": None, + "output": task.result_data.get("output") if task.result_data else None, + "error": task.error_message, + } + @app.delete("/api/tasks/{task_id}") -async def delete_task(task_id: int, api_key_valid: bool = Depends(verify_api_key)): - """Supprime une tâche""" - task = next((t for t in db.tasks if t.id == task_id), None) +async def delete_task( + task_id: str, + api_key_valid: bool = Depends(verify_api_key), + db_session: AsyncSession = Depends(get_db), +): + """Supprime une tâche (soft delete non implémenté pour tasks, suppression directe)""" + repo = TaskRepository(db_session) + task = await repo.get(task_id) if not task: raise HTTPException(status_code=404, detail="Tâche non trouvée") - - db.tasks = [t for t in db.tasks if t.id != task_id] - + + await db_session.delete(task) + await db_session.commit() + # Notifier les clients WebSocket await ws_manager.broadcast({ "type": "task_deleted", "data": {"id": task_id} }) - + return {"message": "Tâche supprimée avec succès"} -@app.get("/api/logs", response_model=List[LogEntry]) -async def get_logs(limit: int = 50, api_key_valid: bool = Depends(verify_api_key)): - """Récupère les logs récents""" - return db.logs[:limit] +@app.get("/api/logs") +async def get_logs( + limit: int = 50, + offset: int = 0, + level: Optional[str] = None, + source: Optional[str] = None, + api_key_valid: bool = Depends(verify_api_key), + db_session: AsyncSession = Depends(get_db), +): + """Récupère les logs récents avec filtrage optionnel""" + repo = LogRepository(db_session) + logs = await repo.list(limit=limit, offset=offset, level=level, source=source) + return [ + { + "id": log.id, + "timestamp": log.created_at, + "level": log.level, + "message": log.message, + "source": log.source, + "host": log.host_id, + } + for log in logs + ] + @app.post("/api/logs") -async def create_log(log_entry: LogEntry, api_key_valid: bool = Depends(verify_api_key)): - """Ajoute un nouvel entrée de log""" - log_entry.id = db.get_next_id("logs") - db.logs.insert(0, log_entry) - - # Garder seulement les 100 derniers logs - if len(db.logs) > 100: - db.logs = db.logs[:100] - +async def create_log( + level: str, + message: str, + source: Optional[str] = None, + host_id: Optional[str] = None, + api_key_valid: bool = Depends(verify_api_key), + db_session: AsyncSession = Depends(get_db), +): + """Ajoute une nouvelle entrée de log""" + repo = LogRepository(db_session) + log = await repo.create( + level=level.upper(), + message=message, + source=source, + host_id=host_id, + ) + await db_session.commit() + + response_data = { + "id": log.id, + "timestamp": log.created_at, + "level": log.level, + "message": log.message, + "source": log.source, + "host": log.host_id, + } + # Notifier les clients WebSocket await ws_manager.broadcast({ "type": "new_log", - "data": log_entry.dict() + "data": response_data }) - - return log_entry + + return response_data + @app.delete("/api/logs") -async def clear_logs(api_key_valid: bool = Depends(verify_api_key)): - """Efface tous les logs""" - db.logs = [] - - # Notifier les clients WebSocket - await ws_manager.broadcast({ - "type": "logs_cleared", - "data": {} - }) - - return {"message": "Logs effacés avec succès"} +async def clear_logs( + api_key_valid: bool = Depends(verify_api_key), + db_session: AsyncSession = Depends(get_db), +): + """Efface tous les logs (attention: opération destructive)""" + from sqlalchemy import delete + from models.log import Log as LogModel + await 
db_session.execute(delete(LogModel)) + await db_session.commit() + return {"message": "Tous les logs ont été supprimés"} @app.get("/api/metrics", response_model=SystemMetrics) async def get_metrics(api_key_valid: bool = Depends(verify_api_key)): @@ -4703,47 +4901,60 @@ async def get_schedules( enabled: Optional[bool] = None, playbook: Optional[str] = None, tag: Optional[str] = None, - api_key_valid: bool = Depends(verify_api_key) + limit: int = 100, + offset: int = 0, + api_key_valid: bool = Depends(verify_api_key), ): - """Liste tous les schedules avec filtrage optionnel - - Args: - enabled: Filtrer par statut (true = actifs, false = en pause) - playbook: Filtrer par nom de playbook (recherche partielle) - tag: Filtrer par tag - """ - schedules = scheduler_service.get_all_schedules(enabled=enabled, playbook=playbook, tag=tag) - return { - "schedules": [s.dict() for s in schedules], - "count": len(schedules) - } + """Liste tous les schedules avec filtrage optionnel (via SchedulerService).""" + # Utiliser le SchedulerService comme source de vérité pour next_run_at / last_run_at + schedules = scheduler_service.get_all_schedules( + enabled=enabled, + playbook=playbook, + tag=tag, + ) + + # Pagination simple côté API (les schedules sont déjà triés par next_run_at) + paginated = schedules[offset : offset + limit] + + results = [] + for s in paginated: + rec = s.recurrence + results.append( + { + "id": s.id, + "name": s.name, + "playbook": s.playbook, + "target": s.target, + "schedule_type": s.schedule_type, + "recurrence": rec.model_dump() if rec else None, + "enabled": s.enabled, + "tags": s.tags, + # Champs utilisés par le frontend pour "Prochaine" et historique + "next_run_at": s.next_run_at, + "last_run_at": s.last_run_at, + "last_status": s.last_status, + "run_count": s.run_count, + "success_count": s.success_count, + "failure_count": s.failure_count, + "created_at": s.created_at, + "updated_at": s.updated_at, + } + ) + + return {"schedules": results, "count": len(schedules)} @app.post("/api/schedules") async def create_schedule( request: ScheduleCreateRequest, - api_key_valid: bool = Depends(verify_api_key) + api_key_valid: bool = Depends(verify_api_key), + db_session: AsyncSession = Depends(get_db), ): - """Crée un nouveau schedule - - Exemple de body: - { - "name": "Backup quotidien", - "playbook": "backup-config.yml", - "target": "all", - "schedule_type": "recurring", - "recurrence": { - "type": "daily", - "time": "02:00" - }, - "tags": ["Backup", "Production"] - } - """ + """Crée un nouveau schedule (stocké en DB)""" # Vérifier que le playbook existe playbooks = ansible_service.get_playbooks() playbook_names = [p['filename'] for p in playbooks] + [p['name'] for p in playbooks] - # Normaliser le nom du playbook playbook_file = request.playbook if not playbook_file.endswith(('.yml', '.yaml')): playbook_file = f"{playbook_file}.yml" @@ -4760,40 +4971,71 @@ async def create_schedule( if not ansible_service.host_exists(request.target): raise HTTPException(status_code=400, detail=f"Hôte '{request.target}' non trouvé") - # Valider la récurrence si nécessaire + # Valider la récurrence if request.schedule_type == "recurring" and not request.recurrence: raise HTTPException(status_code=400, detail="La récurrence est requise pour un schedule récurrent") - # Valider l'expression cron si custom if request.recurrence and request.recurrence.type == "custom": if not request.recurrence.cron_expression: raise HTTPException(status_code=400, detail="Expression cron requise pour le type 'custom'") 
validation = scheduler_service.validate_cron_expression(request.recurrence.cron_expression) if not validation["valid"]: raise HTTPException(status_code=400, detail=f"Expression cron invalide: {validation.get('error')}") + + # Créer en DB + repo = ScheduleRepository(db_session) + schedule_id = uuid.uuid4().hex - schedule = scheduler_service.create_schedule(request) - - # Log - log_entry = LogEntry( - id=db.get_next_id("logs"), - timestamp=datetime.now(timezone.utc), - level="INFO", - message=f"Schedule '{schedule.name}' créé pour {schedule.playbook} sur {schedule.target}", - source="scheduler" + recurrence = request.recurrence + schedule_obj = await repo.create( + id=schedule_id, + name=request.name, + playbook=playbook_file, + target=request.target, + schedule_type=request.schedule_type, + schedule_time=request.start_at, + recurrence_type=recurrence.type if recurrence else None, + recurrence_time=recurrence.time if recurrence else None, + recurrence_days=json.dumps(recurrence.days) if recurrence and recurrence.days else None, + cron_expression=recurrence.cron_expression if recurrence else None, + enabled=request.enabled, + tags=json.dumps(request.tags) if request.tags else None, ) - db.logs.insert(0, log_entry) - + await db_session.commit() + + # Aussi créer dans le scheduler_service pour APScheduler + scheduler_service.create_schedule(request) + + # Log en DB + log_repo = LogRepository(db_session) + await log_repo.create( + level="INFO", + message=f"Schedule '{request.name}' créé pour {playbook_file} sur {request.target}", + source="scheduler", + ) + await db_session.commit() + # Notifier via WebSocket await ws_manager.broadcast({ "type": "schedule_created", - "data": schedule.dict() + "data": { + "id": schedule_obj.id, + "name": schedule_obj.name, + "playbook": schedule_obj.playbook, + "target": schedule_obj.target, + } }) - + return { "success": True, - "message": f"Schedule '{schedule.name}' créé avec succès", - "schedule": schedule.dict() + "message": f"Schedule '{request.name}' créé avec succès", + "schedule": { + "id": schedule_obj.id, + "name": schedule_obj.name, + "playbook": schedule_obj.playbook, + "target": schedule_obj.target, + "enabled": schedule_obj.enabled, + } } @@ -4835,27 +5077,52 @@ async def validate_cron_expression( @app.get("/api/schedules/{schedule_id}") async def get_schedule( schedule_id: str, - api_key_valid: bool = Depends(verify_api_key) + api_key_valid: bool = Depends(verify_api_key), + db_session: AsyncSession = Depends(get_db), ): - """Récupère les détails d'un schedule spécifique""" - schedule = scheduler_service.get_schedule(schedule_id) + """Récupère les détails d'un schedule spécifique (depuis DB)""" + repo = ScheduleRepository(db_session) + schedule = await repo.get(schedule_id) if not schedule: raise HTTPException(status_code=404, detail=f"Schedule '{schedule_id}' non trouvé") - return schedule.dict() + return { + "id": schedule.id, + "name": schedule.name, + "playbook": schedule.playbook, + "target": schedule.target, + "schedule_type": schedule.schedule_type, + "recurrence_type": schedule.recurrence_type, + "recurrence_time": schedule.recurrence_time, + "recurrence_days": json.loads(schedule.recurrence_days) if schedule.recurrence_days else None, + "cron_expression": schedule.cron_expression, + "enabled": schedule.enabled, + "tags": json.loads(schedule.tags) if schedule.tags else [], + "next_run": schedule.next_run, + "last_run": schedule.last_run, + "created_at": schedule.created_at, + "updated_at": schedule.updated_at, + } 
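Editor's note: the original `POST /api/schedules` docstring carried an example request body that this patch drops. For readers of the diff, here is a hedged client-side sketch of creating a recurring schedule against the new endpoints. `httpx`, the `X-API-Key` header name, and the key value are assumptions not taken from the patch; the payload fields mirror the removed docstring example and the fields the handler above reads from `ScheduleCreateRequest`.

```python
# Hedged usage sketch: httpx, the "X-API-Key" header name and the key value are
# assumptions; the payload mirrors the removed docstring example and the fields
# read by the POST /api/schedules handler (name, playbook, target,
# schedule_type, recurrence, tags).
import httpx

payload = {
    "name": "Backup quotidien",
    "playbook": "backup-config.yml",
    "target": "all",
    "schedule_type": "recurring",
    "recurrence": {"type": "daily", "time": "02:00"},
    "tags": ["Backup", "Production"],
}

resp = httpx.post(
    "http://localhost:8000/api/schedules",
    json=payload,
    headers={"X-API-Key": "your-api-key"},  # assumed header name and key
    timeout=30.0,
)
resp.raise_for_status()
schedule_id = resp.json()["schedule"]["id"]

# The detail endpoint above returns the decoded recurrence_* fields and tags.
detail = httpx.get(
    f"http://localhost:8000/api/schedules/{schedule_id}",
    headers={"X-API-Key": "your-api-key"},
)
print(detail.json()["recurrence_type"], detail.json()["tags"])
```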
@app.put("/api/schedules/{schedule_id}") async def update_schedule( schedule_id: str, request: ScheduleUpdateRequest, - api_key_valid: bool = Depends(verify_api_key) + api_key_valid: bool = Depends(verify_api_key), + db_session: AsyncSession = Depends(get_db), ): - """Met à jour un schedule existant""" - schedule = scheduler_service.get_schedule(schedule_id) - if not schedule: + """Met à jour un schedule existant (DB + scheduler_service)""" + # Essayer d'abord via SchedulerService (source de vérité) + sched = scheduler_service.get_schedule(schedule_id) + repo = ScheduleRepository(db_session) + schedule = await repo.get(schedule_id) + + if not sched and not schedule: raise HTTPException(status_code=404, detail=f"Schedule '{schedule_id}' non trouvé") - + + schedule_name = sched.name if sched else schedule.name + # Valider le playbook si modifié if request.playbook: playbooks = ansible_service.get_playbooks() @@ -4865,71 +5132,107 @@ async def update_schedule( playbook_file = f"{playbook_file}.yml" if playbook_file not in playbook_names and request.playbook not in playbook_names: raise HTTPException(status_code=400, detail=f"Playbook '{request.playbook}' non trouvé") - + # Valider l'expression cron si modifiée if request.recurrence and request.recurrence.type == "custom": if request.recurrence.cron_expression: validation = scheduler_service.validate_cron_expression(request.recurrence.cron_expression) if not validation["valid"]: raise HTTPException(status_code=400, detail=f"Expression cron invalide: {validation.get('error')}") - - updated = scheduler_service.update_schedule(schedule_id, request) - - # Log - log_entry = LogEntry( - id=db.get_next_id("logs"), - timestamp=datetime.now(timezone.utc), + + # Mettre à jour en DB + update_fields = {} + if request.name: + update_fields["name"] = request.name + if request.playbook: + update_fields["playbook"] = request.playbook + if request.target: + update_fields["target"] = request.target + if request.enabled is not None: + update_fields["enabled"] = request.enabled + if request.tags: + update_fields["tags"] = json.dumps(request.tags) + if request.recurrence: + update_fields["recurrence_type"] = request.recurrence.type + update_fields["recurrence_time"] = request.recurrence.time + update_fields["recurrence_days"] = json.dumps(request.recurrence.days) if request.recurrence.days else None + update_fields["cron_expression"] = request.recurrence.cron_expression + + # Mettre à jour en DB si présent + if schedule: + await repo.update(schedule, **update_fields) + await db_session.commit() + + # Aussi mettre à jour dans scheduler_service pour APScheduler + scheduler_service.update_schedule(schedule_id, request) + + # Log en DB + log_repo = LogRepository(db_session) + await log_repo.create( level="INFO", - message=f"Schedule '{updated.name}' mis à jour", - source="scheduler" + message=f"Schedule '{schedule_name}' mis à jour", + source="scheduler", ) - db.logs.insert(0, log_entry) - + await db_session.commit() + # Notifier via WebSocket await ws_manager.broadcast({ "type": "schedule_updated", - "data": updated.dict() + "data": {"id": schedule_id, "name": schedule_name} }) - + return { "success": True, - "message": f"Schedule '{updated.name}' mis à jour", - "schedule": updated.dict() + "message": f"Schedule '{schedule_name}' mis à jour", + "schedule": {"id": schedule_id, "name": schedule_name} } @app.delete("/api/schedules/{schedule_id}") async def delete_schedule( schedule_id: str, - api_key_valid: bool = Depends(verify_api_key) + api_key_valid: bool = 
Depends(verify_api_key), + db_session: AsyncSession = Depends(get_db), ): - """Supprime un schedule""" - schedule = scheduler_service.get_schedule(schedule_id) + """Supprime un schedule (soft delete en DB + suppression scheduler_service)""" + repo = ScheduleRepository(db_session) + schedule = await repo.get(schedule_id) if not schedule: - raise HTTPException(status_code=404, detail=f"Schedule '{schedule_id}' non trouvé") - + # Aucun enregistrement en DB, mais on tente tout de même de le supprimer + # du SchedulerService (cas des anciens IDs internes du scheduler). + try: + scheduler_service.delete_schedule(schedule_id) + except Exception: + pass + return { + "success": True, + "message": f"Schedule '{schedule_id}' déjà supprimé ou inexistant en base, nettoyage scheduler effectué." + } + schedule_name = schedule.name - success = scheduler_service.delete_schedule(schedule_id) - - if not success: - raise HTTPException(status_code=500, detail="Erreur lors de la suppression") - - # Log - log_entry = LogEntry( - id=db.get_next_id("logs"), - timestamp=datetime.now(timezone.utc), + + # Soft delete en DB + await repo.soft_delete(schedule_id) + await db_session.commit() + + # Supprimer du scheduler_service + scheduler_service.delete_schedule(schedule_id) + + # Log en DB + log_repo = LogRepository(db_session) + await log_repo.create( level="WARN", message=f"Schedule '{schedule_name}' supprimé", - source="scheduler" + source="scheduler", ) - db.logs.insert(0, log_entry) - + await db_session.commit() + # Notifier via WebSocket await ws_manager.broadcast({ "type": "schedule_deleted", "data": {"id": schedule_id, "name": schedule_name} }) - + return { "success": True, "message": f"Schedule '{schedule_name}' supprimé" @@ -4939,19 +5242,28 @@ async def delete_schedule( @app.post("/api/schedules/{schedule_id}/run") async def run_schedule_now( schedule_id: str, - api_key_valid: bool = Depends(verify_api_key) + api_key_valid: bool = Depends(verify_api_key), + db_session: AsyncSession = Depends(get_db), ): """Exécute immédiatement un schedule (exécution forcée)""" - schedule = scheduler_service.get_schedule(schedule_id) - if not schedule: - raise HTTPException(status_code=404, detail=f"Schedule '{schedule_id}' non trouvé") - - # Lancer l'exécution en arrière-plan + # Essayer d'abord via SchedulerService (source de vérité) + sched = scheduler_service.get_schedule(schedule_id) + if not sched: + # Fallback sur la DB + repo = ScheduleRepository(db_session) + schedule = await repo.get(schedule_id) + if not schedule: + raise HTTPException(status_code=404, detail=f"Schedule '{schedule_id}' non trouvé") + schedule_name = schedule.name + else: + schedule_name = sched.name + + # Lancer l'exécution via scheduler_service run = await scheduler_service.run_now(schedule_id) - + return { "success": True, - "message": f"Schedule '{schedule.name}' lancé", + "message": f"Schedule '{schedule_name}' lancé", "run": run.dict() if run else None } @@ -4959,66 +5271,94 @@ async def run_schedule_now( @app.post("/api/schedules/{schedule_id}/pause") async def pause_schedule( schedule_id: str, - api_key_valid: bool = Depends(verify_api_key) + api_key_valid: bool = Depends(verify_api_key), + db_session: AsyncSession = Depends(get_db), ): """Met en pause un schedule""" - schedule = scheduler_service.pause_schedule(schedule_id) - if not schedule: + # Essayer d'abord via SchedulerService (source de vérité) + sched = scheduler_service.get_schedule(schedule_id) + repo = ScheduleRepository(db_session) + schedule = await repo.get(schedule_id) + + 
if not sched and not schedule: raise HTTPException(status_code=404, detail=f"Schedule '{schedule_id}' non trouvé") - - # Log - log_entry = LogEntry( - id=db.get_next_id("logs"), - timestamp=datetime.now(timezone.utc), + + schedule_name = sched.name if sched else schedule.name + + # Mettre à jour en DB si présent + if schedule: + await repo.update(schedule, enabled=False) + await db_session.commit() + + # Mettre à jour dans scheduler_service + scheduler_service.pause_schedule(schedule_id) + + # Log en DB + log_repo = LogRepository(db_session) + await log_repo.create( level="INFO", - message=f"Schedule '{schedule.name}' mis en pause", - source="scheduler" + message=f"Schedule '{schedule_name}' mis en pause", + source="scheduler", ) - db.logs.insert(0, log_entry) - + await db_session.commit() + # Notifier via WebSocket await ws_manager.broadcast({ "type": "schedule_updated", - "data": schedule.dict() + "data": {"id": schedule_id, "name": schedule_name, "enabled": False} }) - + return { "success": True, - "message": f"Schedule '{schedule.name}' mis en pause", - "schedule": schedule.dict() + "message": f"Schedule '{schedule_name}' mis en pause", + "schedule": {"id": schedule_id, "name": schedule_name, "enabled": False} } @app.post("/api/schedules/{schedule_id}/resume") async def resume_schedule( schedule_id: str, - api_key_valid: bool = Depends(verify_api_key) + api_key_valid: bool = Depends(verify_api_key), + db_session: AsyncSession = Depends(get_db), ): """Reprend un schedule en pause""" - schedule = scheduler_service.resume_schedule(schedule_id) - if not schedule: + # Essayer d'abord via SchedulerService (source de vérité) + sched = scheduler_service.get_schedule(schedule_id) + repo = ScheduleRepository(db_session) + schedule = await repo.get(schedule_id) + + if not sched and not schedule: raise HTTPException(status_code=404, detail=f"Schedule '{schedule_id}' non trouvé") - - # Log - log_entry = LogEntry( - id=db.get_next_id("logs"), - timestamp=datetime.now(timezone.utc), + + schedule_name = sched.name if sched else schedule.name + + # Mettre à jour en DB si présent + if schedule: + await repo.update(schedule, enabled=True) + await db_session.commit() + + # Mettre à jour dans scheduler_service + scheduler_service.resume_schedule(schedule_id) + + # Log en DB + log_repo = LogRepository(db_session) + await log_repo.create( level="INFO", - message=f"Schedule '{schedule.name}' repris", - source="scheduler" + message=f"Schedule '{schedule_name}' repris", + source="scheduler", ) - db.logs.insert(0, log_entry) - + await db_session.commit() + # Notifier via WebSocket await ws_manager.broadcast({ "type": "schedule_updated", - "data": schedule.dict() + "data": {"id": schedule_id, "name": schedule_name, "enabled": True} }) - + return { "success": True, - "message": f"Schedule '{schedule.name}' repris", - "schedule": schedule.dict() + "message": f"Schedule '{schedule_name}' repris", + "schedule": {"id": schedule_id, "name": schedule_name, "enabled": True} } @@ -5026,20 +5366,39 @@ async def resume_schedule( async def get_schedule_runs( schedule_id: str, limit: int = 50, - api_key_valid: bool = Depends(verify_api_key) + offset: int = 0, + api_key_valid: bool = Depends(verify_api_key), + db_session: AsyncSession = Depends(get_db), ): - """Récupère l'historique des exécutions d'un schedule""" - schedule = scheduler_service.get_schedule(schedule_id) - if not schedule: + """Récupère l'historique des exécutions d'un schedule (depuis DB ou SchedulerService)""" + # Essayer d'abord via SchedulerService (source 
de vérité) + sched = scheduler_service.get_schedule(schedule_id) + repo = ScheduleRepository(db_session) + schedule = await repo.get(schedule_id) + + if not sched and not schedule: raise HTTPException(status_code=404, detail=f"Schedule '{schedule_id}' non trouvé") - - runs = scheduler_service.get_schedule_runs(schedule_id, limit=limit) - + + schedule_name = sched.name if sched else schedule.name + + # Récupérer les runs depuis le SchedulerService (JSON) si pas en DB + runs_from_service = scheduler_service.get_runs_for_schedule(schedule_id, limit=limit) + return { "schedule_id": schedule_id, - "schedule_name": schedule.name, - "runs": [r.dict() for r in runs], - "count": len(runs) + "schedule_name": schedule_name, + "runs": [ + { + "id": r.get("id"), + "status": r.get("status"), + "started_at": r.get("started_at"), + "finished_at": r.get("finished_at"), + "duration_seconds": r.get("duration_seconds"), + "error_message": r.get("error_message"), + } + for r in runs_from_service + ], + "count": len(runs_from_service) } @@ -5049,20 +5408,25 @@ async def get_schedule_runs( async def startup_event(): """Événement de démarrage de l'application""" print("🚀 Homelab Automation Dashboard démarré") - + + # Initialiser la base de données (créer les tables si nécessaire) + await init_db() + print("📦 Base de données SQLite initialisée") + # Démarrer le scheduler scheduler_service.start() - - # Log de démarrage - log_entry = LogEntry( - id=db.get_next_id("logs"), - timestamp=datetime.now(timezone.utc), - level="INFO", - message="Application démarrée - Scheduler initialisé", - source="system" - ) - db.logs.insert(0, log_entry) - + + # Log de démarrage en base + from models.database import async_session_maker + async with async_session_maker() as session: + repo = LogRepository(session) + await repo.create( + level="INFO", + message="Application démarrée - Scheduler initialisé", + source="system", + ) + await session.commit() + # Nettoyer les anciennes exécutions (>90 jours) cleaned = scheduler_service.cleanup_old_runs(days=90) if cleaned > 0: @@ -5073,10 +5437,10 @@ async def startup_event(): async def shutdown_event(): """Événement d'arrêt de l'application""" print("👋 Arrêt de l'application...") - + # Arrêter le scheduler scheduler_service.shutdown() - + print("✅ Scheduler arrêté proprement") diff --git a/app/crud/__init__.py b/app/crud/__init__.py new file mode 100644 index 0000000..7935f69 --- /dev/null +++ b/app/crud/__init__.py @@ -0,0 +1,13 @@ +from .host import HostRepository +from .bootstrap_status import BootstrapStatusRepository +from .task import TaskRepository +from .schedule import ScheduleRepository +from .log import LogRepository + +__all__ = [ + "HostRepository", + "BootstrapStatusRepository", + "TaskRepository", + "ScheduleRepository", + "LogRepository", +] diff --git a/app/crud/bootstrap_status.py b/app/crud/bootstrap_status.py new file mode 100644 index 0000000..d75e798 --- /dev/null +++ b/app/crud/bootstrap_status.py @@ -0,0 +1,41 @@ +from __future__ import annotations + +from typing import Optional + +from sqlalchemy import select +from sqlalchemy.ext.asyncio import AsyncSession + +from models.bootstrap_status import BootstrapStatus + + +class BootstrapStatusRepository: + def __init__(self, session: AsyncSession): + self.session = session + + async def list_for_host(self, host_id: str) -> list[BootstrapStatus]: + stmt = select(BootstrapStatus).where(BootstrapStatus.host_id == host_id).order_by(BootstrapStatus.created_at.desc()) + result = await self.session.execute(stmt) + return 
result.scalars().all() + + async def latest_for_host(self, host_id: str) -> Optional[BootstrapStatus]: + stmt = ( + select(BootstrapStatus) + .where(BootstrapStatus.host_id == host_id) + .order_by(BootstrapStatus.created_at.desc()) + .limit(1) + ) + result = await self.session.execute(stmt) + return result.scalar_one_or_none() + + async def create(self, *, host_id: str, status: str, automation_user: Optional[str] = None, + last_attempt=None, error_message: Optional[str] = None) -> BootstrapStatus: + record = BootstrapStatus( + host_id=host_id, + status=status, + automation_user=automation_user, + last_attempt=last_attempt, + error_message=error_message, + ) + self.session.add(record) + await self.session.flush() + return record diff --git a/app/crud/host.py b/app/crud/host.py new file mode 100644 index 0000000..7002e20 --- /dev/null +++ b/app/crud/host.py @@ -0,0 +1,67 @@ +from __future__ import annotations + +from datetime import datetime, timezone +from typing import Iterable, Optional + +from sqlalchemy import select, update +from sqlalchemy.ext.asyncio import AsyncSession +from sqlalchemy.orm import selectinload + +from models.host import Host + + +class HostRepository: + def __init__(self, session: AsyncSession): + self.session = session + + async def list(self, limit: int = 100, offset: int = 0, include_deleted: bool = False) -> list[Host]: + stmt = select(Host).order_by(Host.created_at.desc()).offset(offset).limit(limit) + if not include_deleted: + stmt = stmt.where(Host.deleted_at.is_(None)) + result = await self.session.execute(stmt) + return result.scalars().all() + + async def get(self, host_id: str, include_deleted: bool = False) -> Optional[Host]: + stmt = select(Host).where(Host.id == host_id).options(selectinload(Host.bootstrap_statuses)) + if not include_deleted: + stmt = stmt.where(Host.deleted_at.is_(None)) + result = await self.session.execute(stmt) + return result.scalar_one_or_none() + + async def get_by_ip(self, ip_address: str, include_deleted: bool = False) -> Optional[Host]: + stmt = select(Host).where(Host.ip_address == ip_address) + if not include_deleted: + stmt = stmt.where(Host.deleted_at.is_(None)) + result = await self.session.execute(stmt) + return result.scalar_one_or_none() + + async def create(self, *, id: str, name: str, ip_address: str, ansible_group: Optional[str] = None, + status: str = "unknown", reachable: bool = False, last_seen: Optional[datetime] = None) -> Host: + host = Host( + id=id, + name=name, + ip_address=ip_address, + ansible_group=ansible_group, + status=status, + reachable=reachable, + last_seen=last_seen, + ) + self.session.add(host) + await self.session.flush() + return host + + async def update(self, host: Host, **fields) -> Host: + for key, value in fields.items(): + if value is not None: + setattr(host, key, value) + await self.session.flush() + return host + + async def soft_delete(self, host_id: str) -> bool: + stmt = ( + update(Host) + .where(Host.id == host_id, Host.deleted_at.is_(None)) + .values(deleted_at=datetime.now(timezone.utc)) + ) + result = await self.session.execute(stmt) + return result.rowcount > 0 diff --git a/app/crud/log.py b/app/crud/log.py new file mode 100644 index 0000000..781ccee --- /dev/null +++ b/app/crud/log.py @@ -0,0 +1,28 @@ +from __future__ import annotations + +from typing import Optional + +from sqlalchemy import select +from sqlalchemy.ext.asyncio import AsyncSession + +from models.log import Log + + +class LogRepository: + def __init__(self, session: AsyncSession): + self.session = session + + 
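+    # NOTE: as with the other repositories in app/crud, methods only flush the
+    # session so that autogenerated primary keys become available; committing the
+    # transaction is left to the caller (the FastAPI endpoints call
+    # db_session.commit() after using the repository).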
async def list(self, limit: int = 100, offset: int = 0, level: Optional[str] = None, source: Optional[str] = None) -> list[Log]: + stmt = select(Log).order_by(Log.created_at.desc()).offset(offset).limit(limit) + if level: + stmt = stmt.where(Log.level == level) + if source: + stmt = stmt.where(Log.source == source) + result = await self.session.execute(stmt) + return result.scalars().all() + + async def create(self, **fields) -> Log: + log = Log(**fields) + self.session.add(log) + await self.session.flush() + return log diff --git a/app/crud/schedule.py b/app/crud/schedule.py new file mode 100644 index 0000000..155c8ce --- /dev/null +++ b/app/crud/schedule.py @@ -0,0 +1,60 @@ +from __future__ import annotations + +from datetime import datetime, timezone +from typing import Optional + +from sqlalchemy import select, update +from sqlalchemy.ext.asyncio import AsyncSession +from sqlalchemy.orm import selectinload + +from models.schedule import Schedule +from models.schedule_run import ScheduleRun + + +class ScheduleRepository: + def __init__(self, session: AsyncSession): + self.session = session + + async def list(self, limit: int = 100, offset: int = 0, include_deleted: bool = False) -> list[Schedule]: + stmt = select(Schedule).order_by(Schedule.created_at.desc()).offset(offset).limit(limit) + if not include_deleted: + stmt = stmt.where(Schedule.deleted_at.is_(None)) + result = await self.session.execute(stmt) + return result.scalars().all() + + async def get(self, schedule_id: str, include_deleted: bool = False) -> Optional[Schedule]: + stmt = select(Schedule).where(Schedule.id == schedule_id).options( + selectinload(Schedule.runs) + ) + if not include_deleted: + stmt = stmt.where(Schedule.deleted_at.is_(None)) + result = await self.session.execute(stmt) + return result.scalar_one_or_none() + + async def create(self, **fields) -> Schedule: + schedule = Schedule(**fields) + self.session.add(schedule) + await self.session.flush() + return schedule + + async def update(self, schedule: Schedule, **fields) -> Schedule: + for key, value in fields.items(): + if value is not None: + setattr(schedule, key, value) + await self.session.flush() + return schedule + + async def soft_delete(self, schedule_id: str) -> bool: + stmt = ( + update(Schedule) + .where(Schedule.id == schedule_id, Schedule.deleted_at.is_(None)) + .values(deleted_at=datetime.now(timezone.utc)) + ) + result = await self.session.execute(stmt) + return result.rowcount > 0 + + async def add_run(self, **fields) -> ScheduleRun: + run = ScheduleRun(**fields) + self.session.add(run) + await self.session.flush() + return run diff --git a/app/crud/schedule_run.py b/app/crud/schedule_run.py new file mode 100644 index 0000000..8a6da8f --- /dev/null +++ b/app/crud/schedule_run.py @@ -0,0 +1,35 @@ +from __future__ import annotations + +from typing import Optional + +from sqlalchemy import select +from sqlalchemy.ext.asyncio import AsyncSession + +from models.schedule_run import ScheduleRun + + +class ScheduleRunRepository: + def __init__(self, session: AsyncSession): + self.session = session + + async def get(self, run_id: int) -> Optional[ScheduleRun]: + stmt = select(ScheduleRun).where(ScheduleRun.id == run_id) + result = await self.session.execute(stmt) + return result.scalar_one_or_none() + + async def list_for_schedule(self, schedule_id: str, limit: int = 100, offset: int = 0) -> list[ScheduleRun]: + stmt = ( + select(ScheduleRun) + .where(ScheduleRun.schedule_id == schedule_id) + .order_by(ScheduleRun.started_at.desc()) + .offset(offset) + 
.limit(limit) + ) + result = await self.session.execute(stmt) + return result.scalars().all() + + async def create(self, **fields) -> ScheduleRun: + run = ScheduleRun(**fields) + self.session.add(run) + await self.session.flush() + return run diff --git a/app/crud/task.py b/app/crud/task.py new file mode 100644 index 0000000..083b462 --- /dev/null +++ b/app/crud/task.py @@ -0,0 +1,44 @@ +from __future__ import annotations + +from typing import Optional + +from sqlalchemy import select +from sqlalchemy.ext.asyncio import AsyncSession +from sqlalchemy.orm import selectinload + +from models.task import Task + + +class TaskRepository: + def __init__(self, session: AsyncSession): + self.session = session + + async def list(self, limit: int = 100, offset: int = 0) -> list[Task]: + stmt = select(Task).order_by(Task.created_at.desc()).offset(offset).limit(limit) + result = await self.session.execute(stmt) + return result.scalars().all() + + async def get(self, task_id: str) -> Optional[Task]: + stmt = select(Task).where(Task.id == task_id).options(selectinload(Task.schedule_runs)) + result = await self.session.execute(stmt) + return result.scalar_one_or_none() + + async def create(self, *, id: str, action: str, target: str, playbook: Optional[str] = None, + status: str = "pending") -> Task: + task = Task( + id=id, + action=action, + target=target, + playbook=playbook, + status=status, + ) + self.session.add(task) + await self.session.flush() + return task + + async def update(self, task: Task, **fields) -> Task: + for key, value in fields.items(): + if value is not None: + setattr(task, key, value) + await self.session.flush() + return task diff --git a/app/models/__init__.py b/app/models/__init__.py new file mode 100644 index 0000000..6b2b789 --- /dev/null +++ b/app/models/__init__.py @@ -0,0 +1,17 @@ +from .database import Base +from .host import Host +from .bootstrap_status import BootstrapStatus +from .task import Task +from .schedule import Schedule +from .schedule_run import ScheduleRun +from .log import Log + +__all__ = [ + "Base", + "Host", + "BootstrapStatus", + "Task", + "Schedule", + "ScheduleRun", + "Log", +] diff --git a/app/models/bootstrap_status.py b/app/models/bootstrap_status.py new file mode 100644 index 0000000..63a6b7c --- /dev/null +++ b/app/models/bootstrap_status.py @@ -0,0 +1,27 @@ +from __future__ import annotations + +from datetime import datetime +from typing import Optional + +from sqlalchemy import DateTime, ForeignKey, Integer, String, Text +from sqlalchemy.orm import Mapped, mapped_column, relationship +from sqlalchemy.sql import func + +from .database import Base + + +class BootstrapStatus(Base): + __tablename__ = "bootstrap_status" + + id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True) + host_id: Mapped[str] = mapped_column(String, ForeignKey("hosts.id", ondelete="CASCADE"), nullable=False) + status: Mapped[str] = mapped_column(String, nullable=False) + automation_user: Mapped[Optional[str]] = mapped_column(String, nullable=True) + last_attempt: Mapped[Optional[datetime]] = mapped_column(DateTime(timezone=True)) + error_message: Mapped[Optional[str]] = mapped_column(Text, nullable=True) + created_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), nullable=False, server_default=func.now()) + + host: Mapped["Host"] = relationship("Host", back_populates="bootstrap_statuses") + + def __repr__(self) -> str: # pragma: no cover - debug helper + return f"" diff --git a/app/models/database.py b/app/models/database.py new file mode 
100644 index 0000000..303f7a1 --- /dev/null +++ b/app/models/database.py @@ -0,0 +1,106 @@ +"""Database configuration and session management for Homelab Automation. +Uses SQLAlchemy 2.x async engine with SQLite + aiosqlite driver. +""" +from __future__ import annotations + +import os +from pathlib import Path +from typing import AsyncGenerator +from urllib.parse import urlparse + +from sqlalchemy import event, MetaData +from sqlalchemy.ext.asyncio import AsyncEngine, AsyncSession, async_sessionmaker, create_async_engine +from sqlalchemy.orm import declarative_base + +# Naming convention to keep Alembic happy with constraints +NAMING_CONVENTION = { + "ix": "ix_%(column_0_label)s", + "uq": "uq_%(table_name)s_%(column_0_name)s", + "ck": "ck_%(table_name)s_%(constraint_name)s", + "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s", + "pk": "pk_%(table_name)s", +} + +metadata_obj = MetaData(naming_convention=NAMING_CONVENTION) +Base = declarative_base(metadata=metadata_obj) + +# Resolve base path (project root) +ROOT_DIR = Path(__file__).resolve().parents[2] +DEFAULT_DB_PATH = Path(os.environ.get("DB_PATH") or (ROOT_DIR / "data" / "homelab.db")) +DATABASE_URL = os.environ.get("DATABASE_URL", f"sqlite+aiosqlite:///{DEFAULT_DB_PATH}") + +# Ensure SQLite directory exists even if DATABASE_URL overrides DB_PATH +def _ensure_sqlite_dir(db_url: str) -> None: + if not db_url.startswith("sqlite"): + return + parsed = urlparse(db_url.replace("sqlite+aiosqlite", "sqlite")) + if parsed.scheme != "sqlite": + return + db_path = Path(parsed.path) + if db_path.parent: + db_path.parent.mkdir(parents=True, exist_ok=True) + +DEFAULT_DB_PATH.parent.mkdir(parents=True, exist_ok=True) +_ensure_sqlite_dir(DATABASE_URL) + + +def _debug_db_paths() -> None: + try: + print( + "[DB] DATABASE_URL=%s, DEFAULT_DB_PATH=%s, parent_exists=%s, parent=%s" + % ( + DATABASE_URL, + DEFAULT_DB_PATH, + DEFAULT_DB_PATH.parent.exists(), + DEFAULT_DB_PATH.parent, + ) + ) + except Exception: + # Debug logging should never break startup + pass + + +_debug_db_paths() + +engine: AsyncEngine = create_async_engine( + DATABASE_URL, + echo=False, + pool_pre_ping=True, + future=True, +) + +# Ensure SQLite pragmas (WAL + FK) when using SQLite +if DATABASE_URL.startswith("sqlite"): + @event.listens_for(engine.sync_engine, "connect") + def _set_sqlite_pragmas(dbapi_connection, connection_record): # type: ignore[override] + cursor = dbapi_connection.cursor() + cursor.execute("PRAGMA foreign_keys=ON") + cursor.execute("PRAGMA journal_mode=WAL") + cursor.close() + +async_session_maker = async_sessionmaker( + bind=engine, + autoflush=False, + expire_on_commit=False, + class_=AsyncSession, +) + + +async def get_db() -> AsyncGenerator[AsyncSession, None]: + """FastAPI dependency that yields an AsyncSession with automatic rollback on error.""" + async with async_session_maker() as session: # type: AsyncSession + try: + yield session + except Exception: + await session.rollback() + raise + finally: + await session.close() + + +async def init_db() -> None: + """Create all tables (mostly for dev/tests; migrations should be handled by Alembic).""" + from . 
import host, task, schedule, schedule_run, log # noqa: F401 + + async with engine.begin() as conn: + await conn.run_sync(Base.metadata.create_all) diff --git a/app/models/host.py b/app/models/host.py new file mode 100644 index 0000000..bdd77a8 --- /dev/null +++ b/app/models/host.py @@ -0,0 +1,33 @@ +from __future__ import annotations + +from datetime import datetime +from typing import List, Optional + +from sqlalchemy import Boolean, DateTime, String, text +from sqlalchemy.orm import Mapped, mapped_column, relationship +from sqlalchemy.sql import func + +from .database import Base + + +class Host(Base): + __tablename__ = "hosts" + + id: Mapped[str] = mapped_column(String, primary_key=True) + name: Mapped[str] = mapped_column(String, nullable=False) + ip_address: Mapped[str] = mapped_column(String, nullable=False, unique=True) + status: Mapped[str] = mapped_column(String, nullable=False, server_default=text("'unknown'")) + ansible_group: Mapped[Optional[str]] = mapped_column(String, nullable=True) + last_seen: Mapped[Optional[datetime]] = mapped_column(DateTime(timezone=True)) + reachable: Mapped[bool] = mapped_column(Boolean, nullable=False, server_default=text("0")) + created_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), nullable=False, server_default=func.now()) + updated_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), nullable=False, server_default=func.now(), onupdate=func.now()) + deleted_at: Mapped[Optional[datetime]] = mapped_column(DateTime(timezone=True)) + + bootstrap_statuses: Mapped[List["BootstrapStatus"]] = relationship( + "BootstrapStatus", back_populates="host", cascade="all, delete-orphan" + ) + logs: Mapped[List["Log"]] = relationship("Log", back_populates="host") + + def __repr__(self) -> str: # pragma: no cover - debug helper + return f"" diff --git a/app/models/log.py b/app/models/log.py new file mode 100644 index 0000000..511f76c --- /dev/null +++ b/app/models/log.py @@ -0,0 +1,36 @@ +from __future__ import annotations + +from datetime import datetime +from typing import Optional + +from sqlalchemy import DateTime, ForeignKey, Integer, JSON, String, Text, Index +from sqlalchemy.orm import Mapped, mapped_column, relationship +from sqlalchemy.sql import func + +from .database import Base + + +class Log(Base): + __tablename__ = "logs" + __table_args__ = ( + Index("idx_logs_created_at", "created_at"), + Index("idx_logs_level", "level"), + Index("idx_logs_source", "source"), + ) + + id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True) + level: Mapped[str] = mapped_column(String, nullable=False) + source: Mapped[Optional[str]] = mapped_column(String) + message: Mapped[str] = mapped_column(Text, nullable=False) + details: Mapped[Optional[dict]] = mapped_column(JSON) + host_id: Mapped[Optional[str]] = mapped_column(String, ForeignKey("hosts.id", ondelete="SET NULL")) + task_id: Mapped[Optional[str]] = mapped_column(String, ForeignKey("tasks.id", ondelete="SET NULL")) + schedule_id: Mapped[Optional[str]] = mapped_column(String, ForeignKey("schedules.id", ondelete="SET NULL")) + created_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), nullable=False, server_default=func.now()) + + host: Mapped[Optional["Host"]] = relationship("Host", back_populates="logs") + task: Mapped[Optional["Task"]] = relationship("Task", back_populates="logs") + schedule: Mapped[Optional["Schedule"]] = relationship("Schedule", back_populates="logs") + + def __repr__(self) -> str: # pragma: no cover - debug helper + return f"" diff 
--git a/app/models/schedule.py b/app/models/schedule.py new file mode 100644 index 0000000..8af7007 --- /dev/null +++ b/app/models/schedule.py @@ -0,0 +1,40 @@ +from __future__ import annotations + +from datetime import datetime +from typing import List, Optional + +from sqlalchemy import Boolean, DateTime, String, Text +from sqlalchemy.orm import Mapped, mapped_column, relationship +from sqlalchemy.sql import func + +from .database import Base + + +class Schedule(Base): + __tablename__ = "schedules" + + id: Mapped[str] = mapped_column(String, primary_key=True) + name: Mapped[str] = mapped_column(String, nullable=False) + playbook: Mapped[str] = mapped_column(String, nullable=False) + target: Mapped[str] = mapped_column(String, nullable=False) + schedule_type: Mapped[str] = mapped_column(String, nullable=False) + schedule_time: Mapped[Optional[datetime]] = mapped_column(DateTime(timezone=True)) + recurrence_type: Mapped[Optional[str]] = mapped_column(String) + recurrence_time: Mapped[Optional[str]] = mapped_column(String) + recurrence_days: Mapped[Optional[str]] = mapped_column(Text) + cron_expression: Mapped[Optional[str]] = mapped_column(String) + enabled: Mapped[bool] = mapped_column(Boolean, nullable=False, default=True) + tags: Mapped[Optional[str]] = mapped_column(Text) + next_run: Mapped[Optional[datetime]] = mapped_column(DateTime(timezone=True)) + last_run: Mapped[Optional[datetime]] = mapped_column(DateTime(timezone=True)) + created_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), nullable=False, server_default=func.now()) + updated_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), nullable=False, server_default=func.now(), onupdate=func.now()) + deleted_at: Mapped[Optional[datetime]] = mapped_column(DateTime(timezone=True)) + + runs: Mapped[List["ScheduleRun"]] = relationship( + "ScheduleRun", back_populates="schedule", cascade="all, delete-orphan" + ) + logs: Mapped[List["Log"]] = relationship("Log", back_populates="schedule") + + def __repr__(self) -> str: # pragma: no cover - debug helper + return f"" diff --git a/app/models/schedule_run.py b/app/models/schedule_run.py new file mode 100644 index 0000000..431875f --- /dev/null +++ b/app/models/schedule_run.py @@ -0,0 +1,31 @@ +from __future__ import annotations + +from datetime import datetime +from typing import Optional + +from sqlalchemy import DateTime, ForeignKey, Integer, String, Text, Float +from sqlalchemy.orm import Mapped, mapped_column, relationship +from sqlalchemy.sql import func + +from .database import Base + + +class ScheduleRun(Base): + __tablename__ = "schedule_runs" + + id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True) + schedule_id: Mapped[str] = mapped_column(String, ForeignKey("schedules.id", ondelete="CASCADE"), nullable=False) + task_id: Mapped[Optional[str]] = mapped_column(String, ForeignKey("tasks.id", ondelete="SET NULL")) + status: Mapped[str] = mapped_column(String, nullable=False) + started_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), nullable=False) + completed_at: Mapped[Optional[datetime]] = mapped_column(DateTime(timezone=True)) + duration: Mapped[Optional[float]] = mapped_column(Float) + error_message: Mapped[Optional[str]] = mapped_column(Text) + output: Mapped[Optional[str]] = mapped_column(Text) + created_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), nullable=False, server_default=func.now()) + + schedule: Mapped["Schedule"] = relationship("Schedule", back_populates="runs") + task: 
Mapped[Optional["Task"]] = relationship("Task", back_populates="schedule_runs") + + def __repr__(self) -> str: # pragma: no cover - debug helper + return f"" diff --git a/app/models/task.py b/app/models/task.py new file mode 100644 index 0000000..339dcc1 --- /dev/null +++ b/app/models/task.py @@ -0,0 +1,31 @@ +from __future__ import annotations + +from datetime import datetime +from typing import Optional + +from sqlalchemy import DateTime, ForeignKey, String, Text, JSON, text +from sqlalchemy.orm import Mapped, mapped_column, relationship +from sqlalchemy.sql import func + +from .database import Base + + +class Task(Base): + __tablename__ = "tasks" + + id: Mapped[str] = mapped_column(String, primary_key=True) + action: Mapped[str] = mapped_column(String, nullable=False) + target: Mapped[str] = mapped_column(String, nullable=False) + status: Mapped[str] = mapped_column(String, nullable=False, server_default=text("'pending'")) + playbook: Mapped[Optional[str]] = mapped_column(String) + started_at: Mapped[Optional[datetime]] = mapped_column(DateTime(timezone=True)) + completed_at: Mapped[Optional[datetime]] = mapped_column(DateTime(timezone=True)) + error_message: Mapped[Optional[str]] = mapped_column(Text) + result_data: Mapped[Optional[dict]] = mapped_column(JSON) + created_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), nullable=False, server_default=func.now()) + + schedule_runs: Mapped[list["ScheduleRun"]] = relationship("ScheduleRun", back_populates="task") + logs: Mapped[list["Log"]] = relationship("Log", back_populates="task") + + def __repr__(self) -> str: # pragma: no cover - debug helper + return f"" diff --git a/app/requirements.txt b/app/requirements.txt index 5bb4219..d9cd604 100644 --- a/app/requirements.txt +++ b/app/requirements.txt @@ -10,4 +10,9 @@ requests>=2.32.0 httpx>=0.28.0 apscheduler>=3.10.0 croniter>=2.0.0 -pytz>=2024.1 \ No newline at end of file +pytz>=2024.1 +sqlalchemy>=2.0.0 +alembic>=1.12.0 +aiosqlite>=0.19.0 +pytest>=7.0.0 +pytest-asyncio>=0.21.0 \ No newline at end of file diff --git a/app/schemas/__init__.py b/app/schemas/__init__.py new file mode 100644 index 0000000..d4a9ee5 --- /dev/null +++ b/app/schemas/__init__.py @@ -0,0 +1,21 @@ +from .host import HostCreate, HostUpdate, HostOut +from .bootstrap_status import BootstrapStatusOut +from .task import TaskCreate, TaskUpdate, TaskOut +from .schedule import ScheduleCreate, ScheduleUpdate, ScheduleOut, ScheduleRunOut +from .log import LogCreate, LogOut + +__all__ = [ + "HostCreate", + "HostUpdate", + "HostOut", + "BootstrapStatusOut", + "TaskCreate", + "TaskUpdate", + "TaskOut", + "ScheduleCreate", + "ScheduleUpdate", + "ScheduleOut", + "ScheduleRunOut", + "LogCreate", + "LogOut", +] diff --git a/app/schemas/bootstrap_status.py b/app/schemas/bootstrap_status.py new file mode 100644 index 0000000..4e59972 --- /dev/null +++ b/app/schemas/bootstrap_status.py @@ -0,0 +1,18 @@ +from __future__ import annotations + +from datetime import datetime +from typing import Optional + +from pydantic import BaseModel, ConfigDict + + +class BootstrapStatusOut(BaseModel): + id: int + host_id: str + status: str + automation_user: Optional[str] = None + last_attempt: Optional[datetime] = None + error_message: Optional[str] = None + created_at: datetime + + model_config = ConfigDict(from_attributes=True) diff --git a/app/schemas/host.py b/app/schemas/host.py new file mode 100644 index 0000000..16625f3 --- /dev/null +++ b/app/schemas/host.py @@ -0,0 +1,36 @@ +from __future__ import annotations + +from datetime 
import datetime +from typing import Optional + +from pydantic import BaseModel, Field, ConfigDict + + +class HostBase(BaseModel): + name: str = Field(..., min_length=1) + ip_address: str = Field(..., min_length=3) + ansible_group: Optional[str] = None + status: Optional[str] = Field(default="unknown") + reachable: Optional[bool] = False + last_seen: Optional[datetime] = None + + +class HostCreate(HostBase): + pass + + +class HostUpdate(BaseModel): + ansible_group: Optional[str] = None + status: Optional[str] = None + reachable: Optional[bool] = None + last_seen: Optional[datetime] = None + deleted_at: Optional[datetime] = None + + +class HostOut(HostBase): + id: str + created_at: datetime + updated_at: datetime + deleted_at: Optional[datetime] = None + + model_config = ConfigDict(from_attributes=True) diff --git a/app/schemas/log.py b/app/schemas/log.py new file mode 100644 index 0000000..1058dcd --- /dev/null +++ b/app/schemas/log.py @@ -0,0 +1,23 @@ +from __future__ import annotations + +from datetime import datetime +from typing import Optional, Dict, Any + +from pydantic import BaseModel, Field, ConfigDict + + +class LogCreate(BaseModel): + level: str = Field(..., description="Log level: info/warning/error/debug") + source: Optional[str] = None + message: str + details: Optional[Dict[str, Any]] = None + host_id: Optional[str] = None + task_id: Optional[str] = None + schedule_id: Optional[str] = None + + +class LogOut(LogCreate): + id: int + created_at: datetime + + model_config = ConfigDict(from_attributes=True) diff --git a/app/schemas/schedule.py b/app/schemas/schedule.py new file mode 100644 index 0000000..081913d --- /dev/null +++ b/app/schemas/schedule.py @@ -0,0 +1,67 @@ +from __future__ import annotations + +from datetime import datetime +from typing import List, Optional + +from pydantic import BaseModel, Field, ConfigDict + + +class ScheduleBase(BaseModel): + name: str + playbook: str + target: str + schedule_type: str + schedule_time: Optional[datetime] = None + recurrence_type: Optional[str] = None + recurrence_time: Optional[str] = None + recurrence_days: Optional[List[int]] = None + cron_expression: Optional[str] = None + enabled: bool = True + tags: Optional[List[str]] = None + next_run: Optional[datetime] = None + last_run: Optional[datetime] = None + + +class ScheduleCreate(ScheduleBase): + pass + + +class ScheduleUpdate(BaseModel): + name: Optional[str] = None + playbook: Optional[str] = None + target: Optional[str] = None + schedule_type: Optional[str] = None + schedule_time: Optional[datetime] = None + recurrence_type: Optional[str] = None + recurrence_time: Optional[str] = None + recurrence_days: Optional[List[int]] = None + cron_expression: Optional[str] = None + enabled: Optional[bool] = None + tags: Optional[List[str]] = None + next_run: Optional[datetime] = None + last_run: Optional[datetime] = None + deleted_at: Optional[datetime] = None + + +class ScheduleOut(ScheduleBase): + id: str + created_at: datetime + updated_at: datetime + deleted_at: Optional[datetime] = None + + model_config = ConfigDict(from_attributes=True) + + +class ScheduleRunOut(BaseModel): + id: int + schedule_id: str + task_id: Optional[str] = None + status: str + started_at: datetime + completed_at: Optional[datetime] = None + duration: Optional[float] = None + error_message: Optional[str] = None + output: Optional[str] = None + created_at: datetime + + model_config = ConfigDict(from_attributes=True) diff --git a/app/schemas/task.py b/app/schemas/task.py new file mode 100644 index 
0000000..d3ae93c --- /dev/null +++ b/app/schemas/task.py @@ -0,0 +1,36 @@ +from __future__ import annotations + +from datetime import datetime +from typing import Optional, Dict, Any + +from pydantic import BaseModel, Field, ConfigDict + + +class TaskBase(BaseModel): + action: str + target: str + playbook: Optional[str] = None + status: str = Field(default="pending") + result_data: Optional[Dict[str, Any]] = None + error_message: Optional[str] = None + + +class TaskCreate(TaskBase): + pass + + +class TaskUpdate(BaseModel): + status: Optional[str] = None + started_at: Optional[datetime] = None + completed_at: Optional[datetime] = None + error_message: Optional[str] = None + result_data: Optional[Dict[str, Any]] = None + + +class TaskOut(TaskBase): + id: str + started_at: Optional[datetime] = None + completed_at: Optional[datetime] = None + created_at: datetime + + model_config = ConfigDict(from_attributes=True)
diff --git a/data/homelab.db b/data/homelab.db new file mode 100644 index 0000000000000000000000000000000000000000..4410bda55bad27954e72ab65919e742875d57998 GIT binary patch [binary literal omitted]
diff --git a/data/homelab.db-shm b/data/homelab.db-shm new file mode 100644 index 0000000000000000000000000000000000000000..cd304e910d15957f37a3c243e04fb2c95c5c4e11 GIT binary patch [binary literal omitted]
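For orientation, here is a minimal usage sketch (not part of the patch) of how the pieces above fit together: the `get_db` dependency from `app/models/database.py` yields an `AsyncSession`, a repository from `app/crud` wraps the queries, and the Pydantic `*Out` schemas (`from_attributes=True`) let FastAPI serialize the ORM rows directly. The route path, the `app` instance and the flat import style (`from models...`, `from crud...`, as used in `app/app_optimized.py`) are assumptions for illustration only.

```python
# Illustrative sketch only; not part of the patch.
# Assumes the flat import style used inside the app container.
from fastapi import Depends, FastAPI
from sqlalchemy.ext.asyncio import AsyncSession

from crud.host import HostRepository
from models.database import get_db
from schemas.host import HostOut

app = FastAPI()


@app.get("/api/hosts", response_model=list[HostOut])  # hypothetical route
async def list_hosts(
    limit: int = 100,
    offset: int = 0,
    db_session: AsyncSession = Depends(get_db),
):
    repo = HostRepository(db_session)
    # Repository methods only flush; a read-only query needs no commit.
    hosts = await repo.list(limit=limit, offset=offset)
    # HostOut declares model_config = ConfigDict(from_attributes=True), so the
    # ORM objects are validated and serialized by FastAPI via response_model.
    return hosts
```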
zwh=%80R#|0009ILKmY**5U8qvtpb75_NYqr2tLal3=i+x_u^x_w^NUxs#lnUBY*$` z2q1s}0tg_000Ib<6|mPMm~+0!jqul3Jp$EKjcHJiz?M6>ArX4}=(c?y%5i}vccw({ zpx8-6009ILKmY**5I_I{1Wrw$yGbOdwQZIMIf;b8L@eWtCcUZfU?y5nFHZjZjNHNO zu$@H@nwlD#RoS#U)%16mdPjUFxr6aorjT4@1Zsg$V`a%}n>*Mu)UnVV-(|>k1Wn5R z;@rXgBoCh2#~hf-1(E=-u~Vz$MWL> zEhTaXm6nq}d%ghy1Q0*~0R#|0009ILKmdWMB2b(oXvp~}O(ph=Qh)k%%N@Ms#2s&Z z?&ubOeq3O;l{>hbP!P8k6N5I_I{1Q0*~0R#}3Rs#0iL1%sD4hE-N?%9Ao|V`N!nAfXltG zWbUBYO+x?y1Q0*~0R#|00D&`1pvxtL0=wnAK8c{fbTpNYW$amaa!PiP+(A1^uHJoU zc4gD*RFm96Z!8%bh=of81o~CeFhY(>lTU-(!OR6;`r2E&k2yk9Xepm)y93=RYNL2gPn0 z0tg_000IagfB*srAbz!3D~j)UA9Lx&K-0~mt)6!c564BIP&ZF?>?N~ns`R$ z3%Fe`jFT@Qo}eLs00IagfB*srAb>!93WVI1UNLP1OuvtO0i#{j+I@!CC)TCQ>U3-7 z$i#(>vKRQ*FI%@d6X}ok4#uPD?vV}eqLtBbe4xKwJdWJdZX{yKMc&L{N<=Osvcu#H zq`m!Pd@k9Rhzp&xHyn@KZTy<4tD&f*v98O*S#Ia~p z&KGbidrRjF>?L79n$ZwI009ILKmY**5I_I{1Q4i-fR!fTwl->)FW`PI@Z&e1`TP3Z zxPa%xIQatN2^sac_tmc_Z$u(TaES1;`gj#s*^H zcr240w)aGF6J8618Y}yeQQLfhm6xu1D7Epii{*R)kFsO@e1RP#44fIi0(qARAb;7K{#=O;@&)qc3>wdM_?=^CH;j`naNp8{-R5i@-3>BdKxsNU zVZMNPnuY)Z2q1s}0tg_0KwS!SDPnbRTem#7lUTf-j;7MF%-Gv$;x?oqlFP{#F#WzR zS7l5^6ugK62zEEg!;vqbwfjUaylU#5rmlu&S9VppIBd|=)X=O-AJ*SNzQ8oj7l^K1 z^^M1ZkvHXh0Y$lWqI`i{Nf@ZhUxAzy0tg_000IagfB*srAb`M>5U|n&6lya&xd}Wkyh6>buzQAm$ee8J8ZtaE>N1nX+ z$Sn_^+jmIj3pkoTn4w(ZxmSFlA%Fk^2q1s}0tg_000Ib1BY~j>j@GUv?Ps^mnD_Y^ z3!LYRC=UOOh|$y6>(_gOok2|x`g=Qj{Q<+*7YPSD{2d*U;L2z?KG5GT{^1+BsohA# zlKo?LT(T_@569!_Xk;**jwQbt8O#i9N%%Bz5Bs7ehWDcUf7i{wHkuijf4x1&L(@%P zpv11B1&zgLscLkIU10e_zs=!tYz?JWJ8h^nxkWiGvz z&S0m}sf%tsNBr-x#s$9n@{V^8Wxu*l&KGbfcg;`^DX)nyGz1Vp009ILKmY**5I_I{ z1Q4i+z(Pk?L%Bg3hkQ}FF_OjwjxJ}pAqw%9T=H#M;{t2`bnBh({=)sboG&o9si#am z0`USG0tg_000IagfB*uM5{S=riTL>;MZWkXN(4mG+2QQ4H??Ij?Jc}D>CK3p;{N2= z+hCfRyYWapf_bq~>1~;TXku9^6^}*21FPunHv-fn z7~9K6ZXBiR`fbh}0vH(+x`fnO4o* z4e*di!8@m--P$j%qY-jc`mnhb{-MUo9(E)kAkJcoy{F%jD zU)tG{8yD~lm8nM{UO+N9O&^E#6N(0-xdai8&N< zvcp-0K5Av^5&Ubrr*ZjTpL#n#Utp=d9>G#75}Zm`=X()A009ILKmY**5I_I{1g4jO ztpdSZ+oP#ekHD5M@U4H(-{<`2%&*IQftDs$*?a-<5*h*sAb@dG(uphf8~moLyy!oc+UO91Z%0R#|0009ILKmY** z5I~?h0#=$pi?vaw`2yF)Uf;NW(`&nP;{u*o*?a-<5*h*sAb9z}oL@{rJUKo1c~W0$HP;+n}7P*21%-s}V38@CLm$CgmnM2&h;%9?xu zdk}yjR@a9-m0ht^Z(Izqe*WnJeY}&?Yv^eziO&R+*zX)?}pFx>ucu7 zx3Dqy7S=CYw|a7a{4#^7R63T(4%^RP?=*EaG`q4LO*QqPsi~n^l|C%_0^V3MHV_NP zW0}G$DY5{xK&Y{@wJOdGww15D9jgFpnRmf yqr9QKqCBlUq9m1wvVnvFtLyU>0R#|0009ILKmY**5I|sB3fN) Any: + if not path.exists(): + return default + try: + with path.open("r", encoding="utf-8") as f: + return json.load(f) + except Exception: + return default + + +def backup_file(path: Path) -> None: + if path.exists(): + bak = path.with_suffix(path.suffix + ".bak") + path.rename(bak) + + +def parse_datetime(value: Optional[str]) -> Optional[datetime]: + if not value: + return None + try: + return datetime.fromisoformat(value) + except Exception: + return None + + +async def migrate_hosts(session, report: Dict[str, Any]) -> Dict[str, str]: + repo = HostRepository(session) + inventory_hosts: Dict[str, Dict[str, Any]] = {} + + # Lire inventaire Ansible pour récupérer les groupes + inventory_file = ANSIBLE_DIR / "inventory" / "hosts.yml" + if inventory_file.exists(): + data = yaml.safe_load(inventory_file.read_text(encoding="utf-8")) or {} + all_children = data.get("all", {}).get("children", {}) + for group_name, group_data in all_children.items(): + hosts = group_data.get("hosts", {}) or {} + for host_name in hosts.keys(): + entry = inventory_hosts.setdefault(host_name, {"groups": set()}) + entry["groups"].add(group_name) + else: + report.setdefault("warnings", []).append("Inventaire Ansible 
introuvable, ip=hostname et groupe vide")
+
+    host_status_data = load_json(HOST_STATUS_FILE, {"hosts": {}}).get("hosts", {})
+
+    created = 0
+    host_map: Dict[str, str] = {}  # hostname -> host_id
+    for host_name, meta in inventory_hosts.items():
+        status_entry = host_status_data.get(host_name, {})
+        status = status_entry.get("status", "unknown")
+        last_seen = parse_datetime(status_entry.get("last_seen"))
+        ansible_group = next(iter(g for g in meta.get("groups", []) if g.startswith("env_")), None)
+        reachable = status != "offline"
+        host_id = uuid.uuid4().hex
+
+        host = await repo.create(
+            id=host_id,
+            name=host_name,
+            ip_address=host_name,
+            ansible_group=ansible_group,
+            status=status,
+            reachable=reachable,
+            last_seen=last_seen,
+        )
+        created += 1
+        host_map[host_name] = host.id
+
+    report["hosts"] = created
+    backup_file(HOST_STATUS_FILE)
+    return host_map
+
+
+async def migrate_bootstrap_status(session, report: Dict[str, Any], host_map: Dict[str, str]) -> None:
+    repo = BootstrapStatusRepository(session)
+    data = load_json(BOOTSTRAP_STATUS_FILE, {"hosts": {}}).get("hosts", {})
+    created = 0
+    for host_name, meta in data.items():
+        host_id = host_map.get(host_name)
+        if not host_id:
+            report.setdefault("warnings", []).append(f"Bootstrap ignoré: host inconnu {host_name}")
+            continue
+        bootstrap_ok = meta.get("bootstrap_ok", False)
+        status = "success" if bootstrap_ok else "failed"
+        last_attempt = parse_datetime(meta.get("bootstrap_date"))
+        error_message = None if bootstrap_ok else meta.get("details")
+        await repo.create(
+            host_id=host_id,
+            status=status,
+            automation_user="automation",
+            last_attempt=last_attempt,
+            error_message=error_message,
+        )
+        created += 1
+    report["bootstrap_status"] = created
+    backup_file(BOOTSTRAP_STATUS_FILE)
+
+
+async def migrate_schedule_runs(session, report: Dict[str, Any]) -> None:
+    repo = ScheduleRunRepository(session)
+    task_repo = TaskRepository(session)
+    schedule_repo = ScheduleRepository(session)
+
+    data = load_json(SCHEDULE_RUNS_FILE, {"runs": []}).get("runs", [])
+    if not data:
+        report["schedule_runs"] = 0
+        return
+
+    created = 0
+    for run in data:
+        schedule_id = run.get("schedule_id") or uuid.uuid4().hex
+        task_id = run.get("task_id")
+        status = run.get("status") or "unknown"
+        started_at = parse_datetime(run.get("started_at")) or datetime.utcnow()
+        # Les runs JSON exportés utilisent "finished_at" et "duration_seconds"
+        completed_at = parse_datetime(run.get("finished_at") or run.get("completed_at"))
+        duration = run.get("duration_seconds", run.get("duration"))
+        error_message = run.get("error_message")
+        output = run.get("output")
+
+        # Assure une entrée schedule/task minimaliste si absente
+        existing_schedule = await schedule_repo.get(schedule_id, include_deleted=True)
+        if existing_schedule is None:
+            await schedule_repo.create(
+                id=schedule_id,
+                name=run.get("name", "Imported schedule"),
+                playbook=run.get("playbook", "unknown"),
+                target=run.get("target", "all"),
+                schedule_type=run.get("schedule_type", "once"),
+                enabled=True,
+            )
+        if task_id:
+            existing_task = await task_repo.get(task_id)
+            if existing_task is None:
+                await task_repo.create(
+                    id=task_id,
+                    action=run.get("action", "unknown"),
+                    target=run.get("target", "all"),
+                    playbook=run.get("playbook"),
+                    status=status,
+                )
+
+        await repo.create(
+            schedule_id=schedule_id,
+            task_id=task_id,
+            status=status,
+            started_at=started_at,
+            completed_at=completed_at,
+            duration=duration,
+            error_message=error_message,
+            output=output,
+        )
+        created += 1
+
+    report["schedule_runs"] = created
+    backup_file(SCHEDULE_RUNS_FILE)
+
+
+async def migrate_logs(session, report: Dict[str, Any], host_map: 
Dict[str, str]) -> None: + repo = LogRepository(session) + created = 0 + + json_files: List[Path] = [] + if TASKS_LOGS_DIR.exists(): + json_files = [p for p in TASKS_LOGS_DIR.rglob("*.json") if not p.name.startswith(".")] + + for path in json_files: + data = load_json(path, {}) + if isinstance(data, dict): + # Tentative de mapping simple + level = data.get("level") or "info" + message = data.get("message") or json.dumps(data, ensure_ascii=False) + source = data.get("source") + details = data.get("details") + host_val = data.get("host_id") + task_id = data.get("task_id") + schedule_id = data.get("schedule_id") + else: + level = "info" + message = str(data) + source = None + details = None + host_val = None + task_id = None + schedule_id = None + + host_id = host_map.get(host_val) if isinstance(host_val, str) else host_val + await repo.create( + level=level, + source=source, + message=message, + details=details, + host_id=host_id, + task_id=task_id, + schedule_id=schedule_id, + ) + created += 1 + backup_file(path) + + report["logs"] = created + + +async def main() -> None: + report: Dict[str, Any] = {"warnings": []} + await init_db() + + async with async_session_maker() as session: + async with session.begin(): + host_map = await migrate_hosts(session, report) + await migrate_bootstrap_status(session, report, host_map) + await migrate_schedule_runs(session, report) + await migrate_logs(session, report, host_map) + + print("Migration terminée") + for key, value in report.items(): + if key == "warnings": + if value: + print("Warnings:") + for w in value: + print(f" - {w}") + continue + print(f" - {key}: {value}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/pytest.ini b/pytest.ini new file mode 100644 index 0000000..c1203e5 --- /dev/null +++ b/pytest.ini @@ -0,0 +1,3 @@ +[pytest] +asyncio_mode = auto +asyncio_default_fixture_loop_scope = module diff --git a/tasks_logs/.schedule_runs.json b/tasks_logs/.schedule_runs.json index 2f33e44..6787dcb 100644 --- a/tasks_logs/.schedule_runs.json +++ b/tasks_logs/.schedule_runs.json @@ -1,5 +1,173 @@ { "runs": [ + { + "id": "run_e16db5ac6f5c", + "schedule_id": "sched_110c001afe0c", + "task_id": "2", + "started_at": "2025-12-05 02:35:00.012993+00:00", + "finished_at": "2025-12-05 02:35:32.549542+00:00", + "status": "success", + "duration_seconds": 32.45821054699991, + "hosts_impacted": 15, + "error_message": null, + "retry_attempt": 0 + }, + { + "id": "run_6c434a169263", + "schedule_id": "sched_110c001afe0c", + "task_id": "1", + "started_at": "2025-12-05 02:30:00.004595+00:00", + "finished_at": "2025-12-05 02:30:30.003032+00:00", + "status": "success", + "duration_seconds": 29.95905439800117, + "hosts_impacted": 15, + "error_message": null, + "retry_attempt": 0 + }, + { + "id": "run_debf96da90dd", + "schedule_id": "sched_110c001afe0c", + "task_id": "2", + "started_at": "2025-12-05 02:25:00.016354+00:00", + "finished_at": "2025-12-05 02:25:27.580495+00:00", + "status": "success", + "duration_seconds": 27.521959419998893, + "hosts_impacted": 15, + "error_message": null, + "retry_attempt": 0 + }, + { + "id": "run_bda871b98a7c", + "schedule_id": "sched_31a7ffb99bfd", + "task_id": "1", + "started_at": "2025-12-05 02:20:00.004169+00:00", + "finished_at": "2025-12-05 02:20:28.118352+00:00", + "status": "success", + "duration_seconds": 28.0753927859987, + "hosts_impacted": 15, + "error_message": null, + "retry_attempt": 0 + }, + { + "id": "run_9acaf3ee6040", + "schedule_id": "sched_d5370726086b", + "task_id": "4", + "started_at": "2025-12-05 
02:05:01.066895+00:00", + "finished_at": null, + "status": "running", + "duration_seconds": null, + "hosts_impacted": 0, + "error_message": null, + "retry_attempt": 0 + }, + { + "id": "run_25dee59d8f54", + "schedule_id": "sched_178b8e511908", + "task_id": "3", + "started_at": "2025-12-05 02:05:00.942939+00:00", + "finished_at": null, + "status": "running", + "duration_seconds": null, + "hosts_impacted": 0, + "error_message": null, + "retry_attempt": 0 + }, + { + "id": "run_06b2fe4c75f9", + "schedule_id": "sched_d5370726086b", + "task_id": "2", + "started_at": "2025-12-05 02:00:00.048675+00:00", + "finished_at": "2025-12-05 02:00:31.174698+00:00", + "status": "success", + "duration_seconds": 31.10493237799892, + "hosts_impacted": 15, + "error_message": null, + "retry_attempt": 0 + }, + { + "id": "run_5a3ada10451e", + "schedule_id": "sched_178b8e511908", + "task_id": "1", + "started_at": "2025-12-05 02:00:00.004396+00:00", + "finished_at": "2025-12-05 02:00:30.956215+00:00", + "status": "success", + "duration_seconds": 30.92840002899902, + "hosts_impacted": 15, + "error_message": null, + "retry_attempt": 0 + }, + { + "id": "run_484f67657ee4", + "schedule_id": "sched_d5370726086b", + "task_id": "3", + "started_at": "2025-12-05 01:55:00.084088+00:00", + "finished_at": "2025-12-05 01:55:32.096250+00:00", + "status": "success", + "duration_seconds": 31.975180113000533, + "hosts_impacted": 15, + "error_message": null, + "retry_attempt": 0 + }, + { + "id": "run_7c9cbee2fe69", + "schedule_id": "sched_178b8e511908", + "task_id": "2", + "started_at": "2025-12-05 01:55:00.018967+00:00", + "finished_at": "2025-12-05 01:55:32.306141+00:00", + "status": "success", + "duration_seconds": 32.26106233700193, + "hosts_impacted": 15, + "error_message": null, + "retry_attempt": 0 + }, + { + "id": "run_a45e3d80323d", + "schedule_id": "sched_d5370726086b", + "task_id": "1", + "started_at": "2025-12-05 01:50:00.003670+00:00", + "finished_at": "2025-12-05 01:50:27.635237+00:00", + "status": "success", + "duration_seconds": 27.58177596600217, + "hosts_impacted": 15, + "error_message": null, + "retry_attempt": 0 + }, + { + "id": "run_6ebb5bb47219", + "schedule_id": "sched_d5370726086b", + "task_id": "2", + "started_at": "2025-12-05 01:45:00.003641+00:00", + "finished_at": "2025-12-05 01:45:26.015984+00:00", + "status": "success", + "duration_seconds": 25.9568110279979, + "hosts_impacted": 15, + "error_message": null, + "retry_attempt": 0 + }, + { + "id": "run_f07c8820abcf", + "schedule_id": "sched_d5370726086b", + "task_id": "1", + "started_at": "2025-12-05 01:40:00.003609+00:00", + "finished_at": "2025-12-05 01:40:27.800302+00:00", + "status": "success", + "duration_seconds": 27.77215807200264, + "hosts_impacted": 15, + "error_message": null, + "retry_attempt": 0 + }, + { + "id": "run_c831165b16d9", + "schedule_id": "sched_d5370726086b", + "task_id": null, + "started_at": "2025-12-05 01:35:00.003976+00:00", + "finished_at": null, + "status": "running", + "duration_seconds": null, + "hosts_impacted": 0, + "error_message": null, + "retry_attempt": 0 + }, { "id": "run_9eaff32da049", "schedule_id": "sched_31a7ffb99bfd", diff --git a/tasks_logs/.schedules.json b/tasks_logs/.schedules.json index 4debd97..fbdc8f7 100644 --- a/tasks_logs/.schedules.json +++ b/tasks_logs/.schedules.json @@ -1,9 +1,9 @@ { "schedules": [ { - "id": "sched_31a7ffb99bfd", + "id": "sched_110c001afe0c", "name": "Health-check-5min", - "description": "Health-check-5min", + "description": null, "playbook": "health-check.yml", "target_type": "group", 
"target": "all", @@ -19,20 +19,20 @@ "timezone": "America/Montreal", "start_at": null, "end_at": null, - "next_run_at": "2025-12-04 15:35:00-05:00", - "last_run_at": "2025-12-04 20:30:00.003138+00:00", + "next_run_at": "2025-12-04T21:40:00-05:00", + "last_run_at": "2025-12-05 02:35:00.012919+00:00", "last_status": "success", - "enabled": false, + "enabled": true, "retry_on_failure": 0, "timeout": 3600, "tags": [ "Test" ], - "run_count": 22, - "success_count": 19, - "failure_count": 3, - "created_at": "2025-12-04 18:45:18.318152+00:00", - "updated_at": "2025-12-04 20:30:29.181594+00:00" + "run_count": 3, + "success_count": 3, + "failure_count": 0, + "created_at": "2025-12-05 02:24:06.110100+00:00", + "updated_at": "2025-12-05 02:35:32.549928+00:00" } ] } \ No newline at end of file diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/test_db.py b/tests/test_db.py new file mode 100644 index 0000000..bf9c7bf --- /dev/null +++ b/tests/test_db.py @@ -0,0 +1,206 @@ +"""Tests basiques pour valider la couche DB SQLAlchemy async.""" +from __future__ import annotations + +import asyncio +import sys +from pathlib import Path + +# Add project root to path +sys.path.insert(0, str(Path(__file__).resolve().parents[1])) + +import pytest +import pytest_asyncio + +from app.models.database import async_session_maker, init_db, engine +from app.crud.host import HostRepository +from app.crud.bootstrap_status import BootstrapStatusRepository +from app.crud.task import TaskRepository +from app.crud.schedule import ScheduleRepository +from app.crud.log import LogRepository + +# Configure pytest-asyncio +pytestmark = pytest.mark.asyncio + + +@pytest_asyncio.fixture(scope="module") +async def setup_db(): + await init_db() + yield + # Cleanup: drop all tables + from app.models.database import Base + async with engine.begin() as conn: + await conn.run_sync(Base.metadata.drop_all) + + +@pytest.mark.asyncio +async def test_create_host(setup_db): + async with async_session_maker() as session: + repo = HostRepository(session) + host = await repo.create( + id="test-host-001", + name="test.host.local", + ip_address="192.168.1.100", + ansible_group="env_test", + status="online", + reachable=True, + ) + await session.commit() + + assert host.id == "test-host-001" + assert host.name == "test.host.local" + assert host.ip_address == "192.168.1.100" + + +@pytest.mark.asyncio +async def test_list_hosts(setup_db): + async with async_session_maker() as session: + repo = HostRepository(session) + hosts = await repo.list(limit=10, offset=0) + assert isinstance(hosts, list) + + +@pytest.mark.asyncio +async def test_create_task(setup_db): + async with async_session_maker() as session: + repo = TaskRepository(session) + task = await repo.create( + id="task-001", + action="health-check", + target="all", + playbook="health-check.yml", + status="pending", + ) + await session.commit() + + assert task.id == "task-001" + assert task.action == "health-check" + + +@pytest.mark.asyncio +async def test_create_log(setup_db): + async with async_session_maker() as session: + repo = LogRepository(session) + log = await repo.create( + level="INFO", + message="Test log entry", + source="test", + ) + await session.commit() + + assert log.id is not None + assert log.level == "INFO" + + +@pytest.mark.asyncio +async def test_soft_delete_host(setup_db): + async with async_session_maker() as session: + repo = HostRepository(session) + # Create a host to delete + host = await repo.create( + 
id="host-to-delete", + name="delete.me.local", + ip_address="192.168.1.200", + status="unknown", + ) + await session.commit() + + # Soft delete + deleted = await repo.soft_delete("host-to-delete") + await session.commit() + assert deleted is True + + # Should not appear in normal list + hosts = await repo.list() + host_ids = [h.id for h in hosts] + assert "host-to-delete" not in host_ids + + # But should appear with include_deleted=True + host_with_deleted = await repo.get("host-to-delete", include_deleted=True) + assert host_with_deleted is not None + assert host_with_deleted.deleted_at is not None + + +@pytest.mark.asyncio +async def test_create_schedule(setup_db): + from app.crud.schedule import ScheduleRepository + async with async_session_maker() as session: + repo = ScheduleRepository(session) + schedule = await repo.create( + id="schedule-001", + name="Daily Backup", + playbook="backup.yml", + target="all", + schedule_type="recurring", + recurrence_type="daily", + recurrence_time="02:00", + enabled=True, + ) + await session.commit() + + assert schedule.id == "schedule-001" + assert schedule.name == "Daily Backup" + assert schedule.enabled is True + + +@pytest.mark.asyncio +async def test_schedule_soft_delete(setup_db): + from app.crud.schedule import ScheduleRepository + async with async_session_maker() as session: + repo = ScheduleRepository(session) + # Create + schedule = await repo.create( + id="schedule-to-delete", + name="To Delete", + playbook="test.yml", + target="all", + schedule_type="once", + enabled=True, + ) + await session.commit() + + # Soft delete + deleted = await repo.soft_delete("schedule-to-delete") + await session.commit() + assert deleted is True + + # Should not appear in normal list + schedules = await repo.list() + schedule_ids = [s.id for s in schedules] + assert "schedule-to-delete" not in schedule_ids + + +@pytest.mark.asyncio +async def test_create_schedule_run(setup_db): + from app.crud.schedule import ScheduleRepository + from app.crud.schedule_run import ScheduleRunRepository + from datetime import datetime, timezone + + async with async_session_maker() as session: + # Create schedule first + sched_repo = ScheduleRepository(session) + schedule = await sched_repo.create( + id="schedule-for-run", + name="Run Test", + playbook="test.yml", + target="all", + schedule_type="once", + enabled=True, + ) + await session.commit() + + # Create run + run_repo = ScheduleRunRepository(session) + run = await run_repo.create( + schedule_id="schedule-for-run", + status="running", + started_at=datetime.now(timezone.utc), + ) + await session.commit() + + assert run.id is not None + assert run.schedule_id == "schedule-for-run" + assert run.status == "running" + + +if __name__ == "__main__": + pytest.main([__file__, "-v"])