Bruno Charest 05087aa380
Some checks failed
Tests / Backend Tests (Python) (3.10) (push) Has been cancelled
Tests / Backend Tests (Python) (3.11) (push) Has been cancelled
Tests / Backend Tests (Python) (3.12) (push) Has been cancelled
Tests / Frontend Tests (JS) (push) Has been cancelled
Tests / Integration Tests (push) Has been cancelled
Tests / All Tests Passed (push) Has been cancelled
Replace manual upsert logic with SQLite native upsert in Docker CRUD repositories, enhance Ansible backup playbook with better error handling and file permissions, add favicon endpoint, and improve playbook editor UI with syntax highlighting, lint integration, quality badges, and enhanced code editing features
2025-12-17 15:36:49 -05:00

301 lines
11 KiB
Python

"""
Tests pour l'API ansible-lint.
"""
import pytest
from unittest.mock import patch, AsyncMock, MagicMock
import json
from app.routes.lint import (
calculate_quality_score,
parse_ansible_lint_json,
parse_ansible_lint_text,
get_help_url,
get_fix_suggestion,
)
from app.schemas.lint import LintIssue, LintRequest
class TestQualityScore:
    """Quality-score computation tests."""

    def test_perfect_score_no_issues(self):
        """An empty issue list yields the maximum score of 100."""
        assert calculate_quality_score([], 20) == 100

    def test_score_with_errors(self):
        """Each error costs 15 points; a no-warning bonus still applies."""
        errors = [
            LintIssue(rule_id="test", severity="error", message="Error", line=1, column=1),
            LintIssue(rule_id="test2", severity="error", message="Error 2", line=2, column=1),
        ]
        # 100 - (2 * 15) + 3 (no-warning bonus) = 73
        assert calculate_quality_score(errors, 20) == 73

    def test_score_with_warnings(self):
        """Each warning costs 5 points; a no-error bonus still applies."""
        warnings = [
            LintIssue(rule_id="test", severity="warning", message="Warning", line=1, column=1),
            LintIssue(rule_id="test2", severity="warning", message="Warning 2", line=2, column=1),
        ]
        # 100 - (2 * 5) + 5 (no-error bonus) = 95
        assert calculate_quality_score(warnings, 20) == 95

    def test_score_with_mixed_issues(self):
        """Errors, warnings and infos are weighted 15/5/1 respectively."""
        mixed = [
            LintIssue(rule_id="test", severity="error", message="Error", line=1, column=1),
            LintIssue(rule_id="test2", severity="warning", message="Warning", line=2, column=1),
            LintIssue(rule_id="test3", severity="info", message="Info", line=3, column=1),
        ]
        # 100 - 15 - 5 - 1 = 79
        assert calculate_quality_score(mixed, 20) == 79

    def test_score_minimum_zero(self):
        """The score is clamped at zero, never negative."""
        many_errors = [
            LintIssue(rule_id=f"test{i}", severity="error", message="Error", line=i, column=1)
            for i in range(10)
        ]
        # 100 - (10 * 15) = -50, clamped to 0
        assert calculate_quality_score(many_errors, 20) == 0

    def test_score_tolerance_large_files(self):
        """Bigger files get tolerance: same issue, greater-or-equal score."""
        single_warning = [
            LintIssue(rule_id="test", severity="warning", message="Warning", line=1, column=1),
        ]
        small_file_score = calculate_quality_score(single_warning, 20)
        large_file_score = calculate_quality_score(single_warning, 150)
        assert large_file_score >= small_file_score
class TestParseAnsibleLintJson:
    """Tests for parsing ansible-lint's JSON (codeclimate) output."""

    def test_parse_empty_output(self):
        """An empty string produces no issues."""
        assert parse_ansible_lint_json("", "test.yml") == []

    def test_parse_valid_json_list(self):
        """A valid JSON list is mapped onto LintIssue objects."""
        payload = [
            {
                "check_name": "name[missing]",
                "severity": "minor",
                "description": "All tasks should be named.",
                "location": {
                    "path": "test.yml",
                    "lines": {"begin": 10},
                },
            },
        ]
        issues = parse_ansible_lint_json(json.dumps(payload), "test.yml")
        assert len(issues) == 1
        first = issues[0]
        assert first.rule_id == "name[missing]"
        assert first.severity == "warning"
        assert first.line == 10

    def test_parse_json_with_multiple_issues(self):
        """Severities map across: major -> error, minor -> warning."""
        payload = [
            {
                "check_name": "risky-file-permissions",
                "severity": "major",
                "description": "File permissions issue",
                "location": {"path": "test.yml", "lines": {"begin": 5}},
            },
            {
                "check_name": "yaml[truthy]",
                "severity": "minor",
                "description": "Truthy value issue",
                "location": {"path": "test.yml", "lines": {"begin": 12}},
            },
        ]
        issues = parse_ansible_lint_json(json.dumps(payload), "test.yml")
        assert len(issues) == 2
        assert issues[0].severity == "error"
        assert issues[1].severity == "warning"

    def test_parse_invalid_json_fallback(self):
        """Non-JSON input falls back to the plain-text parser."""
        plain_output = "test.yml:10: name[missing]: All tasks should be named."
        issues = parse_ansible_lint_json(plain_output, "test.yml")
        assert len(issues) == 1
        assert issues[0].rule_id == "name[missing]"
        assert issues[0].line == 10
class TestParseAnsibleLintText:
    """Tests for parsing ansible-lint's plain-text output."""

    def test_parse_empty_output(self):
        """An empty string produces no issues."""
        assert parse_ansible_lint_text("", "test.yml") == []

    def test_parse_single_line(self):
        """A single 'file:line: rule: message' line is fully parsed."""
        output = "test.yml:10: name[missing]: All tasks should be named."
        issues = parse_ansible_lint_text(output, "test.yml")
        assert len(issues) == 1
        first = issues[0]
        assert first.rule_id == "name[missing]"
        assert first.message == "All tasks should be named."
        assert first.line == 10

    def test_parse_multiple_lines(self):
        """Several lines yield several issues; an optional column is kept."""
        output = "\n".join([
            "test.yml:10: name[missing]: All tasks should be named.",
            "test.yml:15: yaml[truthy]: Truthy value should be one of [true, false]",
            "test.yml:20:5: risky-file-permissions: File permissions unset",
        ])
        issues = parse_ansible_lint_text(output, "test.yml")
        assert len(issues) == 3
        assert issues[2].column == 5

    def test_severity_detection_error(self):
        """A risky-* rule is classified as an error."""
        issues = parse_ansible_lint_text("test.yml:10: risky-file-permissions: Issue", "test.yml")
        assert issues[0].severity == "error"

    def test_severity_detection_info(self):
        """An info-style rule is classified as info."""
        issues = parse_ansible_lint_text("test.yml:10: info-rule: Some info", "test.yml")
        assert issues[0].severity == "info"
class TestHelpers:
    """Tests for the help-URL and fix-suggestion helpers."""

    def test_get_help_url_known_rule(self):
        """A known rule id appears in its documentation URL."""
        assert "risky-file-permissions" in get_help_url("risky-file-permissions")

    def test_get_help_url_with_subrule(self):
        """A bracketed sub-rule resolves through its parent rule name."""
        assert "name" in get_help_url("name[missing]")

    def test_get_fix_suggestion_known_rule(self):
        """A known rule yields a suggestion that mentions 'mode'."""
        suggestion = get_fix_suggestion("risky-file-permissions")
        assert suggestion is not None
        assert "mode" in suggestion.lower()

    def test_get_fix_suggestion_unknown_rule(self):
        """An unknown rule yields no suggestion at all."""
        assert get_fix_suggestion("unknown-rule-xyz") is None
@pytest.mark.asyncio
class TestLintEndpoint:
    """Tests for the POST /api/playbooks/{name}/lint endpoint."""

    @staticmethod
    def _fake_process(stdout: bytes, returncode: int):
        """Build an AsyncMock standing in for an ansible-lint subprocess."""
        process = AsyncMock()
        process.communicate.return_value = (stdout, b'')
        process.returncode = returncode
        return process

    async def test_lint_endpoint_success(self, client, api_headers):
        """A clean playbook returns 200 with score, summary and issues."""
        with patch('app.routes.lint.asyncio.create_subprocess_exec') as mock_exec:
            # ansible-lint exits 0 and prints an empty JSON list when clean
            mock_exec.return_value = self._fake_process(b'[]', 0)
            response = await client.post(
                "/api/playbooks/test.yml/lint",
                json={"content": "---\n- name: Test\n hosts: all\n"},
                headers=api_headers,
            )
        assert response.status_code == 200
        body = response.json()
        assert body["success"] is True
        for key in ("quality_score", "summary", "issues"):
            assert key in body

    async def test_lint_endpoint_with_issues(self, client, api_headers):
        """Detected issues are returned and counted in the summary."""
        report = json.dumps([
            {
                "check_name": "name[missing]",
                "severity": "minor",
                "description": "All tasks should be named.",
                "location": {"path": "test.yml", "lines": {"begin": 5}},
            },
        ])
        with patch('app.routes.lint.asyncio.create_subprocess_exec') as mock_exec:
            # ansible-lint exits 1 when it finds problems
            mock_exec.return_value = self._fake_process(report.encode(), 1)
            response = await client.post(
                "/api/playbooks/test.yml/lint",
                json={"content": "---\n- hosts: all\n tasks:\n - debug: msg=test\n"},
                headers=api_headers,
            )
        assert response.status_code == 200
        body = response.json()
        assert len(body["issues"]) == 1
        assert body["summary"]["warnings"] == 1

    async def test_lint_endpoint_invalid_extension(self, client, api_headers):
        """A non-YAML filename is rejected with 400."""
        response = await client.post(
            "/api/playbooks/test.txt/lint",
            json={"content": "test"},
            headers=api_headers,
        )
        assert response.status_code == 400

    async def test_lint_endpoint_ansible_lint_unavailable(self, client, api_headers):
        """A missing ansible-lint binary yields 503."""
        with patch('app.routes.lint.asyncio.create_subprocess_exec') as mock_exec:
            mock_exec.side_effect = FileNotFoundError()
            response = await client.post(
                "/api/playbooks/test.yml/lint",
                json={"content": "---\n- name: Test\n hosts: all\n"},
                headers=api_headers,
            )
        assert response.status_code == 503

    async def test_lint_endpoint_timeout(self, client, api_headers):
        """A hanging ansible-lint run yields 504."""
        import asyncio as aio
        with patch('app.routes.lint.asyncio.create_subprocess_exec') as mock_exec:
            process = AsyncMock()
            process.communicate.side_effect = aio.TimeoutError()
            process.kill = MagicMock()  # kill() is synchronous on real processes
            mock_exec.return_value = process
            response = await client.post(
                "/api/playbooks/test.yml/lint",
                json={"content": "---\n- name: Test\n hosts: all\n"},
                headers=api_headers,
            )
        assert response.status_code == 504