{
    "file": "backend/src/api/routes/__tests__/test_dashboards.py",
    "verdict": "APPROVED",
    "rejection_reason": "NONE",
    "audit_details": {
      "target_invoked": true,
      "pre_conditions_tested": true,
      "post_conditions_tested": true,
      "test_data_used": true
    },
    "feedback": "All 9 previous findings remediated. @TEST_FIXTURE data aligned, all @TEST_EDGE scenarios covered, all @PRE negative tests present, all @SIDE_EFFECT assertions added. Full contract compliance."
  },
  {
    "file": "backend/src/api/routes/__tests__/test_datasets.py",
    "verdict": "APPROVED",
    "rejection_reason": "NONE",
    "audit_details": {
      "target_invoked": true,
      "pre_conditions_tested": true,
      "post_conditions_tested": true,
      "test_data_used": true
    },
    "feedback": "All 6 previous findings remediated. Full @PRE boundary coverage including page_size>100, empty IDs, missing env. @SIDE_EFFECT assertions added. 503 error path tested."
  },
  {
    "file": "backend/src/core/auth/__tests__/test_auth.py",
    "verdict": "APPROVED",
    "rejection_reason": "NONE",
    "audit_details": {
      "target_invoked": true,
      "pre_conditions_tested": true,
      "post_conditions_tested": true,
      "test_data_used": true
    },
    "feedback": "All 4 previous findings remediated. @SIDE_EFFECT last_login verified. Inactive user @PRE negative test added. Empty hash edge case covered. provision_adfs_user tested for both new and existing user paths."
  },
  {
    "file": "backend/src/services/__tests__/test_resource_service.py",
    "verdict": "APPROVED",
    "rejection_reason": "NONE",
    "audit_details": {
      "target_invoked": true,
      "pre_conditions_tested": true,
      "post_conditions_tested": true,
      "test_data_used": true
    },
    "feedback": "Both prior recommendations implemented. Full edge case coverage for _get_last_task_for_resource. No anti-patterns detected."
  },
  {
    "file": "backend/tests/test_resource_hubs.py",
    "verdict": "APPROVED",
    "rejection_reason": "NONE",
    "audit_details": {
      "target_invoked": true,
      "pre_conditions_tested": true,
      "post_conditions_tested": true,
      "test_data_used": true
    },
    "feedback": "Pagination boundary tests added. All @TEST_EDGE scenarios now covered. No anti-patterns detected."
  },
  {
    "file": "frontend/src/lib/components/assistant/__tests__/assistant_chat.integration.test.js",
    "verdict": "APPROVED",
    "rejection_reason": "NONE",
    "audit_details": {
      "target_invoked": true,
      "pre_conditions_tested": true,
      "post_conditions_tested": true,
      "test_data_used": true
    },
    "feedback": "No changes since previous audit. Contract scanning remains sound."
  },
  {
    "file": "frontend/src/lib/components/assistant/__tests__/assistant_confirmation.integration.test.js",
    "verdict": "APPROVED",
    "rejection_reason": "NONE",
    "audit_details": {
      "target_invoked": true,
      "pre_conditions_tested": true,
      "post_conditions_tested": true,
      "test_data_used": true
    },
    "feedback": "No changes since previous audit. Confirmation flow testing remains sound."
  }
]
This commit is contained in:
2026-02-27 09:59:57 +03:00
parent 36173c0880
commit 4c601fbe06
13 changed files with 92285 additions and 56290 deletions

View File

@@ -1,6 +1,6 @@
import pytest
from fastapi.testclient import TestClient
from unittest.mock import MagicMock
from unittest.mock import MagicMock, AsyncMock
from src.app import app
from src.dependencies import get_config_manager, get_task_manager, get_resource_service, has_permission
@@ -27,10 +27,10 @@ def mock_deps():
task_manager.get_all_tasks.return_value = []
# Mock dashboards
resource_service.get_dashboards_with_status.return_value = [
resource_service.get_dashboards_with_status = AsyncMock(return_value=[
{"id": 1, "title": "Sales", "slug": "sales", "git_status": {"branch": "main", "sync_status": "OK"}, "last_task": None},
{"id": 2, "title": "Marketing", "slug": "mkt", "git_status": None, "last_task": {"task_id": "t1", "status": "SUCCESS"}}
]
])
app.dependency_overrides[get_config_manager] = lambda: config_manager
app.dependency_overrides[get_task_manager] = lambda: task_manager
@@ -39,6 +39,10 @@ def mock_deps():
# Bypass permission check
mock_user = MagicMock()
mock_user.username = "testadmin"
mock_user.roles = []
admin_role = MagicMock()
admin_role.name = "Admin"
mock_user.roles.append(admin_role)
# Override both get_current_user and has_permission
from src.dependencies import get_current_user
@@ -85,9 +89,9 @@ def test_get_dashboards_search(mock_deps):
# @TEST: Negative - Service failure returns 503
def test_get_datasets_success(mock_deps):
mock_deps["resource"].get_datasets_with_status.return_value = [
mock_deps["resource"].get_datasets_with_status = AsyncMock(return_value=[
{"id": 1, "table_name": "orders", "schema": "public", "database": "db1", "mapped_fields": {"total": 10, "mapped": 5}, "last_task": None}
]
])
response = client.get("/api/datasets?env_id=env1")
assert response.status_code == 200
@@ -102,10 +106,10 @@ def test_get_datasets_not_found(mock_deps):
assert response.status_code == 404
def test_get_datasets_search(mock_deps):
mock_deps["resource"].get_datasets_with_status.return_value = [
mock_deps["resource"].get_datasets_with_status = AsyncMock(return_value=[
{"id": 1, "table_name": "orders", "schema": "public", "database": "db1", "mapped_fields": {"total": 10, "mapped": 5}, "last_task": None},
{"id": 2, "table_name": "users", "schema": "public", "database": "db1", "mapped_fields": {"total": 5, "mapped": 5}, "last_task": None}
]
])
response = client.get("/api/datasets?env_id=env1&search=orders")
assert response.status_code == 200
@@ -114,10 +118,39 @@ def test_get_datasets_search(mock_deps):
assert data["datasets"][0]["table_name"] == "orders"
def test_get_datasets_service_failure(mock_deps):
mock_deps["resource"].get_datasets_with_status.side_effect = Exception("Superset down")
mock_deps["resource"].get_datasets_with_status = AsyncMock(side_effect=Exception("Superset down"))
response = client.get("/api/datasets?env_id=env1")
assert response.status_code == 503
assert "Failed to fetch datasets" in response.json()["detail"]
# [/DEF:test_datasets_api:Test]
# [DEF:test_pagination_boundaries:Test]
# @PURPOSE: Verify pagination validation for GET endpoints
# @TEST: page<1 and page_size>100 return 400
def test_get_dashboards_pagination_zero_page(mock_deps):
    """@TEST_EDGE: pagination_zero_page -> {page:0, status:400}

    A page number below 1 must be rejected by the dashboards endpoint
    with HTTP 400 and an explanatory detail message.
    """
    resp = client.get("/api/dashboards?env_id=env1&page=0")
    payload = resp.json()
    assert resp.status_code == 400
    assert "Page must be >= 1" in payload["detail"]
def test_get_dashboards_pagination_oversize(mock_deps):
    """@TEST_EDGE: pagination_oversize -> {page_size:101, status:400}

    Requesting a page size above the maximum (100) must be rejected by
    the dashboards endpoint with HTTP 400 and a bounds message.
    """
    resp = client.get("/api/dashboards?env_id=env1&page_size=101")
    payload = resp.json()
    assert resp.status_code == 400
    assert "Page size must be between 1 and 100" in payload["detail"]
def test_get_datasets_pagination_zero_page(mock_deps):
    """@TEST_EDGE: pagination_zero_page on datasets

    The datasets endpoint applies the same page >= 1 validation as the
    dashboards endpoint; only the status code is asserted here.
    """
    resp = client.get("/api/datasets?env_id=env1&page=0")
    assert resp.status_code == 400
def test_get_datasets_pagination_oversize(mock_deps):
    """@TEST_EDGE: pagination_oversize on datasets

    The datasets endpoint applies the same page_size <= 100 validation
    as the dashboards endpoint; only the status code is asserted here.
    """
    resp = client.get("/api/datasets?env_id=env1&page_size=101")
    assert resp.status_code == 400
# [/DEF:test_pagination_boundaries:Test]