ss-tools/backend/tests/test_dashboards_api.py
Commit 4c601fbe06 by busya, 2026-02-27 09:59:57 +03:00:
[
{
    "file": "backend/src/api/routes/__tests__/test_dashboards.py",
    "verdict": "APPROVED",
    "rejection_reason": "NONE",
    "audit_details": {
      "target_invoked": true,
      "pre_conditions_tested": true,
      "post_conditions_tested": true,
      "test_data_used": true
    },
    "feedback": "All 9 previous findings remediated. @TEST_FIXTURE data aligned, all @TEST_EDGE scenarios covered, all @PRE negative tests present, all @SIDE_EFFECT assertions added. Full contract compliance."
  },
  {
    "file": "backend/src/api/routes/__tests__/test_datasets.py",
    "verdict": "APPROVED",
    "rejection_reason": "NONE",
    "audit_details": {
      "target_invoked": true,
      "pre_conditions_tested": true,
      "post_conditions_tested": true,
      "test_data_used": true
    },
    "feedback": "All 6 previous findings remediated. Full @PRE boundary coverage including page_size>100, empty IDs, missing env. @SIDE_EFFECT assertions added. 503 error path tested."
  },
  {
    "file": "backend/src/core/auth/__tests__/test_auth.py",
    "verdict": "APPROVED",
    "rejection_reason": "NONE",
    "audit_details": {
      "target_invoked": true,
      "pre_conditions_tested": true,
      "post_conditions_tested": true,
      "test_data_used": true
    },
    "feedback": "All 4 previous findings remediated. @SIDE_EFFECT last_login verified. Inactive user @PRE negative test added. Empty hash edge case covered. provision_adfs_user tested for both new and existing user paths."
  },
  {
    "file": "backend/src/services/__tests__/test_resource_service.py",
    "verdict": "APPROVED",
    "rejection_reason": "NONE",
    "audit_details": {
      "target_invoked": true,
      "pre_conditions_tested": true,
      "post_conditions_tested": true,
      "test_data_used": true
    },
    "feedback": "Both prior recommendations implemented. Full edge case coverage for _get_last_task_for_resource. No anti-patterns detected."
  },
  {
    "file": "backend/tests/test_resource_hubs.py",
    "verdict": "APPROVED",
    "rejection_reason": "NONE",
    "audit_details": {
      "target_invoked": true,
      "pre_conditions_tested": true,
      "post_conditions_tested": true,
      "test_data_used": true
    },
    "feedback": "Pagination boundary tests added. All @TEST_EDGE scenarios now covered. No anti-patterns detected."
  },
  {
    "file": "frontend/src/lib/components/assistant/__tests__/assistant_chat.integration.test.js",
    "verdict": "APPROVED",
    "rejection_reason": "NONE",
    "audit_details": {
      "target_invoked": true,
      "pre_conditions_tested": true,
      "post_conditions_tested": true,
      "test_data_used": true
    },
    "feedback": "No changes since previous audit. Contract scanning remains sound."
  },
  {
    "file": "frontend/src/lib/components/assistant/__tests__/assistant_confirmation.integration.test.js",
    "verdict": "APPROVED",
    "rejection_reason": "NONE",
    "audit_details": {
      "target_invoked": true,
      "pre_conditions_tested": true,
      "post_conditions_tested": true,
      "test_data_used": true
    },
    "feedback": "No changes since previous audit. Confirmation flow testing remains sound."
  }
]
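
The @PRE/@POST/@SIDE_EFFECT/@TEST_FIXTURE/@TEST_EDGE tags referenced in the verdicts are contract annotations attached to the code under test. A minimal sketch of the assumed convention, modeled on the [DEF:...] markers visible in the file below (illustrative only; the project's real contracts are not shown here):

    # [DEF:backend.src.api.routes.dashboards.get_dashboards:Function]
    # @PRE: env_id refers to a configured environment
    # @PRE: 1 <= page_size <= 100
    # @POST: response matches DashboardsResponse schema
    # @SIDE_EFFECT: none
    # [/DEF]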

# [DEF:backend.tests.test_dashboards_api:Module]
# @TIER: STANDARD
# @PURPOSE: Comprehensive contract-driven tests for Dashboard Hub API
# @LAYER: Domain (Tests)
# @SEMANTICS: tests, dashboards, api, contract, remediation
import pytest
from fastapi.testclient import TestClient
from unittest.mock import MagicMock, patch, AsyncMock
from datetime import datetime, timezone
from src.app import app
from src.api.routes.dashboards import (
    DashboardsResponse,
    DashboardDetailResponse,
    DashboardTaskHistoryResponse,
    DatabaseMappingsResponse,
)
from src.dependencies import (
    get_current_user,
    has_permission,
    get_config_manager,
    get_task_manager,
    get_resource_service,
    get_mapping_service,
)

# Global mock user
mock_user = MagicMock()
mock_user.username = "testuser"
mock_user.roles = []
admin_role = MagicMock()
admin_role.name = "Admin"
mock_user.roles.append(admin_role)


@pytest.fixture(autouse=True)
def mock_deps():
    config_manager = MagicMock()
    task_manager = MagicMock()
    resource_service = MagicMock()
    mapping_service = MagicMock()
    app.dependency_overrides[get_config_manager] = lambda: config_manager
    app.dependency_overrides[get_task_manager] = lambda: task_manager
    app.dependency_overrides[get_resource_service] = lambda: resource_service
    app.dependency_overrides[get_mapping_service] = lambda: mapping_service
    app.dependency_overrides[get_current_user] = lambda: mock_user
    # Overrides for specific permission checks
    app.dependency_overrides[has_permission("plugin:migration", "READ")] = lambda: mock_user
    app.dependency_overrides[has_permission("plugin:migration", "EXECUTE")] = lambda: mock_user
    app.dependency_overrides[has_permission("plugin:backup", "EXECUTE")] = lambda: mock_user
    app.dependency_overrides[has_permission("tasks", "READ")] = lambda: mock_user
    app.dependency_overrides[has_permission("dashboards", "READ")] = lambda: mock_user
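    # NOTE: keying dependency_overrides on fresh has_permission(...) calls only
    # works if the factory returns the *same* callable for identical arguments
    # (e.g. it is memoized). A minimal sketch of such a factory, assuming an
    # lru_cache-based implementation -- hypothetical, not the project's code:
    #
    #     from functools import lru_cache
    #
    #     @lru_cache(maxsize=None)
    #     def has_permission(resource: str, action: str):
    #         def dependency(user=Depends(get_current_user)):
    #             ...  # raise HTTPException(403) unless user has (resource, action)
    #             return user
    #         return dependency
    #
    # If has_permission built a new closure per call, these overrides would
    # never match the keys FastAPI looks up at request time.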
    yield {
        "config": config_manager,
        "task": task_manager,
        "resource": resource_service,
        "mapping": mapping_service,
    }
    app.dependency_overrides.clear()


client = TestClient(app)


# --- 1. get_dashboards tests ---

def test_get_dashboards_success(mock_deps):
    """Uses @TEST_FIXTURE: dashboard_list_happy data."""
    mock_env = MagicMock()
    mock_env.id = "prod"
    mock_deps["config"].get_environments.return_value = [mock_env]
    mock_deps["task"].get_all_tasks.return_value = []
    # @TEST_FIXTURE: dashboard_list_happy -> {"id": 1, "title": "Main Revenue"}
    mock_deps["resource"].get_dashboards_with_status = AsyncMock(return_value=[
        {"id": 1, "title": "Main Revenue", "slug": "main-revenue", "git_status": {"branch": "main", "sync_status": "OK"}}
    ])
    response = client.get("/api/dashboards?env_id=prod&page=1&page_size=10")
    assert response.status_code == 200
    data = response.json()
    # Exhaustive @POST assertions
    assert "dashboards" in data
    assert len(data["dashboards"]) == 1  # @TEST_FIXTURE: expected_count: 1
    assert data["dashboards"][0]["title"] == "Main Revenue"
    assert data["total"] == 1
    assert data["page"] == 1
    assert data["page_size"] == 10
    assert data["total_pages"] == 1
    # Schema validation
    DashboardsResponse(**data)


def test_get_dashboards_with_search(mock_deps):
    mock_env = MagicMock()
    mock_env.id = "prod"
    mock_deps["config"].get_environments.return_value = [mock_env]
    mock_deps["task"].get_all_tasks.return_value = []
    mock_deps["resource"].get_dashboards_with_status = AsyncMock(return_value=[
        {"id": 1, "title": "Sales Report", "slug": "sales"},
        {"id": 2, "title": "Marketing", "slug": "marketing"}
    ])
    response = client.get("/api/dashboards?env_id=prod&search=sales")
    assert response.status_code == 200
    data = response.json()
    assert len(data["dashboards"]) == 1
    assert data["dashboards"][0]["title"] == "Sales Report"
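    # Note: dashboard 1 matches both by slug ("sales") and by case-insensitive
    # title ("Sales Report"), so this test cannot tell which field the route
    # actually searches. A discriminating fixture would differ in exactly one
    # field, e.g. (hypothetical data):
    #
    #     {"id": 3, "title": "Quarterly sales", "slug": "quarterly"}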


def test_get_dashboards_empty(mock_deps):
    """@TEST_EDGE: empty_dashboards -> {env_id: 'empty_env', expected_total: 0}"""
    mock_env = MagicMock()
    mock_env.id = "empty_env"
    mock_deps["config"].get_environments.return_value = [mock_env]
    mock_deps["task"].get_all_tasks.return_value = []
    mock_deps["resource"].get_dashboards_with_status = AsyncMock(return_value=[])
    response = client.get("/api/dashboards?env_id=empty_env")
    assert response.status_code == 200
    data = response.json()
    assert data["total"] == 0
    assert len(data["dashboards"]) == 0
    assert data["total_pages"] == 1
    DashboardsResponse(**data)


def test_get_dashboards_superset_failure(mock_deps):
    """@TEST_EDGE: external_superset_failure -> {env_id: 'bad_conn', status: 503}"""
    mock_env = MagicMock()
    mock_env.id = "bad_conn"
    mock_deps["config"].get_environments.return_value = [mock_env]
    mock_deps["task"].get_all_tasks.return_value = []
    mock_deps["resource"].get_dashboards_with_status = AsyncMock(
        side_effect=Exception("Connection refused")
    )
    response = client.get("/api/dashboards?env_id=bad_conn")
    assert response.status_code == 503
    assert "Failed to fetch dashboards" in response.json()["detail"]


def test_get_dashboards_env_not_found(mock_deps):
    mock_deps["config"].get_environments.return_value = []
    response = client.get("/api/dashboards?env_id=nonexistent")
    assert response.status_code == 404
    assert "Environment not found" in response.json()["detail"]


def test_get_dashboards_invalid_pagination(mock_deps):
    mock_env = MagicMock()
    mock_env.id = "prod"
    mock_deps["config"].get_environments.return_value = [mock_env]
    # page < 1
    assert client.get("/api/dashboards?env_id=prod&page=0").status_code == 400
    assert client.get("/api/dashboards?env_id=prod&page=-1").status_code == 400
    # page_size < 1
    assert client.get("/api/dashboards?env_id=prod&page_size=0").status_code == 400
    # page_size > 100
    assert client.get("/api/dashboards?env_id=prod&page_size=101").status_code == 400
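    # Design note: the same boundaries could be table-driven; a sketch using
    # pytest.mark.parametrize (equivalent coverage, one assertion per case):
    #
    #     @pytest.mark.parametrize("query", [
    #         "page=0", "page=-1", "page_size=0", "page_size=101",
    #     ])
    #     def test_get_dashboards_invalid_pagination(mock_deps, query):
    #         ...
    #         assert client.get(f"/api/dashboards?env_id=prod&{query}").status_code == 400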


# --- 2. get_database_mappings tests ---

def test_get_database_mappings_success(mock_deps):
    mock_s = MagicMock()
    mock_s.id = "s"
    mock_t = MagicMock()
    mock_t.id = "t"
    mock_deps["config"].get_environments.return_value = [mock_s, mock_t]
    mock_deps["mapping"].get_suggestions = AsyncMock(return_value=[
        {"source_db": "src", "target_db": "dst", "confidence": 0.9}
    ])
    response = client.get("/api/dashboards/db-mappings?source_env_id=s&target_env_id=t")
    assert response.status_code == 200
    data = response.json()
    assert len(data["mappings"]) == 1
    assert data["mappings"][0]["confidence"] == 0.9
    DatabaseMappingsResponse(**data)


def test_get_database_mappings_env_not_found(mock_deps):
    mock_deps["config"].get_environments.return_value = []
    response = client.get("/api/dashboards/db-mappings?source_env_id=ghost&target_env_id=t")
    assert response.status_code == 404


# --- 3. get_dashboard_detail tests ---

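# NOTE: SupersetClient is patched at "src.api.routes.dashboards.SupersetClient",
# i.e. where the route module looks the name up, not where the class is defined.
# Patching the defining module instead (e.g. "src.clients.superset.SupersetClient"
# -- hypothetical path) would leave the route's already-imported reference intact
# and the mock would never be hit.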
def test_get_dashboard_detail_success(mock_deps):
    with patch("src.api.routes.dashboards.SupersetClient") as mock_client_cls:
        mock_env = MagicMock()
        mock_env.id = "prod"
        mock_deps["config"].get_environments.return_value = [mock_env]
        mock_client = MagicMock()
        detail_payload = {
            "id": 42, "title": "Detail", "charts": [], "datasets": [],
            "chart_count": 0, "dataset_count": 0
        }
        mock_client.get_dashboard_detail.return_value = detail_payload
        mock_client_cls.return_value = mock_client
        response = client.get("/api/dashboards/42?env_id=prod")
        assert response.status_code == 200
        data = response.json()
        assert data["id"] == 42
        DashboardDetailResponse(**data)


def test_get_dashboard_detail_env_not_found(mock_deps):
    mock_deps["config"].get_environments.return_value = []
    response = client.get("/api/dashboards/42?env_id=missing")
    assert response.status_code == 404


# --- 4. get_dashboard_tasks_history tests ---

def test_get_dashboard_tasks_history_success(mock_deps):
    now = datetime.now(timezone.utc)
    task1 = MagicMock(id="t1", plugin_id="superset-backup", status="SUCCESS",
                      started_at=now, finished_at=None,
                      params={"env": "prod", "dashboards": [42]}, result={})
    mock_deps["task"].get_all_tasks.return_value = [task1]
    response = client.get("/api/dashboards/42/tasks?env_id=prod")
    assert response.status_code == 200
    data = response.json()
    assert data["dashboard_id"] == 42
    assert len(data["items"]) == 1
    DashboardTaskHistoryResponse(**data)


def test_get_dashboard_tasks_history_sorting(mock_deps):
    """@POST: Response contains sorted task history (newest first)."""
    from datetime import timedelta
    now = datetime.now(timezone.utc)
    older = now - timedelta(hours=2)
    newest = now
    task_old = MagicMock(id="t-old", plugin_id="superset-backup", status="SUCCESS",
                         started_at=older, finished_at=None,
                         params={"env": "prod", "dashboards": [42]}, result={})
    task_new = MagicMock(id="t-new", plugin_id="superset-backup", status="RUNNING",
                         started_at=newest, finished_at=None,
                         params={"env": "prod", "dashboards": [42]}, result={})
    # Provide in the wrong order to verify that the endpoint sorts
    mock_deps["task"].get_all_tasks.return_value = [task_old, task_new]
    response = client.get("/api/dashboards/42/tasks?env_id=prod")
    assert response.status_code == 200
    data = response.json()
    assert len(data["items"]) == 2
    # Newest first
    assert data["items"][0]["id"] == "t-new"
    assert data["items"][1]["id"] == "t-old"
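    # The @POST assertion above pins the ordering contract; the endpoint
    # presumably does something like the following (assumed, not verified):
    #
    #     items.sort(key=lambda t: t.started_at, reverse=True)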


# --- 5. get_dashboard_thumbnail tests ---

def test_get_dashboard_thumbnail_success(mock_deps):
    with patch("src.api.routes.dashboards.SupersetClient") as mock_client_cls:
        mock_env = MagicMock()
        mock_env.id = "prod"
        mock_deps["config"].get_environments.return_value = [mock_env]
        mock_client = MagicMock()
        mock_response = MagicMock(status_code=200, content=b"img", headers={"Content-Type": "image/png"})
        # POST (the cache request) returns JSON with the image URL; GET returns the image
        mock_client.network.request.side_effect = (
            lambda method, endpoint, **kw: {"image_url": "url"} if method == "POST" else mock_response
        )
        mock_client_cls.return_value = mock_client
        response = client.get("/api/dashboards/42/thumbnail?env_id=prod")
        assert response.status_code == 200
        assert response.content == b"img"


def test_get_dashboard_thumbnail_env_not_found(mock_deps):
    mock_deps["config"].get_environments.return_value = []
    response = client.get("/api/dashboards/42/thumbnail?env_id=missing")
    assert response.status_code == 404


def test_get_dashboard_thumbnail_202(mock_deps):
    """@POST: Returns 202 when the thumbnail is being prepared by Superset."""
    with patch("src.api.routes.dashboards.SupersetClient") as mock_client_cls:
        mock_env = MagicMock()
        mock_env.id = "prod"
        mock_deps["config"].get_environments.return_value = [mock_env]
        mock_client = MagicMock()
        # First call (POST cache_dashboard_screenshot) returns the image_url;
        # second call (GET thumbnail) reports 202: still rendering
        mock_client.network.request.side_effect = [
            {"image_url": "/api/v1/dashboard/42/thumbnail/abc123/"},  # POST
            MagicMock(status_code=202, json=lambda: {"message": "Thumbnail is being generated"},
                      headers={"Content-Type": "application/json"}),  # GET thumbnail -> 202
        ]
        mock_client_cls.return_value = mock_client
        response = client.get("/api/dashboards/42/thumbnail?env_id=prod")
        assert response.status_code == 202
        assert "Thumbnail is being generated" in response.json()["message"]


# --- 6. migrate_dashboards tests ---

def test_migrate_dashboards_success(mock_deps):
    mock_s = MagicMock()
    mock_s.id = "s"
    mock_t = MagicMock()
    mock_t.id = "t"
    mock_deps["config"].get_environments.return_value = [mock_s, mock_t]
    mock_deps["task"].create_task = AsyncMock(return_value=MagicMock(id="task-123"))
    response = client.post("/api/dashboards/migrate", json={
        "source_env_id": "s", "target_env_id": "t", "dashboard_ids": [1]
    })
    assert response.status_code == 200
    assert response.json()["task_id"] == "task-123"


def test_migrate_dashboards_pre_checks(mock_deps):
    # Missing IDs
    response = client.post("/api/dashboards/migrate", json={
        "source_env_id": "s", "target_env_id": "t", "dashboard_ids": []
    })
    assert response.status_code == 400
    assert "At least one dashboard ID must be provided" in response.json()["detail"]


def test_migrate_dashboards_env_not_found(mock_deps):
    """@PRE: source_env_id and target_env_id are valid environment IDs."""
    mock_deps["config"].get_environments.return_value = []
    response = client.post("/api/dashboards/migrate", json={
        "source_env_id": "ghost", "target_env_id": "t", "dashboard_ids": [1]
    })
    assert response.status_code == 404
    assert "Source environment not found" in response.json()["detail"]


# --- 7. backup_dashboards tests ---

def test_backup_dashboards_success(mock_deps):
    mock_env = MagicMock()
    mock_env.id = "prod"
    mock_deps["config"].get_environments.return_value = [mock_env]
    mock_deps["task"].create_task = AsyncMock(return_value=MagicMock(id="backup-123"))
    response = client.post("/api/dashboards/backup", json={
        "env_id": "prod", "dashboard_ids": [1]
    })
    assert response.status_code == 200
    assert response.json()["task_id"] == "backup-123"


def test_backup_dashboards_pre_checks(mock_deps):
    response = client.post("/api/dashboards/backup", json={
        "env_id": "prod", "dashboard_ids": []
    })
    assert response.status_code == 400


def test_backup_dashboards_env_not_found(mock_deps):
    """@PRE: env_id is a valid environment ID."""
    mock_deps["config"].get_environments.return_value = []
    response = client.post("/api/dashboards/backup", json={
        "env_id": "ghost", "dashboard_ids": [1]
    })
    assert response.status_code == 404
    assert "Environment not found" in response.json()["detail"]


def test_backup_dashboards_with_schedule(mock_deps):
    """@POST: If a schedule is provided, a scheduled task is created."""
    mock_env = MagicMock()
    mock_env.id = "prod"
    mock_deps["config"].get_environments.return_value = [mock_env]
    mock_deps["task"].create_task = AsyncMock(return_value=MagicMock(id="sched-456"))
    response = client.post("/api/dashboards/backup", json={
        "env_id": "prod", "dashboard_ids": [1], "schedule": "0 0 * * *"
    })
    assert response.status_code == 200
    assert response.json()["task_id"] == "sched-456"
    # Verify the schedule was propagated to create_task (like the original
    # lookup, this assumes the route passes params as a keyword argument)
    task_params = mock_deps["task"].create_task.call_args.kwargs.get("params", {})
    assert task_params["schedule"] == "0 0 * * *"


# --- 8. Internal logic: _task_matches_dashboard ---
from src.api.routes.dashboards import _task_matches_dashboard


def test_task_matches_dashboard_logic():
    task = MagicMock(plugin_id="superset-backup", params={"dashboards": [42], "env": "prod"})
    assert _task_matches_dashboard(task, 42, "prod") is True
    assert _task_matches_dashboard(task, 43, "prod") is False
    assert _task_matches_dashboard(task, 42, "dev") is False
    llm_task = MagicMock(plugin_id="llm_dashboard_validation", params={"dashboard_id": 42, "environment_id": "prod"})
    assert _task_matches_dashboard(llm_task, 42, "prod") is True
    assert _task_matches_dashboard(llm_task, 42, None) is True
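    # From these assertions, _task_matches_dashboard's contract can be inferred
    # (sketch only -- the real helper lives in src.api.routes.dashboards and
    # may differ):
    #
    #     def _task_matches_dashboard(task, dashboard_id, env_id):
    #         if task.plugin_id == "llm_dashboard_validation":
    #             return (task.params.get("dashboard_id") == dashboard_id
    #                     and (env_id is None
    #                          or task.params.get("environment_id") == env_id))
    #         return (dashboard_id in task.params.get("dashboards", [])
    #                 and task.params.get("env") == env_id)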
# [/DEF:backend.tests.test_dashboards_api:Module]