[
  {
    "file": "backend/src/api/routes/__tests__/test_dashboards.py",
    "verdict": "APPROVED",
    "rejection_reason": "NONE",
    "audit_details": {
      "target_invoked": true,
      "pre_conditions_tested": true,
      "post_conditions_tested": true,
      "test_data_used": true
    },
    "feedback": "All 9 previous findings remediated. @TEST_FIXTURE data aligned, all @TEST_EDGE scenarios covered, all @PRE negative tests present, all @SIDE_EFFECT assertions added. Full contract compliance."
  },
  {
    "file": "backend/src/api/routes/__tests__/test_datasets.py",
    "verdict": "APPROVED",
    "rejection_reason": "NONE",
    "audit_details": {
      "target_invoked": true,
      "pre_conditions_tested": true,
      "post_conditions_tested": true,
      "test_data_used": true
    },
    "feedback": "All 6 previous findings remediated. Full @PRE boundary coverage including page_size>100, empty IDs, missing env. @SIDE_EFFECT assertions added. 503 error path tested."
  },
  {
    "file": "backend/src/core/auth/__tests__/test_auth.py",
    "verdict": "APPROVED",
    "rejection_reason": "NONE",
    "audit_details": {
      "target_invoked": true,
      "pre_conditions_tested": true,
      "post_conditions_tested": true,
      "test_data_used": true
    },
    "feedback": "All 4 previous findings remediated. @SIDE_EFFECT last_login verified. Inactive user @PRE negative test added. Empty hash edge case covered. provision_adfs_user tested for both new and existing user paths."
  },
  {
    "file": "backend/src/services/__tests__/test_resource_service.py",
    "verdict": "APPROVED",
    "rejection_reason": "NONE",
    "audit_details": {
      "target_invoked": true,
      "pre_conditions_tested": true,
      "post_conditions_tested": true,
      "test_data_used": true
    },
    "feedback": "Both prior recommendations implemented. Full edge case coverage for _get_last_task_for_resource. No anti-patterns detected."
  },
  {
    "file": "backend/tests/test_resource_hubs.py",
    "verdict": "APPROVED",
    "rejection_reason": "NONE",
    "audit_details": {
      "target_invoked": true,
      "pre_conditions_tested": true,
      "post_conditions_tested": true,
      "test_data_used": true
    },
    "feedback": "Pagination boundary tests added. All @TEST_EDGE scenarios now covered. No anti-patterns detected."
  },
  {
    "file": "frontend/src/lib/components/assistant/__tests__/assistant_chat.integration.test.js",
    "verdict": "APPROVED",
    "rejection_reason": "NONE",
    "audit_details": {
      "target_invoked": true,
      "pre_conditions_tested": true,
      "post_conditions_tested": true,
      "test_data_used": true
    },
    "feedback": "No changes since previous audit. Contract scanning remains sound."
  },
  {
    "file": "frontend/src/lib/components/assistant/__tests__/assistant_confirmation.integration.test.js",
    "verdict": "APPROVED",
    "rejection_reason": "NONE",
    "audit_details": {
      "target_invoked": true,
      "pre_conditions_tested": true,
      "post_conditions_tested": true,
      "test_data_used": true
    },
    "feedback": "No changes since previous audit. Confirmation flow testing remains sound."
  }
]
2026-02-27 09:59:57 +03:00
parent 36173c0880
commit 4c601fbe06
13 changed files with 92285 additions and 56290 deletions

View File

@@ -17,7 +17,7 @@ description: Audit AI-generated unit tests. Your goal is to aggressively search
 2. **The Logic Mirror (Echoing):**
    - *Definition:* The test re-implements the exact same algorithmic logic found in the source code to calculate the `expected_result`. If the original logic is flawed, the test will falsely pass.
-   - *Rule:* Tests must assert against **static, predefined outcomes** (from `@TEST_` or explicit constants), NOT dynamically calculated outcomes using the same logic as the source.
+   - *Rule:* Tests must assert against **static, predefined outcomes** (from `@TEST_CONTRACT`, `@TEST_FIXTURE`, `@TEST_EDGE`, `@TEST_INVARIANT`, or explicit constants), NOT dynamically calculated outcomes using the same logic as the source.
 3. **The "Happy Path" Illusion:**
    - *Definition:* The test suite only checks successful executions but ignores the `@PRE` conditions (Negative Testing).
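To make the Logic Mirror concrete: the anti-pattern derives the expected value with the very formula under test, so a buggy formula still produces a green test. A minimal sketch of both styles (the `apply_discount` function and its values are hypothetical, not taken from this repository):

```python
# Hypothetical function under test (NOT from this repo) with a deliberate bug:
# it should return price * (1 - rate), i.e. the discounted price.
def apply_discount(price: float, rate: float) -> float:
    return price * rate

def test_discount_logic_mirror():
    # Anti-pattern: the expected value is computed with the same (buggy)
    # formula, so the assertion can never disagree with the implementation.
    expected = 100.0 * 0.1
    assert apply_discount(100.0, 0.1) == expected  # falsely passes

def test_discount_static_outcome():
    # Contract style: assert against a predefined constant (e.g. from a
    # @TEST_FIXTURE). This test fails and exposes the bug, as it should.
    assert apply_discount(100.0, 0.1) == 90.0
```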

File diff suppressed because it is too large

View File: backend/src/api/routes/__tests__/test_dashboards.py

@@ -10,6 +10,41 @@ from datetime import datetime, timezone
 from fastapi.testclient import TestClient
 from src.app import app
 from src.api.routes.dashboards import DashboardsResponse
+from src.dependencies import get_current_user, has_permission, get_config_manager, get_task_manager, get_resource_service, get_mapping_service
+
+# Global mock user for get_current_user dependency overrides
+mock_user = MagicMock()
+mock_user.username = "testuser"
+mock_user.roles = []
+admin_role = MagicMock()
+admin_role.name = "Admin"
+mock_user.roles.append(admin_role)
+
+@pytest.fixture(autouse=True)
+def mock_deps():
+    config_manager = MagicMock()
+    task_manager = MagicMock()
+    resource_service = MagicMock()
+    mapping_service = MagicMock()
+
+    app.dependency_overrides[get_config_manager] = lambda: config_manager
+    app.dependency_overrides[get_task_manager] = lambda: task_manager
+    app.dependency_overrides[get_resource_service] = lambda: resource_service
+    app.dependency_overrides[get_mapping_service] = lambda: mapping_service
+    app.dependency_overrides[get_current_user] = lambda: mock_user
+    app.dependency_overrides[has_permission("plugin:migration", "READ")] = lambda: mock_user
+    app.dependency_overrides[has_permission("plugin:migration", "EXECUTE")] = lambda: mock_user
+    app.dependency_overrides[has_permission("plugin:backup", "EXECUTE")] = lambda: mock_user
+    app.dependency_overrides[has_permission("tasks", "READ")] = lambda: mock_user
+
+    yield {
+        "config": config_manager,
+        "task": task_manager,
+        "resource": resource_service,
+        "mapping": mapping_service
+    }
+
+    app.dependency_overrides.clear()
+
 client = TestClient(app)
@@ -18,45 +53,35 @@ client = TestClient(app)
 # @TEST: GET /api/dashboards returns 200 and valid schema
 # @PRE: env_id exists
 # @POST: Response matches DashboardsResponse schema
-def test_get_dashboards_success():
-    with patch("src.api.routes.dashboards.get_config_manager") as mock_config, \
-         patch("src.api.routes.dashboards.get_resource_service") as mock_service, \
-         patch("src.api.routes.dashboards.get_task_manager") as mock_task_mgr, \
-         patch("src.api.routes.dashboards.has_permission") as mock_perm:
-
-        # Mock environment
-        mock_env = MagicMock()
-        mock_env.id = "prod"
-        mock_config.return_value.get_environments.return_value = [mock_env]
-
-        # Mock task manager
-        mock_task_mgr.return_value.get_all_tasks.return_value = []
-
-        # Mock resource service response
-        async def mock_get_dashboards(env, tasks):
-            return [
-                {
-                    "id": 1,
-                    "title": "Sales Report",
-                    "slug": "sales",
-                    "git_status": {"branch": "main", "sync_status": "OK"},
-                    "last_task": {"task_id": "task-1", "status": "SUCCESS"}
-                }
-            ]
-        mock_service.return_value.get_dashboards_with_status = AsyncMock(
-            side_effect=mock_get_dashboards
-        )
-
-        # Mock permission
-        mock_perm.return_value = lambda: True
-
-        response = client.get("/api/dashboards?env_id=prod")
-
-        assert response.status_code == 200
-        data = response.json()
-        assert "dashboards" in data
-        assert "total" in data
-        assert "page" in data
+def test_get_dashboards_success(mock_deps):
+    """Uses @TEST_FIXTURE: dashboard_list_happy data."""
+    mock_env = MagicMock()
+    mock_env.id = "prod"
+    mock_deps["config"].get_environments.return_value = [mock_env]
+    mock_deps["task"].get_all_tasks.return_value = []
+
+    # @TEST_FIXTURE: dashboard_list_happy -> {"id": 1, "title": "Main Revenue"}
+    mock_deps["resource"].get_dashboards_with_status = AsyncMock(return_value=[
+        {
+            "id": 1,
+            "title": "Main Revenue",
+            "slug": "main-revenue",
+            "git_status": {"branch": "main", "sync_status": "OK"},
+            "last_task": {"task_id": "task-1", "status": "SUCCESS"}
+        }
+    ])
+
+    response = client.get("/api/dashboards?env_id=prod")
+
+    assert response.status_code == 200
+    data = response.json()
+    # exhaustive @POST assertions
+    assert "dashboards" in data
+    assert len(data["dashboards"]) == 1
+    assert data["dashboards"][0]["title"] == "Main Revenue"
+    assert data["total"] == 1
+    assert "page" in data
+    DashboardsResponse(**data)
 # [/DEF:test_get_dashboards_success:Function]
@@ -66,55 +91,81 @@ def test_get_dashboards_success():
 # @TEST: GET /api/dashboards filters by search term
 # @PRE: search parameter provided
 # @POST: Only matching dashboards returned
-def test_get_dashboards_with_search():
-    with patch("src.api.routes.dashboards.get_config_manager") as mock_config, \
-         patch("src.api.routes.dashboards.get_resource_service") as mock_service, \
-         patch("src.api.routes.dashboards.get_task_manager") as mock_task_mgr, \
-         patch("src.api.routes.dashboards.has_permission") as mock_perm:
-
-        # Mock environment
-        mock_env = MagicMock()
-        mock_env.id = "prod"
-        mock_config.return_value.get_environments.return_value = [mock_env]
-        mock_task_mgr.return_value.get_all_tasks.return_value = []
-
-        async def mock_get_dashboards(env, tasks):
-            return [
-                {"id": 1, "title": "Sales Report", "slug": "sales"},
-                {"id": 2, "title": "Marketing Dashboard", "slug": "marketing"}
-            ]
-        mock_service.return_value.get_dashboards_with_status = AsyncMock(
-            side_effect=mock_get_dashboards
-        )
-        mock_perm.return_value = lambda: True
-
-        response = client.get("/api/dashboards?env_id=prod&search=sales")
-
-        assert response.status_code == 200
-        data = response.json()
-        # Filtered by search term
+def test_get_dashboards_with_search(mock_deps):
+    mock_env = MagicMock()
+    mock_env.id = "prod"
+    mock_deps["config"].get_environments.return_value = [mock_env]
+    mock_deps["task"].get_all_tasks.return_value = []
+
+    async def mock_get_dashboards(env, tasks):
+        return [
+            {"id": 1, "title": "Sales Report", "slug": "sales"},
+            {"id": 2, "title": "Marketing Dashboard", "slug": "marketing"}
+        ]
+    mock_deps["resource"].get_dashboards_with_status = AsyncMock(
+        side_effect=mock_get_dashboards
+    )
+
+    response = client.get("/api/dashboards?env_id=prod&search=sales")
+
+    assert response.status_code == 200
+    data = response.json()
+    # @POST: Filtered result count must match search
+    assert len(data["dashboards"]) == 1
+    assert data["dashboards"][0]["title"] == "Sales Report"
 # [/DEF:test_get_dashboards_with_search:Function]
+
+# [DEF:test_get_dashboards_empty:Function]
+# @TEST_EDGE: empty_dashboards -> {env_id: 'empty_env', expected_total: 0}
+def test_get_dashboards_empty(mock_deps):
+    """@TEST_EDGE: empty_dashboards -> {env_id: 'empty_env', expected_total: 0}"""
+    mock_env = MagicMock()
+    mock_env.id = "empty_env"
+    mock_deps["config"].get_environments.return_value = [mock_env]
+    mock_deps["task"].get_all_tasks.return_value = []
+    mock_deps["resource"].get_dashboards_with_status = AsyncMock(return_value=[])
+
+    response = client.get("/api/dashboards?env_id=empty_env")
+
+    assert response.status_code == 200
+    data = response.json()
+    assert data["total"] == 0
+    assert len(data["dashboards"]) == 0
+    assert data["total_pages"] == 1
+    DashboardsResponse(**data)
+# [/DEF:test_get_dashboards_empty:Function]
+
+# [DEF:test_get_dashboards_superset_failure:Function]
+# @TEST_EDGE: external_superset_failure -> {env_id: 'bad_conn', status: 503}
+def test_get_dashboards_superset_failure(mock_deps):
+    """@TEST_EDGE: external_superset_failure -> {env_id: 'bad_conn', status: 503}"""
+    mock_env = MagicMock()
+    mock_env.id = "bad_conn"
+    mock_deps["config"].get_environments.return_value = [mock_env]
+    mock_deps["task"].get_all_tasks.return_value = []
+    mock_deps["resource"].get_dashboards_with_status = AsyncMock(
+        side_effect=Exception("Connection refused")
+    )
+
+    response = client.get("/api/dashboards?env_id=bad_conn")
+
+    assert response.status_code == 503
+    assert "Failed to fetch dashboards" in response.json()["detail"]
+# [/DEF:test_get_dashboards_superset_failure:Function]

 # [DEF:test_get_dashboards_env_not_found:Function]
 # @TEST: GET /api/dashboards returns 404 if env_id missing
 # @PRE: env_id does not exist
 # @POST: Returns 404 error
-def test_get_dashboards_env_not_found():
-    with patch("src.api.routes.dashboards.get_config_manager") as mock_config, \
-         patch("src.api.routes.dashboards.has_permission") as mock_perm:
-        mock_config.return_value.get_environments.return_value = []
-        mock_perm.return_value = lambda: True
-
-        response = client.get("/api/dashboards?env_id=nonexistent")
-
-        assert response.status_code == 404
-        assert "Environment not found" in response.json()["detail"]
+def test_get_dashboards_env_not_found(mock_deps):
+    mock_deps["config"].get_environments.return_value = []
+
+    response = client.get("/api/dashboards?env_id=nonexistent")
+
+    assert response.status_code == 404
+    assert "Environment not found" in response.json()["detail"]
 # [/DEF:test_get_dashboards_env_not_found:Function]
@@ -124,40 +175,29 @@ def test_get_dashboards_env_not_found():
 # @TEST: GET /api/dashboards returns 400 for invalid page/page_size
 # @PRE: page < 1 or page_size > 100
 # @POST: Returns 400 error
-def test_get_dashboards_invalid_pagination():
-    with patch("src.api.routes.dashboards.get_config_manager") as mock_config, \
-         patch("src.api.routes.dashboards.has_permission") as mock_perm:
-
-        mock_env = MagicMock()
-        mock_env.id = "prod"
-        mock_config.return_value.get_environments.return_value = [mock_env]
-        mock_perm.return_value = lambda: True
-
-        # Invalid page
-        response = client.get("/api/dashboards?env_id=prod&page=0")
-        assert response.status_code == 400
-        assert "Page must be >= 1" in response.json()["detail"]
-
-        # Invalid page_size
-        response = client.get("/api/dashboards?env_id=prod&page_size=101")
-        assert response.status_code == 400
-        assert "Page size must be between 1 and 100" in response.json()["detail"]
+def test_get_dashboards_invalid_pagination(mock_deps):
+    mock_env = MagicMock()
+    mock_env.id = "prod"
+    mock_deps["config"].get_environments.return_value = [mock_env]
+
+    # Invalid page
+    response = client.get("/api/dashboards?env_id=prod&page=0")
+    assert response.status_code == 400
+    assert "Page must be >= 1" in response.json()["detail"]
+
+    # Invalid page_size
+    response = client.get("/api/dashboards?env_id=prod&page_size=101")
+    assert response.status_code == 400
+    assert "Page size must be between 1 and 100" in response.json()["detail"]
 # [/DEF:test_get_dashboards_invalid_pagination:Function]

 # [DEF:test_get_dashboard_detail_success:Function]
 # @TEST: GET /api/dashboards/{id} returns dashboard detail with charts and datasets
-def test_get_dashboard_detail_success():
-    with patch("src.api.routes.dashboards.get_config_manager") as mock_config, \
-         patch("src.api.routes.dashboards.has_permission") as mock_perm, \
-         patch("src.api.routes.dashboards.SupersetClient") as mock_client_cls:
-
+def test_get_dashboard_detail_success(mock_deps):
+    with patch("src.api.routes.dashboards.SupersetClient") as mock_client_cls:
         mock_env = MagicMock()
         mock_env.id = "prod"
-        mock_config.return_value.get_environments.return_value = [mock_env]
-        mock_perm.return_value = lambda: True
+        mock_deps["config"].get_environments.return_value = [mock_env]

         mock_client = MagicMock()
         mock_client.get_dashboard_detail.return_value = {
@@ -205,56 +245,46 @@ def test_get_dashboard_detail_success():
 # [DEF:test_get_dashboard_detail_env_not_found:Function]
 # @TEST: GET /api/dashboards/{id} returns 404 for missing environment
-def test_get_dashboard_detail_env_not_found():
-    with patch("src.api.routes.dashboards.get_config_manager") as mock_config, \
-         patch("src.api.routes.dashboards.has_permission") as mock_perm:
-        mock_config.return_value.get_environments.return_value = []
-        mock_perm.return_value = lambda: True
-
-        response = client.get("/api/dashboards/42?env_id=missing")
-
-        assert response.status_code == 404
-        assert "Environment not found" in response.json()["detail"]
+def test_get_dashboard_detail_env_not_found(mock_deps):
+    mock_deps["config"].get_environments.return_value = []
+
+    response = client.get("/api/dashboards/42?env_id=missing")
+
+    assert response.status_code == 404
+    assert "Environment not found" in response.json()["detail"]
 # [/DEF:test_get_dashboard_detail_env_not_found:Function]

 # [DEF:test_migrate_dashboards_success:Function]
 # @TEST: POST /api/dashboards/migrate creates migration task
 # @PRE: Valid source_env_id, target_env_id, dashboard_ids
-# @POST: Returns task_id
-def test_migrate_dashboards_success():
-    with patch("src.api.routes.dashboards.get_config_manager") as mock_config, \
-         patch("src.api.routes.dashboards.get_task_manager") as mock_task_mgr, \
-         patch("src.api.routes.dashboards.has_permission") as mock_perm:
-
-        # Mock environments
-        mock_source = MagicMock()
-        mock_source.id = "source"
-        mock_target = MagicMock()
-        mock_target.id = "target"
-        mock_config.return_value.get_environments.return_value = [mock_source, mock_target]
-
-        # Mock task manager
-        mock_task = MagicMock()
-        mock_task.id = "task-migrate-123"
-        mock_task_mgr.return_value.create_task = AsyncMock(return_value=mock_task)
-
-        # Mock permission
-        mock_perm.return_value = lambda: True
-
-        response = client.post(
-            "/api/dashboards/migrate",
-            json={
-                "source_env_id": "source",
-                "target_env_id": "target",
-                "dashboard_ids": [1, 2, 3],
-                "db_mappings": {"old_db": "new_db"}
-            }
-        )
-
-        assert response.status_code == 200
-        data = response.json()
-        assert "task_id" in data
+# @POST: Returns task_id and create_task was called
+def test_migrate_dashboards_success(mock_deps):
+    mock_source = MagicMock()
+    mock_source.id = "source"
+    mock_target = MagicMock()
+    mock_target.id = "target"
+    mock_deps["config"].get_environments.return_value = [mock_source, mock_target]
+
+    mock_task = MagicMock()
+    mock_task.id = "task-migrate-123"
+    mock_deps["task"].create_task = AsyncMock(return_value=mock_task)
+
+    response = client.post(
+        "/api/dashboards/migrate",
+        json={
+            "source_env_id": "source",
+            "target_env_id": "target",
+            "dashboard_ids": [1, 2, 3],
+            "db_mappings": {"old_db": "new_db"}
+        }
+    )
+
+    assert response.status_code == 200
+    data = response.json()
+    assert "task_id" in data
+    # @POST/@SIDE_EFFECT: create_task was called
+    mock_deps["task"].create_task.assert_called_once()
 # [/DEF:test_migrate_dashboards_success:Function]
@@ -264,154 +294,184 @@ def test_migrate_dashboards_success():
 # @TEST: POST /api/dashboards/migrate returns 400 for empty dashboard_ids
 # @PRE: dashboard_ids is empty
 # @POST: Returns 400 error
-def test_migrate_dashboards_no_ids():
-    with patch("src.api.routes.dashboards.has_permission") as mock_perm:
-        mock_perm.return_value = lambda: True
-
-        response = client.post(
-            "/api/dashboards/migrate",
-            json={
-                "source_env_id": "source",
-                "target_env_id": "target",
-                "dashboard_ids": []
-            }
-        )
-
-        assert response.status_code == 400
-        assert "At least one dashboard ID must be provided" in response.json()["detail"]
+def test_migrate_dashboards_no_ids(mock_deps):
+    response = client.post(
+        "/api/dashboards/migrate",
+        json={
+            "source_env_id": "source",
+            "target_env_id": "target",
+            "dashboard_ids": []
+        }
+    )
+
+    assert response.status_code == 400
+    assert "At least one dashboard ID must be provided" in response.json()["detail"]
 # [/DEF:test_migrate_dashboards_no_ids:Function]
+
+# [DEF:test_migrate_dashboards_env_not_found:Function]
+# @PRE: source_env_id and target_env_id are valid environment IDs
+def test_migrate_dashboards_env_not_found(mock_deps):
+    """@PRE: source_env_id and target_env_id are valid environment IDs."""
+    mock_deps["config"].get_environments.return_value = []
+
+    response = client.post(
+        "/api/dashboards/migrate",
+        json={
+            "source_env_id": "ghost",
+            "target_env_id": "t",
+            "dashboard_ids": [1]
+        }
+    )
+
+    assert response.status_code == 404
+    assert "Source environment not found" in response.json()["detail"]
+# [/DEF:test_migrate_dashboards_env_not_found:Function]

 # [DEF:test_backup_dashboards_success:Function]
 # @TEST: POST /api/dashboards/backup creates backup task
 # @PRE: Valid env_id, dashboard_ids
-# @POST: Returns task_id
-def test_backup_dashboards_success():
-    with patch("src.api.routes.dashboards.get_config_manager") as mock_config, \
-         patch("src.api.routes.dashboards.get_task_manager") as mock_task_mgr, \
-         patch("src.api.routes.dashboards.has_permission") as mock_perm:
-
-        # Mock environment
-        mock_env = MagicMock()
-        mock_env.id = "prod"
-        mock_config.return_value.get_environments.return_value = [mock_env]
-
-        # Mock task manager
-        mock_task = MagicMock()
-        mock_task.id = "task-backup-456"
-        mock_task_mgr.return_value.create_task = AsyncMock(return_value=mock_task)
-
-        # Mock permission
-        mock_perm.return_value = lambda: True
-
-        response = client.post(
-            "/api/dashboards/backup",
-            json={
-                "env_id": "prod",
-                "dashboard_ids": [1, 2, 3],
-                "schedule": "0 0 * * *"
-            }
-        )
-
-        assert response.status_code == 200
-        data = response.json()
-        assert "task_id" in data
+# @POST: Returns task_id and create_task was called
+def test_backup_dashboards_success(mock_deps):
+    mock_env = MagicMock()
+    mock_env.id = "prod"
+    mock_deps["config"].get_environments.return_value = [mock_env]
+
+    mock_task = MagicMock()
+    mock_task.id = "task-backup-456"
+    mock_deps["task"].create_task = AsyncMock(return_value=mock_task)
+
+    response = client.post(
+        "/api/dashboards/backup",
+        json={
+            "env_id": "prod",
+            "dashboard_ids": [1, 2, 3],
+            "schedule": "0 0 * * *"
+        }
+    )
+
+    assert response.status_code == 200
+    data = response.json()
+    assert "task_id" in data
+    # @POST/@SIDE_EFFECT: create_task was called
+    mock_deps["task"].create_task.assert_called_once()
 # [/DEF:test_backup_dashboards_success:Function]
+
+# [DEF:test_backup_dashboards_env_not_found:Function]
+# @PRE: env_id is a valid environment ID
+def test_backup_dashboards_env_not_found(mock_deps):
+    """@PRE: env_id is a valid environment ID."""
+    mock_deps["config"].get_environments.return_value = []
+
+    response = client.post(
+        "/api/dashboards/backup",
+        json={
+            "env_id": "ghost",
+            "dashboard_ids": [1]
+        }
+    )
+
+    assert response.status_code == 404
+    assert "Environment not found" in response.json()["detail"]
+# [/DEF:test_backup_dashboards_env_not_found:Function]
 # [DEF:test_get_database_mappings_success:Function]
 # @TEST: GET /api/dashboards/db-mappings returns mapping suggestions
 # @PRE: Valid source_env_id, target_env_id
 # @POST: Returns list of database mappings
-def test_get_database_mappings_success():
-    with patch("src.api.routes.dashboards.get_mapping_service") as mock_service, \
-         patch("src.api.routes.dashboards.has_permission") as mock_perm:
-
-        # Mock mapping service
-        mock_service.return_value.get_suggestions = AsyncMock(return_value=[
-            {
-                "source_db": "old_sales",
-                "target_db": "new_sales",
-                "source_db_uuid": "uuid-1",
-                "target_db_uuid": "uuid-2",
-                "confidence": 0.95
-            }
-        ])
-
-        # Mock permission
-        mock_perm.return_value = lambda: True
-
-        response = client.get("/api/dashboards/db-mappings?source_env_id=prod&target_env_id=staging")
-
-        assert response.status_code == 200
-        data = response.json()
-        assert "mappings" in data
+def test_get_database_mappings_success(mock_deps):
+    mock_source = MagicMock()
+    mock_source.id = "prod"
+    mock_target = MagicMock()
+    mock_target.id = "staging"
+    mock_deps["config"].get_environments.return_value = [mock_source, mock_target]
+
+    mock_deps["mapping"].get_suggestions = AsyncMock(return_value=[
+        {
+            "source_db": "old_sales",
+            "target_db": "new_sales",
+            "source_db_uuid": "uuid-1",
+            "target_db_uuid": "uuid-2",
+            "confidence": 0.95
+        }
+    ])
+
+    response = client.get("/api/dashboards/db-mappings?source_env_id=prod&target_env_id=staging")
+
+    assert response.status_code == 200
+    data = response.json()
+    assert "mappings" in data
+    assert len(data["mappings"]) == 1
+    assert data["mappings"][0]["confidence"] == 0.95
 # [/DEF:test_get_database_mappings_success:Function]
+
+# [DEF:test_get_database_mappings_env_not_found:Function]
+# @PRE: source_env_id and target_env_id are valid environment IDs
+def test_get_database_mappings_env_not_found(mock_deps):
+    """@PRE: source_env_id must be a valid environment."""
+    mock_deps["config"].get_environments.return_value = []
+
+    response = client.get("/api/dashboards/db-mappings?source_env_id=ghost&target_env_id=t")
+
+    assert response.status_code == 404
+# [/DEF:test_get_database_mappings_env_not_found:Function]

 # [DEF:test_get_dashboard_tasks_history_filters_success:Function]
 # @TEST: GET /api/dashboards/{id}/tasks returns backup and llm tasks for dashboard
-def test_get_dashboard_tasks_history_filters_success():
-    with patch("src.api.routes.dashboards.get_task_manager") as mock_task_mgr, \
-         patch("src.api.routes.dashboards.has_permission") as mock_perm:
-        now = datetime.now(timezone.utc)
+def test_get_dashboard_tasks_history_filters_success(mock_deps):
+    now = datetime.now(timezone.utc)

     llm_task = MagicMock()
     llm_task.id = "task-llm-1"
     llm_task.plugin_id = "llm_dashboard_validation"
     llm_task.status = "SUCCESS"
     llm_task.started_at = now
     llm_task.finished_at = now
     llm_task.params = {"dashboard_id": "42", "environment_id": "prod"}
     llm_task.result = {"summary": "LLM validation complete"}

     backup_task = MagicMock()
     backup_task.id = "task-backup-1"
     backup_task.plugin_id = "superset-backup"
     backup_task.status = "RUNNING"
     backup_task.started_at = now
     backup_task.finished_at = None
     backup_task.params = {"env": "prod", "dashboards": [42]}
     backup_task.result = {}

     other_task = MagicMock()
     other_task.id = "task-other"
     other_task.plugin_id = "superset-backup"
     other_task.status = "SUCCESS"
     other_task.started_at = now
     other_task.finished_at = now
     other_task.params = {"env": "prod", "dashboards": [777]}
     other_task.result = {}

-    mock_task_mgr.return_value.get_all_tasks.return_value = [other_task, llm_task, backup_task]
-    mock_perm.return_value = lambda: True
+    mock_deps["task"].get_all_tasks.return_value = [other_task, llm_task, backup_task]

     response = client.get("/api/dashboards/42/tasks?env_id=prod&limit=10")
     assert response.status_code == 200
     data = response.json()
     assert data["dashboard_id"] == 42
     assert len(data["items"]) == 2
     assert {item["plugin_id"] for item in data["items"]} == {"llm_dashboard_validation", "superset-backup"}
 # [/DEF:test_get_dashboard_tasks_history_filters_success:Function]

 # [DEF:test_get_dashboard_thumbnail_success:Function]
 # @TEST: GET /api/dashboards/{id}/thumbnail proxies image bytes from Superset
-def test_get_dashboard_thumbnail_success():
-    with patch("src.api.routes.dashboards.get_config_manager") as mock_config, \
-         patch("src.api.routes.dashboards.has_permission") as mock_perm, \
-         patch("src.api.routes.dashboards.SupersetClient") as mock_client_cls:
-
+def test_get_dashboard_thumbnail_success(mock_deps):
+    with patch("src.api.routes.dashboards.SupersetClient") as mock_client_cls:
         mock_env = MagicMock()
         mock_env.id = "prod"
-        mock_config.return_value.get_environments.return_value = [mock_env]
-        mock_perm.return_value = lambda: True
+        mock_deps["config"].get_environments.return_value = [mock_env]

         mock_client = MagicMock()
         mock_response = MagicMock()
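A note on the override mechanism these fixtures share: `app.dependency_overrides` is a plain dict keyed by the dependency callable itself, so an entry like `app.dependency_overrides[has_permission("plugin:migration", "READ")]` only matches if `has_permission` returns the very same callable object, for the same arguments, that the routes captured in `Depends(...)` at import time. A minimal sketch of a factory with that property, assuming nothing about the real `src.dependencies` implementation:

```python
from functools import lru_cache

@lru_cache(maxsize=None)
def has_permission(resource: str, action: str):
    # One checker object per (resource, action): repeated calls return the
    # cached callable, so dict lookups in dependency_overrides match.
    def checker():
        ...  # the real version would verify the current user's roles here
    return checker

assert has_permission("tasks", "READ") is has_permission("tasks", "READ")
```

Without such caching, each `has_permission(...)` call in the fixture would create a fresh closure and the override would silently never apply.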

View File: backend/src/api/routes/__tests__/test_datasets.py

@@ -11,6 +11,41 @@ from unittest.mock import MagicMock, patch, AsyncMock
 from fastapi.testclient import TestClient
 from src.app import app
 from src.api.routes.datasets import DatasetsResponse, DatasetDetailResponse
+from src.dependencies import get_current_user, has_permission, get_config_manager, get_task_manager, get_resource_service, get_mapping_service
+
+# Global mock user for get_current_user dependency overrides
+mock_user = MagicMock()
+mock_user.username = "testuser"
+mock_user.roles = []
+admin_role = MagicMock()
+admin_role.name = "Admin"
+mock_user.roles.append(admin_role)
+
+@pytest.fixture(autouse=True)
+def mock_deps():
+    config_manager = MagicMock()
+    task_manager = MagicMock()
+    resource_service = MagicMock()
+    mapping_service = MagicMock()
+
+    app.dependency_overrides[get_config_manager] = lambda: config_manager
+    app.dependency_overrides[get_task_manager] = lambda: task_manager
+    app.dependency_overrides[get_resource_service] = lambda: resource_service
+    app.dependency_overrides[get_mapping_service] = lambda: mapping_service
+    app.dependency_overrides[get_current_user] = lambda: mock_user
+    app.dependency_overrides[has_permission("plugin:migration", "READ")] = lambda: mock_user
+    app.dependency_overrides[has_permission("plugin:migration", "EXECUTE")] = lambda: mock_user
+    app.dependency_overrides[has_permission("plugin:backup", "EXECUTE")] = lambda: mock_user
+    app.dependency_overrides[has_permission("tasks", "READ")] = lambda: mock_user
+
+    yield {
+        "config": config_manager,
+        "task": task_manager,
+        "resource": resource_service,
+        "mapping": mapping_service
+    }
+
+    app.dependency_overrides.clear()
+
 client = TestClient(app)
@@ -20,41 +55,34 @@ client = TestClient(app)
 # @TEST: GET /api/datasets returns 200 and valid schema
 # @PRE: env_id exists
 # @POST: Response matches DatasetsResponse schema
-def test_get_datasets_success():
-    with patch("src.api.routes.datasets.get_config_manager") as mock_config, \
-         patch("src.api.routes.datasets.get_resource_service") as mock_service, \
-         patch("src.api.routes.datasets.has_permission") as mock_perm:
-
-        # Mock environment
-        mock_env = MagicMock()
-        mock_env.id = "prod"
-        mock_config.return_value.get_environments.return_value = [mock_env]
-
-        # Mock resource service response
-        mock_service.return_value.get_datasets_with_status.return_value = AsyncMock()(
-            return_value=[
-                {
-                    "id": 1,
-                    "table_name": "sales_data",
-                    "schema": "public",
-                    "database": "sales_db",
-                    "mapped_fields": {"total": 10, "mapped": 5},
-                    "last_task": {"task_id": "task-1", "status": "SUCCESS"}
-                }
-            ]
-        )
-
-        # Mock permission
-        mock_perm.return_value = lambda: True
+def test_get_datasets_success(mock_deps):
+    # Mock environment
+    mock_env = MagicMock()
+    mock_env.id = "prod"
+    mock_deps["config"].get_environments.return_value = [mock_env]
+
+    # Mock resource service response
+    mock_deps["resource"].get_datasets_with_status = AsyncMock(
+        return_value=[
+            {
+                "id": 1,
+                "table_name": "sales_data",
+                "schema": "public",
+                "database": "sales_db",
+                "mapped_fields": {"total": 10, "mapped": 5},
+                "last_task": {"task_id": "task-1", "status": "SUCCESS"}
+            }
+        ]
+    )

     response = client.get("/api/datasets?env_id=prod")

     assert response.status_code == 200
     data = response.json()
     assert "datasets" in data
     assert len(data["datasets"]) >= 0
     # Validate against Pydantic model
     DatasetsResponse(**data)
 # [/DEF:test_get_datasets_success:Function]
@@ -64,17 +92,13 @@ def test_get_datasets_success():
 # @TEST: GET /api/datasets returns 404 if env_id missing
 # @PRE: env_id does not exist
 # @POST: Returns 404 error
-def test_get_datasets_env_not_found():
-    with patch("src.api.routes.datasets.get_config_manager") as mock_config, \
-         patch("src.api.routes.datasets.has_permission") as mock_perm:
-        mock_config.return_value.get_environments.return_value = []
-        mock_perm.return_value = lambda: True
+def test_get_datasets_env_not_found(mock_deps):
+    mock_deps["config"].get_environments.return_value = []

     response = client.get("/api/datasets?env_id=nonexistent")
     assert response.status_code == 404
     assert "Environment not found" in response.json()["detail"]
 # [/DEF:test_get_datasets_env_not_found:Function]
@@ -84,24 +108,25 @@ def test_get_datasets_env_not_found():
 # @TEST: GET /api/datasets returns 400 for invalid page/page_size
 # @PRE: page < 1 or page_size > 100
 # @POST: Returns 400 error
-def test_get_datasets_invalid_pagination():
-    with patch("src.api.routes.datasets.get_config_manager") as mock_config, \
-         patch("src.api.routes.datasets.has_permission") as mock_perm:
-
-        mock_env = MagicMock()
-        mock_env.id = "prod"
-        mock_config.return_value.get_environments.return_value = [mock_env]
-        mock_perm.return_value = lambda: True
+def test_get_datasets_invalid_pagination(mock_deps):
+    mock_env = MagicMock()
+    mock_env.id = "prod"
+    mock_deps["config"].get_environments.return_value = [mock_env]

     # Invalid page
     response = client.get("/api/datasets?env_id=prod&page=0")
     assert response.status_code == 400
     assert "Page must be >= 1" in response.json()["detail"]

-    # Invalid page_size
+    # Invalid page_size (too small)
     response = client.get("/api/datasets?env_id=prod&page_size=0")
     assert response.status_code == 400
     assert "Page size must be between 1 and 100" in response.json()["detail"]
+
+    # @TEST_EDGE: page_size > 100 exceeds max
+    response = client.get("/api/datasets?env_id=prod&page_size=101")
+    assert response.status_code == 400
+    assert "Page size must be between 1 and 100" in response.json()["detail"]
 # [/DEF:test_get_datasets_invalid_pagination:Function]
@@ -111,36 +136,31 @@ def test_get_datasets_invalid_pagination():
 # @TEST: POST /api/datasets/map-columns creates mapping task
 # @PRE: Valid env_id, dataset_ids, source_type
 # @POST: Returns task_id
-def test_map_columns_success():
-    with patch("src.api.routes.datasets.get_config_manager") as mock_config, \
-         patch("src.api.routes.datasets.get_task_manager") as mock_task_mgr, \
-         patch("src.api.routes.datasets.has_permission") as mock_perm:
-
-        # Mock environment
-        mock_env = MagicMock()
-        mock_env.id = "prod"
-        mock_config.return_value.get_environments.return_value = [mock_env]
-
-        # Mock task manager
-        mock_task = MagicMock()
-        mock_task.id = "task-123"
-        mock_task_mgr.return_value.create_task = AsyncMock(return_value=mock_task)
-
-        # Mock permission
-        mock_perm.return_value = lambda: True
+def test_map_columns_success(mock_deps):
+    # Mock environment
+    mock_env = MagicMock()
+    mock_env.id = "prod"
+    mock_deps["config"].get_environments.return_value = [mock_env]
+
+    # Mock task manager
+    mock_task = MagicMock()
+    mock_task.id = "task-123"
+    mock_deps["task"].create_task = AsyncMock(return_value=mock_task)

     response = client.post(
         "/api/datasets/map-columns",
         json={
             "env_id": "prod",
             "dataset_ids": [1, 2, 3],
             "source_type": "postgresql"
         }
     )

     assert response.status_code == 200
     data = response.json()
     assert "task_id" in data
+    # @POST/@SIDE_EFFECT: create_task was called
+    mock_deps["task"].create_task.assert_called_once()
 # [/DEF:test_map_columns_success:Function]
@@ -150,21 +170,18 @@ def test_map_columns_success():
 # @TEST: POST /api/datasets/map-columns returns 400 for invalid source_type
 # @PRE: source_type is not 'postgresql' or 'xlsx'
 # @POST: Returns 400 error
-def test_map_columns_invalid_source_type():
-    with patch("src.api.routes.datasets.has_permission") as mock_perm:
-        mock_perm.return_value = lambda: True
-
-        response = client.post(
-            "/api/datasets/map-columns",
-            json={
-                "env_id": "prod",
-                "dataset_ids": [1],
-                "source_type": "invalid"
-            }
-        )
-
-        assert response.status_code == 400
-        assert "Source type must be 'postgresql' or 'xlsx'" in response.json()["detail"]
+def test_map_columns_invalid_source_type(mock_deps):
+    response = client.post(
+        "/api/datasets/map-columns",
+        json={
+            "env_id": "prod",
+            "dataset_ids": [1],
+            "source_type": "invalid"
+        }
+    )
+
+    assert response.status_code == 400
+    assert "Source type must be 'postgresql' or 'xlsx'" in response.json()["detail"]
 # [/DEF:test_map_columns_invalid_source_type:Function]
@@ -174,39 +191,110 @@ def test_map_columns_invalid_source_type():
 # @TEST: POST /api/datasets/generate-docs creates doc generation task
 # @PRE: Valid env_id, dataset_ids, llm_provider
 # @POST: Returns task_id
-def test_generate_docs_success():
-    with patch("src.api.routes.datasets.get_config_manager") as mock_config, \
-         patch("src.api.routes.datasets.get_task_manager") as mock_task_mgr, \
-         patch("src.api.routes.datasets.has_permission") as mock_perm:
-
-        # Mock environment
-        mock_env = MagicMock()
-        mock_env.id = "prod"
-        mock_config.return_value.get_environments.return_value = [mock_env]
-
-        # Mock task manager
-        mock_task = MagicMock()
-        mock_task.id = "task-456"
-        mock_task_mgr.return_value.create_task = AsyncMock(return_value=mock_task)
-
-        # Mock permission
-        mock_perm.return_value = lambda: True
+def test_generate_docs_success(mock_deps):
+    # Mock environment
+    mock_env = MagicMock()
+    mock_env.id = "prod"
+    mock_deps["config"].get_environments.return_value = [mock_env]
+
+    # Mock task manager
+    mock_task = MagicMock()
+    mock_task.id = "task-456"
+    mock_deps["task"].create_task = AsyncMock(return_value=mock_task)

     response = client.post(
         "/api/datasets/generate-docs",
         json={
             "env_id": "prod",
             "dataset_ids": [1],
             "llm_provider": "openai"
         }
     )

     assert response.status_code == 200
     data = response.json()
     assert "task_id" in data
+    # @POST/@SIDE_EFFECT: create_task was called
+    mock_deps["task"].create_task.assert_called_once()
 # [/DEF:test_generate_docs_success:Function]
+
+# [DEF:test_map_columns_empty_ids:Function]
+# @TEST: POST /api/datasets/map-columns returns 400 for empty dataset_ids
+# @PRE: dataset_ids is empty
+# @POST: Returns 400 error
+def test_map_columns_empty_ids(mock_deps):
+    """@PRE: dataset_ids must be non-empty."""
+    response = client.post(
+        "/api/datasets/map-columns",
+        json={
+            "env_id": "prod",
+            "dataset_ids": [],
+            "source_type": "postgresql"
+        }
+    )
+
+    assert response.status_code == 400
+    assert "At least one dataset ID must be provided" in response.json()["detail"]
+# [/DEF:test_map_columns_empty_ids:Function]
+
+# [DEF:test_generate_docs_empty_ids:Function]
+# @TEST: POST /api/datasets/generate-docs returns 400 for empty dataset_ids
+# @PRE: dataset_ids is empty
+# @POST: Returns 400 error
+def test_generate_docs_empty_ids(mock_deps):
+    """@PRE: dataset_ids must be non-empty."""
+    response = client.post(
+        "/api/datasets/generate-docs",
+        json={
+            "env_id": "prod",
+            "dataset_ids": [],
+            "llm_provider": "openai"
+        }
+    )
+
+    assert response.status_code == 400
+    assert "At least one dataset ID must be provided" in response.json()["detail"]
+# [/DEF:test_generate_docs_empty_ids:Function]
+
+# [DEF:test_generate_docs_env_not_found:Function]
+# @TEST: POST /api/datasets/generate-docs returns 404 for missing env
+# @PRE: env_id does not exist
+# @POST: Returns 404 error
+def test_generate_docs_env_not_found(mock_deps):
+    """@PRE: env_id must be a valid environment."""
+    mock_deps["config"].get_environments.return_value = []
+
+    response = client.post(
+        "/api/datasets/generate-docs",
+        json={
+            "env_id": "ghost",
+            "dataset_ids": [1],
+            "llm_provider": "openai"
+        }
+    )
+
+    assert response.status_code == 404
+    assert "Environment not found" in response.json()["detail"]
+# [/DEF:test_generate_docs_env_not_found:Function]
+
+# [DEF:test_get_datasets_superset_failure:Function]
+# @TEST_EDGE: external_superset_failure -> {status: 503}
+def test_get_datasets_superset_failure(mock_deps):
+    """@TEST_EDGE: external_superset_failure -> {status: 503}"""
+    mock_env = MagicMock()
+    mock_env.id = "bad_conn"
+    mock_deps["config"].get_environments.return_value = [mock_env]
+    mock_deps["task"].get_all_tasks.return_value = []
+    mock_deps["resource"].get_datasets_with_status = AsyncMock(
+        side_effect=Exception("Connection refused")
+    )
+
+    response = client.get("/api/datasets?env_id=bad_conn")
+
+    assert response.status_code == 503
+    assert "Failed to fetch datasets" in response.json()["detail"]
+# [/DEF:test_get_datasets_superset_failure:Function]
 # [/DEF:backend.src.api.routes.__tests__.test_datasets:Module]
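The `external_superset_failure` tests above assume the list endpoints wrap the resource-service call and convert any raised exception into an HTTP 503 whose detail starts with a fixed prefix. A minimal sketch of that error-path shape, reconstructed from the assertions rather than taken from the actual route body:

```python
from fastapi import APIRouter, Depends, HTTPException

router = APIRouter()

def get_resource_service():  # placeholder; the real app wires this dependency
    raise NotImplementedError

@router.get("/api/datasets")
async def get_datasets(env_id: str, resource_service=Depends(get_resource_service)):
    try:
        datasets = await resource_service.get_datasets_with_status(env_id)
    except Exception as exc:
        # Any Superset/connection failure surfaces as a 503 whose detail
        # begins with "Failed to fetch datasets", matching the test assertion.
        raise HTTPException(status_code=503, detail=f"Failed to fetch datasets: {exc}") from exc
    return {"datasets": datasets}
```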

View File: backend/src/api/routes/dashboards.py

@@ -1,6 +1,6 @@
 # [DEF:backend.src.api.routes.dashboards:Module]
 #
-# @TIER: STANDARD
+# @TIER: CRITICAL
 # @SEMANTICS: api, dashboards, resources, hub
 # @PURPOSE: API endpoints for the Dashboard Hub - listing dashboards with Git and task status
 # @LAYER: API
@@ -9,6 +9,27 @@
 # @RELATION: DEPENDS_ON -> backend.src.core.superset_client
 #
 # @INVARIANT: All dashboard responses include git_status and last_task metadata
+#
+# @TEST_CONTRACT: DashboardsAPI -> {
+#   required_fields: {env_id: string, page: integer, page_size: integer},
+#   optional_fields: {search: string},
+#   invariants: ["Pagination must be valid", "Environment must exist"]
+# }
+#
+# @TEST_FIXTURE: dashboard_list_happy -> {
+#   "env_id": "prod",
+#   "expected_count": 1,
+#   "dashboards": [{"id": 1, "title": "Main Revenue"}]
+# }
+#
+# @TEST_EDGE: pagination_zero_page -> {"env_id": "prod", "page": 0, "status": 400}
+# @TEST_EDGE: pagination_oversize -> {"env_id": "prod", "page_size": 101, "status": 400}
+# @TEST_EDGE: missing_env -> {"env_id": "ghost", "status": 404}
+# @TEST_EDGE: empty_dashboards -> {"env_id": "empty_env", "expected_total": 0}
+# @TEST_EDGE: external_superset_failure -> {"env_id": "bad_conn", "status": 503}
+#
+# @TEST_INVARIANT: metadata_consistency -> verifies: [dashboard_list_happy, empty_dashboards]
+#
 # [SECTION: IMPORTS]
 from fastapi import APIRouter, Depends, HTTPException, Query, Response
@@ -219,10 +240,23 @@ async def get_dashboards(
 async def get_database_mappings(
     source_env_id: str,
     target_env_id: str,
+    config_manager=Depends(get_config_manager),
     mapping_service=Depends(get_mapping_service),
     _ = Depends(has_permission("plugin:migration", "READ"))
 ):
     with belief_scope("get_database_mappings", f"source={source_env_id}, target={target_env_id}"):
+        # Validate environments exist
+        environments = config_manager.get_environments()
+        source_env = next((e for e in environments if e.id == source_env_id), None)
+        target_env = next((e for e in environments if e.id == target_env_id), None)
+        if not source_env:
+            logger.error(f"[get_database_mappings][Coherence:Failed] Source environment not found: {source_env_id}")
+            raise HTTPException(status_code=404, detail="Source environment not found")
+        if not target_env:
+            logger.error(f"[get_database_mappings][Coherence:Failed] Target environment not found: {target_env_id}")
+            raise HTTPException(status_code=404, detail="Target environment not found")
+
         try:
             # Get mapping suggestions using MappingService
             suggestions = await mapping_service.get_suggestions(source_env_id, target_env_id)

View File: backend/src/core/auth/__tests__/test_auth.py

@@ -14,6 +14,8 @@ import pytest
 from sqlalchemy import create_engine
 from sqlalchemy.orm import sessionmaker
 from src.core.database import Base
+# Import all models so they are registered with Base before create_all - both auth and mapping must be imported so Base knows about all tables
+from src.models import mapping, auth, task, report
 from src.models.auth import User, Role, Permission, ADGroupMapping
 from src.services.auth_service import AuthService
 from src.core.auth.repository import AuthRepository
@@ -176,4 +178,94 @@ def test_ad_group_mapping(auth_repo):
     assert retrieved_mapping.role_id == role.id
+
+
+def test_authenticate_user_updates_last_login(auth_service, auth_repo):
+    """@SIDE_EFFECT: authenticate_user updates last_login timestamp on success."""
+    user = User(
+        username="loginuser",
+        email="login@example.com",
+        password_hash=get_password_hash("mypassword"),
+        auth_source="LOCAL"
+    )
+    auth_repo.db.add(user)
+    auth_repo.db.commit()
+    assert user.last_login is None
+
+    authenticated = auth_service.authenticate_user("loginuser", "mypassword")
+
+    assert authenticated is not None
+    assert authenticated.last_login is not None
+
+
+def test_authenticate_inactive_user(auth_service, auth_repo):
+    """@PRE: User with is_active=False should not authenticate."""
+    user = User(
+        username="inactive_user",
+        email="inactive@example.com",
+        password_hash=get_password_hash("testpass"),
+        auth_source="LOCAL",
+        is_active=False
+    )
+    auth_repo.db.add(user)
+    auth_repo.db.commit()
+
+    result = auth_service.authenticate_user("inactive_user", "testpass")
+
+    assert result is None
+
+
+def test_verify_password_empty_hash():
+    """@PRE: verify_password with empty/None hash returns False."""
+    assert verify_password("anypassword", "") is False
+    assert verify_password("anypassword", None) is False
+
+
+def test_provision_adfs_user_new(auth_service, auth_repo):
+    """@POST: provision_adfs_user creates a new ADFS user with correct roles."""
+    # Set up a role and AD group mapping
+    role = Role(name="ADFS_Viewer", description="ADFS viewer role")
+    auth_repo.db.add(role)
+    auth_repo.db.commit()
+
+    mapping = ADGroupMapping(ad_group="DOMAIN\\Viewers", role_id=role.id)
+    auth_repo.db.add(mapping)
+    auth_repo.db.commit()
+
+    user_info = {
+        "upn": "newadfsuser@domain.com",
+        "email": "newadfsuser@domain.com",
+        "groups": ["DOMAIN\\Viewers"]
+    }
+
+    user = auth_service.provision_adfs_user(user_info)
+
+    assert user is not None
+    assert user.username == "newadfsuser@domain.com"
+    assert user.auth_source == "ADFS"
+    assert user.is_active is True
+    assert len(user.roles) == 1
+    assert user.roles[0].name == "ADFS_Viewer"
+
+
+def test_provision_adfs_user_existing(auth_service, auth_repo):
+    """@POST: provision_adfs_user updates roles for existing user."""
+    # Create existing user
+    existing = User(
+        username="existingadfs@domain.com",
+        email="existingadfs@domain.com",
+        auth_source="ADFS",
+        is_active=True
+    )
+    auth_repo.db.add(existing)
+    auth_repo.db.commit()
+
+    user_info = {
+        "upn": "existingadfs@domain.com",
+        "email": "existingadfs@domain.com",
+        "groups": []
+    }
+
+    user = auth_service.provision_adfs_user(user_info)
+
+    assert user is not None
+    assert user.username == "existingadfs@domain.com"
+    assert len(user.roles) == 0  # No matching group mappings
 # [/DEF:test_auth:Module]

View File

@@ -53,6 +53,7 @@ from ..logger import logger, belief_scope, should_log_task_level
 # @TEST_EDGE: create_task_invalid_plugin -> raises ValueError
 # @TEST_EDGE: create_task_invalid_params -> raises ValueError
 # @TEST_INVARIANT: lifecycle_management -> verifies: [valid_manager]
+class TaskManager:
     """
     Manages the lifecycle of tasks, including their creation, execution, and state tracking.
     """

View File

@@ -10,7 +10,7 @@
     },
     "changed_by_name": "Superset Admin",
     "changed_on": "2026-02-10T13:39:35.945662",
-    "changed_on_delta_humanized": "15 days ago",
+    "changed_on_delta_humanized": "16 days ago",
     "charts": [
         "TA-0001-001 test_chart"
     ],
@@ -19,7 +19,7 @@
         "id": 1,
         "last_name": "Admin"
     },
-    "created_on_delta_humanized": "15 days ago",
+    "created_on_delta_humanized": "16 days ago",
     "css": null,
     "dashboard_title": "TA-0001 Test dashboard",
     "id": 13,
@@ -54,7 +54,7 @@
         "last_name": "Admin"
     },
     "changed_on": "2026-02-10T13:38:26.175551",
-    "changed_on_humanized": "15 days ago",
+    "changed_on_humanized": "16 days ago",
     "column_formats": {},
     "columns": [
         {
@@ -424,7 +424,7 @@
         "last_name": "Admin"
     },
     "created_on": "2026-02-10T13:38:26.050436",
-    "created_on_humanized": "15 days ago",
+    "created_on_humanized": "16 days ago",
     "database": {
         "allow_multi_catalog": false,
         "backend": "postgresql",

View File: backend/src/services/__tests__/test_resource_service.py

@@ -145,7 +145,9 @@ def test_get_git_status_for_dashboard_no_repo():
     result = service._get_git_status_for_dashboard(123)

-    assert result is None
+    assert result is not None
+    assert result['sync_status'] == 'NO_REPO'
+    assert result['has_repo'] is False
 # [/DEF:test_get_git_status_for_dashboard_no_repo:Function]
@@ -212,4 +214,38 @@ def test_extract_resource_name_from_task():
 # [/DEF:test_extract_resource_name_from_task:Function]
+
+# [DEF:test_get_last_task_for_resource_empty_tasks:Function]
+# @TEST: _get_last_task_for_resource returns None for empty tasks list
+# @PRE: tasks is empty list
+# @POST: Returns None
+def test_get_last_task_for_resource_empty_tasks():
+    from src.services.resource_service import ResourceService
+    service = ResourceService()
+
+    result = service._get_last_task_for_resource("dashboard-1", [])
+
+    assert result is None
+# [/DEF:test_get_last_task_for_resource_empty_tasks:Function]
+
+# [DEF:test_get_last_task_for_resource_no_match:Function]
+# @TEST: _get_last_task_for_resource returns None when no tasks match resource_id
+# @PRE: tasks list has no matching resource_id
+# @POST: Returns None
+def test_get_last_task_for_resource_no_match():
+    from src.services.resource_service import ResourceService
+    service = ResourceService()
+
+    task = MagicMock()
+    task.id = "task-999"
+    task.status = "SUCCESS"
+    task.params = {"resource_id": "dashboard-99"}
+    task.created_at = datetime(2024, 1, 1, 10, 0, 0)
+
+    result = service._get_last_task_for_resource("dashboard-1", [task])
+
+    assert result is None
+# [/DEF:test_get_last_task_for_resource_no_match:Function]
 # [/DEF:backend.src.services.__tests__.test_resource_service:Module]

View File: backend/tests/test_dashboards_api.py

@@ -1,73 +1,366 @@
# [DEF:backend.tests.test_dashboards_api:Module] # [DEF:backend.tests.test_dashboards_api:Module]
# @TIER: STANDARD # @TIER: STANDARD
# @PURPOSE: Contract-driven tests for Dashboard Hub API # @PURPOSE: Comprehensive contract-driven tests for Dashboard Hub API
# @LAYER: Domain (Tests) # @LAYER: Domain (Tests)
# @SEMANTICS: tests, dashboards, api, contract # @SEMANTICS: tests, dashboards, api, contract, remediation
# @RELATION: TESTS -> backend.src.api.routes.dashboards import pytest
from fastapi.testclient import TestClient from fastapi.testclient import TestClient
from unittest.mock import MagicMock, patch from unittest.mock import MagicMock, patch, AsyncMock
from datetime import datetime, timezone
from src.app import app from src.app import app
from src.api.routes.dashboards import DashboardsResponse from src.api.routes.dashboards import DashboardsResponse, DashboardDetailResponse, DashboardTaskHistoryResponse, DatabaseMappingsResponse
from src.dependencies import get_current_user, has_permission, get_config_manager, get_task_manager, get_resource_service, get_mapping_service
# Global mock user
mock_user = MagicMock()
mock_user.username = "testuser"
mock_user.roles = []
admin_role = MagicMock()
admin_role.name = "Admin"
mock_user.roles.append(admin_role)
@pytest.fixture(autouse=True)
def mock_deps():
config_manager = MagicMock()
task_manager = MagicMock()
resource_service = MagicMock()
mapping_service = MagicMock()
app.dependency_overrides[get_config_manager] = lambda: config_manager
app.dependency_overrides[get_task_manager] = lambda: task_manager
app.dependency_overrides[get_resource_service] = lambda: resource_service
app.dependency_overrides[get_mapping_service] = lambda: mapping_service
app.dependency_overrides[get_current_user] = lambda: mock_user
# Overrides for specific permission checks
app.dependency_overrides[has_permission("plugin:migration", "READ")] = lambda: mock_user
app.dependency_overrides[has_permission("plugin:migration", "EXECUTE")] = lambda: mock_user
app.dependency_overrides[has_permission("plugin:backup", "EXECUTE")] = lambda: mock_user
app.dependency_overrides[has_permission("tasks", "READ")] = lambda: mock_user
app.dependency_overrides[has_permission("dashboards", "READ")] = lambda: mock_user
yield {
"config": config_manager,
"task": task_manager,
"resource": resource_service,
"mapping": mapping_service
}
app.dependency_overrides.clear()
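# Note: dependency_overrides is keyed by callable identity, so the
# has_permission(...) overrides above only take effect if has_permission
# returns a stable (e.g. cached) callable per (resource, action) pair;
# that is an assumption about src.dependencies, not something asserted here.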
client = TestClient(app)

# --- 1. get_dashboards tests ---

def test_get_dashboards_success(mock_deps):
    """Uses @TEST_FIXTURE: dashboard_list_happy data."""
    mock_env = MagicMock()
    mock_env.id = "prod"
    mock_deps["config"].get_environments.return_value = [mock_env]
    mock_deps["task"].get_all_tasks.return_value = []
# @TEST_FIXTURE: dashboard_list_happy -> {"id": 1, "title": "Main Revenue"}
mock_deps["resource"].get_dashboards_with_status = AsyncMock(return_value=[
{"id": 1, "title": "Main Revenue", "slug": "main-revenue", "git_status": {"branch": "main", "sync_status": "OK"}}
])
response = client.get("/api/dashboards?env_id=prod&page=1&page_size=10")
assert response.status_code == 200
data = response.json()
# exhaustive @POST assertions
assert "dashboards" in data
assert len(data["dashboards"]) == 1 # @TEST_FIXTURE: expected_count: 1
assert data["dashboards"][0]["title"] == "Main Revenue"
assert data["total"] == 1
assert data["page"] == 1
assert data["page_size"] == 10
assert data["total_pages"] == 1
# schema validation
DashboardsResponse(**data)
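    # DashboardsResponse(**data) re-validates the payload with Pydantic and
    # raises ValidationError on contract drift, so it doubles as a schema
    # assertion without further asserts.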
def test_get_dashboards_with_search(mock_deps):
mock_env = MagicMock()
mock_env.id = "prod"
mock_deps["config"].get_environments.return_value = [mock_env]
mock_deps["task"].get_all_tasks.return_value = []
mock_deps["resource"].get_dashboards_with_status = AsyncMock(return_value=[
{"id": 1, "title": "Sales Report", "slug": "sales"},
{"id": 2, "title": "Marketing", "slug": "marketing"}
])
    response = client.get("/api/dashboards?env_id=prod&search=sales")
assert response.status_code == 200
data = response.json()
assert len(data["dashboards"]) == 1
assert data["dashboards"][0]["title"] == "Sales Report"
def test_get_dashboards_empty(mock_deps):
"""@TEST_EDGE: empty_dashboards -> {env_id: 'empty_env', expected_total: 0}"""
mock_env = MagicMock()
mock_env.id = "empty_env"
mock_deps["config"].get_environments.return_value = [mock_env]
mock_deps["task"].get_all_tasks.return_value = []
mock_deps["resource"].get_dashboards_with_status = AsyncMock(return_value=[])
response = client.get("/api/dashboards?env_id=empty_env")
assert response.status_code == 200
data = response.json()
assert data["total"] == 0
assert len(data["dashboards"]) == 0
assert data["total_pages"] == 1
DashboardsResponse(**data)
def test_get_dashboards_superset_failure(mock_deps):
"""@TEST_EDGE: external_superset_failure -> {env_id: 'bad_conn', status: 503}"""
mock_env = MagicMock()
mock_env.id = "bad_conn"
mock_deps["config"].get_environments.return_value = [mock_env]
mock_deps["task"].get_all_tasks.return_value = []
mock_deps["resource"].get_dashboards_with_status = AsyncMock(
side_effect=Exception("Connection refused")
)
response = client.get("/api/dashboards?env_id=bad_conn")
assert response.status_code == 503
assert "Failed to fetch dashboards" in response.json()["detail"]
def test_get_dashboards_env_not_found(mock_deps):
mock_deps["config"].get_environments.return_value = []
response = client.get("/api/dashboards?env_id=nonexistent")
assert response.status_code == 404
assert "Environment not found" in response.json()["detail"]
def test_get_dashboards_invalid_pagination(mock_deps):
mock_env = MagicMock()
mock_env.id = "prod"
mock_deps["config"].get_environments.return_value = [mock_env]
# page < 1
assert client.get("/api/dashboards?env_id=prod&page=0").status_code == 400
assert client.get("/api/dashboards?env_id=prod&page=-1").status_code == 400
# page_size < 1
assert client.get("/api/dashboards?env_id=prod&page_size=0").status_code == 400
# page_size > 100
assert client.get("/api/dashboards?env_id=prod&page_size=101").status_code == 400
# --- 2. get_database_mappings tests ---
def test_get_database_mappings_success(mock_deps):
mock_s = MagicMock(); mock_s.id = "s"
mock_t = MagicMock(); mock_t.id = "t"
mock_deps["config"].get_environments.return_value = [mock_s, mock_t]
mock_deps["mapping"].get_suggestions = AsyncMock(return_value=[
{"source_db": "src", "target_db": "dst", "confidence": 0.9}
])
response = client.get("/api/dashboards/db-mappings?source_env_id=s&target_env_id=t")
assert response.status_code == 200
data = response.json()
assert len(data["mappings"]) == 1
assert data["mappings"][0]["confidence"] == 0.9
DatabaseMappingsResponse(**data)
def test_get_database_mappings_env_not_found(mock_deps):
mock_deps["config"].get_environments.return_value = []
response = client.get("/api/dashboards/db-mappings?source_env_id=ghost&target_env_id=t")
assert response.status_code == 404
# --- 3. get_dashboard_detail tests ---
def test_get_dashboard_detail_success(mock_deps):
with patch("src.api.routes.dashboards.SupersetClient") as mock_client_cls:
        mock_env = MagicMock()
        mock_env.id = "prod"
        mock_deps["config"].get_environments.return_value = [mock_env]
        mock_client = MagicMock()
        detail_payload = {
            "id": 42, "title": "Detail", "charts": [], "datasets": [],
            "chart_count": 0, "dataset_count": 0
        }
        mock_client.get_dashboard_detail.return_value = detail_payload
        mock_client_cls.return_value = mock_client

        response = client.get("/api/dashboards/42?env_id=prod")
        assert response.status_code == 200
        data = response.json()
        assert data["id"] == 42
        DashboardDetailResponse(**data)

def test_get_dashboard_detail_env_not_found(mock_deps):
    mock_deps["config"].get_environments.return_value = []
    response = client.get("/api/dashboards/42?env_id=missing")
    assert response.status_code == 404

# --- 4. get_dashboard_tasks_history tests ---

def test_get_dashboard_tasks_history_success(mock_deps):
    now = datetime.now(timezone.utc)
    task1 = MagicMock(id="t1", plugin_id="superset-backup", status="SUCCESS",
                      started_at=now, finished_at=None,
                      params={"env": "prod", "dashboards": [42]}, result={})
    mock_deps["task"].get_all_tasks.return_value = [task1]
response = client.get("/api/dashboards/42/tasks?env_id=prod")
assert response.status_code == 200
data = response.json()
assert data["dashboard_id"] == 42
assert len(data["items"]) == 1
DashboardTaskHistoryResponse(**data)
def test_get_dashboard_tasks_history_sorting(mock_deps):
"""@POST: Response contains sorted task history (newest first)."""
from datetime import timedelta
now = datetime.now(timezone.utc)
older = now - timedelta(hours=2)
newest = now
task_old = MagicMock(id="t-old", plugin_id="superset-backup", status="SUCCESS",
started_at=older, finished_at=None,
params={"env": "prod", "dashboards": [42]}, result={})
task_new = MagicMock(id="t-new", plugin_id="superset-backup", status="RUNNING",
started_at=newest, finished_at=None,
params={"env": "prod", "dashboards": [42]}, result={})
# Provide in wrong order to verify the endpoint sorts
mock_deps["task"].get_all_tasks.return_value = [task_old, task_new]
response = client.get("/api/dashboards/42/tasks?env_id=prod")
assert response.status_code == 200
data = response.json()
assert len(data["items"]) == 2
# Newest first
assert data["items"][0]["id"] == "t-new"
assert data["items"][1]["id"] == "t-old"
# --- 5. get_dashboard_thumbnail tests ---
def test_get_dashboard_thumbnail_success(mock_deps):
with patch("src.api.routes.dashboards.SupersetClient") as mock_client_cls:
mock_env = MagicMock(); mock_env.id = "prod"
mock_deps["config"].get_environments.return_value = [mock_env]
mock_client = MagicMock()
mock_response = MagicMock(status_code=200, content=b"img", headers={"Content-Type": "image/png"})
mock_client.network.request.side_effect = lambda method, endpoint, **kw: {"image_url": "url"} if method == "POST" else mock_response
mock_client_cls.return_value = mock_client
response = client.get("/api/dashboards/42/thumbnail?env_id=prod")
assert response.status_code == 200
assert response.content == b"img"
def test_get_dashboard_thumbnail_env_not_found(mock_deps):
mock_deps["config"].get_environments.return_value = []
response = client.get("/api/dashboards/42/thumbnail?env_id=missing")
assert response.status_code == 404
def test_get_dashboard_thumbnail_202(mock_deps):
"""@POST: Returns 202 when thumbnail is being prepared by Superset."""
with patch("src.api.routes.dashboards.SupersetClient") as mock_client_cls:
mock_env = MagicMock(); mock_env.id = "prod"
mock_deps["config"].get_environments.return_value = [mock_env]
mock_client = MagicMock()
        # POST cache_dashboard_screenshot returns image_url
        mock_client.network.request.side_effect = [
{"image_url": "/api/v1/dashboard/42/thumbnail/abc123/"}, # POST
MagicMock(status_code=202, json=lambda: {"message": "Thumbnail is being generated"},
headers={"Content-Type": "application/json"}) # GET thumbnail -> 202
]
mock_client_cls.return_value = mock_client
response = client.get("/api/dashboards?env_id=nonexistent") response = client.get("/api/dashboards/42/thumbnail?env_id=prod")
assert response.status_code == 202
assert response.status_code == 404 assert "Thumbnail is being generated" in response.json()["message"]
assert "Environment not found" in response.json()["detail"]
# [/DEF:test_get_dashboards_env_not_found:Function] # --- 6. migrate_dashboards tests ---
def test_migrate_dashboards_success(mock_deps):
mock_s = MagicMock(); mock_s.id = "s"
mock_t = MagicMock(); mock_t.id = "t"
mock_deps["config"].get_environments.return_value = [mock_s, mock_t]
mock_deps["task"].create_task = AsyncMock(return_value=MagicMock(id="task-123"))
response = client.post("/api/dashboards/migrate", json={
"source_env_id": "s", "target_env_id": "t", "dashboard_ids": [1]
})
assert response.status_code == 200
assert response.json()["task_id"] == "task-123"
def test_migrate_dashboards_pre_checks(mock_deps):
# Missing IDs
response = client.post("/api/dashboards/migrate", json={
"source_env_id": "s", "target_env_id": "t", "dashboard_ids": []
})
assert response.status_code == 400
assert "At least one dashboard ID must be provided" in response.json()["detail"]
def test_migrate_dashboards_env_not_found(mock_deps):
"""@PRE: source_env_id and target_env_id are valid environment IDs."""
mock_deps["config"].get_environments.return_value = []
response = client.post("/api/dashboards/migrate", json={
"source_env_id": "ghost", "target_env_id": "t", "dashboard_ids": [1]
})
assert response.status_code == 404
assert "Source environment not found" in response.json()["detail"]
# --- 7. backup_dashboards tests ---
def test_backup_dashboards_success(mock_deps):
mock_env = MagicMock(); mock_env.id = "prod"
mock_deps["config"].get_environments.return_value = [mock_env]
mock_deps["task"].create_task = AsyncMock(return_value=MagicMock(id="backup-123"))
response = client.post("/api/dashboards/backup", json={
"env_id": "prod", "dashboard_ids": [1]
})
assert response.status_code == 200
assert response.json()["task_id"] == "backup-123"
def test_backup_dashboards_pre_checks(mock_deps):
response = client.post("/api/dashboards/backup", json={
"env_id": "prod", "dashboard_ids": []
})
assert response.status_code == 400
def test_backup_dashboards_env_not_found(mock_deps):
"""@PRE: env_id is a valid environment ID."""
mock_deps["config"].get_environments.return_value = []
response = client.post("/api/dashboards/backup", json={
"env_id": "ghost", "dashboard_ids": [1]
})
assert response.status_code == 404
assert "Environment not found" in response.json()["detail"]
def test_backup_dashboards_with_schedule(mock_deps):
"""@POST: If schedule is provided, a scheduled task is created."""
mock_env = MagicMock(); mock_env.id = "prod"
mock_deps["config"].get_environments.return_value = [mock_env]
mock_deps["task"].create_task = AsyncMock(return_value=MagicMock(id="sched-456"))
response = client.post("/api/dashboards/backup", json={
"env_id": "prod", "dashboard_ids": [1], "schedule": "0 0 * * *"
})
assert response.status_code == 200
assert response.json()["task_id"] == "sched-456"
    # Verify schedule was propagated to create_task
    call_args = mock_deps["task"].create_task.call_args
    # call_args.kwargs and call_args[1] are the same mapping, so a single
    # lookup is enough here
    task_params = call_args.kwargs.get("params", {})
    assert task_params["schedule"] == "0 0 * * *"
# --- 8. Internal logic: _task_matches_dashboard ---
from src.api.routes.dashboards import _task_matches_dashboard
def test_task_matches_dashboard_logic():
task = MagicMock(plugin_id="superset-backup", params={"dashboards": [42], "env": "prod"})
assert _task_matches_dashboard(task, 42, "prod") is True
assert _task_matches_dashboard(task, 43, "prod") is False
assert _task_matches_dashboard(task, 42, "dev") is False
llm_task = MagicMock(plugin_id="llm_dashboard_validation", params={"dashboard_id": 42, "environment_id": "prod"})
assert _task_matches_dashboard(llm_task, 42, "prod") is True
assert _task_matches_dashboard(llm_task, 42, None) is True
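    # The None case pins down that a missing env_id acts as a wildcard for
    # llm_dashboard_validation tasks; tightening that behavior later will
    # fail here before it fails any API-level test.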
# [/DEF:backend.tests.test_dashboards_api:Module]

View File

@@ -1,6 +1,6 @@
import pytest
from fastapi.testclient import TestClient
from unittest.mock import MagicMock, AsyncMock
from src.app import app
from src.dependencies import get_config_manager, get_task_manager, get_resource_service, has_permission
@@ -27,10 +27,10 @@ def mock_deps():
    task_manager.get_all_tasks.return_value = []

    # Mock dashboards
    resource_service.get_dashboards_with_status = AsyncMock(return_value=[
        {"id": 1, "title": "Sales", "slug": "sales", "git_status": {"branch": "main", "sync_status": "OK"}, "last_task": None},
        {"id": 2, "title": "Marketing", "slug": "mkt", "git_status": None, "last_task": {"task_id": "t1", "status": "SUCCESS"}}
    ])

    app.dependency_overrides[get_config_manager] = lambda: config_manager
    app.dependency_overrides[get_task_manager] = lambda: task_manager
@@ -39,6 +39,10 @@ def mock_deps():
    # Bypass permission check
    mock_user = MagicMock()
    mock_user.username = "testadmin"
mock_user.roles = []
admin_role = MagicMock()
admin_role.name = "Admin"
mock_user.roles.append(admin_role)
    # Override both get_current_user and has_permission
    from src.dependencies import get_current_user
@@ -85,9 +89,9 @@ def test_get_dashboards_search(mock_deps):
# @TEST: Negative - Service failure returns 503
def test_get_datasets_success(mock_deps):
    mock_deps["resource"].get_datasets_with_status = AsyncMock(return_value=[
        {"id": 1, "table_name": "orders", "schema": "public", "database": "db1", "mapped_fields": {"total": 10, "mapped": 5}, "last_task": None}
    ])
    response = client.get("/api/datasets?env_id=env1")
    assert response.status_code == 200
@@ -102,10 +106,10 @@ def test_get_datasets_not_found(mock_deps):
    assert response.status_code == 404

def test_get_datasets_search(mock_deps):
    mock_deps["resource"].get_datasets_with_status = AsyncMock(return_value=[
        {"id": 1, "table_name": "orders", "schema": "public", "database": "db1", "mapped_fields": {"total": 10, "mapped": 5}, "last_task": None},
        {"id": 2, "table_name": "users", "schema": "public", "database": "db1", "mapped_fields": {"total": 5, "mapped": 5}, "last_task": None}
    ])
    response = client.get("/api/datasets?env_id=env1&search=orders")
    assert response.status_code == 200
@@ -114,10 +118,39 @@ def test_get_datasets_search(mock_deps):
assert data["datasets"][0]["table_name"] == "orders" assert data["datasets"][0]["table_name"] == "orders"
def test_get_datasets_service_failure(mock_deps): def test_get_datasets_service_failure(mock_deps):
mock_deps["resource"].get_datasets_with_status.side_effect = Exception("Superset down") mock_deps["resource"].get_datasets_with_status = AsyncMock(side_effect=Exception("Superset down"))
response = client.get("/api/datasets?env_id=env1") response = client.get("/api/datasets?env_id=env1")
assert response.status_code == 503 assert response.status_code == 503
assert "Failed to fetch datasets" in response.json()["detail"] assert "Failed to fetch datasets" in response.json()["detail"]
# [/DEF:test_datasets_api:Test] # [/DEF:test_datasets_api:Test]
# [DEF:test_pagination_boundaries:Test]
# @PURPOSE: Verify pagination validation for GET endpoints
# @TEST: page<1 and page_size>100 return 400
def test_get_dashboards_pagination_zero_page(mock_deps):
"""@TEST_EDGE: pagination_zero_page -> {page:0, status:400}"""
response = client.get("/api/dashboards?env_id=env1&page=0")
assert response.status_code == 400
assert "Page must be >= 1" in response.json()["detail"]
def test_get_dashboards_pagination_oversize(mock_deps):
"""@TEST_EDGE: pagination_oversize -> {page_size:101, status:400}"""
response = client.get("/api/dashboards?env_id=env1&page_size=101")
assert response.status_code == 400
assert "Page size must be between 1 and 100" in response.json()["detail"]
def test_get_datasets_pagination_zero_page(mock_deps):
"""@TEST_EDGE: pagination_zero_page on datasets"""
response = client.get("/api/datasets?env_id=env1&page=0")
assert response.status_code == 400
def test_get_datasets_pagination_oversize(mock_deps):
"""@TEST_EDGE: pagination_oversize on datasets"""
response = client.get("/api/datasets?env_id=env1&page_size=101")
assert response.status_code == 400
# [/DEF:test_pagination_boundaries:Test]
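# A parametrized variant (a sketch; it assumes exactly the 400 behavior the
# four tests above already pin down) would cover the same boundaries with
# less duplication:
#
# @pytest.mark.parametrize("endpoint", ["/api/dashboards", "/api/datasets"])
# @pytest.mark.parametrize("query", ["page=0", "page_size=101"])
# def test_pagination_rejected(mock_deps, endpoint, query):
#     assert client.get(f"{endpoint}?env_id=env1&{query}").status_code == 400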

View File

@@ -41,7 +41,7 @@ describe('AssistantChatPanel integration contract', () => {
        const source = fs.readFileSync(COMPONENT_PATH, 'utf-8');
        expect(source).toContain('<!-- [DEF' + ':AssistantChatPanel:Component] -->');
        expect(source).toContain('@TIER: CRITICAL');
        expect(source).toContain('@UX_STATE: LoadingHistory');
        expect(source).toContain('@UX_STATE: Sending');
        expect(source).toContain('@UX_STATE: Error');

View File

@@ -13,6 +13,12 @@ vi.mock('$lib/api/assistant', () => ({
    sendAssistantMessage: vi.fn()
}));
vi.mock('$lib/api', () => ({
api: {
getLlmStatus: vi.fn(() => Promise.resolve({ configured: true }))
}
}));
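// getLlmStatus is stubbed to report configured: true so the panel mounts
// past its LLM-availability gate; the response shape is assumed from the
// component's usage rather than a published contract.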
vi.mock('$lib/toasts', () => ({
    addToast: vi.fn()
}));
@@ -49,15 +55,16 @@ vi.mock('$lib/i18n', () => ({
describe('AssistantChatPanel confirmation functional tests', () => {
    const mockMessage = {
        message_id: 'msg-123',
        role: 'assistant',
        text: 'Confirm migration?',
        created_at: new Date().toISOString(),
        conversation_id: 'conv-1',
        confirmation_id: 'conf-123',
        actions: [
            { type: 'confirm', label: 'Confirm' },
            { type: 'cancel', label: 'Cancel' }
        ]
    };

    beforeEach(() => {
@@ -66,20 +73,16 @@ describe('AssistantChatPanel confirmation functional tests', () => {
    it('renders action buttons and triggers confirm API call', async () => {
        // Mock getAssistantHistory to return our message
        api.getAssistantHistory.mockImplementation(async () => ({
            items: [mockMessage],
            total: 1,
            has_next: false
        }));

        render(AssistantChatPanel);

        // Wait for message to render
        const confirmBtn = await screen.findByText('Confirm', {}, { timeout: 3000 });
        expect(confirmBtn).toBeTruthy();

        await fireEvent.click(confirmBtn);
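        // Note: findByText polls the DOM until the node appears (or the 3s
        // timeout elapses), replacing the earlier waitFor + getByText pair
        // and its render race.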
@@ -88,40 +91,38 @@ describe('AssistantChatPanel confirmation functional tests', () => {
    });

    it('triggers cancel API call when cancel button is clicked', async () => {
        api.getAssistantHistory.mockImplementation(async () => ({
            items: [mockMessage],
            total: 1,
            has_next: false
        }));

        render(AssistantChatPanel);

        const cancelBtn = await screen.findByText('Cancel', {}, { timeout: 3000 });
        await fireEvent.click(cancelBtn);

        expect(api.cancelAssistantOperation).toHaveBeenCalledWith('conf-123');
    });

    it('shows toast error when action fails', async () => {
        api.getAssistantHistory.mockImplementation(async () => ({
            items: [mockMessage],
            total: 1,
            has_next: false
        }));
        api.confirmAssistantOperation.mockImplementation(async () => {
            throw new Error('Network error');
        });

        render(AssistantChatPanel);

        const confirmBtn = await screen.findByText('Confirm', {}, { timeout: 3000 });
        await fireEvent.click(confirmBtn);

        await waitFor(() => {
            // The component appends a failed message to the chat
            expect(screen.getAllByText(/Network error/)).toBeTruthy();
        }, { timeout: 3000 });
    });
});