Improve dashboard LLM validation UX and report flow

2026-02-26 17:53:41 +03:00
parent 5ec1254336
commit f4612c0737
10 changed files with 1199 additions and 30 deletions

backend/src/api/routes/__tests__/test_dashboards.py

@@ -6,6 +6,7 @@
import pytest
from unittest.mock import MagicMock, patch, AsyncMock
from datetime import datetime, timezone
from fastapi.testclient import TestClient
from src.app import app
from src.api.routes.dashboards import DashboardsResponse
@@ -354,4 +355,84 @@ def test_get_database_mappings_success():
# [/DEF:test_get_database_mappings_success:Function]
# [DEF:test_get_dashboard_tasks_history_filters_success:Function]
# @TEST: GET /api/dashboards/{id}/tasks returns backup and llm tasks for dashboard
def test_get_dashboard_tasks_history_filters_success():
    with patch("src.api.routes.dashboards.get_task_manager") as mock_task_mgr, \
         patch("src.api.routes.dashboards.has_permission") as mock_perm:
        now = datetime.now(timezone.utc)

        # LLM validation task for dashboard 42: expected in the response.
        llm_task = MagicMock()
        llm_task.id = "task-llm-1"
        llm_task.plugin_id = "llm_dashboard_validation"
        llm_task.status = "SUCCESS"
        llm_task.started_at = now
        llm_task.finished_at = now
        llm_task.params = {"dashboard_id": "42", "environment_id": "prod"}
        llm_task.result = {"summary": "LLM validation complete"}

        # Backup task covering dashboard 42: also expected in the response.
        backup_task = MagicMock()
        backup_task.id = "task-backup-1"
        backup_task.plugin_id = "superset-backup"
        backup_task.status = "RUNNING"
        backup_task.started_at = now
        backup_task.finished_at = None
        backup_task.params = {"env": "prod", "dashboards": [42]}
        backup_task.result = {}

        # Backup task for a different dashboard: must be filtered out.
        other_task = MagicMock()
        other_task.id = "task-other"
        other_task.plugin_id = "superset-backup"
        other_task.status = "SUCCESS"
        other_task.started_at = now
        other_task.finished_at = now
        other_task.params = {"env": "prod", "dashboards": [777]}
        other_task.result = {}

        mock_task_mgr.return_value.get_all_tasks.return_value = [other_task, llm_task, backup_task]
        mock_perm.return_value = lambda: True

        response = client.get("/api/dashboards/42/tasks?env_id=prod&limit=10")

        assert response.status_code == 200
        data = response.json()
        assert data["dashboard_id"] == 42
        assert len(data["items"]) == 2
        assert {item["plugin_id"] for item in data["items"]} == {"llm_dashboard_validation", "superset-backup"}
# [/DEF:test_get_dashboard_tasks_history_filters_success:Function]
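# Illustrative sketch (assumption, not part of this commit): the tasks-history
# route is presumably matching tasks to the requested dashboard roughly like
# this, which is why other_task above (dashboards=[777]) is dropped while the
# LLM and backup tasks for dashboard 42 are kept.
def _matches_dashboard_sketch(task, dashboard_id: int) -> bool:
    params = task.params or {}
    if str(params.get("dashboard_id", "")) == str(dashboard_id):
        return True  # LLM validation tasks store a single dashboard_id
    return dashboard_id in params.get("dashboards", [])  # backup tasks store a list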
# [DEF:test_get_dashboard_thumbnail_success:Function]
# @TEST: GET /api/dashboards/{id}/thumbnail proxies image bytes from Superset
def test_get_dashboard_thumbnail_success():
    with patch("src.api.routes.dashboards.get_config_manager") as mock_config, \
         patch("src.api.routes.dashboards.has_permission") as mock_perm, \
         patch("src.api.routes.dashboards.SupersetClient") as mock_client_cls:
        mock_env = MagicMock()
        mock_env.id = "prod"
        mock_config.return_value.get_environments.return_value = [mock_env]
        mock_perm.return_value = lambda: True

        mock_client = MagicMock()
        mock_response = MagicMock()
        mock_response.status_code = 200
        mock_response.content = b"fake-image-bytes"
        mock_response.headers = {"Content-Type": "image/png"}

        # The POST triggers the screenshot render and returns the image URL;
        # the follow-up GET returns the image bytes themselves.
        def _network_request(method, endpoint, **kwargs):
            if method == "POST":
                return {"image_url": "/api/v1/dashboard/42/screenshot/abc123/"}
            return mock_response

        mock_client.network.request.side_effect = _network_request
        mock_client_cls.return_value = mock_client

        response = client.get("/api/dashboards/42/thumbnail?env_id=prod")

        assert response.status_code == 200
        assert response.content == b"fake-image-bytes"
        assert response.headers["content-type"].startswith("image/png")
# [/DEF:test_get_dashboard_thumbnail_success:Function]
# [/DEF:backend.src.api.routes.__tests__.test_dashboards:Module]
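
For context, the thumbnail test exercises a proxy flow: the backend asks Superset to render a screenshot (POST), receives an image_url, fetches it (GET), and returns the raw bytes with the upstream content type. A minimal sketch of that flow, assuming a FastAPI Response and the SupersetClient.network.request interface seen in the mocks; the function name and the POST endpoint path are illustrative assumptions, not taken from this commit:

    from fastapi import Response

    def proxy_dashboard_thumbnail(client, dashboard_id: int) -> Response:
        # Ask Superset to render the screenshot; the reply carries the image URL.
        # (Endpoint path is an assumption for illustration.)
        cache = client.network.request(
            "POST", f"/api/v1/dashboard/{dashboard_id}/cache_dashboard_screenshot/"
        )
        # Fetch the rendered image and pass the bytes through unchanged.
        image = client.network.request("GET", cache["image_url"])
        return Response(
            content=image.content,
            media_type=image.headers.get("Content-Type", "image/png"),
        )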