Improve dashboard LLM validation UX and report flow

This commit is contained in:
2026-02-26 17:53:41 +03:00
parent 5ec1254336
commit f4612c0737
10 changed files with 1199 additions and 30 deletions

View File

@@ -6,6 +6,7 @@
import pytest
from unittest.mock import MagicMock, patch, AsyncMock
from datetime import datetime, timezone
from fastapi.testclient import TestClient
from src.app import app
from src.api.routes.dashboards import DashboardsResponse
@@ -354,4 +355,84 @@ def test_get_database_mappings_success():
# [/DEF:test_get_database_mappings_success:Function]
# [DEF:test_get_dashboard_tasks_history_filters_success:Function]
# @TEST: GET /api/dashboards/{id}/tasks returns backup and llm tasks for dashboard
def test_get_dashboard_tasks_history_filters_success():
    """Tasks endpoint returns only the tasks tied to dashboard 42 in env 'prod'."""
    with patch("src.api.routes.dashboards.get_task_manager") as mock_task_mgr, \
         patch("src.api.routes.dashboards.has_permission") as mock_perm:
        timestamp = datetime.now(timezone.utc)

        def make_task(task_id, plugin_id, status, params, result, finished=True):
            # Build a task stub carrying exactly the attributes the route reads.
            task = MagicMock()
            task.id = task_id
            task.plugin_id = plugin_id
            task.status = status
            task.started_at = timestamp
            task.finished_at = timestamp if finished else None
            task.params = params
            task.result = result
            return task

        llm_task = make_task(
            "task-llm-1",
            "llm_dashboard_validation",
            "SUCCESS",
            {"dashboard_id": "42", "environment_id": "prod"},
            {"summary": "LLM validation complete"},
        )
        backup_task = make_task(
            "task-backup-1",
            "superset-backup",
            "RUNNING",
            {"env": "prod", "dashboards": [42]},
            {},
            finished=False,
        )
        # Belongs to a different dashboard — must be filtered out.
        other_task = make_task(
            "task-other",
            "superset-backup",
            "SUCCESS",
            {"env": "prod", "dashboards": [777]},
            {},
        )
        mock_task_mgr.return_value.get_all_tasks.return_value = [other_task, llm_task, backup_task]
        mock_perm.return_value = lambda: True

        response = client.get("/api/dashboards/42/tasks?env_id=prod&limit=10")

        assert response.status_code == 200
        payload = response.json()
        assert payload["dashboard_id"] == 42
        assert len(payload["items"]) == 2
        assert {item["plugin_id"] for item in payload["items"]} == {"llm_dashboard_validation", "superset-backup"}
# [/DEF:test_get_dashboard_tasks_history_filters_success:Function]
# [DEF:test_get_dashboard_thumbnail_success:Function]
# @TEST: GET /api/dashboards/{id}/thumbnail proxies image bytes from Superset
def test_get_dashboard_thumbnail_success():
    """Thumbnail endpoint proxies the PNG bytes returned by Superset."""
    with patch("src.api.routes.dashboards.get_config_manager") as mock_config, \
         patch("src.api.routes.dashboards.has_permission") as mock_perm, \
         patch("src.api.routes.dashboards.SupersetClient") as mock_client_cls:
        env_stub = MagicMock()
        env_stub.id = "prod"
        mock_config.return_value.get_environments.return_value = [env_stub]
        mock_perm.return_value = lambda: True

        image_response = MagicMock()
        image_response.status_code = 200
        image_response.content = b"fake-image-bytes"
        image_response.headers = {"Content-Type": "image/png"}

        def fake_network_request(method, endpoint, **kwargs):
            # POST triggers the screenshot-cache flow; GET serves the image bytes.
            if method == "POST":
                return {"image_url": "/api/v1/dashboard/42/screenshot/abc123/"}
            return image_response

        superset_stub = MagicMock()
        superset_stub.network.request.side_effect = fake_network_request
        mock_client_cls.return_value = superset_stub

        response = client.get("/api/dashboards/42/thumbnail?env_id=prod")

        assert response.status_code == 200
        assert response.content == b"fake-image-bytes"
        assert response.headers["content-type"].startswith("image/png")
# [/DEF:test_get_dashboard_thumbnail_success:Function]
# [/DEF:backend.src.api.routes.__tests__.test_dashboards:Module]

View File

@@ -11,12 +11,16 @@
# @INVARIANT: All dashboard responses include git_status and last_task metadata
# [SECTION: IMPORTS]
from fastapi import APIRouter, Depends, HTTPException
from typing import List, Optional, Dict
from fastapi import APIRouter, Depends, HTTPException, Query, Response
from fastapi.responses import JSONResponse
from typing import List, Optional, Dict, Any
import re
from urllib.parse import urlparse
from pydantic import BaseModel, Field
from ...dependencies import get_config_manager, get_task_manager, get_resource_service, get_mapping_service, has_permission
from ...core.logger import logger, belief_scope
from ...core.superset_client import SupersetClient
from ...core.utils.network import DashboardNotFoundError
# [/SECTION]
router = APIRouter(prefix="/api/dashboards", tags=["Dashboards"])
@@ -90,6 +94,24 @@ class DashboardDetailResponse(BaseModel):
dataset_count: int
# [/DEF:DashboardDetailResponse:DataClass]
# [DEF:DashboardTaskHistoryItem:DataClass]
class DashboardTaskHistoryItem(BaseModel):
    """One backup or LLM-validation task entry in a dashboard's task history."""
    id: str                                   # Task identifier from the task manager
    plugin_id: str                            # e.g. "superset-backup" or "llm_dashboard_validation"
    status: str                               # Task execution status (e.g. RUNNING, SUCCESS)
    validation_status: Optional[str] = None   # Verdict from the task result's "status" field, when present
    started_at: Optional[str] = None          # ISO-8601 start time, if the task recorded one
    finished_at: Optional[str] = None         # ISO-8601 finish time; None while the task is running
    env_id: Optional[str] = None              # Target environment from task params, when recorded
    summary: Optional[str] = None             # Human-readable result summary, when available
# [/DEF:DashboardTaskHistoryItem:DataClass]
# [DEF:DashboardTaskHistoryResponse:DataClass]
class DashboardTaskHistoryResponse(BaseModel):
    """Response body for GET /api/dashboards/{id}/tasks."""
    dashboard_id: int                       # Dashboard the history was queried for
    items: List[DashboardTaskHistoryItem]   # Matching tasks, newest first
# [/DEF:DashboardTaskHistoryResponse:DataClass]
# [DEF:DatabaseMapping:DataClass]
class DatabaseMapping(BaseModel):
source_db: str
@@ -259,6 +281,190 @@ async def get_dashboard_detail(
raise HTTPException(status_code=503, detail=f"Failed to fetch dashboard detail: {str(e)}")
# [/DEF:get_dashboard_detail:Function]
# [DEF:_task_matches_dashboard:Function]
# @PURPOSE: Checks whether task params are tied to a specific dashboard and environment.
# @PRE: task-like object exposes plugin_id and params fields.
# @POST: Returns True only for supported task plugins tied to dashboard_id (+optional env_id).
def _task_matches_dashboard(task: Any, dashboard_id: int, env_id: Optional[str]) -> bool:
plugin_id = getattr(task, "plugin_id", None)
if plugin_id not in {"superset-backup", "llm_dashboard_validation"}:
return False
params = getattr(task, "params", {}) or {}
dashboard_id_str = str(dashboard_id)
if plugin_id == "llm_dashboard_validation":
task_dashboard_id = params.get("dashboard_id")
if str(task_dashboard_id) != dashboard_id_str:
return False
if env_id:
task_env = params.get("environment_id")
return str(task_env) == str(env_id)
return True
# superset-backup can pass dashboards as "dashboard_ids" or "dashboards"
dashboard_ids = params.get("dashboard_ids") or params.get("dashboards") or []
normalized_ids = {str(item) for item in dashboard_ids}
if dashboard_id_str not in normalized_ids:
return False
if env_id:
task_env = params.get("environment_id") or params.get("env")
return str(task_env) == str(env_id)
return True
# [/DEF:_task_matches_dashboard:Function]
# [DEF:get_dashboard_tasks_history:Function]
# @PURPOSE: Returns history of backup and LLM validation tasks for a dashboard.
# @PRE: dashboard_id is valid integer.
# @POST: Response contains sorted task history (newest first).
@router.get("/{dashboard_id:int}/tasks", response_model=DashboardTaskHistoryResponse)
async def get_dashboard_tasks_history(
    dashboard_id: int,
    env_id: Optional[str] = None,
    limit: int = Query(20, ge=1, le=100),
    task_manager=Depends(get_task_manager),
    _ = Depends(has_permission("tasks", "READ"))
):
    """Return the most recent backup / LLM-validation tasks for a dashboard.

    Filters all known tasks through _task_matches_dashboard, sorts them
    newest-first by started_at (falling back to finished_at), and maps the
    first *limit* of them onto DashboardTaskHistoryItem entries.
    """
    with belief_scope("get_dashboard_tasks_history", f"dashboard_id={dashboard_id}, env_id={env_id}, limit={limit}"):
        matching_tasks = [
            task
            for task in task_manager.get_all_tasks()
            if _task_matches_dashboard(task, dashboard_id, env_id)
        ]

        def _sort_key(task_obj: Any) -> str:
            # ISO-8601 timestamps sort lexicographically; fall back to
            # finished_at when started_at is missing or empty.
            return (
                str(getattr(task_obj, "started_at", "") or "")
                or str(getattr(task_obj, "finished_at", "") or "")
            )

        matching_tasks.sort(key=_sort_key, reverse=True)
        items = []
        for task in matching_tasks[:limit]:
            result = getattr(task, "result", None)
            summary = None
            validation_status = None
            if isinstance(result, dict):
                raw_validation_status = result.get("status")
                if raw_validation_status is not None:
                    validation_status = str(raw_validation_status)
                raw_summary = (
                    result.get("summary")
                    or result.get("status")
                    or result.get("message")
                )
                # Coerce to str: fallback fields ("status"/"message") are not
                # guaranteed to be strings and would fail response validation.
                summary = str(raw_summary) if raw_summary is not None else None
            params = getattr(task, "params", {}) or {}
            # Read each attribute once instead of repeating getattr per expression.
            started_at = getattr(task, "started_at", None)
            finished_at = getattr(task, "finished_at", None)
            raw_env = params.get("environment_id") or params.get("env")
            items.append(
                DashboardTaskHistoryItem(
                    id=str(getattr(task, "id", "")),
                    plugin_id=str(getattr(task, "plugin_id", "")),
                    status=str(getattr(task, "status", "")),
                    validation_status=validation_status,
                    started_at=started_at.isoformat() if started_at else None,
                    finished_at=finished_at.isoformat() if finished_at else None,
                    env_id=str(raw_env) if raw_env else None,
                    summary=summary,
                )
            )
        logger.info(f"[get_dashboard_tasks_history][Coherence:OK] Found {len(items)} tasks for dashboard {dashboard_id}")
        return DashboardTaskHistoryResponse(dashboard_id=dashboard_id, items=items)
# [/DEF:get_dashboard_tasks_history:Function]
# [DEF:get_dashboard_thumbnail:Function]
# @PURPOSE: Proxies Superset dashboard thumbnail with cache support.
# @PRE: env_id must exist.
# @POST: Returns image bytes or 202 when thumbnail is being prepared by Superset.
@router.get("/{dashboard_id:int}/thumbnail")
async def get_dashboard_thumbnail(
    dashboard_id: int,
    env_id: str,
    force: bool = Query(False),
    config_manager=Depends(get_config_manager),
    _ = Depends(has_permission("plugin:migration", "READ"))
):
    """Proxy a dashboard thumbnail image from the configured Superset environment.

    Tries the newer cache_dashboard_screenshot flow first; when no digest can
    be derived from it, falls back to the dashboard payload's thumbnail_url.
    Returns raw image bytes on success, or a 202 JSON body while Superset is
    still rendering the screenshot.
    """
    with belief_scope("get_dashboard_thumbnail", f"dashboard_id={dashboard_id}, env_id={env_id}, force={force}"):
        environments = config_manager.get_environments()
        env = next((e for e in environments if e.id == env_id), None)
        if not env:
            logger.error(f"[get_dashboard_thumbnail][Coherence:Failed] Environment not found: {env_id}")
            raise HTTPException(status_code=404, detail="Environment not found")
        try:
            client = SupersetClient(env)
            digest = None
            thumb_endpoint = None
            # Preferred flow (newer Superset): ask server to cache screenshot and return digest/image_url.
            try:
                screenshot_payload = client.network.request(
                    method="POST",
                    endpoint=f"/dashboard/{dashboard_id}/cache_dashboard_screenshot/",
                    json={"force": force},
                )
                # Some Superset versions nest the payload under "result".
                payload = screenshot_payload.get("result", screenshot_payload) if isinstance(screenshot_payload, dict) else {}
                image_url = payload.get("image_url", "") if isinstance(payload, dict) else ""
                if isinstance(image_url, str) and image_url:
                    # Extract the screenshot digest from the returned URL.
                    matched = re.search(r"/dashboard/\d+/(?:thumbnail|screenshot)/([^/]+)/?$", image_url)
                    if matched:
                        digest = matched.group(1)
            except DashboardNotFoundError:
                # Endpoint missing on older Superset — not fatal, fallback below handles it.
                logger.warning(
                    "[get_dashboard_thumbnail][Fallback] cache_dashboard_screenshot endpoint unavailable, fallback to dashboard.thumbnail_url"
                )
            # Fallback flow (older Superset): read thumbnail_url from dashboard payload.
            if not digest:
                dashboard_payload = client.network.request(
                    method="GET",
                    endpoint=f"/dashboard/{dashboard_id}",
                )
                dashboard_data = dashboard_payload.get("result", dashboard_payload) if isinstance(dashboard_payload, dict) else {}
                thumbnail_url = dashboard_data.get("thumbnail_url", "") if isinstance(dashboard_data, dict) else ""
                if isinstance(thumbnail_url, str) and thumbnail_url:
                    parsed = urlparse(thumbnail_url)
                    parsed_path = parsed.path or thumbnail_url
                    # Strip the /api/v1 prefix so the path can be used as a client endpoint.
                    if parsed_path.startswith("/api/v1/"):
                        parsed_path = parsed_path[len("/api/v1"):]
                    thumb_endpoint = parsed_path
                    matched = re.search(r"/dashboard/\d+/(?:thumbnail|screenshot)/([^/]+)/?$", parsed_path)
                    if matched:
                        digest = matched.group(1)
            if not thumb_endpoint:
                # Last resort: construct the endpoint from the digest (or "latest").
                thumb_endpoint = f"/dashboard/{dashboard_id}/thumbnail/{digest or 'latest'}/"
            thumb_response = client.network.request(
                method="GET",
                endpoint=thumb_endpoint,
                raw_response=True,
                allow_redirects=True,
            )
            if thumb_response.status_code == 202:
                # Superset is still rendering; pass the 202 through with its message (if any).
                payload_202: Dict[str, Any] = {}
                try:
                    payload_202 = thumb_response.json()
                except Exception:
                    payload_202 = {"message": "Thumbnail is being generated"}
                return JSONResponse(status_code=202, content=payload_202)
            content_type = thumb_response.headers.get("Content-Type", "image/png")
            return Response(content=thumb_response.content, media_type=content_type)
        except DashboardNotFoundError as e:
            logger.error(f"[get_dashboard_thumbnail][Coherence:Failed] Dashboard not found for thumbnail: {e}")
            raise HTTPException(status_code=404, detail="Dashboard thumbnail not found")
        except HTTPException:
            # Re-raise our own HTTP errors untouched.
            raise
        except Exception as e:
            logger.error(f"[get_dashboard_thumbnail][Coherence:Failed] Failed to fetch dashboard thumbnail: {e}")
            raise HTTPException(status_code=503, detail=f"Failed to fetch dashboard thumbnail: {str(e)}")
# [/DEF:get_dashboard_thumbnail:Function]
# [DEF:MigrateRequest:DataClass]
class MigrateRequest(BaseModel):
source_env_id: str = Field(..., description="Source environment ID")

View File

@@ -5,7 +5,7 @@
# @LAYER: UI (API)
from fastapi import APIRouter, Depends, HTTPException, status
from typing import List
from typing import List, Optional
from ...core.logger import logger
from ...schemas.auth import User
from ...dependencies import get_current_user as get_current_active_user
@@ -19,6 +19,20 @@ from sqlalchemy.orm import Session
router = APIRouter(tags=["LLM"])
# [/DEF:router:Global]
# [DEF:_is_valid_runtime_api_key:Function]
# @PURPOSE: Validate decrypted runtime API key presence/shape.
# @PRE: value can be None.
# @POST: Returns True only for non-placeholder key.
def _is_valid_runtime_api_key(value: Optional[str]) -> bool:
key = (value or "").strip()
if not key:
return False
if key in {"********", "EMPTY_OR_NONE"}:
return False
return len(key) >= 16
# [/DEF:_is_valid_runtime_api_key:Function]
# [DEF:get_providers:Function]
# @PURPOSE: Retrieve all LLM provider configurations.
# @PRE: User is authenticated.
@@ -47,6 +61,37 @@ async def get_providers(
]
# [/DEF:get_providers:Function]
# [DEF:get_llm_status:Function]
# @PURPOSE: Returns whether LLM runtime is configured for dashboard validation.
# @PRE: User is authenticated.
# @POST: configured=true only when an active provider with valid decrypted key exists.
@router.get("/status")
async def get_llm_status(
    current_user: User = Depends(get_current_active_user),
    db: Session = Depends(get_db)
):
    """Report whether an active, usable LLM provider is configured."""
    provider_service = LLMProviderService(db)
    # Pick the first active provider, if any.
    active = None
    for candidate in provider_service.get_all_providers():
        if candidate.is_active:
            active = candidate
            break
    if active is None:
        return {"configured": False, "reason": "no_active_provider"}
    decrypted_key = provider_service.get_decrypted_api_key(active.id)
    if not _is_valid_runtime_api_key(decrypted_key):
        return {"configured": False, "reason": "invalid_api_key"}
    return {
        "configured": True,
        "reason": "ok",
        "provider_id": active.id,
        "provider_name": active.name,
        "provider_type": active.provider_type,
        "default_model": active.default_model,
    }
# [/DEF:get_llm_status:Function]
# [DEF:create_provider:Function]
# @PURPOSE: Create a new LLM provider configuration.
# @PRE: User is authenticated and has admin permissions.

View File

@@ -144,4 +144,46 @@ async def download_file(
raise HTTPException(status_code=400, detail=str(e))
# [/DEF:download_file:Function]
# [DEF:get_file_by_path:Function]
# @PURPOSE: Retrieve a file by validated absolute/relative path under storage root.
#
# @PRE: path must resolve under configured storage root.
# @POST: Returns a FileResponse for existing files.
#
# @PARAM: path (str) - Absolute or storage-root-relative file path.
# @RETURN: FileResponse - The file content.
#
# @RELATION: CALLS -> StoragePlugin.get_storage_root
# @RELATION: CALLS -> StoragePlugin.validate_path
@router.get("/file")
async def get_file_by_path(
    path: str,
    plugin_loader=Depends(get_plugin_loader),
    _ = Depends(has_permission("plugin:storage", "READ"))
):
    """Serve a storage-managed file addressed by absolute or root-relative path."""
    with belief_scope("get_file_by_path"):
        storage_plugin: StoragePlugin = plugin_loader.get_plugin("storage-manager")
        if not storage_plugin:
            raise HTTPException(status_code=500, detail="Storage plugin not loaded")
        cleaned_path = (path or "").strip()
        if not cleaned_path:
            raise HTTPException(status_code=400, detail="Path is required")
        try:
            requested = Path(cleaned_path)
            # Relative paths are resolved against the storage root; validate_path
            # rejects anything that escapes it.
            target = requested if requested.is_absolute() else storage_plugin.get_storage_root() / requested
            abs_path = storage_plugin.validate_path(target)
        except ValueError as e:
            raise HTTPException(status_code=400, detail=str(e))
        # Path.is_file() is False for missing paths, so this covers existence too.
        if not abs_path.is_file():
            raise HTTPException(status_code=404, detail="File not found")
        return FileResponse(path=str(abs_path), filename=abs_path.name)
# [/DEF:get_file_by_path:Function]
# [/DEF:storage_routes:Module]

View File

@@ -30,6 +30,34 @@ from ...services.llm_prompt_templates import (
render_prompt,
)
# [DEF:_is_masked_or_invalid_api_key:Function]
# @PURPOSE: Guards against placeholder or malformed API keys in runtime.
# @PRE: value may be None.
# @POST: Returns True when value cannot be used for authenticated provider calls.
def _is_masked_or_invalid_api_key(value: Optional[str]) -> bool:
key = (value or "").strip()
if not key:
return True
if key in {"********", "EMPTY_OR_NONE"}:
return True
# Most provider tokens are significantly longer; short values are almost always placeholders.
return len(key) < 16
# [/DEF:_is_masked_or_invalid_api_key:Function]
# [DEF:_json_safe_value:Function]
# @PURPOSE: Recursively normalize payload values for JSON serialization.
# @PRE: value may be nested dict/list with datetime values.
# @POST: datetime values are converted to ISO strings.
def _json_safe_value(value: Any):
if isinstance(value, datetime):
return value.isoformat()
if isinstance(value, dict):
return {k: _json_safe_value(v) for k, v in value.items()}
if isinstance(value, list):
return [_json_safe_value(v) for v in value]
return value
# [/DEF:_json_safe_value:Function]
# [DEF:DashboardValidationPlugin:Class]
# @PURPOSE: Plugin for automated dashboard health analysis using LLMs.
# @RELATION: IMPLEMENTS -> backend.src.core.plugin_base.PluginBase
@@ -70,6 +98,7 @@ class DashboardValidationPlugin(PluginBase):
# @SIDE_EFFECT: Captures a screenshot, calls LLM API, and writes to the database.
async def execute(self, params: Dict[str, Any], context: Optional[TaskContext] = None):
with belief_scope("execute", f"plugin_id={self.id}"):
validation_started_at = datetime.utcnow()
# Use TaskContext logger if available, otherwise fall back to app logger
log = context.logger if context else logger
@@ -118,11 +147,10 @@ class DashboardValidationPlugin(PluginBase):
llm_log.debug(f"API Key decrypted (first 8 chars): {api_key[:8] if api_key and len(api_key) > 8 else 'EMPTY_OR_NONE'}...")
# Check if API key was successfully decrypted
if not api_key:
if _is_masked_or_invalid_api_key(api_key):
raise ValueError(
f"Failed to decrypt API key for provider {provider_id}. "
f"The provider may have been encrypted with a different encryption key. "
f"Please update the provider with a new API key through the UI."
f"Invalid API key for provider {provider_id}. "
"Please open LLM provider settings and save a real API key (not masked placeholder)."
)
# 3. Capture Screenshot
@@ -135,12 +163,15 @@ class DashboardValidationPlugin(PluginBase):
filename = f"{dashboard_id}_{datetime.now().strftime('%Y%m%d_%H%M%S')}.png"
screenshot_path = os.path.join(screenshots_dir, filename)
screenshot_started_at = datetime.utcnow()
screenshot_log.info(f"Capturing screenshot for dashboard {dashboard_id}")
await screenshot_service.capture_dashboard(dashboard_id, screenshot_path)
screenshot_log.debug(f"Screenshot saved to: {screenshot_path}")
screenshot_finished_at = datetime.utcnow()
# 4. Fetch Logs (from Environment /api/v1/log/)
logs = []
logs_fetch_started_at = datetime.utcnow()
try:
client = SupersetClient(env)
@@ -181,6 +212,7 @@ class DashboardValidationPlugin(PluginBase):
except Exception as e:
superset_log.warning(f"Failed to fetch logs from environment: {e}")
logs = [f"Error fetching remote logs: {str(e)}"]
logs_fetch_finished_at = datetime.utcnow()
# 5. Analyze with LLM
llm_client = LLMClient(
@@ -196,11 +228,13 @@ class DashboardValidationPlugin(PluginBase):
"dashboard_validation_prompt",
DEFAULT_LLM_PROMPTS["dashboard_validation_prompt"],
)
llm_call_started_at = datetime.utcnow()
analysis = await llm_client.analyze_dashboard(
screenshot_path,
logs,
prompt_template=dashboard_prompt,
)
llm_call_finished_at = datetime.utcnow()
# Log analysis summary to task logs for better visibility
llm_log.info(f"[ANALYSIS_SUMMARY] Status: {analysis['status']}")
@@ -218,6 +252,35 @@ class DashboardValidationPlugin(PluginBase):
screenshot_path=screenshot_path,
raw_response=str(analysis)
)
validation_finished_at = datetime.utcnow()
result_payload = _json_safe_value(validation_result.dict())
result_payload["screenshot_paths"] = [screenshot_path]
result_payload["logs_sent_to_llm"] = logs
result_payload["logs_sent_count"] = len(logs)
result_payload["prompt_template"] = dashboard_prompt
result_payload["provider"] = {
"id": db_provider.id,
"name": db_provider.name,
"type": db_provider.provider_type,
"base_url": db_provider.base_url,
"model": db_provider.default_model,
}
result_payload["environment_id"] = env_id
result_payload["timings"] = {
"validation_started_at": validation_started_at.isoformat(),
"validation_finished_at": validation_finished_at.isoformat(),
"validation_duration_ms": int((validation_finished_at - validation_started_at).total_seconds() * 1000),
"screenshot_started_at": screenshot_started_at.isoformat(),
"screenshot_finished_at": screenshot_finished_at.isoformat(),
"screenshot_duration_ms": int((screenshot_finished_at - screenshot_started_at).total_seconds() * 1000),
"logs_fetch_started_at": logs_fetch_started_at.isoformat(),
"logs_fetch_finished_at": logs_fetch_finished_at.isoformat(),
"logs_fetch_duration_ms": int((logs_fetch_finished_at - logs_fetch_started_at).total_seconds() * 1000),
"llm_call_started_at": llm_call_started_at.isoformat(),
"llm_call_finished_at": llm_call_finished_at.isoformat(),
"llm_call_duration_ms": int((llm_call_finished_at - llm_call_started_at).total_seconds() * 1000),
}
db_record = ValidationRecord(
dashboard_id=validation_result.dashboard_id,
@@ -225,7 +288,7 @@ class DashboardValidationPlugin(PluginBase):
summary=validation_result.summary,
issues=[issue.dict() for issue in validation_result.issues],
screenshot_path=validation_result.screenshot_path,
raw_response=validation_result.raw_response
raw_response=json.dumps(result_payload, ensure_ascii=False)
)
db.add(db_record)
db.commit()
@@ -240,7 +303,7 @@ class DashboardValidationPlugin(PluginBase):
# Final log to ensure all analysis is visible in task logs
log.info(f"Validation completed for dashboard {dashboard_id}. Status: {validation_result.status.value}")
return validation_result.dict()
return result_payload
finally:
db.close()
@@ -328,11 +391,10 @@ class DocumentationPlugin(PluginBase):
llm_log.debug(f"API Key decrypted (first 8 chars): {api_key[:8] if api_key and len(api_key) > 8 else 'EMPTY_OR_NONE'}...")
# Check if API key was successfully decrypted
if not api_key:
if _is_masked_or_invalid_api_key(api_key):
raise ValueError(
f"Failed to decrypt API key for provider {provider_id}. "
f"The provider may have been encrypted with a different encryption key. "
f"Please update the provider with a new API key through the UI."
f"Invalid API key for provider {provider_id}. "
"Please open LLM provider settings and save a real API key (not masked placeholder)."
)
# 3. Fetch Metadata (US2 / T024)

View File

@@ -100,6 +100,36 @@ async function fetchApi(endpoint) {
}
// [/DEF:fetchApi:Function]
// [DEF:fetchApiBlob:Function]
// @PURPOSE: Generic GET wrapper for binary payloads.
// @PRE: endpoint string is provided.
// @POST: Returns Blob or throws on error.
async function fetchApiBlob(endpoint, options = {}) {
  const shouldNotify = options.notifyError !== false;
  try {
    const response = await fetch(`${API_BASE_URL}${endpoint}`, { headers: getAuthHeaders() });
    if (response.status === 202) {
      // 202 means the resource is still being prepared server-side.
      const payload = await response.json().catch(() => ({ message: "Resource is being prepared" }));
      const pendingError = new Error(payload?.message || "Resource is being prepared");
      pendingError.status = 202;
      throw pendingError;
    }
    if (!response.ok) {
      throw await buildApiError(response);
    }
    return await response.blob();
  } catch (error) {
    console.error(`[api.fetchApiBlob][Coherence:Failed] Error fetching blob from ${endpoint}:`, error);
    if (shouldNotify) {
      notifyApiError(error);
    }
    throw error;
  }
}
// [/DEF:fetchApiBlob:Function]
// [DEF:postApi:Function]
// @PURPOSE: Generic POST request wrapper.
// @PRE: endpoint and body are provided.
@@ -184,6 +214,16 @@ export const api = {
return fetchApi(`/tasks${query ? `?${query}` : ''}`);
},
getTask: (taskId) => fetchApi(`/tasks/${taskId}`),
getTaskLogs: (taskId, options = {}) => {
const params = new URLSearchParams();
if (options.level) params.append('level', options.level);
if (options.source) params.append('source', options.source);
if (options.search) params.append('search', options.search);
if (options.offset != null) params.append('offset', String(options.offset));
if (options.limit != null) params.append('limit', String(options.limit));
const query = params.toString();
return fetchApi(`/tasks/${taskId}/logs${query ? `?${query}` : ''}`);
},
createTask: (pluginId, params) => postApi('/tasks', { plugin_id: pluginId, params }),
// Settings
@@ -198,6 +238,7 @@ export const api = {
getStorageSettings: () => fetchApi('/settings/storage'),
updateStorageSettings: (storage) => requestApi('/settings/storage', 'PUT', storage),
getEnvironmentsList: () => fetchApi('/environments'),
getLlmStatus: () => fetchApi('/llm/status'),
getEnvironmentDatabases: (id) => fetchApi(`/environments/${id}/databases`),
// Dashboards
@@ -209,6 +250,18 @@ export const api = {
return fetchApi(`/dashboards?${params.toString()}`);
},
getDashboardDetail: (envId, dashboardId) => fetchApi(`/dashboards/${dashboardId}?env_id=${envId}`),
getDashboardTaskHistory: (envId, dashboardId, options = {}) => {
const params = new URLSearchParams();
if (envId) params.append('env_id', envId);
if (options.limit) params.append('limit', options.limit);
return fetchApi(`/dashboards/${dashboardId}/tasks?${params.toString()}`);
},
getDashboardThumbnail: (envId, dashboardId, options = {}) => {
const params = new URLSearchParams();
params.append('env_id', envId);
if (options.force != null) params.append('force', String(Boolean(options.force)));
return fetchApiBlob(`/dashboards/${dashboardId}/thumbnail?${params.toString()}`, { notifyError: false });
},
getDatabaseMappings: (sourceEnvId, targetEnvId) => fetchApi(`/dashboards/db-mappings?source_env_id=${sourceEnvId}&target_env_id=${targetEnvId}`),
// Datasets

View File

@@ -21,6 +21,8 @@
* @UX_TEST: LoadingHistory -> {openPanel: true, expected: loading block visible}
* @UX_TEST: Sending -> {sendMessage: "branch", expected: send button disabled}
* @UX_TEST: NeedsConfirmation -> {click: confirm action, expected: started response with task_id}
* @TEST_DATA: assistant_llm_ready -> {"llmStatus":{"configured":true,"reason":"ok"},"messages":[{"role":"assistant","text":"Ready","state":"success"}]}
* @TEST_DATA: assistant_llm_not_configured -> {"llmStatus":{"configured":false,"reason":"invalid_api_key"}}
*/
import { onMount } from "svelte";
@@ -40,6 +42,7 @@
getAssistantHistory,
getAssistantConversations,
} from "$lib/api/assistant.js";
import { api } from "$lib/api.js";
import { gitService } from "../../../services/gitService.js";
const HISTORY_PAGE_SIZE = 30;
@@ -62,6 +65,8 @@
let conversationsHasNext = false;
let historyViewport = null;
let initialized = false;
let llmReady = true;
let llmStatusReason = "";
$: isOpen = $assistantChatStore?.isOpen || false;
$: conversationId = $assistantChatStore?.conversationId || null;
@@ -202,6 +207,7 @@
$: if (isOpen && !initialized) {
loadConversations(true);
loadHistory();
loadLlmStatus();
}
$: if (isOpen && initialized && conversationId) {
@@ -502,6 +508,17 @@
onMount(() => {
initialized = false;
});
// Refresh the chat panel's LLM-readiness flags from the backend.
async function loadLlmStatus() {
  let ready = false;
  let reason = "status_unavailable";
  try {
    const status = await api.getLlmStatus();
    ready = Boolean(status?.configured);
    reason = status?.reason || "";
  } catch (_err) {
    // Backend unreachable or endpoint failed — surface as "not ready".
    ready = false;
    reason = "status_unavailable";
  }
  llmReady = ready;
  llmStatusReason = reason;
}
</script>
{#if isOpen}
@@ -533,6 +550,21 @@
</div>
<div class="flex h-[calc(100%-56px)] flex-col">
{#if !llmReady}
<div class="mx-3 mt-3 rounded-lg border border-rose-300 bg-rose-50 px-3 py-2 text-xs text-rose-800">
<div class="font-semibold">{$t.dashboard?.llm_not_configured || "LLM is not configured"}</div>
<div class="mt-1 text-rose-700">
{#if llmStatusReason === "no_active_provider"}
{$t.dashboard?.llm_configure_provider || "No active LLM provider. Configure it in Admin -> LLM Settings."}
{:else if llmStatusReason === "invalid_api_key"}
{$t.dashboard?.llm_configure_key || "Invalid LLM API key. Update and save a real key in Admin -> LLM Settings."}
{:else}
{$t.dashboard?.llm_status_unavailable || "LLM status is unavailable. Check settings and backend logs."}
{/if}
</div>
</div>
{/if}
<div class="border-b border-slate-200 px-3 py-2">
<div class="mb-2 flex items-center justify-between">
<span
@@ -726,7 +758,7 @@
bind:value={input}
rows="2"
placeholder={$t.assistant?.input_placeholder}
class="min-h-[52px] w-full resize-y rounded-lg border border-slate-300 px-3 py-2 text-sm outline-none transition focus:border-sky-400 focus:ring-2 focus:ring-sky-100"
class="min-h-[52px] w-full resize-y rounded-lg border px-3 py-2 text-sm outline-none transition {llmReady ? 'border-slate-300 focus:border-sky-400 focus:ring-2 focus:ring-sky-100' : 'border-rose-300 bg-rose-50 focus:border-rose-400 focus:ring-2 focus:ring-rose-100'}"
on:keydown={handleKeydown}
></textarea>
<button

View File

@@ -16,6 +16,8 @@
* @UX_FEEDBACK: Back button returns to task list
* @UX_RECOVERY: Click outside or X button closes drawer
* @UX_RECOVERY: Back button shows task list when viewing task details
* @TEST_DATA: llm_task_success_with_fail_result -> {"activeTaskDetails":{"plugin_id":"llm_dashboard_validation","status":"SUCCESS","result":{"status":"FAIL"}}}
* @TEST_DATA: llm_task_success_with_pass_result -> {"activeTaskDetails":{"plugin_id":"llm_dashboard_validation","status":"SUCCESS","result":{"status":"PASS"}}}
*/
import { onDestroy } from "svelte";
@@ -100,6 +102,22 @@
);
}
// Map an LLM dashboard-validation task onto a badge descriptor, or null
// for any other task type.
function resolveLlmValidationStatus(task) {
  if (task?.plugin_id !== "llm_dashboard_validation") return null;
  const verdict = String(task?.result?.status || "").toUpperCase();
  switch (verdict) {
    case "FAIL":
      return { label: "FAIL", tone: "fail", icon: "!" };
    case "WARN":
      return { label: "WARN", tone: "warn", icon: "!" };
    case "PASS":
      return { label: "PASS", tone: "pass", icon: "OK" };
    default:
      return { label: "UNKNOWN", tone: "unknown", icon: "?" };
  }
}
// Tailwind class set for an LLM validation badge, keyed by tone.
function llmValidationBadgeClass(tone) {
  const classesByTone = {
    fail: "text-rose-700 bg-rose-100 border border-rose-200",
    warn: "text-amber-700 bg-amber-100 border border-amber-200",
    pass: "text-emerald-700 bg-emerald-100 border border-emerald-200",
  };
  // hasOwnProperty guard avoids picking up Object.prototype keys for odd tones.
  return Object.prototype.hasOwnProperty.call(classesByTone, tone)
    ? classesByTone[tone]
    : "text-slate-700 bg-slate-100 border border-slate-200";
}
function stopTaskDetailsPolling() {
if (taskDetailsPollInterval) {
clearInterval(taskDetailsPollInterval);
@@ -224,6 +242,18 @@
return summary;
}
if (task.plugin_id === "llm_dashboard_validation") {
summary.targetEnvId = resolveEnvironmentId(params?.environment_id || null);
summary.targetEnvName = resolveEnvironmentName(
summary.targetEnvId,
null,
);
if (result?.summary) {
summary.lines.push(result.summary);
}
return summary;
}
if (result?.summary) {
summary.lines.push(result.summary);
return summary;
@@ -262,6 +292,15 @@
}
}
// Open the standalone LLM report page for the active task in a new tab.
function handleOpenLlmReport() {
  const reportTaskId = normalizeTaskId(activeTaskId);
  if (!reportTaskId) {
    // No resolvable task id — tell the user instead of opening a dead link.
    addToast($t.tasks?.summary_link_unavailable || "Report unavailable", "error");
    return;
  }
  window.open(`/reports/llm/${encodeURIComponent(reportTaskId)}`, "_blank", "noopener,noreferrer");
}
// Connect to WebSocket for real-time logs
function connectWebSocket() {
if (!activeTaskId) return;
@@ -401,6 +440,7 @@
}
$: taskSummary = buildTaskSummary(activeTaskDetails);
$: activeTaskValidation = resolveLlmValidationStatus(activeTaskDetails);
// Cleanup on destroy
onDestroy(() => {
@@ -461,6 +501,17 @@
>{taskStatus}</span
>
{/if}
{#if activeTaskValidation}
<span
class={`text-xs font-semibold uppercase tracking-wider px-2 py-0.5 rounded-full inline-flex items-center gap-1 ${llmValidationBadgeClass(activeTaskValidation.tone)}`}
title="Dashboard validation result"
>
<span class="inline-flex min-w-[18px] items-center justify-center rounded-full bg-white/70 px-1 text-[10px] font-bold">
{activeTaskValidation.icon}
</span>
{activeTaskValidation.label}
</span>
{/if}
</div>
<div class="flex items-center gap-2">
<button
@@ -536,6 +587,14 @@
>
{$t.tasks?.show_diff || "Show diff"}
</button>
{#if activeTaskDetails?.plugin_id === "llm_dashboard_validation"}
<button
class="rounded-md border border-indigo-300 bg-indigo-50 px-2.5 py-1.5 text-xs font-semibold text-indigo-700 transition-colors hover:bg-indigo-100"
on:click={handleOpenLlmReport}
>
{$t.tasks?.open_llm_report || "Open LLM report"}
</button>
{/if}
</div>
{#if showDiff}
<div class="mt-3 rounded-md border border-slate-200 bg-white p-2">
@@ -576,6 +635,7 @@
{$t.tasks?.recent }
</h3>
{#each recentTasks as task}
{@const taskValidation = resolveLlmValidationStatus(task)}
<button
class="flex items-center gap-3 w-full p-3 mb-2 bg-slate-800 border border-slate-700 rounded-lg cursor-pointer transition-all hover:bg-slate-700 hover:border-slate-600 text-left"
on:click={() => selectTask(task)}
@@ -601,6 +661,17 @@
: 'bg-slate-500/15 text-slate-400'}"
>{task.status || $t.common?.unknown }</span
>
{#if taskValidation}
<span
class={`text-[10px] font-semibold uppercase px-2 py-1 rounded-full inline-flex items-center gap-1 ${llmValidationBadgeClass(taskValidation.tone)}`}
title="Dashboard validation result"
>
<span class="inline-flex min-w-[16px] items-center justify-center rounded-full bg-white/70 px-1 text-[9px] font-bold">
{taskValidation.icon}
</span>
{taskValidation.label}
</span>
{/if}
</button>
{/each}
</div>

View File

@@ -6,13 +6,17 @@
* @LAYER: UI
* @RELATION: BINDS_TO -> dashboard detail API
* @INVARIANT: Shows dashboard metadata, charts, and datasets for selected environment
* @TEST_DATA: dashboard_detail_ready -> {"dashboard":{"id":11,"title":"Ops","chart_count":3,"dataset_count":2},"taskHistory":[{"id":"t-1","plugin_id":"llm_dashboard_validation","status":"SUCCESS"}],"llmStatus":{"configured":true,"reason":"ok"}}
* @TEST_DATA: llm_unconfigured -> {"llmStatus":{"configured":false,"reason":"invalid_api_key"}}
*/
import { onMount } from "svelte";
import { onMount, onDestroy } from "svelte";
import { goto } from "$app/navigation";
import { page } from "$app/stores";
import { t } from "$lib/i18n";
import { api } from "$lib/api.js";
import { openDrawerForTask } from "$lib/stores/taskDrawer.js";
import { addToast } from "$lib/toasts.js";
import Icon from "$lib/ui/Icon.svelte";
$: dashboardId = $page.params.id;
@@ -21,11 +25,34 @@
let dashboard = null;
let isLoading = true;
let error = null;
// Backup/LLM task history for this dashboard (loaded by loadTaskHistory).
let taskHistory = [];
let isTaskHistoryLoading = false;
let taskHistoryError = null;
// In-flight flags for the "Run backup" / "Run LLM check" action buttons.
let isStartingBackup = false;
let isStartingValidation = false;
// Object URL for the thumbnail blob; revoked via releaseThumbnailUrl().
let thumbnailUrl = "";
let isThumbnailLoading = false;
let thumbnailError = null;
// LLM provider readiness, resolved by loadLlmStatus(); optimistic default.
let llmReady = true;
let llmStatusReason = "";
onMount(async () => {
// loadDashboardPage() already awaits loadDashboardDetail() as part of its
// Promise.all; the previous extra loadDashboardDetail() call here issued a
// redundant duplicate request for the same data on every mount.
await loadDashboardPage();
});
onDestroy(() => {
// Revoke the thumbnail object URL so the blob can be garbage-collected.
releaseThumbnailUrl();
});
// Refresh every panel of the detail page concurrently:
// dashboard metadata, task history, thumbnail (cached) and LLM readiness.
async function loadDashboardPage() {
const loaders = [
loadDashboardDetail(),
loadTaskHistory(),
loadThumbnail(false),
loadLlmStatus(),
];
await Promise.all(loaders);
}
async function loadDashboardDetail() {
if (!dashboardId || !envId) {
error = $t.dashboard?.missing_context ;
@@ -45,6 +72,144 @@
}
}
// Fetch recent backup/LLM tasks for this dashboard (up to 30 entries).
// On failure the list is cleared and the error message surfaced in the panel.
async function loadTaskHistory() {
if (!dashboardId || !envId) return;
isTaskHistoryLoading = true;
taskHistoryError = null;
try {
const response = await api.getDashboardTaskHistory(envId, dashboardId, { limit: 30 });
taskHistory = response?.items || [];
} catch (err) {
taskHistoryError = err.message || "Failed to load task history";
taskHistory = [];
} finally {
isTaskHistoryLoading = false;
}
}
// Revoke and clear the current thumbnail object URL, if any.
function releaseThumbnailUrl() {
if (!thumbnailUrl) return;
URL.revokeObjectURL(thumbnailUrl);
thumbnailUrl = "";
}
// Fetch the dashboard thumbnail as a blob and expose it via an object URL.
// `force` asks the backend to regenerate instead of serving a cached image.
// A 202 response means generation is still in progress (shown as a hint, not an error).
async function loadThumbnail(force = false) {
if (!dashboardId || !envId) return;
isThumbnailLoading = true;
thumbnailError = null;
try {
const blob = await api.getDashboardThumbnail(envId, dashboardId, { force });
// Release the previous object URL before replacing it to avoid leaks.
releaseThumbnailUrl();
thumbnailUrl = URL.createObjectURL(blob);
} catch (err) {
if (err?.status === 202) {
thumbnailError = $t.dashboard?.thumbnail_generating || "Thumbnail is being generated";
} else {
thumbnailError = err.message || $t.dashboard?.thumbnail_failed || "Failed to load thumbnail";
}
} finally {
isThumbnailLoading = false;
}
}
// Start a backup task for this single dashboard, open the task drawer for it,
// and refresh the task history list. Guarded against double-submission.
async function runBackupTask() {
if (isStartingBackup || !envId || !dashboardId) return;
isStartingBackup = true;
try {
const response = await api.postApi("/dashboards/backup", {
env_id: envId,
// Backup endpoint expects numeric dashboard ids.
dashboard_ids: [Number(dashboardId)],
});
const taskId = response?.task_id;
if (taskId) {
openDrawerForTask(taskId);
addToast($t.dashboard?.backup_started || "Backup task started", "success");
}
await loadTaskHistory();
} catch (err) {
addToast(err.message || $t.dashboard?.backup_task_failed || "Failed to start backup", "error");
} finally {
isStartingBackup = false;
}
}
// Start an LLM validation task for this dashboard. Refuses to start when the
// LLM provider is not configured; on success opens the task drawer and
// refreshes both the task history and the (force-regenerated) thumbnail.
async function runLlmValidationTask() {
if (!llmReady) {
addToast($t.dashboard?.llm_not_configured || "LLM is not configured", "error");
return;
}
if (isStartingValidation || !envId || !dashboardId) return;
isStartingValidation = true;
try {
const response = await api.postApi("/tasks", {
plugin_id: "llm_dashboard_validation",
params: {
// Validation plugin expects string ids (unlike the backup endpoint).
dashboard_id: String(dashboardId),
environment_id: envId,
},
});
// Task id field name differs between API versions; accept either.
const taskId = response?.task_id || response?.id;
if (taskId) {
openDrawerForTask(taskId);
addToast($t.dashboard?.validation_started || "LLM validation started", "success");
}
await Promise.all([
loadTaskHistory(),
loadThumbnail(true),
]);
} catch (err) {
addToast(err.message || $t.dashboard?.validation_start_failed || "Failed to start LLM validation", "error");
} finally {
isStartingValidation = false;
}
}
// Open the LLM report page for a given task in a new tab (no-op without an id).
function openLlmReport(taskId) {
if (!taskId) return;
const reportUrl = `/reports/llm/${encodeURIComponent(String(taskId))}`;
window.open(reportUrl, "_blank", "noopener,noreferrer");
}
// Human-readable label for a task's plugin id; unknown plugins pass through.
function toTaskTypeLabel(pluginId) {
switch (pluginId) {
case "superset-backup":
return $t.dashboard?.backup || "Backup";
case "llm_dashboard_validation":
return $t.dashboard?.llm_check || "LLM Check";
default:
return pluginId || "-";
}
}
// Tailwind classes for a task status pill (case-insensitive match).
function getTaskStatusClasses(status) {
switch ((status || "").toLowerCase()) {
case "running":
case "pending":
return "bg-blue-100 text-blue-700";
case "success":
return "bg-emerald-100 text-emerald-700";
case "failed":
case "error":
return "bg-rose-100 text-rose-700";
case "awaiting_input":
case "waiting_input":
return "bg-amber-100 text-amber-700";
default:
return "bg-slate-100 text-slate-700";
}
}
// Validation badge descriptor for a history row. Non-LLM tasks get a neutral
// "-" placeholder; LLM tasks map validation_status PASS/WARN/FAIL
// (case-insensitive) to a badge, anything else to UNKNOWN.
function getValidationStatus(task) {
if (task?.plugin_id !== "llm_dashboard_validation") {
return { label: "-", level: "na", icon: "" };
}
const badgeByStatus = {
FAIL: { label: "FAIL", level: "fail", icon: "!" },
WARN: { label: "WARN", level: "warn", icon: "!" },
PASS: { label: "PASS", level: "pass", icon: "OK" },
};
const normalized = String(task?.validation_status || "").toUpperCase();
return badgeByStatus[normalized] || { label: "UNKNOWN", level: "unknown", icon: "?" };
}
// Tailwind classes per validation badge level; "na" and other unmapped
// levels get the muted placeholder style.
function getValidationStatusClasses(level) {
const classesByLevel = {
fail: "bg-rose-100 text-rose-700 border-rose-200",
warn: "bg-amber-100 text-amber-700 border-amber-200",
pass: "bg-emerald-100 text-emerald-700 border-emerald-200",
unknown: "bg-slate-100 text-slate-700 border-slate-200",
};
return classesByLevel[level] || "bg-slate-50 text-slate-400 border-slate-200";
}
// Navigate back to the dashboards list, preserving the environment filter.
function goBack() {
const target = `/dashboards?env_id=${encodeURIComponent(envId)}`;
goto(target);
}
@@ -59,6 +224,17 @@
if (Number.isNaN(parsed.getTime())) return "-";
return `${parsed.toLocaleDateString()} ${parsed.toLocaleTimeString([], { hour: "2-digit", minute: "2-digit" })}`;
}
// Query LLM provider readiness; on any error treat LLM as unconfigured so
// the validation button is disabled rather than failing on click.
async function loadLlmStatus() {
try {
const status = await api.getLlmStatus();
llmReady = Boolean(status?.configured);
llmStatusReason = status?.reason || "";
} catch (_err) {
llmReady = false;
llmStatusReason = "status_unavailable";
}
}
</script>
<div class="mx-auto w-full max-w-7xl space-y-6">
@@ -78,14 +254,46 @@
{$t.common?.id }: {dashboardId}{#if dashboard?.slug}{dashboard.slug}{/if}
</p>
</div>
<button
class="inline-flex items-center justify-center rounded-lg bg-primary px-4 py-2 text-sm font-medium text-white transition-colors hover:bg-primary-hover"
on:click={loadDashboardDetail}
>
{$t.common?.refresh }
</button>
<div class="flex flex-wrap items-center gap-2">
<button
class="inline-flex items-center justify-center rounded-lg border border-slate-300 bg-white px-4 py-2 text-sm font-medium text-slate-700 transition-colors hover:bg-slate-50"
on:click={runBackupTask}
disabled={isStartingBackup}
>
{isStartingBackup ? ($t.common?.loading || "Loading...") : ($t.dashboard?.run_backup || "Run backup")}
</button>
<button
class="inline-flex items-center justify-center rounded-lg border px-4 py-2 text-sm font-medium transition-colors {llmReady ? 'border-indigo-300 bg-indigo-50 text-indigo-700 hover:bg-indigo-100' : 'border-rose-300 bg-rose-50 text-rose-700 opacity-70 cursor-not-allowed'}"
on:click={runLlmValidationTask}
disabled={isStartingValidation || !llmReady}
title={!llmReady ? ($t.dashboard?.llm_not_configured || "LLM is not configured") : ""}
>
{isStartingValidation ? ($t.common?.loading || "Loading...") : ($t.dashboard?.run_llm_check || "Run LLM check")}
</button>
<button
class="inline-flex items-center justify-center rounded-lg bg-primary px-4 py-2 text-sm font-medium text-white transition-colors hover:bg-primary-hover"
on:click={loadDashboardPage}
>
{$t.common?.refresh }
</button>
</div>
</div>
{#if !llmReady}
<div class="rounded-lg border border-rose-300 bg-rose-50 px-4 py-3 text-sm text-rose-800">
<div class="font-semibold">{$t.dashboard?.llm_not_configured || "LLM is not configured"}</div>
<div class="mt-1 text-rose-700">
{#if llmStatusReason === "no_active_provider"}
{$t.dashboard?.llm_configure_provider || "No active LLM provider. Configure it in Admin -> LLM Settings."}
{:else if llmStatusReason === "invalid_api_key"}
{$t.dashboard?.llm_configure_key || "Invalid LLM API key. Update and save a real key in Admin -> LLM Settings."}
{:else}
{$t.dashboard?.llm_status_unavailable || "LLM status is unavailable. Check settings and backend logs."}
{/if}
</div>
</div>
{/if}
{#if error}
<div class="flex items-center justify-between rounded-lg border border-red-300 bg-red-50 px-4 py-3 text-red-700">
<span>{error}</span>
@@ -103,6 +311,115 @@
</div>
<div class="h-64 animate-pulse rounded-xl border border-slate-200 bg-white"></div>
{:else if dashboard}
<div class="grid grid-cols-1 gap-6 xl:grid-cols-5">
<div class="rounded-xl border border-slate-200 bg-white p-4 xl:col-span-2">
<div class="mb-3 flex items-center justify-between">
<h2 class="text-sm font-semibold uppercase tracking-wide text-slate-500">
{$t.dashboard?.api_thumbnail || "Dashboard thumbnail"}
</h2>
<button
class="rounded-md border border-slate-300 px-2 py-1 text-xs text-slate-700 hover:bg-slate-50"
on:click={() => loadThumbnail(true)}
disabled={isThumbnailLoading}
>
{$t.common?.refresh || "Refresh"}
</button>
</div>
{#if isThumbnailLoading}
<div class="h-56 animate-pulse rounded-lg bg-slate-100"></div>
{:else if thumbnailUrl}
<img
src={thumbnailUrl}
alt="Dashboard thumbnail"
class="h-56 w-full rounded-lg border border-slate-200 object-cover"
/>
{:else}
<div class="flex h-56 items-center justify-center rounded-lg border border-dashed border-slate-300 bg-slate-50 text-sm text-slate-500">
{thumbnailError || ($t.dashboard?.thumbnail_unavailable || "Thumbnail is unavailable")}
</div>
{/if}
</div>
<div class="rounded-xl border border-slate-200 bg-white p-4 xl:col-span-3">
<div class="mb-3 flex items-center justify-between">
<h2 class="text-sm font-semibold uppercase tracking-wide text-slate-500">
{$t.tasks?.recent || "Recent tasks"}
</h2>
<button
class="rounded-md border border-slate-300 px-2 py-1 text-xs text-slate-700 hover:bg-slate-50"
on:click={loadTaskHistory}
disabled={isTaskHistoryLoading}
>
{$t.common?.refresh || "Refresh"}
</button>
</div>
{#if isTaskHistoryLoading}
<div class="space-y-2">
{#each Array(4) as _}
<div class="h-10 animate-pulse rounded bg-slate-100"></div>
{/each}
</div>
{:else if taskHistoryError}
<div class="rounded-lg border border-rose-200 bg-rose-50 px-3 py-2 text-sm text-rose-700">{taskHistoryError}</div>
{:else if taskHistory.length === 0}
<div class="rounded-lg border border-slate-200 bg-slate-50 px-3 py-6 text-center text-sm text-slate-500">
{$t.tasks?.select_task || "No backup/LLM tasks yet"}
</div>
{:else}
<div class="overflow-x-auto">
<table class="min-w-full divide-y divide-slate-200 text-sm">
<thead class="bg-slate-50">
<tr>
<th class="px-3 py-2 text-left font-semibold text-slate-600">{$t.common?.type || "Type"}</th>
<th class="px-3 py-2 text-left font-semibold text-slate-600">{$t.common?.status || "Status"}</th>
<th class="px-3 py-2 text-left font-semibold text-slate-600">{$t.tasks?.result || "Check"}</th>
<th class="px-3 py-2 text-left font-semibold text-slate-600">{$t.common?.started || "Started"}</th>
<th class="px-3 py-2 text-left font-semibold text-slate-600">{$t.common?.finished || "Finished"}</th>
<th class="px-3 py-2 text-left font-semibold text-slate-600">{$t.common?.actions || "Actions"}</th>
</tr>
</thead>
<tbody class="divide-y divide-slate-100">
{#each taskHistory as task}
{@const validation = getValidationStatus(task)}
<tr>
<td class="px-3 py-2 text-slate-800">{toTaskTypeLabel(task.plugin_id)}</td>
<td class="px-3 py-2">
<span class={`rounded-full px-2 py-1 text-xs font-semibold uppercase ${getTaskStatusClasses(task.status)}`}>
{task.status}
</span>
</td>
<td class="px-3 py-2">
<span class={`inline-flex items-center gap-1 rounded-full border px-2 py-1 text-xs font-semibold uppercase ${getValidationStatusClasses(validation.level)}`}>
{#if validation.icon}
<span class="inline-flex min-w-[18px] items-center justify-center rounded-full bg-white/70 px-1 text-[10px] font-bold">
{validation.icon}
</span>
{/if}
{validation.label}
</span>
</td>
<td class="px-3 py-2 text-slate-700">{formatDate(task.started_at)}</td>
<td class="px-3 py-2 text-slate-700">{formatDate(task.finished_at)}</td>
<td class="px-3 py-2">
<div class="flex flex-wrap items-center gap-1">
{#if task.plugin_id === "llm_dashboard_validation"}
<button
class="inline-flex items-center gap-1 rounded-md border border-indigo-300 bg-indigo-50 px-2 py-1 text-xs text-indigo-700 hover:bg-indigo-100"
on:click={() => openLlmReport(task.id)}
>
{$t.tasks?.open_llm_report || "LLM report"}
</button>
{/if}
</div>
</td>
</tr>
{/each}
</tbody>
</table>
</div>
{/if}
</div>
</div>
<div class="grid grid-cols-1 gap-4 md:grid-cols-3">
<div class="rounded-xl border border-slate-200 bg-white p-4">
<p class="text-xs font-semibold uppercase tracking-wide text-slate-500">{$t.dashboard?.last_modified }</p>

View File

@@ -0,0 +1,260 @@
<!-- [DEF:frontend/src/routes/reports/llm/[taskId]/+page.svelte:Component] -->
<script>
/**
* @TIER: CRITICAL
* @PURPOSE: Full report page for LLM dashboard validation task execution.
* @LAYER: UI
* @RELATION: CALLS -> /api/tasks/{taskId}, /api/tasks/{taskId}/logs
* @RELATION: BINDS_TO -> page store
* @UX_STATE: Loading -> Skeleton placeholders visible.
* @UX_STATE: Loaded -> Full report with durations, screenshots, sent logs and task logs.
* @UX_STATE: Error -> Alert with retry action.
* @UX_FEEDBACK: Refresh button reloads report payload.
* @UX_RECOVERY: Retry via refresh button after API error.
* @TEST_DATA: llm_report_success -> {"task":{"id":"task-1","plugin_id":"llm_dashboard_validation","status":"SUCCESS","started_at":"2026-02-26T12:00:00Z","finished_at":"2026-02-26T12:00:10Z","result":{"summary":"OK","issues":[],"screenshot_paths":["/tmp/s1.png"],"logs_sent_to_llm":["[2026-02-26] explore_json"],"timings":{"validation_duration_ms":10000}}},"logs":[{"timestamp":"2026-02-26T12:00:01Z","level":"INFO","source":"llm","message":"Analyzing"}]}
* @TEST_DATA: llm_report_error -> {"error":"Failed to load LLM report"}
*/
import { onMount } from "svelte";
import { page } from "$app/stores";
import { goto } from "$app/navigation";
import { api } from "$lib/api.js";
import { t } from "$lib/i18n";
import { openDrawerForTask } from "$lib/stores/taskDrawer.js";
import Icon from "$lib/ui/Icon.svelte";
// Task id comes from the route params and is reactive to navigation.
$: taskId = $page.params.taskId;
let task = null;
// Task execution log entries loaded alongside the task payload.
let logs = [];
let isLoading = true;
let error = null;
// Render a date value as "localeDate localeTime" (with seconds);
// empty or unparsable input renders as "-".
function formatDate(value) {
if (!value) return "-";
const parsed = new Date(value);
if (Number.isNaN(parsed.getTime())) return "-";
const datePart = parsed.toLocaleDateString();
const timePart = parsed.toLocaleTimeString([], { hour: "2-digit", minute: "2-digit", second: "2-digit" });
return `${datePart} ${timePart}`;
}
// Format a millisecond duration: "<n> ms" below one second, otherwise
// seconds with two decimals. Negative or non-numeric input renders as "-".
function formatMs(value) {
const ms = Number(value);
const isValid = Number.isFinite(ms) && ms >= 0;
if (!isValid) return "-";
return ms < 1000 ? `${ms} ms` : `${(ms / 1000).toFixed(2)} s`;
}
// Map a task result payload's status (PASS/WARN/FAIL, case-insensitive)
// to a check-result badge descriptor; anything else is UNKNOWN.
function getDashboardCheckResult(resultPayload) {
const badgeByStatus = {
FAIL: { label: "FAIL", level: "fail", icon: "!" },
WARN: { label: "WARN", level: "warn", icon: "!" },
PASS: { label: "PASS", level: "pass", icon: "OK" },
};
const normalized = String(resultPayload?.status || "").toUpperCase();
return badgeByStatus[normalized] || { label: "UNKNOWN", level: "unknown", icon: "?" };
}
// Tailwind classes for the check-result badge; unknown levels use slate.
function getCheckResultClasses(level) {
const classesByLevel = {
fail: "bg-rose-100 text-rose-700 border-rose-200",
warn: "bg-amber-100 text-amber-700 border-amber-200",
pass: "bg-emerald-100 text-emerald-700 border-emerald-200",
};
return classesByLevel[level] || "bg-slate-100 text-slate-700 border-slate-200";
}
// Load the task payload and its execution logs in parallel.
// Logs are capped at 1000 entries; a non-array logs payload is treated as empty.
async function loadReport() {
if (!taskId) return;
isLoading = true;
error = null;
try {
const [taskPayload, logsPayload] = await Promise.all([
api.getTask(taskId),
api.getTaskLogs(taskId, { limit: 1000 }),
]);
task = taskPayload;
logs = Array.isArray(logsPayload) ? logsPayload : [];
} catch (err) {
error = err?.message || "Failed to load LLM report";
} finally {
isLoading = false;
}
}
// Navigate to the reports overview page.
function backToReports() {
const reportsPath = "/reports";
goto(reportsPath);
}
// Open the task drawer for this report's task (no-op without a task id).
function openTaskDetails() {
if (!taskId) return;
openDrawerForTask(taskId);
}
onMount(async () => {
// Initial report fetch; the Refresh button re-runs the same loader.
await loadReport();
});
// Derived view state, recomputed whenever the task payload changes.
$: result = task?.result || {};
$: checkResult = getDashboardCheckResult(result);
$: timings = result?.timings || {};
// Accept both the plural list and the older single-path field.
$: screenshotPaths = Array.isArray(result?.screenshot_paths)
? result.screenshot_paths
: (result?.screenshot_path ? [result.screenshot_path] : []);
$: sentLogs = Array.isArray(result?.logs_sent_to_llm) ? result.logs_sent_to_llm : [];
</script>
<div class="mx-auto w-full max-w-7xl space-y-6">
<!-- Page header: back link, title, and report actions -->
<div class="flex items-start justify-between">
<div>
<button
class="inline-flex items-center gap-2 rounded-lg px-2 py-1 text-sm text-slate-600 transition-colors hover:bg-slate-100 hover:text-slate-900"
on:click={backToReports}
>
<Icon name="chevronLeft" size={16} />
{$t.nav?.reports || "Reports"}
</button>
<h1 class="mt-2 text-2xl font-bold text-slate-900">
{$t.tasks?.result_llm_validation || "LLM Validation Report"}
</h1>
<p class="mt-1 text-sm text-slate-500">Task: {taskId}</p>
</div>
<div class="flex items-center gap-2">
<button
class="inline-flex items-center justify-center rounded-lg border border-slate-300 bg-white px-4 py-2 text-sm font-medium text-slate-700 transition-colors hover:bg-slate-50"
on:click={openTaskDetails}
>
{$t.tasks?.details_logs || "Task details and logs"}
</button>
<button
class="inline-flex items-center justify-center rounded-lg bg-primary px-4 py-2 text-sm font-medium text-white transition-colors hover:bg-primary-hover"
on:click={loadReport}
>
{$t.common?.refresh || "Refresh"}
</button>
</div>
</div>
<!-- Error banner; recovery is via the Refresh button above -->
{#if error}
<div class="rounded-lg border border-rose-300 bg-rose-50 px-4 py-3 text-rose-700">
<div>{error}</div>
</div>
{/if}
{#if isLoading}
<!-- Skeleton placeholders while the report payload loads -->
<div class="space-y-3">
<div class="h-24 animate-pulse rounded-xl border border-slate-200 bg-white"></div>
<div class="h-64 animate-pulse rounded-xl border border-slate-200 bg-white"></div>
<div class="h-64 animate-pulse rounded-xl border border-slate-200 bg-white"></div>
</div>
{:else if task}
<!-- Summary cards: status, check result, timestamps, duration -->
<div class="grid grid-cols-1 gap-4 md:grid-cols-5">
<div class="rounded-xl border border-slate-200 bg-white p-4">
<p class="text-xs font-semibold uppercase tracking-wide text-slate-500">Status</p>
<p class="mt-2 text-lg font-semibold text-slate-900">{task?.status || "-"}</p>
</div>
<div class="rounded-xl border border-slate-200 bg-white p-4">
<p class="text-xs font-semibold uppercase tracking-wide text-slate-500">Dashboard check</p>
<span class={`mt-2 inline-flex items-center gap-1 rounded-full border px-2 py-1 text-sm font-semibold uppercase ${getCheckResultClasses(checkResult.level)}`}>
<span class="inline-flex min-w-[18px] items-center justify-center rounded-full bg-white/70 px-1 text-[10px] font-bold">
{checkResult.icon}
</span>
{checkResult.label}
</span>
</div>
<div class="rounded-xl border border-slate-200 bg-white p-4">
<p class="text-xs font-semibold uppercase tracking-wide text-slate-500">Started</p>
<p class="mt-2 text-sm font-semibold text-slate-900">{formatDate(task?.started_at)}</p>
</div>
<div class="rounded-xl border border-slate-200 bg-white p-4">
<p class="text-xs font-semibold uppercase tracking-wide text-slate-500">Finished</p>
<p class="mt-2 text-sm font-semibold text-slate-900">{formatDate(task?.finished_at)}</p>
</div>
<div class="rounded-xl border border-slate-200 bg-white p-4">
<p class="text-xs font-semibold uppercase tracking-wide text-slate-500">Duration</p>
<p class="mt-2 text-lg font-semibold text-slate-900">{formatMs(timings?.validation_duration_ms)}</p>
</div>
</div>
<!-- Text report: summary line plus per-issue cards (if any) -->
<div class="rounded-xl border border-slate-200 bg-white p-4">
<h2 class="text-sm font-semibold uppercase tracking-wide text-slate-500">
Text Report
</h2>
<p class="mt-2 text-sm text-slate-800">{result?.summary || "-"}</p>
{#if Array.isArray(result?.issues) && result.issues.length > 0}
<div class="mt-4 space-y-2">
{#each result.issues as issue}
<div class="rounded-lg border border-slate-200 bg-slate-50 p-3 text-sm">
<div class="font-semibold text-slate-900">{issue?.severity || "INFO"}</div>
<div class="text-slate-700">{issue?.message || "-"}</div>
{#if issue?.location}
<div class="text-xs text-slate-500">Location: {issue.location}</div>
{/if}
</div>
{/each}
</div>
{/if}
</div>
<!-- Screenshots captured during validation, served via the storage API -->
<div class="rounded-xl border border-slate-200 bg-white p-4">
<h2 class="text-sm font-semibold uppercase tracking-wide text-slate-500">
Screenshots
</h2>
{#if screenshotPaths.length === 0}
<p class="mt-2 text-sm text-slate-500">No screenshots saved.</p>
{:else}
<div class="mt-3 grid grid-cols-1 gap-4 lg:grid-cols-2">
{#each screenshotPaths as path}
<a href={`/api/storage/file?path=${encodeURIComponent(path)}`} target="_blank" rel="noreferrer noopener" class="block">
<img
src={`/api/storage/file?path=${encodeURIComponent(path)}`}
alt="Validation screenshot"
class="h-64 w-full rounded-lg border border-slate-200 object-cover"
/>
<p class="mt-1 truncate text-xs text-slate-500">{path}</p>
</a>
{/each}
</div>
{/if}
</div>
<!-- Raw log lines that were attached to the LLM prompt -->
<div class="rounded-xl border border-slate-200 bg-white p-4">
<h2 class="text-sm font-semibold uppercase tracking-wide text-slate-500">
Logs sent to LLM ({sentLogs.length})
</h2>
{#if sentLogs.length === 0}
<p class="mt-2 text-sm text-slate-500">No source logs were attached.</p>
{:else}
<pre class="mt-3 max-h-80 overflow-auto rounded-lg bg-slate-900 p-3 text-xs text-slate-100">{sentLogs.join('\n')}</pre>
{/if}
</div>
<!-- Task execution log table (loaded by loadReport) -->
<div class="rounded-xl border border-slate-200 bg-white p-4">
<h2 class="text-sm font-semibold uppercase tracking-wide text-slate-500">
Task execution logs ({logs.length})
</h2>
{#if logs.length === 0}
<p class="mt-2 text-sm text-slate-500">No task logs available.</p>
{:else}
<div class="mt-3 max-h-96 overflow-auto rounded-lg border border-slate-200">
<table class="min-w-full divide-y divide-slate-200 text-xs">
<thead class="bg-slate-50">
<tr>
<th class="px-3 py-2 text-left font-semibold text-slate-600">Time</th>
<th class="px-3 py-2 text-left font-semibold text-slate-600">Level</th>
<th class="px-3 py-2 text-left font-semibold text-slate-600">Source</th>
<th class="px-3 py-2 text-left font-semibold text-slate-600">Message</th>
</tr>
</thead>
<tbody class="divide-y divide-slate-100">
{#each logs as logEntry}
<tr>
<td class="px-3 py-2 align-top text-slate-600">{formatDate(logEntry?.timestamp)}</td>
<td class="px-3 py-2 align-top text-slate-700">{logEntry?.level || "-"}</td>
<td class="px-3 py-2 align-top text-slate-700">{logEntry?.source || "-"}</td>
<td class="px-3 py-2 align-top text-slate-800 whitespace-pre-wrap">{logEntry?.message || "-"}</td>
</tr>
{/each}
</tbody>
</table>
</div>
{/if}
</div>
{/if}
</div>
<!-- [/DEF:frontend/src/routes/reports/llm/[taskId]/+page.svelte:Component] -->