codex specify

This commit is contained in:
2026-02-25 21:19:48 +03:00
parent b7d1ee2b71
commit 5ec1254336
40 changed files with 3535 additions and 238 deletions

View File

@@ -10,6 +10,7 @@ import os
import asyncio
from types import SimpleNamespace
from datetime import datetime, timedelta
import pytest
# Force isolated sqlite databases for test module before dependencies import.
os.environ.setdefault("DATABASE_URL", "sqlite:////tmp/ss_tools_assistant_api.db")
@@ -446,7 +447,7 @@ def test_list_conversations_groups_by_conversation_and_marks_archived():
conversation_id="conv-old",
role="user",
text="old chat",
created_at=now - timedelta(days=assistant_module.ASSISTANT_ARCHIVE_AFTER_DAYS + 2),
created_at=now - timedelta(days=32), # Hardcoded threshold+2
)
)
@@ -536,7 +537,7 @@ def test_list_conversations_archived_only_filters_active():
conversation_id="conv-archived-2",
role="user",
text="archived",
created_at=now - timedelta(days=assistant_module.ASSISTANT_ARCHIVE_AFTER_DAYS + 3),
created_at=now - timedelta(days=33), # Hardcoded threshold+3
)
)
@@ -624,5 +625,25 @@ def test_guarded_operation_confirm_roundtrip():
assert second.task_id is not None
# [DEF:test_confirm_nonexistent_id_returns_404:Function]
# @PURPOSE: Confirming a non-existent ID should raise 404.
# @PRE: user tries to confirm a random/fake UUID.
# @POST: FastAPI HTTPException with status 404.
def test_confirm_nonexistent_id_returns_404():
    from fastapi import HTTPException

    _clear_assistant_state()
    # All collaborators are in-memory fakes; only the confirmation id matters here.
    call_kwargs = dict(
        confirmation_id="non-existent-id",
        current_user=_admin_user(),
        task_manager=_FakeTaskManager(),
        config_manager=_FakeConfigManager(),
        db=_FakeDb(),
    )
    with pytest.raises(HTTPException) as exc_info:
        _run_async(assistant_module.confirm_operation(**call_kwargs))
    assert exc_info.value.status_code == 404
# [/DEF:test_guarded_operation_confirm_roundtrip:Function]
# [/DEF:backend.src.api.routes.__tests__.test_assistant_api:Module]

View File

@@ -249,6 +249,7 @@ def _make_sync_config_manager(environments):
config.environments = environments
cm = MagicMock()
cm.get_config.return_value = config
cm.get_environments.return_value = environments
return cm
@@ -343,4 +344,67 @@ async def test_trigger_sync_now_idempotent_env_upsert(db_session, _mock_env):
assert env_count == 1
# --- get_dashboards tests ---
@pytest.mark.asyncio
async def test_get_dashboards_success(_mock_env):
    # Happy path: a known env id returns the client's dashboard summary as-is.
    from src.api.routes.migration import get_dashboards

    config_mgr = _make_sync_config_manager([_mock_env])
    with patch("src.api.routes.migration.SupersetClient") as MockClient:
        fake_client = MagicMock()
        fake_client.get_dashboards_summary.return_value = [{"id": 1, "title": "Test"}]
        MockClient.return_value = fake_client
        dashboards = await get_dashboards(env_id="test-env-1", config_manager=config_mgr, _=None)
    assert len(dashboards) == 1
    assert dashboards[0]["id"] == 1
@pytest.mark.asyncio
async def test_get_dashboards_invalid_env_raises_404(_mock_env):
    # An unknown environment id must surface as HTTP 404.
    from src.api.routes.migration import get_dashboards

    config_mgr = _make_sync_config_manager([_mock_env])
    with pytest.raises(HTTPException) as exc_info:
        await get_dashboards(env_id="wrong-env", config_manager=config_mgr, _=None)
    assert exc_info.value.status_code == 404
# --- execute_migration tests ---
@pytest.mark.asyncio
async def test_execute_migration_success(_mock_env):
    # Happy path: a valid source/target pair queues a migration task.
    from src.api.routes.migration import execute_migration
    from src.models.dashboard import DashboardSelection

    config_mgr = _make_sync_config_manager([_mock_env, _mock_env])  # Need both source/target
    task_mgr = MagicMock()
    task_mgr.create_task = AsyncMock(return_value=MagicMock(id="task-123"))
    payload = DashboardSelection(
        source_env_id="test-env-1",
        target_env_id="test-env-1",
        selected_ids=[1, 2],
    )
    response = await execute_migration(
        selection=payload, config_manager=config_mgr, task_manager=task_mgr, _=None
    )
    task_mgr.create_task.assert_called_once()
    assert response["task_id"] == "task-123"
@pytest.mark.asyncio
async def test_execute_migration_invalid_env_raises_400(_mock_env):
    # An unknown target environment must be rejected with HTTP 400.
    from src.api.routes.migration import execute_migration
    from src.models.dashboard import DashboardSelection

    config_mgr = _make_sync_config_manager([_mock_env])
    payload = DashboardSelection(
        source_env_id="test-env-1",
        target_env_id="non-existent",
        selected_ids=[1],
    )
    with pytest.raises(HTTPException) as exc_info:
        await execute_migration(
            selection=payload,
            config_manager=config_mgr,
            task_manager=MagicMock(),
            _=None,
        )
    assert exc_info.value.status_code == 400
# [/DEF:backend.src.api.routes.__tests__.test_migration_routes:Module]

View File

@@ -579,6 +579,137 @@ def _resolve_dashboard_id_entity(
# [/DEF:_resolve_dashboard_id_entity:Function]
# [DEF:_get_environment_name_by_id:Function]
# @PURPOSE: Resolve human-readable environment name by id.
# @PRE: environment id may be None.
# @POST: Returns matching environment name or fallback id.
def _get_environment_name_by_id(env_id: Optional[str], config_manager: ConfigManager) -> str:
    if not env_id:
        return "unknown"
    for candidate in config_manager.get_environments():
        if candidate.id == env_id:
            return candidate.name
    # No match: fall back to the raw id so callers still get something printable.
    return env_id
# [/DEF:_get_environment_name_by_id:Function]
# [DEF:_extract_result_deep_links:Function]
# @PURPOSE: Build deep-link actions to verify task result from assistant chat.
# @PRE: task object is available.
# @POST: Returns zero or more assistant actions for dashboard open/diff.
def _extract_result_deep_links(task: Any, config_manager: ConfigManager) -> List[AssistantAction]:
    plugin_id = getattr(task, "plugin_id", None)
    params = getattr(task, "params", {}) or {}
    result = getattr(task, "result", {}) or {}
    dashboard_id: Optional[int] = None
    env_id: Optional[str] = None

    def _first_row_id(rows: Any) -> Optional[int]:
        # First row's "id" coerced to int when the payload is a non-empty list of dicts.
        if isinstance(rows, list) and rows:
            head = rows[0]
            if isinstance(head, dict) and head.get("id") is not None:
                return int(head["id"])
        return None

    if plugin_id == "superset-migration":
        dashboard_id = _first_row_id(result.get("migrated_dashboards") if isinstance(result, dict) else None)
        selected = params.get("selected_ids")
        if dashboard_id is None and isinstance(selected, list) and selected:
            dashboard_id = int(selected[0])
        env_id = params.get("target_env_id")
    elif plugin_id == "superset-backup":
        dashboard_id = _first_row_id(result.get("dashboards") if isinstance(result, dict) else None)
        requested = params.get("dashboard_ids")
        if dashboard_id is None and isinstance(requested, list) and requested:
            dashboard_id = int(requested[0])
        env_id = params.get("environment_id") or _resolve_env_id(result.get("environment"), config_manager)
    elif plugin_id == "llm_dashboard_validation":
        if params.get("dashboard_id") is not None:
            dashboard_id = int(params["dashboard_id"])
        env_id = params.get("environment_id")

    actions: List[AssistantAction] = []
    # Route link needs both the dashboard and its environment; diff link only the dashboard.
    if dashboard_id is not None and env_id:
        env_name = _get_environment_name_by_id(env_id, config_manager)
        actions.append(
            AssistantAction(
                type="open_route",
                label=f"Открыть дашборд в {env_name}",
                target=f"/dashboards/{dashboard_id}?env_id={env_id}",
            )
        )
    if dashboard_id is not None:
        actions.append(
            AssistantAction(
                type="open_diff",
                label="Показать Diff",
                target=str(dashboard_id),
            )
        )
    return actions
# [/DEF:_extract_result_deep_links:Function]
# [DEF:_build_task_observability_summary:Function]
# @PURPOSE: Build compact textual summary for completed tasks to reduce "black box" effect.
# @PRE: task may contain plugin-specific result payload.
# @POST: Returns non-empty summary line for known task types or empty string fallback.
def _build_task_observability_summary(task: Any, config_manager: ConfigManager) -> str:
    plugin_id = getattr(task, "plugin_id", None)
    status = str(getattr(task, "status", "")).upper()
    params = getattr(task, "params", {}) or {}
    result = getattr(task, "result", {}) or {}

    def _first_failure_note(rows: Any) -> str:
        # Short warning describing the first failed row, or "" when there were none.
        if not rows:
            return ""
        head = rows[0]
        return (
            f" Внимание: {head.get('title') or head.get('id')}: "
            f"{head.get('error') or 'ошибка'}."
        )

    if plugin_id == "superset-migration" and isinstance(result, dict):
        failed_rows = result.get("failed_dashboards") or []
        migrated = len(result.get("migrated_dashboards") or [])
        failed = len(failed_rows)
        # Fall back to migrated+failed when the plugin did not record the selection size.
        selected = result.get("selected_dashboards", migrated + failed)
        mappings = result.get("mapping_count", 0)
        target_env_name = _get_environment_name_by_id(params.get("target_env_id"), config_manager)
        return (
            f"Сводка миграции: выбрано {selected}, перенесено {migrated}, "
            f"с ошибками {failed}, маппингов {mappings}, целевая среда {target_env_name}."
            f"{_first_failure_note(failed_rows)}"
        )

    if plugin_id == "superset-backup" and isinstance(result, dict):
        total = int(result.get("total_dashboards", 0) or 0)
        ok = int(result.get("backed_up_dashboards", 0) or 0)
        failed = int(result.get("failed_dashboards", 0) or 0)
        env_id = params.get("environment_id") or _resolve_env_id(result.get("environment"), config_manager)
        env_name = _get_environment_name_by_id(env_id, config_manager)
        warning = _first_failure_note(result.get("failures") or [])
        return (
            f"Сводка бэкапа: среда {env_name}, всего {total}, успешно {ok}, "
            f"с ошибками {failed}. {status}.{warning}"
        )

    if plugin_id == "llm_dashboard_validation" and isinstance(result, dict):
        report_status = result.get("status") or status
        report_summary = result.get("summary") or "Итог недоступен."
        issues = result.get("issues") or []
        return f"Сводка валидации: статус {report_status}, проблем {len(issues)}. {report_summary}"

    # Fallback for unknown task payloads.
    if status in {"SUCCESS", "FAILED"}:
        return f"Задача завершена со статусом {status}."
    return ""
# [/DEF:_build_task_observability_summary:Function]
# [DEF:_parse_command:Function]
# @PURPOSE: Deterministically parse RU/EN command text into intent payload.
# @PRE: message contains raw user text and config manager resolves environments.
@@ -1146,19 +1277,29 @@ async def _dispatch_intent(
if not recent:
return "У вас пока нет задач в истории.", None, []
task = recent[0]
actions = [AssistantAction(type="open_task", label="Open Task", target=task.id)]
if str(task.status).upper() in {"SUCCESS", "FAILED"}:
actions.extend(_extract_result_deep_links(task, config_manager))
summary_line = _build_task_observability_summary(task, config_manager)
return (
f"Последняя задача: {task.id}, статус: {task.status}.",
f"Последняя задача: {task.id}, статус: {task.status}."
+ (f"\n{summary_line}" if summary_line else ""),
task.id,
[AssistantAction(type="open_task", label="Open Task", target=task.id)],
actions,
)
task = task_manager.get_task(task_id)
if not task:
raise HTTPException(status_code=404, detail=f"Task {task_id} not found")
actions = [AssistantAction(type="open_task", label="Open Task", target=task.id)]
if str(task.status).upper() in {"SUCCESS", "FAILED"}:
actions.extend(_extract_result_deep_links(task, config_manager))
summary_line = _build_task_observability_summary(task, config_manager)
return (
f"Статус задачи {task.id}: {task.status}.",
f"Статус задачи {task.id}: {task.status}."
+ (f"\n{summary_line}" if summary_line else ""),
task.id,
[AssistantAction(type="open_task", label="Open Task", target=task.id)],
actions,
)
if operation == "create_branch":
@@ -1240,6 +1381,18 @@ async def _dispatch_intent(
[
AssistantAction(type="open_task", label="Open Task", target=task.id),
AssistantAction(type="open_reports", label="Open Reports", target="/reports"),
*(
[
AssistantAction(
type="open_route",
label=f"Открыть дашборд в {_get_environment_name_by_id(tgt, config_manager)}",
target=f"/dashboards/{dashboard_id}?env_id={tgt}",
),
AssistantAction(type="open_diff", label="Показать Diff", target=str(dashboard_id)),
]
if dashboard_id
else []
),
],
)
@@ -1268,6 +1421,18 @@ async def _dispatch_intent(
[
AssistantAction(type="open_task", label="Open Task", target=task.id),
AssistantAction(type="open_reports", label="Open Reports", target="/reports"),
*(
[
AssistantAction(
type="open_route",
label=f"Открыть дашборд в {_get_environment_name_by_id(env_id, config_manager)}",
target=f"/dashboards/{dashboard_id}?env_id={env_id}",
),
AssistantAction(type="open_diff", label="Показать Diff", target=str(dashboard_id)),
]
if entities.get("dashboard_id") or entities.get("dashboard_ref")
else []
),
],
)