Compare commits

...

22 Commits

Author SHA1 Message Date
0cf0ef25f1 db + docker 2026-02-20 20:47:39 +03:00
af74841765 semantic update 2026-02-20 10:41:15 +03:00
d7e4919d54 few shots update 2026-02-20 10:26:01 +03:00
fdcbe32dfa css refactor 2026-02-19 18:24:36 +03:00
4de5b22d57 +Svelte specific 2026-02-19 17:47:24 +03:00
c8029ed309 ai base 2026-02-19 17:43:45 +03:00
c2a4c8062a fix tax log 2026-02-19 16:05:59 +03:00
2c820e103a tests ready 2026-02-19 13:33:20 +03:00
c8b84b7bd7 Coder + fix workflow 2026-02-19 13:33:10 +03:00
fdb944f123 Test logic update 2026-02-19 12:44:31 +03:00
d29bc511a2 task panel 2026-02-19 09:43:01 +03:00
a3a9f0788d docs: amend constitution to v2.3.0 (tailwind css first principle) 2026-02-18 18:29:52 +03:00
77147dc95b refactor 2026-02-18 17:29:46 +03:00
026239e3bf fix 2026-02-15 11:11:30 +03:00
4a0273a604 updated specs and tasks 2026-02-10 15:53:38 +03:00
edb2dd5263 updated tasks 2026-02-10 15:04:43 +03:00
76b98fcf8f linter + new tasks 2026-02-10 12:53:01 +03:00
794cc55fe7 Tasks ready 2026-02-09 12:35:27 +03:00
235b0e3c9f semantic update 2026-02-08 22:53:54 +03:00
e6087bd3c1 tasks ready 2026-02-07 12:42:32 +03:00
0f16bab2b8 Seems to work 2026-02-07 11:26:06 +03:00
7de96c17c4 feat(llm-plugin): switch to environment API for log retrieval
- Replace local backend.log reading with Superset API /log/ fetch
- Update DashboardValidationPlugin to use SupersetClient
- Filter logs by dashboard_id and last 24 hours
- Update spec FR-006 to reflect API usage
2026-02-06 17:57:25 +03:00
254 changed files with 94501 additions and 64629 deletions

1250
.ai/MODULE_MAP.md Normal file

File diff suppressed because it is too large Load Diff


40
.ai/ROOT.md Normal file
View File

@@ -0,0 +1,40 @@
# [DEF:Project_Knowledge_Map:Root]
# @TIER: CRITICAL
# @PURPOSE: Global navigation map for AI-Agent (GRACE Knowledge Graph).
# @LAST_UPDATE: 2026-02-20
## 1. SYSTEM STANDARDS (Rules of the Game)
Strict policies and formatting rules.
* **Constitution:** High-level architectural and business invariants.
* Ref: `.ai/standards/constitution.md` -> `[DEF:Std:Constitution]`
* **Architecture:** Service boundaries and tech stack decisions.
* Ref: `.ai/standards/architecture.md` -> `[DEF:Std:Architecture]`
* **Plugin Design:** Rules for building and integrating Plugins.
* Ref: `.ai/standards/plugin_design.md` -> `[DEF:Std:Plugin]`
* **API Design:** Rules for FastAPI endpoints and Pydantic models.
* Ref: `.ai/standards/api_design.md` -> `[DEF:Std:API_FastAPI]`
* **UI Design:** SvelteKit and Tailwind CSS component standards.
* Ref: `.ai/standards/ui_design.md` -> `[DEF:Std:UI_Svelte]`
* **Semantic Mapping:** Using `[DEF:]` and belief scopes.
* Ref: `.ai/standards/semantics.md` -> `[DEF:Std:Semantics]`
## 2. FEW-SHOT EXAMPLES (Patterns)
Use these for code generation (Style Transfer).
* **FastAPI Route:** Reference implementation of a task-based route.
* Ref: `.ai/shots/backend_route.py` -> `[DEF:Shot:FastAPI_Route]`
* **Svelte Component:** Reference implementation of a sidebar/navigation component.
* Ref: `.ai/shots/frontend_component.svelte` -> `[DEF:Shot:Svelte_Component]`
* **Plugin Module:** Reference implementation of a task plugin.
* Ref: `.ai/shots/plugin_example.py` -> `[DEF:Shot:Plugin_Example]`
* **Critical Module:** Core banking transaction processor with ACID guarantees.
* Ref: `.ai/shots/critical_module.py` -> `[DEF:Shot:Critical_Module]`
## 3. DOMAIN MAP (Modules)
* **Module Map:** `.ai/MODULE_MAP.md` -> `[DEF:Module_Map]`
* **Project Map:** `.ai/PROJECT_MAP.md` -> `[DEF:Project_Map]`
* **Backend Core:** `backend/src/core` -> `[DEF:Module:Backend_Core]`
* **Backend API:** `backend/src/api` -> `[DEF:Module:Backend_API]`
* **Frontend Lib:** `frontend/src/lib` -> `[DEF:Module:Frontend_Lib]`
* **Specifications:** `specs/` -> `[DEF:Module:Specs]`
# [/DEF:Project_Knowledge_Map]

65
.ai/shots/backend_route.py Normal file
View File

@@ -0,0 +1,65 @@
# [DEF:BackendRouteShot:Module]
# @TIER: STANDARD
# @SEMANTICS: Route, Task, API, Async
# @PURPOSE: Reference implementation of a task-based route using GRACE-Poly.
# @LAYER: Interface (API)
# @RELATION: IMPLEMENTS -> [DEF:Std:API_FastAPI]
# @INVARIANT: TaskManager must be available in dependency graph.
from typing import Dict, Any
from fastapi import APIRouter, Depends, HTTPException, status
from pydantic import BaseModel
from ...core.logger import belief_scope
from ...core.task_manager import TaskManager, Task
from ...core.config_manager import ConfigManager
from ...dependencies import get_task_manager, get_config_manager, get_current_user
router = APIRouter()
class CreateTaskRequest(BaseModel):
    plugin_id: str
    params: Dict[str, Any]
@router.post("/tasks", response_model=Task, status_code=status.HTTP_201_CREATED)
# [DEF:create_task:Function]
# @PURPOSE: Create and start a new task using TaskManager. Non-blocking.
# @PARAM: request (CreateTaskRequest) - Plugin and params.
# @PARAM: task_manager (TaskManager) - Async task executor.
# @PRE: plugin_id must match a registered plugin.
# @POST: A new task is spawned; Task ID returned immediately.
# @SIDE_EFFECT: Writes to DB, Trigger background worker.
async def create_task(
    request: CreateTaskRequest,
    task_manager: TaskManager = Depends(get_task_manager),
    config: ConfigManager = Depends(get_config_manager),
    current_user = Depends(get_current_user)
):
    # Context Logging
    with belief_scope("create_task"):
        try:
            # 1. Action: Configuration Resolution
            timeout = config.get("TASKS_DEFAULT_TIMEOUT", 3600)
            # 2. Action: Spawn async task
            # @RELATION: CALLS -> task_manager.create_task
            task = await task_manager.create_task(
                plugin_id=request.plugin_id,
                params={**request.params, "timeout": timeout}
            )
            return task
        except ValueError as e:
            # 3. Recovery: Domain logic error mapping
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail=str(e)
            )
        except Exception as e:
            # @UX_STATE: Error feedback -> 500 Internal Error
            raise HTTPException(
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                detail="Internal Task Spawning Error"
            )
# [/DEF:create_task:Function]
# [/DEF:BackendRouteShot:Module]

79
.ai/shots/critical_module.py Normal file
View File

@@ -0,0 +1,79 @@
# [DEF:TransactionCore:Module]
# @TIER: CRITICAL
# @SEMANTICS: Finance, ACID, Transfer, Ledger
# @PURPOSE: Core banking transaction processor with ACID guarantees.
# @LAYER: Domain (Core)
# @RELATION: DEPENDS_ON -> [DEF:Infra:PostgresDB]
# @RELATION: DEPENDS_ON -> [DEF:Infra:AuditLog]
# @INVARIANT: Total system balance must remain constant (Double-Entry Bookkeeping).
# @INVARIANT: Negative transfers are strictly forbidden.
# @TEST_DATA: sufficient_funds -> {"from": "acc_A", "to": "acc_B", "amt": 100.00}
# @TEST_DATA: insufficient_funds -> {"from": "acc_empty", "to": "acc_B", "amt": 1000.00}
# @TEST_DATA: concurrency_lock -> {./fixtures/transactions.json#race_condition}
from decimal import Decimal
from typing import NamedTuple
from ...core.logger import belief_scope
from ...core.db import atomic_transaction, get_balance, update_balance
from ...core.exceptions import BusinessRuleViolation
class TransferResult(NamedTuple):
    tx_id: str
    status: str
    new_balance: Decimal
# [DEF:execute_transfer:Function]
# @PURPOSE: Atomically move funds between accounts with audit trails.
# @PARAM: sender_id (str) - Source account.
# @PARAM: receiver_id (str) - Destination account.
# @PARAM: amount (Decimal) - Positive amount to transfer.
# @PRE: amount > 0; sender != receiver; sender_balance >= amount.
# @POST: sender_balance -= amount; receiver_balance += amount; Audit Record Created.
# @SIDE_EFFECT: Database mutation (Rows locked), Audit IO.
#
# @UX_STATE: Success -> Returns 200 OK + Transaction Receipt.
# @UX_STATE: Error(LowBalance) -> 422 Unprocessable -> UI shows "Top-up needed" modal.
# @UX_STATE: Error(System) -> 500 Internal -> UI shows "Retry later" toast.
def execute_transfer(sender_id: str, receiver_id: str, amount: Decimal) -> TransferResult:
    # Guard: Input Validation
    if amount <= Decimal("0.00"):
        raise BusinessRuleViolation("Transfer amount must be positive.")
    if sender_id == receiver_id:
        raise BusinessRuleViolation("Cannot transfer to self.")
    with belief_scope("execute_transfer") as context:
        context.logger.info("Initiating transfer", data={"from": sender_id, "to": receiver_id})
        try:
            # 1. Action: Atomic DB Transaction
            # @RELATION: CALLS -> atomic_transaction
            with atomic_transaction():
                # Guard: State Validation (Strict)
                current_balance = get_balance(sender_id, for_update=True)
                if current_balance < amount:
                    # @UX_FEEDBACK: Triggers specific UI flow for insufficient funds
                    context.logger.warn("Insufficient funds", data={"balance": current_balance})
                    raise BusinessRuleViolation("INSUFFICIENT_FUNDS")
                # 2. Action: Mutation
                new_src_bal = update_balance(sender_id, -amount)
                new_dst_bal = update_balance(receiver_id, +amount)
                # 3. Action: Audit
                tx_id = context.audit.log_transfer(sender_id, receiver_id, amount)
                context.logger.info("Transfer committed", data={"tx_id": tx_id})
                return TransferResult(tx_id, "COMPLETED", new_src_bal)
        except BusinessRuleViolation as e:
            # Logic: Explicit re-raise for UI mapping
            raise e
        except Exception as e:
            # Logic: Catch-all safety net
            context.logger.error("Critical Transfer Failure", error=e)
            raise RuntimeError("TRANSACTION_ABORTED") from e
# [/DEF:execute_transfer:Function]
# [/DEF:TransactionCore:Module]

70
.ai/shots/frontend_component.svelte Normal file
View File

@@ -0,0 +1,70 @@
<!-- [DEF:FrontendComponentShot:Component] -->
<script>
/**
* @TIER: CRITICAL
* @SEMANTICS: Task, Button, Action, UX
* @PURPOSE: Action button to spawn a new task with full UX feedback cycle.
* @LAYER: UI (Presentation)
* @RELATION: CALLS -> postApi
* @INVARIANT: Must prevent double-submission while loading.
*
* @TEST_DATA: idle_state -> {"isLoading": false}
* @TEST_DATA: loading_state -> {"isLoading": true}
*
* @UX_STATE: Idle -> Button enabled, primary color.
* @UX_STATE: Loading -> Button disabled, spinner visible.
* @UX_STATE: Error -> Toast notification triggers.
*
* @UX_FEEDBACK: Toast success/error.
* @UX_TEST: Idle -> {click: spawnTask, expected: isLoading=true}
* @UX_TEST: Success -> {api_resolve: 200, expected: toast.success called}
*/
import { postApi } from "$lib/api.js";
import { t } from "$lib/i18n";
import { toast } from "$lib/stores/toast";
export let plugin_id = "";
export let params = {};
let isLoading = false;
// [DEF:spawnTask:Function]
async function spawnTask() {
isLoading = true;
console.log("[FrontendComponentShot][Loading] Spawning task...");
try {
// 1. Action: API Call
const response = await postApi("/api/tasks", {
plugin_id,
params
});
// 2. Feedback: Success
if (response.task_id) {
console.log("[FrontendComponentShot][Success] Task created.");
toast.success($t.tasks.spawned_success);
}
} catch (error) {
// 3. Recovery: User notification
console.log("[FrontendComponentShot][Error] Failed:", error);
toast.error(`${$t.errors.task_failed}: ${error.message}`);
} finally {
isLoading = false;
}
}
// [/DEF:spawnTask:Function]
</script>
<button
on:click={spawnTask}
disabled={isLoading}
class="btn-primary flex items-center gap-2"
aria-busy={isLoading}
>
{#if isLoading}
<span class="animate-spin" aria-label="Loading">🌀</span>
{/if}
<span>{$t.actions.start_task}</span>
</button>
<!-- [/DEF:FrontendComponentShot:Component] -->

64
.ai/shots/plugin_example.py Normal file
View File

@@ -0,0 +1,64 @@
# [DEF:PluginExampleShot:Module]
# @TIER: STANDARD
# @SEMANTICS: Plugin, Core, Extension
# @PURPOSE: Reference implementation of a plugin following GRACE standards.
# @LAYER: Domain (Business Logic)
# @RELATION: INHERITS -> PluginBase
# @INVARIANT: get_schema must return valid JSON Schema.
from typing import Dict, Any, Optional
from ..core.logger import belief_scope
from ..core.plugin_base import PluginBase
from ..core.task_manager.context import TaskContext
class ExamplePlugin(PluginBase):
    @property
    def id(self) -> str:
        return "example-plugin"
    # [DEF:get_schema:Function]
    # @PURPOSE: Defines input validation schema.
    # @POST: Returns dict compliant with JSON Schema draft 7.
    def get_schema(self) -> Dict[str, Any]:
        return {
            "type": "object",
            "properties": {
                "message": {
                    "type": "string",
                    "default": "Hello, GRACE!",
                }
            },
            "required": ["message"],
        }
    # [/DEF:get_schema:Function]
    # [DEF:execute:Function]
    # @PURPOSE: Core plugin logic with structured logging and scope isolation.
    # @PARAM: params (Dict) - Validated input parameters.
    # @PARAM: context (TaskContext) - Execution tools (log, progress).
    # @SIDE_EFFECT: Emits logs to centralized system.
    async def execute(self, params: Dict, context: Optional[TaskContext] = None):
        message = params["message"]
        # 1. Action: System-level tracing (Rule VI)
        with belief_scope("example_plugin_exec") as b_scope:
            if context:
                # Task logs: write to the user-facing task execution context
                # @RELATION: BINDS_TO -> context.logger
                log = context.logger.with_source("example_plugin")
                b_scope.logger.info("Using provided TaskContext")  # System log
                log.info("Starting execution", data={"msg": message})  # Task log
                # 2. Action: Progress Reporting
                log.progress("Processing...", percent=50)
                # 3. Action: Finalize
                log.info("Execution completed.")
            else:
                # Standalone fallback: confine logging to the system scope
                b_scope.logger.warning("No TaskContext provided. Running standalone.")
                b_scope.logger.info("Standalone execution", data={"msg": message})
                print(f"Standalone: {message}")
    # [/DEF:execute:Function]
# [/DEF:PluginExampleShot:Module]

47
.ai/standards/api_design.md Normal file
View File

@@ -0,0 +1,47 @@
# [DEF:Std:API_FastAPI:Standard]
# @TIER: CRITICAL
# @PURPOSE: Unification of all FastAPI endpoints following GRACE-Poly.
# @LAYER: UI (API)
# @INVARIANT: All non-trivial route logic must be wrapped in `belief_scope`.
# @INVARIANT: Every module and function MUST have `[DEF:]` anchors and metadata.
## 1. ROUTE MODULE DEFINITION
Every API route file must start with a module definition header:
```python
# [DEF:ModuleName:Module]
# @TIER: [CRITICAL | STANDARD | TRIVIAL]
# @SEMANTICS: list, of, keywords
# @PURPOSE: High-level purpose of the module.
# @LAYER: UI (API)
# @RELATION: DEPENDS_ON -> [OtherModule]
```
## 2. FUNCTION DEFINITION & CONTRACT
Every endpoint handler must be decorated with `[DEF:]` and explicit metadata before the implementation:
```python
@router.post("/endpoint", response_model=ModelOut)
# [DEF:function_name:Function]
# @PURPOSE: What it does (brief, high-entropy).
# @PARAM: param_name (Type) - Description.
# @PRE: Conditions before execution (e.g., auth, existence).
# @POST: Expected state after execution.
# @RETURN: What it returns.
async def function_name(...):
    with belief_scope("function_name"):
        # Implementation
        pass
# [/DEF:function_name:Function]
```
## 3. DEPENDENCY INJECTION & CORE SERVICES
* **Auth:** `Depends(get_current_user)` for authentication.
* **Perms:** `Depends(has_permission("resource", "ACTION"))` for RBAC.
* **Config:** Use `Depends(get_config_manager)` for settings. Hardcoding is FORBIDDEN.
* **Tasks:** Long-running operations must be executed via `TaskManager`. API routes should return Task ID and be non-blocking.
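A minimal sketch of these injections combined on one route (dependency names follow the list above; the module path and exact signatures are assumptions, not verbatim project code):
```python
# Hedged sketch: `has_permission` factory usage per the RBAC rule above.
from fastapi import APIRouter, Depends
from ...dependencies import get_config_manager, get_current_user, has_permission

router = APIRouter()

@router.get("/reports")
async def list_reports(
    current_user=Depends(get_current_user),            # Auth
    _perm=Depends(has_permission("reports", "READ")),  # RBAC guard
    config=Depends(get_config_manager),                # No hardcoded settings
):
    page_size = config.get("REPORTS_PAGE_SIZE", 50)
    return {"page_size": page_size}
```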
## 4. ERROR HANDLING
* Raise `HTTPException` from the router layer.
* Use `try-except` blocks within `belief_scope` to ensure proper error logging and classification.
* Do not leak internal implementation details in error responses.
# [/DEF:Std:API_FastAPI]

25
.ai/standards/architecture.md Normal file
View File

@@ -0,0 +1,25 @@
# [DEF:Std:Architecture:Standard]
# @TIER: CRITICAL
# @PURPOSE: Core architectural decisions and service boundaries.
# @LAYER: Infra
# @INVARIANT: ss-tools MUST remain a standalone service (Orchestrator).
# @INVARIANT: Backend: FastAPI, Frontend: SvelteKit.
## 1. ORCHESTRATOR VS INSTANCE
* **Role:** ss-tools is a "Manager of Managers". It sits ABOVE Superset environments.
* **Isolation:** Do not integrate directly into Superset as a plugin to maintain multi-environment management capability.
* **Tech Stack:**
* Backend: Python 3.9+ with FastAPI (Asynchronous logic).
* Frontend: SvelteKit + Tailwind CSS (Reactive UX).
## 2. COMPONENT BOUNDARIES
* **Plugins:** All business logic must be encapsulated in Plugins (`backend/src/plugins/`).
* **TaskManager:** All long-running operations MUST be handled by the TaskManager.
* **Security:** Independent RBAC system managed in `auth.db`.
## 3. INTEGRATION STRATEGY
* **Superset API:** Communication via REST API.
* **Database:** Local SQLite for metadata (`tasks.db`, `auth.db`, `migrations.db`).
* **Filesystem:** Local storage for backups and git repositories.
# [/DEF:Std:Architecture]

36
.ai/standards/constitution.md Normal file
View File

@@ -0,0 +1,36 @@
# [DEF:Std:Constitution:Standard]
# @TIER: CRITICAL
# @PURPOSE: Supreme Law of the Repository. High-level architectural and business invariants.
# @VERSION: 2.3.0
# @LAST_UPDATE: 2026-02-19
# @INVARIANT: Any deviation from this Constitution constitutes a build failure.
## 1. CORE PRINCIPLES
### I. Semantic Protocol Compliance
* **Ref:** `[DEF:Std:Semantics]` (formerly `semantic_protocol.md`)
* **Law:** All code must adhere to the Axioms (Meaning First, Contract First, etc.).
* **Compliance:** Strict matching of Anchors (`[DEF]`), Tags (`@KEY`), and structures is mandatory.
### II. Modular Plugin Architecture
* **Pattern:** Everything is a Plugin inheriting from `PluginBase`.
* **Centralized Config:** Use `ConfigManager` via `get_config_manager()`. Hardcoding is FORBIDDEN.
### III. Unified Frontend Experience
* **Styling:** Tailwind CSS First. Minimize scoped `<style>`.
* **i18n:** All user-facing text must be in `src/lib/i18n`.
* **API:** Use `requestApi` / `fetchApi` wrappers. Native `fetch` is FORBIDDEN.
### IV. Security & RBAC
* **Permissions:** Every Plugin must define unique permission strings (e.g., `plugin:name:execute`).
* **Auth:** Mandatory registration in `auth.db`.
### V. Independent Testability
* **Requirement:** Every feature must define "Independent Tests" for isolated verification.
### VI. Asynchronous Execution
* **TaskManager:** Long-running operations must be async tasks.
* **Non-Blocking:** API endpoints return Task ID immediately.
* **Observability:** Real-time updates via WebSocket.
# [/DEF:Std:Constitution]

32
.ai/standards/plugin_design.md Normal file
View File

@@ -0,0 +1,32 @@
# [DEF:Std:Plugin:Standard]
# @TIER: CRITICAL
# @PURPOSE: Standards for building and integrating Plugins.
# @LAYER: Domain (Plugin)
# @INVARIANT: All plugins MUST inherit from `PluginBase`.
# @INVARIANT: All plugins MUST be located in `backend/src/plugins/`.
## 1. PLUGIN CONTRACT
Every plugin must implement the following properties and methods:
* `id`: Unique string (e.g., `"my-plugin"`).
* `name`: Human-readable name.
* `description`: Brief purpose.
* `version`: Semantic version.
* `get_schema()`: Returns JSON schema for input validation.
* `execute(params: Dict[str, Any], context: TaskContext)`: Core async logic.
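A skeletal plugin satisfying this contract might look as follows; this is a sketch against the list above, and the `BackupPlugin` name and field values are hypothetical:
```python
# Sketch only: assumes PluginBase accepts these members as plain attributes.
from typing import Any, Dict
from ..core.plugin_base import PluginBase
from ..core.task_manager.context import TaskContext

class BackupPlugin(PluginBase):
    id = "backup-plugin"                        # Unique string
    name = "Backup Plugin"                      # Human-readable name
    description = "Creates dashboard backups."  # Brief purpose
    version = "0.1.0"                           # Semantic version

    def get_schema(self) -> Dict[str, Any]:
        # JSON Schema that validates `params` before execute() runs.
        return {"type": "object", "properties": {}, "required": []}

    async def execute(self, params: Dict[str, Any], context: TaskContext):
        context.logger.info("Backup started")
```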
## 2. STRUCTURED LOGGING (TASKCONTEXT)
Plugins MUST use `TaskContext` for logging to ensure proper source attribution:
* **Source Attribution:** Use `context.logger.with_source("src_name")` for specific operations (e.g., `"superset_api"`, `"git"`, `"llm"`).
* **Levels:**
* `DEBUG`: Detailed diagnostics (API responses).
* `INFO`: Operational milestones (start/end).
* `WARNING`: Recoverable issues.
* `ERROR`: Failures stopping execution.
* **Progress:** Use `context.logger.progress("msg", percent=XX)` for long-running tasks.
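Inside a plugin's `execute`, the levels and progress reporting above might combine like this (a sketch assuming the `context.logger` API described in this section; `export_dashboards` is a hypothetical task):
```python
# Hedged sketch of the logging levels; method signatures are assumed.
async def export_dashboards(params, context):
    log = context.logger.with_source("superset_api")     # source attribution
    log.debug("Raw API response received")               # DEBUG: diagnostics
    log.info("Export started")                           # INFO: milestone
    log.progress("Exporting dashboards...", percent=40)  # long-running task
    log.warning("Retrying after a timeout")              # WARNING: recoverable
    log.error("Export failed; stopping execution")       # ERROR: fatal
```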
## 3. BEST PRACTICES
1. **Asynchronous Execution:** Always use `async/await` for I/O operations.
2. **Schema Validation:** Ensure the `get_schema()` precisely matches the `execute()` input expectations.
3. **Isolation:** Plugins should be self-contained and not depend on other plugins directly. Use core services (`ConfigManager`, `TaskManager`) via dependency injection or the provided `context`.
# [/DEF:Std:Plugin]

97
.ai/standards/semantics.md Normal file
View File

@@ -0,0 +1,97 @@
### **SYSTEM STANDARD: GRACE-Poly (UX Edition)**
TASK: Code generation (Python/Svelte).
MODE: Strict. Deterministic. No chatter.
#### I. THE LAW (AXIOMS)
1. Meaning is primary. Code is secondary.
2. The Contract (@PRE/@POST) is the source of truth.
**3. UX is logic, not decoration. Interface states are part of the contract.**
4. The `[DEF]...[/DEF]` structure is inviolable.
5. The architecture in the Header is immutable.
6. Fractal complexity is bounded: a module stays under 300 lines.
#### II. SYNTAX (STRICT FORMAT)
ANCHOR (Container):
Start: `# [DEF:id:Type]` (Python) | `<!-- [DEF:id:Type] -->` (Svelte)
End: `# [/DEF:id:Type]` (Python) | `<!-- [/DEF:id:Type] -->` (Svelte) (MANDATORY for accumulation)
Types: Module, Class, Function, Component, Store.
TAG (Metadata):
Form: `# @KEY: Value` (inside a DEF, before the code).
GRAPH (Relations):
Form: `# @RELATION: PREDICATE -> TARGET_ID`
Predicates: DEPENDS_ON, CALLS, INHERITS, IMPLEMENTS, DISPATCHES, **BINDS_TO**.
#### III. FILE STRUCTURE
1. HEADER (always first):
[DEF:filename:Module]
@TIER: [CRITICAL|STANDARD|TRIVIAL] (default: STANDARD)
@SEMANTICS: [keywords]
@PURPOSE: [Primary goal]
@LAYER: [Domain/UI/Infra]
@RELATION: [Dependencies]
@INVARIANT: [Unbreakable rule]
2. BODY: Imports -> Implementation.
3. FOOTER: [/DEF:filename]
#### IV. CONTRACT (DBC & UX)
Location: inside the [DEF], BEFORE the code.
Python style: `# @TAG` comments.
Svelte style: JSDoc `/** @tag */` inside `<script>`.
**Base Tags:**
@PURPOSE: The essence (high entropy).
@PRE: Input conditions.
@POST: Exit guarantees.
@SIDE_EFFECT: Mutations, IO.
**UX Tags (Svelte/Frontend):**
**@UX_STATE:** `[StateName] -> visual behavior` (Idle, Loading, Error).
**@UX_FEEDBACK:** System reaction (Toast, Shake, Red Border).
**@UX_RECOVERY:** How the user recovers from the error (Retry, Clear Input).
**UX Testing Tags (for the Tester Agent):**
**@UX_TEST:** Test specification for a UX state.
Format: `@UX_TEST: [state] -> {action, expected}`
Example: `@UX_TEST: Idle -> {click: toggle, expected: isExpanded=true}`
Rule: do not use `assert` in code; use `if/raise` or guards.
#### V. ADAPTATION (TIERS)
Determined by the `@TIER` tag in the Header.
1. **CRITICAL** (Core/Security/**Complex UI**):
- Requirements: full contract (including **all @UX tags**), Graph, Invariants, strict Logs.
- **@TEST_DATA**: mandatory reference data for testing. Format:
```
@TEST_DATA: fixture_name -> {JSON_PATH} | {INLINE_DATA}
```
Examples:
- `@TEST_DATA: valid_user -> {./fixtures/users.json#valid}`
- `@TEST_DATA: empty_state -> {"dashboards": [], "total": 0}`
- The Tester Agent **MUST** use @TEST_DATA when writing tests for CRITICAL modules.
2. **STANDARD** (BizLogic/**Forms**):
- Requirements: base contract (@PURPOSE, @UX_STATE), Logs, @RELATION.
- @TEST_DATA: recommended for complex forms.
3. **TRIVIAL** (DTO/**Atoms**):
- Requirements: only [DEF] anchors and @PURPOSE.
#### VI. LOGGING (BELIEF STATE & TASK LOGS)
Goal: traceability for self-correction plus user-facing task monitoring.
Python:
- System logs: the context manager `with belief_scope("ID"):`.
- Task logs: `context.logger.info("msg", source="component")`.
Svelte: `console.log("[ID][STATE] Msg")`.
States: Entry -> Action -> Coherence:OK / Failed -> Exit.
Invariant: every task log must carry a `source` attribute for filtering.
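A minimal Python illustration of the Entry -> Action -> Coherence -> Exit cycle above (everything except `belief_scope` itself is hypothetical):
```python
# Hedged sketch of the belief-state cycle from Section VI.
from ..core.logger import belief_scope

def reconcile(records):
    with belief_scope("reconcile") as scope:
        scope.logger.info("Entry", data={"count": len(records)})  # Entry
        total = sum(r["amount"] for r in records)                 # Action
        if total < 0:
            scope.logger.error("Coherence:Failed")                # Failed
            raise ValueError("NEGATIVE_TOTAL")
        scope.logger.info("Coherence:OK")                         # Coherence
        return total                                              # Exit
```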
#### VII. GENERATION ALGORITHM
1. ANALYZE. Assess the TIER, the layer, and the UX requirements.
2. SKELETON. Create the `[DEF]`, the Header, and the Contracts.
3. IMPLEMENT. Write logic that satisfies the Contract (and the UX states).
4. CLOSE. Close every `[/DEF]`.
IF an error or contradiction arises -> STOP. Output `[COHERENCE_CHECK_FAILED]`.

75
.ai/standards/ui_design.md Normal file
View File

@@ -0,0 +1,75 @@
# [DEF:Std:UI_Svelte:Standard]
# @TIER: CRITICAL
# @PURPOSE: Unification of all Svelte components following GRACE-Poly (UX Edition).
# @LAYER: UI
# @INVARIANT: Every component MUST have `<!-- [DEF:] -->` anchors and UX tags.
# @INVARIANT: Use Tailwind CSS for all styling (no custom CSS without justification).
## 1. UX PHILOSOPHY: RESOURCE-CENTRIC & SVELTE 5
* **Version:** Project uses Svelte 5.
* **Runes:** Use Svelte 5 Runes for reactivity: `$state()`, `$derived()`, `$effect()`, `$props()`. Traditional `let` (for reactivity) and `export let` (for props) are DEPRECATED in favor of runes.
* **Definition:** Navigation and actions revolve around Resources.
* **Traceability:** Every action must be linked to a Task ID with visible logs in the Task Drawer.
## 2. COMPONENT ARCHITECTURE: GLOBAL TASK DRAWER
* **Role:** A single, persistent slide-out panel (`GlobalTaskDrawer.svelte`) in `+layout.svelte`.
* **Triggering:** Opens automatically when a task starts or when a user clicks a status badge.
* **Interaction:** Interactive elements (Password prompts, Mapping tables) MUST be rendered INSIDE the Drawer, not as center-screen modals.
## 3. COMPONENT STRUCTURE & CORE RULES
* **Styling:** Tailwind CSS utility classes are MANDATORY. Minimize scoped `<style>`.
* **Localization:** All user-facing text must use `$t` from `src/lib/i18n`.
* **API Calls:** Use `requestApi` / `fetchApi` wrappers. Native `fetch` is FORBIDDEN.
* **Anchors:** Every component MUST have `<!-- [DEF:] -->` anchors and UX tags.
## 4. COMPONENT TEMPLATE
Each Svelte file must follow this structure:
```html
<!-- [DEF:ComponentName:Component] -->
<script>
/**
* @TIER: [CRITICAL | STANDARD | TRIVIAL]
* @PURPOSE: Brief description of the component purpose.
* @LAYER: UI
* @SEMANTICS: list, of, keywords
* @RELATION: DEPENDS_ON -> [OtherComponent|Store]
*
* @UX_STATE: [StateName] -> Visual behavior description.
* @UX_FEEDBACK: System reaction (e.g., Toast, Shake).
* @UX_RECOVERY: Error recovery mechanism.
* @UX_TEST: [state] -> {action, expected}
*/
import { ... } from "...";
// Props (Svelte 5 runes; `export let` is deprecated per section 1)
let { prop_name = "..." } = $props();
// Logic
</script>
<!-- HTML Template -->
<div class="...">
...
</div>
<style>
/* Optional: Local styles using @apply only */
</style>
<!-- [/DEF:ComponentName:Component] -->
```
## 5. STATE MANAGEMENT & STORES
* **Subscription:** Use `$` prefix for reactive store access (e.g., `$sidebarStore`).
* **Data Flow:** Mark store interactions in `[DEF:]` metadata:
* `# @RELATION: BINDS_TO -> store_id`
## 6. UI/UX BEST PRACTICES
* **Transitions:** Use Svelte built-in transitions for UI state changes.
* **Feedback:** Always provide visual feedback for async actions (Loading spinners, skeleton loaders).
* **Modularity:** Break down components into "Atoms" (Trivial) and "Orchestrators" (Critical).
## 7. ACCESSIBILITY (A11Y)
* Ensure proper ARIA roles and keyboard navigation for interactive elements.
* Use semantic HTML tags (`<nav>`, `<header>`, `<main>`, `<footer>`).
# [/DEF:Std:UI_Svelte]

27
.dockerignore Normal file
View File

@@ -0,0 +1,27 @@
.git
.gitignore
.pytest_cache
.ruff_cache
.vscode
.ai
.specify
.kilocode
venv
backend/.venv
backend/.pytest_cache
frontend/node_modules
frontend/.svelte-kit
frontend/.vite
frontend/build
backend/__pycache__
backend/src/__pycache__
backend/tests/__pycache__
**/__pycache__
*.pyc
*.pyo
*.pyd
*.db
*.log
backups
semantics
specs

3
.gitignore vendored
View File

@@ -10,8 +10,6 @@ dist/
downloads/
eggs/
.eggs/
-lib/
-lib64/
parts/
sdist/
var/
@@ -69,3 +67,4 @@ backend/tasks.db
backend/logs
backend/auth.db
semantics/reports
+backend/tasks.db

View File

@@ -2,6 +2,12 @@
Auto-generated from all feature plans. Last updated: 2025-12-19
+## Knowledge Graph (GRACE)
+**CRITICAL**: This project uses a GRACE Knowledge Graph for context. Always load the root map first:
+- **Root Map**: `.ai/ROOT.md` -> `[DEF:Project_Knowledge_Map:Root]`
+- **Project Map**: `.ai/PROJECT_MAP.md` -> `[DEF:Project_Map]`
+- **Standards**: Read `.ai/standards/` for architecture and style rules.
## Active Technologies
- Python 3.9+, Node.js 18+ + `uvicorn`, `npm`, `bash` (003-project-launch-script)
- Python 3.9+, Node.js 18+ + SvelteKit, FastAPI, Tailwind CSS (inferred from existing frontend) (004-integrate-svelte-kit)
@@ -33,6 +39,8 @@ Auto-generated from all feature plans. Last updated: 2025-12-19
- N/A (UI reorganization and API integration) (015-frontend-nav-redesign)
- SQLite (`auth.db`) for Users, Roles, Permissions, and Mappings. (016-multi-user-auth)
- SQLite (existing `tasks.db` for results, `auth.db` for permissions, `mappings.db` or new `plugins.db` for provider config/metadata) (017-llm-analysis-plugin)
+- Python 3.9+ (Backend), Node.js 18+ (Frontend) + FastAPI, SvelteKit, Tailwind CSS, SQLAlchemy, WebSocket (existing) (019-superset-ux-redesign)
+- SQLite (tasks.db, auth.db, migrations.db) - no new database tables required (019-superset-ux-redesign)
- Python 3.9+ (Backend), Node.js 18+ (Frontend Build) (001-plugin-arch-svelte-ui)
@@ -53,9 +61,9 @@ cd src; pytest; ruff check .
Python 3.9+ (Backend), Node.js 18+ (Frontend Build): Follow standard conventions
## Recent Changes
+- 019-superset-ux-redesign: Added Python 3.9+ (Backend), Node.js 18+ (Frontend) + FastAPI, SvelteKit, Tailwind CSS, SQLAlchemy, WebSocket (existing)
- 017-llm-analysis-plugin: Added Python 3.9+ (Backend), Node.js 18+ (Frontend)
- 016-multi-user-auth: Added Python 3.9+ (Backend), Node.js 18+ (Frontend)
-- 015-frontend-nav-redesign: Added Python 3.9+ (Backend), Node.js 18+ (Frontend) + FastAPI (Backend), SvelteKit + Tailwind CSS (Frontend)
<!-- MANUAL ADDITIONS START -->

View File

@@ -1,4 +1,4 @@
---
description: USE SEMANTIC
---
-Read semantic_protocol.md. You MUST use it during development
+Read .ai/standards/semantics.md. You MUST use it during development

View File

@@ -18,7 +18,7 @@ Identify inconsistencies, duplications, ambiguities, and underspecified items ac
**STRICTLY READ-ONLY**: Do **not** modify any files. Output a structured analysis report. Offer an optional remediation plan (user must explicitly approve before any follow-up editing commands would be invoked manually).
-**Constitution Authority**: The project constitution (`.specify/memory/constitution.md`) is **non-negotiable** within this analysis scope. Constitution conflicts are automatically CRITICAL and require adjustment of the spec, plan, or tasks—not dilution, reinterpretation, or silent ignoring of the principle. If a principle itself needs to change, that must occur in a separate, explicit constitution update outside `/speckit.analyze`.
+**Constitution Authority**: The project constitution (`.ai/standards/constitution.md`) is **non-negotiable** within this analysis scope. Constitution conflicts are automatically CRITICAL and require adjustment of the spec, plan, or tasks—not dilution, reinterpretation, or silent ignoring of the principle. If a principle itself needs to change, that must occur in a separate, explicit constitution update outside `/speckit.analyze`.
## Execution Steps
@@ -62,8 +62,8 @@ Load only the minimal necessary context from each artifact:
**From constitution:**
-- Load `.specify/memory/constitution.md` for principle validation
-- Load `semantic_protocol.md` for technical standard validation
+- Load `.ai/standards/constitution.md` for principle validation
+- Load `.ai/standards/semantics.md` for technical standard validation
### 3. Build Semantic Models

View File

@@ -16,11 +16,11 @@ You **MUST** consider the user input before proceeding (if not empty).
## Outline
-You are updating the project constitution at `.specify/memory/constitution.md`. This file is a TEMPLATE containing placeholder tokens in square brackets (e.g. `[PROJECT_NAME]`, `[PRINCIPLE_1_NAME]`). Your job is to (a) collect/derive concrete values, (b) fill the template precisely, and (c) propagate any amendments across dependent artifacts.
+You are updating the project constitution at `.ai/standards/constitution.md`. This file is a TEMPLATE containing placeholder tokens in square brackets (e.g. `[PROJECT_NAME]`, `[PRINCIPLE_1_NAME]`). Your job is to (a) collect/derive concrete values, (b) fill the template precisely, and (c) propagate any amendments across dependent artifacts.
Follow this execution flow:
-1. Load the existing constitution template at `.specify/memory/constitution.md`.
+1. Load the existing constitution template at `.ai/standards/constitution.md`.
- Identify every placeholder token of the form `[ALL_CAPS_IDENTIFIER]`.
**IMPORTANT**: The user might require less or more principles than the ones used in the template. If a number is specified, respect that - follow the general template. You will update the doc accordingly.
@@ -61,7 +61,7 @@ Follow this execution flow:
- Dates ISO format YYYY-MM-DD.
- Principles are declarative, testable, and free of vague language ("should" → replace with MUST/SHOULD rationale where appropriate).
-7. Write the completed constitution back to `.specify/memory/constitution.md` (overwrite).
+7. Write the completed constitution back to `.ai/standards/constitution.md` (overwrite).
8. Output a final summary to the user with:
- New version and bump rationale.
@@ -79,4 +79,4 @@ If the user supplies partial updates (e.g., only one principle revision), still
If critical info missing (e.g., ratification date truly unknown), insert `TODO(<FIELD_NAME>): explanation` and include in the Sync Impact Report under deferred items.
-Do not create a new template; always operate on the existing `.specify/memory/constitution.md` file.
+Do not create a new template; always operate on the existing `.ai/standards/constitution.md` file.

View File

@@ -0,0 +1,199 @@
---
description: Fix failing tests and implementation issues based on test reports
---
## User Input
```text
$ARGUMENTS
```
You **MUST** consider the user input before proceeding (if not empty).
## Goal
Analyze test failure reports, identify root causes, and fix implementation issues while preserving semantic protocol compliance.
## Operating Constraints
1. **USE CODER MODE**: Always switch to `coder` mode for code fixes
2. **SEMANTIC PROTOCOL**: Never remove semantic annotations ([DEF], @TAGS). Only update code logic.
3. **TEST DATA**: If tests use @TEST_DATA fixtures, preserve them when fixing
4. **NO DELETION**: Never delete existing tests or semantic annotations
5. **REPORT FIRST**: Always write a fix report before making changes
## Execution Steps
### 1. Load Test Report
**Required**: Test report file path (e.g., `specs/<feature>/tests/reports/2026-02-19-report.md`)
**Parse the report for**:
- Failed test cases
- Error messages
- Stack traces
- Expected vs actual behavior
- Affected modules/files
### 2. Analyze Root Causes
For each failed test:
1. **Read the test file** to understand what it's testing
2. **Read the implementation file** to find the bug
3. **Check semantic protocol compliance**:
- Does the implementation have correct [DEF] anchors?
- Are @TAGS (@PRE, @POST, @UX_STATE, etc.) present?
- Does the code match the TIER requirements?
4. **Identify the fix**:
- Logic error in implementation
- Missing error handling
- Incorrect API usage
- State management issue
### 3. Write Fix Report
Create a structured fix report:
```markdown
# Fix Report: [FEATURE]
**Date**: [YYYY-MM-DD]
**Report**: [Test Report Path]
**Fixer**: Coder Agent
## Summary
- Total Failed Tests: [X]
- Total Fixed: [X]
- Total Skipped: [X]
## Failed Tests Analysis
### Test: [Test Name]
**File**: `path/to/test.py`
**Error**: [Error message]
**Root Cause**: [Explanation of why test failed]
**Fix Required**: [Description of fix]
**Status**: [Pending/In Progress/Completed]
## Fixes Applied
### Fix 1: [Description]
**Affected File**: `path/to/file.py`
**Test Affected**: `[Test Name]`
**Changes**:
```diff
<<<<<<< SEARCH
[Original Code]
=======
[Fixed Code]
>>>>>>> REPLACE
```
**Verification**: [How to verify fix works]
**Semantic Integrity**: [Confirmed annotations preserved]
## Next Steps
- [ ] Run tests to verify fix: `cd backend && .venv/bin/python3 -m pytest`
- [ ] Check for related failing tests
- [ ] Update test documentation if needed
```
### 4. Apply Fixes (in Coder Mode)
Switch to `coder` mode and apply fixes:
1. **Read the implementation file** to get exact content
2. **Apply the fix** using apply_diff
3. **Preserve all semantic annotations**:
- Keep [DEF:...] and [/DEF:...] anchors
- Keep all @TAGS (@PURPOSE, @LAYER, @TIER, @RELATION, @PRE, @POST, @UX_STATE, @UX_FEEDBACK, @UX_RECOVERY)
4. **Only update code logic** to fix the bug
5. **Run tests** to verify the fix
### 5. Verification
After applying fixes:
1. **Run tests**:
```bash
cd backend && .venv/bin/python3 -m pytest -v
```
or
```bash
cd frontend && npm run test
```
2. **Check test results**:
- Failed tests should now pass
- No new tests should fail
- Coverage should not decrease
3. **Update fix report** with results:
- Mark fixes as completed
- Add verification steps
- Note any remaining issues
## Output
Generate final fix report:
```markdown
# Fix Report: [FEATURE] - COMPLETED
**Date**: [YYYY-MM-DD]
**Report**: [Test Report Path]
**Fixer**: Coder Agent
## Summary
- Total Failed Tests: [X]
- Total Fixed: [X] ✅
- Total Skipped: [X]
## Fixes Applied
### Fix 1: [Description] ✅
**Affected File**: `path/to/file.py`
**Test Affected**: `[Test Name]`
**Changes**: [Summary of changes]
**Verification**: All tests pass ✅
**Semantic Integrity**: Preserved ✅
## Test Results
```
[Full test output showing all passing tests]
```
## Recommendations
- [ ] Monitor for similar issues
- [ ] Update documentation if needed
- [ ] Consider adding more tests for edge cases
## Related Files
- Test Report: [path]
- Implementation: [path]
- Test File: [path]
```
## Context for Fixing
$ARGUMENTS

View File

@@ -51,7 +51,7 @@ You **MUST** consider the user input before proceeding (if not empty).
- Automatically proceed to step 3
3. Load and analyze the implementation context:
-- **REQUIRED**: Read `semantic_protocol.md` for strict coding standards and contract requirements
+- **REQUIRED**: Read `.ai/standards/semantics.md` for strict coding standards and contract requirements
- **REQUIRED**: Read tasks.md for the complete task list and execution plan
- **REQUIRED**: Read plan.md for tech stack, architecture, and file structure
- **IF EXISTS**: Read data-model.md for entities and relationships
@@ -117,7 +117,8 @@ You **MUST** consider the user input before proceeding (if not empty).
- **Validation checkpoints**: Verify each phase completion before proceeding
7. Implementation execution rules:
-- **Strict Adherence**: Apply `semantic_protocol.md` rules - every file must start with [DEF] header, include @TIER, and define contracts
+- **Strict Adherence**: Apply `.ai/standards/semantics.md` rules - every file must start with [DEF] header, include @TIER, and define contracts.
+- **CRITICAL Contracts**: If a task description contains a contract summary (e.g., `CRITICAL: PRE: ..., POST: ...`), these constraints are **MANDATORY** and must be strictly implemented in the code using guards/assertions (if applicable per protocol).
- **Setup first**: Initialize project structure, dependencies, configuration
- **Tests before code**: If you need to write tests for contracts, entities, and integration scenarios
- **Core development**: Implement models, services, CLI commands, endpoints

View File

@@ -22,7 +22,7 @@ You **MUST** consider the user input before proceeding (if not empty).
1. **Setup**: Run `.specify/scripts/bash/setup-plan.sh --json` from repo root and parse JSON for FEATURE_SPEC, IMPL_PLAN, SPECS_DIR, BRANCH. For single quotes in args like "I'm Groot", use escape syntax: e.g 'I'\''m Groot' (or double-quote if possible: "I'm Groot").
-2. **Load context**: Read FEATURE_SPEC, `ux_reference.md`, `semantic_protocol.md` and `.specify/memory/constitution.md`. Load IMPL_PLAN template (already copied).
+2. **Load context**: Read `.ai/ROOT.md` and `.ai/PROJECT_MAP.md` to understand the project structure and navigation. Then read required standards: `.ai/standards/constitution.md` and `.ai/standards/semantics.md`. Load IMPL_PLAN template.
3. **Execute plan workflow**: Follow the structure in IMPL_PLAN template to:
- Fill Technical Context (mark unknowns as "NEEDS CLARIFICATION")
@@ -66,25 +66,30 @@ You **MUST** consider the user input before proceeding (if not empty).
0. **Validate Design against UX Reference**:
- Check if the proposed architecture supports the latency, interactivity, and flow defined in `ux_reference.md`.
-- **CRITICAL**: If the technical plan requires compromising the UX defined in `ux_reference.md` (e.g. "We can't do real-time validation because X"), you **MUST STOP** and warn the user. Do not proceed until resolved.
+- **Linkage**: Ensure key UI states from `ux_reference.md` map to Component Contracts (`@UX_STATE`).
+- **CRITICAL**: If the technical plan compromises the UX (e.g. "We can't do real-time validation"), you **MUST STOP** and warn the user.
1. **Extract entities from feature spec** → `data-model.md`:
-- Entity name, fields, relationships
-- Validation rules from requirements
-- State transitions if applicable
+- Entity name, fields, relationships, validation rules.
-2. **Define Module & Function Contracts (Semantic Protocol)**:
-- **MANDATORY**: For every new module, define the [DEF] Header and Module-level Contract (@TIER, @PURPOSE, @INVARIANT) as per `semantic_protocol.md`.
-- **REQUIRED**: Define Function Contracts (@PRE, @POST) for critical logic.
-- Output specific contract definitions to `contracts/modules.md` or append to `data-model.md` to guide implementation.
-- Ensure strict adherence to `semantic_protocol.md` syntax.
+2. **Design & Verify Contracts (Semantic Protocol)**:
+- **Drafting**: Define [DEF] Headers and Contracts for all new modules based on `.ai/standards/semantics.md`.
+- **TIER Classification**: Explicitly assign `@TIER: [CRITICAL|STANDARD|TRIVIAL]` to each module.
+- **CRITICAL Requirements**: For all CRITICAL modules, define full `@PRE`, `@POST`, and (if UI) `@UX_STATE` contracts.
+- **Self-Review**:
+  - *Completeness*: Do `@PRE`/`@POST` cover edge cases identified in Research?
+  - *Connectivity*: Do `@RELATION` tags form a coherent graph?
+  - *Compliance*: Does syntax match `[DEF:id:Type]` exactly?
+- **Output**: Write verified contracts to `contracts/modules.md`.
-3. **Generate API contracts** from functional requirements:
-- For each user action → endpoint
-- Use standard REST/GraphQL patterns
-- Output OpenAPI/GraphQL schema to `/contracts/`
-3. **Agent context update**:
+3. **Simulate Contract Usage**:
+- Trace one key user scenario through the defined contracts to ensure data flow continuity.
+- If a contract interface mismatch is found, fix it immediately.
+4. **Generate API contracts**:
+- Output OpenAPI/GraphQL schema to `/contracts/` for backend-frontend sync.
+5. **Agent context update**:
- Run `.specify/scripts/bash/update-agent-context.sh kilocode`
- These scripts detect which AI agent is in use
- Update the appropriate agent-specific context file

View File

@@ -119,7 +119,10 @@ Every task MUST strictly follow this format:
- If tests requested: Tests specific to that story
- Mark story dependencies (most stories should be independent)
-2. **From Contracts**:
+2. **From Contracts (CRITICAL TIER)**:
+- Identify components marked as `@TIER: CRITICAL` in `contracts/modules.md`.
+- For these components, **MUST** append the summary of `@PRE`, `@POST`, and `@UX_STATE` contracts directly to the task description.
+- Example: `- [ ] T005 [P] [US1] Implement Auth (CRITICAL: PRE: token exists, POST: returns User) in src/auth.py`
- Map each contract/endpoint → to the user story it serves
- If tests requested: Each contract → contract test task [P] before implementation in that story's phase

View File

@@ -1,10 +1,7 @@
---
-description: Run semantic validation and functional tests for a specific feature, module, or file.
-handoffs:
-- label: Fix Implementation
-agent: speckit.implement
-prompt: Fix the issues found during testing...
-send: true
+description: Generate tests, manage test documentation, and ensure maximum code coverage
---
## User Input
@@ -13,54 +10,169 @@ handoffs:
$ARGUMENTS
```
-**Input format:** Can be a file path, a directory, or a feature name.
-## Outline
-1. **Context Analysis**:
-- Determine the target scope (Backend vs Frontend vs Full Feature).
-- Read `semantic_protocol.md` to load validation rules.
-2. **Phase 1: Semantic Static Analysis (The "Compiler" Check)**
-- **Command:** Use `grep` or script to verify Protocol compliance before running code.
-- **Check:**
-- Does the file start with `[DEF:...]` header?
-- Are `@TIER` and `@PURPOSE` defined?
-- Are imports located *after* the contracts?
-- Do functions marked "Critical" have `@PRE`/`@POST` tags?
-- **Action:** If this phase fails, **STOP** and report "Semantic Compilation Failed". Do not run runtime tests.
-3. **Phase 2: Environment Prep**
-- Detect project type:
-- **Python**: Check if `.venv` is active.
-- **Svelte**: Check if `node_modules` exists.
-- **Command:** Run linter (e.g., `ruff check`, `eslint`) to catch syntax errors immediately.
-4. **Phase 3: Test Execution (Runtime)**
-- Select the test runner based on the file path:
-- **Backend (`*.py`)**:
-- Command: `pytest <path_to_test_file> -v`
-- If no specific test file exists, try to find it by convention: `tests/test_<module_name>.py`.
-- **Frontend (`*.svelte`, `*.ts`)**:
-- Command: `npm run test -- <path_to_component>`
-- **Verification**:
-- Analyze output logs.
-- If tests fail, summarize the failure (AssertionError, Timeout, etc.).
-5. **Phase 4: Contract Coverage Check (Manual/LLM verify)**
-- Review the test cases executed.
-- **Question**: Do the tests explicitly verify the `@POST` guarantees defined in the module header?
-- **Report**: Mark as "Weak Coverage" if contracts exist but aren't tested.
-## Execution Rules
-- **Fail Fast**: If semantic headers are missing, don't waste time running pytest.
-- **No Silent Failures**: Always output the full error log if a command fails.
-- **Auto-Correction Hint**: If a test fails, suggest the specific `speckit.implement` command to fix it.
-## Example Commands
-- **Python**: `pytest backend/tests/test_auth.py`
-- **Svelte**: `npm run test:unit -- src/components/Button.svelte`
-- **Lint**: `ruff check backend/src/api/`
+You **MUST** consider the user input before proceeding (if not empty).
+## Goal
+Execute full testing cycle: analyze code for testable modules, write tests with proper coverage, maintain test documentation, and ensure no test duplication or deletion.
+## Operating Constraints
+1. **NEVER delete existing tests** - Only update if they fail due to bugs in the test or implementation
+2. **NEVER duplicate tests** - Check existing tests first before creating new ones
+3. **Use TEST_DATA fixtures** - For CRITICAL tier modules, read @TEST_DATA from .ai/standards/semantics.md
+4. **Co-location required** - Write tests in `__tests__` directories relative to the code being tested
+## Execution Steps
+### 1. Analyze Context
+Run `.specify/scripts/bash/check-prerequisites.sh --json --require-tasks --include-tasks` from repo root and parse FEATURE_DIR and AVAILABLE_DOCS.
+Determine:
+- FEATURE_DIR - where the feature is located
+- TASKS_FILE - path to tasks.md
+- Which modules need testing based on task status
+### 2. Load Relevant Artifacts
+**From tasks.md:**
+- Identify completed implementation tasks (not test tasks)
+- Extract file paths that need tests
**From .ai/standards/semantics.md:**
- Read @TIER annotations for modules
- For CRITICAL modules: Read @TEST_DATA fixtures
**From existing tests:**
- Scan `__tests__` directories for existing tests
- Identify test patterns and coverage gaps
### 3. Test Coverage Analysis
Create coverage matrix:
| Module | File | Has Tests | TIER | TEST_DATA Available |
|--------|------|-----------|------|-------------------|
| ... | ... | ... | ... | ... |
### 4. Write Tests (TDD Approach)
For each module requiring tests:
1. **Check existing tests**: Scan `__tests__/` for duplicates
2. **Read TEST_DATA**: If CRITICAL tier, read @TEST_DATA from .ai/standards/semantics.md
3. **Write test**: Follow co-location strategy
- Python: `src/module/__tests__/test_module.py`
- Svelte: `src/lib/components/__tests__/test_component.test.js`
4. **Use mocks**: Use `unittest.mock.MagicMock` for external dependencies
### 4a. UX Contract Testing (Frontend Components)
For Svelte components with `@UX_STATE`, `@UX_FEEDBACK`, `@UX_RECOVERY` tags:
1. **Parse UX tags**: Read component file and extract all `@UX_*` annotations
2. **Generate UX tests**: Create tests for each UX state transition
```javascript
// Example: Testing @UX_STATE: Idle -> Expanded
it('should transition from Idle to Expanded on toggle click', async () => {
render(Sidebar);
const toggleBtn = screen.getByRole('button', { name: /toggle/i });
await fireEvent.click(toggleBtn);
expect(screen.getByTestId('sidebar')).toHaveClass('expanded');
});
```
3. **Test @UX_FEEDBACK**: Verify visual feedback (toast, shake, color changes)
4. **Test @UX_RECOVERY**: Verify error recovery mechanisms (retry, clear input)
5. **Use @UX_TEST fixtures**: If component has `@UX_TEST` tags, use them as test specifications
**UX Test Template:**
```javascript
// [DEF:__tests__/test_Component:Module]
// @RELATION: VERIFIES -> ../Component.svelte
// @PURPOSE: Test UX states and transitions
describe('Component UX States', () => {
// @UX_STATE: Idle -> {action: click, expected: Active}
it('should transition Idle -> Active on click', async () => { ... });
// @UX_FEEDBACK: Toast on success
it('should show toast on successful action', async () => { ... });
// @UX_RECOVERY: Retry on error
it('should allow retry on error', async () => { ... });
});
```
### 5. Test Documentation
Create/update documentation in `specs/<feature>/tests/`:
```
tests/
├── README.md # Test strategy and overview
├── coverage.md # Coverage matrix and reports
└── reports/
└── YYYY-MM-DD-report.md
```
### 6. Execute Tests
Run tests and report results:
**Backend:**
```bash
cd backend && .venv/bin/python3 -m pytest -v
```
**Frontend:**
```bash
cd frontend && npm run test
```
### 7. Update Tasks
Mark test tasks as completed in tasks.md with:
- Test file path
- Coverage achieved
- Any issues found
## Output
Generate test execution report:
```markdown
# Test Report: [FEATURE]
**Date**: [YYYY-MM-DD]
**Executed by**: Tester Agent
## Coverage Summary
| Module | Tests | Coverage % |
|--------|-------|------------|
| ... | ... | ... |
## Test Results
- Total: [X]
- Passed: [X]
- Failed: [X]
- Skipped: [X]
## Issues Found
| Test | Error | Resolution |
|------|-------|------------|
| ... | ... | ... |
## Next Steps
- [ ] Fix failed tests
- [ ] Add more coverage for [module]
- [ ] Review TEST_DATA fixtures
```
## Context for Testing
$ARGUMENTS

View File

@@ -1,25 +1,39 @@
customModes:
- slug: tester
  name: Tester
-  description: QA and Plan Verification Specialist
+  description: QA and Test Engineer - Full Testing Cycle
  roleDefinition: |-
-    You are Kilo Code, acting as a QA and Verification Specialist. Your primary goal is to validate that the project implementation aligns strictly with the defined specifications and task plans.
-    Your responsibilities include: - Reading and analyzing task plans and specifications (typically in the `specs/` directory). - Verifying that implemented code matches the requirements. - Executing tests and validating system behavior via CLI or Browser. - Updating the status of tasks in the plan files (e.g., marking checkboxes [x]) as they are verified. - Identifying and reporting missing features or bugs.
+    You are Kilo Code, acting as a QA and Test Engineer. Your primary goal is to ensure maximum test coverage, maintain test quality, and preserve existing tests.
+    Your responsibilities include:
+    - WRITING TESTS: Create comprehensive unit tests following TDD principles, using co-location strategy (`__tests__` directories).
+    - TEST DATA: For CRITICAL tier modules, you MUST use @TEST_DATA fixtures defined in .ai/standards/semantics.md. Read and apply them in your tests.
+    - DOCUMENTATION: Maintain test documentation in `specs/<feature>/tests/` directory with coverage reports and test case specifications.
+    - VERIFICATION: Run tests, analyze results, and ensure all tests pass.
+    - PROTECTION: NEVER delete existing tests. NEVER duplicate tests - check for existing tests first.
-  whenToUse: Use this mode when you need to audit the progress of a project, verify completed tasks against the plan, run quality assurance checks, or update the status of task lists in specification documents.
+  whenToUse: Use this mode when you need to write tests, run test coverage analysis, or perform quality assurance with full testing cycle.
  groups:
  - read
  - edit
  - command
  - browser
  - mcp
-  customInstructions: 1. Always begin by loading the relevant plan or task list from the `specs/` directory. 2. Do not assume a task is done just because it is checked; verify the code or functionality first if asked to audit. 3. When updating task lists, ensure you only mark items as complete if you have verified them.
+  customInstructions: |
+    1. KNOWLEDGE GRAPH: ALWAYS read .ai/ROOT.md first to understand the project structure and navigation.
+    2. CO-LOCATION: Write tests in `__tests__` subdirectories relative to the code being tested (Fractal Strategy).
+    3. TEST DATA MANDATORY: For CRITICAL modules, read @TEST_DATA from .ai/standards/semantics.md and use fixtures in tests.
+    4. UX CONTRACT TESTING: For Svelte components with @UX_STATE, @UX_FEEDBACK, @UX_RECOVERY tags, create comprehensive UX tests.
+    5. NO DELETION: Never delete existing tests - only update if they fail due to legitimate bugs.
+    6. NO DUPLICATION: Check existing tests in `__tests__/` before creating new ones. Reuse existing test patterns.
+    7. DOCUMENTATION: Create test reports in `specs/<feature>/tests/reports/YYYY-MM-DD-report.md`.
+    8. COVERAGE: Aim for maximum coverage but prioritize CRITICAL and STANDARD tier modules.
+    9. RUN TESTS: Execute tests using `cd backend && .venv/bin/python3 -m pytest` or `cd frontend && npm run test`.
- slug: semantic
  name: Semantic Agent
  roleDefinition: |-
-    You are Kilo Code, a Semantic Agent responsible for maintaining the semantic integrity of the codebase. Your primary goal is to ensure that all code entities (Modules, Classes, Functions, Components) are properly annotated with semantic anchors and tags as defined in `semantic_protocol.md`.
+    You are Kilo Code, a Semantic Agent responsible for maintaining the semantic integrity of the codebase. Your primary goal is to ensure that all code entities (Modules, Classes, Functions, Components) are properly annotated with semantic anchors and tags as defined in `.ai/standards/semantics.md`.
-    Your core responsibilities are: 1. **Semantic Mapping**: You run and maintain the `generate_semantic_map.py` script to generate up-to-date semantic maps (`semantics/semantic_map.json`, `specs/project_map.md`) and compliance reports (`semantics/reports/*.md`). 2. **Compliance Auditing**: You analyze the generated compliance reports to identify files with low semantic coverage or parsing errors. 3. **Semantic Enrichment**: You actively edit code files to add missing semantic anchors (`[DEF:...]`, `[/DEF:...]`) and mandatory tags (`@PURPOSE`, `@LAYER`, etc.) to improve the global compliance score. 4. **Protocol Enforcement**: You strictly adhere to the syntax and rules defined in `semantic_protocol.md` when modifying code.
+    Your core responsibilities are: 1. **Semantic Mapping**: You run and maintain the `generate_semantic_map.py` script to generate up-to-date semantic maps (`semantics/semantic_map.json`, `.ai/PROJECT_MAP.md`) and compliance reports (`semantics/reports/*.md`). 2. **Compliance Auditing**: You analyze the generated compliance reports to identify files with low semantic coverage or parsing errors. 3. **Semantic Enrichment**: You actively edit code files to add missing semantic anchors (`[DEF:...]`, `[/DEF:...]`) and mandatory tags (`@PURPOSE`, `@LAYER`, etc.) to improve the global compliance score. 4. **Protocol Enforcement**: You strictly adhere to the syntax and rules defined in `.ai/standards/semantics.md` when modifying code.
    You have access to the full codebase and tools to read, write, and execute scripts. You should prioritize fixing "Critical Parsing Errors" (unclosed anchors) before addressing missing metadata.
-  whenToUse: Use this mode when you need to update the project's semantic map, fix semantic compliance issues (missing anchors/tags/DbC), or analyze the codebase structure. This mode is specialized for maintaining the `semantic_protocol.md` standards.
+  whenToUse: Use this mode when you need to update the project's semantic map, fix semantic compliance issues (missing anchors/tags/DbC), or analyze the codebase structure. This mode is specialized for maintaining the `.ai/standards/semantics.md` standards.
  description: Codebase semantic mapping and compliance expert
  customInstructions: Always check `semantics/reports/` for the latest compliance status before starting work. When fixing a file, try to fix all semantic issues in that file at once. After making a batch of fixes, run `python3 generate_semantic_map.py` to verify improvements.
  groups:
@@ -33,11 +47,36 @@ customModes:
  name: Product Manager
  roleDefinition: |-
    Your purpose is to rigorously execute the workflows defined in `.kilocode/workflows/`.
-    You act as the orchestrator for: - Specification (`speckit.specify`, `speckit.clarify`) - Planning (`speckit.plan`) - Task Management (`speckit.tasks`, `speckit.taskstoissues`) - Quality Assurance (`speckit.analyze`, `speckit.checklist`) - Governance (`speckit.constitution`) - Implementation Oversight (`speckit.implement`)
+    You act as the orchestrator for: - Specification (`speckit.specify`, `speckit.clarify`) - Planning (`speckit.plan`) - Task Management (`speckit.tasks`, `speckit.taskstoissues`) - Quality Assurance (`speckit.analyze`, `speckit.checklist`, `speckit.test`, `speckit.fix`) - Governance (`speckit.constitution`) - Implementation Oversight (`speckit.implement`)
    For each task, you must read the relevant workflow file from `.kilocode/workflows/` and follow its Execution Steps precisely.
  whenToUse: Use this mode when you need to run any /speckit.* command or when dealing with high-level feature planning, specification writing, or project management tasks.
  description: Executes SpecKit workflows for feature management
-  customInstructions: 1. Always read the specific workflow file in `.kilocode/workflows/` before executing a command. 2. Adhere strictly to the "Operating Constraints" and "Execution Steps" in the workflow files.
+  customInstructions: 1. Always read `.ai/ROOT.md` first to understand the Knowledge Graph structure. 2. Read the specific workflow file in `.kilocode/workflows/` before executing a command. 3. Adhere strictly to the "Operating Constraints" and "Execution Steps" in the workflow files.
groups:
- read
- edit
- command
- mcp
source: project
- slug: coder
name: Coder
roleDefinition: You are Kilo Code, acting as an Implementation Specialist. Your primary goal is to write code that strictly follows the Semantic Protocol defined in `.ai/standards/semantics.md`.
whenToUse: Use this mode when you need to implement features, write code, or fix issues based on test reports.
description: Implementation Specialist - Semantic Protocol Compliant
customInstructions: |
1. KNOWLEDGE GRAPH: ALWAYS read .ai/ROOT.md first to understand the project structure and navigation.
2. CONSTITUTION: Strictly follow architectural invariants in .ai/standards/constitution.md.
3. SEMANTIC PROTOCOL: ALWAYS use .ai/standards/semantics.md as your source of truth for syntax.
4. ANCHOR FORMAT: Use #[DEF:filename:Type] at start and #[/DEF:filename] at end.
5. TAGS: Add @PURPOSE, @LAYER, @TIER, @RELATION, @PRE, @POST, @UX_STATE, @UX_FEEDBACK, @UX_RECOVERY.
6. TIER COMPLIANCE:
- CRITICAL: Full contract + all UX tags + strict logging
- STANDARD: Basic contract + UX tags where applicable
- TRIVIAL: Only anchors + @PURPOSE
7. CODE SIZE: Keep modules under 300 lines. Refactor if exceeding.
8. ERROR HANDLING: Use if/raise or guards, never assert.
9. TEST FIXES: When fixing failing tests, preserve semantic annotations. Only update code logic.
10. RUN TESTS: After fixes, run tests to verify: `cd backend && .venv/bin/python3 -m pytest` or `cd frontend && npm run test`.
groups:
- read
- edit
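
For reference, the anchor and tag layout the Coder instructions describe looks roughly like this in a STANDARD-tier Python module (the module and function names here are illustrative, not taken from the repository):

```python
# [DEF:backend.src.services.slug_service:Module]
# @TIER: STANDARD
# @PURPOSE: Shows the anchor/tag layout required by the Coder mode
# @LAYER: Service

# [DEF:normalize_slug:Function]
# @PURPOSE: Lowercase and hyphenate a title for use as a URL slug
# @PRE: title is a non-empty string
# @POST: Returned slug contains no spaces
def normalize_slug(title: str) -> str:
    if not title:  # guard with if/raise, never assert (rule 8 above)
        raise ValueError("title must be non-empty")
    return title.strip().lower().replace(" ", "-")
# [/DEF:normalize_slug:Function]
# [/DEF:backend.src.services.slug_service:Module]
```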

View File

@@ -1,55 +0,0 @@
<!--
SYNC IMPACT REPORT
Version: 2.2.0 (ConfigManager Discipline)
Changes:
- Updated Principle II: Added mandatory requirement for using `ConfigManager` (via dependency injection) for all configuration access to ensure consistent environment handling and avoid hardcoded values.
- Updated Principle III: Refined `requestApi` requirement.
Templates Status:
- .specify/templates/plan-template.md: ✅ Aligned.
- .specify/templates/spec-template.md: ✅ Aligned.
- .specify/templates/tasks-template.md: ✅ Aligned.
-->
# Semantic Code Generation Constitution
## Core Principles
### I. Semantic Protocol Compliance
The file `semantic_protocol.md` is the **sole and authoritative technical standard** for this project.
- **Law**: All code must adhere to the Axioms (Meaning First, Contract First, etc.) defined in the Protocol.
- **Syntax & Structure**: Anchors (`[DEF]`), Tags (`@KEY`), and File Structures must strictly match the Protocol.
- **Compliance**: Any deviation from `semantic_protocol.md` constitutes a build failure.
### II. Everything is a Plugin & Centralized Config
All functional extensions, tools, or major features must be implemented as modular Plugins inheriting from `PluginBase`.
- **Modularity**: Logic should not reside in standalone services or scripts unless strictly necessary for core infrastructure. This ensures a unified execution model via the `TaskManager`.
- **Configuration Discipline**: All configuration access (environments, settings, paths) MUST use the `ConfigManager`. In the backend, the singleton instance MUST be obtained via dependency injection (`get_config_manager()`). Hardcoding environment IDs (e.g., "1") or paths is STRICTLY FORBIDDEN.
### III. Unified Frontend Experience
To ensure a consistent and accessible user experience, all frontend implementations must strictly adhere to the unified design and localization standards.
- **Component Reusability**: All UI elements MUST utilize the standardized Svelte component library (`src/lib/ui`) and centralized design tokens.
- **Internationalization (i18n)**: All user-facing text MUST be extracted to the translation system (`src/lib/i18n`).
- **Backend Communication**: All API requests MUST use the `requestApi` wrapper (or its derivatives like `fetchApi`, `postApi`) from `src/lib/api.js`. Direct use of the native `fetch` API for backend communication is FORBIDDEN to ensure consistent authentication (JWT) and error handling.
### IV. Security & Access Control
To support the Role-Based Access Control (RBAC) system, all functional components must define explicit permissions.
- **Granular Permissions**: Every Plugin MUST define a unique permission string (e.g., `plugin:name:execute`) required for its operation.
- **Registration**: These permissions MUST be registered in the system database (`auth.db`) during initialization.
### V. Independent Testability
Every feature specification MUST define "Independent Tests" that allow the feature to be verified in isolation.
- **Decoupling**: Features should be designed such that they can be tested without requiring the full application state or external dependencies where possible.
- **Verification**: A feature is not complete until its Independent Test scenarios pass.
### VI. Asynchronous Execution
All long-running or resource-intensive operations (migrations, analysis, backups, external API calls) MUST be executed as asynchronous tasks via the `TaskManager`.
- **Non-Blocking**: HTTP API endpoints MUST NOT block on these operations; they should spawn a task and return a Task ID.
- **Observability**: Tasks MUST emit real-time status updates via the WebSocket infrastructure.
## Governance
This Constitution establishes the "Semantic Code Generation Protocol" as the supreme law of this repository.
- **Authoritative Source**: `semantic_protocol.md` defines the specific implementation rules for Principle I.
- **Amendments**: Changes to core principles require a Constitution amendment. Changes to technical syntax require a Protocol update.
- **Compliance**: Failure to adhere to the Protocol constitutes a build failure.
**Version**: 2.2.0 | **Ratified**: 2025-12-19 | **Last Amended**: 2026-01-29
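
Principle IV implies a reusable permission dependency shared by all routes. A hypothetical sketch of such a factory follows; the real implementation lives elsewhere in the backend and may differ, and `User` and `get_current_user` here are stand-ins:

```python
from dataclasses import dataclass, field
from fastapi import Depends, HTTPException

@dataclass
class User:                      # stand-in for the real auth model
    permissions: set = field(default_factory=set)

def get_current_user() -> User:  # stand-in; the project resolves this from a JWT
    return User(permissions={"plugin:migration:read"})

def has_permission(resource: str, action: str):
    # Returns a FastAPI dependency that rejects users lacking resource:action.
    def checker(user: User = Depends(get_current_user)) -> bool:
        required = f"{resource}:{action.lower()}"
        if required not in user.permissions:
            raise HTTPException(status_code=403, detail=f"Missing permission: {required}")
        return True
    return checker
```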

View File

@@ -2,6 +2,12 @@
Auto-generated from all feature plans. Last updated: [DATE]
## Knowledge Graph (GRACE)
**CRITICAL**: This project uses a GRACE Knowledge Graph for context. Always load the root map first:
- **Root Map**: `.ai/ROOT.md` -> `[DEF:Project_Knowledge_Map:Root]`
- **Project Map**: `.ai/PROJECT_MAP.md` -> `[DEF:Project_Map]`
- **Standards**: Read `.ai/standards/` for architecture and style rules.
## Active Technologies
[EXTRACTED FROM ALL PLAN.MD FILES]

View File

@@ -17,8 +17,8 @@
the iteration process.
-->
**Language/Version**: [e.g., Python 3.11, Swift 5.9, Rust 1.75 or NEEDS CLARIFICATION]
-**Primary Dependencies**: [e.g., FastAPI, UIKit, LLVM or NEEDS CLARIFICATION]
+**Primary Dependencies**: [e.g., FastAPI, Tailwind CSS, SvelteKit or NEEDS CLARIFICATION]
**Storage**: [if applicable, e.g., PostgreSQL, CoreData, files or N/A]
**Testing**: [e.g., pytest, XCTest, cargo test or NEEDS CLARIFICATION]
**Target Platform**: [e.g., Linux server, iOS 15+, WASM or NEEDS CLARIFICATION]
@@ -102,3 +102,14 @@ directories captured above]
|-----------|------------|-------------------------------------|
| [e.g., 4th project] | [current need] | [why 3 projects insufficient] |
| [e.g., Repository pattern] | [specific problem] | [why direct DB access insufficient] |
## Test Data Reference
> **For CRITICAL tier components, reference test fixtures from spec.md**
| Component | TIER | Fixture Name | Location |
|-----------|------|--------------|----------|
| [e.g., DashboardAPI] | CRITICAL | valid_dashboard | spec.md#test-data-fixtures |
| [e.g., TaskDrawer] | CRITICAL | task_states | spec.md#test-data-fixtures |
**Note**: Tester Agent MUST use these fixtures when writing unit tests for CRITICAL modules. See `.ai/standards/semantics.md` for @TEST_DATA syntax.
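
To make the contract concrete, here is a minimal sketch of a co-located test consuming such a fixture; the file path, module name, and fixture shape are illustrative assumptions, not repository code:

```python
# File (illustrative): backend/src/api/routes/__tests__/test_fixture_usage.py
# [DEF:backend.src.api.routes.__tests__.test_fixture_usage:Module]
# @TIER: STANDARD
# @PURPOSE: Example of a co-located test reusing a @TEST_DATA fixture
# @RELATION: VERIFIES -> backend.src.api.routes.dashboards

# Mirrors the valid_dashboard fixture from spec.md (assumed shape).
VALID_DASHBOARD = {
    "id": 1,
    "title": "Sales Report",
    "slug": "sales",
    "git_status": {"branch": "main", "sync_status": "OK"},
}

def test_valid_dashboard_contract():
    # Assert only the fields the fixture contract guarantees.
    assert VALID_DASHBOARD["id"] == 1
    assert VALID_DASHBOARD["git_status"]["sync_status"] in ("OK", "DIFF")
# [/DEF:backend.src.api.routes.__tests__.test_fixture_usage:Module]
```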

View File

@@ -114,3 +114,52 @@
- **SC-002**: [Measurable metric, e.g., "System handles 1000 concurrent users without degradation"] - **SC-002**: [Measurable metric, e.g., "System handles 1000 concurrent users without degradation"]
- **SC-003**: [User satisfaction metric, e.g., "90% of users successfully complete primary task on first attempt"] - **SC-003**: [User satisfaction metric, e.g., "90% of users successfully complete primary task on first attempt"]
- **SC-004**: [Business metric, e.g., "Reduce support tickets related to [X] by 50%"] - **SC-004**: [Business metric, e.g., "Reduce support tickets related to [X] by 50%"]
---
## Test Data Fixtures *(recommended for CRITICAL components)*
<!--
Define reference/fixture data for testing CRITICAL tier components.
This data will be used by the Tester Agent when writing unit tests.
Format: JSON or YAML that matches the component's data structures.
-->
### Fixtures
```yaml
# Example fixture format
fixture_name:
description: "Description of this test data"
data:
# JSON or YAML data structure
```
### Example: Dashboard API
```yaml
valid_dashboard:
description: "Valid dashboard object for API responses"
data:
id: 1
title: "Sales Report"
slug: "sales"
git_status:
branch: "main"
sync_status: "OK"
last_task:
task_id: "task-123"
status: "SUCCESS"
empty_dashboards:
description: "Empty dashboard list response"
data:
dashboards: []
total: 0
page: 1
error_not_found:
description: "404 error response"
data:
detail: "Dashboard not found"
```
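
If the fixtures are extracted from spec.md into a standalone YAML file, a test can load them generically. A minimal sketch, assuming PyYAML is available and using an illustrative `fixtures.yaml` path:

```python
import yaml  # PyYAML, assumed available in the test environment

def load_fixture(name: str, path: str = "specs/feature/fixtures.yaml") -> dict:
    # Each top-level key holds {"description": ..., "data": ...} as shown above.
    with open(path, encoding="utf-8") as fh:
        fixtures = yaml.safe_load(fh)
    return fixtures[name]["data"]

# Usage in a test:
# dashboard = load_fixture("valid_dashboard")
# assert dashboard["git_status"]["sync_status"] == "OK"
```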

View File

@@ -93,7 +93,8 @@ Examples of foundational tasks (adjust based on your project):
- [ ] T014 [US1] Implement [Service] in src/services/[service].py (depends on T012, T013)
- [ ] T015 [US1] Implement [endpoint/feature] in src/[location]/[file].py
- [ ] T016 [US1] Add validation and error handling
-- [ ] T017 [US1] Add logging for user story 1 operations
+- [ ] T017 [US1] [P] Implement UI using Tailwind CSS (minimize scoped styles)
+- [ ] T018 [US1] Add logging for user story 1 operations
**Checkpoint**: At this point, User Story 1 should be fully functional and testable independently

View File

@@ -0,0 +1,152 @@
---
description: "Test documentation template for feature implementation"
---
# Test Documentation: [FEATURE NAME]
**Feature**: [Link to spec.md]
**Created**: [DATE]
**Updated**: [DATE]
**Tester**: [Agent/User Name]
---
## Overview
[Brief description of what this feature does and why testing is important]
**Test Strategy**:
- [ ] Unit Tests (co-located in `__tests__/` directories)
- [ ] Integration Tests (if needed)
- [ ] E2E Tests (if critical user flows)
- [ ] Contract Tests (for API endpoints)
---
## Test Coverage Matrix
| Module | File | Unit Tests | Coverage % | Status |
|--------|------|------------|------------|--------|
| [Module Name] | `path/to/file.py` | [x] | [XX%] | [Pass/Fail] |
| [Module Name] | `path/to/file.svelte` | [x] | [XX%] | [Pass/Fail] |
---
## Test Cases
### [Module Name]
**Target File**: `path/to/module.py`
| ID | Test Case | Type | Expected Result | Status |
|----|-----------|------|------------------|--------|
| TC001 | [Description] | [Unit/Integration] | [Expected] | [Pass/Fail] |
| TC002 | [Description] | [Unit/Integration] | [Expected] | [Pass/Fail] |
---
## Test Execution Reports
### Report [YYYY-MM-DD]
**Executed by**: [Tester]
**Duration**: [X] minutes
**Result**: [Pass/Fail]
**Summary**:
- Total Tests: [X]
- Passed: [X]
- Failed: [X]
- Skipped: [X]
**Failed Tests**:
| Test | Error | Resolution |
|------|-------|-------------|
| [Test Name] | [Error Message] | [How Fixed] |
---
## Anti-Patterns & Rules
### ✅ DO
1. Write tests BEFORE implementation (TDD approach)
2. Use co-location: `src/module/__tests__/test_module.py`
3. Use MagicMock for external dependencies (DB, Auth, APIs)
4. Include semantic annotations: `# @RELATION: VERIFIES -> module.name`
5. Test edge cases and error conditions
6. **Test UX states** for Svelte components (@UX_STATE, @UX_FEEDBACK, @UX_RECOVERY)
### ❌ DON'T
1. Delete existing tests (only update if they fail)
2. Duplicate tests - check for existing tests first
3. Test implementation details, not behavior
4. Use real external services in unit tests
5. Skip error handling tests
6. **Skip UX contract tests** for CRITICAL frontend components
---
## UX Contract Testing (Frontend)
### UX States Coverage
| Component | @UX_STATE | @UX_FEEDBACK | @UX_RECOVERY | Tests |
|-----------|-----------|--------------|--------------|-------|
| [Component] | [states] | [feedback] | [recovery] | [status] |
### UX Test Cases
| ID | Component | UX Tag | Test Action | Expected Result | Status |
|----|-----------|--------|-------------|-----------------|--------|
| UX001 | [Component] | @UX_STATE: Idle | [action] | [expected] | [Pass/Fail] |
| UX002 | [Component] | @UX_FEEDBACK | [action] | [expected] | [Pass/Fail] |
| UX003 | [Component] | @UX_RECOVERY | [action] | [expected] | [Pass/Fail] |
### UX Test Examples
```javascript
// Testing @UX_STATE transition
it('should transition from Idle to Loading on submit', async () => {
render(FormComponent);
await fireEvent.click(screen.getByText('Submit'));
expect(screen.getByTestId('form')).toHaveClass('loading');
});
// Testing @UX_FEEDBACK
it('should show error toast on validation failure', async () => {
render(FormComponent);
await fireEvent.click(screen.getByText('Submit'));
expect(screen.getByRole('alert')).toHaveTextContent('Validation error');
});
// Testing @UX_RECOVERY
it('should allow retry after error', async () => {
render(FormComponent);
// Trigger error state
await fireEvent.click(screen.getByText('Submit'));
// Click retry
await fireEvent.click(screen.getByText('Retry'));
expect(screen.getByTestId('form')).not.toHaveClass('error');
});
```
---
## Notes
- [Additional notes about testing approach]
- [Known issues or limitations]
- [Recommendations for future testing]
---
## Related Documents
- [spec.md](./spec.md)
- [plan.md](./plan.md)
- [tasks.md](./tasks.md)
- [contracts/](./contracts/)

Dockerfile (new file)
View File

@@ -0,0 +1,36 @@
# Stage 1: Build frontend static assets
FROM node:20-alpine AS frontend-build
WORKDIR /app/frontend
COPY frontend/package*.json ./
RUN npm ci
COPY frontend/ ./
RUN npm run build
# Stage 2: Runtime image for backend + static frontend
FROM python:3.11-slim AS runtime
ENV PYTHONDONTWRITEBYTECODE=1
ENV PYTHONUNBUFFERED=1
ENV BACKEND_PORT=8000
WORKDIR /app
RUN apt-get update && apt-get install -y --no-install-recommends \
curl \
git \
&& rm -rf /var/lib/apt/lists/*
COPY backend/requirements.txt /app/backend/requirements.txt
RUN pip install --no-cache-dir -r /app/backend/requirements.txt
COPY backend/ /app/backend/
COPY --from=frontend-build /app/frontend/build /app/frontend/build
WORKDIR /app/backend
EXPOSE 8000
CMD ["python", "-m", "uvicorn", "src.app:app", "--host", "0.0.0.0", "--port", "8000"]

View File

@@ -32,7 +32,7 @@
## Technology Stack
- **Backend**: Python 3.9+, FastAPI, SQLAlchemy, APScheduler, Pydantic.
- **Frontend**: Node.js 18+, SvelteKit, Tailwind CSS.
-- **Database**: SQLite (stores metadata, tasks, and access settings).
+- **Database**: PostgreSQL (stores metadata, tasks, logs, and configuration).
## Project Structure
- `backend/` — Server side, API, and plugin logic.
@@ -58,20 +58,71 @@
- `--skip-install`: Skip dependency installation.
- `--help`: Show help.
Environment variables:
- `BACKEND_PORT`: API port (default 8000).
- `FRONTEND_PORT`: UI port (default 5173).
- `POSTGRES_URL`: Default base PostgreSQL URL for all subsystems.
- `DATABASE_URL`: Main database URL (falls back to `POSTGRES_URL` if unset).
- `TASKS_DATABASE_URL`: Tasks/logs database URL (falls back to `DATABASE_URL` if unset).
- `AUTH_DATABASE_URL`: Auth database URL (falls back to the PostgreSQL default if unset).
## Development
The project follows strict development rules:
-1. **Semantic Code Generation**: Use the `semantic_protocol.md` protocol to ensure code reliability.
+1. **Semantic Code Generation**: Use the `.ai/standards/semantics.md` protocol to ensure code reliability.
2. **Design by Contract (DbC)**: Define preconditions and postconditions for key functions.
3. **Constitution**: Follow the rules described in the project constitution in the `.specify/` folder.
### Useful Commands
- **Backend**: `cd backend && .venv/bin/python3 -m uvicorn src.app:app --reload`
- **Frontend**: `cd frontend && npm run dev`
- **Tests**: `cd backend && .venv/bin/pytest`
-## Contacts and Contributing
+## Docker and CI/CD
-To add new features or fix bugs, please read `docs/plugin_dev.md` and create a corresponding specification in `specs/`.
+### Running locally in Docker (app + PostgreSQL)
```bash
docker compose up --build
```
After startup:
- UI/API: `http://localhost:8000`
- PostgreSQL: `localhost:5432` (`postgres/postgres`, DB `ss_tools`)
To stop:
```bash
docker compose down
```
Full cleanup of the DB volume:
```bash
docker compose down -v
```
If `postgres:16-alpine` cannot be pulled from Docker Hub (TLS timeout), use a fallback image:
```bash
POSTGRES_IMAGE=mirror.gcr.io/library/postgres:16-alpine docker compose up -d db
```
or:
```bash
POSTGRES_IMAGE=bitnami/postgresql:latest docker compose up -d db
```
If port `5432` is already taken on the host, run Postgres on a different port:
```bash
POSTGRES_HOST_PORT=5433 docker compose up -d db
```
### Migrating legacy data to PostgreSQL
If you need to migrate legacy data from `tasks.db`/`config.json`:
```bash
cd backend
PYTHONPATH=. .venv/bin/python src/scripts/migrate_sqlite_to_postgres.py --sqlite-path tasks.db
```
### CI/CD
Workflow added: `.github/workflows/ci-cd.yml`
- backend smoke tests
- frontend build
- docker build
- push the image to GHCR on `main/master`
## Contacts and Contributing
To add new features or fix bugs, please read `docs/plugin_dev.md` and create a corresponding specification in `specs/`.
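
The fallback chain among the database URL variables fits in a few lines. A sketch of the resolution order as documented above; the actual `ConfigManager` implementation and the default URL shown here are assumptions:

```python
import os

DEFAULT_PG = "postgresql://postgres:postgres@localhost:5432/ss_tools"  # assumed default

def resolve_db_urls() -> dict:
    postgres = os.environ.get("POSTGRES_URL", DEFAULT_PG)
    database = os.environ.get("DATABASE_URL", postgres)
    return {
        "main": database,
        "tasks": os.environ.get("TASKS_DATABASE_URL", database),
        "auth": os.environ.get("AUTH_DATABASE_URL", postgres),
    }
```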

backend/get_full_key.py (new file)
View File

@@ -0,0 +1 @@
{"print(f'Length": {"else": "print('Provider not found')\ndb.close()"}}

File diff suppressed because it is too large

Binary file not shown.

View File

@@ -1 +1,10 @@
-from . import plugins, tasks, settings, connections, environments, mappings, migration, git, storage, admin
+# Lazy loading of route modules to avoid import issues in tests
# This allows tests to import routes without triggering all module imports
__all__ = ['plugins', 'tasks', 'settings', 'connections', 'environments', 'mappings', 'migration', 'git', 'storage', 'admin']
def __getattr__(name):
if name in __all__:
import importlib
return importlib.import_module(f".{name}", __name__)
raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
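
This is the module-level `__getattr__` hook from PEP 562: the submodule import is deferred until the attribute is first read. An illustrative consumer-side view (it only runs inside the project, where the import path resolves):

```python
import sys
from src.api import routes   # importing the package no longer imports submodules

assert "src.api.routes.tasks" not in sys.modules
tasks = routes.tasks         # first access triggers importlib.import_module
assert "src.api.routes.tasks" in sys.modules
```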

View File

@@ -0,0 +1,286 @@
# [DEF:backend.src.api.routes.__tests__.test_dashboards:Module]
# @TIER: STANDARD
# @PURPOSE: Unit tests for Dashboards API endpoints
# @LAYER: API
# @RELATION: TESTS -> backend.src.api.routes.dashboards
import pytest
from unittest.mock import MagicMock, patch, AsyncMock
from fastapi.testclient import TestClient
from src.app import app
from src.api.routes.dashboards import DashboardsResponse
client = TestClient(app)
# [DEF:test_get_dashboards_success:Function]
# @TEST: GET /api/dashboards returns 200 and valid schema
# @PRE: env_id exists
# @POST: Response matches DashboardsResponse schema
def test_get_dashboards_success():
with patch("src.api.routes.dashboards.get_config_manager") as mock_config, \
patch("src.api.routes.dashboards.get_resource_service") as mock_service, \
patch("src.api.routes.dashboards.get_task_manager") as mock_task_mgr, \
patch("src.api.routes.dashboards.has_permission") as mock_perm:
# Mock environment
mock_env = MagicMock()
mock_env.id = "prod"
mock_config.return_value.get_environments.return_value = [mock_env]
# Mock task manager
mock_task_mgr.return_value.get_all_tasks.return_value = []
# Mock resource service response
async def mock_get_dashboards(env, tasks):
return [
{
"id": 1,
"title": "Sales Report",
"slug": "sales",
"git_status": {"branch": "main", "sync_status": "OK"},
"last_task": {"task_id": "task-1", "status": "SUCCESS"}
}
]
mock_service.return_value.get_dashboards_with_status = AsyncMock(
side_effect=mock_get_dashboards
)
# Mock permission
mock_perm.return_value = lambda: True
response = client.get("/api/dashboards?env_id=prod")
assert response.status_code == 200
data = response.json()
assert "dashboards" in data
assert "total" in data
assert "page" in data
# [/DEF:test_get_dashboards_success:Function]
# [DEF:test_get_dashboards_with_search:Function]
# @TEST: GET /api/dashboards filters by search term
# @PRE: search parameter provided
# @POST: Only matching dashboards returned
def test_get_dashboards_with_search():
with patch("src.api.routes.dashboards.get_config_manager") as mock_config, \
patch("src.api.routes.dashboards.get_resource_service") as mock_service, \
patch("src.api.routes.dashboards.get_task_manager") as mock_task_mgr, \
patch("src.api.routes.dashboards.has_permission") as mock_perm:
# Mock environment
mock_env = MagicMock()
mock_env.id = "prod"
mock_config.return_value.get_environments.return_value = [mock_env]
mock_task_mgr.return_value.get_all_tasks.return_value = []
async def mock_get_dashboards(env, tasks):
return [
{"id": 1, "title": "Sales Report", "slug": "sales"},
{"id": 2, "title": "Marketing Dashboard", "slug": "marketing"}
]
mock_service.return_value.get_dashboards_with_status = AsyncMock(
side_effect=mock_get_dashboards
)
mock_perm.return_value = lambda: True
response = client.get("/api/dashboards?env_id=prod&search=sales")
assert response.status_code == 200
data = response.json()
# Only the dashboard matching the search term should remain
assert data["total"] == 1
# [/DEF:test_get_dashboards_with_search:Function]
# [DEF:test_get_dashboards_env_not_found:Function]
# @TEST: GET /api/dashboards returns 404 if env_id missing
# @PRE: env_id does not exist
# @POST: Returns 404 error
def test_get_dashboards_env_not_found():
with patch("src.api.routes.dashboards.get_config_manager") as mock_config, \
patch("src.api.routes.dashboards.has_permission") as mock_perm:
mock_config.return_value.get_environments.return_value = []
mock_perm.return_value = lambda: True
response = client.get("/api/dashboards?env_id=nonexistent")
assert response.status_code == 404
assert "Environment not found" in response.json()["detail"]
# [/DEF:test_get_dashboards_env_not_found:Function]
# [DEF:test_get_dashboards_invalid_pagination:Function]
# @TEST: GET /api/dashboards returns 400 for invalid page/page_size
# @PRE: page < 1 or page_size > 100
# @POST: Returns 400 error
def test_get_dashboards_invalid_pagination():
with patch("src.api.routes.dashboards.get_config_manager") as mock_config, \
patch("src.api.routes.dashboards.has_permission") as mock_perm:
mock_env = MagicMock()
mock_env.id = "prod"
mock_config.return_value.get_environments.return_value = [mock_env]
mock_perm.return_value = lambda: True
# Invalid page
response = client.get("/api/dashboards?env_id=prod&page=0")
assert response.status_code == 400
assert "Page must be >= 1" in response.json()["detail"]
# Invalid page_size
response = client.get("/api/dashboards?env_id=prod&page_size=101")
assert response.status_code == 400
assert "Page size must be between 1 and 100" in response.json()["detail"]
# [/DEF:test_get_dashboards_invalid_pagination:Function]
# [DEF:test_migrate_dashboards_success:Function]
# @TEST: POST /api/dashboards/migrate creates migration task
# @PRE: Valid source_env_id, target_env_id, dashboard_ids
# @POST: Returns task_id
def test_migrate_dashboards_success():
with patch("src.api.routes.dashboards.get_config_manager") as mock_config, \
patch("src.api.routes.dashboards.get_task_manager") as mock_task_mgr, \
patch("src.api.routes.dashboards.has_permission") as mock_perm:
# Mock environments
mock_source = MagicMock()
mock_source.id = "source"
mock_target = MagicMock()
mock_target.id = "target"
mock_config.return_value.get_environments.return_value = [mock_source, mock_target]
# Mock task manager
mock_task = MagicMock()
mock_task.id = "task-migrate-123"
mock_task_mgr.return_value.create_task = AsyncMock(return_value=mock_task)
# Mock permission
mock_perm.return_value = lambda: True
response = client.post(
"/api/dashboards/migrate",
json={
"source_env_id": "source",
"target_env_id": "target",
"dashboard_ids": [1, 2, 3],
"db_mappings": {"old_db": "new_db"}
}
)
assert response.status_code == 200
data = response.json()
assert "task_id" in data
# [/DEF:test_migrate_dashboards_success:Function]
# [DEF:test_migrate_dashboards_no_ids:Function]
# @TEST: POST /api/dashboards/migrate returns 400 for empty dashboard_ids
# @PRE: dashboard_ids is empty
# @POST: Returns 400 error
def test_migrate_dashboards_no_ids():
with patch("src.api.routes.dashboards.has_permission") as mock_perm:
mock_perm.return_value = lambda: True
response = client.post(
"/api/dashboards/migrate",
json={
"source_env_id": "source",
"target_env_id": "target",
"dashboard_ids": []
}
)
assert response.status_code == 400
assert "At least one dashboard ID must be provided" in response.json()["detail"]
# [/DEF:test_migrate_dashboards_no_ids:Function]
# [DEF:test_backup_dashboards_success:Function]
# @TEST: POST /api/dashboards/backup creates backup task
# @PRE: Valid env_id, dashboard_ids
# @POST: Returns task_id
def test_backup_dashboards_success():
with patch("src.api.routes.dashboards.get_config_manager") as mock_config, \
patch("src.api.routes.dashboards.get_task_manager") as mock_task_mgr, \
patch("src.api.routes.dashboards.has_permission") as mock_perm:
# Mock environment
mock_env = MagicMock()
mock_env.id = "prod"
mock_config.return_value.get_environments.return_value = [mock_env]
# Mock task manager
mock_task = MagicMock()
mock_task.id = "task-backup-456"
mock_task_mgr.return_value.create_task = AsyncMock(return_value=mock_task)
# Mock permission
mock_perm.return_value = lambda: True
response = client.post(
"/api/dashboards/backup",
json={
"env_id": "prod",
"dashboard_ids": [1, 2, 3],
"schedule": "0 0 * * *"
}
)
assert response.status_code == 200
data = response.json()
assert "task_id" in data
# [/DEF:test_backup_dashboards_success:Function]
# [DEF:test_get_database_mappings_success:Function]
# @TEST: GET /api/dashboards/db-mappings returns mapping suggestions
# @PRE: Valid source_env_id, target_env_id
# @POST: Returns list of database mappings
def test_get_database_mappings_success():
with patch("src.api.routes.dashboards.get_mapping_service") as mock_service, \
patch("src.api.routes.dashboards.has_permission") as mock_perm:
# Mock mapping service
mock_service.return_value.get_suggestions = AsyncMock(return_value=[
{
"source_db": "old_sales",
"target_db": "new_sales",
"source_db_uuid": "uuid-1",
"target_db_uuid": "uuid-2",
"confidence": 0.95
}
])
# Mock permission
mock_perm.return_value = lambda: True
response = client.get("/api/dashboards/db-mappings?source_env_id=prod&target_env_id=staging")
assert response.status_code == 200
data = response.json()
assert "mappings" in data
# [/DEF:test_get_database_mappings_success:Function]
# [/DEF:backend.src.api.routes.__tests__.test_dashboards:Module]
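
These tests stub the dependency getters with `unittest.mock.patch`. An alternative worth noting is FastAPI's built-in `app.dependency_overrides`; a sketch under the assumption that `get_config_manager` is the exact callable wired into `Depends()` and importable as shown:

```python
from unittest.mock import MagicMock
from fastapi.testclient import TestClient
from src.app import app
from src.dependencies import get_config_manager  # assumed import path

def test_dashboards_with_override():
    fake_config = MagicMock()
    env = MagicMock()
    env.id = "prod"
    fake_config.get_environments.return_value = [env]
    app.dependency_overrides[get_config_manager] = lambda: fake_config
    try:
        client = TestClient(app)
        response = client.get("/api/dashboards?env_id=prod")
        # Other dependencies are still live here; override them the same way.
        assert response.status_code != 422
    finally:
        app.dependency_overrides.clear()  # always undo overrides between tests
```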

View File

@@ -0,0 +1,209 @@
# [DEF:backend.src.api.routes.__tests__.test_datasets:Module]
# @TIER: STANDARD
# @PURPOSE: Unit tests for Datasets API endpoints
# @LAYER: API
# @RELATION: TESTS -> backend.src.api.routes.datasets
import pytest
from unittest.mock import MagicMock, patch, AsyncMock
from fastapi.testclient import TestClient
from src.app import app
from src.api.routes.datasets import DatasetsResponse, DatasetDetailResponse
client = TestClient(app)
# [DEF:test_get_datasets_success:Function]
# @TEST: GET /api/datasets returns 200 and valid schema
# @PRE: env_id exists
# @POST: Response matches DatasetsResponse schema
def test_get_datasets_success():
with patch("src.api.routes.datasets.get_config_manager") as mock_config, \
patch("src.api.routes.datasets.get_resource_service") as mock_service, \
patch("src.api.routes.datasets.has_permission") as mock_perm:
# Mock environment
mock_env = MagicMock()
mock_env.id = "prod"
mock_config.return_value.get_environments.return_value = [mock_env]
# Mock resource service response
mock_service.return_value.get_datasets_with_status = AsyncMock(
return_value=[
{
"id": 1,
"table_name": "sales_data",
"schema": "public",
"database": "sales_db",
"mapped_fields": {"total": 10, "mapped": 5},
"last_task": {"task_id": "task-1", "status": "SUCCESS"}
}
]
)
# Mock permission
mock_perm.return_value = lambda: True
response = client.get("/api/datasets?env_id=prod")
assert response.status_code == 200
data = response.json()
assert "datasets" in data
assert len(data["datasets"]) == 1
# Validate against Pydantic model
DatasetsResponse(**data)
# [/DEF:test_get_datasets_success:Function]
# [DEF:test_get_datasets_env_not_found:Function]
# @TEST: GET /api/datasets returns 404 if env_id missing
# @PRE: env_id does not exist
# @POST: Returns 404 error
def test_get_datasets_env_not_found():
with patch("src.api.routes.datasets.get_config_manager") as mock_config, \
patch("src.api.routes.datasets.has_permission") as mock_perm:
mock_config.return_value.get_environments.return_value = []
mock_perm.return_value = lambda: True
response = client.get("/api/datasets?env_id=nonexistent")
assert response.status_code == 404
assert "Environment not found" in response.json()["detail"]
# [/DEF:test_get_datasets_env_not_found:Function]
# [DEF:test_get_datasets_invalid_pagination:Function]
# @TEST: GET /api/datasets returns 400 for invalid page/page_size
# @PRE: page < 1 or page_size > 100
# @POST: Returns 400 error
def test_get_datasets_invalid_pagination():
with patch("src.api.routes.datasets.get_config_manager") as mock_config, \
patch("src.api.routes.datasets.has_permission") as mock_perm:
mock_env = MagicMock()
mock_env.id = "prod"
mock_config.return_value.get_environments.return_value = [mock_env]
mock_perm.return_value = lambda: True
# Invalid page
response = client.get("/api/datasets?env_id=prod&page=0")
assert response.status_code == 400
assert "Page must be >= 1" in response.json()["detail"]
# Invalid page_size
response = client.get("/api/datasets?env_id=prod&page_size=0")
assert response.status_code == 400
assert "Page size must be between 1 and 100" in response.json()["detail"]
# [/DEF:test_get_datasets_invalid_pagination:Function]
# [DEF:test_map_columns_success:Function]
# @TEST: POST /api/datasets/map-columns creates mapping task
# @PRE: Valid env_id, dataset_ids, source_type
# @POST: Returns task_id
def test_map_columns_success():
with patch("src.api.routes.datasets.get_config_manager") as mock_config, \
patch("src.api.routes.datasets.get_task_manager") as mock_task_mgr, \
patch("src.api.routes.datasets.has_permission") as mock_perm:
# Mock environment
mock_env = MagicMock()
mock_env.id = "prod"
mock_config.return_value.get_environments.return_value = [mock_env]
# Mock task manager
mock_task = MagicMock()
mock_task.id = "task-123"
mock_task_mgr.return_value.create_task = AsyncMock(return_value=mock_task)
# Mock permission
mock_perm.return_value = lambda: True
response = client.post(
"/api/datasets/map-columns",
json={
"env_id": "prod",
"dataset_ids": [1, 2, 3],
"source_type": "postgresql"
}
)
assert response.status_code == 200
data = response.json()
assert "task_id" in data
# [/DEF:test_map_columns_success:Function]
# [DEF:test_map_columns_invalid_source_type:Function]
# @TEST: POST /api/datasets/map-columns returns 400 for invalid source_type
# @PRE: source_type is not 'postgresql' or 'xlsx'
# @POST: Returns 400 error
def test_map_columns_invalid_source_type():
with patch("src.api.routes.datasets.has_permission") as mock_perm:
mock_perm.return_value = lambda: True
response = client.post(
"/api/datasets/map-columns",
json={
"env_id": "prod",
"dataset_ids": [1],
"source_type": "invalid"
}
)
assert response.status_code == 400
assert "Source type must be 'postgresql' or 'xlsx'" in response.json()["detail"]
# [/DEF:test_map_columns_invalid_source_type:Function]
# [DEF:test_generate_docs_success:Function]
# @TEST: POST /api/datasets/generate-docs creates doc generation task
# @PRE: Valid env_id, dataset_ids, llm_provider
# @POST: Returns task_id
def test_generate_docs_success():
with patch("src.api.routes.datasets.get_config_manager") as mock_config, \
patch("src.api.routes.datasets.get_task_manager") as mock_task_mgr, \
patch("src.api.routes.datasets.has_permission") as mock_perm:
# Mock environment
mock_env = MagicMock()
mock_env.id = "prod"
mock_config.return_value.get_environments.return_value = [mock_env]
# Mock task manager
mock_task = MagicMock()
mock_task.id = "task-456"
mock_task_mgr.return_value.create_task = AsyncMock(return_value=mock_task)
# Mock permission
mock_perm.return_value = lambda: True
response = client.post(
"/api/datasets/generate-docs",
json={
"env_id": "prod",
"dataset_ids": [1],
"llm_provider": "openai"
}
)
assert response.status_code == 200
data = response.json()
assert "task_id" in data
# [/DEF:test_generate_docs_success:Function]
# [/DEF:backend.src.api.routes.__tests__.test_datasets:Module]

View File

@@ -21,8 +21,8 @@ from ...schemas.auth import (
RoleSchema, RoleCreate, RoleUpdate, PermissionSchema,
ADGroupMappingSchema, ADGroupMappingCreate
)
-from ...models.auth import User, Role, Permission, ADGroupMapping
+from ...models.auth import User, Role, ADGroupMapping
-from ...dependencies import has_permission, get_current_user
+from ...dependencies import has_permission
from ...core.logger import logger, belief_scope
# [/SECTION]

View File

@@ -11,7 +11,7 @@ from fastapi import APIRouter, Depends, HTTPException, status
from sqlalchemy.orm import Session
from ...core.database import get_db
from ...models.connection import ConnectionConfig
-from pydantic import BaseModel, Field
+from pydantic import BaseModel
from datetime import datetime
from ...core.logger import logger, belief_scope
# [/SECTION]

View File

@@ -0,0 +1,327 @@
# [DEF:backend.src.api.routes.dashboards:Module]
#
# @TIER: STANDARD
# @SEMANTICS: api, dashboards, resources, hub
# @PURPOSE: API endpoints for the Dashboard Hub - listing dashboards with Git and task status
# @LAYER: API
# @RELATION: DEPENDS_ON -> backend.src.dependencies
# @RELATION: DEPENDS_ON -> backend.src.services.resource_service
# @RELATION: DEPENDS_ON -> backend.src.core.superset_client
#
# @INVARIANT: All dashboard responses include git_status and last_task metadata
# [SECTION: IMPORTS]
from fastapi import APIRouter, Depends, HTTPException
from typing import List, Optional, Dict
from pydantic import BaseModel, Field
from ...dependencies import get_config_manager, get_task_manager, get_resource_service, get_mapping_service, has_permission
from ...core.logger import logger, belief_scope
# [/SECTION]
router = APIRouter(prefix="/api/dashboards", tags=["Dashboards"])
# [DEF:GitStatus:DataClass]
class GitStatus(BaseModel):
branch: Optional[str] = None
sync_status: Optional[str] = Field(None, pattern="^(OK|DIFF)$")
# [/DEF:GitStatus:DataClass]
# [DEF:LastTask:DataClass]
class LastTask(BaseModel):
task_id: Optional[str] = None
status: Optional[str] = Field(None, pattern="^(RUNNING|SUCCESS|ERROR|WAITING_INPUT)$")
# [/DEF:LastTask:DataClass]
# [DEF:DashboardItem:DataClass]
class DashboardItem(BaseModel):
id: int
title: str
slug: Optional[str] = None
url: Optional[str] = None
last_modified: Optional[str] = None
git_status: Optional[GitStatus] = None
last_task: Optional[LastTask] = None
# [/DEF:DashboardItem:DataClass]
# [DEF:DashboardsResponse:DataClass]
class DashboardsResponse(BaseModel):
dashboards: List[DashboardItem]
total: int
page: int
page_size: int
total_pages: int
# [/DEF:DashboardsResponse:DataClass]
# [DEF:get_dashboards:Function]
# @PURPOSE: Fetch list of dashboards from a specific environment with Git status and last task status
# @PRE: env_id must be a valid environment ID
# @PRE: page must be >= 1 if provided
# @PRE: page_size must be between 1 and 100 if provided
# @POST: Returns a list of dashboards with enhanced metadata and pagination info
# @POST: Response includes pagination metadata (page, page_size, total, total_pages)
# @PARAM: env_id (str) - The environment ID to fetch dashboards from
# @PARAM: search (Optional[str]) - Filter by title/slug
# @PARAM: page (Optional[int]) - Page number (default: 1)
# @PARAM: page_size (Optional[int]) - Items per page (default: 10, max: 100)
# @RETURN: DashboardsResponse - List of dashboards with status metadata
# @RELATION: CALLS -> ResourceService.get_dashboards_with_status
@router.get("", response_model=DashboardsResponse)
async def get_dashboards(
env_id: str,
search: Optional[str] = None,
page: int = 1,
page_size: int = 10,
config_manager=Depends(get_config_manager),
task_manager=Depends(get_task_manager),
resource_service=Depends(get_resource_service),
_ = Depends(has_permission("plugin:migration", "READ"))
):
with belief_scope("get_dashboards", f"env_id={env_id}, search={search}, page={page}, page_size={page_size}"):
# Validate pagination parameters
if page < 1:
logger.error(f"[get_dashboards][Coherence:Failed] Invalid page: {page}")
raise HTTPException(status_code=400, detail="Page must be >= 1")
if page_size < 1 or page_size > 100:
logger.error(f"[get_dashboards][Coherence:Failed] Invalid page_size: {page_size}")
raise HTTPException(status_code=400, detail="Page size must be between 1 and 100")
# Validate environment exists
environments = config_manager.get_environments()
env = next((e for e in environments if e.id == env_id), None)
if not env:
logger.error(f"[get_dashboards][Coherence:Failed] Environment not found: {env_id}")
raise HTTPException(status_code=404, detail="Environment not found")
try:
# Get all tasks for status lookup
all_tasks = task_manager.get_all_tasks()
# Fetch dashboards with status using ResourceService
dashboards = await resource_service.get_dashboards_with_status(env, all_tasks)
# Apply search filter if provided
if search:
search_lower = search.lower()
dashboards = [
d for d in dashboards
if search_lower in d.get('title', '').lower()
or search_lower in d.get('slug', '').lower()
]
# Calculate pagination
total = len(dashboards)
total_pages = (total + page_size - 1) // page_size if total > 0 else 1
start_idx = (page - 1) * page_size
end_idx = start_idx + page_size
# Slice dashboards for current page
paginated_dashboards = dashboards[start_idx:end_idx]
logger.info(f"[get_dashboards][Coherence:OK] Returning {len(paginated_dashboards)} dashboards (page {page}/{total_pages}, total: {total})")
return DashboardsResponse(
dashboards=paginated_dashboards,
total=total,
page=page,
page_size=page_size,
total_pages=total_pages
)
except Exception as e:
logger.error(f"[get_dashboards][Coherence:Failed] Failed to fetch dashboards: {e}")
raise HTTPException(status_code=503, detail=f"Failed to fetch dashboards: {str(e)}")
# [/DEF:get_dashboards:Function]
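# Worked example of the ceil-division pagination above (illustrative, not
# executed as part of the module): with total=23 and page_size=10,
#   total_pages = (23 + 10 - 1) // 10  -> 3
#   page 1 -> dashboards[0:10], page 2 -> dashboards[10:20], page 3 -> dashboards[20:23]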
# [DEF:MigrateRequest:DataClass]
class MigrateRequest(BaseModel):
source_env_id: str = Field(..., description="Source environment ID")
target_env_id: str = Field(..., description="Target environment ID")
dashboard_ids: List[int] = Field(..., description="List of dashboard IDs to migrate")
db_mappings: Optional[Dict[str, str]] = Field(None, description="Database mappings for migration")
replace_db_config: bool = Field(False, description="Replace database configuration")
# [/DEF:MigrateRequest:DataClass]
# [DEF:TaskResponse:DataClass]
class TaskResponse(BaseModel):
task_id: str
# [/DEF:TaskResponse:DataClass]
# [DEF:migrate_dashboards:Function]
# @PURPOSE: Trigger bulk migration of dashboards from source to target environment
# @PRE: User has permission plugin:migration:execute
# @PRE: source_env_id and target_env_id are valid environment IDs
# @PRE: dashboard_ids is a non-empty list
# @POST: Returns task_id for tracking migration progress
# @POST: Task is created and queued for execution
# @PARAM: request (MigrateRequest) - Migration request with source, target, and dashboard IDs
# @RETURN: TaskResponse - Task ID for tracking
# @RELATION: DISPATCHES -> MigrationPlugin
# @RELATION: CALLS -> task_manager.create_task
@router.post("/migrate", response_model=TaskResponse)
async def migrate_dashboards(
request: MigrateRequest,
config_manager=Depends(get_config_manager),
task_manager=Depends(get_task_manager),
_ = Depends(has_permission("plugin:migration", "EXECUTE"))
):
with belief_scope("migrate_dashboards", f"source={request.source_env_id}, target={request.target_env_id}, count={len(request.dashboard_ids)}"):
# Validate request
if not request.dashboard_ids:
logger.error("[migrate_dashboards][Coherence:Failed] No dashboard IDs provided")
raise HTTPException(status_code=400, detail="At least one dashboard ID must be provided")
# Validate environments exist
environments = config_manager.get_environments()
source_env = next((e for e in environments if e.id == request.source_env_id), None)
target_env = next((e for e in environments if e.id == request.target_env_id), None)
if not source_env:
logger.error(f"[migrate_dashboards][Coherence:Failed] Source environment not found: {request.source_env_id}")
raise HTTPException(status_code=404, detail="Source environment not found")
if not target_env:
logger.error(f"[migrate_dashboards][Coherence:Failed] Target environment not found: {request.target_env_id}")
raise HTTPException(status_code=404, detail="Target environment not found")
try:
# Create migration task
task_params = {
'source_env_id': request.source_env_id,
'target_env_id': request.target_env_id,
'selected_ids': request.dashboard_ids,
'replace_db_config': request.replace_db_config,
'db_mappings': request.db_mappings or {}
}
task_obj = await task_manager.create_task(
plugin_id='superset-migration',
params=task_params
)
logger.info(f"[migrate_dashboards][Coherence:OK] Migration task created: {task_obj.id} for {len(request.dashboard_ids)} dashboards")
return TaskResponse(task_id=str(task_obj.id))
except Exception as e:
logger.error(f"[migrate_dashboards][Coherence:Failed] Failed to create migration task: {e}")
raise HTTPException(status_code=503, detail=f"Failed to create migration task: {str(e)}")
# [/DEF:migrate_dashboards:Function]
# [DEF:BackupRequest:DataClass]
class BackupRequest(BaseModel):
env_id: str = Field(..., description="Environment ID")
dashboard_ids: List[int] = Field(..., description="List of dashboard IDs to backup")
schedule: Optional[str] = Field(None, description="Cron schedule for recurring backups (e.g., '0 0 * * *')")
# [/DEF:BackupRequest:DataClass]
# [DEF:backup_dashboards:Function]
# @PURPOSE: Trigger bulk backup of dashboards with optional cron schedule
# @PRE: User has permission plugin:backup:execute
# @PRE: env_id is a valid environment ID
# @PRE: dashboard_ids is a non-empty list
# @POST: Returns task_id for tracking backup progress
# @POST: Task is created and queued for execution
# @POST: If schedule is provided, a scheduled task is created
# @PARAM: request (BackupRequest) - Backup request with environment and dashboard IDs
# @RETURN: TaskResponse - Task ID for tracking
# @RELATION: DISPATCHES -> BackupPlugin
# @RELATION: CALLS -> task_manager.create_task
@router.post("/backup", response_model=TaskResponse)
async def backup_dashboards(
request: BackupRequest,
config_manager=Depends(get_config_manager),
task_manager=Depends(get_task_manager),
_ = Depends(has_permission("plugin:backup", "EXECUTE"))
):
with belief_scope("backup_dashboards", f"env={request.env_id}, count={len(request.dashboard_ids)}, schedule={request.schedule}"):
# Validate request
if not request.dashboard_ids:
logger.error("[backup_dashboards][Coherence:Failed] No dashboard IDs provided")
raise HTTPException(status_code=400, detail="At least one dashboard ID must be provided")
# Validate environment exists
environments = config_manager.get_environments()
env = next((e for e in environments if e.id == request.env_id), None)
if not env:
logger.error(f"[backup_dashboards][Coherence:Failed] Environment not found: {request.env_id}")
raise HTTPException(status_code=404, detail="Environment not found")
try:
# Create backup task
task_params = {
'env': request.env_id,
'dashboards': request.dashboard_ids,
'schedule': request.schedule
}
task_obj = await task_manager.create_task(
plugin_id='superset-backup',
params=task_params
)
logger.info(f"[backup_dashboards][Coherence:OK] Backup task created: {task_obj.id} for {len(request.dashboard_ids)} dashboards")
return TaskResponse(task_id=str(task_obj.id))
except Exception as e:
logger.error(f"[backup_dashboards][Coherence:Failed] Failed to create backup task: {e}")
raise HTTPException(status_code=503, detail=f"Failed to create backup task: {str(e)}")
# [/DEF:backup_dashboards:Function]
# [DEF:DatabaseMapping:DataClass]
class DatabaseMapping(BaseModel):
source_db: str
target_db: str
source_db_uuid: Optional[str] = None
target_db_uuid: Optional[str] = None
confidence: float
# [/DEF:DatabaseMapping:DataClass]
# [DEF:DatabaseMappingsResponse:DataClass]
class DatabaseMappingsResponse(BaseModel):
mappings: List[DatabaseMapping]
# [/DEF:DatabaseMappingsResponse:DataClass]
# [DEF:get_database_mappings:Function]
# @PURPOSE: Get database mapping suggestions between source and target environments
# @PRE: User has permission plugin:migration:read
# @PRE: source_env_id and target_env_id are valid environment IDs
# @POST: Returns list of suggested database mappings with confidence scores
# @PARAM: source_env_id (str) - Source environment ID
# @PARAM: target_env_id (str) - Target environment ID
# @RETURN: DatabaseMappingsResponse - List of suggested mappings
# @RELATION: CALLS -> MappingService.get_suggestions
@router.get("/db-mappings", response_model=DatabaseMappingsResponse)
async def get_database_mappings(
source_env_id: str,
target_env_id: str,
mapping_service=Depends(get_mapping_service),
_ = Depends(has_permission("plugin:migration", "READ"))
):
with belief_scope("get_database_mappings", f"source={source_env_id}, target={target_env_id}"):
try:
# Get mapping suggestions using MappingService
suggestions = await mapping_service.get_suggestions(source_env_id, target_env_id)
# Format suggestions as DatabaseMapping objects
mappings = [
DatabaseMapping(
source_db=s.get('source_db', ''),
target_db=s.get('target_db', ''),
source_db_uuid=s.get('source_db_uuid'),
target_db_uuid=s.get('target_db_uuid'),
confidence=s.get('confidence', 0.0)
)
for s in suggestions
]
logger.info(f"[get_database_mappings][Coherence:OK] Returning {len(mappings)} database mapping suggestions")
return DatabaseMappingsResponse(mappings=mappings)
except Exception as e:
logger.error(f"[get_database_mappings][Coherence:Failed] Failed to get database mappings: {e}")
raise HTTPException(status_code=503, detail=f"Failed to get database mappings: {str(e)}")
# [/DEF:get_database_mappings:Function]
# [/DEF:backend.src.api.routes.dashboards:Module]
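
For completeness, a client-side sketch of kicking off a migration through this router. The request body mirrors `MigrateRequest` above; the base URL, token handling, and `httpx` dependency are assumptions:

```python
import httpx

def start_migration(base_url: str, token: str) -> str:
    response = httpx.post(
        f"{base_url}/api/dashboards/migrate",
        headers={"Authorization": f"Bearer {token}"},
        json={
            "source_env_id": "prod",
            "target_env_id": "staging",
            "dashboard_ids": [1, 2, 3],
            "db_mappings": {"old_db": "new_db"},
            "replace_db_config": False,
        },
        timeout=30.0,
    )
    response.raise_for_status()
    return response.json()["task_id"]  # poll the TaskManager with this ID
```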

View File

@@ -0,0 +1,395 @@
# [DEF:backend.src.api.routes.datasets:Module]
#
# @TIER: STANDARD
# @SEMANTICS: api, datasets, resources, hub
# @PURPOSE: API endpoints for the Dataset Hub - listing datasets with mapping progress
# @LAYER: API
# @RELATION: DEPENDS_ON -> backend.src.dependencies
# @RELATION: DEPENDS_ON -> backend.src.services.resource_service
# @RELATION: DEPENDS_ON -> backend.src.core.superset_client
#
# @INVARIANT: All dataset responses include last_task metadata
# [SECTION: IMPORTS]
from fastapi import APIRouter, Depends, HTTPException
from typing import List, Optional
from pydantic import BaseModel, Field
from ...dependencies import get_config_manager, get_task_manager, get_resource_service, has_permission
from ...core.logger import logger, belief_scope
from ...core.superset_client import SupersetClient
# [/SECTION]
router = APIRouter(prefix="/api/datasets", tags=["Datasets"])
# [DEF:MappedFields:DataClass]
class MappedFields(BaseModel):
total: int
mapped: int
# [/DEF:MappedFields:DataClass]
# [DEF:LastTask:DataClass]
class LastTask(BaseModel):
task_id: Optional[str] = None
status: Optional[str] = Field(None, pattern="^(RUNNING|SUCCESS|ERROR|WAITING_INPUT)$")
# [/DEF:LastTask:DataClass]
# [DEF:DatasetItem:DataClass]
class DatasetItem(BaseModel):
id: int
table_name: str
schema: str
database: str
mapped_fields: Optional[MappedFields] = None
last_task: Optional[LastTask] = None
# [/DEF:DatasetItem:DataClass]
# [DEF:LinkedDashboard:DataClass]
class LinkedDashboard(BaseModel):
id: int
title: str
slug: Optional[str] = None
# [/DEF:LinkedDashboard:DataClass]
# [DEF:DatasetColumn:DataClass]
class DatasetColumn(BaseModel):
id: int
name: str
type: Optional[str] = None
is_dttm: bool = False
is_active: bool = True
description: Optional[str] = None
# [/DEF:DatasetColumn:DataClass]
# [DEF:DatasetDetailResponse:DataClass]
class DatasetDetailResponse(BaseModel):
id: int
table_name: Optional[str] = None
schema: Optional[str] = None
database: str
description: Optional[str] = None
columns: List[DatasetColumn]
column_count: int
sql: Optional[str] = None
linked_dashboards: List[LinkedDashboard]
linked_dashboard_count: int
is_sqllab_view: bool = False
created_on: Optional[str] = None
changed_on: Optional[str] = None
# [/DEF:DatasetDetailResponse:DataClass]
# [DEF:DatasetsResponse:DataClass]
class DatasetsResponse(BaseModel):
datasets: List[DatasetItem]
total: int
page: int
page_size: int
total_pages: int
# [/DEF:DatasetsResponse:DataClass]
# [DEF:TaskResponse:DataClass]
class TaskResponse(BaseModel):
task_id: str
# [/DEF:TaskResponse:DataClass]
# [DEF:get_dataset_ids:Function]
# @PURPOSE: Fetch list of all dataset IDs from a specific environment (without pagination)
# @PRE: env_id must be a valid environment ID
# @POST: Returns a list of all dataset IDs
# @PARAM: env_id (str) - The environment ID to fetch datasets from
# @PARAM: search (Optional[str]) - Filter by table name
# @RETURN: List[int] - List of dataset IDs
# @RELATION: CALLS -> ResourceService.get_datasets_with_status
@router.get("/ids")
async def get_dataset_ids(
env_id: str,
search: Optional[str] = None,
config_manager=Depends(get_config_manager),
task_manager=Depends(get_task_manager),
resource_service=Depends(get_resource_service),
_ = Depends(has_permission("plugin:migration", "READ"))
):
with belief_scope("get_dataset_ids", f"env_id={env_id}, search={search}"):
# Validate environment exists
environments = config_manager.get_environments()
env = next((e for e in environments if e.id == env_id), None)
if not env:
logger.error(f"[get_dataset_ids][Coherence:Failed] Environment not found: {env_id}")
raise HTTPException(status_code=404, detail="Environment not found")
try:
# Get all tasks for status lookup
all_tasks = task_manager.get_all_tasks()
# Fetch datasets with status using ResourceService
datasets = await resource_service.get_datasets_with_status(env, all_tasks)
# Apply search filter if provided
if search:
search_lower = search.lower()
datasets = [
d for d in datasets
if search_lower in d.get('table_name', '').lower()
]
# Extract and return just the IDs
dataset_ids = [d['id'] for d in datasets]
logger.info(f"[get_dataset_ids][Coherence:OK] Returning {len(dataset_ids)} dataset IDs")
return {"dataset_ids": dataset_ids}
except Exception as e:
logger.error(f"[get_dataset_ids][Coherence:Failed] Failed to fetch dataset IDs: {e}")
raise HTTPException(status_code=503, detail=f"Failed to fetch dataset IDs: {str(e)}")
# [/DEF:get_dataset_ids:Function]
# [DEF:get_datasets:Function]
# @PURPOSE: Fetch list of datasets from a specific environment with mapping progress
# @PRE: env_id must be a valid environment ID
# @PRE: page must be >= 1 if provided
# @PRE: page_size must be between 1 and 100 if provided
# @POST: Returns a list of datasets with enhanced metadata and pagination info
# @POST: Response includes pagination metadata (page, page_size, total, total_pages)
# @PARAM: env_id (str) - The environment ID to fetch datasets from
# @PARAM: search (Optional[str]) - Filter by table name
# @PARAM: page (Optional[int]) - Page number (default: 1)
# @PARAM: page_size (Optional[int]) - Items per page (default: 10, max: 100)
# @RETURN: DatasetsResponse - List of datasets with status metadata
# @RELATION: CALLS -> ResourceService.get_datasets_with_status
@router.get("", response_model=DatasetsResponse)
async def get_datasets(
env_id: str,
search: Optional[str] = None,
page: int = 1,
page_size: int = 10,
config_manager=Depends(get_config_manager),
task_manager=Depends(get_task_manager),
resource_service=Depends(get_resource_service),
_ = Depends(has_permission("plugin:migration", "READ"))
):
with belief_scope("get_datasets", f"env_id={env_id}, search={search}, page={page}, page_size={page_size}"):
# Validate pagination parameters
if page < 1:
logger.error(f"[get_datasets][Coherence:Failed] Invalid page: {page}")
raise HTTPException(status_code=400, detail="Page must be >= 1")
if page_size < 1 or page_size > 100:
logger.error(f"[get_datasets][Coherence:Failed] Invalid page_size: {page_size}")
raise HTTPException(status_code=400, detail="Page size must be between 1 and 100")
# Validate environment exists
environments = config_manager.get_environments()
env = next((e for e in environments if e.id == env_id), None)
if not env:
logger.error(f"[get_datasets][Coherence:Failed] Environment not found: {env_id}")
raise HTTPException(status_code=404, detail="Environment not found")
try:
# Get all tasks for status lookup
all_tasks = task_manager.get_all_tasks()
# Fetch datasets with status using ResourceService
datasets = await resource_service.get_datasets_with_status(env, all_tasks)
# Apply search filter if provided
if search:
search_lower = search.lower()
datasets = [
d for d in datasets
if search_lower in d.get('table_name', '').lower()
]
# Calculate pagination
total = len(datasets)
total_pages = (total + page_size - 1) // page_size if total > 0 else 1
start_idx = (page - 1) * page_size
end_idx = start_idx + page_size
# Slice datasets for current page
paginated_datasets = datasets[start_idx:end_idx]
logger.info(f"[get_datasets][Coherence:OK] Returning {len(paginated_datasets)} datasets (page {page}/{total_pages}, total: {total})")
return DatasetsResponse(
datasets=paginated_datasets,
total=total,
page=page,
page_size=page_size,
total_pages=total_pages
)
except Exception as e:
logger.error(f"[get_datasets][Coherence:Failed] Failed to fetch datasets: {e}")
raise HTTPException(status_code=503, detail=f"Failed to fetch datasets: {str(e)}")
# [/DEF:get_datasets:Function]
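A note on the pagination arithmetic: (total + page_size - 1) // page_size is integer ceiling division, with an explicit guard so an empty result set still reports one page. A quick standalone check of the edge cases:

# Ceiling division as used in get_datasets above, verified on edge cases.
def total_pages(total: int, page_size: int) -> int:
    return (total + page_size - 1) // page_size if total > 0 else 1

assert total_pages(0, 10) == 1    # empty result set still yields one (empty) page
assert total_pages(10, 10) == 1   # exact multiple of the page size
assert total_pages(11, 10) == 2   # a remainder spills onto a new page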
# [DEF:MapColumnsRequest:DataClass]
class MapColumnsRequest(BaseModel):
env_id: str = Field(..., description="Environment ID")
dataset_ids: List[int] = Field(..., description="List of dataset IDs to map")
source_type: str = Field(..., description="Source type: 'postgresql' or 'xlsx'")
connection_id: Optional[str] = Field(None, description="Connection ID for PostgreSQL source")
file_data: Optional[str] = Field(None, description="File path or data for XLSX source")
# [/DEF:MapColumnsRequest:DataClass]
# [DEF:map_columns:Function]
# @PURPOSE: Trigger bulk column mapping for datasets
# @PRE: User has permission plugin:mapper:execute
# @PRE: env_id is a valid environment ID
# @PRE: dataset_ids is a non-empty list
# @POST: Returns task_id for tracking mapping progress
# @POST: Task is created and queued for execution
# @PARAM: request (MapColumnsRequest) - Mapping request with environment and dataset IDs
# @RETURN: TaskResponse - Task ID for tracking
# @RELATION: DISPATCHES -> MapperPlugin
# @RELATION: CALLS -> task_manager.create_task
@router.post("/map-columns", response_model=TaskResponse)
async def map_columns(
request: MapColumnsRequest,
config_manager=Depends(get_config_manager),
task_manager=Depends(get_task_manager),
_ = Depends(has_permission("plugin:mapper", "EXECUTE"))
):
with belief_scope("map_columns", f"env={request.env_id}, count={len(request.dataset_ids)}, source={request.source_type}"):
# Validate request
if not request.dataset_ids:
logger.error("[map_columns][Coherence:Failed] No dataset IDs provided")
raise HTTPException(status_code=400, detail="At least one dataset ID must be provided")
# Validate source type
if request.source_type not in ['postgresql', 'xlsx']:
logger.error(f"[map_columns][Coherence:Failed] Invalid source type: {request.source_type}")
raise HTTPException(status_code=400, detail="Source type must be 'postgresql' or 'xlsx'")
# Validate environment exists
environments = config_manager.get_environments()
env = next((e for e in environments if e.id == request.env_id), None)
if not env:
logger.error(f"[map_columns][Coherence:Failed] Environment not found: {request.env_id}")
raise HTTPException(status_code=404, detail="Environment not found")
try:
# Create mapping task
task_params = {
'env': request.env_id,
'dataset_id': request.dataset_ids[0] if request.dataset_ids else None,
'source': request.source_type,
'connection_id': request.connection_id,
'file_data': request.file_data
}
task_obj = await task_manager.create_task(
plugin_id='dataset-mapper',
params=task_params
)
logger.info(f"[map_columns][Coherence:OK] Mapping task created: {task_obj.id} for {len(request.dataset_ids)} datasets")
return TaskResponse(task_id=str(task_obj.id))
except Exception as e:
logger.error(f"[map_columns][Coherence:Failed] Failed to create mapping task: {e}")
raise HTTPException(status_code=503, detail=f"Failed to create mapping task: {str(e)}")
# [/DEF:map_columns:Function]
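A sketch of the request body this endpoint expects; the environment and connection IDs below are placeholders, and note that the handler above currently passes only the first dataset ID into the task parameters:

# Illustrative payload for POST /api/datasets/map-columns (httpx assumed installed).
import httpx

payload = {
    "env_id": "dev",              # must match a configured environment ID
    "dataset_ids": [101, 102],    # only the first ID reaches task_params above
    "source_type": "postgresql",  # or "xlsx"
    "connection_id": "conn-1",    # required for the postgresql source
    "file_data": None,            # used only for the xlsx source
}
resp = httpx.post("http://localhost:8000/api/datasets/map-columns", json=payload)
print(resp.json()["task_id"])     # poll /api/tasks/{task_id} with this ID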
# [DEF:GenerateDocsRequest:DataClass]
class GenerateDocsRequest(BaseModel):
env_id: str = Field(..., description="Environment ID")
dataset_ids: List[int] = Field(..., description="List of dataset IDs to generate docs for")
llm_provider: str = Field(..., description="LLM provider to use")
options: Optional[dict] = Field(None, description="Additional options for documentation generation")
# [/DEF:GenerateDocsRequest:DataClass]
# [DEF:generate_docs:Function]
# @PURPOSE: Trigger bulk documentation generation for datasets
# @PRE: User has permission plugin:llm_analysis:execute
# @PRE: env_id is a valid environment ID
# @PRE: dataset_ids is a non-empty list
# @POST: Returns task_id for tracking documentation generation progress
# @POST: Task is created and queued for execution
# @PARAM: request (GenerateDocsRequest) - Documentation generation request
# @RETURN: TaskResponse - Task ID for tracking
# @RELATION: DISPATCHES -> LLMAnalysisPlugin
# @RELATION: CALLS -> task_manager.create_task
@router.post("/generate-docs", response_model=TaskResponse)
async def generate_docs(
request: GenerateDocsRequest,
config_manager=Depends(get_config_manager),
task_manager=Depends(get_task_manager),
_ = Depends(has_permission("plugin:llm_analysis", "EXECUTE"))
):
with belief_scope("generate_docs", f"env={request.env_id}, count={len(request.dataset_ids)}, provider={request.llm_provider}"):
# Validate request
if not request.dataset_ids:
logger.error("[generate_docs][Coherence:Failed] No dataset IDs provided")
raise HTTPException(status_code=400, detail="At least one dataset ID must be provided")
# Validate environment exists
environments = config_manager.get_environments()
env = next((e for e in environments if e.id == request.env_id), None)
if not env:
logger.error(f"[generate_docs][Coherence:Failed] Environment not found: {request.env_id}")
raise HTTPException(status_code=404, detail="Environment not found")
try:
# Create documentation generation task
task_params = {
'environment_id': request.env_id,
'dataset_id': str(request.dataset_ids[0]) if request.dataset_ids else None,
'provider_id': request.llm_provider,
'options': request.options or {}
}
task_obj = await task_manager.create_task(
plugin_id='llm_documentation',
params=task_params
)
logger.info(f"[generate_docs][Coherence:OK] Documentation generation task created: {task_obj.id} for {len(request.dataset_ids)} datasets")
return TaskResponse(task_id=str(task_obj.id))
except Exception as e:
logger.error(f"[generate_docs][Coherence:Failed] Failed to create documentation generation task: {e}")
raise HTTPException(status_code=503, detail=f"Failed to create documentation generation task: {str(e)}")
# [/DEF:generate_docs:Function]
# [DEF:get_dataset_detail:Function]
# @PURPOSE: Get detailed dataset information including columns and linked dashboards
# @PRE: env_id is a valid environment ID
# @PRE: dataset_id is a valid dataset ID
# @POST: Returns detailed dataset info with columns and linked dashboards
# @PARAM: env_id (str) - The environment ID
# @PARAM: dataset_id (int) - The dataset ID
# @RETURN: DatasetDetailResponse - Detailed dataset information
# @RELATION: CALLS -> SupersetClient.get_dataset_detail
@router.get("/{dataset_id}", response_model=DatasetDetailResponse)
async def get_dataset_detail(
env_id: str,
dataset_id: int,
config_manager=Depends(get_config_manager),
_ = Depends(has_permission("plugin:migration", "READ"))
):
with belief_scope("get_dataset_detail", f"env_id={env_id}, dataset_id={dataset_id}"):
# Validate environment exists
environments = config_manager.get_environments()
env = next((e for e in environments if e.id == env_id), None)
if not env:
logger.error(f"[get_dataset_detail][Coherence:Failed] Environment not found: {env_id}")
raise HTTPException(status_code=404, detail="Environment not found")
try:
# Fetch detailed dataset info using SupersetClient
client = SupersetClient(env)
dataset_detail = client.get_dataset_detail(dataset_id)
logger.info(f"[get_dataset_detail][Coherence:OK] Retrieved dataset {dataset_id} with {dataset_detail['column_count']} columns and {dataset_detail['linked_dashboard_count']} linked dashboards")
return DatasetDetailResponse(**dataset_detail)
except Exception as e:
logger.error(f"[get_dataset_detail][Coherence:Failed] Failed to fetch dataset detail: {e}")
raise HTTPException(status_code=503, detail=f"Failed to fetch dataset detail: {str(e)}")
# [/DEF:get_dataset_detail:Function]
# [/DEF:backend.src.api.routes.datasets:Module]
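Taken together, the list endpoint and its pagination metadata support a simple exhaustive iteration; a sketch, assuming the backend is reachable at a placeholder URL:

# Sketch: page through GET /api/datasets until every item has been seen.
import httpx

def iter_datasets(env_id: str):
    page = 1
    while True:
        resp = httpx.get(
            "http://localhost:8000/api/datasets",
            params={"env_id": env_id, "page": page, "page_size": 100},  # 100 is the allowed maximum
        )
        resp.raise_for_status()
        body = resp.json()
        yield from body["datasets"]
        if page >= body["total_pages"]:
            break
        page += 1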

View File

@@ -1,5 +1,6 @@
# [DEF:backend.src.api.routes.environments:Module]
#
+# @TIER: STANDARD
# @SEMANTICS: api, environments, superset, databases
# @PURPOSE: API endpoints for listing environments and their databases.
# @LAYER: API
@@ -10,15 +11,14 @@
# [SECTION: IMPORTS]
from fastapi import APIRouter, Depends, HTTPException
-from typing import List, Dict, Optional
+from typing import List, Optional
from ...dependencies import get_config_manager, get_scheduler_service, has_permission
from ...core.superset_client import SupersetClient
from pydantic import BaseModel, Field
-from ...core.config_models import Environment as EnvModel
from ...core.logger import belief_scope
# [/SECTION]
-router = APIRouter()
+router = APIRouter(prefix="/api/environments", tags=["Environments"])
# [DEF:ScheduleSchema:DataClass]
class ScheduleSchema(BaseModel):
@@ -43,6 +43,8 @@ class DatabaseResponse(BaseModel):
# [DEF:get_environments:Function]
# @PURPOSE: List all configured environments.
+# @LAYER: API
+# @SEMANTICS: list, environments, config
# @PRE: config_manager is injected via Depends.
# @POST: Returns a list of EnvironmentResponse objects.
# @RETURN: List[EnvironmentResponse]
@@ -71,6 +73,8 @@ async def get_environments(
# [DEF:update_environment_schedule:Function]
# @PURPOSE: Update backup schedule for an environment.
+# @LAYER: API
+# @SEMANTICS: update, schedule, backup, environment
# @PRE: Environment id exists, schedule is valid ScheduleSchema.
# @POST: Backup schedule updated and scheduler reloaded.
# @PARAM: id (str) - The environment ID.
@@ -103,6 +107,8 @@ async def update_environment_schedule(
# [DEF:get_environment_databases:Function]
# @PURPOSE: Fetch the list of databases from a specific environment.
+# @LAYER: API
+# @SEMANTICS: fetch, databases, superset, environment
# @PRE: Environment id exists.
# @POST: Returns a list of database summaries from the environment.
# @PARAM: id (str) - The environment ID.

View File

@@ -1,5 +1,6 @@
# [DEF:backend.src.api.routes.git:Module]
#
+# @TIER: STANDARD
# @SEMANTICS: git, routes, api, fastapi, repository, deployment
# @PURPOSE: Provides FastAPI endpoints for Git integration operations.
# @LAYER: API
@@ -15,17 +16,17 @@ from typing import List, Optional
import typing
from src.dependencies import get_config_manager, has_permission
from src.core.database import get_db
-from src.models.git import GitServerConfig, GitStatus, DeploymentEnvironment, GitRepository
+from src.models.git import GitServerConfig, GitRepository
from src.api.routes.git_schemas import (
    GitServerConfigSchema, GitServerConfigCreate,
-    GitRepositorySchema, BranchSchema, BranchCreate,
+    BranchSchema, BranchCreate,
    BranchCheckout, CommitSchema, CommitCreate,
    DeploymentEnvironmentSchema, DeployRequest, RepoInitRequest
)
from src.services.git_service import GitService
from src.core.logger import logger, belief_scope
-router = APIRouter(prefix="/api/git", tags=["git"])
+router = APIRouter(tags=["git"])
git_service = GitService()
# [DEF:get_git_configs:Function]

View File

@@ -11,7 +11,6 @@
from pydantic import BaseModel, Field
from typing import List, Optional
from datetime import datetime
-from uuid import UUID
from src.models.git import GitProvider, GitStatus, SyncStatus
# [DEF:GitServerConfigBase:Class]

View File

@@ -16,7 +16,7 @@ from sqlalchemy.orm import Session
# [DEF:router:Global]
# @PURPOSE: APIRouter instance for LLM routes.
-router = APIRouter(prefix="/api/llm", tags=["LLM"])
+router = APIRouter(tags=["LLM"])
# [/DEF:router:Global]
# [DEF:get_providers:Function]
@@ -143,6 +143,15 @@ async def test_connection(
        raise HTTPException(status_code=404, detail="Provider not found")
    api_key = service.get_decrypted_api_key(provider_id)
+    # Check if the API key was successfully decrypted
+    if not api_key:
+        logger.error(f"[llm_routes][test_connection] Failed to decrypt API key for provider {provider_id}")
+        raise HTTPException(
+            status_code=500,
+            detail="Failed to decrypt API key. The provider may have been encrypted with a different encryption key. Please update the provider with a new API key."
+        )
    client = LLMClient(
        provider_type=LLMProviderType(db_provider.provider_type),
        api_key=api_key,
@@ -173,6 +182,13 @@ async def test_provider_config(
    from ...plugins.llm_analysis.service import LLMClient
    logger.info(f"[llm_routes][test_provider_config][Action] Testing config for {config.name}")
+    # Check that a real API key is provided (reject the masked placeholder)
+    if not config.api_key or config.api_key == "********":
+        raise HTTPException(
+            status_code=400,
+            detail="API key is required for testing connection"
+        )
    client = LLMClient(
        provider_type=config.provider_type,
        api_key=config.api_key,
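The "********" guard matters because the consolidated settings endpoint (further below) returns masked API keys; a client that echoes the masked value back would otherwise test the literal asterisks. A minimal reproduction of the guard logic, isolated for clarity:

# The masked-placeholder check from test_provider_config, as a standalone helper.
MASK = "********"

def require_real_api_key(api_key):
    # Reject both missing keys and the mask that the read API returns.
    if not api_key or api_key == MASK:
        raise ValueError("API key is required for testing connection")
    return api_key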

View File

@@ -1,5 +1,6 @@
# [DEF:backend.src.api.routes.mappings:Module]
#
+# @TIER: STANDARD
# @SEMANTICS: api, mappings, database, fuzzy-matching
# @PURPOSE: API endpoints for managing database mappings and getting suggestions.
# @LAYER: API
@@ -20,7 +21,7 @@ from ...models.mapping import DatabaseMapping
from pydantic import BaseModel
# [/SECTION]
-router = APIRouter(prefix="/api/mappings", tags=["mappings"])
+router = APIRouter(tags=["mappings"])
# [DEF:MappingCreate:DataClass]
class MappingCreate(BaseModel):
@@ -30,6 +31,7 @@ class MappingCreate(BaseModel):
    target_db_uuid: str
    source_db_name: str
    target_db_name: str
+    engine: Optional[str] = None
# [/DEF:MappingCreate:DataClass]
# [DEF:MappingResponse:DataClass]
@@ -41,6 +43,7 @@ class MappingResponse(BaseModel):
    target_db_uuid: str
    source_db_name: str
    target_db_name: str
+    engine: Optional[str] = None
    class Config:
        from_attributes = True
@@ -93,6 +96,7 @@ async def create_mapping(
    if existing:
        existing.target_db_uuid = mapping.target_db_uuid
        existing.target_db_name = mapping.target_db_name
+        existing.engine = mapping.engine
        db.commit()
        db.refresh(existing)
        return existing

View File

@@ -1,4 +1,5 @@
# [DEF:backend.src.api.routes.migration:Module]
+# @TIER: STANDARD
# @SEMANTICS: api, migration, dashboards
# @PURPOSE: API endpoints for migration operations.
# @LAYER: API
@@ -6,7 +7,7 @@
# @RELATION: DEPENDS_ON -> backend.src.models.dashboard
from fastapi import APIRouter, Depends, HTTPException
-from typing import List, Dict
+from typing import List
from ...dependencies import get_config_manager, get_task_manager, has_permission
from ...models.dashboard import DashboardMetadata, DashboardSelection
from ...core.superset_client import SupersetClient
@@ -43,7 +44,7 @@ async def get_dashboards(
# @POST: Starts the migration task and returns the task ID.
# @PARAM: selection (DashboardSelection) - The dashboards to migrate.
# @RETURN: Dict - {"task_id": str, "message": str}
-@router.post("/migration/execute")
+@router.post("/execute")
async def execute_migration(
    selection: DashboardSelection,
    config_manager=Depends(get_config_manager),

View File

@@ -1,4 +1,5 @@
# [DEF:PluginsRouter:Module]
+# @TIER: STANDARD
# @SEMANTICS: api, router, plugins, list
# @PURPOSE: Defines the FastAPI router for plugin-related endpoints, allowing clients to list available plugins.
# @LAYER: UI (API)

View File

@@ -12,15 +12,24 @@
# [SECTION: IMPORTS]
from fastapi import APIRouter, Depends, HTTPException
from typing import List
-from ...core.config_models import AppConfig, Environment, GlobalSettings
+from pydantic import BaseModel
+from ...core.config_models import AppConfig, Environment, GlobalSettings, LoggingConfig
from ...models.storage import StorageConfig
from ...dependencies import get_config_manager, has_permission
from ...core.config_manager import ConfigManager
from ...core.logger import logger, belief_scope
from ...core.superset_client import SupersetClient
+import os
# [/SECTION]
+# [DEF:LoggingConfigResponse:Class]
+# @PURPOSE: Response model for logging configuration with current task log level.
+# @SEMANTICS: logging, config, response
+class LoggingConfigResponse(BaseModel):
+    level: str
+    task_log_level: str
+    enable_belief_state: bool
+# [/DEF:LoggingConfigResponse:Class]
router = APIRouter()
# [DEF:get_settings:Function]
@@ -223,5 +232,145 @@ async def test_environment_connection(
        return {"status": "error", "message": str(e)}
# [/DEF:test_environment_connection:Function]
# [DEF:get_logging_config:Function]
# @PURPOSE: Retrieves current logging configuration.
# @PRE: Config manager is available.
# @POST: Returns logging configuration.
# @RETURN: LoggingConfigResponse - The current logging config.
@router.get("/logging", response_model=LoggingConfigResponse)
async def get_logging_config(
config_manager: ConfigManager = Depends(get_config_manager),
_ = Depends(has_permission("admin:settings", "READ"))
):
with belief_scope("get_logging_config"):
logging_config = config_manager.get_config().settings.logging
return LoggingConfigResponse(
level=logging_config.level,
task_log_level=logging_config.task_log_level,
enable_belief_state=logging_config.enable_belief_state
)
# [/DEF:get_logging_config:Function]
# [DEF:update_logging_config:Function]
# @PURPOSE: Updates logging configuration.
# @PRE: New logging config is provided.
# @POST: Logging configuration is updated and saved.
# @PARAM: config (LoggingConfig) - The new logging configuration.
# @RETURN: LoggingConfigResponse - The updated logging config.
@router.patch("/logging", response_model=LoggingConfigResponse)
async def update_logging_config(
config: LoggingConfig,
config_manager: ConfigManager = Depends(get_config_manager),
_ = Depends(has_permission("admin:settings", "WRITE"))
):
with belief_scope("update_logging_config"):
logger.info(f"[update_logging_config][Entry] Updating logging config: level={config.level}, task_log_level={config.task_log_level}")
# Get current settings and update logging config
settings = config_manager.get_config().settings
settings.logging = config
config_manager.update_global_settings(settings)
return LoggingConfigResponse(
level=config.level,
task_log_level=config.task_log_level,
enable_belief_state=config.enable_belief_state
)
# [/DEF:update_logging_config:Function]
# [DEF:ConsolidatedSettingsResponse:Class]
class ConsolidatedSettingsResponse(BaseModel):
environments: List[dict]
connections: List[dict]
llm: dict
llm_providers: List[dict]
logging: dict
storage: dict
# [/DEF:ConsolidatedSettingsResponse:Class]
# [DEF:get_consolidated_settings:Function]
# @PURPOSE: Retrieves all settings categories in a single call
# @PRE: Config manager is available.
# @POST: Returns all consolidated settings.
# @RETURN: ConsolidatedSettingsResponse - All settings categories.
@router.get("/consolidated", response_model=ConsolidatedSettingsResponse)
async def get_consolidated_settings(
config_manager: ConfigManager = Depends(get_config_manager),
_ = Depends(has_permission("admin:settings", "READ"))
):
with belief_scope("get_consolidated_settings"):
logger.info("[get_consolidated_settings][Entry] Fetching all consolidated settings")
config = config_manager.get_config()
from ...services.llm_provider import LLMProviderService
from ...core.database import SessionLocal
db = SessionLocal()
try:
llm_service = LLMProviderService(db)
providers = llm_service.get_all_providers()
llm_providers_list = [
{
"id": p.id,
"provider_type": p.provider_type,
"name": p.name,
"base_url": p.base_url,
"api_key": "********",
"default_model": p.default_model,
"is_active": p.is_active
} for p in providers
]
finally:
db.close()
return ConsolidatedSettingsResponse(
environments=[env.dict() for env in config.environments],
connections=config.settings.connections,
llm=config.settings.llm,
llm_providers=llm_providers_list,
logging=config.settings.logging.dict(),
storage=config.settings.storage.dict()
)
# [/DEF:get_consolidated_settings:Function]
# [DEF:update_consolidated_settings:Function]
# @PURPOSE: Bulk update application settings from the consolidated view.
# @PRE: User has admin permissions, config is valid.
# @POST: Settings are updated and saved via ConfigManager.
@router.patch("/consolidated")
async def update_consolidated_settings(
settings_patch: dict,
config_manager: ConfigManager = Depends(get_config_manager),
_ = Depends(has_permission("admin:settings", "WRITE"))
):
with belief_scope("update_consolidated_settings"):
logger.info("[update_consolidated_settings][Entry] Applying consolidated settings patch")
current_config = config_manager.get_config()
current_settings = current_config.settings
# Update connections if provided
if "connections" in settings_patch:
current_settings.connections = settings_patch["connections"]
# Update LLM if provided
if "llm" in settings_patch:
current_settings.llm = settings_patch["llm"]
# Update Logging if provided
if "logging" in settings_patch:
current_settings.logging = LoggingConfig(**settings_patch["logging"])
# Update Storage if provided
if "storage" in settings_patch:
new_storage = StorageConfig(**settings_patch["storage"])
is_valid, message = config_manager.validate_path(new_storage.root_path)
if not is_valid:
raise HTTPException(status_code=400, detail=message)
current_settings.storage = new_storage
config_manager.update_global_settings(current_settings)
return {"status": "success", "message": "Settings updated"}
# [/DEF:update_consolidated_settings:Function]
# [/DEF:SettingsRouter:Module]
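An illustrative patch body for PATCH /api/settings/consolidated; only the keys present are applied, the logging field names follow LoggingConfigResponse above, and any StorageConfig fields beyond root_path are assumptions:

# Hypothetical payload; unlisted sections (connections, llm) are left unchanged.
patch = {
    "logging": {
        "level": "INFO",
        "task_log_level": "DEBUG",
        "enable_belief_state": True,
    },
    "storage": {
        "root_path": "/var/lib/ss-tools",  # validated via config_manager.validate_path before saving
    },
}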

View File

@@ -1,5 +1,6 @@
# [DEF:storage_routes:Module]
#
+# @TIER: STANDARD
# @SEMANTICS: storage, files, upload, download, backup, repository
# @PURPOSE: API endpoints for file storage management (backups and repositories).
# @LAYER: API

View File

@@ -1,14 +1,16 @@
# [DEF:TasksRouter:Module]
-# @SEMANTICS: api, router, tasks, create, list, get
+# @TIER: STANDARD
+# @SEMANTICS: api, router, tasks, create, list, get, logs
# @PURPOSE: Defines the FastAPI router for task-related endpoints, allowing clients to create, list, and get the status of tasks.
# @LAYER: UI (API)
# @RELATION: Depends on the TaskManager. It is included by the main app.
from typing import List, Dict, Any, Optional
-from fastapi import APIRouter, Depends, HTTPException, status
+from fastapi import APIRouter, Depends, HTTPException, status, Query
from pydantic import BaseModel
from ...core.logger import belief_scope
from ...core.task_manager import TaskManager, Task, TaskStatus, LogEntry
+from ...core.task_manager.models import LogFilter, LogStats
from ...dependencies import get_task_manager, has_permission, get_current_user
router = APIRouter()
@@ -116,27 +118,93 @@ async def get_task(
@router.get("/{task_id}/logs", response_model=List[LogEntry])
# [DEF:get_task_logs:Function]
-# @PURPOSE: Retrieve logs for a specific task.
+# @PURPOSE: Retrieve logs for a specific task with optional filtering.
# @PARAM: task_id (str) - The unique identifier of the task.
+# @PARAM: level (Optional[str]) - Filter by log level (DEBUG, INFO, WARNING, ERROR).
+# @PARAM: source (Optional[str]) - Filter by source component.
+# @PARAM: search (Optional[str]) - Text search in message.
+# @PARAM: offset (int) - Number of logs to skip.
+# @PARAM: limit (int) - Maximum number of logs to return.
# @PARAM: task_manager (TaskManager) - The task manager instance.
# @PRE: task_id must exist.
# @POST: Returns a list of log entries or raises 404.
# @RETURN: List[LogEntry] - List of log entries.
+# @TIER: CRITICAL
async def get_task_logs(
    task_id: str,
+    level: Optional[str] = Query(None, description="Filter by log level (DEBUG, INFO, WARNING, ERROR)"),
+    source: Optional[str] = Query(None, description="Filter by source component"),
+    search: Optional[str] = Query(None, description="Text search in message"),
+    offset: int = Query(0, ge=0, description="Number of logs to skip"),
+    limit: int = Query(100, ge=1, le=1000, description="Maximum number of logs to return"),
    task_manager: TaskManager = Depends(get_task_manager),
    _ = Depends(has_permission("tasks", "READ"))
):
    """
-    Retrieve logs for a specific task.
+    Retrieve logs for a specific task with optional filtering.
+    Supports filtering by level, source, and text search.
    """
    with belief_scope("get_task_logs"):
        task = task_manager.get_task(task_id)
        if not task:
            raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Task not found")
-        return task_manager.get_task_logs(task_id)
+        log_filter = LogFilter(
+            level=level.upper() if level else None,
+            source=source,
+            search=search,
+            offset=offset,
+            limit=limit
+        )
+        return task_manager.get_task_logs(task_id, log_filter)
# [/DEF:get_task_logs:Function]
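A sketch of consuming the filtered logs endpoint; the base URL is a placeholder, and note the handler upper-cases the level itself:

# Fetch only ERROR-level entries for a task (httpx assumed installed).
import httpx

def fetch_error_logs(task_id: str) -> list:
    resp = httpx.get(
        f"http://localhost:8000/api/tasks/{task_id}/logs",
        params={"level": "error", "limit": 1000},  # level is normalized server-side
    )
    resp.raise_for_status()
    return resp.json()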
@router.get("/{task_id}/logs/stats", response_model=LogStats)
# [DEF:get_task_log_stats:Function]
# @PURPOSE: Get statistics about logs for a task (counts by level and source).
# @PARAM: task_id (str) - The unique identifier of the task.
# @PARAM: task_manager (TaskManager) - The task manager instance.
# @PRE: task_id must exist.
# @POST: Returns log statistics or raises 404.
# @RETURN: LogStats - Statistics about task logs.
async def get_task_log_stats(
task_id: str,
task_manager: TaskManager = Depends(get_task_manager),
_ = Depends(has_permission("tasks", "READ"))
):
"""
Get statistics about logs for a task (counts by level and source).
"""
with belief_scope("get_task_log_stats"):
task = task_manager.get_task(task_id)
if not task:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Task not found")
return task_manager.get_task_log_stats(task_id)
# [/DEF:get_task_log_stats:Function]
@router.get("/{task_id}/logs/sources", response_model=List[str])
# [DEF:get_task_log_sources:Function]
# @PURPOSE: Get unique sources for a task's logs.
# @PARAM: task_id (str) - The unique identifier of the task.
# @PARAM: task_manager (TaskManager) - The task manager instance.
# @PRE: task_id must exist.
# @POST: Returns list of unique source names or raises 404.
# @RETURN: List[str] - Unique source names.
async def get_task_log_sources(
task_id: str,
task_manager: TaskManager = Depends(get_task_manager),
_ = Depends(has_permission("tasks", "READ"))
):
"""
Get unique sources for a task's logs.
"""
with belief_scope("get_task_log_sources"):
task = task_manager.get_task(task_id)
if not task:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Task not found")
return task_manager.get_task_log_sources(task_id)
# [/DEF:get_task_log_sources:Function]
@router.post("/{task_id}/resolve", response_model=Task) @router.post("/{task_id}/resolve", response_model=Task)
# [DEF:resolve_task:Function] # [DEF:resolve_task:Function]
# @PURPOSE: Resolve a task that is awaiting mapping. # @PURPOSE: Resolve a task that is awaiting mapping.

View File

@@ -1,27 +1,28 @@
# [DEF:AppModule:Module]
+# @TIER: CRITICAL
# @SEMANTICS: app, main, entrypoint, fastapi
# @PURPOSE: The main entry point for the FastAPI application. It initializes the app, configures CORS, sets up dependencies, includes API routers, and defines the WebSocket endpoint for log streaming.
# @LAYER: UI (API)
# @RELATION: Depends on the dependency module and API route modules.
-import sys
+# @INVARIANT: Only one FastAPI app instance exists per process.
+# @INVARIANT: All WebSocket connections must be properly cleaned up on disconnect.
from pathlib import Path
# project_root is used for static files mounting
project_root = Path(__file__).resolve().parent.parent.parent
-from fastapi import FastAPI, WebSocket, WebSocketDisconnect, Depends, Request, HTTPException
+from fastapi import FastAPI, WebSocket, WebSocketDisconnect, Request, HTTPException
from starlette.middleware.sessions import SessionMiddleware
from fastapi.middleware.cors import CORSMiddleware
from fastapi.staticfiles import StaticFiles
from fastapi.responses import FileResponse
import asyncio
-import os
from .dependencies import get_task_manager, get_scheduler_service
+from .core.utils.network import NetworkError
from .core.logger import logger, belief_scope
-from .api.routes import plugins, tasks, settings, environments, mappings, migration, connections, git, storage, admin, llm
+from .api.routes import plugins, tasks, settings, environments, mappings, migration, connections, git, storage, admin, llm, dashboards, datasets
from .api import auth
+from .core.database import init_db
# [DEF:App:Global]
# @SEMANTICS: app, fastapi, instance
@@ -77,13 +78,34 @@ app.add_middleware(
# @POST: Logs request and response details.
# @PARAM: request (Request) - The incoming request object.
# @PARAM: call_next (Callable) - The next middleware or route handler.
+@app.exception_handler(NetworkError)
+async def network_error_handler(request: Request, exc: NetworkError):
+    with belief_scope("network_error_handler"):
+        logger.error(f"Network error: {exc}")
+        # An exception handler must return a Response; returning an HTTPException instance would not produce a 503.
+        from fastapi.responses import JSONResponse
+        return JSONResponse(
+            status_code=503,
+            content={"detail": "Environment unavailable. Please check if the Superset instance is running."},
+        )
@app.middleware("http")
async def log_requests(request: Request, call_next):
-    with belief_scope("log_requests", f"{request.method} {request.url.path}"):
-        logger.info(f"[DEBUG] Incoming request: {request.method} {request.url.path}")
-        response = await call_next(request)
-        logger.info(f"[DEBUG] Response status: {response.status_code} for {request.url.path}")
-        return response
+    # Avoid spamming logs for polling endpoints
+    is_polling = request.url.path.endswith("/api/tasks") and request.method == "GET"
+    if not is_polling:
+        logger.info(f"Incoming request: {request.method} {request.url.path}")
+    try:
+        response = await call_next(request)
+        if not is_polling:
+            logger.info(f"Response status: {response.status_code} for {request.url.path}")
+        return response
+    except NetworkError as e:
+        logger.error(f"Network error caught in middleware: {e}")
+        # Raising HTTPException from middleware is not handled by FastAPI; return the 503 response directly.
+        from fastapi.responses import JSONResponse
+        return JSONResponse(
+            status_code=503,
+            content={"detail": "Environment unavailable. Please check if the Superset instance is running."},
+        )
# [/DEF:log_requests:Function]
# Include API routes
@@ -93,34 +115,82 @@ app.include_router(plugins.router, prefix="/api/plugins", tags=["Plugins"])
app.include_router(tasks.router, prefix="/api/tasks", tags=["Tasks"])
app.include_router(settings.router, prefix="/api/settings", tags=["Settings"])
app.include_router(connections.router, prefix="/api/settings/connections", tags=["Connections"])
-app.include_router(environments.router, prefix="/api/environments", tags=["Environments"])
+app.include_router(environments.router, tags=["Environments"])
-app.include_router(mappings.router)
+app.include_router(mappings.router, prefix="/api/mappings", tags=["Mappings"])
app.include_router(migration.router)
-app.include_router(git.router)
+app.include_router(git.router, prefix="/api/git", tags=["Git"])
-app.include_router(llm.router)
+app.include_router(llm.router, prefix="/api/llm", tags=["LLM"])
app.include_router(storage.router, prefix="/api/storage", tags=["Storage"])
+app.include_router(dashboards.router)
+app.include_router(datasets.router)
+# [DEF:api.include_routers:Action]
+# @PURPOSE: Registers all API routers with the FastAPI application.
+# @LAYER: API
+# @SEMANTICS: routes, registration, api
+# [/DEF:api.include_routers:Action]
# [DEF:websocket_endpoint:Function]
-# @PURPOSE: Provides a WebSocket endpoint for real-time log streaming of a task.
+# @PURPOSE: Provides a WebSocket endpoint for real-time log streaming of a task with server-side filtering.
# @PRE: task_id must be a valid task ID.
# @POST: WebSocket connection is managed and logs are streamed until disconnect.
+# @TIER: CRITICAL
+# @UX_STATE: Connecting -> Streaming -> (Disconnected)
@app.websocket("/ws/logs/{task_id}")
-async def websocket_endpoint(websocket: WebSocket, task_id: str):
+async def websocket_endpoint(
+    websocket: WebSocket,
+    task_id: str,
+    source: str = None,
+    level: str = None
+):
+    """
+    WebSocket endpoint for real-time log streaming with optional server-side filtering.
+    Query Parameters:
+        source: Filter logs by source component (e.g., "plugin", "superset_api")
+        level: Filter logs by minimum level (DEBUG, INFO, WARNING, ERROR)
+    """
    with belief_scope("websocket_endpoint", f"task_id={task_id}"):
        await websocket.accept()
-        logger.info(f"WebSocket connection accepted for task {task_id}")
+        # Normalize filter parameters
+        source_filter = source.lower() if source else None
+        level_filter = level.upper() if level else None
+        # Level hierarchy for filtering
+        level_hierarchy = {"DEBUG": 0, "INFO": 1, "WARNING": 2, "ERROR": 3}
+        min_level = level_hierarchy.get(level_filter, 0) if level_filter else 0
+        logger.info(f"WebSocket connection accepted for task {task_id} (source={source_filter}, level={level_filter})")
        task_manager = get_task_manager()
        queue = await task_manager.subscribe_logs(task_id)
+        def matches_filters(log_entry) -> bool:
+            """Check if log entry matches the filter criteria."""
+            # Check source filter
+            if source_filter and log_entry.source.lower() != source_filter:
+                return False
+            # Check level filter
+            if level_filter:
+                log_level = level_hierarchy.get(log_entry.level.upper(), 0)
+                if log_level < min_level:
+                    return False
+            return True
        try:
            # Stream new logs
            logger.info(f"Starting log stream for task {task_id}")
-            # Send initial logs first to build context
+            # Send initial logs first to build context (apply filters)
            initial_logs = task_manager.get_task_logs(task_id)
            for log_entry in initial_logs:
-                log_dict = log_entry.dict()
-                log_dict['timestamp'] = log_dict['timestamp'].isoformat()
-                await websocket.send_json(log_dict)
+                if matches_filters(log_entry):
+                    log_dict = log_entry.dict()
+                    log_dict['timestamp'] = log_dict['timestamp'].isoformat()
+                    await websocket.send_json(log_dict)
            # Force a check for AWAITING_INPUT status immediately upon connection
            # This ensures that if the task is already waiting when the user connects, they get the prompt.
@@ -138,6 +208,11 @@ async def websocket_endpoint(websocket: WebSocket, task_id: str):
            while True:
                log_entry = await queue.get()
+                # Apply server-side filtering
+                if not matches_filters(log_entry):
+                    continue
                log_dict = log_entry.dict()
                log_dict['timestamp'] = log_dict['timestamp'].isoformat()
                await websocket.send_json(log_dict)
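A client-side sketch of the filtered stream, assuming the third-party websockets package and a placeholder host; the query parameters map onto source and level above:

# Tail WARNING-and-above logs emitted by the "plugin" source for one task.
import asyncio
import json
import websockets

async def tail_task_logs(task_id: str):
    uri = f"ws://localhost:8000/ws/logs/{task_id}?source=plugin&level=WARNING"
    async with websockets.connect(uri) as ws:
        async for raw in ws:
            entry = json.loads(raw)
            print(entry["timestamp"], entry["level"], entry["message"])

# asyncio.run(tail_task_logs("some-task-id"))  # task ID is a placeholder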
@@ -165,25 +240,24 @@ async def websocket_endpoint(websocket: WebSocket, task_id: str):
frontend_path = project_root / "frontend" / "build" frontend_path = project_root / "frontend" / "build"
if frontend_path.exists(): if frontend_path.exists():
app.mount("/_app", StaticFiles(directory=str(frontend_path / "_app")), name="static") app.mount("/_app", StaticFiles(directory=str(frontend_path / "_app")), name="static")
# Serve other static files from the root of build directory
# [DEF:serve_spa:Function] # [DEF:serve_spa:Function]
# @PURPOSE: Serves frontend static files or index.html for SPA routing. # @PURPOSE: Serves the SPA frontend for any path not matched by API routes.
# @PRE: file_path is requested by the client. # @PRE: frontend_path exists.
# @POST: Returns the requested file or index.html as a fallback. # @POST: Returns the requested file or index.html.
@app.get("/{file_path:path}") @app.get("/{file_path:path}", include_in_schema=False)
async def serve_spa(file_path: str): async def serve_spa(file_path: str):
with belief_scope("serve_spa", f"path={file_path}"): # Only serve SPA for non-API paths
# Don't serve SPA for API routes that fell through # API routes are registered separately and should be matched by FastAPI first
if file_path.startswith("api/"): if file_path and (file_path.startswith("api/") or file_path.startswith("/api/") or file_path == "api"):
logger.info(f"[DEBUG] API route fell through to serve_spa: {file_path}") # This should not happen if API routers are properly registered
raise HTTPException(status_code=404, detail=f"API endpoint not found: {file_path}") # Return 404 instead of serving HTML
raise HTTPException(status_code=404, detail=f"API endpoint not found: {file_path}")
full_path = frontend_path / file_path
if full_path.is_file(): full_path = frontend_path / file_path
return FileResponse(str(full_path)) if file_path and full_path.is_file():
# Fallback to index.html for SPA routing return FileResponse(str(full_path))
return FileResponse(str(frontend_path / "index.html")) return FileResponse(str(frontend_path / "index.html"))
# [/DEF:serve_spa:Function] # [/DEF:serve_spa:Function]
else: else:
# [DEF:read_root:Function] # [DEF:read_root:Function]

View File

@@ -0,0 +1,179 @@
# [DEF:test_auth:Module]
# @TIER: STANDARD
# @PURPOSE: Unit tests for authentication module
# @LAYER: Domain
# @RELATION: VERIFIES -> src.core.auth
import sys
from pathlib import Path
# Add src to path
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "src"))
import pytest
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from src.core.database import Base
from src.models.auth import User, Role, Permission, ADGroupMapping
from src.services.auth_service import AuthService
from src.core.auth.repository import AuthRepository
from src.core.auth.security import verify_password, get_password_hash
# Create in-memory SQLite database for testing
SQLALCHEMY_DATABASE_URL = "sqlite:///:memory:"
engine = create_engine(SQLALCHEMY_DATABASE_URL, connect_args={"check_same_thread": False})
TestingSessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
# Create all tables
Base.metadata.create_all(bind=engine)
@pytest.fixture
def db_session():
"""Create a new database session with a transaction, rollback after test"""
connection = engine.connect()
transaction = connection.begin()
session = TestingSessionLocal(bind=connection)
yield session
session.close()
transaction.rollback()
connection.close()
@pytest.fixture
def auth_service(db_session):
return AuthService(db_session)
@pytest.fixture
def auth_repo(db_session):
return AuthRepository(db_session)
def test_create_user(auth_repo):
"""Test user creation"""
user = User(
username="testuser",
email="test@example.com",
password_hash=get_password_hash("testpassword123"),
auth_source="LOCAL"
)
auth_repo.db.add(user)
auth_repo.db.commit()
retrieved_user = auth_repo.get_user_by_username("testuser")
assert retrieved_user is not None
assert retrieved_user.username == "testuser"
assert retrieved_user.email == "test@example.com"
assert verify_password("testpassword123", retrieved_user.password_hash)
def test_authenticate_user(auth_service, auth_repo):
"""Test user authentication with valid and invalid credentials"""
user = User(
username="testuser",
email="test@example.com",
password_hash=get_password_hash("testpassword123"),
auth_source="LOCAL"
)
auth_repo.db.add(user)
auth_repo.db.commit()
# Test valid credentials
authenticated_user = auth_service.authenticate_user("testuser", "testpassword123")
assert authenticated_user is not None
assert authenticated_user.username == "testuser"
# Test invalid password
invalid_user = auth_service.authenticate_user("testuser", "wrongpassword")
assert invalid_user is None
# Test invalid username
invalid_user = auth_service.authenticate_user("nonexistent", "testpassword123")
assert invalid_user is None
def test_create_session(auth_service, auth_repo):
"""Test session token creation"""
user = User(
username="testuser",
email="test@example.com",
password_hash=get_password_hash("testpassword123"),
auth_source="LOCAL"
)
auth_repo.db.add(user)
auth_repo.db.commit()
session = auth_service.create_session(user)
assert "access_token" in session
assert "token_type" in session
assert session["token_type"] == "bearer"
assert len(session["access_token"]) > 0
def test_role_permission_association(auth_repo):
"""Test role and permission association"""
role = Role(name="Admin", description="System administrator")
perm1 = Permission(resource="admin:users", action="READ")
perm2 = Permission(resource="admin:users", action="WRITE")
role.permissions.extend([perm1, perm2])
auth_repo.db.add(role)
auth_repo.db.commit()
retrieved_role = auth_repo.get_role_by_name("Admin")
assert retrieved_role is not None
assert len(retrieved_role.permissions) == 2
permissions = [f"{p.resource}:{p.action}" for p in retrieved_role.permissions]
assert "admin:users:READ" in permissions
assert "admin:users:WRITE" in permissions
def test_user_role_association(auth_repo):
"""Test user and role association"""
role = Role(name="Admin", description="System administrator")
user = User(
username="adminuser",
email="admin@example.com",
password_hash=get_password_hash("adminpass123"),
auth_source="LOCAL"
)
user.roles.append(role)
auth_repo.db.add(role)
auth_repo.db.add(user)
auth_repo.db.commit()
retrieved_user = auth_repo.get_user_by_username("adminuser")
assert retrieved_user is not None
assert len(retrieved_user.roles) == 1
assert retrieved_user.roles[0].name == "Admin"
def test_ad_group_mapping(auth_repo):
"""Test AD group mapping"""
role = Role(name="ADFS_Admin", description="ADFS administrators")
auth_repo.db.add(role)
auth_repo.db.commit()
mapping = ADGroupMapping(ad_group="DOMAIN\\ADFS_Admins", role_id=role.id)
auth_repo.db.add(mapping)
auth_repo.db.commit()
retrieved_mapping = auth_repo.db.query(ADGroupMapping).filter_by(ad_group="DOMAIN\\ADFS_Admins").first()
assert retrieved_mapping is not None
assert retrieved_mapping.role_id == role.id
# [/DEF:test_auth:Module]
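The hash helpers imported above can also be exercised directly; a round-trip check in the same style as these tests:

# Direct round-trip of the password helpers used throughout this module.
from src.core.auth.security import verify_password, get_password_hash

def test_password_hash_round_trip():
    hashed = get_password_hash("testpassword123")
    assert hashed != "testpassword123"          # the stored hash must never equal the plaintext
    assert verify_password("testpassword123", hashed)
    assert not verify_password("wrongpassword", hashed)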

View File

@@ -10,7 +10,6 @@
# [SECTION: IMPORTS]
from pydantic import Field
from pydantic_settings import BaseSettings
-import os
# [/SECTION]
# [DEF:AuthConfig:Class]
@@ -25,7 +24,10 @@ class AuthConfig(BaseSettings):
    REFRESH_TOKEN_EXPIRE_DAYS: int = 7
    # Database Settings
-    AUTH_DATABASE_URL: str = Field(default="sqlite:///./backend/auth.db", env="AUTH_DATABASE_URL")
+    AUTH_DATABASE_URL: str = Field(
+        default="postgresql+psycopg2://postgres:postgres@localhost:5432/ss_tools",
+        env="AUTH_DATABASE_URL",
+    )
    # ADFS Settings
    ADFS_CLIENT_ID: str = Field(default="", env="ADFS_CLIENT_ID")
@@ -42,4 +44,4 @@
auth_config = AuthConfig()
# [/DEF:auth_config:Variable]
# [/DEF:backend.src.core.auth.config:Module]
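Since the default now points at PostgreSQL, local development without a running Postgres needs an explicit override; one way, assuming pydantic-settings reads the variable when AuthConfig is instantiated at import time:

# Restore the previous SQLite default for local runs; set before auth_config is imported.
import os
os.environ["AUTH_DATABASE_URL"] = "sqlite:///./backend/auth.db"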

View File

@@ -1,5 +1,6 @@
# [DEF:backend.src.core.auth.jwt:Module]
#
+# @TIER: STANDARD
# @SEMANTICS: jwt, token, session, auth
# @PURPOSE: JWT token generation and validation logic.
# @LAYER: Core
@@ -10,8 +11,8 @@
# [SECTION: IMPORTS]
from datetime import datetime, timedelta
-from typing import Optional, List
+from typing import Optional
-from jose import JWTError, jwt
+from jose import jwt
from .config import auth_config
from ..logger import belief_scope
# [/SECTION]

View File

@@ -1,5 +1,6 @@
# [DEF:backend.src.core.auth.logger:Module]
#
+# @TIER: STANDARD
# @SEMANTICS: auth, logger, audit, security
# @PURPOSE: Audit logging for security-related events.
# @LAYER: Core

View File

@@ -11,7 +11,7 @@
# [SECTION: IMPORTS]
from typing import Optional, List
from sqlalchemy.orm import Session
-from ...models.auth import User, Role, Permission, ADGroupMapping
+from ...models.auth import User, Role, Permission
from ..logger import belief_scope
# [/SECTION]

backend/src/core/config_manager.py Executable file → Normal file
View File

@@ -1,284 +1,283 @@
# [DEF:ConfigManagerModule:Module]
#
-# @SEMANTICS: config, manager, persistence, json
+# @SEMANTICS: config, manager, persistence, postgresql
-# @PURPOSE: Manages application configuration, including loading/saving to JSON and CRUD for environments.
+# @PURPOSE: Manages application configuration persisted in database with one-time migration from JSON.
# @LAYER: Core
# @RELATION: DEPENDS_ON -> ConfigModels
+# @RELATION: DEPENDS_ON -> AppConfigRecord
# @RELATION: CALLS -> logger
-# @RELATION: WRITES_TO -> config.json
#
# @INVARIANT: Configuration must always be valid according to AppConfig model.
# @PUBLIC_API: ConfigManager
# [SECTION: IMPORTS]
import json
import os
from pathlib import Path
from typing import Optional, List
+from sqlalchemy.orm import Session
-from .config_models import AppConfig, Environment, GlobalSettings
+from .config_models import AppConfig, Environment, GlobalSettings, StorageConfig
+from .database import SessionLocal
+from ..models.config import AppConfigRecord
from .logger import logger, configure_logger, belief_scope
# [/SECTION]

# [DEF:ConfigManager:Class]
# @PURPOSE: A class to handle application configuration persistence and management.
-# @RELATION: WRITES_TO -> config.json
class ConfigManager:
    # [DEF:__init__:Function]
    # @PURPOSE: Initializes the ConfigManager.
    # @PRE: isinstance(config_path, str) and len(config_path) > 0
    # @POST: self.config is an instance of AppConfig
-    # @PARAM: config_path (str) - Path to the configuration file.
+    # @PARAM: config_path (str) - Path to legacy JSON config (used only for initial migration fallback).
    def __init__(self, config_path: str = "config.json"):
        with belief_scope("__init__"):
-            # 1. Runtime check of @PRE
            assert isinstance(config_path, str) and config_path, "config_path must be a non-empty string"
-            logger.info(f"[ConfigManager][Entry] Initializing with {config_path}")
+            logger.info(f"[ConfigManager][Entry] Initializing with legacy path {config_path}")
-            # 2. Logic implementation
            self.config_path = Path(config_path)
            self.config: AppConfig = self._load_config()
-            # Configure logger with loaded settings
            configure_logger(self.config.settings.logging)
-            # 3. Runtime check of @POST
            assert isinstance(self.config, AppConfig), "self.config must be an instance of AppConfig"
-            logger.info(f"[ConfigManager][Exit] Initialized")
+            logger.info("[ConfigManager][Exit] Initialized")
    # [/DEF:__init__:Function]

-    # [DEF:_load_config:Function]
-    # @PURPOSE: Loads the configuration from disk or creates a default one.
-    # @PRE: self.config_path is set.
-    # @POST: isinstance(return, AppConfig)
-    # @RETURN: AppConfig - The loaded or default configuration.
-    def _load_config(self) -> AppConfig:
-        with belief_scope("_load_config"):
-            logger.debug(f"[_load_config][Entry] Loading from {self.config_path}")
-            if not self.config_path.exists():
-                logger.info(f"[_load_config][Action] Config file not found. Creating default.")
-                default_config = AppConfig(
-                    environments=[],
-                    settings=GlobalSettings()
-                )
-                self._save_config_to_disk(default_config)
-                return default_config
-            try:
-                with open(self.config_path, "r") as f:
-                    data = json.load(f)
-                # Check for deprecated field
-                if "settings" in data and "backup_path" in data["settings"]:
-                    del data["settings"]["backup_path"]
-                config = AppConfig(**data)
-                logger.info(f"[_load_config][Coherence:OK] Configuration loaded")
-                return config
-            except Exception as e:
-                logger.error(f"[_load_config][Coherence:Failed] Error loading config: {e}")
-                # Fallback but try to preserve existing settings if possible?
-                # For now, return default to be safe, but log the error prominently.
-                return AppConfig(
-                    environments=[],
-                    settings=GlobalSettings(storage=StorageConfig())
-                )
-    # [/DEF:_load_config:Function]
-
-    # [DEF:_save_config_to_disk:Function]
-    # @PURPOSE: Saves the provided configuration object to disk.
-    # @PRE: isinstance(config, AppConfig)
-    # @POST: Configuration saved to disk.
-    # @PARAM: config (AppConfig) - The configuration to save.
-    def _save_config_to_disk(self, config: AppConfig):
-        with belief_scope("_save_config_to_disk"):
-            logger.debug(f"[_save_config_to_disk][Entry] Saving to {self.config_path}")
-            # 1. Runtime check of @PRE
-            assert isinstance(config, AppConfig), "config must be an instance of AppConfig"
-            # 2. Logic implementation
-            try:
-                with open(self.config_path, "w") as f:
-                    json.dump(config.dict(), f, indent=4)
-                logger.info(f"[_save_config_to_disk][Action] Configuration saved")
-            except Exception as e:
-                logger.error(f"[_save_config_to_disk][Coherence:Failed] Failed to save: {e}")
-    # [/DEF:_save_config_to_disk:Function]
+    # [DEF:_default_config:Function]
+    # @PURPOSE: Returns default application configuration.
+    # @RETURN: AppConfig - Default configuration.
+    def _default_config(self) -> AppConfig:
+        return AppConfig(
+            environments=[],
+            settings=GlobalSettings(storage=StorageConfig()),
+        )
+    # [/DEF:_default_config:Function]
+
+    # [DEF:_load_from_legacy_file:Function]
+    # @PURPOSE: Loads legacy configuration from config.json for migration fallback.
+    # @RETURN: AppConfig - Loaded or default configuration.
+    def _load_from_legacy_file(self) -> AppConfig:
+        with belief_scope("_load_from_legacy_file"):
+            if not self.config_path.exists():
+                logger.info("[_load_from_legacy_file][Action] Legacy config file not found, using defaults")
+                return self._default_config()
+            try:
+                with open(self.config_path, "r", encoding="utf-8") as f:
+                    data = json.load(f)
+                logger.info("[_load_from_legacy_file][Coherence:OK] Legacy configuration loaded")
+                return AppConfig(**data)
+            except Exception as e:
+                logger.error(f"[_load_from_legacy_file][Coherence:Failed] Error loading legacy config: {e}")
+                return self._default_config()
+    # [/DEF:_load_from_legacy_file:Function]
+
+    # [DEF:_get_record:Function]
+    # @PURPOSE: Loads config record from DB.
+    # @PARAM: session (Session) - DB session.
+    # @RETURN: Optional[AppConfigRecord] - Existing record or None.
+    def _get_record(self, session: Session) -> Optional[AppConfigRecord]:
+        return session.query(AppConfigRecord).filter(AppConfigRecord.id == "global").first()
+    # [/DEF:_get_record:Function]
+
+    # [DEF:_load_config:Function]
+    # @PURPOSE: Loads the configuration from DB or performs one-time migration from JSON file.
+    # @PRE: DB session factory is available.
+    # @POST: isinstance(return, AppConfig)
+    # @RETURN: AppConfig - Loaded configuration.
+    def _load_config(self) -> AppConfig:
+        with belief_scope("_load_config"):
+            session: Session = SessionLocal()
+            try:
+                record = self._get_record(session)
+                if record and record.payload:
+                    logger.info("[_load_config][Coherence:OK] Configuration loaded from database")
+                    return AppConfig(**record.payload)
+                logger.info("[_load_config][Action] No database config found, migrating legacy config")
+                config = self._load_from_legacy_file()
+                self._save_config_to_db(config, session=session)
+                return config
+            except Exception as e:
+                logger.error(f"[_load_config][Coherence:Failed] Error loading config from DB: {e}")
+                return self._default_config()
+            finally:
+                session.close()
+    # [/DEF:_load_config:Function]
+
+    # [DEF:_save_config_to_db:Function]
+    # @PURPOSE: Saves the provided configuration object to DB.
+    # @PRE: isinstance(config, AppConfig)
+    # @POST: Configuration saved to database.
+    # @PARAM: config (AppConfig) - The configuration to save.
+    # @PARAM: session (Optional[Session]) - Existing DB session for transactional reuse.
+    def _save_config_to_db(self, config: AppConfig, session: Optional[Session] = None):
+        with belief_scope("_save_config_to_db"):
+            assert isinstance(config, AppConfig), "config must be an instance of AppConfig"
+            owns_session = session is None
+            db = session or SessionLocal()
+            try:
+                record = self._get_record(db)
+                payload = config.model_dump()
+                if record is None:
+                    record = AppConfigRecord(id="global", payload=payload)
+                    db.add(record)
+                else:
+                    record.payload = payload
+                db.commit()
+                logger.info("[_save_config_to_db][Action] Configuration saved to database")
+            except Exception as e:
+                db.rollback()
+                logger.error(f"[_save_config_to_db][Coherence:Failed] Failed to save: {e}")
+                raise
+            finally:
+                if owns_session:
+                    db.close()
+    # [/DEF:_save_config_to_db:Function]

    # [DEF:save:Function]
-    # @PURPOSE: Saves the current configuration state to disk.
+    # @PURPOSE: Saves the current configuration state to DB.
    # @PRE: self.config is set.
-    # @POST: self._save_config_to_disk called.
+    # @POST: self._save_config_to_db called.
    def save(self):
        with belief_scope("save"):
-            self._save_config_to_disk(self.config)
+            self._save_config_to_db(self.config)
    # [/DEF:save:Function]

    # [DEF:get_config:Function]
    # @PURPOSE: Returns the current configuration.
-    # @PRE: self.config is set.
-    # @POST: Returns self.config.
    # @RETURN: AppConfig - The current configuration.
    def get_config(self) -> AppConfig:
        with belief_scope("get_config"):
            return self.config
    # [/DEF:get_config:Function]

    # [DEF:update_global_settings:Function]
    # @PURPOSE: Updates the global settings and persists the change.
    # @PRE: isinstance(settings, GlobalSettings)
    # @POST: self.config.settings updated and saved.
    # @PARAM: settings (GlobalSettings) - The new global settings.
    def update_global_settings(self, settings: GlobalSettings):
        with belief_scope("update_global_settings"):
-            logger.info(f"[update_global_settings][Entry] Updating settings")
+            logger.info("[update_global_settings][Entry] Updating settings")
-            # 1. Runtime check of @PRE
            assert isinstance(settings, GlobalSettings), "settings must be an instance of GlobalSettings"
-            # 2. Logic implementation
            self.config.settings = settings
            self.save()
-            # Reconfigure logger with new settings
            configure_logger(settings.logging)
-            logger.info(f"[update_global_settings][Exit] Settings updated")
+            logger.info("[update_global_settings][Exit] Settings updated")
    # [/DEF:update_global_settings:Function]

    # [DEF:validate_path:Function]
    # @PURPOSE: Validates if a path exists and is writable.
-    # @PRE: path is a string.
-    # @POST: Returns (bool, str) status.
    # @PARAM: path (str) - The path to validate.
    # @RETURN: tuple (bool, str) - (is_valid, message)
    def validate_path(self, path: str) -> tuple[bool, str]:
        with belief_scope("validate_path"):
            p = os.path.abspath(path)
            if not os.path.exists(p):
                try:
                    os.makedirs(p, exist_ok=True)
                except Exception as e:
                    return False, f"Path does not exist and could not be created: {e}"
            if not os.access(p, os.W_OK):
                return False, "Path is not writable"
            return True, "Path is valid and writable"
    # [/DEF:validate_path:Function]

    # [DEF:get_environments:Function]
    # @PURPOSE: Returns the list of configured environments.
-    # @PRE: self.config is set.
-    # @POST: Returns list of environments.
    # @RETURN: List[Environment] - List of environments.
    def get_environments(self) -> List[Environment]:
        with belief_scope("get_environments"):
            return self.config.environments
    # [/DEF:get_environments:Function]

    # [DEF:has_environments:Function]
    # @PURPOSE: Checks if at least one environment is configured.
-    # @PRE: self.config is set.
-    # @POST: Returns boolean indicating if environments exist.
    # @RETURN: bool - True if at least one environment exists.
    def has_environments(self) -> bool:
        with belief_scope("has_environments"):
            return len(self.config.environments) > 0
    # [/DEF:has_environments:Function]

    # [DEF:get_environment:Function]
    # @PURPOSE: Returns a single environment by ID.
-    # @PRE: self.config is set and isinstance(env_id, str) and len(env_id) > 0.
-    # @POST: Returns Environment object if found, None otherwise.
    # @PARAM: env_id (str) - The ID of the environment to retrieve.
    # @RETURN: Optional[Environment] - The environment with the given ID, or None.
    def get_environment(self, env_id: str) -> Optional[Environment]:
        with belief_scope("get_environment"):
            for env in self.config.environments:
                if env.id == env_id:
                    return env
            return None
    # [/DEF:get_environment:Function]

    # [DEF:add_environment:Function]
    # @PURPOSE: Adds a new environment to the configuration.
-    # @PRE: isinstance(env, Environment)
-    # @POST: Environment added or updated in self.config.environments.
    # @PARAM: env (Environment) - The environment to add.
    def add_environment(self, env: Environment):
        with belief_scope("add_environment"):
            logger.info(f"[add_environment][Entry] Adding environment {env.id}")
-            # 1. Runtime check of @PRE
            assert isinstance(env, Environment), "env must be an instance of Environment"
-            # 2. Logic implementation
-            # Check for duplicate ID and remove if exists
            self.config.environments = [e for e in self.config.environments if e.id != env.id]
            self.config.environments.append(env)
            self.save()
-            logger.info(f"[add_environment][Exit] Environment added")
+            logger.info("[add_environment][Exit] Environment added")
    # [/DEF:add_environment:Function]

    # [DEF:update_environment:Function]
    # @PURPOSE: Updates an existing environment.
-    # @PRE: isinstance(env_id, str) and len(env_id) > 0 and isinstance(updated_env, Environment)
-    # @POST: Returns True if environment was found and updated.
    # @PARAM: env_id (str) - The ID of the environment to update.
    # @PARAM: updated_env (Environment) - The updated environment data.
    # @RETURN: bool - True if updated, False otherwise.
    def update_environment(self, env_id: str, updated_env: Environment) -> bool:
        with belief_scope("update_environment"):
            logger.info(f"[update_environment][Entry] Updating {env_id}")
-            # 1. Runtime check of @PRE
            assert env_id and isinstance(env_id, str), "env_id must be a non-empty string"
            assert isinstance(updated_env, Environment), "updated_env must be an instance of Environment"
-            # 2. Logic implementation
            for i, env in enumerate(self.config.environments):
                if env.id == env_id:
-                    # If password is masked, keep the old one
                    if updated_env.password == "********":
                        updated_env.password = env.password
                    self.config.environments[i] = updated_env
                    self.save()
                    logger.info(f"[update_environment][Coherence:OK] Updated {env_id}")
                    return True
            logger.warning(f"[update_environment][Coherence:Failed] Environment {env_id} not found")
            return False
    # [/DEF:update_environment:Function]

    # [DEF:delete_environment:Function]
    # @PURPOSE: Deletes an environment by ID.
-    # @PRE: isinstance(env_id, str) and len(env_id) > 0
-    # @POST: Environment removed from self.config.environments if it existed.
    # @PARAM: env_id (str) - The ID of the environment to delete.
    def delete_environment(self, env_id: str):
        with belief_scope("delete_environment"):
            logger.info(f"[delete_environment][Entry] Deleting {env_id}")
-            # 1. Runtime check of @PRE
            assert env_id and isinstance(env_id, str), "env_id must be a non-empty string"
-            # 2. Logic implementation
            original_count = len(self.config.environments)
            self.config.environments = [e for e in self.config.environments if e.id != env_id]
            if len(self.config.environments) < original_count:
                self.save()
                logger.info(f"[delete_environment][Action] Deleted {env_id}")
            else:
                logger.warning(f"[delete_environment][Coherence:Failed] Environment {env_id} not found")
    # [/DEF:delete_environment:Function]
# [/DEF:ConfigManager:Class]
# [/DEF:ConfigManagerModule:Module]
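
The diff imports AppConfigRecord from ..models.config but does not show it. A plausible shape, inferred from the usage above (a single row keyed by the literal id "global", with the whole AppConfig stored as a JSON payload) and from the app_configurations table name mentioned in the ConfigModels diff below, might be:

from sqlalchemy import Column, String
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.orm import declarative_base

Base = declarative_base()

# Hypothetical reconstruction -- the real model lives in backend/src/models/config.py.
class AppConfigRecord(Base):
    __tablename__ = "app_configurations"
    id = Column(String, primary_key=True)    # always "global" in ConfigManager
    payload = Column(JSONB, nullable=False)  # AppConfig.model_dump() output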

View File

@@ -1,8 +1,9 @@
# [DEF:ConfigModels:Module]
+# @TIER: STANDARD
# @SEMANTICS: config, models, pydantic
# @PURPOSE: Defines the data models for application configuration using Pydantic.
# @LAYER: Core
-# @RELATION: READS_FROM -> config.json
+# @RELATION: READS_FROM -> app_configurations (database)
# @RELATION: USED_BY -> ConfigManager
from pydantic import BaseModel, Field
@@ -32,9 +33,10 @@ class Environment(BaseModel):
# [DEF:LoggingConfig:DataClass]
# @PURPOSE: Defines the configuration for the application's logging system.
class LoggingConfig(BaseModel):
    level: str = "INFO"
+    task_log_level: str = "INFO"  # Minimum level for task-specific logs (DEBUG, INFO, WARNING, ERROR)
-    file_path: Optional[str] = "logs/app.log"
+    file_path: Optional[str] = None
    max_bytes: int = 10 * 1024 * 1024
    backup_count: int = 5
    enable_belief_state: bool = True
@@ -46,6 +48,8 @@ class GlobalSettings(BaseModel):
    storage: StorageConfig = Field(default_factory=StorageConfig)
    default_environment_id: Optional[str] = None
    logging: LoggingConfig = Field(default_factory=LoggingConfig)
+    connections: List[dict] = []
+    llm: dict = Field(default_factory=lambda: {"providers": [], "default_provider": ""})
    # Task retention settings
    task_retention_days: int = 30
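
A quick sanity check of the new defaults, a sketch assuming the module path backend.src.core.config_models; note that Pydantic deep-copies field defaults, so the mutable [] on connections and the default_factory on llm are safe (instances do not share state):

from backend.src.core.config_models import GlobalSettings

settings = GlobalSettings()
assert settings.logging.task_log_level == "INFO"
assert settings.logging.file_path is None  # was "logs/app.log"
assert settings.connections == []
assert settings.llm == {"providers": [], "default_provider": ""}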

View File

@@ -1,7 +1,7 @@
# [DEF:backend.src.core.database:Module]
#
-# @SEMANTICS: database, sqlite, sqlalchemy, session, persistence
+# @SEMANTICS: database, postgresql, sqlalchemy, session, persistence
-# @PURPOSE: Configures the SQLite database connection and session management.
+# @PURPOSE: Configures database connection and session management (PostgreSQL-first).
# @LAYER: Core
# @RELATION: DEPENDS_ON -> sqlalchemy
# @RELATION: USES -> backend.src.models.mapping
@@ -11,14 +11,12 @@
# [SECTION: IMPORTS]
from sqlalchemy import create_engine
-from sqlalchemy.orm import sessionmaker, Session
+from sqlalchemy.orm import sessionmaker
from ..models.mapping import Base
# Import models to ensure they're registered with Base
-from ..models.task import TaskRecord
-from ..models.connection import ConnectionConfig
-from ..models.git import GitServerConfig, GitRepository, DeploymentEnvironment
-from ..models.auth import User, Role, Permission, ADGroupMapping
-from ..models.llm import LLMProvider, ValidationRecord
+from ..models import task as _task_models  # noqa: F401
+from ..models import auth as _auth_models  # noqa: F401
+from ..models import config as _config_models  # noqa: F401
from .logger import belief_scope
from .auth.config import auth_config
import os
@@ -26,44 +24,50 @@ from pathlib import Path
# [/SECTION]
# [DEF:BASE_DIR:Variable]
-# @PURPOSE: Base directory for the backend (where .db files should reside).
+# @PURPOSE: Base directory for the backend.
BASE_DIR = Path(__file__).resolve().parent.parent.parent
# [/DEF:BASE_DIR:Variable]
# [DEF:DATABASE_URL:Constant]
-# @PURPOSE: URL for the main mappings database.
-DATABASE_URL = os.getenv("DATABASE_URL", f"sqlite:///{BASE_DIR}/mappings.db")
+# @PURPOSE: URL for the main application database.
+DEFAULT_POSTGRES_URL = os.getenv(
+    "POSTGRES_URL",
+    "postgresql+psycopg2://postgres:postgres@localhost:5432/ss_tools",
+)
+DATABASE_URL = os.getenv("DATABASE_URL", DEFAULT_POSTGRES_URL)
# [/DEF:DATABASE_URL:Constant]
# [DEF:TASKS_DATABASE_URL:Constant]
# @PURPOSE: URL for the tasks execution database.
-TASKS_DATABASE_URL = os.getenv("TASKS_DATABASE_URL", f"sqlite:///{BASE_DIR}/tasks.db")
+# Defaults to DATABASE_URL to keep task logs in the same PostgreSQL instance.
+TASKS_DATABASE_URL = os.getenv("TASKS_DATABASE_URL", DATABASE_URL)
# [/DEF:TASKS_DATABASE_URL:Constant]
# [DEF:AUTH_DATABASE_URL:Constant]
# @PURPOSE: URL for the authentication database.
AUTH_DATABASE_URL = os.getenv("AUTH_DATABASE_URL", auth_config.AUTH_DATABASE_URL)
-# If it's a relative sqlite path starting with ./backend/, fix it to be absolute or relative to BASE_DIR
-if AUTH_DATABASE_URL.startswith("sqlite:///./backend/"):
-    AUTH_DATABASE_URL = AUTH_DATABASE_URL.replace("sqlite:///./backend/", f"sqlite:///{BASE_DIR}/")
-elif AUTH_DATABASE_URL.startswith("sqlite:///./") and not AUTH_DATABASE_URL.startswith("sqlite:///./backend/"):
-    # If it's just ./ but we are in backend, it's fine, but let's make it absolute for robustness
-    AUTH_DATABASE_URL = AUTH_DATABASE_URL.replace("sqlite:///./", f"sqlite:///{BASE_DIR}/")
# [/DEF:AUTH_DATABASE_URL:Constant]
# [DEF:engine:Variable]
+def _build_engine(db_url: str):
+    with belief_scope("_build_engine"):
+        if db_url.startswith("sqlite"):
+            return create_engine(db_url, connect_args={"check_same_thread": False})
+        return create_engine(db_url, pool_pre_ping=True)
# @PURPOSE: SQLAlchemy engine for mappings database.
-engine = create_engine(DATABASE_URL, connect_args={"check_same_thread": False})
+engine = _build_engine(DATABASE_URL)
# [/DEF:engine:Variable]
# [DEF:tasks_engine:Variable]
# @PURPOSE: SQLAlchemy engine for tasks database.
-tasks_engine = create_engine(TASKS_DATABASE_URL, connect_args={"check_same_thread": False})
+tasks_engine = _build_engine(TASKS_DATABASE_URL)
# [/DEF:tasks_engine:Variable]
# [DEF:auth_engine:Variable]
# @PURPOSE: SQLAlchemy engine for authentication database.
-auth_engine = create_engine(AUTH_DATABASE_URL, connect_args={"check_same_thread": False})
+auth_engine = _build_engine(AUTH_DATABASE_URL)
# [/DEF:auth_engine:Variable]
# [DEF:SessionLocal:Class]
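
As a usage note (a sketch, not part of the diff): _build_engine keeps the SQLite-specific check_same_thread flag for test setups while enabling pool_pre_ping elsewhere, so stale PostgreSQL connections are detected on checkout. Because the engines are built at module import time, any override must be exported before the first import:

import os

# Hypothetical local override: steer the engines at one SQLite file for a test run.
os.environ["DATABASE_URL"] = "sqlite:///./dev.db"
os.environ["TASKS_DATABASE_URL"] = os.environ["DATABASE_URL"]

from backend.src.core import database

assert str(database.engine.url).startswith("sqlite")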

View File

@@ -19,6 +19,9 @@ _belief_state = threading.local()
# Global flag for belief state logging
_enable_belief_state = True
+# Global task log level filter
+_task_log_level = "INFO"
# [DEF:BeliefFormatter:Class]
# @PURPOSE: Custom logging formatter that adds belief state prefixes to log messages.
class BeliefFormatter(logging.Formatter):
@@ -58,12 +61,12 @@ class LogEntry(BaseModel):
# @SEMANTICS: logging, context, belief_state
@contextmanager
def belief_scope(anchor_id: str, message: str = ""):
-    # Log Entry if enabled
+    # Log Entry if enabled (DEBUG level to reduce noise)
    if _enable_belief_state:
        entry_msg = f"[{anchor_id}][Entry]"
        if message:
            entry_msg += f" {message}"
-        logger.info(entry_msg)
+        logger.debug(entry_msg)
    # Set thread-local anchor_id
    old_anchor = getattr(_belief_state, 'anchor_id', None)
@@ -71,13 +74,13 @@ def belief_scope(anchor_id: str, message: str = ""):
    try:
        yield
-        # Log Coherence OK and Exit
+        # Log Coherence OK and Exit (DEBUG level to reduce noise)
-        logger.info(f"[{anchor_id}][Coherence:OK]")
+        logger.debug(f"[{anchor_id}][Coherence:OK]")
        if _enable_belief_state:
-            logger.info(f"[{anchor_id}][Exit]")
+            logger.debug(f"[{anchor_id}][Exit]")
    except Exception as e:
-        # Log Coherence Failed
+        # Log Coherence Failed (DEBUG level to reduce noise)
-        logger.info(f"[{anchor_id}][Coherence:Failed] {str(e)}")
+        logger.debug(f"[{anchor_id}][Coherence:Failed] {str(e)}")
        raise
    finally:
        # Restore old anchor
@@ -88,12 +91,13 @@ def belief_scope(anchor_id: str, message: str = ""):
# [DEF:configure_logger:Function]
# @PURPOSE: Configures the logger with the provided logging settings.
# @PRE: config is a valid LoggingConfig instance.
-# @POST: Logger level, handlers, and belief state flag are updated.
+# @POST: Logger level, handlers, belief state flag, and task log level are updated.
# @PARAM: config (LoggingConfig) - The logging configuration.
# @SEMANTICS: logging, configuration, initialization
def configure_logger(config):
-    global _enable_belief_state
+    global _enable_belief_state, _task_log_level
    _enable_belief_state = config.enable_belief_state
+    _task_log_level = config.task_log_level.upper()
    # Set logger level
    level = getattr(logging, config.level.upper(), logging.INFO)
@@ -107,7 +111,6 @@ def configure_logger(config):
    # Add file handler if file_path is set
    if config.file_path:
-        import os
        from pathlib import Path
        log_file = Path(config.file_path)
        log_file.parent.mkdir(parents=True, exist_ok=True)
@@ -130,6 +133,36 @@
    ))
# [/DEF:configure_logger:Function]
# [DEF:get_task_log_level:Function]
# @PURPOSE: Returns the current task log level filter.
# @PRE: None.
# @POST: Returns the task log level string.
# @RETURN: str - The current task log level (DEBUG, INFO, WARNING, ERROR).
# @SEMANTICS: logging, configuration, getter
def get_task_log_level() -> str:
"""Returns the current task log level filter."""
return _task_log_level
# [/DEF:get_task_log_level:Function]
# [DEF:should_log_task_level:Function]
# @PURPOSE: Checks if a log level should be recorded based on task_log_level setting.
# @PRE: level is a valid log level string.
# @POST: Returns True if level meets or exceeds task_log_level threshold.
# @PARAM: level (str) - The log level to check.
# @RETURN: bool - True if the level should be logged.
# @SEMANTICS: logging, filter, level
def should_log_task_level(level: str) -> bool:
"""Checks if a log level should be recorded based on task_log_level setting."""
level_order = {"DEBUG": 0, "INFO": 1, "WARNING": 2, "ERROR": 3}
current_level = _task_log_level.upper()
check_level = level.upper()
current_order = level_order.get(current_level, 1) # Default to INFO
check_order = level_order.get(check_level, 1)
return check_order >= current_order
# [/DEF:should_log_task_level:Function]
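# A minimal sketch of how a task log sink might apply the filter; record_task_log
# and its buffer are hypothetical helpers, not part of this module:
#
#     from src.core.logger import should_log_task_level
#
#     def record_task_log(level: str, message: str, logs: list) -> None:
#         if should_log_task_level(level):
#             logs.append({"level": level.upper(), "message": message})
#
#     buffer: list = []
#     record_task_log("DEBUG", "noise", buffer)  # filtered out at the default INFO threshold
#     record_task_log("ERROR", "boom", buffer)   # kept
#     assert len(buffer) == 1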
# [DEF:WebSocketLogHandler:Class]
# @SEMANTICS: logging, handler, websocket, buffer
# @PURPOSE: A custom logging handler that captures log records into a buffer. It is designed to be extended for real-time log streaming over WebSockets.

View File

@@ -0,0 +1,228 @@
# [DEF:test_logger:Module]
# @TIER: STANDARD
# @PURPOSE: Unit tests for logger module
# @LAYER: Infra
# @RELATION: VERIFIES -> src.core.logger
import sys
from pathlib import Path
# Add src to path
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "src"))
import pytest
from src.core.logger import (
belief_scope,
logger,
configure_logger,
get_task_log_level,
should_log_task_level
)
from src.core.config_models import LoggingConfig
# [DEF:test_belief_scope_logs_entry_action_exit_at_debug:Function]
# @PURPOSE: Test that belief_scope generates [ID][Entry], [ID][Action], and [ID][Exit] logs at DEBUG level.
# @PRE: belief_scope is available. caplog fixture is used. Logger configured to DEBUG.
# @POST: Logs are verified to contain Entry, Action, and Exit tags at DEBUG level.
def test_belief_scope_logs_entry_action_exit_at_debug(caplog):
"""Test that belief_scope generates [ID][Entry], [ID][Action], and [ID][Exit] logs at DEBUG level."""
# Configure logger to DEBUG level
config = LoggingConfig(
level="DEBUG",
task_log_level="DEBUG",
enable_belief_state=True
)
configure_logger(config)
caplog.set_level("DEBUG")
with belief_scope("TestFunction"):
logger.info("Doing something important")
# Check that the logs contain the expected patterns
log_messages = [record.message for record in caplog.records]
assert any("[TestFunction][Entry]" in msg for msg in log_messages), "Entry log not found"
assert any("[TestFunction][Action] Doing something important" in msg for msg in log_messages), "Action log not found"
assert any("[TestFunction][Exit]" in msg for msg in log_messages), "Exit log not found"
# Reset to INFO
config = LoggingConfig(level="INFO", task_log_level="INFO", enable_belief_state=True)
configure_logger(config)
# [/DEF:test_belief_scope_logs_entry_action_exit_at_debug:Function]
# [DEF:test_belief_scope_error_handling:Function]
# @PURPOSE: Test that belief_scope logs Coherence:Failed on exception.
# @PRE: belief_scope is available. caplog fixture is used. Logger configured to DEBUG.
# @POST: Logs are verified to contain Coherence:Failed tag.
def test_belief_scope_error_handling(caplog):
"""Test that belief_scope logs Coherence:Failed on exception."""
# Configure logger to DEBUG level
config = LoggingConfig(
level="DEBUG",
task_log_level="DEBUG",
enable_belief_state=True
)
configure_logger(config)
caplog.set_level("DEBUG")
with pytest.raises(ValueError):
with belief_scope("FailingFunction"):
raise ValueError("Something went wrong")
log_messages = [record.message for record in caplog.records]
assert any("[FailingFunction][Entry]" in msg for msg in log_messages), "Entry log not found"
assert any("[FailingFunction][Coherence:Failed]" in msg for msg in log_messages), "Failed coherence log not found"
# Exit should not be logged on failure
# Reset to INFO
config = LoggingConfig(level="INFO", task_log_level="INFO", enable_belief_state=True)
configure_logger(config)
# [/DEF:test_belief_scope_error_handling:Function]
# [DEF:test_belief_scope_success_coherence:Function]
# @PURPOSE: Test that belief_scope logs Coherence:OK on success.
# @PRE: belief_scope is available. caplog fixture is used. Logger configured to DEBUG.
# @POST: Logs are verified to contain Coherence:OK tag.
def test_belief_scope_success_coherence(caplog):
"""Test that belief_scope logs Coherence:OK on success."""
# Configure logger to DEBUG level
config = LoggingConfig(
level="DEBUG",
task_log_level="DEBUG",
enable_belief_state=True
)
configure_logger(config)
caplog.set_level("DEBUG")
with belief_scope("SuccessFunction"):
pass
log_messages = [record.message for record in caplog.records]
assert any("[SuccessFunction][Coherence:OK]" in msg for msg in log_messages), "Success coherence log not found"
# Reset to INFO
config = LoggingConfig(level="INFO", task_log_level="INFO", enable_belief_state=True)
configure_logger(config)
# [/DEF:test_belief_scope_success_coherence:Function]
# [DEF:test_belief_scope_not_visible_at_info:Function]
# @PURPOSE: Test that belief_scope Entry/Exit/Coherence logs are NOT visible at INFO level.
# @PRE: belief_scope is available. caplog fixture is used.
# @POST: Entry/Exit/Coherence logs are not captured at INFO level.
def test_belief_scope_not_visible_at_info(caplog):
"""Test that belief_scope Entry/Exit/Coherence logs are NOT visible at INFO level."""
caplog.set_level("INFO")
with belief_scope("InfoLevelFunction"):
logger.info("Doing something important")
log_messages = [record.message for record in caplog.records]
# Action log should be visible
assert any("[InfoLevelFunction][Action] Doing something important" in msg for msg in log_messages), "Action log not found"
# Entry/Exit/Coherence should NOT be visible at INFO level
assert not any("[InfoLevelFunction][Entry]" in msg for msg in log_messages), "Entry log should not be visible at INFO"
assert not any("[InfoLevelFunction][Exit]" in msg for msg in log_messages), "Exit log should not be visible at INFO"
assert not any("[InfoLevelFunction][Coherence:OK]" in msg for msg in log_messages), "Coherence log should not be visible at INFO"
# [/DEF:test_belief_scope_not_visible_at_info:Function]
# [DEF:test_task_log_level_default:Function]
# @PURPOSE: Test that default task log level is INFO.
# @PRE: None.
# @POST: Default level is INFO.
def test_task_log_level_default():
"""Test that default task log level is INFO."""
level = get_task_log_level()
assert level == "INFO"
# [/DEF:test_task_log_level_default:Function]
# [DEF:test_should_log_task_level:Function]
# @PURPOSE: Test that should_log_task_level correctly filters log levels.
# @PRE: None.
# @POST: Filtering works correctly for all level combinations.
def test_should_log_task_level():
"""Test that should_log_task_level correctly filters log levels."""
# Default level is INFO
assert should_log_task_level("ERROR") is True, "ERROR should be logged at INFO threshold"
assert should_log_task_level("WARNING") is True, "WARNING should be logged at INFO threshold"
assert should_log_task_level("INFO") is True, "INFO should be logged at INFO threshold"
assert should_log_task_level("DEBUG") is False, "DEBUG should NOT be logged at INFO threshold"
# [/DEF:test_should_log_task_level:Function]
# [DEF:test_configure_logger_task_log_level:Function]
# @PURPOSE: Test that configure_logger updates task_log_level.
# @PRE: LoggingConfig is available.
# @POST: task_log_level is updated correctly.
def test_configure_logger_task_log_level():
"""Test that configure_logger updates task_log_level."""
config = LoggingConfig(
level="DEBUG",
task_log_level="DEBUG",
enable_belief_state=True
)
configure_logger(config)
assert get_task_log_level() == "DEBUG", "task_log_level should be DEBUG"
assert should_log_task_level("DEBUG") is True, "DEBUG should be logged at DEBUG threshold"
# Reset to INFO
config = LoggingConfig(
level="INFO",
task_log_level="INFO",
enable_belief_state=True
)
configure_logger(config)
assert get_task_log_level() == "INFO", "task_log_level should be reset to INFO"
# [/DEF:test_configure_logger_task_log_level:Function]
# [DEF:test_enable_belief_state_flag:Function]
# @PURPOSE: Test that enable_belief_state flag controls belief_scope logging.
# @PRE: LoggingConfig is available. caplog fixture is used.
# @POST: belief_scope logs are controlled by the flag.
def test_enable_belief_state_flag(caplog):
"""Test that enable_belief_state flag controls belief_scope logging."""
# Disable belief state
config = LoggingConfig(
level="DEBUG",
task_log_level="DEBUG",
enable_belief_state=False
)
configure_logger(config)
caplog.set_level("DEBUG")
with belief_scope("DisabledFunction"):
logger.info("Doing something")
log_messages = [record.message for record in caplog.records]
# Entry and Exit should NOT be logged when disabled
assert not any("[DisabledFunction][Entry]" in msg for msg in log_messages), "Entry should not be logged when disabled"
assert not any("[DisabledFunction][Exit]" in msg for msg in log_messages), "Exit should not be logged when disabled"
# Coherence:OK should still be logged (internal tracking)
assert any("[DisabledFunction][Coherence:OK]" in msg for msg in log_messages), "Coherence should still be logged"
# Re-enable for other tests
config = LoggingConfig(
level="DEBUG",
task_log_level="DEBUG",
enable_belief_state=True
)
configure_logger(config)
# [/DEF:test_enable_belief_state_flag:Function]
# [/DEF:test_logger:Module]

View File

@@ -11,12 +11,10 @@
import zipfile
import yaml
import os
-import shutil
import tempfile
from pathlib import Path
from typing import Dict
from .logger import logger, belief_scope
-import yaml
# [/SECTION]
# [DEF:MigrationEngine:Class]

View File

@@ -1,12 +1,12 @@
import importlib.util
import os
import sys  # Added this line
-from typing import Dict, Type, List, Optional
+from typing import Dict, List, Optional
from .plugin_base import PluginBase, PluginConfig
-from jsonschema import validate
from .logger import belief_scope
# [DEF:PluginLoader:Class]
+# @TIER: STANDARD
# @SEMANTICS: plugin, loader, dynamic, import
# @PURPOSE: Scans a specified directory for Python modules, dynamically loads them, and registers any classes that are valid implementations of the PluginBase interface.
# @LAYER: Core

View File

@@ -1,4 +1,5 @@
# [DEF:SchedulerModule:Module]
+# @TIER: STANDARD
# @SEMANTICS: scheduler, apscheduler, cron, backup
# @PURPOSE: Manages scheduled tasks using APScheduler.
# @LAYER: Core
@@ -9,11 +10,11 @@ from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from .logger import logger, belief_scope
from .config_manager import ConfigManager
-from typing import Optional
import asyncio
# [/SECTION]
# [DEF:SchedulerService:Class]
+# @TIER: STANDARD
# @SEMANTICS: scheduler, service, apscheduler
# @PURPOSE: Provides a service to manage scheduled backup tasks.
class SchedulerService:

View File

@@ -13,10 +13,10 @@
import json
import zipfile
from pathlib import Path
-from typing import Any, Dict, List, Optional, Tuple, Union, cast
+from typing import Dict, List, Optional, Tuple, Union, cast
from requests import Response
from .logger import logger as app_logger, belief_scope
-from .utils.network import APIClient, SupersetAPIError, AuthenticationError, DashboardNotFoundError, NetworkError
+from .utils.network import APIClient, SupersetAPIError
from .utils.fileio import get_filename_from_headers
from .config_models import Environment
# [/SECTION]
@@ -87,11 +87,11 @@ class SupersetClient:
        if 'columns' not in validated_query:
            validated_query['columns'] = ["slug", "id", "changed_on_utc", "dashboard_title", "published"]
-        total_count = self._fetch_total_object_count(endpoint="/dashboard/")
        paginated_data = self._fetch_all_pages(
            endpoint="/dashboard/",
-            pagination_options={"base_query": validated_query, "total_count": total_count, "results_field": "result"},
+            pagination_options={"base_query": validated_query, "results_field": "result"},
        )
+        total_count = len(paginated_data)
        app_logger.info("[get_dashboards][Exit] Found %d dashboards.", total_count)
        return total_count, paginated_data
    # [/DEF:get_dashboards:Function]
@@ -203,15 +203,121 @@ class SupersetClient:
        app_logger.info("[get_datasets][Enter] Fetching datasets.")
        validated_query = self._validate_query_params(query)
-        total_count = self._fetch_total_object_count(endpoint="/dataset/")
        paginated_data = self._fetch_all_pages(
            endpoint="/dataset/",
-            pagination_options={"base_query": validated_query, "total_count": total_count, "results_field": "result"},
+            pagination_options={"base_query": validated_query, "results_field": "result"},
        )
+        total_count = len(paginated_data)
        app_logger.info("[get_datasets][Exit] Found %d datasets.", total_count)
        return total_count, paginated_data
    # [/DEF:get_datasets:Function]
# [DEF:get_datasets_summary:Function]
# @PURPOSE: Fetches dataset metadata optimized for the Dataset Hub grid.
# @PRE: Client is authenticated.
# @POST: Returns a list of dataset metadata summaries.
# @RETURN: List[Dict]
def get_datasets_summary(self) -> List[Dict]:
with belief_scope("SupersetClient.get_datasets_summary"):
query = {
"columns": ["id", "table_name", "schema", "database"]
}
_, datasets = self.get_datasets(query=query)
# Map fields to match the contracts
result = []
for ds in datasets:
result.append({
"id": ds.get("id"),
"table_name": ds.get("table_name"),
"schema": ds.get("schema"),
"database": ds.get("database", {}).get("database_name", "Unknown")
})
return result
# [/DEF:get_datasets_summary:Function]
# [DEF:get_dataset_detail:Function]
# @PURPOSE: Fetches detailed dataset information including columns and linked dashboards
# @PRE: Client is authenticated and dataset_id exists.
# @POST: Returns detailed dataset info with columns and linked dashboards.
# @PARAM: dataset_id (int) - The dataset ID to fetch details for.
# @RETURN: Dict - Dataset details with columns and linked_dashboards.
# @RELATION: CALLS -> self.get_dataset
# @RELATION: CALLS -> self.network.request (for related_objects)
def get_dataset_detail(self, dataset_id: int) -> Dict:
with belief_scope("SupersetClient.get_dataset_detail", f"id={dataset_id}"):
# Get base dataset info
response = self.get_dataset(dataset_id)
# If the response is a dict and has a 'result' key, use that (standard Superset API)
if isinstance(response, dict) and 'result' in response:
dataset = response['result']
else:
dataset = response
# Extract columns information
columns = dataset.get("columns", [])
column_info = []
for col in columns:
column_info.append({
"id": col.get("id"),
"name": col.get("column_name"),
"type": col.get("type"),
"is_dttm": col.get("is_dttm", False),
"is_active": col.get("is_active", True),
"description": col.get("description", "")
})
# Get linked dashboards using related_objects endpoint
linked_dashboards = []
try:
related_objects = self.network.request(
method="GET",
endpoint=f"/dataset/{dataset_id}/related_objects"
)
# Handle different response formats
if isinstance(related_objects, dict):
if "dashboards" in related_objects:
dashboards_data = related_objects["dashboards"]
elif "result" in related_objects and isinstance(related_objects["result"], dict):
dashboards_data = related_objects["result"].get("dashboards", [])
else:
dashboards_data = []
for dash in dashboards_data:
linked_dashboards.append({
"id": dash.get("id"),
"title": dash.get("dashboard_title") or dash.get("title", "Unknown"),
"slug": dash.get("slug")
})
except Exception as e:
app_logger.warning(f"[get_dataset_detail][Warning] Failed to fetch related dashboards: {e}")
linked_dashboards = []
# Extract SQL table information
sql = dataset.get("sql", "")
result = {
"id": dataset.get("id"),
"table_name": dataset.get("table_name"),
"schema": dataset.get("schema"),
"database": dataset.get("database", {}).get("database_name", "Unknown"),
"description": dataset.get("description", ""),
"columns": column_info,
"column_count": len(column_info),
"sql": sql,
"linked_dashboards": linked_dashboards,
"linked_dashboard_count": len(linked_dashboards),
"is_sqllab_view": dataset.get("is_sqllab_view", False),
"created_on": dataset.get("created_on"),
"changed_on": dataset.get("changed_on")
}
app_logger.info(f"[get_dataset_detail][Exit] Got dataset {dataset_id} with {len(column_info)} columns and {len(linked_dashboards)} linked dashboards")
return result
# [/DEF:get_dataset_detail:Function]
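    # A hypothetical call sequence for the two new helpers (client construction is
    # elided; field names follow the mappings built above):
    #
    #     client: SupersetClient = ...  # obtained however the application wires it
    #
    #     for summary in client.get_datasets_summary():
    #         print(summary["id"], summary["table_name"], summary["schema"], summary["database"])
    #
    #     detail = client.get_dataset_detail(dataset_id=42)  # 42 is an illustrative ID
    #     print(f'{detail["table_name"]}: {detail["column_count"]} columns, '
    #           f'{detail["linked_dashboard_count"]} linked dashboards')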
    # [DEF:get_dataset:Function]
    # @PURPOSE: Fetches information about a specific dataset by its ID.
    # @PARAM: dataset_id (int) - The dataset ID.
@@ -264,11 +370,12 @@ class SupersetClient:
        validated_query = self._validate_query_params(query or {})
        if 'columns' not in validated_query:
            validated_query['columns'] = []
-        total_count = self._fetch_total_object_count(endpoint="/database/")
        paginated_data = self._fetch_all_pages(
            endpoint="/database/",
-            pagination_options={"base_query": validated_query, "total_count": total_count, "results_field": "result"},
+            pagination_options={"base_query": validated_query, "results_field": "result"},
        )
+        total_count = len(paginated_data)
        app_logger.info("[get_databases][Exit] Found %d databases.", total_count)
        return total_count, paginated_data
    # [/DEF:get_databases:Function]

View File

@@ -1,4 +1,5 @@
# [DEF:TaskManagerPackage:Module]
+# @TIER: TRIVIAL
# @SEMANTICS: task, manager, package, exports
# @PURPOSE: Exports the public API of the task manager package.
# @LAYER: Core

View File

@@ -1,47 +1,75 @@
# [DEF:TaskCleanupModule:Module] # [DEF:TaskCleanupModule:Module]
# @SEMANTICS: task, cleanup, retention # @TIER: STANDARD
# @PURPOSE: Implements task cleanup and retention policies. # @SEMANTICS: task, cleanup, retention, logs
# @PURPOSE: Implements task cleanup and retention policies, including associated logs.
# @LAYER: Core # @LAYER: Core
# @RELATION: Uses TaskPersistenceService to delete old tasks. # @RELATION: Uses TaskPersistenceService and TaskLogPersistenceService to delete old tasks and logs.
from datetime import datetime, timedelta from typing import List
from .persistence import TaskPersistenceService from .persistence import TaskPersistenceService, TaskLogPersistenceService
from ..logger import logger, belief_scope from ..logger import logger, belief_scope
from ..config_manager import ConfigManager from ..config_manager import ConfigManager
# [DEF:TaskCleanupService:Class] # [DEF:TaskCleanupService:Class]
# @PURPOSE: Provides methods to clean up old task records. # @PURPOSE: Provides methods to clean up old task records and their associated logs.
# @TIER: STANDARD
class TaskCleanupService:
    # [DEF:__init__:Function]
    # @PURPOSE: Initializes the cleanup service with dependencies.
    # @PRE: persistence_service and config_manager are valid.
    # @POST: Cleanup service is ready.
    def __init__(
        self,
        persistence_service: TaskPersistenceService,
        log_persistence_service: TaskLogPersistenceService,
        config_manager: ConfigManager
    ):
        self.persistence_service = persistence_service
        self.log_persistence_service = log_persistence_service
        self.config_manager = config_manager
    # [/DEF:__init__:Function]

    # [DEF:run_cleanup:Function]
    # @PURPOSE: Deletes tasks older than the configured retention period and their logs.
    # @PRE: Config manager has valid settings.
    # @POST: Old tasks and their logs are deleted from persistence.
    def run_cleanup(self):
        with belief_scope("TaskCleanupService.run_cleanup"):
            settings = self.config_manager.get_config().settings
            retention_days = settings.task_retention_days
            logger.info(f"Cleaning up tasks older than {retention_days} days.")
            # Load tasks to check for limit
            tasks = self.persistence_service.load_tasks(limit=1000)
            if len(tasks) > settings.task_retention_limit:
                to_delete: List[str] = [t.id for t in tasks[settings.task_retention_limit:]]
                # Delete logs first (before task records)
                self.log_persistence_service.delete_logs_for_tasks(to_delete)
                # Then delete task records
                self.persistence_service.delete_tasks(to_delete)
                logger.info(f"Deleted {len(to_delete)} tasks and their logs exceeding limit of {settings.task_retention_limit}")
    # [/DEF:run_cleanup:Function]

    # [DEF:delete_task_with_logs:Function]
    # @PURPOSE: Delete a single task and all its associated logs.
    # @PRE: task_id is a valid task ID.
    # @POST: Task and all its logs are deleted.
    # @PARAM: task_id (str) - The task ID to delete.
    def delete_task_with_logs(self, task_id: str) -> None:
        """Delete a single task and all its associated logs."""
        with belief_scope("TaskCleanupService.delete_task_with_logs", f"task_id={task_id}"):
            # Delete logs first
            self.log_persistence_service.delete_logs_for_task(task_id)
            # Then delete task record
            self.persistence_service.delete_tasks([task_id])
            logger.info(f"Deleted task {task_id} and its associated logs")
    # [/DEF:delete_task_with_logs:Function]
# [/DEF:TaskCleanupService:Class]
# [/DEF:TaskCleanupModule:Module]
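
A minimal wiring sketch for orientation, assuming the three collaborating services are constructed the same way the rest of the application builds them; the config path and task id are illustrative, and the call sites are an assumption rather than part of this changeset:

# Hypothetical wiring -- service constructors as named in this module's imports.
cleanup = TaskCleanupService(
    persistence_service=TaskPersistenceService(),
    log_persistence_service=TaskLogPersistenceService(),
    config_manager=ConfigManager(config_path="config.json"),
)
cleanup.run_cleanup()                     # enforce task_retention_limit: logs first, then records
cleanup.delete_task_with_logs("abc123")   # targeted removal of a single task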

View File

@@ -0,0 +1,115 @@
# [DEF:TaskContextModule:Module]
# @SEMANTICS: task, context, plugin, execution, logger
# @PURPOSE: Provides execution context passed to plugins during task execution.
# @LAYER: Core
# @RELATION: DEPENDS_ON -> TaskLogger, USED_BY -> plugins
# @TIER: CRITICAL
# @INVARIANT: Each TaskContext is bound to a single task execution.
# [SECTION: IMPORTS]
from typing import Dict, Any, Callable
from .task_logger import TaskLogger
# [/SECTION]
# [DEF:TaskContext:Class]
# @SEMANTICS: context, task, execution, plugin
# @PURPOSE: A container passed to plugin.execute() providing the logger and other task-specific utilities.
# @TIER: CRITICAL
# @INVARIANT: logger is always a valid TaskLogger instance.
# @UX_STATE: Idle -> Active -> Complete
class TaskContext:
    """
    Execution context provided to plugins during task execution.

    Usage:
        def execute(params: dict, context: TaskContext = None):
            if context:
                context.logger.info("Starting process")
                context.logger.progress("Processing items", percent=50)
            # ... plugin logic
    """

    # [DEF:__init__:Function]
    # @PURPOSE: Initialize the TaskContext with task-specific resources.
    # @PRE: task_id is a valid task identifier, add_log_fn is callable.
    # @POST: TaskContext is ready to be passed to plugin.execute().
    # @PARAM: task_id (str) - The ID of the task.
    # @PARAM: add_log_fn (Callable) - Function to add log to TaskManager.
    # @PARAM: params (Dict) - Task parameters.
    # @PARAM: default_source (str) - Default source for logs (default: "plugin").
    def __init__(
        self,
        task_id: str,
        add_log_fn: Callable,
        params: Dict[str, Any],
        default_source: str = "plugin"
    ):
        self._task_id = task_id
        self._params = params
        self._logger = TaskLogger(
            task_id=task_id,
            add_log_fn=add_log_fn,
            source=default_source
        )
    # [/DEF:__init__:Function]

    # [DEF:task_id:Function]
    # @PURPOSE: Get the task ID.
    # @PRE: TaskContext must be initialized.
    # @POST: Returns the task ID string.
    # @RETURN: str - The task ID.
    @property
    def task_id(self) -> str:
        return self._task_id
    # [/DEF:task_id:Function]

    # [DEF:logger:Function]
    # @PURPOSE: Get the TaskLogger instance for this context.
    # @PRE: TaskContext must be initialized.
    # @POST: Returns the TaskLogger instance.
    # @RETURN: TaskLogger - The logger instance.
    @property
    def logger(self) -> TaskLogger:
        return self._logger
    # [/DEF:logger:Function]

    # [DEF:params:Function]
    # @PURPOSE: Get the task parameters.
    # @PRE: TaskContext must be initialized.
    # @POST: Returns the parameters dictionary.
    # @RETURN: Dict[str, Any] - The task parameters.
    @property
    def params(self) -> Dict[str, Any]:
        return self._params
    # [/DEF:params:Function]

    # [DEF:get_param:Function]
    # @PURPOSE: Get a specific parameter value with optional default.
    # @PRE: TaskContext must be initialized.
    # @POST: Returns parameter value or default.
    # @PARAM: key (str) - Parameter key.
    # @PARAM: default (Any) - Default value if key not found.
    # @RETURN: Any - Parameter value or default.
    def get_param(self, key: str, default: Any = None) -> Any:
        return self._params.get(key, default)
    # [/DEF:get_param:Function]

    # [DEF:create_sub_context:Function]
    # @PURPOSE: Create a sub-context with a different default source.
    # @PRE: source is a non-empty string.
    # @POST: Returns new TaskContext with different logger source.
    # @PARAM: source (str) - New default source for logging.
    # @RETURN: TaskContext - New context with different source.
    def create_sub_context(self, source: str) -> "TaskContext":
        """Create a sub-context with a different default source for logging."""
        return TaskContext(
            task_id=self._task_id,
            add_log_fn=self._logger._add_log,
            params=self._params,
            default_source=source
        )
    # [/DEF:create_sub_context:Function]
# [/DEF:TaskContext:Class]
# [/DEF:TaskContextModule:Module]
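
Sub-contexts exist so a plugin can re-attribute log sources when it fans out to helpers. A minimal sketch, assuming a context received in execute(); the helper name and log messages are illustrative:

# Illustrative helper inside a plugin -- 'context' is the TaskContext from execute().
def sync_dashboards(context: TaskContext) -> None:
    context.logger.info("Sync started")               # attributed to source="plugin"
    api_ctx = context.create_sub_context("superset_api")
    api_ctx.logger.info("Fetching dashboard list")    # attributed to source="superset_api"
    git_ctx = context.create_sub_context("git")
    git_ctx.logger.info("Committing exported YAML")   # attributed to source="git"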

View File

@@ -8,23 +8,33 @@
# [SECTION: IMPORTS]
import asyncio
import threading
import inspect
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
from typing import Dict, Any, List, Optional
from .models import Task, TaskStatus, LogEntry, LogFilter, LogStats
from .persistence import TaskPersistenceService, TaskLogPersistenceService
from .context import TaskContext
from ..logger import logger, belief_scope, should_log_task_level
# [/SECTION]
# [DEF:TaskManager:Class]
# @SEMANTICS: task, manager, lifecycle, execution, state
# @PURPOSE: Manages the lifecycle of tasks, including their creation, execution, and state tracking.
# @TIER: CRITICAL
# @INVARIANT: Task IDs are unique within the registry.
# @INVARIANT: Each task has exactly one status at any time.
# @INVARIANT: Log entries are never deleted after being added to a task.
class TaskManager:
    """
    Manages the lifecycle of tasks, including their creation, execution, and state tracking.
    """

    # Log flush interval in seconds
    LOG_FLUSH_INTERVAL = 2.0

    # [DEF:__init__:Function]
    # @PURPOSE: Initialize the TaskManager with dependencies.
    # @PRE: plugin_loader is initialized.
@@ -35,8 +45,18 @@ class TaskManager:
        self.plugin_loader = plugin_loader
        self.tasks: Dict[str, Task] = {}
        self.subscribers: Dict[str, List[asyncio.Queue]] = {}
        self.executor = ThreadPoolExecutor(max_workers=5)  # For CPU-bound plugin execution
        self.persistence_service = TaskPersistenceService()
        self.log_persistence_service = TaskLogPersistenceService()
        # Log buffer: task_id -> List[LogEntry]
        self._log_buffer: Dict[str, List[LogEntry]] = {}
        self._log_buffer_lock = threading.Lock()
        # Flusher thread for batch writing logs
        self._flusher_stop_event = threading.Event()
        self._flusher_thread = threading.Thread(target=self._flusher_loop, daemon=True)
        self._flusher_thread.start()
        try:
            self.loop = asyncio.get_running_loop()
@@ -47,6 +67,59 @@ class TaskManager:
        # Load persisted tasks on startup
        self.load_persisted_tasks()
    # [/DEF:__init__:Function]
    # [DEF:_flusher_loop:Function]
    # @PURPOSE: Background thread that periodically flushes log buffer to database.
    # @PRE: TaskManager is initialized.
    # @POST: Logs are batch-written to database every LOG_FLUSH_INTERVAL seconds.
    def _flusher_loop(self):
        """Background thread that flushes log buffer to database."""
        while not self._flusher_stop_event.is_set():
            self._flush_logs()
            self._flusher_stop_event.wait(self.LOG_FLUSH_INTERVAL)
    # [/DEF:_flusher_loop:Function]

    # [DEF:_flush_logs:Function]
    # @PURPOSE: Flush all buffered logs to the database.
    # @PRE: None.
    # @POST: All buffered logs are written to task_logs table.
    def _flush_logs(self):
        """Flush all buffered logs to the database."""
        with self._log_buffer_lock:
            task_ids = list(self._log_buffer.keys())
        for task_id in task_ids:
            with self._log_buffer_lock:
                logs = self._log_buffer.pop(task_id, [])
            if logs:
                try:
                    self.log_persistence_service.add_logs(task_id, logs)
                except Exception as e:
                    logger.error(f"Failed to flush logs for task {task_id}: {e}")
                    # Re-add logs to buffer on failure
                    with self._log_buffer_lock:
                        if task_id not in self._log_buffer:
                            self._log_buffer[task_id] = []
                        self._log_buffer[task_id].extend(logs)
    # [/DEF:_flush_logs:Function]

    # [DEF:_flush_task_logs:Function]
    # @PURPOSE: Flush logs for a specific task immediately.
    # @PRE: task_id exists.
    # @POST: Task's buffered logs are written to database.
    # @PARAM: task_id (str) - The task ID.
    def _flush_task_logs(self, task_id: str):
        """Flush logs for a specific task immediately."""
        with self._log_buffer_lock:
            logs = self._log_buffer.pop(task_id, [])
        if logs:
            try:
                self.log_persistence_service.add_logs(task_id, logs)
            except Exception as e:
                logger.error(f"Failed to flush logs for task {task_id}: {e}")
    # [/DEF:_flush_task_logs:Function]
    # [DEF:create_task:Function]
    # @PURPOSE: Creates and queues a new task for execution.
@@ -63,7 +136,7 @@ class TaskManager:
            logger.error(f"Plugin with ID '{plugin_id}' not found.")
            raise ValueError(f"Plugin with ID '{plugin_id}' not found.")
        self.plugin_loader.get_plugin(plugin_id)

        if not isinstance(params, dict):
            logger.error("Task parameters must be a dictionary.")
@@ -78,7 +151,7 @@ class TaskManager:
    # [/DEF:create_task:Function]

    # [DEF:_run_task:Function]
    # @PURPOSE: Internal method to execute a task with TaskContext support.
    # @PRE: Task exists in registry.
    # @POST: Task is executed, status updated to SUCCESS or FAILED.
    # @PARAM: task_id (str) - The ID of the task to run.
@@ -91,30 +164,54 @@ class TaskManager:
        task.status = TaskStatus.RUNNING
        task.started_at = datetime.utcnow()
        self.persistence_service.persist_task(task)
        self._add_log(task_id, "INFO", f"Task started for plugin '{plugin.name}'", source="system")

        try:
            # Prepare params and check if plugin supports new TaskContext
            params = {**task.params, "_task_id": task_id}

            # Check if plugin's execute method accepts 'context' parameter
            sig = inspect.signature(plugin.execute)
            accepts_context = 'context' in sig.parameters

            if accepts_context:
                # Create TaskContext for new-style plugins
                context = TaskContext(
                    task_id=task_id,
                    add_log_fn=self._add_log,
                    params=params,
                    default_source="plugin"
                )
                if asyncio.iscoroutinefunction(plugin.execute):
                    task.result = await plugin.execute(params, context=context)
                else:
                    task.result = await self.loop.run_in_executor(
                        self.executor,
                        lambda: plugin.execute(params, context=context)
                    )
            else:
                # Backward compatibility: old-style plugins without context
                if asyncio.iscoroutinefunction(plugin.execute):
                    task.result = await plugin.execute(params)
                else:
                    task.result = await self.loop.run_in_executor(
                        self.executor,
                        plugin.execute,
                        params
                    )

            logger.info(f"Task {task_id} completed successfully")
            task.status = TaskStatus.SUCCESS
            self._add_log(task_id, "INFO", f"Task completed successfully for plugin '{plugin.name}'", source="system")
        except Exception as e:
            logger.error(f"Task {task_id} failed: {e}")
            task.status = TaskStatus.FAILED
            self._add_log(task_id, "ERROR", f"Task failed: {e}", source="system", metadata={"error_type": type(e).__name__})
        finally:
            task.finished_at = datetime.utcnow()
            # Flush any remaining buffered logs before persisting task
            self._flush_task_logs(task_id)
            self.persistence_service.persist_task(task)
            logger.info(f"Task {task_id} execution finished with status: {task.status}")
    # [/DEF:_run_task:Function]
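
Concretely, both of the following plugin shapes are dispatched correctly by the signature inspection above; the function bodies and return values are illustrative only:

# Old-style plugin: still supported, receives only params.
def execute(params: dict) -> dict:
    return {"rows": len(params.get("items", []))}

# New-style plugin: opts in by declaring a 'context' parameter.
def execute(params: dict, context: TaskContext = None) -> dict:
    if context:
        context.logger.info("Starting", metadata={"param_count": len(params)})
    return {"ok": True}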
@@ -151,7 +248,8 @@ class TaskManager:
    async def wait_for_resolution(self, task_id: str):
        with belief_scope("TaskManager.wait_for_resolution", f"task_id={task_id}"):
            task = self.tasks.get(task_id)
            if not task:
                return
            task.status = TaskStatus.AWAITING_MAPPING
            self.persistence_service.persist_task(task)
@@ -172,7 +270,8 @@ class TaskManager:
    async def wait_for_input(self, task_id: str):
        with belief_scope("TaskManager.wait_for_input", f"task_id={task_id}"):
            task = self.tasks.get(task_id)
            if not task:
                return
            # Status is already set to AWAITING_INPUT by await_input()
            self.task_futures[task_id] = self.loop.create_future()
@@ -224,36 +323,106 @@ class TaskManager:
    # [/DEF:get_tasks:Function]

    # [DEF:get_task_logs:Function]
    # @PURPOSE: Retrieves logs for a specific task (from memory for running, persistence for completed).
    # @PRE: task_id is a string.
    # @POST: Returns list of LogEntry or TaskLog objects.
    # @PARAM: task_id (str) - ID of the task.
    # @PARAM: log_filter (Optional[LogFilter]) - Filter parameters.
    # @RETURN: List[LogEntry] - List of log entries.
    def get_task_logs(self, task_id: str, log_filter: Optional[LogFilter] = None) -> List[LogEntry]:
        with belief_scope("TaskManager.get_task_logs", f"task_id={task_id}"):
            task = self.tasks.get(task_id)
            # For completed tasks, fetch from persistence
            if task and task.status in [TaskStatus.SUCCESS, TaskStatus.FAILED]:
                if log_filter is None:
                    log_filter = LogFilter()
                task_logs = self.log_persistence_service.get_logs(task_id, log_filter)
                # Convert TaskLog to LogEntry for backward compatibility
                return [
                    LogEntry(
                        timestamp=log.timestamp,
                        level=log.level,
                        message=log.message,
                        source=log.source,
                        metadata=log.metadata
                    )
                    for log in task_logs
                ]
            # For running/pending tasks, return from memory
            return task.logs if task else []
    # [/DEF:get_task_logs:Function]
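
As a usage sketch, fetching only persisted errors from one component might look like this; the task id, source value, and the `task_manager` handle (whatever the app's dependency wiring provides) are illustrative:

# Hypothetical call site -- assumes a completed task and the LogFilter model below.
errors = task_manager.get_task_logs(
    "abc123",
    log_filter=LogFilter(level="ERROR", source="superset_api", limit=50),
)
for entry in errors:
    print(entry.timestamp, entry.source, entry.message)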
    # [DEF:get_task_log_stats:Function]
    # @PURPOSE: Get statistics about logs for a task.
    # @PRE: task_id is a valid task ID.
    # @POST: Returns LogStats with counts by level and source.
    # @PARAM: task_id (str) - The task ID.
    # @RETURN: LogStats - Statistics about task logs.
    def get_task_log_stats(self, task_id: str) -> LogStats:
        with belief_scope("TaskManager.get_task_log_stats", f"task_id={task_id}"):
            return self.log_persistence_service.get_log_stats(task_id)
    # [/DEF:get_task_log_stats:Function]

    # [DEF:get_task_log_sources:Function]
    # @PURPOSE: Get unique sources for a task's logs.
    # @PRE: task_id is a valid task ID.
    # @POST: Returns list of unique source strings.
    # @PARAM: task_id (str) - The task ID.
    # @RETURN: List[str] - Unique source names.
    def get_task_log_sources(self, task_id: str) -> List[str]:
        with belief_scope("TaskManager.get_task_log_sources", f"task_id={task_id}"):
            return self.log_persistence_service.get_sources(task_id)
    # [/DEF:get_task_log_sources:Function]
    # [DEF:_add_log:Function]
    # @PURPOSE: Adds a log entry to a task buffer and notifies subscribers.
    # @PRE: Task exists.
    # @POST: Log added to buffer and pushed to queues (if level meets task_log_level filter).
    # @PARAM: task_id (str) - ID of the task.
    # @PARAM: level (str) - Log level.
    # @PARAM: message (str) - Log message.
    # @PARAM: source (str) - Source component (default: "system").
    # @PARAM: metadata (Optional[Dict]) - Additional structured data.
    # @PARAM: context (Optional[Dict]) - Legacy context (for backward compatibility).
    def _add_log(
        self,
        task_id: str,
        level: str,
        message: str,
        source: str = "system",
        metadata: Optional[Dict[str, Any]] = None,
        context: Optional[Dict[str, Any]] = None
    ):
        with belief_scope("TaskManager._add_log", f"task_id={task_id}"):
            task = self.tasks.get(task_id)
            if not task:
                return
            # Filter logs based on task_log_level configuration
            if not should_log_task_level(level):
                return
            # Create log entry with new fields
            log_entry = LogEntry(
                level=level,
                message=message,
                source=source,
                metadata=metadata,
                context=context  # Keep for backward compatibility
            )
            # Add to in-memory logs (for backward compatibility with legacy JSON field)
            task.logs.append(log_entry)
            # Add to buffer for batch persistence
            with self._log_buffer_lock:
                if task_id not in self._log_buffer:
                    self._log_buffer[task_id] = []
                self._log_buffer[task_id].append(log_entry)
            # Notify subscribers (for real-time WebSocket updates)
            if task_id in self.subscribers:
                for queue in self.subscribers[task_id]:
                    self.loop.call_soon_threadsafe(queue.put_nowait, log_entry)
@@ -353,7 +522,7 @@ class TaskManager:
    # [/DEF:resume_task_with_password:Function]

    # [DEF:clear_tasks:Function]
    # @PURPOSE: Clears tasks based on status filter (also deletes associated logs).
    # @PRE: status is Optional[TaskStatus].
    # @POST: Tasks matching filter (or all non-active) cleared from registry and database.
    # @PARAM: status (Optional[TaskStatus]) - Filter by task status.
@@ -387,9 +556,13 @@ class TaskManager:
                del self.tasks[tid]

            # Remove from persistence (task_records and task_logs via CASCADE)
            self.persistence_service.delete_tasks(tasks_to_remove)
            # Also explicitly delete logs (in case CASCADE is not set up)
            if tasks_to_remove:
                self.log_persistence_service.delete_logs_for_tasks(tasks_to_remove)
            logger.info(f"Cleared {len(tasks_to_remove)} tasks.")
            return len(tasks_to_remove)
    # [/DEF:clear_tasks:Function]

View File

@@ -1,4 +1,5 @@
# [DEF:TaskManagerModels:Module]
# @TIER: STANDARD
# @SEMANTICS: task, models, pydantic, enum, state
# @PURPOSE: Defines the data models and enumerations used by the Task Manager.
# @LAYER: Core
@@ -16,6 +17,7 @@ from pydantic import BaseModel, Field
# [/SECTION]

# [DEF:TaskStatus:Enum]
# @TIER: TRIVIAL
# @SEMANTICS: task, status, state, enum
# @PURPOSE: Defines the possible states a task can be in during its lifecycle.
class TaskStatus(str, Enum):
@@ -27,17 +29,73 @@ class TaskStatus(str, Enum):
    AWAITING_INPUT = "AWAITING_INPUT"
# [/DEF:TaskStatus:Enum]
# [DEF:LogLevel:Enum]
# @SEMANTICS: log, level, severity, enum
# @PURPOSE: Defines the possible log levels for task logging.
# @TIER: STANDARD
class LogLevel(str, Enum):
    DEBUG = "DEBUG"
    INFO = "INFO"
    WARNING = "WARNING"
    ERROR = "ERROR"
# [/DEF:LogLevel:Enum]

# [DEF:LogEntry:Class]
# @SEMANTICS: log, entry, record, pydantic
# @PURPOSE: A Pydantic model representing a single, structured log entry associated with a task.
# @TIER: CRITICAL
# @INVARIANT: Each log entry has a unique timestamp and source.
class LogEntry(BaseModel):
    timestamp: datetime = Field(default_factory=datetime.utcnow)
    level: str = Field(default="INFO")
    message: str
    source: str = Field(default="system")  # Component attribution: plugin, superset_api, git, etc.
    context: Optional[Dict[str, Any]] = None  # Legacy field, kept for backward compatibility
    metadata: Optional[Dict[str, Any]] = None  # Structured metadata (e.g., dashboard_id, progress)
# [/DEF:LogEntry:Class]
# [DEF:TaskLog:Class]
# @SEMANTICS: task, log, persistent, pydantic
# @PURPOSE: A Pydantic model representing a persisted log entry from the database.
# @TIER: STANDARD
# @RELATION: MAPS_TO -> TaskLogRecord
class TaskLog(BaseModel):
    id: int
    task_id: str
    timestamp: datetime
    level: str
    source: str
    message: str
    metadata: Optional[Dict[str, Any]] = None

    class Config:
        from_attributes = True
# [/DEF:TaskLog:Class]

# [DEF:LogFilter:Class]
# @SEMANTICS: log, filter, query, pydantic
# @PURPOSE: Filter parameters for querying task logs.
# @TIER: STANDARD
class LogFilter(BaseModel):
    level: Optional[str] = None  # Filter by log level
    source: Optional[str] = None  # Filter by source component
    search: Optional[str] = None  # Text search in message
    offset: int = Field(default=0, ge=0)
    limit: int = Field(default=100, ge=1, le=1000)
# [/DEF:LogFilter:Class]

# [DEF:LogStats:Class]
# @SEMANTICS: log, stats, aggregation, pydantic
# @PURPOSE: Statistics about log entries for a task.
# @TIER: STANDARD
class LogStats(BaseModel):
    total_count: int
    by_level: Dict[str, int]  # {"INFO": 10, "ERROR": 2}
    by_source: Dict[str, int]  # {"plugin": 5, "superset_api": 7}
# [/DEF:LogStats:Class]
# [DEF:Task:Class]
# @TIER: STANDARD
# @SEMANTICS: task, job, execution, state, pydantic
# @PURPOSE: A Pydantic model representing a single execution instance of a plugin, including its status, parameters, and logs.
class Task(BaseModel):

View File

@@ -7,13 +7,13 @@
# [SECTION: IMPORTS]
from datetime import datetime
from typing import List, Optional
import json
from sqlalchemy.orm import Session
from ...models.task import TaskRecord, TaskLogRecord
from ..database import TasksSessionLocal
from .models import Task, TaskStatus, LogEntry, TaskLog, LogFilter, LogStats
from ..logger import logger, belief_scope
# [/SECTION]
@@ -36,6 +36,7 @@ class TaskPersistenceService:
    # @PRE: isinstance(task, Task)
    # @POST: Task record created or updated in database.
    # @PARAM: task (Task) - The task object to persist.
    # @SIDE_EFFECT: Writes to task_records table in tasks.db
    def persist_task(self, task: Task) -> None:
        with belief_scope("TaskPersistenceService.persist_task", f"task_id={task.id}"):
            session: Session = TasksSessionLocal()
@@ -50,8 +51,19 @@ class TaskPersistenceService:
            record.environment_id = task.params.get("environment_id") or task.params.get("source_env_id")
            record.started_at = task.started_at
            record.finished_at = task.finished_at

            # Ensure params and result are JSON serializable
            def json_serializable(obj):
                if isinstance(obj, dict):
                    return {k: json_serializable(v) for k, v in obj.items()}
                elif isinstance(obj, list):
                    return [json_serializable(v) for v in obj]
                elif isinstance(obj, datetime):
                    return obj.isoformat()
                return obj

            record.params = json_serializable(task.params)
            record.result = json_serializable(task.result)

            # Store logs as JSON, converting datetime to string
            record.logs = []
@@ -59,6 +71,9 @@ class TaskPersistenceService:
                log_dict = log.dict()
                if isinstance(log_dict.get('timestamp'), datetime):
                    log_dict['timestamp'] = log_dict['timestamp'].isoformat()
                # Also clean up any datetimes in context
                if log_dict.get('context'):
                    log_dict['context'] = json_serializable(log_dict['context'])
                record.logs.append(log_dict)

            # Extract error if failed
@@ -155,4 +170,215 @@ class TaskPersistenceService:
    # [/DEF:delete_tasks:Function]
# [/DEF:TaskPersistenceService:Class]
# [DEF:TaskLogPersistenceService:Class]
# @SEMANTICS: persistence, service, database, log, sqlalchemy
# @PURPOSE: Provides methods to save and query task logs from the task_logs table.
# @TIER: CRITICAL
# @RELATION: DEPENDS_ON -> TaskLogRecord
# @INVARIANT: Log entries are batch-inserted for performance.
class TaskLogPersistenceService:
    """
    Service for persisting and querying task logs.
    Supports batch inserts, filtering, and statistics.
    """

    # [DEF:__init__:Function]
    # @PURPOSE: Initialize the log persistence service.
    # @POST: Service is ready.
    def __init__(self):
        pass
    # [/DEF:__init__:Function]

    # [DEF:add_logs:Function]
    # @PURPOSE: Batch insert log entries for a task.
    # @PRE: logs is a list of LogEntry objects.
    # @POST: All logs inserted into task_logs table.
    # @PARAM: task_id (str) - The task ID.
    # @PARAM: logs (List[LogEntry]) - Log entries to insert.
    # @SIDE_EFFECT: Writes to task_logs table.
    def add_logs(self, task_id: str, logs: List[LogEntry]) -> None:
        if not logs:
            return
        with belief_scope("TaskLogPersistenceService.add_logs", f"task_id={task_id}"):
            session: Session = TasksSessionLocal()
            try:
                for log in logs:
                    record = TaskLogRecord(
                        task_id=task_id,
                        timestamp=log.timestamp,
                        level=log.level,
                        source=log.source or "system",
                        message=log.message,
                        metadata_json=json.dumps(log.metadata) if log.metadata else None
                    )
                    session.add(record)
                session.commit()
            except Exception as e:
                session.rollback()
                logger.error(f"Failed to add logs for task {task_id}: {e}")
            finally:
                session.close()
    # [/DEF:add_logs:Function]

    # [DEF:get_logs:Function]
    # @PURPOSE: Query logs for a task with filtering and pagination.
    # @PRE: task_id is a valid task ID.
    # @POST: Returns list of TaskLog objects matching filters.
    # @PARAM: task_id (str) - The task ID.
    # @PARAM: log_filter (LogFilter) - Filter parameters.
    # @RETURN: List[TaskLog] - Filtered log entries.
    def get_logs(self, task_id: str, log_filter: LogFilter) -> List[TaskLog]:
        with belief_scope("TaskLogPersistenceService.get_logs", f"task_id={task_id}"):
            session: Session = TasksSessionLocal()
            try:
                query = session.query(TaskLogRecord).filter(TaskLogRecord.task_id == task_id)

                # Apply filters
                if log_filter.level:
                    query = query.filter(TaskLogRecord.level == log_filter.level.upper())
                if log_filter.source:
                    query = query.filter(TaskLogRecord.source == log_filter.source)
                if log_filter.search:
                    search_pattern = f"%{log_filter.search}%"
                    query = query.filter(TaskLogRecord.message.ilike(search_pattern))

                # Order by timestamp ascending (oldest first)
                query = query.order_by(TaskLogRecord.timestamp.asc())

                # Apply pagination
                records = query.offset(log_filter.offset).limit(log_filter.limit).all()

                logs = []
                for record in records:
                    metadata = None
                    if record.metadata_json:
                        try:
                            metadata = json.loads(record.metadata_json)
                        except json.JSONDecodeError:
                            metadata = None
                    logs.append(TaskLog(
                        id=record.id,
                        task_id=record.task_id,
                        timestamp=record.timestamp,
                        level=record.level,
                        source=record.source,
                        message=record.message,
                        metadata=metadata
                    ))
                return logs
            finally:
                session.close()
    # [/DEF:get_logs:Function]

    # [DEF:get_log_stats:Function]
    # @PURPOSE: Get statistics about logs for a task.
    # @PRE: task_id is a valid task ID.
    # @POST: Returns LogStats with counts by level and source.
    # @PARAM: task_id (str) - The task ID.
    # @RETURN: LogStats - Statistics about task logs.
    def get_log_stats(self, task_id: str) -> LogStats:
        with belief_scope("TaskLogPersistenceService.get_log_stats", f"task_id={task_id}"):
            session: Session = TasksSessionLocal()
            try:
                # Get total count
                total_count = session.query(TaskLogRecord).filter(
                    TaskLogRecord.task_id == task_id
                ).count()

                # Get counts by level
                from sqlalchemy import func
                level_counts = session.query(
                    TaskLogRecord.level,
                    func.count(TaskLogRecord.id)
                ).filter(
                    TaskLogRecord.task_id == task_id
                ).group_by(TaskLogRecord.level).all()
                by_level = {level: count for level, count in level_counts}

                # Get counts by source
                source_counts = session.query(
                    TaskLogRecord.source,
                    func.count(TaskLogRecord.id)
                ).filter(
                    TaskLogRecord.task_id == task_id
                ).group_by(TaskLogRecord.source).all()
                by_source = {source: count for source, count in source_counts}

                return LogStats(
                    total_count=total_count,
                    by_level=by_level,
                    by_source=by_source
                )
            finally:
                session.close()
    # [/DEF:get_log_stats:Function]

    # [DEF:get_sources:Function]
    # @PURPOSE: Get unique sources for a task's logs.
    # @PRE: task_id is a valid task ID.
    # @POST: Returns list of unique source strings.
    # @PARAM: task_id (str) - The task ID.
    # @RETURN: List[str] - Unique source names.
    def get_sources(self, task_id: str) -> List[str]:
        with belief_scope("TaskLogPersistenceService.get_sources", f"task_id={task_id}"):
            session: Session = TasksSessionLocal()
            try:
                from sqlalchemy import distinct
                sources = session.query(distinct(TaskLogRecord.source)).filter(
                    TaskLogRecord.task_id == task_id
                ).all()
                return [s[0] for s in sources]
            finally:
                session.close()
    # [/DEF:get_sources:Function]

    # [DEF:delete_logs_for_task:Function]
    # @PURPOSE: Delete all logs for a specific task.
    # @PRE: task_id is a valid task ID.
    # @POST: All logs for the task are deleted.
    # @PARAM: task_id (str) - The task ID.
    # @SIDE_EFFECT: Deletes from task_logs table.
    def delete_logs_for_task(self, task_id: str) -> None:
        with belief_scope("TaskLogPersistenceService.delete_logs_for_task", f"task_id={task_id}"):
            session: Session = TasksSessionLocal()
            try:
                session.query(TaskLogRecord).filter(
                    TaskLogRecord.task_id == task_id
                ).delete(synchronize_session=False)
                session.commit()
            except Exception as e:
                session.rollback()
                logger.error(f"Failed to delete logs for task {task_id}: {e}")
            finally:
                session.close()
    # [/DEF:delete_logs_for_task:Function]

    # [DEF:delete_logs_for_tasks:Function]
    # @PURPOSE: Delete all logs for multiple tasks.
    # @PRE: task_ids is a list of task IDs.
    # @POST: All logs for the tasks are deleted.
    # @PARAM: task_ids (List[str]) - List of task IDs.
    def delete_logs_for_tasks(self, task_ids: List[str]) -> None:
        if not task_ids:
            return
        with belief_scope("TaskLogPersistenceService.delete_logs_for_tasks"):
            session: Session = TasksSessionLocal()
            try:
                session.query(TaskLogRecord).filter(
                    TaskLogRecord.task_id.in_(task_ids)
                ).delete(synchronize_session=False)
                session.commit()
            except Exception as e:
                session.rollback()
                logger.error(f"Failed to delete logs for tasks: {e}")
            finally:
                session.close()
    # [/DEF:delete_logs_for_tasks:Function]
# [/DEF:TaskLogPersistenceService:Class]
# [/DEF:TaskPersistenceModule:Module]
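
A small sketch of the batch write path, assuming in-process use of the service; the task id, messages, and metadata are made up for illustration:

# Hypothetical usage -- LogEntry defaults fill timestamp, level, and source.
svc = TaskLogPersistenceService()
svc.add_logs("abc123", [
    LogEntry(message="Export started", source="plugin"),
    LogEntry(level="ERROR", message="Chart 42 failed", source="superset_api",
             metadata={"chart_id": 42}),
])
stats = svc.get_log_stats("abc123")  # LogStats(total_count=2, by_level={...}, by_source={...})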

View File

@@ -0,0 +1,167 @@
# [DEF:TaskLoggerModule:Module]
# @SEMANTICS: task, logger, context, plugin, attribution
# @PURPOSE: Provides a dedicated logger for tasks with automatic source attribution.
# @LAYER: Core
# @RELATION: DEPENDS_ON -> TaskManager, CALLS -> TaskManager._add_log
# @TIER: CRITICAL
# @INVARIANT: Each TaskLogger instance is bound to a specific task_id and default source.
# [SECTION: IMPORTS]
from typing import Dict, Any, Optional, Callable
# [/SECTION]
# [DEF:TaskLogger:Class]
# @SEMANTICS: logger, task, source, attribution
# @PURPOSE: A wrapper around TaskManager._add_log that carries task_id and source context.
# @TIER: CRITICAL
# @INVARIANT: All log calls include the task_id and source.
# @UX_STATE: Idle -> Logging -> (system records log)
class TaskLogger:
    """
    A dedicated logger for tasks that automatically tags logs with source attribution.

    Usage:
        logger = TaskLogger(task_id="abc123", add_log_fn=task_manager._add_log, source="plugin")
        logger.info("Starting backup process")
        logger.error("Failed to connect", metadata={"error_code": 500})

        # Create sub-logger with different source
        api_logger = logger.with_source("superset_api")
        api_logger.info("Fetching dashboards")
    """

    # [DEF:__init__:Function]
    # @PURPOSE: Initialize the TaskLogger with task context.
    # @PRE: add_log_fn is a callable that accepts (task_id, level, message, context, source, metadata).
    # @POST: TaskLogger is ready to log messages.
    # @PARAM: task_id (str) - The ID of the task.
    # @PARAM: add_log_fn (Callable) - Function to add log to TaskManager.
    # @PARAM: source (str) - Default source for logs (default: "plugin").
    def __init__(
        self,
        task_id: str,
        add_log_fn: Callable,
        source: str = "plugin"
    ):
        self._task_id = task_id
        self._add_log = add_log_fn
        self._default_source = source
    # [/DEF:__init__:Function]

    # [DEF:with_source:Function]
    # @PURPOSE: Create a sub-logger with a different default source.
    # @PRE: source is a non-empty string.
    # @POST: Returns new TaskLogger with the same task_id but different source.
    # @PARAM: source (str) - New default source.
    # @RETURN: TaskLogger - New logger instance.
    def with_source(self, source: str) -> "TaskLogger":
        """Create a sub-logger with a different source context."""
        return TaskLogger(
            task_id=self._task_id,
            add_log_fn=self._add_log,
            source=source
        )
    # [/DEF:with_source:Function]

    # [DEF:_log:Function]
    # @PURPOSE: Internal method to log a message at a given level.
    # @PRE: level is a valid log level string.
    # @POST: Log entry added via add_log_fn.
    # @PARAM: level (str) - Log level (DEBUG, INFO, WARNING, ERROR).
    # @PARAM: message (str) - Log message.
    # @PARAM: source (Optional[str]) - Override source for this log entry.
    # @PARAM: metadata (Optional[Dict]) - Additional structured data.
    def _log(
        self,
        level: str,
        message: str,
        source: Optional[str] = None,
        metadata: Optional[Dict[str, Any]] = None
    ) -> None:
        """Internal logging method."""
        self._add_log(
            task_id=self._task_id,
            level=level,
            message=message,
            source=source or self._default_source,
            metadata=metadata
        )
    # [/DEF:_log:Function]

    # [DEF:debug:Function]
    # @PURPOSE: Log a DEBUG level message.
    # @PARAM: message (str) - Log message.
    # @PARAM: source (Optional[str]) - Override source.
    # @PARAM: metadata (Optional[Dict]) - Additional data.
    def debug(
        self,
        message: str,
        source: Optional[str] = None,
        metadata: Optional[Dict[str, Any]] = None
    ) -> None:
        self._log("DEBUG", message, source, metadata)
    # [/DEF:debug:Function]

    # [DEF:info:Function]
    # @PURPOSE: Log an INFO level message.
    # @PARAM: message (str) - Log message.
    # @PARAM: source (Optional[str]) - Override source.
    # @PARAM: metadata (Optional[Dict]) - Additional data.
    def info(
        self,
        message: str,
        source: Optional[str] = None,
        metadata: Optional[Dict[str, Any]] = None
    ) -> None:
        self._log("INFO", message, source, metadata)
    # [/DEF:info:Function]

    # [DEF:warning:Function]
    # @PURPOSE: Log a WARNING level message.
    # @PARAM: message (str) - Log message.
    # @PARAM: source (Optional[str]) - Override source.
    # @PARAM: metadata (Optional[Dict]) - Additional data.
    def warning(
        self,
        message: str,
        source: Optional[str] = None,
        metadata: Optional[Dict[str, Any]] = None
    ) -> None:
        self._log("WARNING", message, source, metadata)
    # [/DEF:warning:Function]

    # [DEF:error:Function]
    # @PURPOSE: Log an ERROR level message.
    # @PARAM: message (str) - Log message.
    # @PARAM: source (Optional[str]) - Override source.
    # @PARAM: metadata (Optional[Dict]) - Additional data.
    def error(
        self,
        message: str,
        source: Optional[str] = None,
        metadata: Optional[Dict[str, Any]] = None
    ) -> None:
        self._log("ERROR", message, source, metadata)
    # [/DEF:error:Function]

    # [DEF:progress:Function]
    # @PURPOSE: Log a progress update with percentage.
    # @PRE: percent is between 0 and 100.
    # @POST: Log entry with progress metadata added.
    # @PARAM: message (str) - Progress message.
    # @PARAM: percent (float) - Progress percentage (0-100).
    # @PARAM: source (Optional[str]) - Override source.
    def progress(
        self,
        message: str,
        percent: float,
        source: Optional[str] = None
    ) -> None:
        """Log a progress update with percentage."""
        metadata = {"progress": min(100, max(0, percent))}
        self._log("INFO", message, source, metadata)
    # [/DEF:progress:Function]
# [/DEF:TaskLogger:Class]
# [/DEF:TaskLoggerModule:Module]
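
A typical progress-reporting loop built on this logger; the item list is illustrative, and `task_logger` stands in for the logger a plugin would receive via TaskContext.logger:

# Illustrative loop -- task_logger comes from TaskContext.logger in a real plugin.
items = ["dash_1", "dash_2", "dash_3", "dash_4"]
for i, item in enumerate(items, start=1):
    task_logger.progress(f"Exporting {item}", percent=100 * i / len(items))
task_logger.info("Export finished", metadata={"exported": len(items)})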

View File

@@ -11,7 +11,7 @@
# [SECTION: IMPORTS]
import pandas as pd  # type: ignore
import psycopg2  # type: ignore
from typing import Dict, Optional, Any
from ..logger import logger as app_logger, belief_scope
# [/SECTION]

View File

@@ -19,7 +19,6 @@ from datetime import date, datetime
import shutil
import zlib
from dataclasses import dataclass
from ..logger import logger as app_logger, belief_scope
# [/SECTION]

View File

@@ -42,6 +42,8 @@ def suggest_mappings(source_databases: List[Dict], target_databases: List[Dict],
        name, score, index = match
        if score >= threshold:
            suggestions.append({
                "source_db": s_db['database_name'],
                "target_db": target_databases[index]['database_name'],
                "source_db_uuid": s_db['uuid'],
                "target_db_uuid": target_databases[index]['uuid'],
                "confidence": score / 100.0

View File

@@ -118,14 +118,41 @@ class APIClient:
    def _init_session(self) -> requests.Session:
        with belief_scope("_init_session"):
            session = requests.Session()

            # Create a custom adapter that handles TLS issues
            class TLSAdapter(HTTPAdapter):
                def init_poolmanager(self, connections, maxsize, block=False):
                    from urllib3.poolmanager import PoolManager
                    import ssl
                    # Create an SSL context that ignores TLSv1 unrecognized name errors
                    ctx = ssl.create_default_context()
                    ctx.set_ciphers('HIGH:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!SRP:!CAMELLIA')
                    # Ignore TLSV1_UNRECOGNIZED_NAME errors by disabling hostname verification
                    # This is safe when verify_ssl is false (we're already not verifying the certificate)
                    ctx.check_hostname = False
                    self.poolmanager = PoolManager(
                        num_pools=connections,
                        maxsize=maxsize,
                        block=block,
                        ssl_context=ctx
                    )

            retries = Retry(total=3, backoff_factor=0.5, status_forcelist=[500, 502, 503, 504])
            adapter = TLSAdapter(max_retries=retries)
            session.mount('http://', adapter)
            session.mount('https://', adapter)

            if not self.request_settings["verify_ssl"]:
                urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
                app_logger.warning("[_init_session][State] SSL verification disabled.")
                # When verify_ssl is false, we should also disable hostname verification
                session.verify = False
            else:
                session.verify = True
            return session
    # [/DEF:_init_session:Function]
@@ -140,7 +167,16 @@ class APIClient:
            app_logger.info("[authenticate][Enter] Authenticating to %s", self.base_url)
            try:
                login_url = f"{self.base_url}/security/login"
                # Log the payload keys and values (masking password)
                masked_auth = {k: ("******" if k == "password" else v) for k, v in self.auth.items()}
                app_logger.info(f"[authenticate][Debug] Login URL: {login_url}")
                app_logger.info(f"[authenticate][Debug] Auth payload: {masked_auth}")

                response = self.session.post(login_url, json=self.auth, timeout=self.request_settings["timeout"])
                if response.status_code != 200:
                    app_logger.error(f"[authenticate][Error] Status: {response.status_code}, Response: {response.text}")
                response.raise_for_status()
                access_token = response.json()["access_token"]
@@ -153,6 +189,9 @@ class APIClient:
                app_logger.info("[authenticate][Exit] Authenticated successfully.")
                return self._tokens
            except requests.exceptions.HTTPError as e:
                status_code = e.response.status_code if e.response is not None else None
                if status_code in [502, 503, 504]:
                    raise NetworkError(f"Environment unavailable during authentication (Status {status_code})", status_code=status_code) from e
                raise AuthenticationError(f"Authentication failed: {e}") from e
            except (requests.exceptions.RequestException, KeyError) as e:
                raise NetworkError(f"Network or parsing error during authentication: {e}") from e
@@ -165,7 +204,8 @@ class APIClient:
    # @POST: Returns headers including auth tokens.
    def headers(self) -> Dict[str, str]:
        with belief_scope("headers"):
            if not self._authenticated:
                self.authenticate()
            return {
                "Authorization": f"Bearer {self._tokens['access_token']}",
                "X-CSRFToken": self._tokens.get("csrf_token", ""),
@@ -188,7 +228,8 @@ class APIClient:
        with belief_scope("request"):
            full_url = f"{self.base_url}{endpoint}"
            _headers = self.headers.copy()
            if headers:
                _headers.update(headers)

            try:
                response = self.session.request(method, full_url, headers=_headers, **kwargs)
@@ -209,9 +250,14 @@ class APIClient:
    def _handle_http_error(self, e: requests.exceptions.HTTPError, endpoint: str):
        with belief_scope("_handle_http_error"):
            status_code = e.response.status_code
            if status_code == 502 or status_code == 503 or status_code == 504:
                raise NetworkError(f"Environment unavailable (Status {status_code})", status_code=status_code) from e
            if status_code == 404:
                raise DashboardNotFoundError(endpoint) from e
            if status_code == 403:
                raise PermissionDeniedError() from e
            if status_code == 401:
                raise AuthenticationError() from e
            raise SupersetAPIError(f"API Error {status_code}: {e.response.text}") from e
    # [/DEF:_handle_http_error:Function]
@@ -223,9 +269,12 @@ class APIClient:
    # @POST: Raises a NetworkError.
    def _handle_network_error(self, e: requests.exceptions.RequestException, url: str):
        with belief_scope("_handle_network_error"):
            if isinstance(e, requests.exceptions.Timeout):
                msg = "Request timeout"
            elif isinstance(e, requests.exceptions.ConnectionError):
                msg = "Connection error"
            else:
                msg = f"Unknown network error: {e}"
            raise NetworkError(msg, url=url) from e
    # [/DEF:_handle_network_error:Function]
@@ -242,7 +291,9 @@ class APIClient:
    def upload_file(self, endpoint: str, file_info: Dict[str, Any], extra_data: Optional[Dict] = None, timeout: Optional[int] = None) -> Dict:
        with belief_scope("upload_file"):
            full_url = f"{self.base_url}{endpoint}"
            _headers = self.headers.copy()
            _headers.pop('Content-Type', None)

            file_obj, file_name, form_field = file_info.get("file_obj"), file_info.get("file_name"), file_info.get("form_field", "file")
@@ -304,20 +355,40 @@ class APIClient:
    # @PURPOSE: Automatically collects data from all pages of a paginated endpoint.
    # @PARAM: endpoint (str) - The endpoint.
    # @PARAM: pagination_options (Dict[str, Any]) - Pagination options.
    # @PRE: pagination_options must contain 'base_query', 'results_field'. 'total_count' is optional.
    # @POST: Returns all items across all pages.
    # @RETURN: List[Any] - The collected data.
    def fetch_paginated_data(self, endpoint: str, pagination_options: Dict[str, Any]) -> List[Any]:
        with belief_scope("fetch_paginated_data"):
            base_query = pagination_options["base_query"]
            total_count = pagination_options.get("total_count")
            results_field = pagination_options["results_field"]
            count_field = pagination_options.get("count_field", "count")
            page_size = base_query.get('page_size', 1000)
            assert page_size > 0, "'page_size' must be a positive number."

            results = []
            page = 0
            # Fetch first page to get data and total count if not provided
            query = {**base_query, 'page': page}
            response_json = cast(Dict[str, Any], self.request("GET", endpoint, params={"q": json.dumps(query)}))
            first_page_results = response_json.get(results_field, [])
            results.extend(first_page_results)

            if total_count is None:
                total_count = response_json.get(count_field, len(first_page_results))
                app_logger.debug(f"[fetch_paginated_data][State] Total count resolved from first page: {total_count}")

            # Fetch remaining pages
            total_pages = (total_count + page_size - 1) // page_size
            for page in range(1, total_pages):
                query = {**base_query, 'page': page}
                response_json = cast(Dict[str, Any], self.request("GET", endpoint, params={"q": json.dumps(query)}))
                results.extend(response_json.get(results_field, []))
            return results
    # [/DEF:fetch_paginated_data:Function]
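
For example, collecting every dashboard from a Superset list endpoint might look like the following; the endpoint and field names follow Superset's /api/v1 conventions, but treat the exact values as illustrative rather than confirmed by this changeset:

# Illustrative call -- Superset list endpoints typically return {"result": [...], "count": N}.
dashboards = client.fetch_paginated_data(
    "/dashboard/",
    pagination_options={
        "base_query": {"page_size": 100, "order_column": "changed_on", "order_direction": "desc"},
        "results_field": "result",
        # "total_count" omitted: it is resolved from the first page's "count" field.
    },
)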

View File

@@ -1,11 +1,10 @@
# [DEF:Dependencies:Module]
# @SEMANTICS: dependency, injection, singleton, factory, auth, jwt
# @PURPOSE: Manages creation and provision of shared application dependencies, such as PluginLoader and TaskManager, to avoid circular imports.
# @LAYER: Core
# @RELATION: Used by main app and API routers to get access to shared instances.
from pathlib import Path
from fastapi import Depends, HTTPException, status
from fastapi.security import OAuth2PasswordBearer
from jose import JWTError
@@ -13,30 +12,31 @@ from .core.plugin_loader import PluginLoader
from .core.task_manager import TaskManager
from .core.config_manager import ConfigManager
from .core.scheduler import SchedulerService
from .services.resource_service import ResourceService
from .services.mapping_service import MappingService
from .core.database import init_db, get_auth_db
from .core.logger import logger
from .core.auth.jwt import decode_token
from .core.auth.repository import AuthRepository
from .models.auth import User

# Initialize singletons
# Use absolute path relative to this file to ensure plugins are found regardless of CWD
project_root = Path(__file__).parent.parent.parent
config_path = project_root / "config.json"

# Initialize database before services that use persisted configuration.
init_db()
config_manager = ConfigManager(config_path=str(config_path))

# [DEF:get_config_manager:Function]
# @PURPOSE: Dependency injector for ConfigManager.
# @PRE: Global config_manager must be initialized.
# @POST: Returns shared ConfigManager instance.
# @RETURN: ConfigManager - The shared config manager instance.
def get_config_manager() -> ConfigManager:
    """Dependency injector for ConfigManager."""
    return config_manager
# [/DEF:get_config_manager:Function]
plugin_dir = Path(__file__).parent / "plugins"
@@ -51,72 +51,91 @@ logger.info("TaskManager initialized")
scheduler_service = SchedulerService(task_manager, config_manager)
logger.info("SchedulerService initialized")

resource_service = ResourceService()
logger.info("ResourceService initialized")

# [DEF:get_plugin_loader:Function]
# @PURPOSE: Dependency injector for PluginLoader.
# @PRE: Global plugin_loader must be initialized.
# @POST: Returns shared PluginLoader instance.
# @RETURN: PluginLoader - The shared plugin loader instance.
def get_plugin_loader() -> PluginLoader:
    """Dependency injector for PluginLoader."""
    return plugin_loader
# [/DEF:get_plugin_loader:Function]

# [DEF:get_task_manager:Function]
# @PURPOSE: Dependency injector for TaskManager.
# @PRE: Global task_manager must be initialized.
# @POST: Returns shared TaskManager instance.
# @RETURN: TaskManager - The shared task manager instance.
def get_task_manager() -> TaskManager:
    """Dependency injector for TaskManager."""
    return task_manager
# [/DEF:get_task_manager:Function]

# [DEF:get_scheduler_service:Function]
# @PURPOSE: Dependency injector for SchedulerService.
# @PRE: Global scheduler_service must be initialized.
# @POST: Returns shared SchedulerService instance.
# @RETURN: SchedulerService - The shared scheduler service instance.
def get_scheduler_service() -> SchedulerService:
    """Dependency injector for SchedulerService."""
    return scheduler_service
# [/DEF:get_scheduler_service:Function]

# [DEF:get_resource_service:Function]
# @PURPOSE: Dependency injector for ResourceService.
# @PRE: Global resource_service must be initialized.
# @POST: Returns shared ResourceService instance.
# @RETURN: ResourceService - The shared resource service instance.
def get_resource_service() -> ResourceService:
    """Dependency injector for ResourceService."""
    return resource_service
# [/DEF:get_resource_service:Function]

# [DEF:get_mapping_service:Function]
# @PURPOSE: Dependency injector for MappingService.
# @PRE: Global config_manager must be initialized.
# @POST: Returns new MappingService instance.
# @RETURN: MappingService - A new mapping service instance.
def get_mapping_service() -> MappingService:
    """Dependency injector for MappingService."""
    return MappingService(config_manager)
# [/DEF:get_mapping_service:Function]
# [DEF:oauth2_scheme:Variable] # [DEF:oauth2_scheme:Variable]
# @PURPOSE: OAuth2 password bearer scheme for token extraction. # @PURPOSE: OAuth2 password bearer scheme for token extraction.
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="/api/auth/login") oauth2_scheme = OAuth2PasswordBearer(tokenUrl="/api/auth/login")
# [/DEF:oauth2_scheme:Variable] # [/DEF:oauth2_scheme:Variable]
# [DEF:get_current_user:Function] # [DEF:get_current_user:Function]
# @PURPOSE: Dependency for retrieving the currently authenticated user from a JWT. # @PURPOSE: Dependency for retrieving currently authenticated user from a JWT.
# @PRE: JWT token provided in Authorization header. # @PRE: JWT token provided in Authorization header.
# @POST: Returns the User object if token is valid. # @POST: Returns User object if token is valid.
# @THROW: HTTPException 401 if token is invalid or user not found. # @THROW: HTTPException 401 if token is invalid or user not found.
# @PARAM: token (str) - Extracted JWT token. # @PARAM: token (str) - Extracted JWT token.
# @PARAM: db (Session) - Auth database session. # @PARAM: db (Session) - Auth database session.
# @RETURN: User - The authenticated user. # @RETURN: User - The authenticated user.
def get_current_user(token: str = Depends(oauth2_scheme), db = Depends(get_auth_db)): def get_current_user(token: str = Depends(oauth2_scheme), db = Depends(get_auth_db)):
with belief_scope("get_current_user"): credentials_exception = HTTPException(
credentials_exception = HTTPException( status_code=status.HTTP_401_UNAUTHORIZED,
status_code=status.HTTP_401_UNAUTHORIZED, detail="Could not validate credentials",
detail="Could not validate credentials", headers={"WWW-Authenticate": "Bearer"},
headers={"WWW-Authenticate": "Bearer"}, )
) try:
try: payload = decode_token(token)
payload = decode_token(token) username: str = payload.get("sub")
username: str = payload.get("sub") if username is None:
if username is None:
raise credentials_exception
except JWTError:
raise credentials_exception raise credentials_exception
except JWTError:
repo = AuthRepository(db) raise credentials_exception
user = repo.get_user_by_username(username)
if user is None: repo = AuthRepository(db)
raise credentials_exception user = repo.get_user_by_username(username)
return user if user is None:
raise credentials_exception
return user
# [/DEF:get_current_user:Function] # [/DEF:get_current_user:Function]
# [DEF:has_permission:Function] # [DEF:has_permission:Function]
@@ -129,25 +148,24 @@ def get_current_user(token: str = Depends(oauth2_scheme), db = Depends(get_auth_
# @RETURN: User - The authenticated user if permission granted. # @RETURN: User - The authenticated user if permission granted.
def has_permission(resource: str, action: str): def has_permission(resource: str, action: str):
def permission_checker(current_user: User = Depends(get_current_user)): def permission_checker(current_user: User = Depends(get_current_user)):
with belief_scope("has_permission", f"{resource}:{action}"): # Union of all permissions across all roles
# Union of all permissions across all roles for role in current_user.roles:
for role in current_user.roles: for perm in role.permissions:
for perm in role.permissions: if perm.resource == resource and perm.action == action:
if perm.resource == resource and perm.action == action: return current_user
return current_user
# Special case for Admin role (full access)
if any(role.name == "Admin" for role in current_user.roles):
return current_user
from .core.auth.logger import log_security_event
log_security_event("PERMISSION_DENIED", current_user.username, {"resource": resource, "action": action})
# Special case for Admin role (full access) raise HTTPException(
if any(role.name == "Admin" for role in current_user.roles): status_code=status.HTTP_403_FORBIDDEN,
return current_user detail=f"Permission denied for {resource}:{action}"
)
from .core.auth.logger import log_security_event
log_security_event("PERMISSION_DENIED", current_user.username, {"resource": resource, "action": action})
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
detail=f"Permission denied for {resource}:{action}"
)
return permission_checker return permission_checker
# [/DEF:has_permission:Function] # [/DEF:has_permission:Function]
# [/DEF:Dependencies:Module] # [/DEF:Dependencies:Module]
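
Taken together, these injectors are consumed from FastAPI routes. Below is a minimal usage sketch, assuming the package is importable as `src`; the route path, the `tasks`/`read` permission strings, and the `list_tasks()` method are illustrative, not taken from this diff.

```python
# Hypothetical route using the injectors above; names marked illustrative.
from fastapi import APIRouter, Depends

from src.dependencies import get_task_manager, has_permission  # assumes the 'src' package layout

router = APIRouter(prefix="/api/tasks")

@router.get("")
def list_tasks(
    task_manager=Depends(get_task_manager),
    # permission_checker returns the authenticated user or raises 403
    # (and logs a PERMISSION_DENIED security event, per has_permission above)
    user=Depends(has_permission("tasks", "read")),  # "tasks"/"read" are illustrative
):
    return task_manager.list_tasks()  # list_tasks() is an assumed TaskManager method
```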


@@ -0,0 +1,36 @@
+# [DEF:test_models:Module]
+# @TIER: TRIVIAL
+# @PURPOSE: Unit tests for data models
+# @LAYER: Domain
+# @RELATION: VERIFIES -> src.models
+import sys
+from pathlib import Path
+# Add src to path
+sys.path.append(str(Path(__file__).parent.parent.parent.parent / "src"))
+from src.core.config_models import Environment
+from src.core.logger import belief_scope
+# [DEF:test_environment_model:Function]
+# @PURPOSE: Tests that Environment model correctly stores values.
+# @PRE: Environment class is available.
+# @POST: Values are verified.
+def test_environment_model():
+    with belief_scope("test_environment_model"):
+        env = Environment(
+            id="test-id",
+            name="test-env",
+            url="http://localhost:8088/api/v1",
+            username="admin",
+            password="password"
+        )
+        assert env.id == "test-id"
+        assert env.name == "test-env"
+        assert env.url == "http://localhost:8088/api/v1"
+# [/DEF:test_environment_model:Function]
+# [/DEF:test_models:Module]


@@ -11,7 +11,7 @@
 # [SECTION: IMPORTS]
 import uuid
 from datetime import datetime
-from sqlalchemy import Column, String, Boolean, DateTime, ForeignKey, Table, Enum
+from sqlalchemy import Column, String, Boolean, DateTime, ForeignKey, Table
 from sqlalchemy.orm import relationship
 from .mapping import Base
 # [/SECTION]


@@ -0,0 +1,26 @@
+# [DEF:backend.src.models.config:Module]
+#
+# @TIER: STANDARD
+# @SEMANTICS: database, config, settings, sqlalchemy
+# @PURPOSE: Defines database schema for persisted application configuration.
+# @LAYER: Domain
+# @RELATION: DEPENDS_ON -> sqlalchemy
+from sqlalchemy import Column, String, DateTime, JSON
+from sqlalchemy.sql import func
+from .mapping import Base
+# [DEF:AppConfigRecord:Class]
+# @PURPOSE: Stores the single source of truth for application configuration.
+class AppConfigRecord(Base):
+    __tablename__ = "app_configurations"
+    id = Column(String, primary_key=True)
+    payload = Column(JSON, nullable=False)
+    updated_at = Column(DateTime(timezone=True), server_default=func.now(), onupdate=func.now())
+# [/DEF:AppConfigRecord:Class]
+# [/DEF:backend.src.models.config:Module]
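
A sketch of how this single-row table could be read and written, assuming the `SessionLocal` factory seen elsewhere in this diff; the fixed `"global"` row id and the import paths are illustrative conventions, not taken from the diff.

```python
# Illustrative persistence helpers for AppConfigRecord.
from src.core.database import SessionLocal
from src.models.config import AppConfigRecord

CONFIG_ROW_ID = "global"  # assumed convention: one well-known row

def load_config() -> dict | None:
    with SessionLocal() as db:
        record = db.get(AppConfigRecord, CONFIG_ROW_ID)
        return record.payload if record else None

def save_config(payload: dict) -> None:
    with SessionLocal() as db:
        record = db.get(AppConfigRecord, CONFIG_ROW_ID)
        if record is None:
            db.add(AppConfigRecord(id=CONFIG_ROW_ID, payload=payload))
        else:
            record.payload = payload
        db.commit()
```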


@@ -1,5 +1,6 @@
 # [DEF:backend.src.models.connection:Module]
 #
+# @TIER: TRIVIAL
 # @SEMANTICS: database, connection, configuration, sqlalchemy, sqlite
 # @PURPOSE: Defines the database schema for external database connection configurations.
 # @LAYER: Domain
@@ -15,6 +16,7 @@ import uuid
 # [/SECTION]
 # [DEF:ConnectionConfig:Class]
+# @TIER: TRIVIAL
 # @PURPOSE: Stores credentials for external databases used for column mapping.
 class ConnectionConfig(Base):
     __tablename__ = "connection_configs"


@@ -1,4 +1,5 @@
 # [DEF:GitModels:Module]
+# @TIER: TRIVIAL
 # @SEMANTICS: git, models, sqlalchemy, database, schema
 # @PURPOSE: Git-specific SQLAlchemy models for configuration and repository tracking.
 # @LAYER: Model
@@ -7,7 +8,6 @@
 import enum
 from datetime import datetime
 from sqlalchemy import Column, String, Integer, DateTime, Enum, ForeignKey, Boolean
-from sqlalchemy.dialects.postgresql import UUID
 import uuid
 from src.core.database import Base
@@ -26,11 +26,10 @@ class SyncStatus(str, enum.Enum):
     DIRTY = "DIRTY"
     CONFLICT = "CONFLICT"
+# [DEF:GitServerConfig:Class]
+# @TIER: TRIVIAL
+# @PURPOSE: Configuration for a Git server connection.
 class GitServerConfig(Base):
-    """
-    [DEF:GitServerConfig:Class]
-    Configuration for a Git server connection.
-    """
     __tablename__ = "git_server_configs"
     id = Column(String(36), primary_key=True, default=lambda: str(uuid.uuid4()))
@@ -41,12 +40,12 @@ class GitServerConfig(Base):
     default_repository = Column(String(255), nullable=True)
     status = Column(Enum(GitStatus), default=GitStatus.UNKNOWN)
     last_validated = Column(DateTime, default=datetime.utcnow)
+# [/DEF:GitServerConfig:Class]
+# [DEF:GitRepository:Class]
+# @TIER: TRIVIAL
+# @PURPOSE: Tracking for a local Git repository linked to a dashboard.
 class GitRepository(Base):
-    """
-    [DEF:GitRepository:Class]
-    Tracking for a local Git repository linked to a dashboard.
-    """
     __tablename__ = "git_repositories"
     id = Column(String(36), primary_key=True, default=lambda: str(uuid.uuid4()))
@@ -56,12 +55,12 @@ class GitRepository(Base):
     local_path = Column(String(255), nullable=False)
     current_branch = Column(String(255), default="main")
     sync_status = Column(Enum(SyncStatus), default=SyncStatus.CLEAN)
+# [/DEF:GitRepository:Class]
+# [DEF:DeploymentEnvironment:Class]
+# @TIER: TRIVIAL
+# @PURPOSE: Target Superset environments for dashboard deployment.
 class DeploymentEnvironment(Base):
-    """
-    [DEF:DeploymentEnvironment:Class]
-    Target Superset environments for dashboard deployment.
-    """
     __tablename__ = "deployment_environments"
     id = Column(String(36), primary_key=True, default=lambda: str(uuid.uuid4()))
@@ -69,5 +68,6 @@ class DeploymentEnvironment(Base):
     superset_url = Column(String(255), nullable=False)
     superset_token = Column(String(255), nullable=False)
     is_active = Column(Boolean, default=True)
+# [/DEF:DeploymentEnvironment:Class]
 # [/DEF:GitModels:Module]


@@ -5,7 +5,7 @@
 # @LAYER: Domain
 # @RELATION: INHERITS_FROM -> backend.src.models.mapping.Base
-from sqlalchemy import Column, String, Boolean, DateTime, JSON, Enum, Text
+from sqlalchemy import Column, String, Boolean, DateTime, JSON, Text
 from datetime import datetime
 import uuid
 from .mapping import Base


@@ -59,6 +59,7 @@ class DatabaseMapping(Base):
 # [/DEF:DatabaseMapping:Class]
 # [DEF:MigrationJob:Class]
+# @TIER: TRIVIAL
 # @PURPOSE: Represents a single migration execution job.
 class MigrationJob(Base):
     __tablename__ = "migration_jobs"


@@ -1,9 +1,16 @@
+# [DEF:backend.src.models.storage:Module]
+# @TIER: TRIVIAL
+# @SEMANTICS: storage, file, model, pydantic
+# @PURPOSE: Data models for the storage system.
+# @LAYER: Domain
 from datetime import datetime
 from enum import Enum
 from typing import Optional
 from pydantic import BaseModel, Field
 # [DEF:FileCategory:Class]
+# @TIER: TRIVIAL
 # @PURPOSE: Enumeration of supported file categories in the storage system.
 class FileCategory(str, Enum):
     BACKUP = "backups"
@@ -11,15 +18,19 @@ class FileCategory(str, Enum):
 # [/DEF:FileCategory:Class]
 # [DEF:StorageConfig:Class]
+# @TIER: TRIVIAL
 # @PURPOSE: Configuration model for the storage system, defining paths and naming patterns.
 class StorageConfig(BaseModel):
     root_path: str = Field(default="backups", description="Absolute path to the storage root directory.")
+    backup_path: str = Field(default="backups", description="Subpath for backups.")
+    repo_path: str = Field(default="repositorys", description="Subpath for repositories.")
     backup_structure_pattern: str = Field(default="{category}/", description="Pattern for backup directory structure.")
     repo_structure_pattern: str = Field(default="{category}/", description="Pattern for repository directory structure.")
     filename_pattern: str = Field(default="{name}_{timestamp}", description="Pattern for filenames.")
 # [/DEF:StorageConfig:Class]
 # [DEF:StoredFile:Class]
+# @TIER: TRIVIAL
 # @PURPOSE: Data model representing metadata for a file stored in the system.
 class StoredFile(BaseModel):
     name: str = Field(..., description="Name of the file (including extension).")
@@ -28,4 +39,6 @@ class StoredFile(BaseModel):
     created_at: datetime = Field(..., description="Creation timestamp.")
     category: FileCategory = Field(..., description="Category of the file.")
     mime_type: Optional[str] = Field(None, description="MIME type of the file.")
 # [/DEF:StoredFile:Class]
+# [/DEF:backend.src.models.storage:Module]
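
The pattern fields are plain `str.format` templates. Below is a sketch of resolving a concrete backup path from them; the import path and timestamp format are assumptions, since the storage service that does the real formatting is not shown in this diff.

```python
from datetime import datetime
from pathlib import Path

from src.models.storage import StorageConfig, FileCategory  # path assumed from the module name above

def resolve_backup_path(cfg: StorageConfig, name: str, category: FileCategory) -> Path:
    # Fill the "{category}/" directory pattern and "{name}_{timestamp}" filename pattern.
    folder = cfg.backup_structure_pattern.format(category=category.value)
    stamp = datetime.now().strftime("%Y%m%dT%H%M%S")  # assumed timestamp format
    filename = cfg.filename_pattern.format(name=name, timestamp=stamp)
    return Path(cfg.root_path) / cfg.backup_path / folder / filename

# With the defaults above: resolve_backup_path(StorageConfig(), "sales", FileCategory.BACKUP)
# -> backups/backups/backups/sales_<timestamp>
```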


@@ -1,5 +1,6 @@
 # [DEF:backend.src.models.task:Module]
 #
+# @TIER: TRIVIAL
 # @SEMANTICS: database, task, record, sqlalchemy, sqlite
 # @PURPOSE: Defines the database schema for task execution records.
 # @LAYER: Domain
@@ -8,13 +9,14 @@
 # @INVARIANT: All primary keys are UUID strings.
 # [SECTION: IMPORTS]
-from sqlalchemy import Column, String, DateTime, JSON, ForeignKey
+from sqlalchemy import Column, String, DateTime, JSON, ForeignKey, Text, Integer, Index
 from sqlalchemy.sql import func
 from .mapping import Base
 import uuid
 # [/SECTION]
 # [DEF:TaskRecord:Class]
+# @TIER: TRIVIAL
 # @PURPOSE: Represents a persistent record of a task execution.
 class TaskRecord(Base):
     __tablename__ = "task_records"
@@ -25,11 +27,35 @@ class TaskRecord(Base):
     environment_id = Column(String, ForeignKey("environments.id"), nullable=True)
     started_at = Column(DateTime(timezone=True), nullable=True)
     finished_at = Column(DateTime(timezone=True), nullable=True)
-    logs = Column(JSON, nullable=True)  # Store structured logs as JSON
+    logs = Column(JSON, nullable=True)  # Store structured logs as JSON (legacy, kept for backward compatibility)
     error = Column(String, nullable=True)
     result = Column(JSON, nullable=True)
     created_at = Column(DateTime(timezone=True), server_default=func.now())
     params = Column(JSON, nullable=True)
 # [/DEF:TaskRecord:Class]
+# [DEF:TaskLogRecord:Class]
+# @PURPOSE: Represents a single persistent log entry for a task.
+# @TIER: CRITICAL
+# @RELATION: DEPENDS_ON -> TaskRecord
+# @INVARIANT: Each log entry belongs to exactly one task.
+class TaskLogRecord(Base):
+    __tablename__ = "task_logs"
+    id = Column(Integer, primary_key=True, autoincrement=True)
+    task_id = Column(String, ForeignKey("task_records.id", ondelete="CASCADE"), nullable=False, index=True)
+    timestamp = Column(DateTime(timezone=True), nullable=False, index=True)
+    level = Column(String(16), nullable=False)  # INFO, WARNING, ERROR, DEBUG
+    source = Column(String(64), nullable=False, default="system")  # plugin, superset_api, git, etc.
+    message = Column(Text, nullable=False)
+    metadata_json = Column(Text, nullable=True)  # JSON string for additional metadata
+    # Composite indexes for efficient filtering
+    __table_args__ = (
+        Index('ix_task_logs_task_timestamp', 'task_id', 'timestamp'),
+        Index('ix_task_logs_task_level', 'task_id', 'level'),
+        Index('ix_task_logs_task_source', 'task_id', 'source'),
+    )
+# [/DEF:TaskLogRecord:Class]
 # [/DEF:backend.src.models.task:Module]
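
Because each composite index leads with `task_id`, per-task queries filtered by level or ordered by time stay index-backed. A minimal query sketch, assuming the `SessionLocal` factory used elsewhere in this diff; the import path is inferred from the module name above.

```python
from src.core.database import SessionLocal
from src.models.task import TaskLogRecord  # import path assumed

def fetch_task_logs(task_id: str, level: str | None = None, limit: int = 200):
    with SessionLocal() as db:
        query = db.query(TaskLogRecord).filter(TaskLogRecord.task_id == task_id)
        if level:
            # Served by ix_task_logs_task_level
            query = query.filter(TaskLogRecord.level == level)
        # Served by ix_task_logs_task_timestamp
        return query.order_by(TaskLogRecord.timestamp.asc()).limit(limit).all()
```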


@@ -5,13 +5,14 @@
 # @RELATION: IMPLEMENTS -> PluginBase
 # @RELATION: DEPENDS_ON -> superset_tool.client
 # @RELATION: DEPENDS_ON -> superset_tool.utils
+# @RELATION: USES -> TaskContext
-from typing import Dict, Any
+from typing import Dict, Any, Optional
 from pathlib import Path
 from requests.exceptions import RequestException
 from ..core.plugin_base import PluginBase
-from ..core.logger import belief_scope
+from ..core.logger import belief_scope, logger as app_logger
 from ..core.superset_client import SupersetClient
 from ..core.utils.network import SupersetAPIError
 from ..core.utils.fileio import (
@@ -23,6 +24,7 @@ from ..core.utils.fileio import (
     RetentionPolicy
 )
 from ..dependencies import get_config_manager
+from ..core.task_manager.context import TaskContext
 # [DEF:BackupPlugin:Class]
 # @PURPOSE: Implementation of the backup plugin logic.
@@ -93,7 +95,7 @@ class BackupPlugin(PluginBase):
     with belief_scope("get_schema"):
         config_manager = get_config_manager()
         envs = [e.name for e in config_manager.get_environments()]
-        default_path = config_manager.get_config().settings.storage.root_path
+        config_manager.get_config().settings.storage.root_path
         return {
             "type": "object",
@@ -110,14 +112,22 @@ class BackupPlugin(PluginBase):
 # [/DEF:get_schema:Function]
 # [DEF:execute:Function]
-# @PURPOSE: Executes the dashboard backup logic.
+# @PURPOSE: Executes the dashboard backup logic with TaskContext support.
-# @PARAM: params (Dict[str, Any]) - Backup parameters (env, backup_path).
+# @PARAM: params (Dict[str, Any]) - Backup parameters (env, backup_path, dashboard_ids).
+# @PARAM: context (Optional[TaskContext]) - Task context for logging with source attribution.
 # @PRE: Target environment must be configured. params must be a dictionary.
 # @POST: All dashboards are exported and archived.
-async def execute(self, params: Dict[str, Any]):
+async def execute(self, params: Dict[str, Any], context: Optional[TaskContext] = None):
     with belief_scope("execute"):
         config_manager = get_config_manager()
-        env_id = params.get("environment_id")
+        # Support both parameter names: environment_id (for task creation) and env (for direct calls)
+        env_id = params.get("environment_id") or params.get("env")
+        dashboard_ids = params.get("dashboard_ids") or params.get("dashboards")
+        # Log the incoming parameters for debugging
+        log = context.logger if context else app_logger
+        log.info(f"Backup parameters received: env_id={env_id}, dashboard_ids={dashboard_ids}")
         # Resolve environment name if environment_id is provided
         if env_id:
@@ -128,13 +138,21 @@ class BackupPlugin(PluginBase):
             env = params.get("env")
             if not env:
                 raise KeyError("env")
+        log.info(f"Backup started for environment: {env}, selected dashboards: {dashboard_ids}")
         storage_settings = config_manager.get_config().settings.storage
         # Use 'backups' subfolder within the storage root
         backup_path = Path(storage_settings.root_path) / "backups"
-        from ..core.logger import logger as app_logger
-        app_logger.info(f"[BackupPlugin][Entry] Starting backup for {env}.")
+        # Use TaskContext logger if available, otherwise fall back to app_logger
+        log = context.logger if context else app_logger
+        # Create sub-loggers for different components
+        superset_log = log.with_source("superset_api") if context else log
+        storage_log = log.with_source("storage") if context else log
+        log.info(f"Starting backup for environment: {env}")
         try:
             config_manager = get_config_manager()
@@ -147,25 +165,43 @@ class BackupPlugin(PluginBase):
             client = SupersetClient(env_config)
-            dashboard_count, dashboard_meta = client.get_dashboards()
-            app_logger.info(f"[BackupPlugin][Progress] Found {dashboard_count} dashboards to export in {env}.")
+            # Get all dashboards
+            all_dashboard_count, all_dashboard_meta = client.get_dashboards()
+            superset_log.info(f"Found {all_dashboard_count} total dashboards in environment")
+            # Filter dashboards if specific IDs are provided
+            if dashboard_ids:
+                dashboard_ids_int = [int(did) for did in dashboard_ids]
+                dashboard_meta = [db for db in all_dashboard_meta if db.get('id') in dashboard_ids_int]
+                dashboard_count = len(dashboard_meta)
+                superset_log.info(f"Filtered to {dashboard_count} selected dashboards: {dashboard_ids_int}")
+            else:
+                dashboard_count = all_dashboard_count
+                superset_log.info("No dashboard filter applied - backing up all dashboards")
+                dashboard_meta = all_dashboard_meta
             if dashboard_count == 0:
-                app_logger.info("[BackupPlugin][Exit] No dashboards to back up.")
+                log.info("No dashboards to back up")
                 return
-            for db in dashboard_meta:
+            total = len(dashboard_meta)
+            for idx, db in enumerate(dashboard_meta, 1):
                 dashboard_id = db.get('id')
                 dashboard_title = db.get('dashboard_title', 'Unknown Dashboard')
                 if not dashboard_id:
                     continue
+                # Report progress
+                progress_pct = (idx / total) * 100
+                log.progress(f"Backing up dashboard: {dashboard_title}", percent=progress_pct)
                 try:
                     dashboard_base_dir_name = sanitize_filename(f"{dashboard_title}")
                     dashboard_dir = backup_path / env.upper() / dashboard_base_dir_name
                     dashboard_dir.mkdir(parents=True, exist_ok=True)
                     zip_content, filename = client.export_dashboard(dashboard_id)
+                    superset_log.debug(f"Exported dashboard: {dashboard_title}")
                     save_and_unpack_dashboard(
                         zip_content=zip_content,
@@ -175,18 +211,19 @@ class BackupPlugin(PluginBase):
                     )
                     archive_exports(str(dashboard_dir), policy=RetentionPolicy())
+                    storage_log.debug(f"Archived dashboard: {dashboard_title}")
                 except (SupersetAPIError, RequestException, IOError, OSError) as db_error:
-                    app_logger.error(f"[BackupPlugin][Failure] Failed to export dashboard {dashboard_title} (ID: {dashboard_id}): {db_error}", exc_info=True)
+                    log.error(f"Failed to export dashboard {dashboard_title} (ID: {dashboard_id}): {db_error}")
                     continue
             consolidate_archive_folders(backup_path / env.upper())
             remove_empty_directories(str(backup_path / env.upper()))
-            app_logger.info(f"[BackupPlugin][CoherenceCheck:Passed] Backup logic completed for {env}.")
+            log.info(f"Backup completed successfully for {env}")
         except (RequestException, IOError, KeyError) as e:
-            app_logger.critical(f"[BackupPlugin][Failure] Fatal error during backup for {env}: {e}", exc_info=True)
+            log.error(f"Fatal error during backup for {env}: {e}")
             raise e
 # [/DEF:execute:Function]
 # [/DEF:BackupPlugin:Class]
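
The filtering and progress arithmetic added above are easy to verify in isolation. Below is a self-contained sketch that mirrors that logic; the names are illustrative, and the real code lives in `BackupPlugin.execute`.

```python
from typing import Any, Dict, List, Optional

def select_dashboards(meta: List[Dict[str, Any]], ids: Optional[List[Any]]) -> List[Dict[str, Any]]:
    if not ids:
        return meta  # no filter: back up everything
    wanted = {int(i) for i in ids}  # ids may arrive as strings from the API
    return [d for d in meta if d.get("id") in wanted]

meta = [{"id": 1, "dashboard_title": "Sales"}, {"id": 2, "dashboard_title": "Ops"}]
selected = select_dashboards(meta, ["2"])
for idx, dashboard in enumerate(selected, 1):
    percent = (idx / len(selected)) * 100  # same value the plugin passes to log.progress
    print(f"{percent:.0f}% - backing up {dashboard['dashboard_title']}")
```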


@@ -3,6 +3,7 @@
 # @PURPOSE: Implements a plugin for system diagnostics and debugging Superset API responses.
 # @LAYER: Plugins
 # @RELATION: Inherits from PluginBase. Uses SupersetClient from core.
+# @RELATION: USES -> TaskContext
 # @CONSTRAINT: Must use belief_scope for logging.
 # [SECTION: IMPORTS]
@@ -10,6 +11,7 @@ from typing import Dict, Any, Optional
 from ..core.plugin_base import PluginBase
 from ..core.superset_client import SupersetClient
 from ..core.logger import logger, belief_scope
+from ..core.task_manager.context import TaskContext
 # [/SECTION]
 # [DEF:DebugPlugin:Class]
@@ -114,20 +116,29 @@ class DebugPlugin(PluginBase):
 # [/DEF:get_schema:Function]
 # [DEF:execute:Function]
-# @PURPOSE: Executes the debug logic.
+# @PURPOSE: Executes the debug logic with TaskContext support.
 # @PARAM: params (Dict[str, Any]) - Debug parameters.
+# @PARAM: context (Optional[TaskContext]) - Task context for logging with source attribution.
 # @PRE: action must be provided in params.
 # @POST: Debug action is executed and results returned.
 # @RETURN: Dict[str, Any] - Execution results.
-async def execute(self, params: Dict[str, Any]) -> Dict[str, Any]:
+async def execute(self, params: Dict[str, Any], context: Optional[TaskContext] = None) -> Dict[str, Any]:
     with belief_scope("execute"):
         action = params.get("action")
+        # Use TaskContext logger if available, otherwise fall back to app logger
+        log = context.logger if context else logger
+        debug_log = log.with_source("debug") if context else log
+        superset_log = log.with_source("superset_api") if context else log
+        debug_log.info(f"Executing debug action: {action}")
         if action == "test-db-api":
-            return await self._test_db_api(params)
+            return await self._test_db_api(params, superset_log)
         elif action == "get-dataset-structure":
-            return await self._get_dataset_structure(params)
+            return await self._get_dataset_structure(params, superset_log)
         else:
+            debug_log.error(f"Unknown action: {action}")
             raise ValueError(f"Unknown action: {action}")
 # [/DEF:execute:Function]
@@ -136,33 +147,37 @@ class DebugPlugin(PluginBase):
 # @PRE: source_env and target_env params exist in params.
 # @POST: Returns DB counts for both envs.
 # @PARAM: params (Dict) - Plugin parameters.
+# @PARAM: log - Logger instance for superset_api source.
 # @RETURN: Dict - Comparison results.
-async def _test_db_api(self, params: Dict[str, Any]) -> Dict[str, Any]:
+async def _test_db_api(self, params: Dict[str, Any], log) -> Dict[str, Any]:
     with belief_scope("_test_db_api"):
         source_env_name = params.get("source_env")
         target_env_name = params.get("target_env")
         if not source_env_name or not target_env_name:
             raise ValueError("source_env and target_env are required for test-db-api")
         from ..dependencies import get_config_manager
         config_manager = get_config_manager()
         results = {}
         for name in [source_env_name, target_env_name]:
+            log.info(f"Testing database API for environment: {name}")
             env_config = config_manager.get_environment(name)
             if not env_config:
+                log.error(f"Environment '{name}' not found.")
                 raise ValueError(f"Environment '{name}' not found.")
             client = SupersetClient(env_config)
             client.authenticate()
             count, dbs = client.get_databases()
+            log.debug(f"Found {count} databases in {name}")
             results[name] = {
                 "count": count,
                 "databases": dbs
             }
         return results
 # [/DEF:_test_db_api:Function]
 # [DEF:_get_dataset_structure:Function]
@@ -170,26 +185,31 @@ class DebugPlugin(PluginBase):
 # @PRE: env and dataset_id params exist in params.
 # @POST: Returns dataset JSON structure.
 # @PARAM: params (Dict) - Plugin parameters.
+# @PARAM: log - Logger instance for superset_api source.
 # @RETURN: Dict - Dataset structure.
-async def _get_dataset_structure(self, params: Dict[str, Any]) -> Dict[str, Any]:
+async def _get_dataset_structure(self, params: Dict[str, Any], log) -> Dict[str, Any]:
     with belief_scope("_get_dataset_structure"):
         env_name = params.get("env")
         dataset_id = params.get("dataset_id")
         if not env_name or dataset_id is None:
             raise ValueError("env and dataset_id are required for get-dataset-structure")
+        log.info(f"Fetching structure for dataset {dataset_id} in {env_name}")
         from ..dependencies import get_config_manager
         config_manager = get_config_manager()
         env_config = config_manager.get_environment(env_name)
         if not env_config:
+            log.error(f"Environment '{env_name}' not found.")
             raise ValueError(f"Environment '{env_name}' not found.")
         client = SupersetClient(env_config)
         client.authenticate()
         dataset_response = client.get_dataset(dataset_id)
+        log.debug(f"Retrieved dataset structure for {dataset_id}")
         return dataset_response.get('result') or {}
 # [/DEF:_get_dataset_structure:Function]
 # [/DEF:DebugPlugin:Class]
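
The change throughout this file is one dependency-injection move: helpers receive a source-scoped logger instead of importing a global one. A standalone sketch of the pattern with stdlib `logging`, where `getChild` stands in for the project's `with_source`; the function and payload are illustrative.

```python
import logging

def fetch_dataset_structure(dataset_id: int, log: logging.Logger) -> dict:
    # The helper never touches a global logger; callers decide the source.
    log.info("Fetching structure for dataset %s", dataset_id)
    return {"id": dataset_id}  # placeholder payload

logging.basicConfig(level=logging.INFO)
task_log = logging.getLogger("task")
superset_log = task_log.getChild("superset_api")  # analogous to log.with_source("superset_api")
fetch_dataset_structure(42, superset_log)
```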


@@ -5,10 +5,9 @@
 # @LAYER: Domain
 # @RELATION: DEPENDS_ON -> backend.src.plugins.llm_analysis.service.LLMClient
-from typing import List, Optional
+from typing import List
 from tenacity import retry, stop_after_attempt, wait_exponential
 from ..llm_analysis.service import LLMClient
-from ..llm_analysis.models import LLMProviderType
 from ...core.logger import belief_scope, logger
 # [DEF:GitLLMExtension:Class]
@@ -61,6 +60,7 @@ class GitLLMExtension:
             return "Update dashboard configurations (LLM generation failed)"
         return response.choices[0].message.content.strip()
+# [/DEF:suggest_commit_message:Function]
 # [/DEF:GitLLMExtension:Class]
 # [/DEF:backend/src/plugins/git/llm_extension:Module]


@@ -7,6 +7,7 @@
 # @RELATION: USES -> src.services.git_service.GitService
 # @RELATION: USES -> src.core.superset_client.SupersetClient
 # @RELATION: USES -> src.core.config_manager.ConfigManager
+# @RELATION: USES -> TaskContext
 #
 # @INVARIANT: All Git operations must go through GitService.
 # @CONSTRAINT: The plugin works only with unpacked Superset YAML exports.
@@ -20,9 +21,10 @@ from pathlib import Path
 from typing import Dict, Any, Optional
 from src.core.plugin_base import PluginBase
 from src.services.git_service import GitService
-from src.core.logger import logger, belief_scope
+from src.core.logger import logger as app_logger, belief_scope
 from src.core.config_manager import ConfigManager
 from src.core.superset_client import SupersetClient
+from src.core.task_manager.context import TaskContext
 # [/SECTION]
 # [DEF:GitPlugin:Class]
@@ -35,7 +37,7 @@ class GitPlugin(PluginBase):
 # @POST: git_service and config_manager are initialized.
 def __init__(self):
     with belief_scope("GitPlugin.__init__"):
-        logger.info("[GitPlugin.__init__][Entry] Initializing GitPlugin.")
+        app_logger.info("Initializing GitPlugin.")
         self.git_service = GitService()
         # Robust config path resolution:
@@ -50,13 +52,13 @@ class GitPlugin(PluginBase):
         try:
             from src.dependencies import config_manager
             self.config_manager = config_manager
-            logger.info("[GitPlugin.__init__][Exit] GitPlugin initialized using shared config_manager.")
+            app_logger.info("GitPlugin initialized using shared config_manager.")
             return
-        except:
+        except Exception:
             config_path = "config.json"
         self.config_manager = ConfigManager(config_path)
-        logger.info(f"[GitPlugin.__init__][Exit] GitPlugin initialized with {config_path}")
+        app_logger.info(f"GitPlugin initialized with {config_path}")
 # [/DEF:__init__:Function]
 @property
@@ -133,36 +135,44 @@ class GitPlugin(PluginBase):
 # @POST: The plugin is ready to execute tasks.
 async def initialize(self):
     with belief_scope("GitPlugin.initialize"):
-        logger.info("[GitPlugin.initialize][Action] Initializing Git Integration Plugin logic.")
+        app_logger.info("[GitPlugin.initialize][Action] Initializing Git Integration Plugin logic.")
 # [DEF:execute:Function]
-# @PURPOSE: Main method for executing plugin tasks.
+# @PURPOSE: Main method for executing plugin tasks, with TaskContext support.
 # @PRE: task_data contains 'operation' and 'dashboard_id'.
 # @POST: Returns the result of the operation.
 # @PARAM: task_data (Dict[str, Any]) - Task data.
+# @PARAM: context (Optional[TaskContext]) - Task context for logging with source attribution.
 # @RETURN: Dict[str, Any] - Status and message.
 # @RELATION: CALLS -> self._handle_sync
 # @RELATION: CALLS -> self._handle_deploy
-async def execute(self, task_data: Dict[str, Any]) -> Dict[str, Any]:
+async def execute(self, task_data: Dict[str, Any], context: Optional[TaskContext] = None) -> Dict[str, Any]:
     with belief_scope("GitPlugin.execute"):
         operation = task_data.get("operation")
         dashboard_id = task_data.get("dashboard_id")
-        logger.info(f"[GitPlugin.execute][Entry] Executing operation: {operation} for dashboard {dashboard_id}")
+        # Use TaskContext logger if available, otherwise fall back to app_logger
+        log = context.logger if context else app_logger
+        # Create sub-loggers for different components
+        git_log = log.with_source("git") if context else log
+        superset_log = log.with_source("superset_api") if context else log
+        log.info(f"Executing operation: {operation} for dashboard {dashboard_id}")
         if operation == "sync":
             source_env_id = task_data.get("source_env_id")
-            result = await self._handle_sync(dashboard_id, source_env_id)
+            result = await self._handle_sync(dashboard_id, source_env_id, log, git_log, superset_log)
         elif operation == "deploy":
             env_id = task_data.get("environment_id")
-            result = await self._handle_deploy(dashboard_id, env_id)
+            result = await self._handle_deploy(dashboard_id, env_id, log, git_log, superset_log)
         elif operation == "history":
             result = {"status": "success", "message": "History available via API"}
         else:
-            logger.error(f"[GitPlugin.execute][Coherence:Failed] Unknown operation: {operation}")
+            log.error(f"Unknown operation: {operation}")
             raise ValueError(f"Unknown operation: {operation}")
-        logger.info(f"[GitPlugin.execute][Exit] Operation {operation} completed.")
+        log.info(f"Operation {operation} completed.")
        return result
 # [/DEF:execute:Function]
@@ -176,13 +186,13 @@ class GitPlugin(PluginBase):
 # @SIDE_EFFECT: Modifies files in the local working directory of the repository.
 # @RELATION: CALLS -> src.services.git_service.GitService.get_repo
 # @RELATION: CALLS -> src.core.superset_client.SupersetClient.export_dashboard
-async def _handle_sync(self, dashboard_id: int, source_env_id: Optional[str] = None) -> Dict[str, str]:
+async def _handle_sync(self, dashboard_id: int, source_env_id: Optional[str] = None, log=None, git_log=None, superset_log=None) -> Dict[str, str]:
     with belief_scope("GitPlugin._handle_sync"):
         try:
             # 1. Fetch the repository
             repo = self.git_service.get_repo(dashboard_id)
             repo_path = Path(repo.working_dir)
-            logger.info(f"[_handle_sync][Action] Target repo path: {repo_path}")
+            git_log.info(f"Target repo path: {repo_path}")
             # 2. Set up the Superset client
             env = self._get_env(source_env_id)
@@ -190,11 +200,11 @@ class GitPlugin(PluginBase):
             client.authenticate()
             # 3. Export the dashboard
-            logger.info(f"[_handle_sync][Action] Exporting dashboard {dashboard_id} from {env.name}")
+            superset_log.info(f"Exporting dashboard {dashboard_id} from {env.name}")
             zip_bytes, _ = client.export_dashboard(dashboard_id)
             # 4. Unpack with structure flattening
-            logger.info(f"[_handle_sync][Action] Unpacking export to {repo_path}")
+            git_log.info(f"Unpacking export to {repo_path}")
             # Folders/files we expect from a Superset export
             managed_dirs = ["dashboards", "charts", "datasets", "databases"]
@@ -218,7 +228,7 @@ class GitPlugin(PluginBase):
                 raise ValueError("Export ZIP is empty")
             root_folder = namelist[0].split('/')[0]
-            logger.info(f"[_handle_sync][Action] Detected root folder in ZIP: {root_folder}")
+            git_log.info(f"Detected root folder in ZIP: {root_folder}")
             for member in zf.infolist():
                 if member.filename.startswith(root_folder + "/") and len(member.filename) > len(root_folder) + 1:
@@ -236,15 +246,15 @@ class GitPlugin(PluginBase):
             # 5. Automatically stage changes (not a commit, so the user can review the diff)
             try:
                 repo.git.add(A=True)
-                logger.info(f"[_handle_sync][Action] Changes staged in git")
+                app_logger.info("[_handle_sync][Action] Changes staged in git")
             except Exception as ge:
-                logger.warning(f"[_handle_sync][Action] Failed to stage changes: {ge}")
+                app_logger.warning(f"[_handle_sync][Action] Failed to stage changes: {ge}")
-            logger.info(f"[_handle_sync][Coherence:OK] Dashboard {dashboard_id} synced successfully.")
+            app_logger.info(f"[_handle_sync][Coherence:OK] Dashboard {dashboard_id} synced successfully.")
             return {"status": "success", "message": "Dashboard synced and flattened in local repository"}
         except Exception as e:
-            logger.error(f"[_handle_sync][Coherence:Failed] Sync failed: {e}")
+            app_logger.error(f"[_handle_sync][Coherence:Failed] Sync failed: {e}")
             raise
 # [/DEF:_handle_sync:Function]
@@ -254,10 +264,13 @@ class GitPlugin(PluginBase):
 # @POST: The dashboard is imported into the target Superset.
 # @PARAM: dashboard_id (int) - Dashboard ID.
 # @PARAM: env_id (str) - Target environment ID.
+# @PARAM: log - Main logger instance.
+# @PARAM: git_log - Git-specific logger instance.
+# @PARAM: superset_log - Superset API-specific logger instance.
 # @RETURN: Dict[str, Any] - Deployment result.
 # @SIDE_EFFECT: Creates and removes a temporary ZIP file.
 # @RELATION: CALLS -> src.core.superset_client.SupersetClient.import_dashboard
-async def _handle_deploy(self, dashboard_id: int, env_id: str) -> Dict[str, Any]:
+async def _handle_deploy(self, dashboard_id: int, env_id: str, log=None, git_log=None, superset_log=None) -> Dict[str, Any]:
     with belief_scope("GitPlugin._handle_deploy"):
         try:
             if not env_id:
@@ -268,7 +281,7 @@ class GitPlugin(PluginBase):
             repo_path = Path(repo.working_dir)
             # 2. Pack into a ZIP
-            logger.info(f"[_handle_deploy][Action] Packing repository {repo_path} for deployment.")
+            git_log.info(f"Packing repository {repo_path} for deployment.")
             zip_buffer = io.BytesIO()
             # Superset expects a root directory in the ZIP (e.g., dashboard_export_20240101T000000/)
@@ -279,7 +292,8 @@
                 if ".git" in dirs:
                     dirs.remove(".git")
                 for file in files:
-                    if file == ".git" or file.endswith(".zip"): continue
+                    if file == ".git" or file.endswith(".zip"):
+                        continue
                     file_path = Path(root) / file
                     # Prepend the root directory name to the archive path
                     arcname = Path(root_dir_name) / file_path.relative_to(repo_path)
@@ -297,21 +311,21 @@
             # 4. Import
             temp_zip_path = repo_path / f"deploy_{dashboard_id}.zip"
-            logger.info(f"[_handle_deploy][Action] Saving temporary zip to {temp_zip_path}")
+            git_log.info(f"Saving temporary zip to {temp_zip_path}")
             with open(temp_zip_path, "wb") as f:
                 f.write(zip_buffer.getvalue())
             try:
-                logger.info(f"[_handle_deploy][Action] Importing dashboard to {env.name}")
+                app_logger.info(f"[_handle_deploy][Action] Importing dashboard to {env.name}")
                 result = client.import_dashboard(temp_zip_path)
-                logger.info(f"[_handle_deploy][Coherence:OK] Deployment successful for dashboard {dashboard_id}.")
+                app_logger.info(f"[_handle_deploy][Coherence:OK] Deployment successful for dashboard {dashboard_id}.")
                 return {"status": "success", "message": f"Dashboard deployed to {env.name}", "details": result}
             finally:
                 if temp_zip_path.exists():
                     os.remove(temp_zip_path)
         except Exception as e:
-            logger.error(f"[_handle_deploy][Coherence:Failed] Deployment failed: {e}")
+            app_logger.error(f"[_handle_deploy][Coherence:Failed] Deployment failed: {e}")
             raise
 # [/DEF:_handle_deploy:Function]
@@ -323,13 +337,13 @@
 # @RETURN: Environment - Environment configuration object.
 def _get_env(self, env_id: Optional[str] = None):
     with belief_scope("GitPlugin._get_env"):
-        logger.info(f"[_get_env][Entry] Fetching environment for ID: {env_id}")
+        app_logger.info(f"[_get_env][Entry] Fetching environment for ID: {env_id}")
         # Priority 1: ConfigManager (config.json)
         if env_id:
             env = self.config_manager.get_environment(env_id)
             if env:
-                logger.info(f"[_get_env][Exit] Found environment by ID in ConfigManager: {env.name}")
+                app_logger.info(f"[_get_env][Exit] Found environment by ID in ConfigManager: {env.name}")
                 return env
         # Priority 2: Database (DeploymentEnvironment)
@@ -342,12 +356,12 @@
             db_env = db.query(DeploymentEnvironment).filter(DeploymentEnvironment.id == env_id).first()
         else:
             # If no ID, try to find active or any environment in DB
-            db_env = db.query(DeploymentEnvironment).filter(DeploymentEnvironment.is_active == True).first()
+            db_env = db.query(DeploymentEnvironment).filter(DeploymentEnvironment.is_active).first()
         if not db_env:
             db_env = db.query(DeploymentEnvironment).first()
         if db_env:
-            logger.info(f"[_get_env][Exit] Found environment in DB: {db_env.name}")
+            app_logger.info(f"[_get_env][Exit] Found environment in DB: {db_env.name}")
             from src.core.config_models import Environment
             # Use token as password for SupersetClient
             return Environment(
@@ -369,14 +383,14 @@
             # but we have other envs, maybe it's one of them?
             env = next((e for e in envs if e.id == env_id), None)
             if env:
-                logger.info(f"[_get_env][Exit] Found environment {env_id} in ConfigManager list")
+                app_logger.info(f"[_get_env][Exit] Found environment {env_id} in ConfigManager list")
                 return env
         if not env_id:
-            logger.info(f"[_get_env][Exit] Using first environment from ConfigManager: {envs[0].name}")
+            app_logger.info(f"[_get_env][Exit] Using first environment from ConfigManager: {envs[0].name}")
            return envs[0]
-        logger.error(f"[_get_env][Coherence:Failed] No environments configured (searched config.json and DB). env_id={env_id}")
+        app_logger.error(f"[_get_env][Coherence:Failed] No environments configured (searched config.json and DB). env_id={env_id}")
         raise ValueError("No environments configured. Please add a Superset Environment in Settings.")
 # [/DEF:_get_env:Function]
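
The packing step exists because Superset's import endpoint expects exactly one root folder inside the ZIP. Below is a self-contained sketch of that step; the default directory name is illustrative, and the exclusions mirror the code above.

```python
import io
import os
import zipfile
from pathlib import Path

def pack_repo(repo_path: Path, root_dir_name: str = "dashboard_export") -> bytes:
    buffer = io.BytesIO()
    with zipfile.ZipFile(buffer, "w", zipfile.ZIP_DEFLATED) as zf:
        for root, dirs, files in os.walk(repo_path):
            if ".git" in dirs:
                dirs.remove(".git")  # never ship repository internals
            for name in files:
                if name.endswith(".zip"):
                    continue  # skip leftover deploy archives
                file_path = Path(root) / name
                # Prepend the root directory name, as Superset requires
                arcname = Path(root_dir_name) / file_path.relative_to(repo_path)
                zf.write(file_path, arcname)
    return buffer.getvalue()
```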


@@ -1,6 +1,7 @@
 # [DEF:backend/src/plugins/llm_analysis/__init__.py:Module]
 # @TIER: TRIVIAL
 # @PURPOSE: Initialize the LLM Analysis plugin package.
+# @LAYER: Domain
 """
 LLM Analysis Plugin for automated dashboard validation and dataset documentation.
@@ -8,4 +9,6 @@ LLM Analysis Plugin for automated dashboard validation and dataset documentation
 from .plugin import DashboardValidationPlugin, DocumentationPlugin
-# [/DEF:backend/src/plugins/llm_analysis/__init__.py]
+__all__ = ['DashboardValidationPlugin', 'DocumentationPlugin']
+# [/DEF:backend/src/plugins/llm_analysis/__init__.py:Module]


@@ -24,7 +24,7 @@ class LLMProviderConfig(BaseModel):
     provider_type: LLMProviderType
     name: str
     base_url: str
-    api_key: str
+    api_key: Optional[str] = None
     default_model: str
     is_active: bool = True
 # [/DEF:LLMProviderConfig:Class]
@@ -58,4 +58,4 @@
     raw_response: Optional[str] = None
 # [/DEF:ValidationResult:Class]
-# [/DEF:backend/src/plugins/llm_analysis/models.py]
+# [/DEF:backend/src/plugins/llm_analysis/models.py:Module]
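
With `api_key` now optional, a keyless local provider can be configured. A sketch; the base URL, model name, and the way the enum member is picked are illustrative, since the concrete member names are not shown in this diff.

```python
from src.plugins.llm_analysis.models import LLMProviderConfig, LLMProviderType  # import path assumed

config = LLMProviderConfig(
    provider_type=list(LLMProviderType)[0],  # any member; concrete names are not shown in this diff
    name="local-llm",
    base_url="http://localhost:11434/v1",  # illustrative OpenAI-compatible endpoint
    default_model="llama3",
)
assert config.api_key is None  # no key required for a local provider
```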


@@ -1,24 +1,32 @@
# [DEF:backend.src.plugins.llm_analysis.plugin:Module] # [DEF:backend/src/plugins/llm_analysis/plugin.py:Module]
# @TIER: STANDARD # @TIER: STANDARD
# @SEMANTICS: plugin, llm, analysis, documentation # @SEMANTICS: plugin, llm, analysis, documentation
# @PURPOSE: Implements DashboardValidationPlugin and DocumentationPlugin. # @PURPOSE: Implements DashboardValidationPlugin and DocumentationPlugin.
# @LAYER: Domain # @LAYER: Domain
# @RELATION: INHERITS_FROM -> backend.src.core.plugin_base.PluginBase # @RELATION: INHERITS -> backend.src.core.plugin_base.PluginBase
# @RELATION: CALLS -> backend.src.plugins.llm_analysis.service.ScreenshotService
# @RELATION: CALLS -> backend.src.plugins.llm_analysis.service.LLMClient
# @RELATION: CALLS -> backend.src.services.llm_provider.LLMProviderService
# @RELATION: USES -> TaskContext
# @INVARIANT: All LLM interactions must be executed as asynchronous tasks.
from typing import Dict, Any, Optional, List from typing import Dict, Any, Optional
import os import os
import json
from datetime import datetime, timedelta from datetime import datetime, timedelta
from ...core.plugin_base import PluginBase from ...core.plugin_base import PluginBase
from ...core.logger import belief_scope, logger from ...core.logger import belief_scope, logger
from ...core.database import SessionLocal from ...core.database import SessionLocal
from ...core.config_manager import ConfigManager
from ...services.llm_provider import LLMProviderService from ...services.llm_provider import LLMProviderService
from ...core.superset_client import SupersetClient
from .service import ScreenshotService, LLMClient from .service import ScreenshotService, LLMClient
from .models import LLMProviderType, ValidationStatus, ValidationResult, DetectedIssue from .models import LLMProviderType, ValidationStatus, ValidationResult, DetectedIssue
from ...models.llm import ValidationRecord from ...models.llm import ValidationRecord
from ...core.task_manager.context import TaskContext
# [DEF:DashboardValidationPlugin:Class] # [DEF:DashboardValidationPlugin:Class]
# @PURPOSE: Plugin for automated dashboard health analysis using LLMs. # @PURPOSE: Plugin for automated dashboard health analysis using LLMs.
# @RELATION: IMPLEMENTS -> backend.src.core.plugin_base.PluginBase
class DashboardValidationPlugin(PluginBase): class DashboardValidationPlugin(PluginBase):
@property @property
def id(self) -> str: def id(self) -> str:
@@ -47,14 +55,28 @@ class DashboardValidationPlugin(PluginBase):
"required": ["dashboard_id", "environment_id", "provider_id"] "required": ["dashboard_id", "environment_id", "provider_id"]
} }
async def execute(self, params: Dict[str, Any]): # [DEF:DashboardValidationPlugin.execute:Function]
# @PURPOSE: Executes the dashboard validation task with TaskContext support.
# @PARAM: params (Dict[str, Any]) - Validation parameters.
# @PARAM: context (Optional[TaskContext]) - Task context for logging with source attribution.
# @PRE: params contains dashboard_id, environment_id, and provider_id.
# @POST: Returns a dictionary with validation results and persists them to the database.
# @SIDE_EFFECT: Captures a screenshot, calls LLM API, and writes to the database.
async def execute(self, params: Dict[str, Any], context: Optional[TaskContext] = None):
with belief_scope("execute", f"plugin_id={self.id}"): with belief_scope("execute", f"plugin_id={self.id}"):
logger.info(f"Executing {self.name} with params: {params}") # Use TaskContext logger if available, otherwise fall back to app logger
log = context.logger if context else logger
# Create sub-loggers for different components
llm_log = log.with_source("llm") if context else log
screenshot_log = log.with_source("screenshot") if context else log
superset_log = log.with_source("superset_api") if context else log
log.info(f"Executing {self.name} with params: {params}")
dashboard_id = params.get("dashboard_id") dashboard_id = params.get("dashboard_id")
env_id = params.get("environment_id") env_id = params.get("environment_id")
provider_id = params.get("provider_id") provider_id = params.get("provider_id")
task_id = params.get("_task_id")
db = SessionLocal() db = SessionLocal()
try: try:
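
`TaskContext` and its `with_source(...)` sub-loggers are used throughout this diff but defined elsewhere; a minimal sketch of the pattern, under the assumption that `with_source` only tags records with a component name:

```python
import logging

class SourcedLogger:
    """Assumed shape of the TaskContext logger; not the real implementation."""
    def __init__(self, base: logging.Logger, source: str = "task"):
        self._base = base
        self._source = source

    def with_source(self, source: str) -> "SourcedLogger":
        # Child logger attributing records to a sub-component (llm, screenshot, ...)
        return SourcedLogger(self._base, source)

    def info(self, msg: str) -> None:
        self._base.info("[%s] %s", self._source, msg)

    def debug(self, msg: str) -> None:
        self._base.debug("[%s] %s", self._source, msg)

# log = SourcedLogger(logging.getLogger("tasks"))
# log.with_source("llm").info("Analyzing dashboard 42")  # -> [llm] Analyzing dashboard 42
```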
@@ -63,34 +85,91 @@ class DashboardValidationPlugin(PluginBase):
config_mgr = get_config_manager() config_mgr = get_config_manager()
env = config_mgr.get_environment(env_id) env = config_mgr.get_environment(env_id)
if not env: if not env:
log.error(f"Environment {env_id} not found")
raise ValueError(f"Environment {env_id} not found") raise ValueError(f"Environment {env_id} not found")
# 2. Get LLM Provider # 2. Get LLM Provider
llm_service = LLMProviderService(db) llm_service = LLMProviderService(db)
db_provider = llm_service.get_provider(provider_id) db_provider = llm_service.get_provider(provider_id)
if not db_provider: if not db_provider:
log.error(f"LLM Provider {provider_id} not found")
raise ValueError(f"LLM Provider {provider_id} not found") raise ValueError(f"LLM Provider {provider_id} not found")
llm_log.debug("Retrieved provider config:")
llm_log.debug(f" Provider ID: {db_provider.id}")
llm_log.debug(f" Provider Name: {db_provider.name}")
llm_log.debug(f" Provider Type: {db_provider.provider_type}")
llm_log.debug(f" Base URL: {db_provider.base_url}")
llm_log.debug(f" Default Model: {db_provider.default_model}")
llm_log.debug(f" Is Active: {db_provider.is_active}")
api_key = llm_service.get_decrypted_api_key(provider_id) api_key = llm_service.get_decrypted_api_key(provider_id)
llm_log.debug(f"API Key decrypted (first 8 chars): {api_key[:8] if api_key and len(api_key) > 8 else 'EMPTY_OR_NONE'}...")
# Check if API key was successfully decrypted
if not api_key:
raise ValueError(
f"Failed to decrypt API key for provider {provider_id}. "
f"The provider may have been encrypted with a different encryption key. "
f"Please update the provider with a new API key through the UI."
)
# 3. Capture Screenshot # 3. Capture Screenshot
screenshot_service = ScreenshotService(env) screenshot_service = ScreenshotService(env)
os.makedirs("ss-tools-storage/screenshots", exist_ok=True)
screenshot_path = f"ss-tools-storage/screenshots/{dashboard_id}_{datetime.now().strftime('%Y%m%d_%H%M%S')}.png"
storage_root = config_mgr.get_config().settings.storage.root_path
screenshots_dir = os.path.join(storage_root, "screenshots")
os.makedirs(screenshots_dir, exist_ok=True)
filename = f"{dashboard_id}_{datetime.now().strftime('%Y%m%d_%H%M%S')}.png"
screenshot_path = os.path.join(screenshots_dir, filename)
screenshot_log.info(f"Capturing screenshot for dashboard {dashboard_id}")
await screenshot_service.capture_dashboard(dashboard_id, screenshot_path) await screenshot_service.capture_dashboard(dashboard_id, screenshot_path)
screenshot_log.debug(f"Screenshot saved to: {screenshot_path}")
-            # 4. Fetch Logs (Last 100 lines from backend.log)
-            logs = []
-            log_file = "backend.log"
-            if os.path.exists(log_file):
-                with open(log_file, "r") as f:
-                    # Read last 100 lines
-                    all_lines = f.readlines()
-                    logs = all_lines[-100:]
-            if not logs:
-                logs = ["No logs found in backend.log"]
+            # 4. Fetch Logs (from Environment /api/v1/log/)
+            logs = []
+            try:
+                client = SupersetClient(env)
+                # Calculate time window (last 24 hours)
+                start_time = (datetime.now() - timedelta(hours=24)).isoformat()
+                # Construct filter for logs
+                # Note: We filter by dashboard_id matching the object
+                query_params = {
+                    "filters": [
+                        {"col": "dashboard_id", "opr": "eq", "value": dashboard_id},
+                        {"col": "dttm", "opr": "gt", "value": start_time}
+                    ],
+                    "order_column": "dttm",
+                    "order_direction": "desc",
+                    "page": 0,
+                    "page_size": 100
+                }
+                superset_log.debug(f"Fetching logs for dashboard {dashboard_id}")
+                response = client.network.request(
+                    method="GET",
+                    endpoint="/log/",
+                    params={"q": json.dumps(query_params)}
+                )
+                if isinstance(response, dict) and "result" in response:
+                    for item in response["result"]:
+                        action = item.get("action", "unknown")
+                        dttm = item.get("dttm", "")
+                        details = item.get("json", "")
+                        logs.append(f"[{dttm}] {action}: {details}")
+                if not logs:
+                    logs = ["No recent logs found for this dashboard."]
+                    superset_log.debug("No recent logs found for this dashboard")
+            except Exception as e:
+                superset_log.warning(f"Failed to fetch logs from environment: {e}")
+                logs = [f"Error fetching remote logs: {str(e)}"]
# 5. Analyze with LLM # 5. Analyze with LLM
llm_client = LLMClient( llm_client = LLMClient(
@@ -100,7 +179,15 @@ class DashboardValidationPlugin(PluginBase):
default_model=db_provider.default_model default_model=db_provider.default_model
) )
llm_log.info(f"Analyzing dashboard {dashboard_id} with LLM")
analysis = await llm_client.analyze_dashboard(screenshot_path, logs) analysis = await llm_client.analyze_dashboard(screenshot_path, logs)
# Log analysis summary to task logs for better visibility
llm_log.info(f"[ANALYSIS_SUMMARY] Status: {analysis['status']}")
llm_log.info(f"[ANALYSIS_SUMMARY] Summary: {analysis['summary']}")
if analysis.get("issues"):
for i, issue in enumerate(analysis["issues"]):
llm_log.info(f"[ANALYSIS_ISSUE][{i+1}] {issue.get('severity')}: {issue.get('message')} (Location: {issue.get('location', 'N/A')})")
# 6. Persist Result # 6. Persist Result
validation_result = ValidationResult( validation_result = ValidationResult(
@@ -125,19 +212,24 @@ class DashboardValidationPlugin(PluginBase):
# 7. Notification on failure (US1 / FR-015) # 7. Notification on failure (US1 / FR-015)
if validation_result.status == ValidationStatus.FAIL: if validation_result.status == ValidationStatus.FAIL:
logger.warning(f"Dashboard {dashboard_id} validation FAILED. Summary: {validation_result.summary}") log.warning(f"Dashboard {dashboard_id} validation FAILED. Summary: {validation_result.summary}")
# Placeholder for Email/Pulse notification dispatch # Placeholder for Email/Pulse notification dispatch
# In a real implementation, we would call a NotificationService here # In a real implementation, we would call a NotificationService here
# with a payload containing the summary and a link to the report. # with a payload containing the summary and a link to the report.
# Final log to ensure all analysis is visible in task logs
log.info(f"Validation completed for dashboard {dashboard_id}. Status: {validation_result.status.value}")
return validation_result.dict() return validation_result.dict()
finally: finally:
db.close() db.close()
# [/DEF:DashboardValidationPlugin.execute:Function]
# [/DEF:DashboardValidationPlugin:Class] # [/DEF:DashboardValidationPlugin:Class]
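
The FAIL branch above deliberately leaves notification dispatch as a placeholder. One hypothetical shape for that hook; the `NotificationService` name and `send` signature are invented here, not part of the codebase:

```python
from typing import Protocol

class NotificationService(Protocol):  # hypothetical interface
    def send(self, subject: str, body: str, link: str) -> None: ...

def notify_validation_failure(notifier: NotificationService,
                              dashboard_id: str, summary: str) -> None:
    """Sketch of the dispatch the placeholder comment describes."""
    notifier.send(
        subject=f"Dashboard {dashboard_id} validation FAILED",
        body=summary,
        link=f"/reports/validations/{dashboard_id}",  # illustrative route
    )
```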
# [DEF:DocumentationPlugin:Class] # [DEF:DocumentationPlugin:Class]
# @PURPOSE: Plugin for automated dataset documentation using LLMs. # @PURPOSE: Plugin for automated dataset documentation using LLMs.
# @RELATION: IMPLEMENTS -> backend.src.core.plugin_base.PluginBase
class DocumentationPlugin(PluginBase): class DocumentationPlugin(PluginBase):
@property @property
def id(self) -> str: def id(self) -> str:
@@ -166,9 +258,23 @@ class DocumentationPlugin(PluginBase):
"required": ["dataset_id", "environment_id", "provider_id"] "required": ["dataset_id", "environment_id", "provider_id"]
} }
async def execute(self, params: Dict[str, Any]): # [DEF:DocumentationPlugin.execute:Function]
# @PURPOSE: Executes the dataset documentation task with TaskContext support.
# @PARAM: params (Dict[str, Any]) - Documentation parameters.
# @PARAM: context (Optional[TaskContext]) - Task context for logging with source attribution.
# @PRE: params contains dataset_id, environment_id, and provider_id.
# @POST: Returns generated documentation and updates the dataset in Superset.
# @SIDE_EFFECT: Calls LLM API and updates dataset metadata in Superset.
async def execute(self, params: Dict[str, Any], context: Optional[TaskContext] = None):
with belief_scope("execute", f"plugin_id={self.id}"): with belief_scope("execute", f"plugin_id={self.id}"):
logger.info(f"Executing {self.name} with params: {params}") # Use TaskContext logger if available, otherwise fall back to app logger
log = context.logger if context else logger
# Create sub-loggers for different components
llm_log = log.with_source("llm") if context else log
superset_log = log.with_source("superset_api") if context else log
log.info(f"Executing {self.name} with params: {params}")
dataset_id = params.get("dataset_id") dataset_id = params.get("dataset_id")
env_id = params.get("environment_id") env_id = params.get("environment_id")
@@ -181,24 +287,40 @@ class DocumentationPlugin(PluginBase):
config_mgr = get_config_manager() config_mgr = get_config_manager()
env = config_mgr.get_environment(env_id) env = config_mgr.get_environment(env_id)
if not env: if not env:
log.error(f"Environment {env_id} not found")
raise ValueError(f"Environment {env_id} not found") raise ValueError(f"Environment {env_id} not found")
# 2. Get LLM Provider # 2. Get LLM Provider
llm_service = LLMProviderService(db) llm_service = LLMProviderService(db)
db_provider = llm_service.get_provider(provider_id) db_provider = llm_service.get_provider(provider_id)
if not db_provider: if not db_provider:
log.error(f"LLM Provider {provider_id} not found")
raise ValueError(f"LLM Provider {provider_id} not found") raise ValueError(f"LLM Provider {provider_id} not found")
llm_log.debug("Retrieved provider config:")
llm_log.debug(f" Provider ID: {db_provider.id}")
llm_log.debug(f" Provider Name: {db_provider.name}")
llm_log.debug(f" Provider Type: {db_provider.provider_type}")
llm_log.debug(f" Base URL: {db_provider.base_url}")
llm_log.debug(f" Default Model: {db_provider.default_model}")
api_key = llm_service.get_decrypted_api_key(provider_id) api_key = llm_service.get_decrypted_api_key(provider_id)
llm_log.debug(f"API Key decrypted (first 8 chars): {api_key[:8] if api_key and len(api_key) > 8 else 'EMPTY_OR_NONE'}...")
# Check if API key was successfully decrypted
if not api_key:
raise ValueError(
f"Failed to decrypt API key for provider {provider_id}. "
f"The provider may have been encrypted with a different encryption key. "
f"Please update the provider with a new API key through the UI."
)
# 3. Fetch Metadata (US2 / T024) # 3. Fetch Metadata (US2 / T024)
from ...core.superset_client import SupersetClient from ...core.superset_client import SupersetClient
client = SupersetClient(env) client = SupersetClient(env)
# Optimistic locking check (T045) superset_log.debug(f"Fetching dataset {dataset_id}")
dataset = client.get_dataset(int(dataset_id)) dataset = client.get_dataset(int(dataset_id))
# dataset structure might vary, ensure we get the right field
original_changed_on = dataset.get("changed_on_utc") or dataset.get("result", {}).get("changed_on_utc")
# Extract columns and existing descriptions # Extract columns and existing descriptions
columns_data = [] columns_data = []
@@ -208,6 +330,7 @@ class DocumentationPlugin(PluginBase):
"type": col.get("type"), "type": col.get("type"),
"description": col.get("description") "description": col.get("description")
}) })
superset_log.debug(f"Extracted {len(columns_data)} columns from dataset")
# 4. Construct Prompt & Analyze (US2 / T025) # 4. Construct Prompt & Analyze (US2 / T025)
llm_client = LLMClient( llm_client = LLMClient(
@@ -235,18 +358,10 @@ class DocumentationPlugin(PluginBase):
""" """
# Using a generic chat completion for text-only US2 # Using a generic chat completion for text-only US2
-                response = await llm_client.client.chat.completions.create(
-                    model=db_provider.default_model,
-                    messages=[{"role": "user", "content": prompt}],
-                    response_format={"type": "json_object"}
-                )
-                import json
-                doc_result = json.loads(response.choices[0].message.content)
+                llm_log.info(f"Generating documentation for dataset {dataset_id}")
+                doc_result = await llm_client.get_json_completion([{"role": "user", "content": prompt}])
# 5. Update Metadata (US2 / T026) # 5. Update Metadata (US2 / T026)
# This part normally goes to mapping_service, but we implement the logic here for the plugin flow
# We'll update the dataset in Superset
update_payload = { update_payload = {
"description": doc_result["dataset_description"], "description": doc_result["dataset_description"],
"columns": [] "columns": []
@@ -261,12 +376,16 @@ class DocumentationPlugin(PluginBase):
"description": col_doc["description"] "description": col_doc["description"]
}) })
superset_log.info(f"Updating dataset {dataset_id} with generated documentation")
client.update_dataset(int(dataset_id), update_payload) client.update_dataset(int(dataset_id), update_payload)
log.info(f"Documentation completed for dataset {dataset_id}")
return doc_result return doc_result
finally: finally:
db.close() db.close()
# [/DEF:DocumentationPlugin.execute:Function]
# [/DEF:DocumentationPlugin:Class] # [/DEF:DocumentationPlugin:Class]
# [/DEF:backend.src.plugins.llm_analysis.plugin:Module] # [/DEF:backend/src/plugins/llm_analysis/plugin.py:Module]
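
For orientation, the JSON contract implied by steps 4-5: the LLM returns a `dataset_description` plus per-column descriptions, which are folded into the Superset update payload. A sketch with invented values; the `columns`/`column_name` key names are assumptions, since only `dataset_description` and `col_doc["description"]` appear in the diff:

```python
# Assumed shape of doc_result (values are illustrative):
doc_result = {
    "dataset_description": "Daily sales facts aggregated per store.",
    "columns": [
        {"column_name": "store_id", "description": "Surrogate key of the store."},
        {"column_name": "revenue", "description": "Gross revenue per day."},
    ],
}

update_payload = {
    "description": doc_result["dataset_description"],
    "columns": [
        {"column_name": c["column_name"], "description": c["description"]}
        for c in doc_result["columns"]
    ],
}
```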

View File

@@ -14,6 +14,7 @@ from ...core.logger import belief_scope, logger
# @PARAM: dashboard_id (str) - ID of the dashboard to validate. # @PARAM: dashboard_id (str) - ID of the dashboard to validate.
# @PARAM: cron_expression (str) - Standard cron expression for scheduling. # @PARAM: cron_expression (str) - Standard cron expression for scheduling.
# @PARAM: params (Dict[str, Any]) - Task parameters (environment_id, provider_id). # @PARAM: params (Dict[str, Any]) - Task parameters (environment_id, provider_id).
# @SIDE_EFFECT: Adds a job to the scheduler service.
def schedule_dashboard_validation(dashboard_id: str, cron_expression: str, params: Dict[str, Any]): def schedule_dashboard_validation(dashboard_id: str, cron_expression: str, params: Dict[str, Any]):
with belief_scope("schedule_dashboard_validation", f"dashboard_id={dashboard_id}"): with belief_scope("schedule_dashboard_validation", f"dashboard_id={dashboard_id}"):
scheduler = get_scheduler_service() scheduler = get_scheduler_service()
@@ -38,7 +39,12 @@ def schedule_dashboard_validation(dashboard_id: str, cron_expression: str, param
**_parse_cron(cron_expression) **_parse_cron(cron_expression)
) )
logger.info(f"Scheduled validation for dashboard {dashboard_id} with cron {cron_expression}") logger.info(f"Scheduled validation for dashboard {dashboard_id} with cron {cron_expression}")
# [/DEF:schedule_dashboard_validation:Function]
# [DEF:_parse_cron:Function]
# @PURPOSE: Basic cron parser placeholder.
# @PARAM: cron (str) - Cron expression.
# @RETURN: Dict[str, str] - Parsed cron parts.
def _parse_cron(cron: str) -> Dict[str, str]: def _parse_cron(cron: str) -> Dict[str, str]:
# Basic cron parser placeholder # Basic cron parser placeholder
parts = cron.split() parts = cron.split()
@@ -51,6 +57,6 @@ def _parse_cron(cron: str) -> Dict[str, str]:
"month": parts[3], "month": parts[3],
"day_of_week": parts[4] "day_of_week": parts[4]
} }
# [/DEF:schedule_dashboard_validation:Function] # [/DEF:_parse_cron:Function]
# [/DEF:backend/src/plugins/llm_analysis/scheduler.py] # [/DEF:backend/src/plugins/llm_analysis/scheduler.py:Module]
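
`_parse_cron` splits a five-field cron string into the keyword arguments a cron trigger expects; assuming the scheduler service wraps APScheduler (which this diff does not show), the mapping looks like:

```python
from apscheduler.triggers.cron import CronTrigger

def cron_to_trigger(cron: str) -> CronTrigger:
    """'0 6 * * 1' -> fire Mondays at 06:00."""
    minute, hour, day, month, day_of_week = cron.split()
    return CronTrigger(minute=minute, hour=hour, day=day,
                       month=month, day_of_week=day_of_week)

# Field order matches _parse_cron's dict: minute, hour, day, month, day_of_week
```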

View File

@@ -1,4 +1,4 @@
# [DEF:backend.src.plugins.llm_analysis.service:Module] # [DEF:backend/src/plugins/llm_analysis/service.py:Module]
# @TIER: STANDARD # @TIER: STANDARD
# @SEMANTICS: service, llm, screenshot, playwright, openai # @SEMANTICS: service, llm, screenshot, playwright, openai
# @PURPOSE: Services for LLM interaction and dashboard screenshots. # @PURPOSE: Services for LLM interaction and dashboard screenshots.
@@ -6,155 +6,580 @@
# @RELATION: DEPENDS_ON -> playwright # @RELATION: DEPENDS_ON -> playwright
# @RELATION: DEPENDS_ON -> openai # @RELATION: DEPENDS_ON -> openai
# @RELATION: DEPENDS_ON -> tenacity # @RELATION: DEPENDS_ON -> tenacity
# @INVARIANT: Screenshots must be 1920px width and capture full page height.
import asyncio import asyncio
-from typing import List, Optional, Dict, Any
+import base64
+import json
+import io
+from typing import List, Dict, Any
+from PIL import Image
 from playwright.async_api import async_playwright
-from openai import AsyncOpenAI, RateLimitError
+from openai import AsyncOpenAI, RateLimitError, AuthenticationError as OpenAIAuthenticationError
-from tenacity import retry, stop_after_attempt, wait_exponential, retry_if_exception_type
+from tenacity import retry, stop_after_attempt, wait_exponential, retry_if_exception
-from .models import LLMProviderType, ValidationResult, ValidationStatus, DetectedIssue
+from .models import LLMProviderType
from ...core.logger import belief_scope, logger from ...core.logger import belief_scope, logger
from ...core.config_models import Environment from ...core.config_models import Environment
# [DEF:ScreenshotService:Class] # [DEF:ScreenshotService:Class]
# @PURPOSE: Handles capturing screenshots of Superset dashboards. # @PURPOSE: Handles capturing screenshots of Superset dashboards.
class ScreenshotService: class ScreenshotService:
# [DEF:ScreenshotService.__init__:Function]
# @PURPOSE: Initializes the ScreenshotService with environment configuration.
# @PRE: env is a valid Environment object. # @PRE: env is a valid Environment object.
def __init__(self, env: Environment): def __init__(self, env: Environment):
self.env = env self.env = env
# [/DEF:ScreenshotService.__init__:Function]
# [DEF:capture_dashboard:Function] # [DEF:ScreenshotService.capture_dashboard:Function]
# @PURPOSE: Captures a screenshot of a dashboard using Playwright. # @PURPOSE: Captures a full-page screenshot of a dashboard using Playwright and CDP.
# @PARAM: dashboard_id (str) - ID of the dashboard. # @PRE: dashboard_id is a valid string, output_path is a writable path.
# @PARAM: output_path (str) - Path to save the screenshot. # @POST: Returns True if screenshot is saved successfully.
# @RETURN: bool - True if successful. # @SIDE_EFFECT: Launches a browser, performs UI login, switches tabs, and writes a PNG file.
# @UX_STATE: [Navigating] -> Loading dashboard UI
# @UX_STATE: [TabSwitching] -> Iterating through dashboard tabs to trigger lazy loading
# @UX_STATE: [CalculatingHeight] -> Determining dashboard dimensions
# @UX_STATE: [Capturing] -> Executing CDP screenshot
async def capture_dashboard(self, dashboard_id: str, output_path: str) -> bool: async def capture_dashboard(self, dashboard_id: str, output_path: str) -> bool:
with belief_scope("capture_dashboard", f"dashboard_id={dashboard_id}"): with belief_scope("capture_dashboard", f"dashboard_id={dashboard_id}"):
logger.info(f"Capturing screenshot for dashboard {dashboard_id}") logger.info(f"Capturing screenshot for dashboard {dashboard_id}")
async with async_playwright() as p: async with async_playwright() as p:
-                browser = await p.chromium.launch(headless=True)
-                context = await browser.new_context(viewport={'width': 1280, 'height': 720})
-                page = await context.new_page()
-                # 1. Authenticate via API to get tokens
-                from ...core.superset_client import SupersetClient
-                client = SupersetClient(self.env)
-                try:
-                    tokens = client.authenticate()
-                    access_token = tokens.get("access_token")
-                    # Set JWT in localStorage if possible, or use as cookie
-                    # Superset UI uses session cookies, but we can try to set the Authorization header
-                    # or inject the token into the session.
-                    # For now, we'll use the token to set a cookie if we can determine the name,
-                    # but the most reliable way for Playwright is often still the UI login
-                    # UNLESS we use the API to set a session cookie.
-                    logger.info("API Authentication successful")
-                except Exception as e:
-                    logger.warning(f"API Authentication failed: {e}. Falling back to UI login.")
-                # 2. Navigate to dashboard
-                dashboard_url = f"{self.env.url}/superset/dashboard/{dashboard_id}/"
-                logger.info(f"Navigating to {dashboard_url}")
-                # We still go to the URL first
-                await page.goto(dashboard_url)
-                await page.wait_for_load_state("networkidle")
-                # 3. Check if we are redirected to login
-                if "/login" in page.url:
-                    logger.info(f"Redirected to login: {page.url}. Filling credentials from Environment.")
-                    # More exhaustive list of selectors for various Superset versions/themes
-                    selectors = {
-                        "username": ['input[name="username"]', 'input#username', 'input[placeholder*="Username"]'],
-                        "password": ['input[name="password"]', 'input#password', 'input[placeholder*="Password"]'],
-                        "submit": ['button[type="submit"]', 'button#submit', '.btn-primary']
-                    }
-                    try:
-                        # Find and fill username
-                        u_selector = None
-                        for s in selectors["username"]:
-                            if await page.locator(s).count() > 0:
-                                u_selector = s
-                                break
-                        if not u_selector:
-                            raise RuntimeError("Could not find username input field")
-                        await page.fill(u_selector, self.env.username)
-                        # Find and fill password
-                        p_selector = None
-                        for s in selectors["password"]:
-                            if await page.locator(s).count() > 0:
-                                p_selector = s
-                                break
-                        if not p_selector:
-                            raise RuntimeError("Could not find password input field")
-                        await page.fill(p_selector, self.env.password)
-                        # Click submit
-                        s_selector = selectors["submit"][0]
-                        for s in selectors["submit"]:
-                            if await page.locator(s).count() > 0:
-                                s_selector = s
-                                break
-                        await page.click(s_selector)
-                        await page.wait_for_load_state("networkidle")
-                        # Re-verify we are at the dashboard
-                        if "/login" in page.url:
-                            # Check for error messages on page
-                            error_msg = await page.locator(".alert-danger, .error-message").text_content() if await page.locator(".alert-danger, .error-message").count() > 0 else "Unknown error"
-                            raise RuntimeError(f"Login failed after submission: {error_msg}")
-                        if "/superset/dashboard" not in page.url:
-                            logger.info(f"Redirecting back to dashboard after login: {dashboard_url}")
-                            await page.goto(dashboard_url)
-                            await page.wait_for_load_state("networkidle")
-                    except Exception as e:
-                        page_title = await page.title()
-                        logger.error(f"UI Login failed. Page title: {page_title}, URL: {page.url}, Error: {str(e)}")
-                        debug_path = output_path.replace(".png", "_debug_failed_login.png")
-                        await page.screenshot(path=debug_path)
-                        raise RuntimeError(f"Login failed: {str(e)}. Debug screenshot saved to {debug_path}")
-                # Wait a bit more for charts to render
-                await asyncio.sleep(5)
+                browser = await p.chromium.launch(
+                    headless=True,
+                    args=[
+                        "--disable-blink-features=AutomationControlled",
+                        "--disable-infobars",
+                        "--no-sandbox"
+                    ]
+                )
+                # Set a realistic user agent to avoid 403 Forbidden from OpenResty/WAF
+                user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
+                # Construct base UI URL from environment (strip /api/v1 suffix)
+                base_ui_url = self.env.url.rstrip("/")
+                if base_ui_url.endswith("/api/v1"):
+                    base_ui_url = base_ui_url[:-len("/api/v1")]
+                # Create browser context with realistic headers
+                context = await browser.new_context(
+                    viewport={'width': 1280, 'height': 720},
+                    user_agent=user_agent,
+                    extra_http_headers={
+                        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
+                        "Accept-Language": "ru-RU,ru;q=0.9,en-US;q=0.8,en;q=0.7",
+                        "Upgrade-Insecure-Requests": "1",
+                        "Sec-Fetch-Dest": "document",
+                        "Sec-Fetch-Mode": "navigate",
+                        "Sec-Fetch-Site": "none",
+                        "Sec-Fetch-User": "?1"
+                    }
+                )
+                logger.info("Browser context created successfully")
+                page = await context.new_page()
+                # Bypass navigator.webdriver detection
+                await page.add_init_script("delete Object.getPrototypeOf(navigator).webdriver")
+                # 1. Navigate to login page and authenticate
+                login_url = f"{base_ui_url.rstrip('/')}/login/"
+                logger.info(f"[DEBUG] Navigating to login page: {login_url}")
+                response = await page.goto(login_url, wait_until="networkidle", timeout=60000)
+                if response:
+                    logger.info(f"[DEBUG] Login page response status: {response.status}")
+                # Wait for login form to be ready
+                await page.wait_for_load_state("domcontentloaded")
+                # More exhaustive list of selectors for various Superset versions/themes
+                selectors = {
+                    "username": ['input[name="username"]', 'input#username', 'input[placeholder*="Username"]', 'input[type="text"]'],
+                    "password": ['input[name="password"]', 'input#password', 'input[placeholder*="Password"]', 'input[type="password"]'],
+                    "submit": ['button[type="submit"]', 'button#submit', '.btn-primary', 'input[type="submit"]']
+                }
+                logger.info("[DEBUG] Attempting to find login form elements...")
+                try:
+                    # Find and fill username
+                    u_selector = None
+                    for s in selectors["username"]:
+                        count = await page.locator(s).count()
+                        logger.info(f"[DEBUG] Selector '{s}': {count} elements found")
+                        if count > 0:
+                            u_selector = s
+                            break
+                    if not u_selector:
+                        # Log all input fields on the page for debugging
+                        all_inputs = await page.locator('input').all()
+                        logger.info(f"[DEBUG] Found {len(all_inputs)} input fields on page")
+                        for i, inp in enumerate(all_inputs[:5]):  # Log first 5
+                            inp_type = await inp.get_attribute('type')
+                            inp_name = await inp.get_attribute('name')
+                            inp_id = await inp.get_attribute('id')
+                            logger.info(f"[DEBUG] Input {i}: type={inp_type}, name={inp_name}, id={inp_id}")
+                        raise RuntimeError("Could not find username input field on login page")
+                    logger.info(f"[DEBUG] Filling username field with selector: {u_selector}")
+                    await page.fill(u_selector, self.env.username)
+                    # Find and fill password
+                    p_selector = None
+                    for s in selectors["password"]:
+                        if await page.locator(s).count() > 0:
+                            p_selector = s
+                            break
+                    if not p_selector:
+                        raise RuntimeError("Could not find password input field on login page")
+                    logger.info(f"[DEBUG] Filling password field with selector: {p_selector}")
+                    await page.fill(p_selector, self.env.password)
+                    # Click submit
+                    s_selector = selectors["submit"][0]
+                    for s in selectors["submit"]:
+                        if await page.locator(s).count() > 0:
+                            s_selector = s
+                            break
+                    logger.info(f"[DEBUG] Clicking submit button with selector: {s_selector}")
+                    await page.click(s_selector)
+                    # Wait for navigation after login
+                    await page.wait_for_load_state("networkidle", timeout=30000)
+                    # Check if login was successful
+                    if "/login" in page.url:
+                        # Check for error messages on page
+                        error_msg = await page.locator(".alert-danger, .error-message").text_content() if await page.locator(".alert-danger, .error-message").count() > 0 else "Unknown error"
+                        logger.error(f"[DEBUG] Login failed. Still on login page. Error: {error_msg}")
+                        debug_path = output_path.replace(".png", "_debug_failed_login.png")
+                        await page.screenshot(path=debug_path)
+                        raise RuntimeError(f"Login failed: {error_msg}. Debug screenshot saved to {debug_path}")
+                    logger.info(f"[DEBUG] Login successful. Current URL: {page.url}")
+                    # Check cookies after successful login
+                    page_cookies = await context.cookies()
+                    logger.info(f"[DEBUG] Cookies after login: {len(page_cookies)}")
+                    for c in page_cookies:
+                        logger.info(f"[DEBUG] Cookie: name={c['name']}, domain={c['domain']}, value={c.get('value', '')[:20]}...")
+                except Exception as e:
+                    page_title = await page.title()
+                    logger.error(f"UI Login failed. Page title: {page_title}, URL: {page.url}, Error: {str(e)}")
+                    debug_path = output_path.replace(".png", "_debug_failed_login.png")
+                    await page.screenshot(path=debug_path)
+                    raise RuntimeError(f"Login failed: {str(e)}. Debug screenshot saved to {debug_path}")
# 2. Navigate to dashboard
# @UX_STATE: [Navigating] -> Loading dashboard UI
dashboard_url = f"{base_ui_url.rstrip('/')}/superset/dashboard/{dashboard_id}/?standalone=true"
if base_ui_url.startswith("https://") and dashboard_url.startswith("http://"):
dashboard_url = dashboard_url.replace("http://", "https://")
logger.info(f"[DEBUG] Navigating to dashboard: {dashboard_url}")
# Use networkidle to ensure all initial assets are loaded
response = await page.goto(dashboard_url, wait_until="networkidle", timeout=60000)
if response:
logger.info(f"[DEBUG] Dashboard navigation response status: {response.status}, URL: {response.url}")
try:
# Wait for the dashboard grid to be present
await page.wait_for_selector('.dashboard-component, .dashboard-header, [data-test="dashboard-grid"]', timeout=30000)
logger.info("[DEBUG] Dashboard container loaded")
# Wait for charts to finish loading (Superset uses loading spinners/skeletons)
# We wait until loading indicators disappear or a timeout occurs
try:
# Wait for loading indicators to disappear
await page.wait_for_selector('.loading, .ant-skeleton, .spinner', state="hidden", timeout=60000)
logger.info("[DEBUG] Loading indicators hidden")
except Exception:
logger.warning("[DEBUG] Timeout waiting for loading indicators to hide")
# Wait for charts to actually render their content (e.g., ECharts, NVD3)
# We look for common chart containers that should have content
try:
await page.wait_for_selector('.chart-container canvas, .slice_container svg, .superset-chart-canvas, .grid-content .chart-container', timeout=60000)
logger.info("[DEBUG] Chart content detected")
except Exception:
logger.warning("[DEBUG] Timeout waiting for chart content")
# Additional check: wait for all chart containers to have non-empty content
logger.info("[DEBUG] Waiting for all charts to have rendered content...")
await page.wait_for_function("""() => {
const charts = document.querySelectorAll('.chart-container, .slice_container');
if (charts.length === 0) return true; // No charts to wait for
// Check if all charts have rendered content (canvas, svg, or non-empty div)
return Array.from(charts).every(chart => {
const hasCanvas = chart.querySelector('canvas') !== null;
const hasSvg = chart.querySelector('svg') !== null;
const hasContent = chart.innerText.trim().length > 0 || chart.children.length > 0;
return hasCanvas || hasSvg || hasContent;
});
}""", timeout=60000)
logger.info("[DEBUG] All charts have rendered content")
# Scroll to bottom and back to top to trigger lazy loading of all charts
logger.info("[DEBUG] Scrolling to trigger lazy loading...")
await page.evaluate("""async () => {
const delay = ms => new Promise(resolve => setTimeout(resolve, ms));
for (let i = 0; i < document.body.scrollHeight; i += 500) {
window.scrollTo(0, i);
await delay(100);
}
window.scrollTo(0, 0);
await delay(500);
}""")
except Exception as e:
logger.warning(f"[DEBUG] Dashboard content wait failed: {e}, proceeding anyway after delay")
# Final stabilization delay - increased for complex dashboards
logger.info("[DEBUG] Final stabilization delay...")
await asyncio.sleep(15)
# Logic to handle tabs and full-page capture
try:
# 1. Handle Tabs (Recursive switching)
# @UX_STATE: [TabSwitching] -> Iterating through dashboard tabs to trigger lazy loading
processed_tabs = set()
async def switch_tabs(depth=0):
if depth > 3:
return # Limit recursion depth
tab_selectors = [
'.ant-tabs-nav-list .ant-tabs-tab',
'.dashboard-component-tabs .ant-tabs-tab',
'[data-test="dashboard-component-tabs"] .ant-tabs-tab'
]
found_tabs = []
for selector in tab_selectors:
found_tabs = await page.locator(selector).all()
if found_tabs:
break
if found_tabs:
logger.info(f"[DEBUG][TabSwitching] Found {len(found_tabs)} tabs at depth {depth}")
for i, tab in enumerate(found_tabs):
try:
tab_text = (await tab.inner_text()).strip()
tab_id = f"{depth}_{i}_{tab_text}"
if tab_id in processed_tabs:
continue
if await tab.is_visible():
logger.info(f"[DEBUG][TabSwitching] Switching to tab: {tab_text}")
processed_tabs.add(tab_id)
is_active = "ant-tabs-tab-active" in (await tab.get_attribute("class") or "")
if not is_active:
await tab.click()
await asyncio.sleep(2) # Wait for content to render
await switch_tabs(depth + 1)
except Exception as tab_e:
logger.warning(f"[DEBUG][TabSwitching] Failed to process tab {i}: {tab_e}")
try:
first_tab = found_tabs[0]
if "ant-tabs-tab-active" not in (await first_tab.get_attribute("class") or ""):
await first_tab.click()
await asyncio.sleep(1)
except Exception:
pass
await switch_tabs()
# 2. Calculate full height for screenshot
# @UX_STATE: [CalculatingHeight] -> Determining dashboard dimensions
full_height = await page.evaluate("""() => {
const body = document.body;
const html = document.documentElement;
const dashboardContent = document.querySelector('.dashboard-content');
return Math.max(
body.scrollHeight, body.offsetHeight,
html.clientHeight, html.scrollHeight, html.offsetHeight,
dashboardContent ? dashboardContent.scrollHeight + 100 : 0
);
}""")
logger.info(f"[DEBUG] Calculated full height: {full_height}")
# DIAGNOSTIC: Count chart elements before resize
chart_count_before = await page.evaluate("""() => {
const charts = document.querySelectorAll('.chart-container, .slice_container');
return {
chartContainers: charts.length,
canvasElements: document.querySelectorAll('canvas').length,
svgElements: document.querySelectorAll('.chart-container svg, .slice_container svg').length,
// ':visible' is jQuery-only syntax and throws inside querySelectorAll; use offsetParent as the visibility test
visibleCharts: Array.from(charts).filter(c => c.offsetParent !== null).length
};
}""")
logger.info(f"[DIAGNOSTIC] Chart elements BEFORE viewport resize: {chart_count_before}")
# DIAGNOSTIC: Capture pre-resize screenshot for comparison
pre_resize_path = output_path.replace(".png", "_preresize.png")
try:
await page.screenshot(path=pre_resize_path, full_page=False, timeout=10000)
import os
pre_resize_size = os.path.getsize(pre_resize_path) if os.path.exists(pre_resize_path) else 0
logger.info(f"[DIAGNOSTIC] Pre-resize screenshot saved: {pre_resize_path} ({pre_resize_size} bytes)")
except Exception as pre_e:
logger.warning(f"[DIAGNOSTIC] Failed to capture pre-resize screenshot: {pre_e}")
logger.info(f"[DIAGNOSTIC] Resizing viewport from current to 1920x{int(full_height)}")
await page.set_viewport_size({"width": 1920, "height": int(full_height)})
# DIAGNOSTIC: Increased wait time and log timing
logger.info("[DIAGNOSTIC] Waiting 10 seconds after viewport resize for re-render...")
await asyncio.sleep(10)
logger.info("[DIAGNOSTIC] Wait completed")
# DIAGNOSTIC: Count chart elements after resize and wait
chart_count_after = await page.evaluate("""() => {
const charts = document.querySelectorAll('.chart-container, .slice_container');
return {
chartContainers: charts.length,
canvasElements: document.querySelectorAll('canvas').length,
svgElements: document.querySelectorAll('.chart-container svg, .slice_container svg').length,
// ':visible' would throw here as well; offsetParent is the DOM-native visibility check
visibleCharts: Array.from(charts).filter(c => c.offsetParent !== null).length
};
}""")
logger.info(f"[DIAGNOSTIC] Chart elements AFTER viewport resize + wait: {chart_count_after}")
# DIAGNOSTIC: Check if any charts have error states
chart_errors = await page.evaluate("""() => {
const errors = [];
document.querySelectorAll('.chart-container, .slice_container').forEach((chart, i) => {
const errorEl = chart.querySelector('.error, .alert-danger, .ant-alert-error');
if (errorEl) {
errors.push({index: i, text: errorEl.innerText.substring(0, 100)});
}
});
return errors;
}""")
if chart_errors:
logger.warning(f"[DIAGNOSTIC] Charts with error states detected: {chart_errors}")
else:
logger.info("[DIAGNOSTIC] No chart error states detected")
# 3. Take screenshot using CDP to bypass Playwright's font loading wait
# @UX_STATE: [Capturing] -> Executing CDP screenshot
logger.info("[DEBUG] Attempting full-page screenshot via CDP...")
cdp = await page.context.new_cdp_session(page)
screenshot_data = await cdp.send("Page.captureScreenshot", {
"format": "png",
"fromSurface": True,
"captureBeyondViewport": True
})
image_data = base64.b64decode(screenshot_data["data"])
with open(output_path, 'wb') as f:
f.write(image_data)
# DIAGNOSTIC: Verify screenshot file
import os
final_size = os.path.getsize(output_path) if os.path.exists(output_path) else 0
logger.info(f"[DIAGNOSTIC] Final screenshot saved: {output_path}")
logger.info(f"[DIAGNOSTIC] Final screenshot size: {final_size} bytes ({final_size / 1024:.2f} KB)")
# DIAGNOSTIC: Get image dimensions
try:
with Image.open(output_path) as final_img:
logger.info(f"[DIAGNOSTIC] Final screenshot dimensions: {final_img.width}x{final_img.height}")
except Exception as img_err:
logger.warning(f"[DIAGNOSTIC] Could not read final image dimensions: {img_err}")
logger.info(f"Full-page screenshot saved to {output_path} (via CDP)")
except Exception as e:
logger.error(f"[DEBUG] Full-page/Tab capture failed: {e}")
try:
await page.screenshot(path=output_path, full_page=True, timeout=10000)
except Exception as e2:
logger.error(f"[DEBUG] Fallback screenshot also failed: {e2}")
await page.screenshot(path=output_path, timeout=5000)
await page.screenshot(path=output_path, full_page=True)
await browser.close() await browser.close()
logger.info(f"Screenshot saved to {output_path}")
return True return True
# [/DEF:ScreenshotService.capture_dashboard:Function]
# [/DEF:ScreenshotService:Class] # [/DEF:ScreenshotService:Class]
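
Stripped of login, tab switching, and diagnostics, the capture path reduces to: resize the viewport to the full document height, then ask Chromium directly over CDP (which skips Playwright's own screenshot waits, e.g. for web fonts). A minimal sketch of just that path, assuming a page that needs no authentication; the URL is a placeholder:

```python
import asyncio
import base64
from playwright.async_api import async_playwright

async def capture_full_page(url: str, output_path: str) -> None:
    async with async_playwright() as p:
        browser = await p.chromium.launch(headless=True)
        page = await browser.new_page(viewport={"width": 1920, "height": 1080})
        await page.goto(url, wait_until="networkidle")
        height = await page.evaluate("() => document.body.scrollHeight")
        await page.set_viewport_size({"width": 1920, "height": int(height)})
        cdp = await page.context.new_cdp_session(page)
        shot = await cdp.send("Page.captureScreenshot", {
            "format": "png",
            "fromSurface": True,
            "captureBeyondViewport": True,
        })
        with open(output_path, "wb") as f:
            f.write(base64.b64decode(shot["data"]))
        await browser.close()

# asyncio.run(capture_full_page("http://localhost:8088/superset/dashboard/1/", "dash.png"))
```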
# [DEF:LLMClient:Class] # [DEF:LLMClient:Class]
# @PURPOSE: Wrapper for LLM provider APIs. # @PURPOSE: Wrapper for LLM provider APIs.
class LLMClient: class LLMClient:
# [DEF:LLMClient.__init__:Function]
# @PURPOSE: Initializes the LLMClient with provider settings.
# @PRE: api_key, base_url, and default_model are non-empty strings.
def __init__(self, provider_type: LLMProviderType, api_key: str, base_url: str, default_model: str): def __init__(self, provider_type: LLMProviderType, api_key: str, base_url: str, default_model: str):
self.provider_type = provider_type self.provider_type = provider_type
self.api_key = api_key self.api_key = api_key
self.base_url = base_url self.base_url = base_url
self.default_model = default_model self.default_model = default_model
# DEBUG: Log initialization parameters (without exposing full API key)
logger.info("[LLMClient.__init__] Initializing LLM client:")
logger.info(f"[LLMClient.__init__] Provider Type: {provider_type}")
logger.info(f"[LLMClient.__init__] Base URL: {base_url}")
logger.info(f"[LLMClient.__init__] Default Model: {default_model}")
logger.info(f"[LLMClient.__init__] API Key (first 8 chars): {api_key[:8] if api_key and len(api_key) > 8 else 'EMPTY_OR_NONE'}...")
logger.info(f"[LLMClient.__init__] API Key Length: {len(api_key) if api_key else 0}")
self.client = AsyncOpenAI(api_key=api_key, base_url=base_url) self.client = AsyncOpenAI(api_key=api_key, base_url=base_url)
# [/DEF:LLMClient.__init__:Function]
# [DEF:analyze_dashboard:Function] # [DEF:LLMClient.get_json_completion:Function]
# @PURPOSE: Sends dashboard data to LLM for analysis. # @PURPOSE: Helper to handle LLM calls with JSON mode and fallback parsing.
# @PRE: messages is a list of valid message dictionaries.
# @POST: Returns a parsed JSON dictionary.
# @SIDE_EFFECT: Calls external LLM API.
def _should_retry(exception: Exception) -> bool:
"""Custom retry predicate that excludes authentication errors."""
# Don't retry on authentication errors
if isinstance(exception, OpenAIAuthenticationError):
return False
# Retry on everything else, including rate limit errors
# (the old tuple (RateLimitError, Exception) was equivalent to an unconditional True)
return True
@retry( @retry(
stop=stop_after_attempt(5), stop=stop_after_attempt(5),
wait=wait_exponential(multiplier=2, min=5, max=60), wait=wait_exponential(multiplier=2, min=5, max=60),
retry=retry_if_exception_type((Exception, RateLimitError)) retry=retry_if_exception(_should_retry),
reraise=True
) )
async def get_json_completion(self, messages: List[Dict[str, Any]]) -> Dict[str, Any]:
with belief_scope("get_json_completion"):
response = None
try:
try:
logger.info(f"[get_json_completion] Attempting LLM call with JSON mode for model: {self.default_model}")
logger.info(f"[get_json_completion] Base URL being used: {self.base_url}")
logger.info(f"[get_json_completion] Number of messages: {len(messages)}")
logger.info(f"[get_json_completion] API Key present: {bool(self.api_key and len(self.api_key) > 0)}")
response = await self.client.chat.completions.create(
model=self.default_model,
messages=messages,
response_format={"type": "json_object"}
)
except Exception as e:
if "JSON mode is not enabled" in str(e) or "400" in str(e):
logger.warning(f"[get_json_completion] JSON mode failed or not supported: {str(e)}. Falling back to plain text response.")
response = await self.client.chat.completions.create(
model=self.default_model,
messages=messages
)
else:
raise e
logger.debug(f"[get_json_completion] LLM Response: {response}")
except OpenAIAuthenticationError as e:
logger.error(f"[get_json_completion] Authentication error: {str(e)}")
# Do not retry on auth errors - re-raise to stop retry
raise
except RateLimitError as e:
logger.warning(f"[get_json_completion] Rate limit hit: {str(e)}")
# Extract retry_delay from error metadata if available
retry_delay = 5.0 # Default fallback
try:
# Based on logs, the raw response is in e.body or e.response.json()
# The logs show 'metadata': {'raw': '...'} which suggests a proxy or specific client wrapper
# Let's try to find the 'retryDelay' in the error message or response
import re
# Try to find "retryDelay": "XXs" in the string representation of the error
error_str = str(e)
match = re.search(r'"retryDelay":\s*"(\d+)s"', error_str)
if match:
retry_delay = float(match.group(1))
else:
# Try to parse from response if it's a standard OpenAI-like error with body
if hasattr(e, 'body') and isinstance(e.body, dict):
# Some providers put it in details
details = e.body.get('error', {}).get('details', [])
for detail in details:
if detail.get('@type') == 'type.googleapis.com/google.rpc.RetryInfo':
delay_str = detail.get('retryDelay', '5s')
retry_delay = float(delay_str.rstrip('s'))
break
except Exception as parse_e:
logger.debug(f"[get_json_completion] Failed to parse retry delay: {parse_e}")
# Add a small safety margin (0.5s) as requested
wait_time = retry_delay + 0.5
logger.info(f"[get_json_completion] Waiting for {wait_time}s before retry...")
await asyncio.sleep(wait_time)
raise
except Exception as e:
logger.error(f"[get_json_completion] LLM call failed: {str(e)}")
raise
if not response or not hasattr(response, 'choices') or not response.choices:
raise RuntimeError(f"Invalid LLM response: {response}")
content = response.choices[0].message.content
logger.debug(f"[get_json_completion] Raw content to parse: {content}")
try:
return json.loads(content)
except json.JSONDecodeError:
logger.warning("[get_json_completion] Failed to parse JSON directly, attempting to extract from code blocks")
if "```json" in content:
json_str = content.split("```json")[1].split("```")[0].strip()
return json.loads(json_str)
elif "```" in content:
json_str = content.split("```")[1].split("```")[0].strip()
return json.loads(json_str)
else:
raise
# [/DEF:LLMClient.get_json_completion:Function]
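
The switch from `retry_if_exception_type((Exception, RateLimitError))`, which retried everything including bad credentials, to a predicate is the key fix in this hunk. The mechanics in isolation, with a toy exception standing in for the OpenAI auth error:

```python
from tenacity import retry, stop_after_attempt, retry_if_exception

class AuthError(Exception):  # stand-in for openai.AuthenticationError
    pass

def should_retry(exc: BaseException) -> bool:
    return not isinstance(exc, AuthError)  # retry everything except auth failures

@retry(stop=stop_after_attempt(3), retry=retry_if_exception(should_retry), reraise=True)
def flaky(fail_with: Exception):
    raise fail_with

# flaky(TimeoutError())  -> attempted 3 times, then TimeoutError re-raised
# flaky(AuthError())     -> raised once, no retries
```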
# [DEF:LLMClient.analyze_dashboard:Function]
# @PURPOSE: Sends dashboard data (screenshot + logs) to LLM for health analysis.
# @PRE: screenshot_path exists, logs is a list of strings.
# @POST: Returns a structured analysis dictionary (status, summary, issues).
# @SIDE_EFFECT: Reads screenshot file and calls external LLM API.
async def analyze_dashboard(self, screenshot_path: str, logs: List[str]) -> Dict[str, Any]: async def analyze_dashboard(self, screenshot_path: str, logs: List[str]) -> Dict[str, Any]:
with belief_scope("analyze_dashboard"): with belief_scope("analyze_dashboard"):
-            import base64
-            with open(screenshot_path, "rb") as image_file:
-                base64_image = base64.b64encode(image_file.read()).decode('utf-8')
+            # Optimize image to reduce token count (US1 / T023)
+            # Gemini/Gemma models have limits on input tokens, and large images contribute significantly.
+            try:
with Image.open(screenshot_path) as img:
# Convert to RGB if necessary
if img.mode in ("RGBA", "P"):
img = img.convert("RGB")
# Resize if too large (max 1024px width while maintaining aspect ratio)
# We reduce width further to 1024px to stay within token limits for long dashboards
max_width = 1024
if img.width > max_width or img.height > 2048:
# Calculate scaling factor to fit within 1024x2048
scale = min(max_width / img.width, 2048 / img.height)
if scale < 1.0:
new_width = int(img.width * scale)
new_height = int(img.height * scale)
orig_width, orig_height = img.width, img.height
img = img.resize((new_width, new_height), Image.Resampling.LANCZOS)
logger.info(f"[analyze_dashboard] Resized image from {orig_width}x{orig_height} to {new_width}x{new_height}")
# Compress and convert to base64
buffer = io.BytesIO()
# Lower quality to 60% to further reduce payload size
img.save(buffer, format="JPEG", quality=60, optimize=True)
base_64_image = base64.b64encode(buffer.getvalue()).decode('utf-8')
logger.info(f"[analyze_dashboard] Optimized image size: {len(buffer.getvalue()) / 1024:.2f} KB")
except Exception as img_e:
logger.warning(f"[analyze_dashboard] Image optimization failed: {img_e}. Using raw image.")
with open(screenshot_path, "rb") as image_file:
base_64_image = base64.b64encode(image_file.read()).decode('utf-8')
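
To make the scaling rule concrete: a 1920x6000 capture yields scale = min(1024/1920, 2048/6000) = min(0.533, 0.341) = 0.341, i.e. roughly 655x2048 before JPEG compression. The same arithmetic as a self-checking helper:

```python
def fit_within(width: int, height: int, max_w: int = 1024, max_h: int = 2048) -> tuple:
    """Downscale (width, height) to fit max_w x max_h, preserving aspect ratio."""
    scale = min(max_w / width, max_h / height)
    if scale >= 1.0:
        return width, height  # already small enough
    return int(width * scale), int(height * scale)

assert fit_within(1920, 6000) == (655, 2048)
```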
log_text = "\n".join(logs) log_text = "\n".join(logs)
prompt = f""" prompt = f"""
@@ -177,48 +602,31 @@ class LLMClient:
}} }}
""" """
logger.debug(f"[analyze_dashboard] Calling LLM with model: {self.default_model}") messages = [
try: {
response = await self.client.chat.completions.create( "role": "user",
model=self.default_model, "content": [
messages=[ {"type": "text", "text": prompt},
{ {
"role": "user", "type": "image_url",
"content": [ "image_url": {
{"type": "text", "text": prompt}, "url": f"data:image/jpeg;base64,{base_64_image}"
{ }
"type": "image_url",
"image_url": {
"url": f"data:image/jpeg;base64,{base64_image}"
}
}
]
} }
], ]
response_format={"type": "json_object"} }
) ]
logger.debug(f"[analyze_dashboard] LLM Response: {response}")
except RateLimitError as e:
logger.warning(f"[analyze_dashboard] Rate limit hit: {str(e)}")
raise # tenacity will handle retry
except Exception as e:
logger.error(f"[analyze_dashboard] LLM call failed: {str(e)}")
raise
if not response or not hasattr(response, 'choices') or not response.choices: try:
error_info = getattr(response, 'error', 'No choices in response') return await self.get_json_completion(messages)
logger.error(f"[analyze_dashboard] Invalid LLM response. Error info: {error_info}") except Exception as e:
logger.error(f"[analyze_dashboard] Failed to get analysis: {str(e)}")
return { return {
"status": "FAIL", "status": "FAIL",
"summary": f"Failed to get response from LLM: {error_info}", "summary": f"Failed to get response from LLM: {str(e)}",
"issues": [{"severity": "FAIL", "message": "LLM provider returned empty or invalid response"}] "issues": [{"severity": "FAIL", "message": "LLM provider returned empty or invalid response"}]
} }
# [/DEF:LLMClient.analyze_dashboard:Function]
import json
result = json.loads(response.choices[0].message.content)
return result
# [/DEF:analyze_dashboard:Function]
# [/DEF:LLMClient:Class] # [/DEF:LLMClient:Class]
# [/DEF:backend.src.plugins.llm_analysis.service:Module] # [/DEF:backend/src/plugins/llm_analysis/service.py:Module]

View File

@@ -3,6 +3,7 @@
# @PURPOSE: Implements a plugin for mapping dataset columns using external database connections or Excel files. # @PURPOSE: Implements a plugin for mapping dataset columns using external database connections or Excel files.
# @LAYER: Plugins # @LAYER: Plugins
# @RELATION: Inherits from PluginBase. Uses DatasetMapper from superset_tool. # @RELATION: Inherits from PluginBase. Uses DatasetMapper from superset_tool.
# @RELATION: USES -> TaskContext
# @CONSTRAINT: Must use belief_scope for logging. # @CONSTRAINT: Must use belief_scope for logging.
# [SECTION: IMPORTS] # [SECTION: IMPORTS]
@@ -13,6 +14,7 @@ from ..core.logger import logger, belief_scope
from ..core.database import SessionLocal from ..core.database import SessionLocal
from ..models.connection import ConnectionConfig from ..models.connection import ConnectionConfig
from ..core.utils.dataset_mapper import DatasetMapper from ..core.utils.dataset_mapper import DatasetMapper
from ..core.task_manager.context import TaskContext
# [/SECTION] # [/SECTION]
# [DEF:MapperPlugin:Class] # [DEF:MapperPlugin:Class]
@@ -128,19 +130,27 @@ class MapperPlugin(PluginBase):
# [/DEF:get_schema:Function] # [/DEF:get_schema:Function]
# [DEF:execute:Function] # [DEF:execute:Function]
# @PURPOSE: Executes the dataset mapping logic. # @PURPOSE: Executes the dataset mapping logic with TaskContext support.
# @PARAM: params (Dict[str, Any]) - Mapping parameters. # @PARAM: params (Dict[str, Any]) - Mapping parameters.
# @PARAM: context (Optional[TaskContext]) - Task context for logging with source attribution.
# @PRE: Params contain valid 'env', 'dataset_id', and 'source'. params must be a dictionary. # @PRE: Params contain valid 'env', 'dataset_id', and 'source'. params must be a dictionary.
# @POST: Updates the dataset in Superset. # @POST: Updates the dataset in Superset.
# @RETURN: Dict[str, Any] - Execution status. # @RETURN: Dict[str, Any] - Execution status.
async def execute(self, params: Dict[str, Any]) -> Dict[str, Any]: async def execute(self, params: Dict[str, Any], context: Optional[TaskContext] = None) -> Dict[str, Any]:
with belief_scope("execute"): with belief_scope("execute"):
env_name = params.get("env") env_name = params.get("env")
dataset_id = params.get("dataset_id") dataset_id = params.get("dataset_id")
source = params.get("source") source = params.get("source")
# Use TaskContext logger if available, otherwise fall back to app logger
log = context.logger if context else logger
# Create sub-loggers for different components
superset_log = log.with_source("superset_api") if context else log
db_log = log.with_source("postgres") if context else log
if not env_name or dataset_id is None or not source: if not env_name or dataset_id is None or not source:
logger.error("[MapperPlugin.execute][State] Missing required parameters.") log.error("Missing required parameters: env, dataset_id, source")
raise ValueError("Missing required parameters: env, dataset_id, source") raise ValueError("Missing required parameters: env, dataset_id, source")
# Get config and initialize client # Get config and initialize client
@@ -148,7 +158,7 @@ class MapperPlugin(PluginBase):
config_manager = get_config_manager() config_manager = get_config_manager()
env_config = config_manager.get_environment(env_name) env_config = config_manager.get_environment(env_name)
if not env_config: if not env_config:
logger.error(f"[MapperPlugin.execute][State] Environment '{env_name}' not found.") log.error(f"Environment '{env_name}' not found in configuration.")
raise ValueError(f"Environment '{env_name}' not found in configuration.") raise ValueError(f"Environment '{env_name}' not found in configuration.")
client = SupersetClient(env_config) client = SupersetClient(env_config)
@@ -158,7 +168,7 @@ class MapperPlugin(PluginBase):
if source == "postgres": if source == "postgres":
connection_id = params.get("connection_id") connection_id = params.get("connection_id")
if not connection_id: if not connection_id:
logger.error("[MapperPlugin.execute][State] connection_id is required for postgres source.") log.error("connection_id is required for postgres source.")
raise ValueError("connection_id is required for postgres source.") raise ValueError("connection_id is required for postgres source.")
# Load connection from DB # Load connection from DB
@@ -166,7 +176,7 @@ class MapperPlugin(PluginBase):
try: try:
conn_config = db.query(ConnectionConfig).filter(ConnectionConfig.id == connection_id).first() conn_config = db.query(ConnectionConfig).filter(ConnectionConfig.id == connection_id).first()
if not conn_config: if not conn_config:
logger.error(f"[MapperPlugin.execute][State] Connection {connection_id} not found.") db_log.error(f"Connection {connection_id} not found.")
raise ValueError(f"Connection {connection_id} not found.") raise ValueError(f"Connection {connection_id} not found.")
postgres_config = { postgres_config = {
@@ -176,10 +186,11 @@ class MapperPlugin(PluginBase):
'host': conn_config.host, 'host': conn_config.host,
'port': str(conn_config.port) if conn_config.port else '5432' 'port': str(conn_config.port) if conn_config.port else '5432'
} }
db_log.debug(f"Loaded connection config for {conn_config.host}:{conn_config.port}/{conn_config.database}")
finally: finally:
db.close() db.close()
logger.info(f"[MapperPlugin.execute][Action] Starting mapping for dataset {dataset_id} in {env_name}") log.info(f"Starting mapping for dataset {dataset_id} in {env_name}")
mapper = DatasetMapper() mapper = DatasetMapper()
@@ -193,10 +204,10 @@ class MapperPlugin(PluginBase):
table_name=params.get("table_name"), table_name=params.get("table_name"),
table_schema=params.get("table_schema") or "public" table_schema=params.get("table_schema") or "public"
) )
logger.info(f"[MapperPlugin.execute][Success] Mapping completed for dataset {dataset_id}") superset_log.info(f"Mapping completed for dataset {dataset_id}")
return {"status": "success", "dataset_id": dataset_id} return {"status": "success", "dataset_id": dataset_id}
except Exception as e: except Exception as e:
logger.error(f"[MapperPlugin.execute][Failure] Mapping failed: {e}") log.error(f"Mapping failed: {e}")
raise raise
# [/DEF:execute:Function] # [/DEF:execute:Function]

View File

@@ -5,20 +5,20 @@
# @RELATION: IMPLEMENTS -> PluginBase
# @RELATION: DEPENDS_ON -> superset_tool.client
# @RELATION: DEPENDS_ON -> superset_tool.utils
+# @RELATION: USES -> TaskContext
-from typing import Dict, Any, List
+from typing import Dict, Any, Optional
-from pathlib import Path
-import zipfile
import re
from ..core.plugin_base import PluginBase
-from ..core.logger import belief_scope
+from ..core.logger import belief_scope, logger as app_logger
from ..core.superset_client import SupersetClient
-from ..core.utils.fileio import create_temp_file, update_yamls, create_dashboard_export
+from ..core.utils.fileio import create_temp_file
from ..dependencies import get_config_manager
from ..core.migration_engine import MigrationEngine
from ..core.database import SessionLocal
from ..models.mapping import DatabaseMapping, Environment
+from ..core.task_manager.context import TaskContext

# [DEF:MigrationPlugin:Class]
# @PURPOSE: Implementation of the migration plugin logic.
@@ -132,11 +132,12 @@ class MigrationPlugin(PluginBase):
    # [/DEF:get_schema:Function]
    # [DEF:execute:Function]
-   # @PURPOSE: Executes the dashboard migration logic.
+   # @PURPOSE: Executes the dashboard migration logic with TaskContext support.
    # @PARAM: params (Dict[str, Any]) - Migration parameters.
+   # @PARAM: context (Optional[TaskContext]) - Task context for logging with source attribution.
    # @PRE: Source and target environments must be configured.
    # @POST: Selected dashboards are migrated.
-   async def execute(self, params: Dict[str, Any]):
+   async def execute(self, params: Dict[str, Any], context: Optional[TaskContext] = None):
        with belief_scope("MigrationPlugin.execute"):
            source_env_id = params.get("source_env_id")
            target_env_id = params.get("target_env_id")
@@ -148,8 +149,8 @@ class MigrationPlugin(PluginBase):
            dashboard_regex = params.get("dashboard_regex")
            replace_db_config = params.get("replace_db_config", False)
-           from_db_id = params.get("from_db_id")
+           params.get("from_db_id")
-           to_db_id = params.get("to_db_id")
+           params.get("to_db_id")
            # [DEF:MigrationPlugin.execute:Action]
            # @PURPOSE: Execute the migration logic with proper task logging.
@@ -157,74 +158,15 @@ class MigrationPlugin(PluginBase):
            from ..dependencies import get_task_manager
            tm = get_task_manager()
-           class TaskLoggerProxy:
-               # [DEF:__init__:Function]
-               # @PURPOSE: Initializes the proxy logger.
-               # @PRE: None.
-               # @POST: Instance is initialized.
-               def __init__(self):
-                   with belief_scope("__init__"):
-                       # Initialize parent with dummy values since we override methods
-                       pass
-               # [/DEF:__init__:Function]
-               # [DEF:debug:Function]
-               # @PURPOSE: Logs a debug message to the task manager.
-               # @PRE: msg is a string.
-               # @POST: Log is added to task manager if task_id exists.
-               def debug(self, msg, *args, extra=None, **kwargs):
-                   with belief_scope("debug"):
-                       if task_id: tm._add_log(task_id, "DEBUG", msg, extra or {})
-               # [/DEF:debug:Function]
-               # [DEF:info:Function]
-               # @PURPOSE: Logs an info message to the task manager.
-               # @PRE: msg is a string.
-               # @POST: Log is added to task manager if task_id exists.
-               def info(self, msg, *args, extra=None, **kwargs):
-                   with belief_scope("info"):
-                       if task_id: tm._add_log(task_id, "INFO", msg, extra or {})
-               # [/DEF:info:Function]
-               # [DEF:warning:Function]
-               # @PURPOSE: Logs a warning message to the task manager.
-               # @PRE: msg is a string.
-               # @POST: Log is added to task manager if task_id exists.
-               def warning(self, msg, *args, extra=None, **kwargs):
-                   with belief_scope("warning"):
-                       if task_id: tm._add_log(task_id, "WARNING", msg, extra or {})
-               # [/DEF:warning:Function]
-               # [DEF:error:Function]
-               # @PURPOSE: Logs an error message to the task manager.
-               # @PRE: msg is a string.
-               # @POST: Log is added to task manager if task_id exists.
-               def error(self, msg, *args, extra=None, **kwargs):
-                   with belief_scope("error"):
-                       if task_id: tm._add_log(task_id, "ERROR", msg, extra or {})
-               # [/DEF:error:Function]
-               # [DEF:critical:Function]
-               # @PURPOSE: Logs a critical message to the task manager.
-               # @PRE: msg is a string.
-               # @POST: Log is added to task manager if task_id exists.
-               def critical(self, msg, *args, extra=None, **kwargs):
-                   with belief_scope("critical"):
-                       if task_id: tm._add_log(task_id, "ERROR", msg, extra or {})
-               # [/DEF:critical:Function]
-               # [DEF:exception:Function]
-               # @PURPOSE: Logs an exception message to the task manager.
-               # @PRE: msg is a string.
-               # @POST: Log is added to task manager if task_id exists.
-               def exception(self, msg, *args, **kwargs):
-                   with belief_scope("exception"):
-                       if task_id: tm._add_log(task_id, "ERROR", msg, {"exception": True})
-               # [/DEF:exception:Function]
-           logger = TaskLoggerProxy()
-           logger.info(f"[MigrationPlugin][Entry] Starting migration task.")
-           logger.info(f"[MigrationPlugin][Action] Params: {params}")
+           # Use TaskContext logger if available, otherwise fall back to app_logger
+           log = context.logger if context else app_logger
+
+           # Create sub-loggers for different components
+           superset_log = log.with_source("superset_api") if context else log
+           migration_log = log.with_source("migration") if context else log
+
+           log.info("Starting migration task.")
+           log.debug(f"Params: {params}")
            try:
                with belief_scope("execute"):
@@ -251,7 +193,7 @@ class MigrationPlugin(PluginBase):
                    from_env_name = src_env.name
                    to_env_name = tgt_env.name
-                   logger.info(f"[MigrationPlugin][State] Resolved environments: {from_env_name} -> {to_env_name}")
+                   log.info(f"Resolved environments: {from_env_name} -> {to_env_name}")
                    from_c = SupersetClient(src_env)
                    to_c = SupersetClient(tgt_env)
@@ -270,29 +212,36 @@ class MigrationPlugin(PluginBase):
                            d for d in all_dashboards if re.search(regex_str, d["dashboard_title"], re.IGNORECASE)
                        ]
                    else:
-                       logger.warning("[MigrationPlugin][State] No selection criteria provided (selected_ids or dashboard_regex).")
+                       log.warning("No selection criteria provided (selected_ids or dashboard_regex).")
                        return
                    if not dashboards_to_migrate:
-                       logger.warning("[MigrationPlugin][State] No dashboards found matching criteria.")
+                       log.warning("No dashboards found matching criteria.")
                        return
-                   # Fetch mappings from database
-                   db_mapping = {}
+                   # Get mappings from params
+                   db_mapping = params.get("db_mappings", {})
+                   if not isinstance(db_mapping, dict):
+                       db_mapping = {}
+
+                   # Fetch additional mappings from database if requested
                    if replace_db_config:
                        db = SessionLocal()
                        try:
                            # Find environment IDs by name
-                           src_env = db.query(Environment).filter(Environment.name == from_env_name).first()
-                           tgt_env = db.query(Environment).filter(Environment.name == to_env_name).first()
-                           if src_env and tgt_env:
-                               mappings = db.query(DatabaseMapping).filter(
-                                   DatabaseMapping.source_env_id == src_env.id,
-                                   DatabaseMapping.target_env_id == tgt_env.id
-                               ).all()
-                               db_mapping = {m.source_db_uuid: m.target_db_uuid for m in mappings}
-                               logger.info(f"[MigrationPlugin][State] Loaded {len(db_mapping)} database mappings.")
+                           src_env_db = db.query(Environment).filter(Environment.name == from_env_name).first()
+                           tgt_env_db = db.query(Environment).filter(Environment.name == to_env_name).first()
+                           if src_env_db and tgt_env_db:
+                               stored_mappings = db.query(DatabaseMapping).filter(
+                                   DatabaseMapping.source_env_id == src_env_db.id,
+                                   DatabaseMapping.target_env_id == tgt_env_db.id
+                               ).all()
+                               # Provided mappings override stored ones
+                               stored_map_dict = {m.source_db_uuid: m.target_db_uuid for m in stored_mappings}
+                               stored_map_dict.update(db_mapping)
+                               db_mapping = stored_map_dict
+                               log.info(f"Loaded {len(stored_mappings)} database mappings from database.")
                        finally:
                            db.close()
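The merge order above is easy to misread: stored DatabaseMapping rows seed the dict, then the request-supplied db_mappings overwrite them, so callers can override persisted mappings per run. A worked example of the dict.update() precedence (UUIDs are illustrative, not from the project):

    stored = {"src-a": "tgt-old", "src-b": "tgt-b"}   # from DatabaseMapping rows
    provided = {"src-a": "tgt-new"}                   # from params["db_mappings"]
    merged = dict(stored)
    merged.update(provided)                           # provided wins on conflict
    assert merged == {"src-a": "tgt-new", "src-b": "tgt-b"}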
@@ -311,7 +260,7 @@ class MigrationPlugin(PluginBase):
                    if not success and replace_db_config:
                        # Signal missing mapping and wait (only if we care about mappings)
                        if task_id:
-                           logger.info(f"[MigrationPlugin][Action] Pausing for missing mapping in task {task_id}")
+                           log.info(f"Pausing for missing mapping in task {task_id}")
                            # In a real scenario, we'd pass the missing DB info to the frontend
                            # For this task, we'll just simulate the wait
                            await tm.wait_for_resolution(task_id)
@@ -333,9 +282,9 @@ class MigrationPlugin(PluginBase):
                            if success:
                                to_c.import_dashboard(file_name=tmp_new_zip, dash_id=dash_id, dash_slug=dash_slug)
                            else:
-                               logger.error(f"[MigrationPlugin][Failure] Failed to transform ZIP for dashboard {title}")
+                               migration_log.error(f"Failed to transform ZIP for dashboard {title}")
-                           logger.info(f"[MigrationPlugin][Success] Dashboard {title} imported.")
+                           superset_log.info(f"Dashboard {title} imported.")
                        except Exception as exc:
                            # Check for password error
                            error_msg = str(exc)
@@ -357,7 +306,7 @@ class MigrationPlugin(PluginBase):
                            if match_alt:
                                db_name = match_alt.group(1)
-                           logger.warning(f"[MigrationPlugin][Action] Detected missing password for database: {db_name}")
+                           app_logger.warning(f"[MigrationPlugin][Action] Detected missing password for database: {db_name}")
                            if task_id:
                                input_request = {
@@ -376,19 +325,19 @@ class MigrationPlugin(PluginBase):
                            # Retry import with password
                            if passwords:
-                               logger.info(f"[MigrationPlugin][Action] Retrying import for {title} with provided passwords.")
+                               app_logger.info(f"[MigrationPlugin][Action] Retrying import for {title} with provided passwords.")
                                to_c.import_dashboard(file_name=tmp_new_zip, dash_id=dash_id, dash_slug=dash_slug, passwords=passwords)
-                               logger.info(f"[MigrationPlugin][Success] Dashboard {title} imported after password injection.")
+                               app_logger.info(f"[MigrationPlugin][Success] Dashboard {title} imported after password injection.")
                                # Clear passwords from params after use for security
                                if "passwords" in task.params:
                                    del task.params["passwords"]
                                continue
-                           logger.error(f"[MigrationPlugin][Failure] Failed to migrate dashboard {title}: {exc}", exc_info=True)
+                           app_logger.error(f"[MigrationPlugin][Failure] Failed to migrate dashboard {title}: {exc}", exc_info=True)
-               logger.info("[MigrationPlugin][Exit] Migration finished.")
+               app_logger.info("[MigrationPlugin][Exit] Migration finished.")
            except Exception as e:
-               logger.critical(f"[MigrationPlugin][Failure] Fatal error during migration: {e}", exc_info=True)
+               app_logger.critical(f"[MigrationPlugin][Failure] Fatal error during migration: {e}", exc_info=True)
                raise e
        # [/DEF:MigrationPlugin.execute:Action]
    # [/DEF:execute:Function]
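For orientation, the new signature keeps the old call shape working; both of the following would be valid after this change. This is a sketch — the TaskContext construction is assumed and not shown in this diff:

    # Hypothetical call sites; task_context creation is an assumption.
    plugin = MigrationPlugin()
    await plugin.execute(params)                        # no context: falls back to app_logger
    await plugin.execute(params, context=task_context)  # context: source-attributed task logs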

Some files were not shown because too many files have changed in this diff.