chat worked

This commit is contained in:
2026-02-23 20:20:25 +03:00
parent 18e96a58bc
commit 40e6d8cd4c
29 changed files with 1033 additions and 196 deletions

View File

@@ -368,4 +368,27 @@ def test_status_query_without_task_id_returns_latest_user_task():
# [/DEF:test_status_query_without_task_id_returns_latest_user_task:Function] # [/DEF:test_status_query_without_task_id_returns_latest_user_task:Function]
# [DEF:test_llm_validation_missing_dashboard_returns_needs_clarification:Function]
# @PURPOSE: LLM validation command without resolvable dashboard id must request clarification instead of generic failure.
# @PRE: Command intent resolves to run_llm_validation but dashboard id cannot be inferred.
# @POST: Assistant response state is needs_clarification with guidance text.
def test_llm_validation_missing_dashboard_returns_needs_clarification():
    _clear_assistant_state()
    # "test1" is a slug-like dashboard reference that the fake config cannot resolve.
    request = assistant_module.AssistantMessageRequest(
        message="Я хочу сделать валидацию дашборда test1"
    )
    coro = assistant_module.send_message(
        request=request,
        current_user=_admin_user(),
        task_manager=_FakeTaskManager(),
        config_manager=_FakeConfigManager(),
        db=_FakeDb(),
    )
    response = _run_async(coro)
    assert response.state == "needs_clarification"
    # Either the localized guidance or the raw technical detail is acceptable.
    assert ("Укажите" in response.text) or ("Missing dashboard_id" in response.text)
# [/DEF:test_llm_validation_missing_dashboard_returns_needs_clarification:Function]
# [/DEF:backend.src.api.routes.__tests__.test_assistant_api:Module] # [/DEF:backend.src.api.routes.__tests__.test_assistant_api:Module]

View File

@@ -9,6 +9,7 @@
from __future__ import annotations from __future__ import annotations
import json
import re import re
import uuid import uuid
from datetime import datetime, timedelta from datetime import datetime, timedelta
@@ -26,6 +27,9 @@ from ...core.config_manager import ConfigManager
from ...core.database import get_db from ...core.database import get_db
from ...services.git_service import GitService from ...services.git_service import GitService
from ...services.llm_provider import LLMProviderService from ...services.llm_provider import LLMProviderService
from ...core.superset_client import SupersetClient
from ...plugins.llm_analysis.service import LLMClient
from ...plugins.llm_analysis.models import LLMProviderType
from ...schemas.auth import User from ...schemas.auth import User
from ...models.assistant import ( from ...models.assistant import (
AssistantAuditRecord, AssistantAuditRecord,
@@ -100,6 +104,17 @@ USER_ACTIVE_CONVERSATION: Dict[str, str] = {}
CONFIRMATIONS: Dict[str, ConfirmationRecord] = {} CONFIRMATIONS: Dict[str, ConfirmationRecord] = {}
ASSISTANT_AUDIT: Dict[str, List[Dict[str, Any]]] = {} ASSISTANT_AUDIT: Dict[str, List[Dict[str, Any]]] = {}
INTENT_PERMISSION_CHECKS: Dict[str, List[Tuple[str, str]]] = {
"get_task_status": [("tasks", "READ")],
"create_branch": [("plugin:git", "EXECUTE")],
"commit_changes": [("plugin:git", "EXECUTE")],
"deploy_dashboard": [("plugin:git", "EXECUTE")],
"execute_migration": [("plugin:migration", "EXECUTE"), ("plugin:superset-migration", "EXECUTE")],
"run_backup": [("plugin:superset-backup", "EXECUTE"), ("plugin:backup", "EXECUTE")],
"run_llm_validation": [("plugin:llm_dashboard_validation", "EXECUTE")],
"run_llm_documentation": [("plugin:llm_documentation", "EXECUTE")],
}
# [DEF:_append_history:Function] # [DEF:_append_history:Function]
# @PURPOSE: Append conversation message to in-memory history buffer. # @PURPOSE: Append conversation message to in-memory history buffer.
@@ -387,6 +402,69 @@ def _resolve_provider_id(provider_token: Optional[str], db: Session) -> Optional
# [/DEF:_resolve_provider_id:Function] # [/DEF:_resolve_provider_id:Function]
# [DEF:_get_default_environment_id:Function]
# @PURPOSE: Resolve default environment id from settings or first configured environment.
# @PRE: config_manager returns environments list.
# @POST: Returns default environment id or None when environment list is empty.
def _get_default_environment_id(config_manager: ConfigManager) -> Optional[str]:
configured = config_manager.get_environments()
if not configured:
return None
preferred = None
if hasattr(config_manager, "get_config"):
try:
preferred = config_manager.get_config().settings.default_environment_id
except Exception:
preferred = None
if preferred and any(env.id == preferred for env in configured):
return preferred
explicit_default = next((env.id for env in configured if getattr(env, "is_default", False)), None)
return explicit_default or configured[0].id
# [/DEF:_get_default_environment_id:Function]
# [DEF:_resolve_dashboard_id_by_ref:Function]
# @PURPOSE: Resolve dashboard id by title or slug reference in selected environment.
# @PRE: dashboard_ref is a non-empty string-like token.
# @POST: Returns dashboard id when uniquely matched, otherwise None.
def _resolve_dashboard_id_by_ref(
    dashboard_ref: Optional[str],
    env_id: Optional[str],
    config_manager: ConfigManager,
) -> Optional[int]:
    if not dashboard_ref or not env_id:
        return None
    env = next((item for item in config_manager.get_environments() if item.id == env_id), None)
    if not env:
        return None
    needle = dashboard_ref.strip().lower()
    try:
        client = SupersetClient(env)
        # page_size=200 caps the lookup; larger installs may miss dashboards beyond page 1.
        _, dashboards = client.get_dashboards(query={"page_size": 200})
    except Exception as exc:
        logger.warning(f"[assistant.dashboard_resolve][failed] ref={dashboard_ref} env={env_id} error={exc}")
        return None
    # Exact match on slug or title (both common API key spellings).
    exact = next(
        (
            d for d in dashboards
            if str(d.get("slug", "")).lower() == needle
            or str(d.get("dashboard_title", "")).lower() == needle
            or str(d.get("title", "")).lower() == needle
        ),
        None,
    )
    # FIX: guard against a matched payload without an "id" — int(None) used to
    # raise TypeError here, while the partial-match branch already guarded it.
    if exact is not None and exact.get("id") is not None:
        return int(exact["id"])
    # Fall back to substring match, accepted only when unambiguous.
    partial = [d for d in dashboards if needle in str(d.get("dashboard_title", d.get("title", ""))).lower()]
    if len(partial) == 1 and partial[0].get("id") is not None:
        return int(partial[0]["id"])
    return None
# [/DEF:_resolve_dashboard_id_by_ref:Function]
# [DEF:_parse_command:Function] # [DEF:_parse_command:Function]
# @PURPOSE: Deterministically parse RU/EN command text into intent payload. # @PURPOSE: Deterministically parse RU/EN command text into intent payload.
# @PRE: message contains raw user text and config manager resolves environments. # @PRE: message contains raw user text and config manager resolves environments.
@@ -396,6 +474,10 @@ def _parse_command(message: str, config_manager: ConfigManager) -> Dict[str, Any
lower = text.lower() lower = text.lower()
dashboard_id = _extract_id(lower, [r"(?:дашборд\w*|dashboard)\s*(?:id\s*)?(\d+)"]) dashboard_id = _extract_id(lower, [r"(?:дашборд\w*|dashboard)\s*(?:id\s*)?(\d+)"])
dashboard_ref = _extract_id(
lower,
[r"(?:дашборд\w*|dashboard)\s*(?:id\s*)?([a-zа-я0-9._-]+)"],
)
dataset_id = _extract_id(lower, [r"(?:датасет\w*|dataset)\s*(?:id\s*)?(\d+)"]) dataset_id = _extract_id(lower, [r"(?:датасет\w*|dataset)\s*(?:id\s*)?(\d+)"])
# Accept short and long task ids (e.g., task-1, task-abc123, UUIDs). # Accept short and long task ids (e.g., task-1, task-abc123, UUIDs).
task_id = _extract_id(lower, [r"(task[-_a-z0-9]{1,}|[0-9a-f]{8}-[0-9a-f-]{27,})"]) task_id = _extract_id(lower, [r"(task[-_a-z0-9]{1,}|[0-9a-f]{8}-[0-9a-f-]{27,})"])
@@ -500,6 +582,7 @@ def _parse_command(message: str, config_manager: ConfigManager) -> Dict[str, Any
"operation": "run_llm_validation", "operation": "run_llm_validation",
"entities": { "entities": {
"dashboard_id": int(dashboard_id) if dashboard_id else None, "dashboard_id": int(dashboard_id) if dashboard_id else None,
"dashboard_ref": dashboard_ref if (dashboard_ref and not dashboard_ref.isdigit()) else None,
"environment": env_match, "environment": env_match,
"provider": provider_match, "provider": provider_match,
}, },
@@ -553,24 +636,272 @@ def _check_any_permission(current_user: User, checks: List[Tuple[str, str]]):
# [/DEF:_check_any_permission:Function] # [/DEF:_check_any_permission:Function]
# [DEF:_has_any_permission:Function]
# @PURPOSE: Check whether user has at least one permission tuple from the provided list.
# @PRE: current_user and checks list are valid.
# @POST: Returns True when at least one permission check passes.
def _has_any_permission(current_user: User, checks: List[Tuple[str, str]]) -> bool:
    # Delegate to the raising checker and translate its HTTP 403 into a boolean.
    try:
        _check_any_permission(current_user, checks)
    except HTTPException:
        return False
    return True
# [/DEF:_has_any_permission:Function]
# [DEF:_build_tool_catalog:Function]
# @PURPOSE: Build current-user tool catalog for LLM planner with operation contracts and defaults.
# @PRE: current_user is authenticated; config/db are available.
# @POST: Returns list of executable tools filtered by permission and runtime availability.
def _build_tool_catalog(current_user: User, config_manager: ConfigManager, db: Session) -> List[Dict[str, Any]]:
    # FIX: removed an unused `envs = config_manager.get_environments()` local;
    # environments are only consulted indirectly via the default-env helper.
    default_env_id = _get_default_environment_id(config_manager)
    providers = LLMProviderService(db).get_all_providers()
    # Prefer the active provider; otherwise fall back to the first configured one.
    active_provider = next((p.id for p in providers if p.is_active), None)
    fallback_provider = active_provider or (providers[0].id if providers else None)
    candidates: List[Dict[str, Any]] = [
        {
            "operation": "get_task_status",
            "domain": "status",
            "description": "Get task status by task_id or latest user task",
            "required_entities": [],
            "optional_entities": ["task_id"],
            "risk_level": "safe",
            "requires_confirmation": False,
        },
        {
            "operation": "create_branch",
            "domain": "git",
            "description": "Create git branch for dashboard",
            "required_entities": ["dashboard_id", "branch_name"],
            "optional_entities": [],
            "risk_level": "guarded",
            "requires_confirmation": False,
        },
        {
            "operation": "commit_changes",
            "domain": "git",
            "description": "Commit dashboard repository changes",
            "required_entities": ["dashboard_id"],
            "optional_entities": ["message"],
            "risk_level": "guarded",
            "requires_confirmation": False,
        },
        {
            "operation": "deploy_dashboard",
            "domain": "git",
            "description": "Deploy dashboard to target environment",
            "required_entities": ["dashboard_id", "environment"],
            "optional_entities": [],
            "risk_level": "guarded",
            "requires_confirmation": False,
        },
        {
            "operation": "execute_migration",
            "domain": "migration",
            "description": "Run dashboard migration between environments",
            "required_entities": ["dashboard_id", "source_env", "target_env"],
            "optional_entities": [],
            "risk_level": "guarded",
            "requires_confirmation": False,
        },
        {
            "operation": "run_backup",
            "domain": "backup",
            "description": "Run backup for environment or specific dashboard",
            "required_entities": ["environment"],
            "optional_entities": ["dashboard_id"],
            "risk_level": "guarded",
            "requires_confirmation": False,
        },
        {
            "operation": "run_llm_validation",
            "domain": "llm",
            "description": "Run LLM dashboard validation",
            "required_entities": ["dashboard_id"],
            "optional_entities": ["dashboard_ref", "environment", "provider"],
            "defaults": {"environment": default_env_id, "provider": fallback_provider},
            "risk_level": "guarded",
            "requires_confirmation": False,
        },
        {
            "operation": "run_llm_documentation",
            "domain": "llm",
            "description": "Generate dataset documentation via LLM",
            "required_entities": ["dataset_id"],
            "optional_entities": ["environment", "provider"],
            "defaults": {"environment": default_env_id, "provider": fallback_provider},
            "risk_level": "guarded",
            "requires_confirmation": False,
        },
    ]
    # Keep only tools the user may run; operations without a permission entry
    # in INTENT_PERMISSION_CHECKS are always available.
    available: List[Dict[str, Any]] = []
    for tool in candidates:
        checks = INTENT_PERMISSION_CHECKS.get(tool["operation"], [])
        if checks and not _has_any_permission(current_user, checks):
            continue
        available.append(tool)
    return available
# [/DEF:_build_tool_catalog:Function]
# [DEF:_coerce_intent_entities:Function]
# @PURPOSE: Normalize intent entity value types from LLM output to route-compatible values.
# @PRE: intent contains entities dict or missing entities.
# @POST: Returned intent has numeric ids coerced where possible and string values stripped.
def _coerce_intent_entities(intent: Dict[str, Any]) -> Dict[str, Any]:
entities = intent.get("entities")
if not isinstance(entities, dict):
intent["entities"] = {}
entities = intent["entities"]
for key in ("dashboard_id", "dataset_id"):
value = entities.get(key)
if isinstance(value, str) and value.strip().isdigit():
entities[key] = int(value.strip())
for key, value in list(entities.items()):
if isinstance(value, str):
entities[key] = value.strip()
return intent
# [/DEF:_coerce_intent_entities:Function]
# [DEF:_clarification_text_for_intent:Function]
# @PURPOSE: Convert technical missing-parameter errors into user-facing clarification prompts.
# @PRE: state was classified as needs_clarification for current intent/error combination.
# @POST: Returned text is human-readable and actionable for target operation.
def _clarification_text_for_intent(intent: Optional[Dict[str, Any]], detail_text: str) -> str:
operation = (intent or {}).get("operation")
guidance_by_operation: Dict[str, str] = {
"run_llm_validation": (
"Нужно уточнение для запуска LLM-валидации: Укажите дашборд (id или slug), окружение и провайдер LLM."
),
"run_llm_documentation": (
"Нужно уточнение для генерации документации: Укажите dataset_id, окружение и провайдер LLM."
),
"create_branch": "Нужно уточнение: укажите dashboard_id и имя ветки.",
"commit_changes": "Нужно уточнение: укажите dashboard_id для коммита.",
"deploy_dashboard": "Нужно уточнение: укажите dashboard_id и целевое окружение.",
"execute_migration": "Нужно уточнение: укажите dashboard_id, source_env и target_env.",
"run_backup": "Нужно уточнение: укажите окружение для бэкапа.",
}
return guidance_by_operation.get(operation, detail_text)
# [/DEF:_clarification_text_for_intent:Function]
# [DEF:_plan_intent_with_llm:Function]
# @PURPOSE: Use active LLM provider to select best tool/operation from dynamic catalog.
# @PRE: tools list contains allowed operations for current user.
# @POST: Returns normalized intent dict when planning succeeds; otherwise None.
# Returns None on any precondition failure (no tools, no active provider, no
# API key, planner error, malformed response) so the caller can fall back to
# deterministic parsing.
async def _plan_intent_with_llm(
    message: str,
    tools: List[Dict[str, Any]],
    db: Session,
    config_manager: ConfigManager,
) -> Optional[Dict[str, Any]]:
    if not tools:
        return None
    # Only an active provider with a decryptable API key can plan.
    llm_service = LLMProviderService(db)
    providers = llm_service.get_all_providers()
    provider = next((p for p in providers if p.is_active), None)
    if not provider:
        return None
    api_key = llm_service.get_decrypted_api_key(provider.id)
    if not api_key:
        return None
    planner = LLMClient(
        provider_type=LLMProviderType(provider.provider_type),
        api_key=api_key,
        base_url=provider.base_url,
        default_model=provider.default_model,
    )
    # Strict-JSON contract for the planner; shape mirrors the intent dict below.
    system_instruction = (
        "You are a deterministic intent planner for backend tools.\n"
        "Choose exactly one operation from available_tools or return clarify.\n"
        "Output strict JSON object:\n"
        "{"
        "\"domain\": string, "
        "\"operation\": string, "
        "\"entities\": object, "
        "\"confidence\": number, "
        "\"risk_level\": \"safe\"|\"guarded\"|\"dangerous\", "
        "\"requires_confirmation\": boolean"
        "}\n"
        "Rules:\n"
        "- Use only operation names from available_tools.\n"
        "- If input is ambiguous, operation must be \"clarify\" with low confidence.\n"
        "- Keep entities minimal and factual.\n"
    )
    payload = {
        "available_tools": tools,
        "user_message": message,
        "known_environments": [{"id": e.id, "name": e.name} for e in config_manager.get_environments()],
    }
    try:
        response = await planner.get_json_completion(
            [
                {"role": "system", "content": system_instruction},
                {"role": "user", "content": json.dumps(payload, ensure_ascii=False)},
            ]
        )
    except Exception as exc:
        # Planner unavailability is non-fatal: log and let regex parsing take over.
        logger.warning(f"[assistant.planner][fallback] LLM planner unavailable: {exc}")
        return None
    if not isinstance(response, dict):
        return None
    operation = response.get("operation")
    valid_ops = {tool["operation"] for tool in tools}
    # "clarify" is a synthetic low-confidence intent, not a catalog operation.
    if operation == "clarify":
        return {
            "domain": "unknown",
            "operation": "clarify",
            "entities": {},
            "confidence": float(response.get("confidence", 0.3)),
            "risk_level": "safe",
            "requires_confirmation": False,
        }
    # Reject hallucinated operations not present in the user's catalog.
    if operation not in valid_ops:
        return None
    by_operation = {tool["operation"]: tool for tool in tools}
    selected = by_operation[operation]
    # Merge planner output with catalog metadata; catalog wins on missing fields.
    intent = {
        "domain": response.get("domain") or selected["domain"],
        "operation": operation,
        "entities": response.get("entities", {}),
        "confidence": float(response.get("confidence", 0.75)),
        "risk_level": response.get("risk_level") or selected["risk_level"],
        "requires_confirmation": bool(response.get("requires_confirmation", selected["requires_confirmation"])),
    }
    intent = _coerce_intent_entities(intent)
    # Backfill catalog defaults (e.g. default environment/provider) for unset entities.
    defaults = selected.get("defaults") or {}
    for key, value in defaults.items():
        if value and not intent["entities"].get(key):
            intent["entities"][key] = value
    # Deploy/migration against a production environment is escalated to
    # dangerous and forced through confirmation.
    if operation in {"deploy_dashboard", "execute_migration"}:
        env_token = intent["entities"].get("environment") or intent["entities"].get("target_env")
        if _is_production_env(env_token, config_manager):
            intent["risk_level"] = "dangerous"
            intent["requires_confirmation"] = True
    return intent
# [/DEF:_plan_intent_with_llm:Function]
# [DEF:_authorize_intent:Function] # [DEF:_authorize_intent:Function]
# @PURPOSE: Validate user permissions for parsed intent before confirmation/dispatch. # @PURPOSE: Validate user permissions for parsed intent before confirmation/dispatch.
# @PRE: intent.operation is present for known assistant command domains. # @PRE: intent.operation is present for known assistant command domains.
# @POST: Returns if authorized; raises HTTPException(403) when denied. # @POST: Returns if authorized; raises HTTPException(403) when denied.
def _authorize_intent(intent: Dict[str, Any], current_user: User): def _authorize_intent(intent: Dict[str, Any], current_user: User):
operation = intent.get("operation") operation = intent.get("operation")
checks_map: Dict[str, List[Tuple[str, str]]] = { if operation in INTENT_PERMISSION_CHECKS:
"get_task_status": [("tasks", "READ")], _check_any_permission(current_user, INTENT_PERMISSION_CHECKS[operation])
"create_branch": [("plugin:git", "EXECUTE")],
"commit_changes": [("plugin:git", "EXECUTE")],
"deploy_dashboard": [("plugin:git", "EXECUTE")],
"execute_migration": [("plugin:migration", "EXECUTE"), ("plugin:superset-migration", "EXECUTE")],
"run_backup": [("plugin:superset-backup", "EXECUTE"), ("plugin:backup", "EXECUTE")],
"run_llm_validation": [("plugin:llm_dashboard_validation", "EXECUTE")],
"run_llm_documentation": [("plugin:llm_documentation", "EXECUTE")],
}
if operation in checks_map:
_check_any_permission(current_user, checks_map[operation])
# [/DEF:_authorize_intent:Function] # [/DEF:_authorize_intent:Function]
@@ -708,11 +1039,20 @@ async def _dispatch_intent(
if operation == "run_llm_validation": if operation == "run_llm_validation":
_check_any_permission(current_user, [("plugin:llm_dashboard_validation", "EXECUTE")]) _check_any_permission(current_user, [("plugin:llm_dashboard_validation", "EXECUTE")])
env_id = _resolve_env_id(entities.get("environment"), config_manager) or _get_default_environment_id(config_manager)
dashboard_id = entities.get("dashboard_id") dashboard_id = entities.get("dashboard_id")
env_id = _resolve_env_id(entities.get("environment"), config_manager) if not dashboard_id:
dashboard_id = _resolve_dashboard_id_by_ref(
entities.get("dashboard_ref"),
env_id,
config_manager,
)
provider_id = _resolve_provider_id(entities.get("provider"), db) provider_id = _resolve_provider_id(entities.get("provider"), db)
if not dashboard_id or not env_id or not provider_id: if not dashboard_id or not env_id or not provider_id:
raise HTTPException(status_code=400, detail="Missing dashboard_id/environment/provider") raise HTTPException(
status_code=422,
detail="Missing dashboard_id/environment/provider. Укажите ID/slug дашборда или окружение.",
)
task = await task_manager.create_task( task = await task_manager.create_task(
plugin_id="llm_dashboard_validation", plugin_id="llm_dashboard_validation",
@@ -782,6 +1122,13 @@ async def send_message(
_append_history(user_id, conversation_id, "user", request.message) _append_history(user_id, conversation_id, "user", request.message)
_persist_message(db, user_id, conversation_id, "user", request.message) _persist_message(db, user_id, conversation_id, "user", request.message)
tools_catalog = _build_tool_catalog(current_user, config_manager, db)
intent = None
try:
intent = await _plan_intent_with_llm(request.message, tools_catalog, db, config_manager)
except Exception as exc:
logger.warning(f"[assistant.planner][fallback] Planner error: {exc}")
if not intent:
intent = _parse_command(request.message, config_manager) intent = _parse_command(request.message, config_manager)
confidence = float(intent.get("confidence", 0.0)) confidence = float(intent.get("confidence", 0.0))
@@ -886,8 +1233,18 @@ async def send_message(
created_at=datetime.utcnow(), created_at=datetime.utcnow(),
) )
except HTTPException as exc: except HTTPException as exc:
state = "denied" if exc.status_code == status.HTTP_403_FORBIDDEN else "failed" detail_text = str(exc.detail)
text = str(exc.detail) is_clarification_error = exc.status_code in (400, 422) and (
detail_text.lower().startswith("missing")
or "укажите" in detail_text.lower()
)
if exc.status_code == status.HTTP_403_FORBIDDEN:
state = "denied"
elif is_clarification_error:
state = "needs_clarification"
else:
state = "failed"
text = _clarification_text_for_intent(intent, detail_text) if state == "needs_clarification" else detail_text
_append_history(user_id, conversation_id, "assistant", text, state=state) _append_history(user_id, conversation_id, "assistant", text, state=state)
_persist_message(db, user_id, conversation_id, "assistant", text, state=state, metadata={"intent": intent}) _persist_message(db, user_id, conversation_id, "assistant", text, state=state, metadata={"intent": intent})
audit_payload = {"decision": state, "message": request.message, "intent": intent, "error": text} audit_payload = {"decision": state, "message": request.message, "intent": intent, "error": text}
@@ -899,7 +1256,7 @@ async def send_message(
state=state, state=state,
text=text, text=text,
intent=intent, intent=intent,
actions=[], actions=[AssistantAction(type="rephrase", label="Rephrase command")] if state == "needs_clarification" else [],
created_at=datetime.utcnow(), created_at=datetime.utcnow(),
) )
# [/DEF:send_message:Function] # [/DEF:send_message:Function]

View File

@@ -25,6 +25,7 @@ from src.api.routes.git_schemas import (
) )
from src.services.git_service import GitService from src.services.git_service import GitService
from src.core.logger import logger, belief_scope from src.core.logger import logger, belief_scope
from ...services.llm_prompt_templates import DEFAULT_LLM_PROMPTS, normalize_llm_settings
router = APIRouter(tags=["git"]) router = APIRouter(tags=["git"])
git_service = GitService() git_service = GitService()
@@ -406,6 +407,7 @@ async def get_repository_diff(
async def generate_commit_message( async def generate_commit_message(
dashboard_id: int, dashboard_id: int,
db: Session = Depends(get_db), db: Session = Depends(get_db),
config_manager = Depends(get_config_manager),
_ = Depends(has_permission("plugin:git", "EXECUTE")) _ = Depends(has_permission("plugin:git", "EXECUTE"))
): ):
with belief_scope("generate_commit_message"): with belief_scope("generate_commit_message"):
@@ -445,7 +447,16 @@ async def generate_commit_message(
# 4. Generate Message # 4. Generate Message
from ...plugins.git.llm_extension import GitLLMExtension from ...plugins.git.llm_extension import GitLLMExtension
extension = GitLLMExtension(client) extension = GitLLMExtension(client)
message = await extension.suggest_commit_message(diff, history) llm_settings = normalize_llm_settings(config_manager.get_config().settings.llm)
git_prompt = llm_settings["prompts"].get(
"git_commit_prompt",
DEFAULT_LLM_PROMPTS["git_commit_prompt"],
)
message = await extension.suggest_commit_message(
diff,
history,
prompt_template=git_prompt,
)
return {"message": message} return {"message": message}
except Exception as e: except Exception as e:

View File

@@ -19,6 +19,7 @@ from ...dependencies import get_config_manager, has_permission
from ...core.config_manager import ConfigManager from ...core.config_manager import ConfigManager
from ...core.logger import logger, belief_scope from ...core.logger import logger, belief_scope
from ...core.superset_client import SupersetClient from ...core.superset_client import SupersetClient
from ...services.llm_prompt_templates import normalize_llm_settings
# [/SECTION] # [/SECTION]
# [DEF:LoggingConfigResponse:Class] # [DEF:LoggingConfigResponse:Class]
@@ -45,6 +46,7 @@ async def get_settings(
with belief_scope("get_settings"): with belief_scope("get_settings"):
logger.info("[get_settings][Entry] Fetching all settings") logger.info("[get_settings][Entry] Fetching all settings")
config = config_manager.get_config().copy(deep=True) config = config_manager.get_config().copy(deep=True)
config.settings.llm = normalize_llm_settings(config.settings.llm)
# Mask passwords # Mask passwords
for env in config.environments: for env in config.environments:
if env.password: if env.password:
@@ -323,10 +325,12 @@ async def get_consolidated_settings(
finally: finally:
db.close() db.close()
normalized_llm = normalize_llm_settings(config.settings.llm)
return ConsolidatedSettingsResponse( return ConsolidatedSettingsResponse(
environments=[env.dict() for env in config.environments], environments=[env.dict() for env in config.environments],
connections=config.settings.connections, connections=config.settings.connections,
llm=config.settings.llm, llm=normalized_llm,
llm_providers=llm_providers_list, llm_providers=llm_providers_list,
logging=config.settings.logging.dict(), logging=config.settings.logging.dict(),
storage=config.settings.storage.dict() storage=config.settings.storage.dict()
@@ -355,7 +359,7 @@ async def update_consolidated_settings(
# Update LLM if provided # Update LLM if provided
if "llm" in settings_patch: if "llm" in settings_patch:
current_settings.llm = settings_patch["llm"] current_settings.llm = normalize_llm_settings(settings_patch["llm"])
# Update Logging if provided # Update Logging if provided
if "logging" in settings_patch: if "logging" in settings_patch:

View File

@@ -9,6 +9,7 @@
from pydantic import BaseModel, Field from pydantic import BaseModel, Field
from typing import List, Optional from typing import List, Optional
from ..models.storage import StorageConfig from ..models.storage import StorageConfig
from ..services.llm_prompt_templates import DEFAULT_LLM_PROMPTS
# [DEF:Schedule:DataClass] # [DEF:Schedule:DataClass]
# @PURPOSE: Represents a backup schedule configuration. # @PURPOSE: Represents a backup schedule configuration.
@@ -49,7 +50,13 @@ class GlobalSettings(BaseModel):
default_environment_id: Optional[str] = None default_environment_id: Optional[str] = None
logging: LoggingConfig = Field(default_factory=LoggingConfig) logging: LoggingConfig = Field(default_factory=LoggingConfig)
connections: List[dict] = [] connections: List[dict] = []
llm: dict = Field(default_factory=lambda: {"providers": [], "default_provider": ""}) llm: dict = Field(
default_factory=lambda: {
"providers": [],
"default_provider": "",
"prompts": dict(DEFAULT_LLM_PROMPTS),
}
)
# Task retention settings # Task retention settings
task_retention_days: int = 30 task_retention_days: int = 30

View File

@@ -9,6 +9,7 @@ from typing import List
from tenacity import retry, stop_after_attempt, wait_exponential from tenacity import retry, stop_after_attempt, wait_exponential
from ..llm_analysis.service import LLMClient from ..llm_analysis.service import LLMClient
from ...core.logger import belief_scope, logger from ...core.logger import belief_scope, logger
from ...services.llm_prompt_templates import DEFAULT_LLM_PROMPTS, render_prompt
# [DEF:GitLLMExtension:Class] # [DEF:GitLLMExtension:Class]
# @PURPOSE: Provides LLM capabilities to the Git plugin. # @PURPOSE: Provides LLM capabilities to the Git plugin.
@@ -26,21 +27,18 @@ class GitLLMExtension:
wait=wait_exponential(multiplier=1, min=2, max=10), wait=wait_exponential(multiplier=1, min=2, max=10),
reraise=True reraise=True
) )
async def suggest_commit_message(self, diff: str, history: List[str]) -> str: async def suggest_commit_message(
self,
diff: str,
history: List[str],
prompt_template: str = DEFAULT_LLM_PROMPTS["git_commit_prompt"],
) -> str:
with belief_scope("suggest_commit_message"): with belief_scope("suggest_commit_message"):
history_text = "\n".join(history) history_text = "\n".join(history)
prompt = f""" prompt = render_prompt(
Generate a concise and professional git commit message based on the following diff and recent history. prompt_template,
Use Conventional Commits format (e.g., feat: ..., fix: ..., docs: ...). {"history": history_text, "diff": diff},
)
Recent History:
{history_text}
Diff:
{diff}
Commit Message:
"""
logger.debug(f"[suggest_commit_message] Calling LLM with model: {self.client.default_model}") logger.debug(f"[suggest_commit_message] Calling LLM with model: {self.client.default_model}")
response = await self.client.client.chat.completions.create( response = await self.client.client.chat.completions.create(

View File

@@ -23,6 +23,11 @@ from .service import ScreenshotService, LLMClient
from .models import LLMProviderType, ValidationStatus, ValidationResult, DetectedIssue from .models import LLMProviderType, ValidationStatus, ValidationResult, DetectedIssue
from ...models.llm import ValidationRecord from ...models.llm import ValidationRecord
from ...core.task_manager.context import TaskContext from ...core.task_manager.context import TaskContext
from ...services.llm_prompt_templates import (
DEFAULT_LLM_PROMPTS,
normalize_llm_settings,
render_prompt,
)
# [DEF:DashboardValidationPlugin:Class] # [DEF:DashboardValidationPlugin:Class]
# @PURPOSE: Plugin for automated dashboard health analysis using LLMs. # @PURPOSE: Plugin for automated dashboard health analysis using LLMs.
@@ -181,7 +186,16 @@ class DashboardValidationPlugin(PluginBase):
) )
llm_log.info(f"Analyzing dashboard {dashboard_id} with LLM") llm_log.info(f"Analyzing dashboard {dashboard_id} with LLM")
analysis = await llm_client.analyze_dashboard(screenshot_path, logs) llm_settings = normalize_llm_settings(config_mgr.get_config().settings.llm)
dashboard_prompt = llm_settings["prompts"].get(
"dashboard_validation_prompt",
DEFAULT_LLM_PROMPTS["dashboard_validation_prompt"],
)
analysis = await llm_client.analyze_dashboard(
screenshot_path,
logs,
prompt_template=dashboard_prompt,
)
# Log analysis summary to task logs for better visibility # Log analysis summary to task logs for better visibility
llm_log.info(f"[ANALYSIS_SUMMARY] Status: {analysis['status']}") llm_log.info(f"[ANALYSIS_SUMMARY] Status: {analysis['status']}")
@@ -341,22 +355,18 @@ class DocumentationPlugin(PluginBase):
default_model=db_provider.default_model default_model=db_provider.default_model
) )
prompt = f""" llm_settings = normalize_llm_settings(config_mgr.get_config().settings.llm)
Generate professional documentation for the following dataset and its columns. documentation_prompt = llm_settings["prompts"].get(
Dataset: {dataset.get('table_name')} "documentation_prompt",
Columns: {columns_data} DEFAULT_LLM_PROMPTS["documentation_prompt"],
)
Provide the documentation in JSON format: prompt = render_prompt(
{{ documentation_prompt,
"dataset_description": "General description of the dataset", {
"column_descriptions": [ "dataset_name": dataset.get("table_name") or "",
{{ "columns_json": json.dumps(columns_data, ensure_ascii=False),
"name": "column_name", },
"description": "Generated description" )
}}
]
}}
"""
# Using a generic chat completion for text-only US2 # Using a generic chat completion for text-only US2
llm_log.info(f"Generating documentation for dataset {dataset_id}") llm_log.info(f"Generating documentation for dataset {dataset_id}")

View File

@@ -20,6 +20,7 @@ from tenacity import retry, stop_after_attempt, wait_exponential, retry_if_excep
from .models import LLMProviderType from .models import LLMProviderType
from ...core.logger import belief_scope, logger from ...core.logger import belief_scope, logger
from ...core.config_models import Environment from ...core.config_models import Environment
from ...services.llm_prompt_templates import DEFAULT_LLM_PROMPTS, render_prompt
# [DEF:ScreenshotService:Class] # [DEF:ScreenshotService:Class]
# @PURPOSE: Handles capturing screenshots of Superset dashboards. # @PURPOSE: Handles capturing screenshots of Superset dashboards.
@@ -548,7 +549,12 @@ class LLMClient:
# @PRE: screenshot_path exists, logs is a list of strings. # @PRE: screenshot_path exists, logs is a list of strings.
# @POST: Returns a structured analysis dictionary (status, summary, issues). # @POST: Returns a structured analysis dictionary (status, summary, issues).
# @SIDE_EFFECT: Reads screenshot file and calls external LLM API. # @SIDE_EFFECT: Reads screenshot file and calls external LLM API.
async def analyze_dashboard(self, screenshot_path: str, logs: List[str]) -> Dict[str, Any]: async def analyze_dashboard(
self,
screenshot_path: str,
logs: List[str],
prompt_template: str = DEFAULT_LLM_PROMPTS["dashboard_validation_prompt"],
) -> Dict[str, Any]:
with belief_scope("analyze_dashboard"): with belief_scope("analyze_dashboard"):
# Optimize image to reduce token count (US1 / T023) # Optimize image to reduce token count (US1 / T023)
# Gemini/Gemma models have limits on input tokens, and large images contribute significantly. # Gemini/Gemma models have limits on input tokens, and large images contribute significantly.
@@ -582,25 +588,7 @@ class LLMClient:
base_64_image = base64.b64encode(image_file.read()).decode('utf-8') base_64_image = base64.b64encode(image_file.read()).decode('utf-8')
log_text = "\n".join(logs) log_text = "\n".join(logs)
prompt = f""" prompt = render_prompt(prompt_template, {"logs": log_text})
Analyze the attached dashboard screenshot and the following execution logs for health and visual issues.
Logs:
{log_text}
Provide the analysis in JSON format with the following structure:
{{
"status": "PASS" | "WARN" | "FAIL",
"summary": "Short summary of findings",
"issues": [
{{
"severity": "WARN" | "FAIL",
"message": "Description of the issue",
"location": "Optional location info (e.g. chart name)"
}}
]
}}
"""
messages = [ messages = [
{ {

View File

@@ -0,0 +1,62 @@
# [DEF:backend.src.services.__tests__.test_llm_prompt_templates:Module]
# @TIER: STANDARD
# @SEMANTICS: tests, llm, prompts, templates, settings
# @PURPOSE: Validate normalization and rendering behavior for configurable LLM prompt templates.
# @LAYER: Domain Tests
# @RELATION: DEPENDS_ON -> backend.src.services.llm_prompt_templates
# @INVARIANT: All required prompt keys remain available after normalization.
from src.services.llm_prompt_templates import (
DEFAULT_LLM_PROMPTS,
normalize_llm_settings,
render_prompt,
)
# [DEF:test_normalize_llm_settings_adds_default_prompts:Function]
# @TIER: STANDARD
# @PURPOSE: Ensure legacy/partial llm settings are expanded with all prompt defaults.
# @PRE: Input llm settings do not contain complete prompts object.
# @POST: Returned structure includes required prompt templates with fallback defaults.
def test_normalize_llm_settings_adds_default_prompts():
    # A legacy payload with no prompts section at all.
    result = normalize_llm_settings({"default_provider": "x"})
    assert "prompts" in result
    assert result["default_provider"] == "x"
    # Every required template key must be backfilled with a string default.
    prompts = result["prompts"]
    for required_key in DEFAULT_LLM_PROMPTS:
        assert required_key in prompts
        assert isinstance(prompts[required_key], str)
# [/DEF:test_normalize_llm_settings_adds_default_prompts:Function]
# [DEF:test_normalize_llm_settings_keeps_custom_prompt_values:Function]
# @TIER: STANDARD
# @PURPOSE: Ensure user-customized prompt values are preserved during normalization.
# @PRE: Input llm settings contain custom prompt override.
# @POST: Custom prompt value remains unchanged in normalized output.
def test_normalize_llm_settings_keeps_custom_prompt_values():
    override = "Doc for {dataset_name} using {columns_json}"
    settings = {"prompts": {"documentation_prompt": override}}
    # Normalization must not replace a non-empty user override with the default.
    result = normalize_llm_settings(settings)
    assert result["prompts"]["documentation_prompt"] == override
# [/DEF:test_normalize_llm_settings_keeps_custom_prompt_values:Function]
# [DEF:test_render_prompt_replaces_known_placeholders:Function]
# @TIER: STANDARD
# @PURPOSE: Ensure template placeholders are deterministically replaced.
# @PRE: Template contains placeholders matching provided variables.
# @POST: Rendered prompt string contains substituted values.
def test_render_prompt_replaces_known_placeholders():
    template = "Hello {name}, diff={diff}"
    variables = {"name": "bot", "diff": "A->B"}
    # Both placeholders must be substituted in a single render call.
    assert render_prompt(template, variables) == "Hello bot, diff=A->B"
# [/DEF:test_render_prompt_replaces_known_placeholders:Function]
# [/DEF:backend.src.services.__tests__.test_llm_prompt_templates:Module]

View File

@@ -0,0 +1,94 @@
# [DEF:backend.src.services.llm_prompt_templates:Module]
# @TIER: STANDARD
# @SEMANTICS: llm, prompts, templates, settings
# @PURPOSE: Provide default LLM prompt templates and normalization helpers for runtime usage.
# @LAYER: Domain
# @RELATION: DEPENDS_ON -> backend.src.core.config_manager
# @INVARIANT: All required prompt template keys are always present after normalization.
from __future__ import annotations
from copy import deepcopy
from typing import Dict, Any
# [DEF:DEFAULT_LLM_PROMPTS:Constant]
# @TIER: STANDARD
# @PURPOSE: Default prompt templates used by documentation, dashboard validation, and git commit generation.
# NOTE: Placeholder tokens use single braces ({logs}, {dataset_name}, {columns_json},
# {history}, {diff}) and are consumed by render_prompt's literal replacement.
# The literal JSON braces in the template bodies are intentionally NOT escaped,
# because str.format is never applied to these strings.
DEFAULT_LLM_PROMPTS: Dict[str, str] = {
    # Vision prompt: sent together with a dashboard screenshot and run logs;
    # the model must answer with the PASS/WARN/FAIL JSON schema below.
    "dashboard_validation_prompt": (
        "Analyze the attached dashboard screenshot and the following execution logs for health and visual issues.\n\n"
        "Logs:\n"
        "{logs}\n\n"
        "Provide the analysis in JSON format with the following structure:\n"
        "{\n"
        ' "status": "PASS" | "WARN" | "FAIL",\n'
        ' "summary": "Short summary of findings",\n'
        ' "issues": [\n'
        " {\n"
        ' "severity": "WARN" | "FAIL",\n'
        ' "message": "Description of the issue",\n'
        ' "location": "Optional location info (e.g. chart name)"\n'
        " }\n"
        " ]\n"
        "}"
    ),
    # Text-only prompt: generates dataset/column documentation as JSON;
    # {columns_json} is expected to already be serialized JSON text.
    "documentation_prompt": (
        "Generate professional documentation for the following dataset and its columns.\n"
        "Dataset: {dataset_name}\n"
        "Columns: {columns_json}\n\n"
        "Provide the documentation in JSON format:\n"
        "{\n"
        ' "dataset_description": "General description of the dataset",\n'
        ' "column_descriptions": [\n'
        " {\n"
        ' "name": "column_name",\n'
        ' "description": "Generated description"\n'
        " }\n"
        " ]\n"
        "}"
    ),
    # Text-only prompt: Conventional Commits message generation from a diff
    # plus recent history; the model is expected to continue after the final line.
    "git_commit_prompt": (
        "Generate a concise and professional git commit message based on the following diff and recent history.\n"
        "Use Conventional Commits format (e.g., feat: ..., fix: ..., docs: ...).\n\n"
        "Recent History:\n"
        "{history}\n\n"
        "Diff:\n"
        "{diff}\n\n"
        "Commit Message:"
    ),
}
# [/DEF:DEFAULT_LLM_PROMPTS:Constant]
# [DEF:normalize_llm_settings:Function]
# @TIER: STANDARD
# @PURPOSE: Ensure llm settings contain stable schema with prompts section and default templates.
# @PRE: llm_settings is dictionary-like value or None.
# @POST: Returned dict contains prompts with all required template keys.
def normalize_llm_settings(llm_settings: Any) -> Dict[str, Any]:
    """Return a stable llm-settings dict: providers, default_provider, prompts.

    Unknown keys from the input are dropped; every key from
    DEFAULT_LLM_PROMPTS is guaranteed present in the result, with
    non-empty string overrides from the input taking precedence.
    """
    # Start from the canonical empty schema, then copy over only the
    # three recognized keys when the input is actually a dict.
    result: Dict[str, Any] = {"providers": [], "default_provider": "", "prompts": {}}
    if isinstance(llm_settings, dict):
        for field in ("providers", "default_provider", "prompts"):
            if field in llm_settings:
                result[field] = llm_settings[field]
    # A malformed (non-dict) prompts section is treated as absent.
    raw_prompts = result.get("prompts")
    if not isinstance(raw_prompts, dict):
        raw_prompts = {}
    # Defaults first (deep-copied so callers can't mutate the module constant),
    # then layer user overrides that are non-blank strings.
    merged: Dict[str, str] = deepcopy(DEFAULT_LLM_PROMPTS)
    for key, value in raw_prompts.items():
        if isinstance(value, str) and value.strip():
            merged[key] = value
    result["prompts"] = merged
    return result
# [/DEF:normalize_llm_settings:Function]
# [DEF:render_prompt:Function]
# @TIER: STANDARD
# @PURPOSE: Render prompt template using deterministic placeholder replacement with graceful fallback.
# @PRE: template is a string and variables values are already stringifiable.
# @POST: Returns rendered prompt text with known placeholders substituted.
def render_prompt(template: str, variables: Dict[str, Any]) -> str:
    """Substitute ``{key}`` tokens in *template* with values from *variables*.

    Unknown tokens are left untouched (graceful fallback). Substitution is a
    single left-to-right pass: placeholder-like text inside a substituted
    *value* is NOT expanded again. The previous sequential str.replace
    implementation cascaded (a value containing "{other_key}" would itself be
    replaced, with the outcome depending on dict iteration order) — unsafe for
    user-supplied content such as logs or diffs.
    """
    import re  # local import: keeps the module's dependency surface unchanged

    if not variables:
        return template
    # One alternation matching exactly the known "{key}" tokens; re.escape
    # guards against keys containing regex metacharacters.
    pattern = re.compile("|".join(re.escape("{" + key + "}") for key in variables))
    # m.group(0) is "{key}"; strip the surrounding braces to recover the key.
    return pattern.sub(lambda m: str(variables[m.group(0)[1:-1]]), template)
# [/DEF:render_prompt:Function]
# [/DEF:backend.src.services.llm_prompt_templates:Module]

View File

@@ -26,18 +26,18 @@
// [/SECTION] // [/SECTION]
// [SECTION: STATE] // [SECTION: STATE]
let filterText = ""; let filterText = $state("");
let currentPage = 0; let currentPage = $state(0);
let pageSize = 20; let pageSize = $state(20);
let sortColumn: keyof DashboardMetadata = "title"; let sortColumn: keyof DashboardMetadata = $state("title");
let sortDirection: "asc" | "desc" = "asc"; let sortDirection: "asc" | "desc" = $state("asc");
// [/SECTION] // [/SECTION]
// [SECTION: UI STATE] // [SECTION: UI STATE]
let showGitManager = false; let showGitManager = $state(false);
let gitDashboardId: number | null = null; let gitDashboardId: number | null = $state(null);
let gitDashboardTitle = ""; let gitDashboardTitle = $state("");
let validatingIds: Set<number> = new Set(); let validatingIds: Set<number> = $state(new Set());
// [/SECTION] // [/SECTION]
// [DEF:handleValidate:Function] // [DEF:handleValidate:Function]
@@ -48,7 +48,7 @@
if (validatingIds.has(dashboard.id)) return; if (validatingIds.has(dashboard.id)) return;
validatingIds.add(dashboard.id); validatingIds.add(dashboard.id);
validatingIds = validatingIds; // Trigger reactivity validatingIds = new Set(validatingIds);
try { try {
// TODO: Get provider_id from settings or prompt user // TODO: Get provider_id from settings or prompt user
@@ -83,7 +83,7 @@
toast(e.message || "Validation failed to start", "error"); toast(e.message || "Validation failed to start", "error");
} finally { } finally {
validatingIds.delete(dashboard.id); validatingIds.delete(dashboard.id);
validatingIds = validatingIds; validatingIds = new Set(validatingIds);
} }
} }
// [/DEF:handleValidate:Function] // [/DEF:handleValidate:Function]
@@ -221,14 +221,14 @@
type="checkbox" type="checkbox"
checked={allSelected} checked={allSelected}
indeterminate={someSelected && !allSelected} indeterminate={someSelected && !allSelected}
on:change={(e) => onchange={(e) =>
handleSelectAll((e.target as HTMLInputElement).checked)} handleSelectAll((e.target as HTMLInputElement).checked)}
class="h-4 w-4 text-blue-600 border-gray-300 rounded focus:ring-blue-500" class="h-4 w-4 text-blue-600 border-gray-300 rounded focus:ring-blue-500"
/> />
</th> </th>
<th <th
class="px-6 py-3 text-left text-xs font-medium text-gray-500 uppercase tracking-wider cursor-pointer hover:text-gray-700 transition-colors" class="px-6 py-3 text-left text-xs font-medium text-gray-500 uppercase tracking-wider cursor-pointer hover:text-gray-700 transition-colors"
on:click={() => handleSort("title")} onclick={() => handleSort("title")}
> >
{$t.dashboard.title} {$t.dashboard.title}
{sortColumn === "title" {sortColumn === "title"
@@ -239,7 +239,7 @@
</th> </th>
<th <th
class="px-6 py-3 text-left text-xs font-medium text-gray-500 uppercase tracking-wider cursor-pointer hover:text-gray-700 transition-colors" class="px-6 py-3 text-left text-xs font-medium text-gray-500 uppercase tracking-wider cursor-pointer hover:text-gray-700 transition-colors"
on:click={() => handleSort("last_modified")} onclick={() => handleSort("last_modified")}
> >
{$t.dashboard.last_modified} {$t.dashboard.last_modified}
{sortColumn === "last_modified" {sortColumn === "last_modified"
@@ -250,7 +250,7 @@
</th> </th>
<th <th
class="px-6 py-3 text-left text-xs font-medium text-gray-500 uppercase tracking-wider cursor-pointer hover:text-gray-700 transition-colors" class="px-6 py-3 text-left text-xs font-medium text-gray-500 uppercase tracking-wider cursor-pointer hover:text-gray-700 transition-colors"
on:click={() => handleSort("status")} onclick={() => handleSort("status")}
> >
{$t.dashboard.status} {$t.dashboard.status}
{sortColumn === "status" {sortColumn === "status"
@@ -276,7 +276,7 @@
<input <input
type="checkbox" type="checkbox"
checked={selectedIds.includes(dashboard.id)} checked={selectedIds.includes(dashboard.id)}
on:change={(e) => onchange={(e) =>
handleSelectionChange( handleSelectionChange(
dashboard.id, dashboard.id,
(e.target as HTMLInputElement).checked, (e.target as HTMLInputElement).checked,

View File

@@ -23,7 +23,7 @@
// [/SECTION] // [/SECTION]
let selectedTargetUuid = ""; let selectedTargetUuid = $state("");
const dispatch = createEventDispatcher(); const dispatch = createEventDispatcher();
// [DEF:resolve:Function] // [DEF:resolve:Function]
@@ -94,7 +94,7 @@
<div class="mt-5 sm:mt-6 sm:grid sm:grid-cols-2 sm:gap-3 sm:grid-flow-row-dense"> <div class="mt-5 sm:mt-6 sm:grid sm:grid-cols-2 sm:gap-3 sm:grid-flow-row-dense">
<button <button
type="button" type="button"
on:click={resolve} onclick={resolve}
disabled={!selectedTargetUuid} disabled={!selectedTargetUuid}
class="w-full inline-flex justify-center rounded-md border border-transparent shadow-sm px-4 py-2 bg-indigo-600 text-base font-medium text-white hover:bg-indigo-700 focus:outline-none focus:ring-2 focus:ring-offset-2 focus:ring-indigo-500 sm:col-start-2 sm:text-sm disabled:bg-gray-400" class="w-full inline-flex justify-center rounded-md border border-transparent shadow-sm px-4 py-2 bg-indigo-600 text-base font-medium text-white hover:bg-indigo-700 focus:outline-none focus:ring-2 focus:ring-offset-2 focus:ring-indigo-500 sm:col-start-2 sm:text-sm disabled:bg-gray-400"
> >
@@ -102,7 +102,7 @@
</button> </button>
<button <button
type="button" type="button"
on:click={cancel} onclick={cancel}
class="mt-3 w-full inline-flex justify-center rounded-md border border-gray-300 shadow-sm px-4 py-2 bg-white text-base font-medium text-gray-700 hover:bg-gray-50 focus:outline-none focus:ring-2 focus:ring-offset-2 focus:ring-indigo-500 sm:mt-0 sm:col-start-1 sm:text-sm" class="mt-3 w-full inline-flex justify-center rounded-md border border-gray-300 shadow-sm px-4 py-2 bg-white text-base font-medium text-gray-700 hover:bg-gray-50 focus:outline-none focus:ring-2 focus:ring-offset-2 focus:ring-indigo-500 sm:mt-0 sm:col-start-1 sm:text-sm"
> >
Cancel Migration Cancel Migration

View File

@@ -27,10 +27,10 @@
// [/SECTION] // [/SECTION]
// [SECTION: STATE] // [SECTION: STATE]
let branches = []; let branches = $state([]);
let loading = false; let loading = $state(false);
let showCreate = false; let showCreate = $state(false);
let newBranchName = ''; let newBranchName = $state('');
// [/SECTION] // [/SECTION]
const dispatch = createEventDispatcher(); const dispatch = createEventDispatcher();
@@ -129,7 +129,7 @@
<div class="flex-grow"> <div class="flex-grow">
<Select <Select
bind:value={currentBranch} bind:value={currentBranch}
on:change={handleSelect} onchange={handleSelect}
disabled={loading} disabled={loading}
options={branches.map(b => ({ value: b.name, label: b.name }))} options={branches.map(b => ({ value: b.name, label: b.name }))}
/> />
@@ -138,7 +138,7 @@
<Button <Button
variant="ghost" variant="ghost"
size="sm" size="sm"
on:click={() => showCreate = !showCreate} onclick={() => showCreate = !showCreate}
disabled={loading} disabled={loading}
class="text-blue-600" class="text-blue-600"
> >
@@ -158,7 +158,7 @@
<Button <Button
variant="primary" variant="primary"
size="sm" size="sm"
on:click={handleCreate} onclick={handleCreate}
disabled={loading || !newBranchName} disabled={loading || !newBranchName}
isLoading={loading} isLoading={loading}
class="bg-green-600 hover:bg-green-700" class="bg-green-600 hover:bg-green-700"
@@ -168,7 +168,7 @@
<Button <Button
variant="ghost" variant="ghost"
size="sm" size="sm"
on:click={() => showCreate = false} onclick={() => showCreate = false}
disabled={loading} disabled={loading}
> >
{$t.common.cancel} {$t.common.cancel}

View File

@@ -23,8 +23,8 @@
// [/SECTION] // [/SECTION]
// [SECTION: STATE] // [SECTION: STATE]
let history = []; let history = $state([]);
let loading = false; let loading = $state(false);
// [/SECTION] // [/SECTION]
// [DEF:onMount:Function] // [DEF:onMount:Function]
@@ -66,7 +66,7 @@
<h3 class="text-sm font-semibold text-gray-400 uppercase tracking-wider"> <h3 class="text-sm font-semibold text-gray-400 uppercase tracking-wider">
{$t.git.history} {$t.git.history}
</h3> </h3>
<Button variant="ghost" size="sm" on:click={loadHistory} class="text-blue-600"> <Button variant="ghost" size="sm" onclick={loadHistory} class="text-blue-600">
{$t.git.refresh} {$t.git.refresh}
</Button> </Button>
</div> </div>

View File

@@ -24,12 +24,12 @@
// [/SECTION] // [/SECTION]
// [SECTION: STATE] // [SECTION: STATE]
let message = ""; let message = $state("");
let committing = false; let committing = $state(false);
let status = null; let status = $state(null);
let diff = ""; let diff = $state("");
let loading = false; let loading = $state(false);
let generatingMessage = false; let generatingMessage = $state(false);
// [/SECTION] // [/SECTION]
const dispatch = createEventDispatcher(); const dispatch = createEventDispatcher();
@@ -153,7 +153,7 @@
>Commit Message</label >Commit Message</label
> >
<button <button
on:click={handleGenerateMessage} onclick={handleGenerateMessage}
disabled={generatingMessage || loading} disabled={generatingMessage || loading}
class="text-xs text-blue-600 hover:text-blue-800 disabled:opacity-50 flex items-center" class="text-xs text-blue-600 hover:text-blue-800 disabled:opacity-50 flex items-center"
> >
@@ -243,13 +243,13 @@
<div class="flex justify-end space-x-3 mt-6 pt-4 border-t"> <div class="flex justify-end space-x-3 mt-6 pt-4 border-t">
<button <button
on:click={() => (show = false)} onclick={() => (show = false)}
class="px-4 py-2 text-gray-600 hover:bg-gray-100 rounded" class="px-4 py-2 text-gray-600 hover:bg-gray-100 rounded"
> >
Cancel Cancel
</button> </button>
<button <button
on:click={handleCommit} onclick={handleCommit}
disabled={committing || disabled={committing ||
!message || !message ||
loading || loading ||

View File

@@ -26,7 +26,7 @@
// [SECTION: STATE] // [SECTION: STATE]
const dispatch = createEventDispatcher(); const dispatch = createEventDispatcher();
/** @type {Object.<string, 'mine' | 'theirs' | 'manual'>} */ /** @type {Object.<string, 'mine' | 'theirs' | 'manual'>} */
let resolutions = {}; let resolutions = $state({});
// [/SECTION] // [/SECTION]
// [DEF:resolve:Function] // [DEF:resolve:Function]
@@ -126,7 +126,7 @@
] === 'mine' ] === 'mine'
? 'bg-blue-600 text-white' ? 'bg-blue-600 text-white'
: 'bg-gray-50 hover:bg-blue-50 text-blue-600'}" : 'bg-gray-50 hover:bg-blue-50 text-blue-600'}"
on:click={() => onclick={() =>
resolve(conflict.file_path, "mine")} resolve(conflict.file_path, "mine")}
> >
Keep Mine Keep Mine
@@ -148,7 +148,7 @@
] === 'theirs' ] === 'theirs'
? 'bg-green-600 text-white' ? 'bg-green-600 text-white'
: 'bg-gray-50 hover:bg-green-50 text-green-600'}" : 'bg-gray-50 hover:bg-green-50 text-green-600'}"
on:click={() => onclick={() =>
resolve(conflict.file_path, "theirs")} resolve(conflict.file_path, "theirs")}
> >
Keep Theirs Keep Theirs
@@ -161,13 +161,13 @@
<div class="flex justify-end space-x-3 pt-4 border-t"> <div class="flex justify-end space-x-3 pt-4 border-t">
<button <button
on:click={() => (show = false)} onclick={() => (show = false)}
class="px-4 py-2 text-gray-600 hover:bg-gray-100 rounded transition-colors" class="px-4 py-2 text-gray-600 hover:bg-gray-100 rounded transition-colors"
> >
Cancel Cancel
</button> </button>
<button <button
on:click={handleSave} onclick={handleSave}
class="px-4 py-2 bg-blue-600 text-white rounded hover:bg-blue-700 transition-colors shadow-sm" class="px-4 py-2 bg-blue-600 text-white rounded hover:bg-blue-700 transition-colors shadow-sm"
> >
Resolve & Continue Resolve & Continue

View File

@@ -22,10 +22,10 @@
// [/SECTION] // [/SECTION]
// [SECTION: STATE] // [SECTION: STATE]
let environments = []; let environments = $state([]);
let selectedEnv = ""; let selectedEnv = $state("");
let loading = false; let loading = $state(false);
let deploying = false; let deploying = $state(false);
// [/SECTION] // [/SECTION]
const dispatch = createEventDispatcher(); const dispatch = createEventDispatcher();
@@ -108,7 +108,7 @@
</p> </p>
<div class="flex justify-end"> <div class="flex justify-end">
<button <button
on:click={() => (show = false)} onclick={() => (show = false)}
class="px-4 py-2 bg-gray-200 text-gray-800 rounded hover:bg-gray-300" class="px-4 py-2 bg-gray-200 text-gray-800 rounded hover:bg-gray-300"
> >
Close Close
@@ -133,13 +133,13 @@
<div class="flex justify-end space-x-3"> <div class="flex justify-end space-x-3">
<button <button
on:click={() => (show = false)} onclick={() => (show = false)}
class="px-4 py-2 text-gray-600 hover:bg-gray-100 rounded" class="px-4 py-2 text-gray-600 hover:bg-gray-100 rounded"
> >
Cancel Cancel
</button> </button>
<button <button
on:click={handleDeploy} onclick={handleDeploy}
disabled={deploying || !selectedEnv} disabled={deploying || !selectedEnv}
class="px-4 py-2 bg-green-600 text-white rounded hover:bg-green-700 disabled:opacity-50 flex items-center" class="px-4 py-2 bg-green-600 text-white rounded hover:bg-green-700 disabled:opacity-50 flex items-center"
> >

View File

@@ -35,20 +35,20 @@
// [/SECTION] // [/SECTION]
// [SECTION: STATE] // [SECTION: STATE]
let currentBranch = 'main'; let currentBranch = $state('main');
let showCommitModal = false; let showCommitModal = $state(false);
let showDeployModal = false; let showDeployModal = $state(false);
let showHistory = true; let showHistory = true;
let showConflicts = false; let showConflicts = $state(false);
let conflicts = []; let conflicts = [];
let loading = false; let loading = $state(false);
let initialized = false; let initialized = $state(false);
let checkingStatus = true; let checkingStatus = $state(true);
// Initialization form state // Initialization form state
let configs = []; let configs = $state([]);
let selectedConfigId = ""; let selectedConfigId = $state("");
let remoteUrl = ""; let remoteUrl = $state("");
// [/SECTION] // [/SECTION]
// [DEF:checkStatus:Function] // [DEF:checkStatus:Function]
@@ -167,7 +167,7 @@
<PageHeader title="{$t.git.management}: {dashboardTitle}"> <PageHeader title="{$t.git.management}: {dashboardTitle}">
<div slot="subtitle" class="text-sm text-gray-500">ID: {dashboardId}</div> <div slot="subtitle" class="text-sm text-gray-500">ID: {dashboardId}</div>
<div slot="actions"> <div slot="actions">
<button on:click={() => show = false} class="text-gray-400 hover:text-gray-600 transition-colors"> <button onclick={() => show = false} class="text-gray-400 hover:text-gray-600 transition-colors">
<svg xmlns="http://www.w3.org/2000/svg" class="h-6 w-6" fill="none" viewBox="0 0 24 24" stroke="currentColor"> <svg xmlns="http://www.w3.org/2000/svg" class="h-6 w-6" fill="none" viewBox="0 0 24 24" stroke="currentColor">
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M6 18L18 6M6 6l12 12" /> <path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M6 18L18 6M6 6l12 12" />
</svg> </svg>
@@ -203,7 +203,7 @@
/> />
<Button <Button
on:click={handleInit} onclick={handleInit}
disabled={loading || configs.length === 0} disabled={loading || configs.length === 0}
isLoading={loading} isLoading={loading}
class="w-full" class="w-full"
@@ -226,14 +226,14 @@
<h3 class="text-sm font-semibold text-gray-400 uppercase tracking-wider mb-3">{$t.git.actions}</h3> <h3 class="text-sm font-semibold text-gray-400 uppercase tracking-wider mb-3">{$t.git.actions}</h3>
<Button <Button
variant="secondary" variant="secondary"
on:click={handleSync} onclick={handleSync}
disabled={loading} disabled={loading}
class="w-full" class="w-full"
> >
{$t.git.sync} {$t.git.sync}
</Button> </Button>
<Button <Button
on:click={() => showCommitModal = true} onclick={() => showCommitModal = true}
disabled={loading} disabled={loading}
class="w-full" class="w-full"
> >
@@ -242,7 +242,7 @@
<div class="grid grid-cols-2 gap-3"> <div class="grid grid-cols-2 gap-3">
<Button <Button
variant="ghost" variant="ghost"
on:click={handlePull} onclick={handlePull}
disabled={loading} disabled={loading}
class="border border-gray-200" class="border border-gray-200"
> >
@@ -250,7 +250,7 @@
</Button> </Button>
<Button <Button
variant="ghost" variant="ghost"
on:click={handlePush} onclick={handlePush}
disabled={loading} disabled={loading}
class="border border-gray-200" class="border border-gray-200"
> >
@@ -263,7 +263,7 @@
<h3 class="text-sm font-semibold text-gray-400 uppercase tracking-wider mb-3">{$t.git.deployment}</h3> <h3 class="text-sm font-semibold text-gray-400 uppercase tracking-wider mb-3">{$t.git.deployment}</h3>
<Button <Button
variant="primary" variant="primary"
on:click={() => showDeployModal = true} onclick={() => showDeployModal = true}
disabled={loading} disabled={loading}
class="w-full bg-green-600 hover:bg-green-700 focus-visible:ring-green-500" class="w-full bg-green-600 hover:bg-green-700 focus-visible:ring-green-500"
> >

View File

@@ -11,13 +11,17 @@
/** @type {Object} */ /** @type {Object} */
let { let {
documentation = null,
content = "", content = "",
type = 'markdown', type = 'markdown',
format = 'text', format = 'text',
onSave = async () => {},
onCancel = () => {},
} = $props(); } = $props();
let previewDoc = $derived(documentation || content);
let isSaving = false; let isSaving = $state(false);
async function handleSave() { async function handleSave() {
isSaving = true; isSaving = true;
@@ -31,14 +35,14 @@
} }
</script> </script>
{#if documentation} {#if previewDoc}
<div class="fixed inset-0 bg-black bg-opacity-50 flex items-center justify-center z-50"> <div class="fixed inset-0 bg-black bg-opacity-50 flex items-center justify-center z-50">
<div class="bg-white p-6 rounded-lg shadow-xl w-full max-w-2xl max-h-[90vh] flex flex-col"> <div class="bg-white p-6 rounded-lg shadow-xl w-full max-w-2xl max-h-[90vh] flex flex-col">
<h3 class="text-lg font-semibold mb-4">{$t.llm.doc_preview_title}</h3> <h3 class="text-lg font-semibold mb-4">{$t.llm.doc_preview_title}</h3>
<div class="flex-1 overflow-y-auto mb-6 prose prose-sm max-w-none border rounded p-4 bg-gray-50"> <div class="flex-1 overflow-y-auto mb-6 prose prose-sm max-w-none border rounded p-4 bg-gray-50">
<h4 class="text-md font-bold text-gray-800 mb-2">{$t.llm.dataset_desc}</h4> <h4 class="text-md font-bold text-gray-800 mb-2">{$t.llm.dataset_desc}</h4>
<p class="text-gray-700 mb-4 whitespace-pre-wrap">{documentation.description || 'No description generated.'}</p> <p class="text-gray-700 mb-4 whitespace-pre-wrap">{previewDoc.description || 'No description generated.'}</p>
<h4 class="text-md font-bold text-gray-800 mb-2">{$t.llm.column_doc}</h4> <h4 class="text-md font-bold text-gray-800 mb-2">{$t.llm.column_doc}</h4>
<table class="min-w-full divide-y divide-gray-200"> <table class="min-w-full divide-y divide-gray-200">
@@ -49,7 +53,7 @@
</tr> </tr>
</thead> </thead>
<tbody class="divide-y divide-gray-200"> <tbody class="divide-y divide-gray-200">
{#each Object.entries(documentation.columns || {}) as [name, desc]} {#each Object.entries(previewDoc.columns || {}) as [name, desc]}
<tr> <tr>
<td class="px-3 py-2 text-sm font-mono text-gray-900">{name}</td> <td class="px-3 py-2 text-sm font-mono text-gray-900">{name}</td>
<td class="px-3 py-2 text-sm text-gray-700">{desc}</td> <td class="px-3 py-2 text-sm text-gray-700">{desc}</td>
@@ -62,14 +66,14 @@
<div class="flex justify-end gap-3"> <div class="flex justify-end gap-3">
<button <button
class="px-4 py-2 border rounded hover:bg-gray-50" class="px-4 py-2 border rounded hover:bg-gray-50"
on:click={onCancel} onclick={onCancel}
disabled={isSaving} disabled={isSaving}
> >
{$t.llm.cancel} {$t.llm.cancel}
</button> </button>
<button <button
class="px-4 py-2 bg-blue-600 text-white rounded hover:bg-blue-700 disabled:opacity-50" class="px-4 py-2 bg-blue-600 text-white rounded hover:bg-blue-700 disabled:opacity-50"
on:click={handleSave} onclick={handleSave}
disabled={isSaving} disabled={isSaving}
> >
{isSaving ? $t.llm.applying : $t.llm.apply_doc} {isSaving ? $t.llm.applying : $t.llm.apply_doc}

View File

@@ -7,12 +7,12 @@
--> -->
<script> <script>
import { onMount } from "svelte";
import { t } from "../../lib/i18n"; import { t } from "../../lib/i18n";
import { requestApi } from "../../lib/api"; import { requestApi } from "../../lib/api";
/** @type {Array} */ /** @type {Array} */
let { providers = [], onSave = () => {} } = $props(); export let providers = [];
export let onSave = () => {};
let editingProvider = null; let editingProvider = null;
let showForm = false; let showForm = false;
@@ -43,8 +43,18 @@
} }
function handleEdit(provider) { function handleEdit(provider) {
console.log("[ProviderConfig][Action] Editing provider", provider?.id);
editingProvider = provider; editingProvider = provider;
formData = { ...provider, api_key: "" }; // Don't populate key for security // Normalize provider fields to editable form shape.
formData = {
name: provider?.name ?? "",
provider_type: provider?.provider_type ?? "openai",
base_url: provider?.base_url ?? "https://api.openai.com/v1",
api_key: "",
default_model: provider?.default_model ?? "gpt-4o",
is_active: Boolean(provider?.is_active),
};
testStatus = { type: "", message: "" };
showForm = true; showForm = true;
} }
@@ -121,8 +131,9 @@
<div class="flex justify-between items-center mb-6"> <div class="flex justify-between items-center mb-6">
<h2 class="text-xl font-bold">{$t.llm.providers_title}</h2> <h2 class="text-xl font-bold">{$t.llm.providers_title}</h2>
<button <button
type="button"
class="bg-blue-600 text-white px-4 py-2 rounded hover:bg-blue-700 transition" class="bg-blue-600 text-white px-4 py-2 rounded hover:bg-blue-700 transition"
on:click={() => { on:click|preventDefault={() => {
resetForm(); resetForm();
showForm = true; showForm = true;
}} }}
@@ -134,6 +145,8 @@
{#if showForm} {#if showForm}
<div <div
class="fixed inset-0 bg-black bg-opacity-50 flex items-center justify-center z-50" class="fixed inset-0 bg-black bg-opacity-50 flex items-center justify-center z-50"
role="dialog"
aria-modal="true"
> >
<div class="bg-white p-6 rounded-lg shadow-xl w-full max-w-md"> <div class="bg-white p-6 rounded-lg shadow-xl w-full max-w-md">
<h3 class="text-lg font-semibold mb-4"> <h3 class="text-lg font-semibold mb-4">
@@ -241,23 +254,26 @@
<div class="mt-6 flex justify-between gap-2"> <div class="mt-6 flex justify-between gap-2">
<button <button
type="button"
class="px-4 py-2 border rounded hover:bg-gray-50 flex-1" class="px-4 py-2 border rounded hover:bg-gray-50 flex-1"
on:click={() => { on:click|preventDefault={() => {
showForm = false; showForm = false;
}} }}
> >
{$t.llm.cancel} {$t.llm.cancel}
</button> </button>
<button <button
type="button"
class="px-4 py-2 bg-gray-600 text-white rounded hover:bg-gray-700 flex-1" class="px-4 py-2 bg-gray-600 text-white rounded hover:bg-gray-700 flex-1"
disabled={isTesting} disabled={isTesting}
on:click={testConnection} on:click|preventDefault={testConnection}
> >
{isTesting ? $t.llm.testing : $t.llm.test} {isTesting ? $t.llm.testing : $t.llm.test}
</button> </button>
<button <button
type="button"
class="px-4 py-2 bg-blue-600 text-white rounded hover:bg-blue-700 flex-1" class="px-4 py-2 bg-blue-600 text-white rounded hover:bg-blue-700 flex-1"
on:click={handleSubmit} on:click|preventDefault={handleSubmit}
> >
{$t.llm.save} {$t.llm.save}
</button> </button>
@@ -286,14 +302,16 @@
</div> </div>
<div class="flex gap-2"> <div class="flex gap-2">
<button <button
type="button"
class="text-sm text-blue-600 hover:underline" class="text-sm text-blue-600 hover:underline"
on:click={() => handleEdit(provider)} on:click|preventDefault|stopPropagation={() => handleEdit(provider)}
> >
{$t.common.edit} {$t.common.edit}
</button> </button>
<button <button
type="button"
class={`text-sm ${provider.is_active ? "text-orange-600" : "text-green-600"} hover:underline`} class={`text-sm ${provider.is_active ? "text-orange-600" : "text-green-600"} hover:underline`}
on:click={() => toggleActive(provider)} on:click|preventDefault|stopPropagation={() => toggleActive(provider)}
> >
{provider.is_active ? "Deactivate" : "Activate"} {provider.is_active ? "Deactivate" : "Activate"}
</button> </button>

View File

@@ -0,0 +1,44 @@
// [DEF:frontend.src.components.llm.__tests__.provider_config_integration:Module]
// @TIER: STANDARD
// @SEMANTICS: llm, provider-config, integration-test, edit-flow
// @PURPOSE: Protect edit-button interaction contract in LLM provider settings UI.
// @LAYER: UI Tests
// @RELATION: VERIFIES -> frontend/src/components/llm/ProviderConfig.svelte
// @INVARIANT: Edit action keeps explicit click handler and opens normalized edit form.
import { describe, it, expect } from 'vitest';
import fs from 'node:fs';
import path from 'node:path';
const COMPONENT_PATH = path.resolve(
process.cwd(),
'src/components/llm/ProviderConfig.svelte',
);
// [DEF:provider_config_edit_contract_tests:Function]
// @TIER: STANDARD
// @PURPOSE: Validate edit button handler wiring and normalized edit form state mapping.
// @PRE: ProviderConfig component source exists in expected path.
// @POST: Contract checks ensure edit click cannot degrade into no-op flow.
describe('ProviderConfig edit interaction contract', () => {
it('keeps explicit edit click handler with guarded button semantics', () => {
const source = fs.readFileSync(COMPONENT_PATH, 'utf-8');
expect(source).toContain('type="button"');
expect(source).toContain(
"on:click|preventDefault|stopPropagation={() => handleEdit(provider)}",
);
});
it('normalizes provider payload into editable form shape', () => {
const source = fs.readFileSync(COMPONENT_PATH, 'utf-8');
expect(source).toContain('formData = {');
expect(source).toContain('name: provider?.name ?? ""');
expect(source).toContain('provider_type: provider?.provider_type ?? "openai"');
expect(source).toContain('default_model: provider?.default_model ?? "gpt-4o"');
expect(source).toContain('showForm = true;');
});
});
// [/DEF:provider_config_edit_contract_tests:Function]
// [/DEF:frontend.src.components.llm.__tests__.provider_config_integration:Module]

View File

@@ -31,8 +31,8 @@
path = '', path = '',
} = $props(); } = $props();
let isUploading = false; let isUploading = $state(false);
let dragOver = false; let dragOver = $state(false);
async function handleUpload() { async function handleUpload() {
const file = fileInput.files[0]; const file = fileInput.files[0];
@@ -94,9 +94,9 @@
<div <div
class="mt-1 flex justify-center px-6 pt-5 pb-6 border-2 border-dashed rounded-md transition-colors class="mt-1 flex justify-center px-6 pt-5 pb-6 border-2 border-dashed rounded-md transition-colors
{dragOver ? 'border-indigo-500 bg-indigo-50' : 'border-gray-300'}" {dragOver ? 'border-indigo-500 bg-indigo-50' : 'border-gray-300'}"
on:dragover|preventDefault={() => dragOver = true} ondragover={(event) => { event.preventDefault(); dragOver = true; }}
on:dragleave|preventDefault={() => dragOver = false} ondragleave={(event) => { event.preventDefault(); dragOver = false; }}
on:drop|preventDefault={handleDrop} ondrop={(event) => { event.preventDefault(); handleDrop(event); }}
> >
<div class="space-y-1 text-center"> <div class="space-y-1 text-center">
<svg class="mx-auto h-12 w-12 text-gray-400" stroke="currentColor" fill="none" viewBox="0 0 48 48" aria-hidden="true"> <svg class="mx-auto h-12 w-12 text-gray-400" stroke="currentColor" fill="none" viewBox="0 0 48 48" aria-hidden="true">
@@ -111,7 +111,7 @@
type="file" type="file"
class="sr-only" class="sr-only"
bind:this={fileInput} bind:this={fileInput}
on:change={handleUpload} onchange={handleUpload}
disabled={isUploading} disabled={isUploading}
> >
</label> </label>

View File

@@ -246,7 +246,7 @@
<aside class="fixed right-0 top-0 z-[71] h-full w-full max-w-md border-l border-slate-200 bg-white shadow-2xl"> <aside class="fixed right-0 top-0 z-[71] h-full w-full max-w-md border-l border-slate-200 bg-white shadow-2xl">
<div class="flex h-14 items-center justify-between border-b border-slate-200 px-4"> <div class="flex h-14 items-center justify-between border-b border-slate-200 px-4">
<div class="flex items-center gap-2 text-slate-800"> <div class="flex items-center gap-2 text-slate-800">
<Icon name="activity" size={18} /> <Icon name="clipboard" size={18} />
<h2 class="text-sm font-semibold">{$t.assistant?.title || 'AI Assistant'}</h2> <h2 class="text-sm font-semibold">{$t.assistant?.title || 'AI Assistant'}</h2>
</div> </div>
<button <button

View File

@@ -4,7 +4,7 @@
* @TIER: CRITICAL * @TIER: CRITICAL
* @PURPOSE: Global task drawer for monitoring background operations * @PURPOSE: Global task drawer for monitoring background operations
* @LAYER: UI * @LAYER: UI
* @RELATION: BINDS_TO -> taskDrawerStore, WebSocket * @RELATION: BINDS_TO -> taskDrawerStore, assistantChatStore, WebSocket
* @SEMANTICS: TaskLogViewer * @SEMANTICS: TaskLogViewer
* @INVARIANT: Drawer shows logs for active task or remains closed * @INVARIANT: Drawer shows logs for active task or remains closed
* *
@@ -20,6 +20,7 @@
import { onMount, onDestroy } from "svelte"; import { onMount, onDestroy } from "svelte";
import { taskDrawerStore, closeDrawer } from "$lib/stores/taskDrawer.js"; import { taskDrawerStore, closeDrawer } from "$lib/stores/taskDrawer.js";
import { assistantChatStore } from "$lib/stores/assistantChat.js";
import TaskLogViewer from "../../../components/TaskLogViewer.svelte"; import TaskLogViewer from "../../../components/TaskLogViewer.svelte";
import PasswordPrompt from "../../../components/PasswordPrompt.svelte"; import PasswordPrompt from "../../../components/PasswordPrompt.svelte";
import { t } from "$lib/i18n"; import { t } from "$lib/i18n";
@@ -33,12 +34,15 @@
let taskStatus = null; let taskStatus = null;
let recentTasks = []; let recentTasks = [];
let loadingTasks = false; let loadingTasks = false;
let isAssistantOpen = false;
// Subscribe to task drawer store // Subscribe to task drawer store
$: if ($taskDrawerStore) { $: if ($taskDrawerStore) {
isOpen = $taskDrawerStore.isOpen; isOpen = $taskDrawerStore.isOpen;
activeTaskId = $taskDrawerStore.activeTaskId; activeTaskId = $taskDrawerStore.activeTaskId;
} }
$: isAssistantOpen = Boolean($assistantChatStore?.isOpen);
$: assistantOffset = isAssistantOpen ? "min(100vw, 28rem)" : "0px";
// Derive short task ID for display // Derive short task ID for display
$: shortTaskId = activeTaskId $: shortTaskId = activeTaskId
@@ -191,7 +195,8 @@
<!-- Drawer Overlay --> <!-- Drawer Overlay -->
{#if isOpen} {#if isOpen}
<div <div
class="fixed inset-0 z-50 bg-black/35 backdrop-blur-sm" class="fixed inset-0 z-[69] bg-black/35 backdrop-blur-sm"
style={`right: ${assistantOffset};`}
on:click={handleOverlayClick} on:click={handleOverlayClick}
on:keydown={(e) => e.key === 'Escape' && handleClose()} on:keydown={(e) => e.key === 'Escape' && handleClose()}
role="button" role="button"
@@ -200,7 +205,8 @@
> >
<!-- Drawer Panel --> <!-- Drawer Panel -->
<div <div
class="fixed right-0 top-0 z-50 flex h-full w-full max-w-[560px] flex-col border-l border-slate-200 bg-white shadow-[-8px_0_30px_rgba(15,23,42,0.15)] transition-transform duration-300 ease-out" class="fixed top-0 z-[72] flex h-full w-full max-w-[560px] flex-col border-l border-slate-200 bg-white shadow-[-8px_0_30px_rgba(15,23,42,0.15)] transition-[right] duration-300 ease-out"
style={`right: ${assistantOffset};`}
role="dialog" role="dialog"
aria-modal="true" aria-modal="true"
aria-label="Task drawer" aria-label="Task drawer"

View File

@@ -143,7 +143,7 @@
aria-label={$t.assistant?.open || "Open assistant"} aria-label={$t.assistant?.open || "Open assistant"}
title={$t.assistant?.title || "AI Assistant"} title={$t.assistant?.title || "AI Assistant"}
> >
<Icon name="activity" size={22} /> <Icon name="clipboard" size={22} />
</button> </button>
<!-- Activity Indicator --> <!-- Activity Indicator -->

View File

@@ -93,6 +93,12 @@
"env_actions": "Actions", "env_actions": "Actions",
"connections_description": "Configure database connections for data mapping.", "connections_description": "Configure database connections for data mapping.",
"llm_description": "Configure LLM providers for dataset documentation.", "llm_description": "Configure LLM providers for dataset documentation.",
"llm_prompts_title": "LLM Prompt Templates",
"llm_prompts_description": "Edit reusable prompts used for documentation, dashboard validation, and git commit generation.",
"llm_prompt_documentation": "Documentation Prompt",
"llm_prompt_dashboard_validation": "Dashboard Validation Prompt",
"llm_prompt_git_commit": "Git Commit Prompt",
"save_llm_prompts": "Save LLM Prompts",
"logging": "Logging Configuration", "logging": "Logging Configuration",
"logging_description": "Configure logging and task log levels.", "logging_description": "Configure logging and task log levels.",
"storage_description": "Configure file storage paths and patterns.", "storage_description": "Configure file storage paths and patterns.",

View File

@@ -93,6 +93,12 @@
"env_actions": "Действия", "env_actions": "Действия",
"connections_description": "Настройка подключений к базам данных для маппинга.", "connections_description": "Настройка подключений к базам данных для маппинга.",
"llm_description": "Настройка LLM провайдеров для документирования датасетов.", "llm_description": "Настройка LLM провайдеров для документирования датасетов.",
"llm_prompts_title": "Шаблоны промптов LLM",
"llm_prompts_description": "Редактируйте промпты для документации, проверки дашбордов и генерации git-коммитов.",
"llm_prompt_documentation": "Промпт документации",
"llm_prompt_dashboard_validation": "Промпт проверки дашборда",
"llm_prompt_git_commit": "Промпт git-коммита",
"save_llm_prompts": "Сохранить промпты LLM",
"logging": "Настройка логирования", "logging": "Настройка логирования",
"logging_description": "Настройка уровней логирования задач.", "logging_description": "Настройка уровней логирования задач.",
"storage_description": "Настройка путей и шаблонов файлового хранилища.", "storage_description": "Настройка путей и шаблонов файлового хранилища.",

View File

@@ -10,15 +10,40 @@
<script> <script>
import { onMount } from 'svelte'; import { onMount } from 'svelte';
import ProviderConfig from '../../../../components/llm/ProviderConfig.svelte'; import ProviderConfig from '../../../../components/llm/ProviderConfig.svelte';
import { t } from '../../../../lib/i18n';
import { addToast } from '../../../../lib/toasts';
import { requestApi } from '../../../../lib/api'; import { requestApi } from '../../../../lib/api';
let providers = []; let providers = [];
let loading = true; let loading = true;
let savingPrompts = false;
let prompts = {
documentation_prompt: '',
dashboard_validation_prompt: '',
git_commit_prompt: '',
};
const DEFAULT_LLM_PROMPTS = {
dashboard_validation_prompt:
"Analyze the attached dashboard screenshot and the following execution logs for health and visual issues.\\n\\nLogs:\\n{logs}\\n\\nProvide the analysis in JSON format with the following structure:\\n{\\n \\\"status\\\": \\\"PASS\\\" | \\\"WARN\\\" | \\\"FAIL\\\",\\n \\\"summary\\\": \\\"Short summary of findings\\\",\\n \\\"issues\\\": [\\n {\\n \\\"severity\\\": \\\"WARN\\\" | \\\"FAIL\\\",\\n \\\"message\\\": \\\"Description of the issue\\\",\\n \\\"location\\\": \\\"Optional location info (e.g. chart name)\\\"\\n }\\n ]\\n}",
documentation_prompt:
"Generate professional documentation for the following dataset and its columns.\\nDataset: {dataset_name}\\nColumns: {columns_json}\\n\\nProvide the documentation in JSON format:\\n{\\n \\\"dataset_description\\\": \\\"General description of the dataset\\\",\\n \\\"column_descriptions\\\": [\\n {\\n \\\"name\\\": \\\"column_name\\\",\\n \\\"description\\\": \\\"Generated description\\\"\\n }\\n ]\\n}",
git_commit_prompt:
"Generate a concise and professional git commit message based on the following diff and recent history.\\nUse Conventional Commits format (e.g., feat: ..., fix: ..., docs: ...).\\n\\nRecent History:\\n{history}\\n\\nDiff:\\n{diff}\\n\\nCommit Message:",
};
async function fetchProviders() { async function fetchProviders() {
loading = true; loading = true;
try { try {
providers = await requestApi('/llm/providers'); const [providerList, consolidatedSettings] = await Promise.all([
requestApi('/llm/providers'),
requestApi('/settings/consolidated'),
]);
providers = providerList;
prompts = {
...DEFAULT_LLM_PROMPTS,
...(consolidatedSettings?.llm?.prompts || {}),
};
} catch (err) { } catch (err) {
console.error("Failed to fetch providers", err); console.error("Failed to fetch providers", err);
} finally { } finally {
@@ -26,6 +51,30 @@
} }
} }
async function savePrompts() {
savingPrompts = true;
try {
const current = await requestApi('/settings/consolidated');
const payload = {
...current,
llm: {
...(current?.llm || {}),
prompts: {
...DEFAULT_LLM_PROMPTS,
...prompts,
},
},
};
await requestApi('/settings/consolidated', 'PATCH', payload);
addToast($t.settings?.save_success || 'Settings saved', 'success');
} catch (err) {
console.error('[LLMSettingsPage][Coherence:Failed] Failed to save prompts', err);
addToast($t.settings?.save_failed || 'Failed to save settings', 'error');
} finally {
savingPrompts = false;
}
}
onMount(fetchProviders); onMount(fetchProviders);
</script> </script>
@@ -43,6 +92,64 @@
</div> </div>
{:else} {:else}
<ProviderConfig {providers} onSave={fetchProviders} /> <ProviderConfig {providers} onSave={fetchProviders} />
<div class="mt-6 rounded-lg border border-gray-200 bg-gray-50 p-4">
<h2 class="text-lg font-semibold text-gray-900">
{$t.settings?.llm_prompts_title || 'LLM Prompt Templates'}
</h2>
<p class="mt-1 text-sm text-gray-600">
{$t.settings?.llm_prompts_description ||
'Edit reusable prompts used for documentation, dashboard validation, and git commit generation.'}
</p>
<div class="mt-4 space-y-4">
<div>
<label for="admin-documentation-prompt" class="block text-sm font-medium text-gray-700">
{$t.settings?.llm_prompt_documentation || 'Documentation Prompt'}
</label>
<textarea
id="admin-documentation-prompt"
bind:value={prompts.documentation_prompt}
rows="8"
class="mt-1 block w-full rounded-md border border-gray-300 p-2 font-mono text-xs"
></textarea>
</div>
<div>
<label for="admin-dashboard-validation-prompt" class="block text-sm font-medium text-gray-700">
{$t.settings?.llm_prompt_dashboard_validation || 'Dashboard Validation Prompt'}
</label>
<textarea
id="admin-dashboard-validation-prompt"
bind:value={prompts.dashboard_validation_prompt}
rows="10"
class="mt-1 block w-full rounded-md border border-gray-300 p-2 font-mono text-xs"
></textarea>
</div>
<div>
<label for="admin-git-commit-prompt" class="block text-sm font-medium text-gray-700">
{$t.settings?.llm_prompt_git_commit || 'Git Commit Prompt'}
</label>
<textarea
id="admin-git-commit-prompt"
bind:value={prompts.git_commit_prompt}
rows="8"
class="mt-1 block w-full rounded-md border border-gray-300 p-2 font-mono text-xs"
></textarea>
</div>
</div>
<div class="mt-4 flex justify-end">
<button
class="rounded bg-blue-600 px-4 py-2 text-white hover:bg-blue-700 disabled:opacity-60"
disabled={savingPrompts}
on:click={savePrompts}
>
{savingPrompts ? '...' : ($t.settings?.save_llm_prompts || 'Save LLM Prompts')}
</button>
</div>
</div>
{/if} {/if}
</div> </div>

View File

@@ -20,6 +20,15 @@
import { addToast } from "$lib/toasts"; import { addToast } from "$lib/toasts";
import ProviderConfig from "../../components/llm/ProviderConfig.svelte"; import ProviderConfig from "../../components/llm/ProviderConfig.svelte";
const DEFAULT_LLM_PROMPTS = {
dashboard_validation_prompt:
"Analyze the attached dashboard screenshot and the following execution logs for health and visual issues.\\n\\nLogs:\\n{logs}\\n\\nProvide the analysis in JSON format with the following structure:\\n{\\n \\\"status\\\": \\\"PASS\\\" | \\\"WARN\\\" | \\\"FAIL\\\",\\n \\\"summary\\\": \\\"Short summary of findings\\\",\\n \\\"issues\\\": [\\n {\\n \\\"severity\\\": \\\"WARN\\\" | \\\"FAIL\\\",\\n \\\"message\\\": \\\"Description of the issue\\\",\\n \\\"location\\\": \\\"Optional location info (e.g. chart name)\\\"\\n }\\n ]\\n}",
documentation_prompt:
"Generate professional documentation for the following dataset and its columns.\\nDataset: {dataset_name}\\nColumns: {columns_json}\\n\\nProvide the documentation in JSON format:\\n{\\n \\\"dataset_description\\\": \\\"General description of the dataset\\\",\\n \\\"column_descriptions\\\": [\\n {\\n \\\"name\\\": \\\"column_name\\\",\\n \\\"description\\\": \\\"Generated description\\\"\\n }\\n ]\\n}",
git_commit_prompt:
"Generate a concise and professional git commit message based on the following diff and recent history.\\nUse Conventional Commits format (e.g., feat: ..., fix: ..., docs: ...).\\n\\nRecent History:\\n{history}\\n\\nDiff:\\n{diff}\\n\\nCommit Message:",
};
// State // State
let activeTab = "environments"; let activeTab = "environments";
let settings = null; let settings = null;
@@ -53,6 +62,7 @@
error = null; error = null;
try { try {
const response = await api.getConsolidatedSettings(); const response = await api.getConsolidatedSettings();
response.llm = normalizeLlmSettings(response.llm);
settings = response; settings = response;
} catch (err) { } catch (err) {
error = err.message || "Failed to load settings"; error = err.message || "Failed to load settings";
@@ -62,6 +72,20 @@
} }
} }
function normalizeLlmSettings(llm) {
const normalized = {
providers: [],
default_provider: "",
prompts: { ...DEFAULT_LLM_PROMPTS },
...(llm || {}),
};
normalized.prompts = {
...DEFAULT_LLM_PROMPTS,
...(llm?.prompts || {}),
};
return normalized;
}
// Handle tab change // Handle tab change
function handleTabChange(tab) { function handleTabChange(tab) {
activeTab = tab; activeTab = tab;
@@ -78,6 +102,7 @@
async function handleSave() { async function handleSave() {
console.log("[SettingsPage][Action] Saving settings"); console.log("[SettingsPage][Action] Saving settings");
try { try {
settings.llm = normalizeLlmSettings(settings.llm);
// In a real app we might want to only send the changed section, // In a real app we might want to only send the changed section,
// but updateConsolidatedSettings expects full object or we can use specific endpoints. // but updateConsolidatedSettings expects full object or we can use specific endpoints.
// For now we use the consolidated update. // For now we use the consolidated update.
@@ -644,6 +669,73 @@
providers={settings.llm_providers || []} providers={settings.llm_providers || []}
onSave={loadSettings} onSave={loadSettings}
/> />
<div class="mt-6 rounded-lg border border-gray-200 bg-gray-50 p-4">
<h3 class="text-base font-semibold text-gray-900">
{$t.settings?.llm_prompts_title || "LLM Prompt Templates"}
</h3>
<p class="mt-1 text-sm text-gray-600">
{$t.settings?.llm_prompts_description ||
"Edit reusable prompts used for documentation, dashboard validation, and git commit generation."}
</p>
<div class="mt-4 space-y-4">
<div>
<label
for="documentation-prompt"
class="block text-sm font-medium text-gray-700"
>
{$t.settings?.llm_prompt_documentation || "Documentation Prompt"}
</label>
<textarea
id="documentation-prompt"
bind:value={settings.llm.prompts.documentation_prompt}
rows="8"
class="mt-1 block w-full rounded-md border border-gray-300 p-2 font-mono text-xs"
></textarea>
</div>
<div>
<label
for="dashboard-validation-prompt"
class="block text-sm font-medium text-gray-700"
>
{$t.settings?.llm_prompt_dashboard_validation ||
"Dashboard Validation Prompt"}
</label>
<textarea
id="dashboard-validation-prompt"
bind:value={settings.llm.prompts.dashboard_validation_prompt}
rows="10"
class="mt-1 block w-full rounded-md border border-gray-300 p-2 font-mono text-xs"
></textarea>
</div>
<div>
<label
for="git-commit-prompt"
class="block text-sm font-medium text-gray-700"
>
{$t.settings?.llm_prompt_git_commit || "Git Commit Prompt"}
</label>
<textarea
id="git-commit-prompt"
bind:value={settings.llm.prompts.git_commit_prompt}
rows="8"
class="mt-1 block w-full rounded-md border border-gray-300 p-2 font-mono text-xs"
></textarea>
</div>
</div>
<div class="mt-4 flex justify-end">
<button
class="rounded bg-blue-600 px-4 py-2 text-white hover:bg-blue-700"
on:click={handleSave}
>
{$t.settings?.save_llm_prompts || "Save LLM Prompts"}
</button>
</div>
</div>
</div> </div>
{:else if activeTab === "storage"} {:else if activeTab === "storage"}
<!-- Storage Tab --> <!-- Storage Tab -->