2115 lines
89 KiB
Python
2115 lines
89 KiB
Python
# [DEF:backend.src.api.routes.assistant:Module]
|
||
# @TIER: STANDARD
|
||
# @SEMANTICS: api, assistant, chat, command, confirmation
|
||
# @PURPOSE: API routes for LLM assistant command parsing and safe execution orchestration.
|
||
# @LAYER: API
|
||
# @RELATION: DEPENDS_ON -> backend.src.core.task_manager
|
||
# @RELATION: DEPENDS_ON -> backend.src.models.assistant
|
||
# @INVARIANT: Risky operations are never executed without valid confirmation token.
|
||
|
||
from __future__ import annotations
|
||
|
||
import json
|
||
import re
|
||
import uuid
|
||
from datetime import datetime, timedelta
|
||
from typing import Any, Dict, List, Optional, Tuple
|
||
|
||
from fastapi import APIRouter, Depends, HTTPException, Query, status
|
||
from pydantic import BaseModel, Field
|
||
from sqlalchemy.orm import Session
|
||
from sqlalchemy import desc
|
||
|
||
from ...core.logger import belief_scope, logger
|
||
from ...core.task_manager import TaskManager
|
||
from ...dependencies import get_current_user, get_task_manager, get_config_manager, has_permission
|
||
from ...core.config_manager import ConfigManager
|
||
from ...core.database import get_db
|
||
from ...services.git_service import GitService
|
||
from ...services.llm_provider import LLMProviderService
|
||
from ...services.llm_prompt_templates import (
|
||
is_multimodal_model,
|
||
normalize_llm_settings,
|
||
resolve_bound_provider_id,
|
||
)
|
||
from ...core.superset_client import SupersetClient
|
||
from ...plugins.llm_analysis.service import LLMClient
|
||
from ...plugins.llm_analysis.models import LLMProviderType
|
||
from ...schemas.auth import User
|
||
from ...models.assistant import (
|
||
AssistantAuditRecord,
|
||
AssistantConfirmationRecord,
|
||
AssistantMessageRecord,
|
||
)
|
||
|
||
router = APIRouter(tags=["Assistant"])
|
||
git_service = GitService()
|
||
|
||
|
||
# [DEF:AssistantMessageRequest:Class]
|
||
# @TIER: TRIVIAL
|
||
# @PURPOSE: Input payload for assistant message endpoint.
|
||
# @PRE: message length is within accepted bounds.
|
||
# @POST: Request object provides message text and optional conversation binding.
|
||
class AssistantMessageRequest(BaseModel):
|
||
conversation_id: Optional[str] = None
|
||
message: str = Field(..., min_length=1, max_length=4000)
|
||
# [/DEF:AssistantMessageRequest:Class]
|
||
|
||
|
||
# [DEF:AssistantAction:Class]
|
||
# @TIER: TRIVIAL
|
||
# @PURPOSE: UI action descriptor returned with assistant responses.
|
||
# @PRE: type and label are provided by orchestration logic.
|
||
# @POST: Action can be rendered as button on frontend.
|
||
class AssistantAction(BaseModel):
|
||
type: str
|
||
label: str
|
||
target: Optional[str] = None
|
||
# [/DEF:AssistantAction:Class]
|
||
|
||
|
||
# [DEF:AssistantMessageResponse:Class]
|
||
# @TIER: STANDARD
|
||
# @PURPOSE: Output payload contract for assistant interaction endpoints.
|
||
# @PRE: Response includes deterministic state and text.
|
||
# @POST: Payload may include task_id/confirmation_id/actions for UI follow-up.
|
||
class AssistantMessageResponse(BaseModel):
|
||
conversation_id: str
|
||
response_id: str
|
||
state: str
|
||
text: str
|
||
intent: Optional[Dict[str, Any]] = None
|
||
confirmation_id: Optional[str] = None
|
||
task_id: Optional[str] = None
|
||
actions: List[AssistantAction] = Field(default_factory=list)
|
||
created_at: datetime
|
||
# [/DEF:AssistantMessageResponse:Class]
|
||
|
||
|
||
# [DEF:ConfirmationRecord:Class]
|
||
# @TIER: STANDARD
|
||
# @PURPOSE: In-memory confirmation token model for risky operation dispatch.
|
||
# @PRE: intent/dispatch/user_id are populated at confirmation request time.
|
||
# @POST: Record tracks lifecycle state and expiry timestamp.
|
||
class ConfirmationRecord(BaseModel):
|
||
id: str
|
||
user_id: str
|
||
conversation_id: str
|
||
intent: Dict[str, Any]
|
||
dispatch: Dict[str, Any]
|
||
expires_at: datetime
|
||
state: str = "pending"
|
||
created_at: datetime
|
||
# [/DEF:ConfirmationRecord:Class]
|
||
|
||
|
||
CONVERSATIONS: Dict[Tuple[str, str], List[Dict[str, Any]]] = {}
|
||
USER_ACTIVE_CONVERSATION: Dict[str, str] = {}
|
||
CONFIRMATIONS: Dict[str, ConfirmationRecord] = {}
|
||
ASSISTANT_AUDIT: Dict[str, List[Dict[str, Any]]] = {}
|
||
ASSISTANT_ARCHIVE_AFTER_DAYS = 14
|
||
ASSISTANT_MESSAGE_TTL_DAYS = 90
|
||
|
||
INTENT_PERMISSION_CHECKS: Dict[str, List[Tuple[str, str]]] = {
|
||
"get_task_status": [("tasks", "READ")],
|
||
"create_branch": [("plugin:git", "EXECUTE")],
|
||
"commit_changes": [("plugin:git", "EXECUTE")],
|
||
"deploy_dashboard": [("plugin:git", "EXECUTE")],
|
||
"execute_migration": [("plugin:migration", "EXECUTE"), ("plugin:superset-migration", "EXECUTE")],
|
||
"run_backup": [("plugin:superset-backup", "EXECUTE"), ("plugin:backup", "EXECUTE")],
|
||
"run_llm_validation": [("plugin:llm_dashboard_validation", "EXECUTE")],
|
||
"run_llm_documentation": [("plugin:llm_documentation", "EXECUTE")],
|
||
}
|
||
|
||
|
||
# [DEF:_append_history:Function]
|
||
# @PURPOSE: Append conversation message to in-memory history buffer.
|
||
# @PRE: user_id and conversation_id identify target conversation bucket.
|
||
# @POST: Message entry is appended to CONVERSATIONS key list.
|
||
def _append_history(
|
||
user_id: str,
|
||
conversation_id: str,
|
||
role: str,
|
||
text: str,
|
||
state: Optional[str] = None,
|
||
task_id: Optional[str] = None,
|
||
confirmation_id: Optional[str] = None,
|
||
):
|
||
key = (user_id, conversation_id)
|
||
if key not in CONVERSATIONS:
|
||
CONVERSATIONS[key] = []
|
||
CONVERSATIONS[key].append(
|
||
{
|
||
"message_id": str(uuid.uuid4()),
|
||
"conversation_id": conversation_id,
|
||
"role": role,
|
||
"text": text,
|
||
"state": state,
|
||
"task_id": task_id,
|
||
"confirmation_id": confirmation_id,
|
||
"created_at": datetime.utcnow(),
|
||
}
|
||
)
|
||
# [/DEF:_append_history:Function]
|
||
|
||
|
||
# [DEF:_persist_message:Function]
|
||
# @PURPOSE: Persist assistant/user message record to database.
|
||
# @PRE: db session is writable and message payload is serializable.
|
||
# @POST: Message row is committed or persistence failure is logged.
|
||
def _persist_message(
|
||
db: Session,
|
||
user_id: str,
|
||
conversation_id: str,
|
||
role: str,
|
||
text: str,
|
||
state: Optional[str] = None,
|
||
task_id: Optional[str] = None,
|
||
confirmation_id: Optional[str] = None,
|
||
metadata: Optional[Dict[str, Any]] = None,
|
||
):
|
||
try:
|
||
row = AssistantMessageRecord(
|
||
id=str(uuid.uuid4()),
|
||
user_id=user_id,
|
||
conversation_id=conversation_id,
|
||
role=role,
|
||
text=text,
|
||
state=state,
|
||
task_id=task_id,
|
||
confirmation_id=confirmation_id,
|
||
payload=metadata,
|
||
)
|
||
db.add(row)
|
||
db.commit()
|
||
except Exception as exc:
|
||
db.rollback()
|
||
logger.warning(f"[assistant.message][persist_failed] {exc}")
|
||
# [/DEF:_persist_message:Function]
|
||
|
||
|
||
# [DEF:_audit:Function]
|
||
# @PURPOSE: Append in-memory audit record for assistant decision trace.
|
||
# @PRE: payload describes decision/outcome fields.
|
||
# @POST: ASSISTANT_AUDIT list for user contains new timestamped entry.
|
||
def _audit(user_id: str, payload: Dict[str, Any]):
|
||
if user_id not in ASSISTANT_AUDIT:
|
||
ASSISTANT_AUDIT[user_id] = []
|
||
ASSISTANT_AUDIT[user_id].append({**payload, "created_at": datetime.utcnow().isoformat()})
|
||
logger.info(f"[assistant.audit] {payload}")
|
||
# [/DEF:_audit:Function]
|
||
|
||
|
||
# [DEF:_persist_audit:Function]
|
||
# @PURPOSE: Persist structured assistant audit payload in database.
|
||
# @PRE: db session is writable and payload is JSON-serializable.
|
||
# @POST: Audit row is committed or failure is logged with rollback.
|
||
def _persist_audit(db: Session, user_id: str, payload: Dict[str, Any], conversation_id: Optional[str]):
|
||
try:
|
||
row = AssistantAuditRecord(
|
||
id=str(uuid.uuid4()),
|
||
user_id=user_id,
|
||
conversation_id=conversation_id,
|
||
decision=payload.get("decision"),
|
||
task_id=payload.get("task_id"),
|
||
message=payload.get("message"),
|
||
payload=payload,
|
||
)
|
||
db.add(row)
|
||
db.commit()
|
||
except Exception as exc:
|
||
db.rollback()
|
||
logger.warning(f"[assistant.audit][persist_failed] {exc}")
|
||
# [/DEF:_persist_audit:Function]
|
||
|
||
|
||
# [DEF:_persist_confirmation:Function]
|
||
# @PURPOSE: Persist confirmation token record to database.
|
||
# @PRE: record contains id/user/intent/dispatch/expiry fields.
|
||
# @POST: Confirmation row exists in persistent storage.
|
||
def _persist_confirmation(db: Session, record: ConfirmationRecord):
|
||
try:
|
||
row = AssistantConfirmationRecord(
|
||
id=record.id,
|
||
user_id=record.user_id,
|
||
conversation_id=record.conversation_id,
|
||
state=record.state,
|
||
intent=record.intent,
|
||
dispatch=record.dispatch,
|
||
expires_at=record.expires_at,
|
||
created_at=record.created_at,
|
||
consumed_at=None,
|
||
)
|
||
db.merge(row)
|
||
db.commit()
|
||
except Exception as exc:
|
||
db.rollback()
|
||
logger.warning(f"[assistant.confirmation][persist_failed] {exc}")
|
||
# [/DEF:_persist_confirmation:Function]
|
||
|
||
|
||
# [DEF:_update_confirmation_state:Function]
|
||
# @PURPOSE: Update persistent confirmation token lifecycle state.
|
||
# @PRE: confirmation_id references existing row.
|
||
# @POST: State and consumed_at fields are updated when applicable.
|
||
def _update_confirmation_state(db: Session, confirmation_id: str, state: str):
|
||
try:
|
||
row = db.query(AssistantConfirmationRecord).filter(AssistantConfirmationRecord.id == confirmation_id).first()
|
||
if not row:
|
||
return
|
||
row.state = state
|
||
if state in {"consumed", "expired", "cancelled"}:
|
||
row.consumed_at = datetime.utcnow()
|
||
db.commit()
|
||
except Exception as exc:
|
||
db.rollback()
|
||
logger.warning(f"[assistant.confirmation][update_failed] {exc}")
|
||
# [/DEF:_update_confirmation_state:Function]
|
||
|
||
|
||
# [DEF:_load_confirmation_from_db:Function]
|
||
# @PURPOSE: Load confirmation token from database into in-memory model.
|
||
# @PRE: confirmation_id may or may not exist in storage.
|
||
# @POST: Returns ConfirmationRecord when found, otherwise None.
|
||
def _load_confirmation_from_db(db: Session, confirmation_id: str) -> Optional[ConfirmationRecord]:
|
||
row = (
|
||
db.query(AssistantConfirmationRecord)
|
||
.filter(AssistantConfirmationRecord.id == confirmation_id)
|
||
.first()
|
||
)
|
||
if not row:
|
||
return None
|
||
return ConfirmationRecord(
|
||
id=row.id,
|
||
user_id=row.user_id,
|
||
conversation_id=row.conversation_id,
|
||
intent=row.intent or {},
|
||
dispatch=row.dispatch or {},
|
||
expires_at=row.expires_at,
|
||
state=row.state,
|
||
created_at=row.created_at,
|
||
)
|
||
# [/DEF:_load_confirmation_from_db:Function]
|
||
|
||
|
||
# [DEF:_ensure_conversation:Function]
|
||
# @PURPOSE: Resolve active conversation id in memory or create a new one.
|
||
# @PRE: user_id identifies current actor.
|
||
# @POST: Returns stable conversation id and updates USER_ACTIVE_CONVERSATION.
|
||
def _ensure_conversation(user_id: str, conversation_id: Optional[str]) -> str:
|
||
if conversation_id:
|
||
USER_ACTIVE_CONVERSATION[user_id] = conversation_id
|
||
return conversation_id
|
||
|
||
active = USER_ACTIVE_CONVERSATION.get(user_id)
|
||
if active:
|
||
return active
|
||
|
||
new_id = str(uuid.uuid4())
|
||
USER_ACTIVE_CONVERSATION[user_id] = new_id
|
||
return new_id
|
||
# [/DEF:_ensure_conversation:Function]
|
||
|
||
|
||
# [DEF:_resolve_or_create_conversation:Function]
|
||
# @PURPOSE: Resolve active conversation using explicit id, memory cache, or persisted history.
|
||
# @PRE: user_id and db session are available.
|
||
# @POST: Returns conversation id and updates USER_ACTIVE_CONVERSATION cache.
|
||
def _resolve_or_create_conversation(user_id: str, conversation_id: Optional[str], db: Session) -> str:
|
||
if conversation_id:
|
||
USER_ACTIVE_CONVERSATION[user_id] = conversation_id
|
||
return conversation_id
|
||
|
||
active = USER_ACTIVE_CONVERSATION.get(user_id)
|
||
if active:
|
||
return active
|
||
|
||
last_message = (
|
||
db.query(AssistantMessageRecord)
|
||
.filter(AssistantMessageRecord.user_id == user_id)
|
||
.order_by(desc(AssistantMessageRecord.created_at))
|
||
.first()
|
||
)
|
||
if last_message:
|
||
USER_ACTIVE_CONVERSATION[user_id] = last_message.conversation_id
|
||
return last_message.conversation_id
|
||
|
||
new_id = str(uuid.uuid4())
|
||
USER_ACTIVE_CONVERSATION[user_id] = new_id
|
||
return new_id
|
||
# [/DEF:_resolve_or_create_conversation:Function]
|
||
|
||
|
||
# [DEF:_cleanup_history_ttl:Function]
|
||
# @PURPOSE: Enforce assistant message retention window by deleting expired rows and in-memory records.
|
||
# @PRE: db session is available and user_id references current actor scope.
|
||
# @POST: Messages older than ASSISTANT_MESSAGE_TTL_DAYS are removed from persistence and memory mirrors.
|
||
def _cleanup_history_ttl(db: Session, user_id: str):
|
||
cutoff = datetime.utcnow() - timedelta(days=ASSISTANT_MESSAGE_TTL_DAYS)
|
||
try:
|
||
query = db.query(AssistantMessageRecord).filter(
|
||
AssistantMessageRecord.user_id == user_id,
|
||
AssistantMessageRecord.created_at < cutoff,
|
||
)
|
||
if hasattr(query, "delete"):
|
||
query.delete(synchronize_session=False)
|
||
db.commit()
|
||
except Exception as exc:
|
||
db.rollback()
|
||
logger.warning(f"[assistant.history][ttl_cleanup_failed] user={user_id} error={exc}")
|
||
|
||
stale_keys: List[Tuple[str, str]] = []
|
||
for key, items in CONVERSATIONS.items():
|
||
if key[0] != user_id:
|
||
continue
|
||
kept = []
|
||
for item in items:
|
||
created_at = item.get("created_at")
|
||
if isinstance(created_at, datetime) and created_at < cutoff:
|
||
continue
|
||
kept.append(item)
|
||
if kept:
|
||
CONVERSATIONS[key] = kept
|
||
else:
|
||
stale_keys.append(key)
|
||
for key in stale_keys:
|
||
CONVERSATIONS.pop(key, None)
|
||
# [/DEF:_cleanup_history_ttl:Function]
|
||
|
||
|
||
# [DEF:_is_conversation_archived:Function]
|
||
# @PURPOSE: Determine archived state for a conversation based on last update timestamp.
|
||
# @PRE: updated_at can be null for empty conversations.
|
||
# @POST: Returns True when conversation inactivity exceeds archive threshold.
|
||
def _is_conversation_archived(updated_at: Optional[datetime]) -> bool:
|
||
if not updated_at:
|
||
return False
|
||
cutoff = datetime.utcnow() - timedelta(days=ASSISTANT_ARCHIVE_AFTER_DAYS)
|
||
return updated_at < cutoff
|
||
# [/DEF:_is_conversation_archived:Function]
|
||
|
||
|
||
# [DEF:_coerce_query_bool:Function]
|
||
# @PURPOSE: Normalize bool-like query values for compatibility in direct handler invocations/tests.
|
||
# @PRE: value may be bool, string, or FastAPI Query metadata object.
|
||
# @POST: Returns deterministic boolean flag.
|
||
def _coerce_query_bool(value: Any) -> bool:
|
||
if isinstance(value, bool):
|
||
return value
|
||
if isinstance(value, str):
|
||
return value.strip().lower() in {"1", "true", "yes", "on"}
|
||
return False
|
||
# [/DEF:_coerce_query_bool:Function]
|
||
|
||
|
||
# [DEF:_extract_id:Function]
|
||
# @PURPOSE: Extract first regex match group from text by ordered pattern list.
|
||
# @PRE: patterns contain at least one capture group.
|
||
# @POST: Returns first matched token or None.
|
||
def _extract_id(text: str, patterns: List[str]) -> Optional[str]:
|
||
for p in patterns:
|
||
m = re.search(p, text, flags=re.IGNORECASE)
|
||
if m:
|
||
return m.group(1)
|
||
return None
|
||
# [/DEF:_extract_id:Function]
|
||
|
||
|
||
# [DEF:_resolve_env_id:Function]
|
||
# @PURPOSE: Resolve environment identifier/name token to canonical environment id.
|
||
# @PRE: config_manager provides environment list.
|
||
# @POST: Returns matched environment id or None.
|
||
def _resolve_env_id(token: Optional[str], config_manager: ConfigManager) -> Optional[str]:
|
||
if not token:
|
||
return None
|
||
|
||
normalized = token.strip().lower()
|
||
envs = config_manager.get_environments()
|
||
for env in envs:
|
||
if env.id.lower() == normalized or env.name.lower() == normalized:
|
||
return env.id
|
||
return None
|
||
# [/DEF:_resolve_env_id:Function]
|
||
|
||
|
||
# [DEF:_is_production_env:Function]
|
||
# @PURPOSE: Determine whether environment token resolves to production-like target.
|
||
# @PRE: config_manager provides environments or token text is provided.
|
||
# @POST: Returns True for production/prod synonyms, else False.
|
||
def _is_production_env(token: Optional[str], config_manager: ConfigManager) -> bool:
|
||
env_id = _resolve_env_id(token, config_manager)
|
||
if not env_id:
|
||
return (token or "").strip().lower() in {"prod", "production", "прод"}
|
||
|
||
env = next((e for e in config_manager.get_environments() if e.id == env_id), None)
|
||
if not env:
|
||
return False
|
||
target = f"{env.id} {env.name}".lower()
|
||
return "prod" in target or "production" in target or "прод" in target
|
||
# [/DEF:_is_production_env:Function]
|
||
|
||
|
||
# [DEF:_resolve_provider_id:Function]
|
||
# @PURPOSE: Resolve provider token to provider id with active/default fallback.
|
||
# @PRE: db session can load provider list through LLMProviderService.
|
||
# @POST: Returns provider id or None when no providers configured.
|
||
def _resolve_provider_id(
|
||
provider_token: Optional[str],
|
||
db: Session,
|
||
config_manager: Optional[ConfigManager] = None,
|
||
task_key: Optional[str] = None,
|
||
) -> Optional[str]:
|
||
service = LLMProviderService(db)
|
||
providers = service.get_all_providers()
|
||
if not providers:
|
||
return None
|
||
|
||
if provider_token:
|
||
needle = provider_token.strip().lower()
|
||
for p in providers:
|
||
if p.id.lower() == needle or p.name.lower() == needle:
|
||
return p.id
|
||
|
||
if config_manager and task_key:
|
||
try:
|
||
llm_settings = config_manager.get_config().settings.llm
|
||
bound_provider_id = resolve_bound_provider_id(llm_settings, task_key)
|
||
if bound_provider_id and any(p.id == bound_provider_id for p in providers):
|
||
return bound_provider_id
|
||
except Exception:
|
||
pass
|
||
|
||
active = next((p for p in providers if p.is_active), None)
|
||
return active.id if active else providers[0].id
|
||
# [/DEF:_resolve_provider_id:Function]
|
||
|
||
|
||
# [DEF:_get_default_environment_id:Function]
|
||
# @PURPOSE: Resolve default environment id from settings or first configured environment.
|
||
# @PRE: config_manager returns environments list.
|
||
# @POST: Returns default environment id or None when environment list is empty.
|
||
def _get_default_environment_id(config_manager: ConfigManager) -> Optional[str]:
|
||
configured = config_manager.get_environments()
|
||
if not configured:
|
||
return None
|
||
preferred = None
|
||
if hasattr(config_manager, "get_config"):
|
||
try:
|
||
preferred = config_manager.get_config().settings.default_environment_id
|
||
except Exception:
|
||
preferred = None
|
||
if preferred and any(env.id == preferred for env in configured):
|
||
return preferred
|
||
explicit_default = next((env.id for env in configured if getattr(env, "is_default", False)), None)
|
||
return explicit_default or configured[0].id
|
||
# [/DEF:_get_default_environment_id:Function]
|
||
|
||
|
||
# [DEF:_resolve_dashboard_id_by_ref:Function]
|
||
# @PURPOSE: Resolve dashboard id by title or slug reference in selected environment.
|
||
# @PRE: dashboard_ref is a non-empty string-like token.
|
||
# @POST: Returns dashboard id when uniquely matched, otherwise None.
|
||
def _resolve_dashboard_id_by_ref(
|
||
dashboard_ref: Optional[str],
|
||
env_id: Optional[str],
|
||
config_manager: ConfigManager,
|
||
) -> Optional[int]:
|
||
if not dashboard_ref or not env_id:
|
||
return None
|
||
env = next((item for item in config_manager.get_environments() if item.id == env_id), None)
|
||
if not env:
|
||
return None
|
||
|
||
needle = dashboard_ref.strip().lower()
|
||
try:
|
||
client = SupersetClient(env)
|
||
_, dashboards = client.get_dashboards(query={"page_size": 200})
|
||
except Exception as exc:
|
||
logger.warning(f"[assistant.dashboard_resolve][failed] ref={dashboard_ref} env={env_id} error={exc}")
|
||
return None
|
||
|
||
exact = next(
|
||
(
|
||
d for d in dashboards
|
||
if str(d.get("slug", "")).lower() == needle
|
||
or str(d.get("dashboard_title", "")).lower() == needle
|
||
or str(d.get("title", "")).lower() == needle
|
||
),
|
||
None,
|
||
)
|
||
if exact:
|
||
return int(exact.get("id"))
|
||
|
||
partial = [d for d in dashboards if needle in str(d.get("dashboard_title", d.get("title", ""))).lower()]
|
||
if len(partial) == 1 and partial[0].get("id") is not None:
|
||
return int(partial[0]["id"])
|
||
return None
|
||
# [/DEF:_resolve_dashboard_id_by_ref:Function]
|
||
|
||
|
||
# [DEF:_resolve_dashboard_id_entity:Function]
|
||
# @PURPOSE: Resolve dashboard id from intent entities using numeric id or dashboard_ref fallback.
|
||
# @PRE: entities may contain dashboard_id as int/str and optional dashboard_ref.
|
||
# @POST: Returns resolved dashboard id or None when ambiguous/unresolvable.
|
||
def _resolve_dashboard_id_entity(
|
||
entities: Dict[str, Any],
|
||
config_manager: ConfigManager,
|
||
env_hint: Optional[str] = None,
|
||
) -> Optional[int]:
|
||
raw_dashboard_id = entities.get("dashboard_id")
|
||
dashboard_ref = entities.get("dashboard_ref")
|
||
|
||
if isinstance(raw_dashboard_id, int):
|
||
return raw_dashboard_id
|
||
|
||
if isinstance(raw_dashboard_id, str):
|
||
token = raw_dashboard_id.strip()
|
||
if token.isdigit():
|
||
return int(token)
|
||
if token and not dashboard_ref:
|
||
dashboard_ref = token
|
||
|
||
if not dashboard_ref:
|
||
return None
|
||
|
||
env_token = env_hint or entities.get("environment") or entities.get("source_env") or entities.get("target_env")
|
||
env_id = _resolve_env_id(env_token, config_manager) if env_token else _get_default_environment_id(config_manager)
|
||
return _resolve_dashboard_id_by_ref(str(dashboard_ref), env_id, config_manager)
|
||
# [/DEF:_resolve_dashboard_id_entity:Function]
|
||
|
||
|
||
# [DEF:_get_environment_name_by_id:Function]
|
||
# @PURPOSE: Resolve human-readable environment name by id.
|
||
# @PRE: environment id may be None.
|
||
# @POST: Returns matching environment name or fallback id.
|
||
def _get_environment_name_by_id(env_id: Optional[str], config_manager: ConfigManager) -> str:
|
||
if not env_id:
|
||
return "unknown"
|
||
env = next((item for item in config_manager.get_environments() if item.id == env_id), None)
|
||
return env.name if env else env_id
|
||
# [/DEF:_get_environment_name_by_id:Function]
|
||
|
||
|
||
# [DEF:_extract_result_deep_links:Function]
|
||
# @PURPOSE: Build deep-link actions to verify task result from assistant chat.
|
||
# @PRE: task object is available.
|
||
# @POST: Returns zero or more assistant actions for dashboard open/diff.
|
||
def _extract_result_deep_links(task: Any, config_manager: ConfigManager) -> List[AssistantAction]:
|
||
plugin_id = getattr(task, "plugin_id", None)
|
||
params = getattr(task, "params", {}) or {}
|
||
result = getattr(task, "result", {}) or {}
|
||
actions: List[AssistantAction] = []
|
||
dashboard_id: Optional[int] = None
|
||
env_id: Optional[str] = None
|
||
|
||
if plugin_id == "superset-migration":
|
||
migrated = result.get("migrated_dashboards") if isinstance(result, dict) else None
|
||
if isinstance(migrated, list) and migrated:
|
||
first = migrated[0]
|
||
if isinstance(first, dict) and first.get("id") is not None:
|
||
dashboard_id = int(first.get("id"))
|
||
if dashboard_id is None and isinstance(params.get("selected_ids"), list) and params["selected_ids"]:
|
||
dashboard_id = int(params["selected_ids"][0])
|
||
env_id = params.get("target_env_id")
|
||
elif plugin_id == "superset-backup":
|
||
dashboards = result.get("dashboards") if isinstance(result, dict) else None
|
||
if isinstance(dashboards, list) and dashboards:
|
||
first = dashboards[0]
|
||
if isinstance(first, dict) and first.get("id") is not None:
|
||
dashboard_id = int(first.get("id"))
|
||
if dashboard_id is None and isinstance(params.get("dashboard_ids"), list) and params["dashboard_ids"]:
|
||
dashboard_id = int(params["dashboard_ids"][0])
|
||
env_id = params.get("environment_id") or _resolve_env_id(result.get("environment"), config_manager)
|
||
elif plugin_id == "llm_dashboard_validation":
|
||
if params.get("dashboard_id") is not None:
|
||
dashboard_id = int(params["dashboard_id"])
|
||
env_id = params.get("environment_id")
|
||
|
||
if dashboard_id is not None and env_id:
|
||
env_name = _get_environment_name_by_id(env_id, config_manager)
|
||
actions.append(
|
||
AssistantAction(
|
||
type="open_route",
|
||
label=f"Открыть дашборд в {env_name}",
|
||
target=f"/dashboards/{dashboard_id}?env_id={env_id}",
|
||
)
|
||
)
|
||
if dashboard_id is not None:
|
||
actions.append(
|
||
AssistantAction(
|
||
type="open_diff",
|
||
label="Показать Diff",
|
||
target=str(dashboard_id),
|
||
)
|
||
)
|
||
return actions
|
||
# [/DEF:_extract_result_deep_links:Function]
|
||
|
||
|
||
# [DEF:_build_task_observability_summary:Function]
|
||
# @PURPOSE: Build compact textual summary for completed tasks to reduce "black box" effect.
|
||
# @PRE: task may contain plugin-specific result payload.
|
||
# @POST: Returns non-empty summary line for known task types or empty string fallback.
|
||
def _build_task_observability_summary(task: Any, config_manager: ConfigManager) -> str:
|
||
plugin_id = getattr(task, "plugin_id", None)
|
||
status = str(getattr(task, "status", "")).upper()
|
||
params = getattr(task, "params", {}) or {}
|
||
result = getattr(task, "result", {}) or {}
|
||
|
||
if plugin_id == "superset-migration" and isinstance(result, dict):
|
||
migrated = len(result.get("migrated_dashboards") or [])
|
||
failed_rows = result.get("failed_dashboards") or []
|
||
failed = len(failed_rows)
|
||
selected = result.get("selected_dashboards", migrated + failed)
|
||
mappings = result.get("mapping_count", 0)
|
||
target_env_id = params.get("target_env_id")
|
||
target_env_name = _get_environment_name_by_id(target_env_id, config_manager)
|
||
warning = ""
|
||
if failed_rows:
|
||
first = failed_rows[0]
|
||
warning = (
|
||
f" Внимание: {first.get('title') or first.get('id')}: "
|
||
f"{first.get('error') or 'ошибка'}."
|
||
)
|
||
return (
|
||
f"Сводка миграции: выбрано {selected}, перенесено {migrated}, "
|
||
f"с ошибками {failed}, маппингов {mappings}, целевая среда {target_env_name}."
|
||
f"{warning}"
|
||
)
|
||
|
||
if plugin_id == "superset-backup" and isinstance(result, dict):
|
||
total = int(result.get("total_dashboards", 0) or 0)
|
||
ok = int(result.get("backed_up_dashboards", 0) or 0)
|
||
failed = int(result.get("failed_dashboards", 0) or 0)
|
||
env_id = params.get("environment_id") or _resolve_env_id(result.get("environment"), config_manager)
|
||
env_name = _get_environment_name_by_id(env_id, config_manager)
|
||
failures = result.get("failures") or []
|
||
warning = ""
|
||
if failures:
|
||
first = failures[0]
|
||
warning = (
|
||
f" Внимание: {first.get('title') or first.get('id')}: "
|
||
f"{first.get('error') or 'ошибка'}."
|
||
)
|
||
return (
|
||
f"Сводка бэкапа: среда {env_name}, всего {total}, успешно {ok}, "
|
||
f"с ошибками {failed}. {status}.{warning}"
|
||
)
|
||
|
||
if plugin_id == "llm_dashboard_validation" and isinstance(result, dict):
|
||
report_status = result.get("status") or status
|
||
report_summary = result.get("summary") or "Итог недоступен."
|
||
issues = result.get("issues") or []
|
||
return f"Сводка валидации: статус {report_status}, проблем {len(issues)}. {report_summary}"
|
||
|
||
# Fallback for unknown task payloads.
|
||
if status in {"SUCCESS", "FAILED"}:
|
||
return f"Задача завершена со статусом {status}."
|
||
return ""
|
||
# [/DEF:_build_task_observability_summary:Function]
|
||
|
||
|
||
# [DEF:_parse_command:Function]
|
||
# @PURPOSE: Deterministically parse RU/EN command text into intent payload.
|
||
# @PRE: message contains raw user text and config manager resolves environments.
|
||
# @POST: Returns intent dict with domain/operation/entities/confidence/risk fields.
|
||
def _parse_command(message: str, config_manager: ConfigManager) -> Dict[str, Any]:
|
||
text = message.strip()
|
||
lower = text.lower()
|
||
|
||
if any(
|
||
phrase in lower
|
||
for phrase in [
|
||
"что ты умеешь",
|
||
"что умеешь",
|
||
"что ты можешь",
|
||
"help",
|
||
"помощь",
|
||
"доступные команды",
|
||
"какие команды",
|
||
]
|
||
):
|
||
return {
|
||
"domain": "assistant",
|
||
"operation": "show_capabilities",
|
||
"entities": {},
|
||
"confidence": 0.98,
|
||
"risk_level": "safe",
|
||
"requires_confirmation": False,
|
||
}
|
||
|
||
dashboard_id = _extract_id(lower, [r"(?:дашборд\w*|dashboard)\s*(?:id\s*)?(\d+)"])
|
||
dashboard_ref = _extract_id(
|
||
lower,
|
||
[r"(?:дашборд\w*|dashboard)\s*(?:id\s*)?([a-zа-я0-9._-]+)"],
|
||
)
|
||
dataset_id = _extract_id(lower, [r"(?:датасет\w*|dataset)\s*(?:id\s*)?(\d+)"])
|
||
# Accept short and long task ids (e.g., task-1, task-abc123, UUIDs).
|
||
task_id = _extract_id(lower, [r"(task[-_a-z0-9]{1,}|[0-9a-f]{8}-[0-9a-f-]{27,})"])
|
||
|
||
# Status query
|
||
if any(k in lower for k in ["статус", "status", "state", "проверь задачу"]):
|
||
return {
|
||
"domain": "status",
|
||
"operation": "get_task_status",
|
||
"entities": {"task_id": task_id},
|
||
"confidence": 0.92 if task_id else 0.66,
|
||
"risk_level": "safe",
|
||
"requires_confirmation": False,
|
||
}
|
||
|
||
# Git branch create
|
||
if any(k in lower for k in ["ветк", "branch"]) and any(k in lower for k in ["созд", "сделай", "create"]):
|
||
branch = _extract_id(lower, [r"(?:ветк\w*|branch)\s+([a-z0-9._/-]+)"])
|
||
return {
|
||
"domain": "git",
|
||
"operation": "create_branch",
|
||
"entities": {
|
||
"dashboard_id": int(dashboard_id) if dashboard_id else None,
|
||
"branch_name": branch,
|
||
},
|
||
"confidence": 0.95 if branch and dashboard_id else 0.7,
|
||
"risk_level": "guarded",
|
||
"requires_confirmation": False,
|
||
}
|
||
|
||
# Git commit
|
||
if any(k in lower for k in ["коммит", "commit"]):
|
||
quoted = re.search(r'"([^"]{3,120})"', text)
|
||
message_text = quoted.group(1) if quoted else "assistant: update dashboard changes"
|
||
return {
|
||
"domain": "git",
|
||
"operation": "commit_changes",
|
||
"entities": {
|
||
"dashboard_id": int(dashboard_id) if dashboard_id else None,
|
||
"message": message_text,
|
||
},
|
||
"confidence": 0.9 if dashboard_id else 0.7,
|
||
"risk_level": "guarded",
|
||
"requires_confirmation": False,
|
||
}
|
||
|
||
# Git deploy
|
||
if any(k in lower for k in ["деплой", "deploy", "разверн"]):
|
||
env_match = _extract_id(lower, [r"(?:в|to)\s+([a-z0-9_-]+)"])
|
||
is_dangerous = _is_production_env(env_match, config_manager)
|
||
return {
|
||
"domain": "git",
|
||
"operation": "deploy_dashboard",
|
||
"entities": {
|
||
"dashboard_id": int(dashboard_id) if dashboard_id else None,
|
||
"environment": env_match,
|
||
},
|
||
"confidence": 0.92 if dashboard_id and env_match else 0.7,
|
||
"risk_level": "dangerous" if is_dangerous else "guarded",
|
||
"requires_confirmation": is_dangerous,
|
||
}
|
||
|
||
# Migration
|
||
if any(k in lower for k in ["миграц", "migration", "migrate"]):
|
||
src = _extract_id(lower, [r"(?:с|from)\s+([a-z0-9_-]+)"])
|
||
tgt = _extract_id(lower, [r"(?:на|to)\s+([a-z0-9_-]+)"])
|
||
dry_run = "--dry-run" in lower or "dry run" in lower
|
||
replace_db_config = "--replace-db-config" in lower
|
||
fix_cross_filters = "--fix-cross-filters" not in lower # Default true usually, but let's say test uses --dry-run
|
||
is_dangerous = _is_production_env(tgt, config_manager)
|
||
return {
|
||
"domain": "migration",
|
||
"operation": "execute_migration",
|
||
"entities": {
|
||
"dashboard_id": int(dashboard_id) if dashboard_id else None,
|
||
"source_env": src,
|
||
"target_env": tgt,
|
||
"dry_run": dry_run,
|
||
"replace_db_config": replace_db_config,
|
||
"fix_cross_filters": True,
|
||
},
|
||
"confidence": 0.95 if dashboard_id and src and tgt else 0.72,
|
||
"risk_level": "dangerous" if is_dangerous else "guarded",
|
||
"requires_confirmation": is_dangerous or dry_run,
|
||
}
|
||
|
||
# Backup
|
||
if any(k in lower for k in ["бэкап", "backup", "резерв"]):
|
||
env_match = _extract_id(lower, [r"(?:в|for|из|from)\s+([a-z0-9_-]+)"])
|
||
return {
|
||
"domain": "backup",
|
||
"operation": "run_backup",
|
||
"entities": {
|
||
"dashboard_id": int(dashboard_id) if dashboard_id else None,
|
||
"environment": env_match,
|
||
},
|
||
"confidence": 0.9 if env_match else 0.7,
|
||
"risk_level": "guarded",
|
||
"requires_confirmation": False,
|
||
}
|
||
|
||
# LLM validation
|
||
if any(k in lower for k in ["валидац", "validate", "провер"]):
|
||
env_match = _extract_id(lower, [r"(?:в|for|env|окружени[ея])\s+([a-z0-9_-]+)"])
|
||
provider_match = _extract_id(lower, [r"(?:provider|провайдер)\s+([a-z0-9_-]+)"])
|
||
return {
|
||
"domain": "llm",
|
||
"operation": "run_llm_validation",
|
||
"entities": {
|
||
"dashboard_id": int(dashboard_id) if dashboard_id else None,
|
||
"dashboard_ref": dashboard_ref if (dashboard_ref and not dashboard_ref.isdigit()) else None,
|
||
"environment": env_match,
|
||
"provider": provider_match,
|
||
},
|
||
"confidence": 0.88 if dashboard_id else 0.64,
|
||
"risk_level": "guarded",
|
||
"requires_confirmation": False,
|
||
}
|
||
|
||
# Documentation
|
||
if any(k in lower for k in ["документац", "documentation", "generate docs", "сгенерируй док"]):
|
||
env_match = _extract_id(lower, [r"(?:в|for|env|окружени[ея])\s+([a-z0-9_-]+)"])
|
||
provider_match = _extract_id(lower, [r"(?:provider|провайдер)\s+([a-z0-9_-]+)"])
|
||
return {
|
||
"domain": "llm",
|
||
"operation": "run_llm_documentation",
|
||
"entities": {
|
||
"dataset_id": int(dataset_id) if dataset_id else None,
|
||
"environment": env_match,
|
||
"provider": provider_match,
|
||
},
|
||
"confidence": 0.88 if dataset_id else 0.64,
|
||
"risk_level": "guarded",
|
||
"requires_confirmation": False,
|
||
}
|
||
|
||
return {
|
||
"domain": "unknown",
|
||
"operation": "clarify",
|
||
"entities": {},
|
||
"confidence": 0.3,
|
||
"risk_level": "safe",
|
||
"requires_confirmation": False,
|
||
}
|
||
# [/DEF:_parse_command:Function]
|
||
|
||
|
||
# [DEF:_check_any_permission:Function]
|
||
# @PURPOSE: Validate user against alternative permission checks (logical OR).
|
||
# @PRE: checks list contains resource-action tuples.
|
||
# @POST: Returns on first successful permission; raises 403-like HTTPException otherwise.
|
||
def _check_any_permission(current_user: User, checks: List[Tuple[str, str]]):
|
||
errors: List[HTTPException] = []
|
||
for resource, action in checks:
|
||
try:
|
||
has_permission(resource, action)(current_user)
|
||
return
|
||
except HTTPException as exc:
|
||
errors.append(exc)
|
||
|
||
raise errors[-1] if errors else HTTPException(status_code=403, detail="Permission denied")
|
||
# [/DEF:_check_any_permission:Function]
|
||
|
||
|
||
# [DEF:_has_any_permission:Function]
|
||
# @PURPOSE: Check whether user has at least one permission tuple from the provided list.
|
||
# @PRE: current_user and checks list are valid.
|
||
# @POST: Returns True when at least one permission check passes.
|
||
def _has_any_permission(current_user: User, checks: List[Tuple[str, str]]) -> bool:
|
||
try:
|
||
_check_any_permission(current_user, checks)
|
||
return True
|
||
except HTTPException:
|
||
return False
|
||
# [/DEF:_has_any_permission:Function]
|
||
|
||
|
||
# [DEF:_build_tool_catalog:Function]
|
||
# @PURPOSE: Build current-user tool catalog for LLM planner with operation contracts and defaults.
|
||
# @PRE: current_user is authenticated; config/db are available.
|
||
# @POST: Returns list of executable tools filtered by permission and runtime availability.
|
||
def _build_tool_catalog(current_user: User, config_manager: ConfigManager, db: Session) -> List[Dict[str, Any]]:
|
||
envs = config_manager.get_environments()
|
||
default_env_id = _get_default_environment_id(config_manager)
|
||
providers = LLMProviderService(db).get_all_providers()
|
||
llm_settings = {}
|
||
try:
|
||
llm_settings = config_manager.get_config().settings.llm
|
||
except Exception:
|
||
llm_settings = {}
|
||
active_provider = next((p.id for p in providers if p.is_active), None)
|
||
fallback_provider = active_provider or (providers[0].id if providers else None)
|
||
validation_provider = resolve_bound_provider_id(llm_settings, "dashboard_validation") or fallback_provider
|
||
documentation_provider = resolve_bound_provider_id(llm_settings, "documentation") or fallback_provider
|
||
|
||
candidates: List[Dict[str, Any]] = [
|
||
{
|
||
"operation": "show_capabilities",
|
||
"domain": "assistant",
|
||
"description": "Show available assistant commands and examples",
|
||
"required_entities": [],
|
||
"optional_entities": [],
|
||
"risk_level": "safe",
|
||
"requires_confirmation": False,
|
||
},
|
||
{
|
||
"operation": "get_task_status",
|
||
"domain": "status",
|
||
"description": "Get task status by task_id or latest user task",
|
||
"required_entities": [],
|
||
"optional_entities": ["task_id"],
|
||
"risk_level": "safe",
|
||
"requires_confirmation": False,
|
||
},
|
||
{
|
||
"operation": "create_branch",
|
||
"domain": "git",
|
||
"description": "Create git branch for dashboard by id/slug/title",
|
||
"required_entities": ["branch_name"],
|
||
"optional_entities": ["dashboard_id", "dashboard_ref"],
|
||
"risk_level": "guarded",
|
||
"requires_confirmation": False,
|
||
},
|
||
{
|
||
"operation": "commit_changes",
|
||
"domain": "git",
|
||
"description": "Commit dashboard repository changes by dashboard id/slug/title",
|
||
"required_entities": [],
|
||
"optional_entities": ["dashboard_id", "dashboard_ref", "message"],
|
||
"risk_level": "guarded",
|
||
"requires_confirmation": False,
|
||
},
|
||
{
|
||
"operation": "deploy_dashboard",
|
||
"domain": "git",
|
||
"description": "Deploy dashboard (id/slug/title) to target environment",
|
||
"required_entities": ["environment"],
|
||
"optional_entities": ["dashboard_id", "dashboard_ref"],
|
||
"risk_level": "guarded",
|
||
"requires_confirmation": False,
|
||
},
|
||
{
|
||
"operation": "execute_migration",
|
||
"domain": "migration",
|
||
"description": "Run dashboard migration (id/slug/title) between environments. Optional boolean flags: replace_db_config, fix_cross_filters",
|
||
"required_entities": ["source_env", "target_env"],
|
||
"optional_entities": ["dashboard_id", "dashboard_ref", "replace_db_config", "fix_cross_filters"],
|
||
"risk_level": "guarded",
|
||
"requires_confirmation": False,
|
||
},
|
||
{
|
||
"operation": "run_backup",
|
||
"domain": "backup",
|
||
"description": "Run backup for environment or specific dashboard by id/slug/title",
|
||
"required_entities": ["environment"],
|
||
"optional_entities": ["dashboard_id", "dashboard_ref"],
|
||
"risk_level": "guarded",
|
||
"requires_confirmation": False,
|
||
},
|
||
{
|
||
"operation": "run_llm_validation",
|
||
"domain": "llm",
|
||
"description": "Run LLM dashboard validation by dashboard id/slug/title",
|
||
"required_entities": [],
|
||
"optional_entities": ["dashboard_ref", "environment", "provider"],
|
||
"defaults": {"environment": default_env_id, "provider": validation_provider},
|
||
"risk_level": "guarded",
|
||
"requires_confirmation": False,
|
||
},
|
||
{
|
||
"operation": "run_llm_documentation",
|
||
"domain": "llm",
|
||
"description": "Generate dataset documentation via LLM",
|
||
"required_entities": ["dataset_id"],
|
||
"optional_entities": ["environment", "provider"],
|
||
"defaults": {"environment": default_env_id, "provider": documentation_provider},
|
||
"risk_level": "guarded",
|
||
"requires_confirmation": False,
|
||
},
|
||
]
|
||
|
||
available: List[Dict[str, Any]] = []
|
||
for tool in candidates:
|
||
checks = INTENT_PERMISSION_CHECKS.get(tool["operation"], [])
|
||
if checks and not _has_any_permission(current_user, checks):
|
||
continue
|
||
available.append(tool)
|
||
return available
|
||
# [/DEF:_build_tool_catalog:Function]
|
||
|
||
|
||
# [DEF:_coerce_intent_entities:Function]
|
||
# @PURPOSE: Normalize intent entity value types from LLM output to route-compatible values.
|
||
# @PRE: intent contains entities dict or missing entities.
|
||
# @POST: Returned intent has numeric ids coerced where possible and string values stripped.
|
||
def _coerce_intent_entities(intent: Dict[str, Any]) -> Dict[str, Any]:
|
||
entities = intent.get("entities")
|
||
if not isinstance(entities, dict):
|
||
intent["entities"] = {}
|
||
entities = intent["entities"]
|
||
for key in ("dashboard_id", "dataset_id"):
|
||
value = entities.get(key)
|
||
if isinstance(value, str) and value.strip().isdigit():
|
||
entities[key] = int(value.strip())
|
||
for key, value in list(entities.items()):
|
||
if isinstance(value, str):
|
||
entities[key] = value.strip()
|
||
return intent
|
||
# [/DEF:_coerce_intent_entities:Function]
|
||
|
||
|
||
# Operations that are read-only and do not require confirmation.
|
||
_SAFE_OPS = {"show_capabilities", "get_task_status"}
|
||
|
||
|
||
# [DEF:_confirmation_summary:Function]
|
||
# @PURPOSE: Build human-readable confirmation prompt for an intent before execution.
|
||
# @PRE: intent contains operation and entities fields.
|
||
# @POST: Returns descriptive Russian-language text ending with confirmation prompt.
|
||
async def _async_confirmation_summary(intent: Dict[str, Any], config_manager: ConfigManager, db: Session) -> str:
|
||
operation = intent.get("operation", "")
|
||
entities = intent.get("entities", {})
|
||
descriptions: Dict[str, str] = {
|
||
"create_branch": "создание ветки{branch} для дашборда{dashboard}",
|
||
"commit_changes": "коммит изменений для дашборда{dashboard}",
|
||
"deploy_dashboard": "деплой дашборда{dashboard} в окружение{env}",
|
||
"execute_migration": "миграция дашборда{dashboard} с{src} на{tgt}",
|
||
"run_backup": "бэкап окружения{env}{dashboard}",
|
||
"run_llm_validation": "LLM-валидация дашборда{dashboard}{env}",
|
||
"run_llm_documentation": "генерация документации для датасета{dataset}{env}",
|
||
}
|
||
template = descriptions.get(operation)
|
||
if not template:
|
||
return "Подтвердите выполнение операции или отмените."
|
||
|
||
def _label(value: Any, prefix: str = " ") -> str:
|
||
return f"{prefix}{value}" if value else ""
|
||
|
||
dashboard = entities.get("dashboard_id") or entities.get("dashboard_ref")
|
||
text = template.format(
|
||
branch=_label(entities.get("branch_name")),
|
||
dashboard=_label(dashboard),
|
||
env=_label(entities.get("environment") or entities.get("target_env")),
|
||
src=_label(entities.get("source_env")),
|
||
tgt=_label(entities.get("target_env")),
|
||
dataset=_label(entities.get("dataset_id")),
|
||
)
|
||
|
||
if operation == "execute_migration":
|
||
flags = []
|
||
flags.append("маппинг БД: " + ("ВКЛ" if _coerce_query_bool(entities.get("replace_db_config", False)) else "ВЫКЛ"))
|
||
flags.append("исправление кроссфильтров: " + ("ВКЛ" if _coerce_query_bool(entities.get("fix_cross_filters", True)) else "ВЫКЛ"))
|
||
dry_run_enabled = _coerce_query_bool(entities.get("dry_run", False))
|
||
flags.append("отчет dry-run: " + ("ВКЛ" if dry_run_enabled else "ВЫКЛ"))
|
||
text += f" ({', '.join(flags)})"
|
||
|
||
if dry_run_enabled:
|
||
try:
|
||
from ...core.migration.dry_run_orchestrator import MigrationDryRunService
|
||
from ...models.dashboard import DashboardSelection
|
||
from ...core.superset_client import SupersetClient
|
||
|
||
src_token = entities.get("source_env")
|
||
tgt_token = entities.get("target_env")
|
||
dashboard_id = _resolve_dashboard_id_entity(entities, config_manager, env_hint=src_token)
|
||
|
||
if dashboard_id and src_token and tgt_token:
|
||
src_env_id = _resolve_env_id(src_token, config_manager)
|
||
tgt_env_id = _resolve_env_id(tgt_token, config_manager)
|
||
|
||
if src_env_id and tgt_env_id:
|
||
env_map = {env.id: env for env in config_manager.get_environments()}
|
||
source_env = env_map.get(src_env_id)
|
||
target_env = env_map.get(tgt_env_id)
|
||
|
||
if source_env and target_env and source_env.id != target_env.id:
|
||
selection = DashboardSelection(
|
||
source_env_id=source_env.id,
|
||
target_env_id=target_env.id,
|
||
selected_ids=[dashboard_id],
|
||
replace_db_config=_coerce_query_bool(entities.get("replace_db_config", False)),
|
||
fix_cross_filters=_coerce_query_bool(entities.get("fix_cross_filters", True))
|
||
)
|
||
service = MigrationDryRunService()
|
||
source_client = SupersetClient(source_env)
|
||
target_client = SupersetClient(target_env)
|
||
report = service.run(selection, source_client, target_client, db)
|
||
|
||
s = report.get("summary", {})
|
||
dash_s = s.get("dashboards", {})
|
||
charts_s = s.get("charts", {})
|
||
ds_s = s.get("datasets", {})
|
||
|
||
# Determine main actions counts
|
||
creates = dash_s.get("create", 0) + charts_s.get("create", 0) + ds_s.get("create", 0)
|
||
updates = dash_s.get("update", 0) + charts_s.get("update", 0) + ds_s.get("update", 0)
|
||
deletes = dash_s.get("delete", 0) + charts_s.get("delete", 0) + ds_s.get("delete", 0)
|
||
|
||
text += f"\n\nОтчет dry-run:\n- Будет создано новых объектов: {creates}\n- Будет обновлено: {updates}\n- Будет удалено: {deletes}"
|
||
else:
|
||
text += "\n\n(Не удалось загрузить отчет dry-run: неверные окружения)."
|
||
except Exception as e:
|
||
import traceback
|
||
logger.warning("[assistant.dry_run_summary][failed] Exception: %s\n%s", e, traceback.format_exc())
|
||
text += f"\n\n(Не удалось загрузить отчет dry-run: {e})."
|
||
|
||
return f"Выполнить: {text}. Подтвердите или отмените."
|
||
# [/DEF:_async_confirmation_summary:Function]
|
||
|
||
|
||
# [DEF:_clarification_text_for_intent:Function]
|
||
# @PURPOSE: Convert technical missing-parameter errors into user-facing clarification prompts.
|
||
# @PRE: state was classified as needs_clarification for current intent/error combination.
|
||
# @POST: Returned text is human-readable and actionable for target operation.
|
||
def _clarification_text_for_intent(intent: Optional[Dict[str, Any]], detail_text: str) -> str:
|
||
operation = (intent or {}).get("operation")
|
||
guidance_by_operation: Dict[str, str] = {
|
||
"run_llm_validation": (
|
||
"Нужно уточнение для запуска LLM-валидации: Укажите дашборд (id или slug), окружение и провайдер LLM."
|
||
),
|
||
"run_llm_documentation": (
|
||
"Нужно уточнение для генерации документации: Укажите dataset_id, окружение и провайдер LLM."
|
||
),
|
||
"create_branch": "Нужно уточнение: укажите дашборд (id/slug/title) и имя ветки.",
|
||
"commit_changes": "Нужно уточнение: укажите дашборд (id/slug/title) для коммита.",
|
||
"deploy_dashboard": "Нужно уточнение: укажите дашборд (id/slug/title) и целевое окружение.",
|
||
"execute_migration": "Нужно уточнение: укажите дашборд (id/slug/title), source_env и target_env.",
|
||
"run_backup": "Нужно уточнение: укажите окружение и при необходимости дашборд (id/slug/title).",
|
||
}
|
||
return guidance_by_operation.get(operation, detail_text)
|
||
# [/DEF:_clarification_text_for_intent:Function]
|
||
|
||
|
||
# [DEF:_plan_intent_with_llm:Function]
|
||
# @PURPOSE: Use active LLM provider to select best tool/operation from dynamic catalog.
|
||
# @PRE: tools list contains allowed operations for current user.
|
||
# @POST: Returns normalized intent dict when planning succeeds; otherwise None.
|
||
async def _plan_intent_with_llm(
|
||
message: str,
|
||
tools: List[Dict[str, Any]],
|
||
db: Session,
|
||
config_manager: ConfigManager,
|
||
) -> Optional[Dict[str, Any]]:
|
||
if not tools:
|
||
return None
|
||
|
||
llm_settings = normalize_llm_settings(config_manager.get_config().settings.llm)
|
||
planner_provider_token = llm_settings.get("assistant_planner_provider")
|
||
planner_model_override = llm_settings.get("assistant_planner_model")
|
||
llm_service = LLMProviderService(db)
|
||
providers = llm_service.get_all_providers()
|
||
provider_id = _resolve_provider_id(planner_provider_token, db)
|
||
provider = next((p for p in providers if p.id == provider_id), None)
|
||
if not provider:
|
||
return None
|
||
api_key = llm_service.get_decrypted_api_key(provider.id)
|
||
if not api_key:
|
||
return None
|
||
|
||
planner = LLMClient(
|
||
provider_type=LLMProviderType(provider.provider_type),
|
||
api_key=api_key,
|
||
base_url=provider.base_url,
|
||
default_model=planner_model_override or provider.default_model,
|
||
)
|
||
|
||
system_instruction = (
|
||
"You are a deterministic intent planner for backend tools.\n"
|
||
"Choose exactly one operation from available_tools or return clarify.\n"
|
||
"Output strict JSON object:\n"
|
||
"{"
|
||
"\"domain\": string, "
|
||
"\"operation\": string, "
|
||
"\"entities\": object, "
|
||
"\"confidence\": number, "
|
||
"\"risk_level\": \"safe\"|\"guarded\"|\"dangerous\", "
|
||
"\"requires_confirmation\": boolean"
|
||
"}\n"
|
||
"Rules:\n"
|
||
"- Use only operation names from available_tools.\n"
|
||
"- If input is ambiguous, operation must be \"clarify\" with low confidence.\n"
|
||
"- If dashboard is provided as name/slug (e.g., COVID), put it into entities.dashboard_ref.\n"
|
||
"- Keep entities minimal and factual.\n"
|
||
)
|
||
payload = {
|
||
"available_tools": tools,
|
||
"user_message": message,
|
||
"known_environments": [{"id": e.id, "name": e.name} for e in config_manager.get_environments()],
|
||
}
|
||
try:
|
||
response = await planner.get_json_completion(
|
||
[
|
||
{"role": "system", "content": system_instruction},
|
||
{"role": "user", "content": json.dumps(payload, ensure_ascii=False)},
|
||
]
|
||
)
|
||
except Exception as exc:
|
||
import traceback
|
||
logger.warning(f"[assistant.planner][fallback] LLM planner unavailable: {exc}\n{traceback.format_exc()}")
|
||
return None
|
||
if not isinstance(response, dict):
|
||
return None
|
||
|
||
operation = response.get("operation")
|
||
valid_ops = {tool["operation"] for tool in tools}
|
||
if operation == "clarify":
|
||
return {
|
||
"domain": "unknown",
|
||
"operation": "clarify",
|
||
"entities": {},
|
||
"confidence": float(response.get("confidence", 0.3)),
|
||
"risk_level": "safe",
|
||
"requires_confirmation": False,
|
||
}
|
||
if operation not in valid_ops:
|
||
return None
|
||
|
||
by_operation = {tool["operation"]: tool for tool in tools}
|
||
selected = by_operation[operation]
|
||
intent = {
|
||
"domain": response.get("domain") or selected["domain"],
|
||
"operation": operation,
|
||
"entities": response.get("entities", {}),
|
||
"confidence": float(response.get("confidence", 0.75)),
|
||
"risk_level": response.get("risk_level") or selected["risk_level"],
|
||
"requires_confirmation": bool(response.get("requires_confirmation", selected["requires_confirmation"])),
|
||
}
|
||
intent = _coerce_intent_entities(intent)
|
||
|
||
defaults = selected.get("defaults") or {}
|
||
for key, value in defaults.items():
|
||
if value and not intent["entities"].get(key):
|
||
intent["entities"][key] = value
|
||
|
||
if operation in {"deploy_dashboard", "execute_migration"}:
|
||
env_token = intent["entities"].get("environment") or intent["entities"].get("target_env")
|
||
if _is_production_env(env_token, config_manager):
|
||
intent["risk_level"] = "dangerous"
|
||
intent["requires_confirmation"] = True
|
||
return intent
|
||
# [/DEF:_plan_intent_with_llm:Function]
|
||
|
||
|
||
# [DEF:_authorize_intent:Function]
|
||
# @PURPOSE: Validate user permissions for parsed intent before confirmation/dispatch.
|
||
# @PRE: intent.operation is present for known assistant command domains.
|
||
# @POST: Returns if authorized; raises HTTPException(403) when denied.
|
||
def _authorize_intent(intent: Dict[str, Any], current_user: User):
|
||
operation = intent.get("operation")
|
||
if operation in INTENT_PERMISSION_CHECKS:
|
||
_check_any_permission(current_user, INTENT_PERMISSION_CHECKS[operation])
|
||
# [/DEF:_authorize_intent:Function]
|
||
|
||
|
||
# [DEF:_dispatch_intent:Function]
|
||
# @PURPOSE: Execute parsed assistant intent via existing task/plugin/git services.
|
||
# @PRE: intent operation is known and actor permissions are validated per operation.
|
||
# @POST: Returns response text, optional task id, and UI actions for follow-up.
|
||
async def _dispatch_intent(
|
||
intent: Dict[str, Any],
|
||
current_user: User,
|
||
task_manager: TaskManager,
|
||
config_manager: ConfigManager,
|
||
db: Session,
|
||
) -> Tuple[str, Optional[str], List[AssistantAction]]:
|
||
operation = intent.get("operation")
|
||
entities = intent.get("entities", {})
|
||
|
||
if operation == "show_capabilities":
|
||
tools_catalog = _build_tool_catalog(current_user, config_manager, db)
|
||
labels = {
|
||
"create_branch": "Git: создание ветки",
|
||
"commit_changes": "Git: коммит",
|
||
"deploy_dashboard": "Git: деплой дашборда",
|
||
"execute_migration": "Миграции: запуск переноса",
|
||
"run_backup": "Бэкапы: запуск резервного копирования",
|
||
"run_llm_validation": "LLM: валидация дашборда",
|
||
"run_llm_documentation": "LLM: генерация документации",
|
||
"get_task_status": "Статус: проверка задачи",
|
||
}
|
||
available = [labels[t["operation"]] for t in tools_catalog if t["operation"] in labels]
|
||
if not available:
|
||
return "Сейчас нет доступных для вас операций ассистента.", None, []
|
||
commands = "\n".join(f"- {item}" for item in available)
|
||
text = (
|
||
"Вот что я могу сделать для вас:\n"
|
||
f"{commands}\n\n"
|
||
"Пример: `запусти миграцию с dev на prod для дашборда 42`."
|
||
)
|
||
return text, None, []
|
||
|
||
if operation == "get_task_status":
|
||
_check_any_permission(current_user, [("tasks", "READ")])
|
||
task_id = entities.get("task_id")
|
||
if not task_id:
|
||
recent = [t for t in task_manager.get_tasks(limit=20, offset=0) if t.user_id == current_user.id]
|
||
if not recent:
|
||
return "У вас пока нет задач в истории.", None, []
|
||
task = recent[0]
|
||
actions = [AssistantAction(type="open_task", label="Open Task", target=task.id)]
|
||
if str(task.status).upper() in {"SUCCESS", "FAILED"}:
|
||
actions.extend(_extract_result_deep_links(task, config_manager))
|
||
summary_line = _build_task_observability_summary(task, config_manager)
|
||
return (
|
||
f"Последняя задача: {task.id}, статус: {task.status}."
|
||
+ (f"\n{summary_line}" if summary_line else ""),
|
||
task.id,
|
||
actions,
|
||
)
|
||
|
||
task = task_manager.get_task(task_id)
|
||
if not task:
|
||
raise HTTPException(status_code=404, detail=f"Task {task_id} not found")
|
||
actions = [AssistantAction(type="open_task", label="Open Task", target=task.id)]
|
||
if str(task.status).upper() in {"SUCCESS", "FAILED"}:
|
||
actions.extend(_extract_result_deep_links(task, config_manager))
|
||
summary_line = _build_task_observability_summary(task, config_manager)
|
||
return (
|
||
f"Статус задачи {task.id}: {task.status}."
|
||
+ (f"\n{summary_line}" if summary_line else ""),
|
||
task.id,
|
||
actions,
|
||
)
|
||
|
||
if operation == "create_branch":
|
||
_check_any_permission(current_user, [("plugin:git", "EXECUTE")])
|
||
dashboard_id = _resolve_dashboard_id_entity(entities, config_manager)
|
||
branch_name = entities.get("branch_name")
|
||
if not dashboard_id or not branch_name:
|
||
raise HTTPException(status_code=422, detail="Missing dashboard_id/dashboard_ref or branch_name")
|
||
git_service.create_branch(dashboard_id, branch_name, "main")
|
||
return f"Ветка `{branch_name}` создана для дашборда {dashboard_id}.", None, []
|
||
|
||
if operation == "commit_changes":
|
||
_check_any_permission(current_user, [("plugin:git", "EXECUTE")])
|
||
dashboard_id = _resolve_dashboard_id_entity(entities, config_manager)
|
||
commit_message = entities.get("message")
|
||
if not dashboard_id:
|
||
raise HTTPException(status_code=422, detail="Missing dashboard_id/dashboard_ref")
|
||
git_service.commit_changes(dashboard_id, commit_message, None)
|
||
return "Коммит выполнен успешно.", None, []
|
||
|
||
if operation == "deploy_dashboard":
|
||
_check_any_permission(current_user, [("plugin:git", "EXECUTE")])
|
||
env_token = entities.get("environment")
|
||
env_id = _resolve_env_id(env_token, config_manager)
|
||
dashboard_id = _resolve_dashboard_id_entity(entities, config_manager, env_hint=env_token)
|
||
if not dashboard_id or not env_id:
|
||
raise HTTPException(status_code=422, detail="Missing dashboard_id/dashboard_ref or environment")
|
||
|
||
task = await task_manager.create_task(
|
||
plugin_id="git-integration",
|
||
params={
|
||
"operation": "deploy",
|
||
"dashboard_id": dashboard_id,
|
||
"environment_id": env_id,
|
||
},
|
||
user_id=current_user.id,
|
||
)
|
||
return (
|
||
f"Деплой запущен. task_id={task.id}",
|
||
task.id,
|
||
[
|
||
AssistantAction(type="open_task", label="Open Task", target=task.id),
|
||
AssistantAction(type="open_reports", label="Open Reports", target="/reports"),
|
||
],
|
||
)
|
||
|
||
if operation == "execute_migration":
|
||
_check_any_permission(current_user, [("plugin:migration", "EXECUTE"), ("plugin:superset-migration", "EXECUTE")])
|
||
src_token = entities.get("source_env")
|
||
dashboard_ref = entities.get("dashboard_ref")
|
||
dashboard_id = _resolve_dashboard_id_entity(entities, config_manager, env_hint=src_token)
|
||
src = _resolve_env_id(src_token, config_manager)
|
||
tgt = _resolve_env_id(entities.get("target_env"), config_manager)
|
||
if not src or not tgt:
|
||
raise HTTPException(status_code=422, detail="Missing source_env/target_env")
|
||
if not dashboard_id and not dashboard_ref:
|
||
raise HTTPException(status_code=422, detail="Missing dashboard_id/dashboard_ref")
|
||
|
||
migration_params: Dict[str, Any] = {
|
||
"source_env_id": src,
|
||
"target_env_id": tgt,
|
||
"replace_db_config": _coerce_query_bool(entities.get("replace_db_config", False)),
|
||
"fix_cross_filters": _coerce_query_bool(entities.get("fix_cross_filters", True)),
|
||
}
|
||
if dashboard_id:
|
||
migration_params["selected_ids"] = [dashboard_id]
|
||
else:
|
||
# Fallback: pass dashboard_ref as regex for the migration plugin to match
|
||
migration_params["dashboard_regex"] = str(dashboard_ref)
|
||
|
||
task = await task_manager.create_task(
|
||
plugin_id="superset-migration",
|
||
params=migration_params,
|
||
user_id=current_user.id,
|
||
)
|
||
return (
|
||
f"Миграция запущена. task_id={task.id}",
|
||
task.id,
|
||
[
|
||
AssistantAction(type="open_task", label="Open Task", target=task.id),
|
||
AssistantAction(type="open_reports", label="Open Reports", target="/reports"),
|
||
*(
|
||
[
|
||
AssistantAction(
|
||
type="open_route",
|
||
label=f"Открыть дашборд в {_get_environment_name_by_id(tgt, config_manager)}",
|
||
target=f"/dashboards/{dashboard_id}?env_id={tgt}",
|
||
),
|
||
AssistantAction(type="open_diff", label="Показать Diff", target=str(dashboard_id)),
|
||
]
|
||
if dashboard_id
|
||
else []
|
||
),
|
||
],
|
||
)
|
||
|
||
if operation == "run_backup":
|
||
_check_any_permission(current_user, [("plugin:superset-backup", "EXECUTE"), ("plugin:backup", "EXECUTE")])
|
||
env_token = entities.get("environment")
|
||
env_id = _resolve_env_id(env_token, config_manager)
|
||
if not env_id:
|
||
raise HTTPException(status_code=400, detail="Missing or unknown environment")
|
||
|
||
params: Dict[str, Any] = {"environment_id": env_id}
|
||
if entities.get("dashboard_id") or entities.get("dashboard_ref"):
|
||
dashboard_id = _resolve_dashboard_id_entity(entities, config_manager, env_hint=env_token)
|
||
if not dashboard_id:
|
||
raise HTTPException(status_code=422, detail="Missing dashboard_id/dashboard_ref")
|
||
params["dashboard_ids"] = [dashboard_id]
|
||
|
||
task = await task_manager.create_task(
|
||
plugin_id="superset-backup",
|
||
params=params,
|
||
user_id=current_user.id,
|
||
)
|
||
return (
|
||
f"Бэкап запущен. task_id={task.id}",
|
||
task.id,
|
||
[
|
||
AssistantAction(type="open_task", label="Open Task", target=task.id),
|
||
AssistantAction(type="open_reports", label="Open Reports", target="/reports"),
|
||
*(
|
||
[
|
||
AssistantAction(
|
||
type="open_route",
|
||
label=f"Открыть дашборд в {_get_environment_name_by_id(env_id, config_manager)}",
|
||
target=f"/dashboards/{dashboard_id}?env_id={env_id}",
|
||
),
|
||
AssistantAction(type="open_diff", label="Показать Diff", target=str(dashboard_id)),
|
||
]
|
||
if entities.get("dashboard_id") or entities.get("dashboard_ref")
|
||
else []
|
||
),
|
||
],
|
||
)
|
||
|
||
if operation == "run_llm_validation":
|
||
_check_any_permission(current_user, [("plugin:llm_dashboard_validation", "EXECUTE")])
|
||
env_token = entities.get("environment")
|
||
env_id = _resolve_env_id(env_token, config_manager) or _get_default_environment_id(config_manager)
|
||
dashboard_id = _resolve_dashboard_id_entity(entities, config_manager, env_hint=env_token)
|
||
provider_id = _resolve_provider_id(
|
||
entities.get("provider"),
|
||
db,
|
||
config_manager=config_manager,
|
||
task_key="dashboard_validation",
|
||
)
|
||
if not dashboard_id or not env_id or not provider_id:
|
||
raise HTTPException(
|
||
status_code=422,
|
||
detail="Missing dashboard_id/environment/provider. Укажите ID/slug дашборда или окружение.",
|
||
)
|
||
provider = LLMProviderService(db).get_provider(provider_id)
|
||
provider_model = provider.default_model if provider else ""
|
||
if not is_multimodal_model(provider_model, provider.provider_type if provider else None):
|
||
raise HTTPException(
|
||
status_code=422,
|
||
detail=(
|
||
"Selected provider model is not multimodal for dashboard validation. "
|
||
"Выберите мультимодальную модель (например, gpt-4o)."
|
||
),
|
||
)
|
||
|
||
task = await task_manager.create_task(
|
||
plugin_id="llm_dashboard_validation",
|
||
params={
|
||
"dashboard_id": str(dashboard_id),
|
||
"environment_id": env_id,
|
||
"provider_id": provider_id,
|
||
},
|
||
user_id=current_user.id,
|
||
)
|
||
return (
|
||
f"LLM-валидация запущена. task_id={task.id}",
|
||
task.id,
|
||
[
|
||
AssistantAction(type="open_task", label="Open Task", target=task.id),
|
||
AssistantAction(type="open_reports", label="Open Reports", target="/reports"),
|
||
],
|
||
)
|
||
|
||
if operation == "run_llm_documentation":
|
||
_check_any_permission(current_user, [("plugin:llm_documentation", "EXECUTE")])
|
||
dataset_id = entities.get("dataset_id")
|
||
env_id = _resolve_env_id(entities.get("environment"), config_manager)
|
||
provider_id = _resolve_provider_id(
|
||
entities.get("provider"),
|
||
db,
|
||
config_manager=config_manager,
|
||
task_key="documentation",
|
||
)
|
||
if not dataset_id or not env_id or not provider_id:
|
||
raise HTTPException(status_code=400, detail="Missing dataset_id/environment/provider")
|
||
|
||
task = await task_manager.create_task(
|
||
plugin_id="llm_documentation",
|
||
params={
|
||
"dataset_id": str(dataset_id),
|
||
"environment_id": env_id,
|
||
"provider_id": provider_id,
|
||
},
|
||
user_id=current_user.id,
|
||
)
|
||
return (
|
||
f"Генерация документации запущена. task_id={task.id}",
|
||
task.id,
|
||
[
|
||
AssistantAction(type="open_task", label="Open Task", target=task.id),
|
||
AssistantAction(type="open_reports", label="Open Reports", target="/reports"),
|
||
],
|
||
)
|
||
|
||
raise HTTPException(status_code=400, detail="Unsupported operation")
|
||
# [/DEF:_dispatch_intent:Function]
|
||
|
||
|
||
@router.post("/messages", response_model=AssistantMessageResponse)
|
||
# [DEF:send_message:Function]
|
||
# @PURPOSE: Parse assistant command, enforce safety gates, and dispatch executable intent.
|
||
# @PRE: Authenticated user is available and message text is non-empty.
|
||
# @POST: Response state is one of clarification/confirmation/started/success/denied/failed.
|
||
# @RETURN: AssistantMessageResponse with operation feedback and optional actions.
|
||
async def send_message(
|
||
request: AssistantMessageRequest,
|
||
current_user: User = Depends(get_current_user),
|
||
task_manager: TaskManager = Depends(get_task_manager),
|
||
config_manager: ConfigManager = Depends(get_config_manager),
|
||
db: Session = Depends(get_db),
|
||
):
|
||
with belief_scope("assistant.send_message"):
|
||
user_id = current_user.id
|
||
conversation_id = _resolve_or_create_conversation(user_id, request.conversation_id, db)
|
||
|
||
_append_history(user_id, conversation_id, "user", request.message)
|
||
_persist_message(db, user_id, conversation_id, "user", request.message)
|
||
|
||
tools_catalog = _build_tool_catalog(current_user, config_manager, db)
|
||
intent = None
|
||
try:
|
||
intent = await _plan_intent_with_llm(request.message, tools_catalog, db, config_manager)
|
||
except Exception as exc:
|
||
logger.warning(f"[assistant.planner][fallback] Planner error: {exc}")
|
||
if not intent:
|
||
intent = _parse_command(request.message, config_manager)
|
||
confidence = float(intent.get("confidence", 0.0))
|
||
|
||
if intent.get("domain") == "unknown" or confidence < 0.6:
|
||
text = "Команда неоднозначна. Уточните действие: git / migration / backup / llm / status."
|
||
_append_history(user_id, conversation_id, "assistant", text, state="needs_clarification")
|
||
_persist_message(db, user_id, conversation_id, "assistant", text, state="needs_clarification", metadata={"intent": intent})
|
||
audit_payload = {"decision": "needs_clarification", "message": request.message, "intent": intent}
|
||
_audit(user_id, audit_payload)
|
||
_persist_audit(db, user_id, audit_payload, conversation_id)
|
||
return AssistantMessageResponse(
|
||
conversation_id=conversation_id,
|
||
response_id=str(uuid.uuid4()),
|
||
state="needs_clarification",
|
||
text=text,
|
||
intent=intent,
|
||
actions=[AssistantAction(type="rephrase", label="Rephrase command")],
|
||
created_at=datetime.utcnow(),
|
||
)
|
||
|
||
try:
|
||
_authorize_intent(intent, current_user)
|
||
|
||
operation = intent.get("operation")
|
||
if operation not in _SAFE_OPS:
|
||
confirmation_id = str(uuid.uuid4())
|
||
confirm = ConfirmationRecord(
|
||
id=confirmation_id,
|
||
user_id=user_id,
|
||
conversation_id=conversation_id,
|
||
intent=intent,
|
||
dispatch={"intent": intent},
|
||
expires_at=datetime.utcnow() + timedelta(minutes=5),
|
||
created_at=datetime.utcnow(),
|
||
)
|
||
CONFIRMATIONS[confirmation_id] = confirm
|
||
_persist_confirmation(db, confirm)
|
||
text = await _async_confirmation_summary(intent, config_manager, db)
|
||
_append_history(
|
||
user_id,
|
||
conversation_id,
|
||
"assistant",
|
||
text,
|
||
state="needs_confirmation",
|
||
confirmation_id=confirmation_id,
|
||
)
|
||
_persist_message(
|
||
db,
|
||
user_id,
|
||
conversation_id,
|
||
"assistant",
|
||
text,
|
||
state="needs_confirmation",
|
||
confirmation_id=confirmation_id,
|
||
metadata={
|
||
"intent": intent,
|
||
"actions": [
|
||
{"type": "confirm", "label": "✅ Подтвердить", "target": confirmation_id},
|
||
{"type": "cancel", "label": "❌ Отменить", "target": confirmation_id},
|
||
],
|
||
},
|
||
)
|
||
audit_payload = {
|
||
"decision": "needs_confirmation",
|
||
"message": request.message,
|
||
"intent": intent,
|
||
"confirmation_id": confirmation_id,
|
||
}
|
||
_audit(user_id, audit_payload)
|
||
_persist_audit(db, user_id, audit_payload, conversation_id)
|
||
return AssistantMessageResponse(
|
||
conversation_id=conversation_id,
|
||
response_id=str(uuid.uuid4()),
|
||
state="needs_confirmation",
|
||
text=text,
|
||
intent=intent,
|
||
confirmation_id=confirmation_id,
|
||
actions=[
|
||
AssistantAction(type="confirm", label="✅ Подтвердить", target=confirmation_id),
|
||
AssistantAction(type="cancel", label="❌ Отменить", target=confirmation_id),
|
||
],
|
||
created_at=datetime.utcnow(),
|
||
)
|
||
|
||
# Read-only operations execute immediately
|
||
text, task_id, actions = await _dispatch_intent(intent, current_user, task_manager, config_manager, db)
|
||
state = "started" if task_id else "success"
|
||
_append_history(user_id, conversation_id, "assistant", text, state=state, task_id=task_id)
|
||
_persist_message(
|
||
db,
|
||
user_id,
|
||
conversation_id,
|
||
"assistant",
|
||
text,
|
||
state=state,
|
||
task_id=task_id,
|
||
metadata={"intent": intent, "actions": [a.model_dump() for a in actions]},
|
||
)
|
||
audit_payload = {"decision": "executed", "message": request.message, "intent": intent, "task_id": task_id}
|
||
_audit(user_id, audit_payload)
|
||
_persist_audit(db, user_id, audit_payload, conversation_id)
|
||
return AssistantMessageResponse(
|
||
conversation_id=conversation_id,
|
||
response_id=str(uuid.uuid4()),
|
||
state=state,
|
||
text=text,
|
||
intent=intent,
|
||
task_id=task_id,
|
||
actions=actions,
|
||
created_at=datetime.utcnow(),
|
||
)
|
||
except HTTPException as exc:
|
||
detail_text = str(exc.detail)
|
||
is_clarification_error = exc.status_code in (400, 422) and (
|
||
detail_text.lower().startswith("missing")
|
||
or "укажите" in detail_text.lower()
|
||
or "выберите" in detail_text.lower()
|
||
)
|
||
if exc.status_code == status.HTTP_403_FORBIDDEN:
|
||
state = "denied"
|
||
elif is_clarification_error:
|
||
state = "needs_clarification"
|
||
else:
|
||
state = "failed"
|
||
text = _clarification_text_for_intent(intent, detail_text) if state == "needs_clarification" else detail_text
|
||
_append_history(user_id, conversation_id, "assistant", text, state=state)
|
||
_persist_message(db, user_id, conversation_id, "assistant", text, state=state, metadata={"intent": intent})
|
||
audit_payload = {"decision": state, "message": request.message, "intent": intent, "error": text}
|
||
_audit(user_id, audit_payload)
|
||
_persist_audit(db, user_id, audit_payload, conversation_id)
|
||
return AssistantMessageResponse(
|
||
conversation_id=conversation_id,
|
||
response_id=str(uuid.uuid4()),
|
||
state=state,
|
||
text=text,
|
||
intent=intent,
|
||
actions=[AssistantAction(type="rephrase", label="Rephrase command")] if state == "needs_clarification" else [],
|
||
created_at=datetime.utcnow(),
|
||
)
|
||
# [/DEF:send_message:Function]
|
||
|
||
|
||
@router.post("/confirmations/{confirmation_id}/confirm", response_model=AssistantMessageResponse)
|
||
# [DEF:confirm_operation:Function]
|
||
# @PURPOSE: Execute previously requested risky operation after explicit user confirmation.
|
||
# @PRE: confirmation_id exists, belongs to current user, is pending, and not expired.
|
||
# @POST: Confirmation state becomes consumed and operation result is persisted in history.
|
||
# @RETURN: AssistantMessageResponse with task details when async execution starts.
|
||
async def confirm_operation(
|
||
confirmation_id: str,
|
||
current_user: User = Depends(get_current_user),
|
||
task_manager: TaskManager = Depends(get_task_manager),
|
||
config_manager: ConfigManager = Depends(get_config_manager),
|
||
db: Session = Depends(get_db),
|
||
):
|
||
with belief_scope("assistant.confirm"):
|
||
record = CONFIRMATIONS.get(confirmation_id)
|
||
if not record:
|
||
record = _load_confirmation_from_db(db, confirmation_id)
|
||
if record:
|
||
CONFIRMATIONS[confirmation_id] = record
|
||
else:
|
||
raise HTTPException(status_code=404, detail="Confirmation not found")
|
||
|
||
if record.user_id != current_user.id:
|
||
raise HTTPException(status_code=403, detail="Confirmation does not belong to current user")
|
||
|
||
if record.state != "pending":
|
||
raise HTTPException(status_code=400, detail=f"Confirmation already {record.state}")
|
||
|
||
if datetime.utcnow() > record.expires_at:
|
||
record.state = "expired"
|
||
_update_confirmation_state(db, confirmation_id, "expired")
|
||
raise HTTPException(status_code=400, detail="Confirmation expired")
|
||
|
||
intent = record.intent
|
||
text, task_id, actions = await _dispatch_intent(intent, current_user, task_manager, config_manager, db)
|
||
record.state = "consumed"
|
||
_update_confirmation_state(db, confirmation_id, "consumed")
|
||
|
||
_append_history(current_user.id, record.conversation_id, "assistant", text, state="started" if task_id else "success", task_id=task_id)
|
||
_persist_message(
|
||
db,
|
||
current_user.id,
|
||
record.conversation_id,
|
||
"assistant",
|
||
text,
|
||
state="started" if task_id else "success",
|
||
task_id=task_id,
|
||
metadata={"intent": intent, "confirmation_id": confirmation_id},
|
||
)
|
||
audit_payload = {"decision": "confirmed_execute", "confirmation_id": confirmation_id, "task_id": task_id, "intent": intent}
|
||
_audit(current_user.id, audit_payload)
|
||
_persist_audit(db, current_user.id, audit_payload, record.conversation_id)
|
||
|
||
return AssistantMessageResponse(
|
||
conversation_id=record.conversation_id,
|
||
response_id=str(uuid.uuid4()),
|
||
state="started" if task_id else "success",
|
||
text=text,
|
||
intent=intent,
|
||
task_id=task_id,
|
||
actions=actions,
|
||
created_at=datetime.utcnow(),
|
||
)
|
||
# [/DEF:confirm_operation:Function]
|
||
|
||
|
||
@router.post("/confirmations/{confirmation_id}/cancel", response_model=AssistantMessageResponse)
|
||
# [DEF:cancel_operation:Function]
|
||
# @PURPOSE: Cancel pending risky operation and mark confirmation token as cancelled.
|
||
# @PRE: confirmation_id exists, belongs to current user, and is still pending.
|
||
# @POST: Confirmation becomes cancelled and cannot be executed anymore.
|
||
# @RETURN: AssistantMessageResponse confirming cancellation.
|
||
async def cancel_operation(
|
||
confirmation_id: str,
|
||
current_user: User = Depends(get_current_user),
|
||
db: Session = Depends(get_db),
|
||
):
|
||
with belief_scope("assistant.cancel"):
|
||
record = CONFIRMATIONS.get(confirmation_id)
|
||
if not record:
|
||
record = _load_confirmation_from_db(db, confirmation_id)
|
||
if record:
|
||
CONFIRMATIONS[confirmation_id] = record
|
||
else:
|
||
raise HTTPException(status_code=404, detail="Confirmation not found")
|
||
|
||
if record.user_id != current_user.id:
|
||
raise HTTPException(status_code=403, detail="Confirmation does not belong to current user")
|
||
|
||
if record.state != "pending":
|
||
raise HTTPException(status_code=400, detail=f"Confirmation already {record.state}")
|
||
|
||
record.state = "cancelled"
|
||
_update_confirmation_state(db, confirmation_id, "cancelled")
|
||
text = "Операция отменена. Выполнение не запускалось."
|
||
_append_history(current_user.id, record.conversation_id, "assistant", text, state="success", confirmation_id=confirmation_id)
|
||
_persist_message(
|
||
db,
|
||
current_user.id,
|
||
record.conversation_id,
|
||
"assistant",
|
||
text,
|
||
state="success",
|
||
confirmation_id=confirmation_id,
|
||
metadata={"intent": record.intent},
|
||
)
|
||
audit_payload = {"decision": "cancelled", "confirmation_id": confirmation_id, "intent": record.intent}
|
||
_audit(current_user.id, audit_payload)
|
||
_persist_audit(db, current_user.id, audit_payload, record.conversation_id)
|
||
|
||
return AssistantMessageResponse(
|
||
conversation_id=record.conversation_id,
|
||
response_id=str(uuid.uuid4()),
|
||
state="success",
|
||
text=text,
|
||
intent=record.intent,
|
||
confirmation_id=confirmation_id,
|
||
actions=[],
|
||
created_at=datetime.utcnow(),
|
||
)
|
||
# [/DEF:cancel_operation:Function]
|
||
|
||
|
||
# [DEF:list_conversations:Function]
|
||
# @PURPOSE: Return paginated conversation list for current user with archived flag and last message preview.
|
||
# @PRE: Authenticated user context and valid pagination params.
|
||
# @POST: Conversations are grouped by conversation_id sorted by latest activity descending.
|
||
# @RETURN: Dict with items, paging metadata, and archive segmentation counts.
|
||
@router.get("/conversations")
|
||
async def list_conversations(
|
||
page: int = Query(1, ge=1),
|
||
page_size: int = Query(20, ge=1, le=100),
|
||
include_archived: bool = Query(False),
|
||
archived_only: bool = Query(False),
|
||
search: Optional[str] = Query(None),
|
||
current_user: User = Depends(get_current_user),
|
||
db: Session = Depends(get_db),
|
||
):
|
||
with belief_scope("assistant.conversations"):
|
||
user_id = current_user.id
|
||
include_archived = _coerce_query_bool(include_archived)
|
||
archived_only = _coerce_query_bool(archived_only)
|
||
_cleanup_history_ttl(db, user_id)
|
||
|
||
rows = (
|
||
db.query(AssistantMessageRecord)
|
||
.filter(AssistantMessageRecord.user_id == user_id)
|
||
.order_by(desc(AssistantMessageRecord.created_at))
|
||
.all()
|
||
)
|
||
|
||
summary: Dict[str, Dict[str, Any]] = {}
|
||
for row in rows:
|
||
conv_id = row.conversation_id
|
||
if not conv_id:
|
||
continue
|
||
created_at = row.created_at or datetime.utcnow()
|
||
if conv_id not in summary:
|
||
summary[conv_id] = {
|
||
"conversation_id": conv_id,
|
||
"title": "",
|
||
"updated_at": created_at,
|
||
"last_message": row.text,
|
||
"last_role": row.role,
|
||
"last_state": row.state,
|
||
"last_task_id": row.task_id,
|
||
"message_count": 0,
|
||
}
|
||
item = summary[conv_id]
|
||
item["message_count"] += 1
|
||
if row.role == "user" and row.text and not item["title"]:
|
||
item["title"] = row.text.strip()[:80]
|
||
|
||
items = []
|
||
search_term = search.lower().strip() if search else ""
|
||
archived_total = sum(1 for c in summary.values() if _is_conversation_archived(c.get("updated_at")))
|
||
active_total = len(summary) - archived_total
|
||
for conv in summary.values():
|
||
conv["archived"] = _is_conversation_archived(conv.get("updated_at"))
|
||
if not conv.get("title"):
|
||
conv["title"] = f"Conversation {conv['conversation_id'][:8]}"
|
||
if search_term:
|
||
haystack = f"{conv.get('title', '')} {conv.get('last_message', '')}".lower()
|
||
if search_term not in haystack:
|
||
continue
|
||
if archived_only and not conv["archived"]:
|
||
continue
|
||
if not archived_only and not include_archived and conv["archived"]:
|
||
continue
|
||
updated = conv.get("updated_at")
|
||
conv["updated_at"] = updated.isoformat() if isinstance(updated, datetime) else None
|
||
items.append(conv)
|
||
|
||
items.sort(key=lambda x: x.get("updated_at") or "", reverse=True)
|
||
total = len(items)
|
||
start = (page - 1) * page_size
|
||
page_items = items[start : start + page_size]
|
||
|
||
return {
|
||
"items": page_items,
|
||
"total": total,
|
||
"page": page,
|
||
"page_size": page_size,
|
||
"has_next": start + page_size < total,
|
||
"active_total": active_total,
|
||
"archived_total": archived_total,
|
||
}
|
||
# [/DEF:list_conversations:Function]
|
||
|
||
|
||
# [DEF:delete_conversation:Function]
|
||
# @PURPOSE: Soft-delete or hard-delete a conversation and clear its in-memory trace.
|
||
# @PRE: conversation_id belongs to current_user.
|
||
# @POST: Conversation records are removed from DB and CONVERSATIONS cache.
|
||
@router.delete("/conversations/{conversation_id}")
|
||
async def delete_conversation(
|
||
conversation_id: str,
|
||
current_user: User = Depends(get_current_user),
|
||
db: Session = Depends(get_db),
|
||
):
|
||
with belief_scope("assistant.conversations.delete"):
|
||
user_id = current_user.id
|
||
|
||
# 1. Remove from in-memory cache
|
||
key = (user_id, conversation_id)
|
||
if key in CONVERSATIONS:
|
||
del CONVERSATIONS[key]
|
||
|
||
# 2. Delete from database
|
||
deleted_count = db.query(AssistantMessageRecord).filter(
|
||
AssistantMessageRecord.user_id == user_id,
|
||
AssistantMessageRecord.conversation_id == conversation_id
|
||
).delete()
|
||
|
||
db.commit()
|
||
|
||
if deleted_count == 0:
|
||
raise HTTPException(status_code=404, detail="Conversation not found or already deleted")
|
||
|
||
return {"status": "success", "deleted": deleted_count, "conversation_id": conversation_id}
|
||
# [/DEF:delete_conversation:Function]
|
||
|
||
|
||
@router.get("/history")
|
||
# [DEF:get_history:Function]
|
||
# @PURPOSE: Retrieve paginated assistant conversation history for current user.
|
||
# @PRE: Authenticated user is available and page params are valid.
|
||
# @POST: Returns persistent messages and mirrored in-memory snapshot for diagnostics.
|
||
# @RETURN: Dict with items, paging metadata, and resolved conversation_id.
|
||
async def get_history(
|
||
page: int = Query(1, ge=1),
|
||
page_size: int = Query(20, ge=1, le=100),
|
||
conversation_id: Optional[str] = Query(None),
|
||
from_latest: bool = Query(False),
|
||
current_user: User = Depends(get_current_user),
|
||
db: Session = Depends(get_db),
|
||
):
|
||
with belief_scope("assistant.history"):
|
||
user_id = current_user.id
|
||
_cleanup_history_ttl(db, user_id)
|
||
conv_id = _resolve_or_create_conversation(user_id, conversation_id, db)
|
||
|
||
base_query = (
|
||
db.query(AssistantMessageRecord)
|
||
.filter(
|
||
AssistantMessageRecord.user_id == user_id,
|
||
AssistantMessageRecord.conversation_id == conv_id,
|
||
)
|
||
)
|
||
total = base_query.count()
|
||
start = (page - 1) * page_size
|
||
if from_latest:
|
||
rows = (
|
||
base_query
|
||
.order_by(desc(AssistantMessageRecord.created_at))
|
||
.offset(start)
|
||
.limit(page_size)
|
||
.all()
|
||
)
|
||
rows = list(reversed(rows))
|
||
else:
|
||
rows = (
|
||
base_query
|
||
.order_by(AssistantMessageRecord.created_at.asc())
|
||
.offset(start)
|
||
.limit(page_size)
|
||
.all()
|
||
)
|
||
|
||
persistent_items = [
|
||
{
|
||
"message_id": row.id,
|
||
"conversation_id": row.conversation_id,
|
||
"role": row.role,
|
||
"text": row.text,
|
||
"state": row.state,
|
||
"task_id": row.task_id,
|
||
"confirmation_id": row.confirmation_id,
|
||
"created_at": row.created_at.isoformat() if row.created_at else None,
|
||
"metadata": row.payload,
|
||
}
|
||
for row in rows
|
||
]
|
||
|
||
memory_items = CONVERSATIONS.get((user_id, conv_id), [])
|
||
return {
|
||
"items": persistent_items,
|
||
"memory_items": memory_items,
|
||
"total": total,
|
||
"page": page,
|
||
"page_size": page_size,
|
||
"has_next": start + page_size < total,
|
||
"from_latest": from_latest,
|
||
"conversation_id": conv_id,
|
||
}
|
||
# [/DEF:get_history:Function]
|
||
|
||
|
||
@router.get("/audit")
|
||
# [DEF:get_assistant_audit:Function]
|
||
# @PURPOSE: Return assistant audit decisions for current user from persistent and in-memory stores.
|
||
# @PRE: User has tasks:READ permission.
|
||
# @POST: Audit payload is returned in reverse chronological order from DB.
|
||
# @RETURN: Dict with persistent and memory audit slices.
|
||
async def get_assistant_audit(
|
||
limit: int = Query(50, ge=1, le=500),
|
||
current_user: User = Depends(get_current_user),
|
||
db: Session = Depends(get_db),
|
||
_=Depends(has_permission("tasks", "READ")),
|
||
):
|
||
with belief_scope("assistant.audit"):
|
||
memory_rows = ASSISTANT_AUDIT.get(current_user.id, [])
|
||
db_rows = (
|
||
db.query(AssistantAuditRecord)
|
||
.filter(AssistantAuditRecord.user_id == current_user.id)
|
||
.order_by(AssistantAuditRecord.created_at.desc())
|
||
.limit(limit)
|
||
.all()
|
||
)
|
||
persistent = [
|
||
{
|
||
"id": row.id,
|
||
"user_id": row.user_id,
|
||
"conversation_id": row.conversation_id,
|
||
"decision": row.decision,
|
||
"task_id": row.task_id,
|
||
"message": row.message,
|
||
"payload": row.payload,
|
||
"created_at": row.created_at.isoformat() if row.created_at else None,
|
||
}
|
||
for row in db_rows
|
||
]
|
||
return {
|
||
"items": persistent,
|
||
"memory_items": memory_rows[-limit:],
|
||
"total": len(persistent),
|
||
"memory_total": len(memory_rows),
|
||
}
|
||
# [/DEF:get_assistant_audit:Function]
|
||
|
||
# [/DEF:backend.src.api.routes.assistant:Module]
|