feat(assistant): add multi-dialog UX, task-aware llm settings, and i18n cleanup

This commit is contained in:
2026-02-23 23:45:01 +03:00
parent ab1c87ffba
commit 7df7b4f98c
30 changed files with 1145 additions and 221 deletions

View File

@@ -218,6 +218,29 @@ def test_unknown_command_returns_needs_clarification():
# [/DEF:test_unknown_command_returns_needs_clarification:Function]
# [DEF:test_capabilities_question_returns_successful_help:Function]
# @PURPOSE: A natural-language "what can you do" query must produce a successful help reply, not a clarification.
# @PRE: User message is a capability question in free-form text.
# @POST: Reply state is "success" and the text contains the capabilities summary.
def test_capabilities_question_returns_successful_help():
    _clear_assistant_state()
    capability_request = assistant_module.AssistantMessageRequest(message="Что ты умеешь?")
    reply = _run_async(
        assistant_module.send_message(
            request=capability_request,
            current_user=_admin_user(),
            task_manager=_FakeTaskManager(),
            config_manager=_FakeConfigManager(),
            db=_FakeDb(),
        )
    )
    assert reply.state == "success"
    assert "Вот что я могу сделать" in reply.text
    assert "Миграции" in reply.text or "Git" in reply.text
# [/DEF:test_capabilities_question_returns_successful_help:Function]
# [DEF:test_non_admin_command_returns_denied:Function]
# @PURPOSE: Non-admin user must receive denied state for privileged command.
# @PRE: Limited principal executes privileged git branch command.

View File

@@ -27,6 +27,11 @@ from ...core.config_manager import ConfigManager
from ...core.database import get_db
from ...services.git_service import GitService
from ...services.llm_provider import LLMProviderService
from ...services.llm_prompt_templates import (
is_multimodal_model,
normalize_llm_settings,
resolve_bound_provider_id,
)
from ...core.superset_client import SupersetClient
from ...plugins.llm_analysis.service import LLMClient
from ...plugins.llm_analysis.models import LLMProviderType
@@ -449,7 +454,12 @@ def _is_production_env(token: Optional[str], config_manager: ConfigManager) -> b
# @PURPOSE: Resolve provider token to provider id with active/default fallback.
# @PRE: db session can load provider list through LLMProviderService.
# @POST: Returns provider id or None when no providers configured.
def _resolve_provider_id(provider_token: Optional[str], db: Session) -> Optional[str]:
def _resolve_provider_id(
provider_token: Optional[str],
db: Session,
config_manager: Optional[ConfigManager] = None,
task_key: Optional[str] = None,
) -> Optional[str]:
service = LLMProviderService(db)
providers = service.get_all_providers()
if not providers:
@@ -461,6 +471,15 @@ def _resolve_provider_id(provider_token: Optional[str], db: Session) -> Optional
if p.id.lower() == needle or p.name.lower() == needle:
return p.id
if config_manager and task_key:
try:
llm_settings = config_manager.get_config().settings.llm
bound_provider_id = resolve_bound_provider_id(llm_settings, task_key)
if bound_provider_id and any(p.id == bound_provider_id for p in providers):
return bound_provider_id
except Exception:
pass
active = next((p for p in providers if p.is_active), None)
return active.id if active else providers[0].id
# [/DEF:_resolve_provider_id:Function]
@@ -537,6 +556,27 @@ def _parse_command(message: str, config_manager: ConfigManager) -> Dict[str, Any
text = message.strip()
lower = text.lower()
if any(
phrase in lower
for phrase in [
"что ты умеешь",
"что умеешь",
"что ты можешь",
"help",
"помощь",
"доступные команды",
"какие команды",
]
):
return {
"domain": "assistant",
"operation": "show_capabilities",
"entities": {},
"confidence": 0.98,
"risk_level": "safe",
"requires_confirmation": False,
}
dashboard_id = _extract_id(lower, [r"(?:дашборд\w*|dashboard)\s*(?:id\s*)?(\d+)"])
dashboard_ref = _extract_id(
lower,
@@ -721,10 +761,26 @@ def _build_tool_catalog(current_user: User, config_manager: ConfigManager, db: S
envs = config_manager.get_environments()
default_env_id = _get_default_environment_id(config_manager)
providers = LLMProviderService(db).get_all_providers()
llm_settings = {}
try:
llm_settings = config_manager.get_config().settings.llm
except Exception:
llm_settings = {}
active_provider = next((p.id for p in providers if p.is_active), None)
fallback_provider = active_provider or (providers[0].id if providers else None)
validation_provider = resolve_bound_provider_id(llm_settings, "dashboard_validation") or fallback_provider
documentation_provider = resolve_bound_provider_id(llm_settings, "documentation") or fallback_provider
candidates: List[Dict[str, Any]] = [
{
"operation": "show_capabilities",
"domain": "assistant",
"description": "Show available assistant commands and examples",
"required_entities": [],
"optional_entities": [],
"risk_level": "safe",
"requires_confirmation": False,
},
{
"operation": "get_task_status",
"domain": "status",
@@ -785,7 +841,7 @@ def _build_tool_catalog(current_user: User, config_manager: ConfigManager, db: S
"description": "Run LLM dashboard validation",
"required_entities": ["dashboard_id"],
"optional_entities": ["dashboard_ref", "environment", "provider"],
"defaults": {"environment": default_env_id, "provider": fallback_provider},
"defaults": {"environment": default_env_id, "provider": validation_provider},
"risk_level": "guarded",
"requires_confirmation": False,
},
@@ -795,7 +851,7 @@ def _build_tool_catalog(current_user: User, config_manager: ConfigManager, db: S
"description": "Generate dataset documentation via LLM",
"required_entities": ["dataset_id"],
"optional_entities": ["environment", "provider"],
"defaults": {"environment": default_env_id, "provider": fallback_provider},
"defaults": {"environment": default_env_id, "provider": documentation_provider},
"risk_level": "guarded",
"requires_confirmation": False,
},
@@ -867,9 +923,13 @@ async def _plan_intent_with_llm(
if not tools:
return None
llm_settings = normalize_llm_settings(config_manager.get_config().settings.llm)
planner_provider_token = llm_settings.get("assistant_planner_provider")
planner_model_override = llm_settings.get("assistant_planner_model")
llm_service = LLMProviderService(db)
providers = llm_service.get_all_providers()
provider = next((p for p in providers if p.is_active), None)
provider_id = _resolve_provider_id(planner_provider_token, db)
provider = next((p for p in providers if p.id == provider_id), None)
if not provider:
return None
api_key = llm_service.get_decrypted_api_key(provider.id)
@@ -880,7 +940,7 @@ async def _plan_intent_with_llm(
provider_type=LLMProviderType(provider.provider_type),
api_key=api_key,
base_url=provider.base_url,
default_model=provider.default_model,
default_model=planner_model_override or provider.default_model,
)
system_instruction = (
@@ -983,6 +1043,29 @@ async def _dispatch_intent(
operation = intent.get("operation")
entities = intent.get("entities", {})
if operation == "show_capabilities":
tools_catalog = _build_tool_catalog(current_user, config_manager, db)
labels = {
"create_branch": "Git: создание ветки",
"commit_changes": "Git: коммит",
"deploy_dashboard": "Git: деплой дашборда",
"execute_migration": "Миграции: запуск переноса",
"run_backup": "Бэкапы: запуск резервного копирования",
"run_llm_validation": "LLM: валидация дашборда",
"run_llm_documentation": "LLM: генерация документации",
"get_task_status": "Статус: проверка задачи",
}
available = [labels[t["operation"]] for t in tools_catalog if t["operation"] in labels]
if not available:
return "Сейчас нет доступных для вас операций ассистента.", None, []
commands = "\n".join(f"- {item}" for item in available)
text = (
"Вот что я могу сделать для вас:\n"
f"{commands}\n\n"
"Пример: `запусти миграцию с dev на prod для дашборда 42`."
)
return text, None, []
if operation == "get_task_status":
_check_any_permission(current_user, [("tasks", "READ")])
task_id = entities.get("task_id")
@@ -1111,12 +1194,27 @@ async def _dispatch_intent(
env_id,
config_manager,
)
provider_id = _resolve_provider_id(entities.get("provider"), db)
provider_id = _resolve_provider_id(
entities.get("provider"),
db,
config_manager=config_manager,
task_key="dashboard_validation",
)
if not dashboard_id or not env_id or not provider_id:
raise HTTPException(
status_code=422,
detail="Missing dashboard_id/environment/provider. Укажите ID/slug дашборда или окружение.",
)
provider = LLMProviderService(db).get_provider(provider_id)
provider_model = provider.default_model if provider else ""
if not is_multimodal_model(provider_model):
raise HTTPException(
status_code=422,
detail=(
"Selected provider model is not multimodal for dashboard validation. "
"Выберите мультимодальную модель (например, gpt-4o)."
),
)
task = await task_manager.create_task(
plugin_id="llm_dashboard_validation",
@@ -1140,7 +1238,12 @@ async def _dispatch_intent(
_check_any_permission(current_user, [("plugin:llm_documentation", "EXECUTE")])
dataset_id = entities.get("dataset_id")
env_id = _resolve_env_id(entities.get("environment"), config_manager)
provider_id = _resolve_provider_id(entities.get("provider"), db)
provider_id = _resolve_provider_id(
entities.get("provider"),
db,
config_manager=config_manager,
task_key="documentation",
)
if not dataset_id or not env_id or not provider_id:
raise HTTPException(status_code=400, detail="Missing dataset_id/environment/provider")
@@ -1301,6 +1404,7 @@ async def send_message(
is_clarification_error = exc.status_code in (400, 422) and (
detail_text.lower().startswith("missing")
or "укажите" in detail_text.lower()
or "выберите" in detail_text.lower()
)
if exc.status_code == status.HTTP_403_FORBIDDEN:
state = "denied"

View File

@@ -25,7 +25,11 @@ from src.api.routes.git_schemas import (
)
from src.services.git_service import GitService
from src.core.logger import logger, belief_scope
from ...services.llm_prompt_templates import DEFAULT_LLM_PROMPTS, normalize_llm_settings
from ...services.llm_prompt_templates import (
DEFAULT_LLM_PROMPTS,
normalize_llm_settings,
resolve_bound_provider_id,
)
router = APIRouter(tags=["git"])
git_service = GitService()
@@ -431,7 +435,11 @@ async def generate_commit_message(
llm_service = LLMProviderService(db)
providers = llm_service.get_all_providers()
provider = next((p for p in providers if p.is_active), None)
llm_settings = normalize_llm_settings(config_manager.get_config().settings.llm)
bound_provider_id = resolve_bound_provider_id(llm_settings, "git_commit")
provider = next((p for p in providers if p.id == bound_provider_id), None)
if not provider:
provider = next((p for p in providers if p.is_active), None)
if not provider:
raise HTTPException(status_code=400, detail="No active LLM provider found")
@@ -447,7 +455,6 @@ async def generate_commit_message(
# 4. Generate Message
from ...plugins.git.llm_extension import GitLLMExtension
extension = GitLLMExtension(client)
llm_settings = normalize_llm_settings(config_manager.get_config().settings.llm)
git_prompt = llm_settings["prompts"].get(
"git_commit_prompt",
DEFAULT_LLM_PROMPTS["git_commit_prompt"],

View File

@@ -9,9 +9,15 @@ from fastapi import APIRouter, Depends, HTTPException, status, Query
from pydantic import BaseModel
from ...core.logger import belief_scope
from ...core.task_manager import TaskManager, Task, TaskStatus, LogEntry
from ...core.task_manager.models import LogFilter, LogStats
from ...dependencies import get_task_manager, has_permission, get_current_user
from ...core.task_manager import TaskManager, Task, TaskStatus, LogEntry
from ...core.task_manager.models import LogFilter, LogStats
from ...dependencies import get_task_manager, has_permission, get_current_user, get_config_manager
from ...core.config_manager import ConfigManager
from ...services.llm_prompt_templates import (
is_multimodal_model,
normalize_llm_settings,
resolve_bound_provider_id,
)
router = APIRouter()
@@ -39,32 +45,50 @@ class ResumeTaskRequest(BaseModel):
# @PRE: plugin_id must exist and params must be valid for that plugin.
# @POST: A new task is created and started.
# @RETURN: Task - The created task instance.
async def create_task(
request: CreateTaskRequest,
task_manager: TaskManager = Depends(get_task_manager),
current_user = Depends(get_current_user)
):
async def create_task(
request: CreateTaskRequest,
task_manager: TaskManager = Depends(get_task_manager),
current_user = Depends(get_current_user),
config_manager: ConfigManager = Depends(get_config_manager),
):
# Dynamic permission check based on plugin_id
has_permission(f"plugin:{request.plugin_id}", "EXECUTE")(current_user)
"""
Create and start a new task for a given plugin.
"""
with belief_scope("create_task"):
try:
# Special handling for validation task to include provider config
if request.plugin_id == "llm_dashboard_validation":
from ...core.database import SessionLocal
from ...services.llm_provider import LLMProviderService
db = SessionLocal()
try:
llm_service = LLMProviderService(db)
provider_id = request.params.get("provider_id")
if provider_id:
db_provider = llm_service.get_provider(provider_id)
if not db_provider:
raise ValueError(f"LLM Provider {provider_id} not found")
finally:
db.close()
try:
# Special handling for LLM tasks to resolve provider config by task binding.
if request.plugin_id in {"llm_dashboard_validation", "llm_documentation"}:
from ...core.database import SessionLocal
from ...services.llm_provider import LLMProviderService
db = SessionLocal()
try:
llm_service = LLMProviderService(db)
provider_id = request.params.get("provider_id")
if not provider_id:
llm_settings = normalize_llm_settings(config_manager.get_config().settings.llm)
binding_key = "dashboard_validation" if request.plugin_id == "llm_dashboard_validation" else "documentation"
provider_id = resolve_bound_provider_id(llm_settings, binding_key)
if provider_id:
request.params["provider_id"] = provider_id
if not provider_id:
providers = llm_service.get_all_providers()
active_provider = next((p for p in providers if p.is_active), None)
if active_provider:
provider_id = active_provider.id
request.params["provider_id"] = provider_id
if provider_id:
db_provider = llm_service.get_provider(provider_id)
if not db_provider:
raise ValueError(f"LLM Provider {provider_id} not found")
if request.plugin_id == "llm_dashboard_validation" and not is_multimodal_model(db_provider.default_model):
raise ValueError(
"Selected provider model is not multimodal for dashboard validation"
)
finally:
db.close()
task = await task_manager.create_task(
plugin_id=request.plugin_id,

View File

@@ -9,7 +9,11 @@
from pydantic import BaseModel, Field
from typing import List, Optional
from ..models.storage import StorageConfig
from ..services.llm_prompt_templates import DEFAULT_LLM_PROMPTS
from ..services.llm_prompt_templates import (
DEFAULT_LLM_ASSISTANT_SETTINGS,
DEFAULT_LLM_PROMPTS,
DEFAULT_LLM_PROVIDER_BINDINGS,
)
# [DEF:Schedule:DataClass]
# @PURPOSE: Represents a backup schedule configuration.
@@ -55,6 +59,8 @@ class GlobalSettings(BaseModel):
"providers": [],
"default_provider": "",
"prompts": dict(DEFAULT_LLM_PROMPTS),
"provider_bindings": dict(DEFAULT_LLM_PROVIDER_BINDINGS),
**dict(DEFAULT_LLM_ASSISTANT_SETTINGS),
}
)

View File

@@ -25,6 +25,7 @@ from ...models.llm import ValidationRecord
from ...core.task_manager.context import TaskContext
from ...services.llm_prompt_templates import (
DEFAULT_LLM_PROMPTS,
is_multimodal_model,
normalize_llm_settings,
render_prompt,
)
@@ -108,6 +109,10 @@ class DashboardValidationPlugin(PluginBase):
llm_log.debug(f" Base URL: {db_provider.base_url}")
llm_log.debug(f" Default Model: {db_provider.default_model}")
llm_log.debug(f" Is Active: {db_provider.is_active}")
if not is_multimodal_model(db_provider.default_model):
raise ValueError(
"Dashboard validation requires a multimodal model (image input support)."
)
api_key = llm_service.get_decrypted_api_key(provider_id)
llm_log.debug(f"API Key decrypted (first 8 chars): {api_key[:8] if api_key and len(api_key) > 8 else 'EMPTY_OR_NONE'}...")

View File

@@ -7,8 +7,12 @@
# @INVARIANT: All required prompt keys remain available after normalization.
from src.services.llm_prompt_templates import (
DEFAULT_LLM_ASSISTANT_SETTINGS,
DEFAULT_LLM_PROVIDER_BINDINGS,
DEFAULT_LLM_PROMPTS,
is_multimodal_model,
normalize_llm_settings,
resolve_bound_provider_id,
render_prompt,
)
@@ -22,10 +26,15 @@ def test_normalize_llm_settings_adds_default_prompts():
normalized = normalize_llm_settings({"default_provider": "x"})
assert "prompts" in normalized
assert "provider_bindings" in normalized
assert normalized["default_provider"] == "x"
for key in DEFAULT_LLM_PROMPTS:
assert key in normalized["prompts"]
assert isinstance(normalized["prompts"][key], str)
for key in DEFAULT_LLM_PROVIDER_BINDINGS:
assert key in normalized["provider_bindings"]
for key in DEFAULT_LLM_ASSISTANT_SETTINGS:
assert key in normalized
# [/DEF:test_normalize_llm_settings_adds_default_prompts:Function]
@@ -59,4 +68,42 @@ def test_render_prompt_replaces_known_placeholders():
# [/DEF:test_render_prompt_replaces_known_placeholders:Function]
# [DEF:test_is_multimodal_model_detects_known_vision_models:Function]
# @TIER: STANDARD
# @PURPOSE: Ensure multimodal model detection recognizes common vision-capable model names.
def test_is_multimodal_model_detects_known_vision_models():
    for vision_model in ("gpt-4o", "claude-3-5-sonnet"):
        assert is_multimodal_model(vision_model) is True
    assert is_multimodal_model("text-only-model") is False
# [/DEF:test_is_multimodal_model_detects_known_vision_models:Function]
# [DEF:test_resolve_bound_provider_id_prefers_binding_then_default:Function]
# @TIER: STANDARD
# @PURPOSE: Verify provider binding resolution priority: explicit task binding first, default provider second.
def test_resolve_bound_provider_id_prefers_binding_then_default():
    llm_settings = {
        "default_provider": "default-1",
        "provider_bindings": {"dashboard_validation": "vision-1"},
    }
    bound = resolve_bound_provider_id(llm_settings, "dashboard_validation")
    fallback = resolve_bound_provider_id(llm_settings, "documentation")
    assert bound == "vision-1"
    assert fallback == "default-1"
# [/DEF:test_resolve_bound_provider_id_prefers_binding_then_default:Function]
# [DEF:test_normalize_llm_settings_keeps_assistant_planner_settings:Function]
# @TIER: STANDARD
# @PURPOSE: Ensure assistant planner provider/model fields survive normalization unchanged.
def test_normalize_llm_settings_keeps_assistant_planner_settings():
    raw_settings = {
        "assistant_planner_provider": "provider-a",
        "assistant_planner_model": "gpt-4.1-mini",
    }
    normalized = normalize_llm_settings(raw_settings)
    assert normalized["assistant_planner_provider"] == "provider-a"
    assert normalized["assistant_planner_model"] == "gpt-4.1-mini"
# [/DEF:test_normalize_llm_settings_keeps_assistant_planner_settings:Function]
# [/DEF:backend.src.services.__tests__.test_llm_prompt_templates:Module]

View File

@@ -61,23 +61,109 @@ DEFAULT_LLM_PROMPTS: Dict[str, str] = {
# [/DEF:DEFAULT_LLM_PROMPTS:Constant]
# [DEF:DEFAULT_LLM_PROVIDER_BINDINGS:Constant]
# @TIER: STANDARD
# @PURPOSE: Default provider binding per task domain; empty string means "no explicit binding configured".
DEFAULT_LLM_PROVIDER_BINDINGS: Dict[str, str] = dict.fromkeys(
    ("dashboard_validation", "documentation", "git_commit"),
    "",
)
# [/DEF:DEFAULT_LLM_PROVIDER_BINDINGS:Constant]
# [DEF:DEFAULT_LLM_ASSISTANT_SETTINGS:Constant]
# @TIER: STANDARD
# @PURPOSE: Default planner settings for assistant chat intent model/provider resolution; empty means "unset".
DEFAULT_LLM_ASSISTANT_SETTINGS: Dict[str, str] = dict.fromkeys(
    ("assistant_planner_provider", "assistant_planner_model"),
    "",
)
# [/DEF:DEFAULT_LLM_ASSISTANT_SETTINGS:Constant]
# [DEF:normalize_llm_settings:Function]
# @TIER: STANDARD
# @PURPOSE: Ensure llm settings contain a stable schema: prompts, provider bindings, and assistant planner fields.
# @PRE: llm_settings is dictionary-like value or None.
# @POST: Returned dict contains prompts and provider_bindings with all required keys, plus stripped planner fields.
def normalize_llm_settings(llm_settings: Any) -> Dict[str, Any]:
    # Start from the full default schema so every expected key exists even for None input.
    normalized: Dict[str, Any] = {
        "providers": [],
        "default_provider": "",
        "prompts": {},
        "provider_bindings": {},
        **DEFAULT_LLM_ASSISTANT_SETTINGS,
    }
    if isinstance(llm_settings, dict):
        # Copy only known keys so unrelated config noise never leaks into the normalized schema.
        allowed_keys = (
            "providers",
            "default_provider",
            "prompts",
            "provider_bindings",
            "assistant_planner_provider",
            "assistant_planner_model",
        )
        normalized.update({k: v for k, v in llm_settings.items() if k in allowed_keys})
    prompts = normalized.get("prompts") if isinstance(normalized.get("prompts"), dict) else {}
    merged_prompts = deepcopy(DEFAULT_LLM_PROMPTS)
    # Only non-empty string overrides replace the default prompt templates.
    merged_prompts.update({k: v for k, v in prompts.items() if isinstance(v, str) and v.strip()})
    normalized["prompts"] = merged_prompts
    bindings = normalized.get("provider_bindings") if isinstance(normalized.get("provider_bindings"), dict) else {}
    merged_bindings = deepcopy(DEFAULT_LLM_PROVIDER_BINDINGS)
    # Bindings may legitimately be empty strings ("unbound"), so any string value is accepted.
    merged_bindings.update({k: v for k, v in bindings.items() if isinstance(v, str)})
    normalized["provider_bindings"] = merged_bindings
    # Planner fields are coerced to stripped strings; non-string values fall back to defaults.
    for key, default_value in DEFAULT_LLM_ASSISTANT_SETTINGS.items():
        value = normalized.get(key, default_value)
        normalized[key] = value.strip() if isinstance(value, str) else default_value
    return normalized
# [/DEF:normalize_llm_settings:Function]
# [DEF:is_multimodal_model:Function]
# @TIER: STANDARD
# @PURPOSE: Heuristically determine whether model supports image input required for dashboard validation.
# @PRE: model_name may be empty, None-like, or mixed-case.
# @POST: Returns True when model likely supports multimodal input.
def is_multimodal_model(model_name: str) -> bool:
    normalized_name = (model_name or "").strip().lower()
    if not normalized_name:
        return False
    # Substring markers for model families known to accept image input.
    for marker in ("gpt-4o", "gpt-4.1", "vision", "vl", "gemini", "claude-3", "claude-sonnet-4"):
        if marker in normalized_name:
            return True
    return False
# [/DEF:is_multimodal_model:Function]
# [DEF:resolve_bound_provider_id:Function]
# @TIER: STANDARD
# @PURPOSE: Resolve provider id configured for a task binding with fallback to default provider.
# @PRE: llm_settings is normalized or raw dict from config.
# @POST: Returns configured provider id or fallback id/empty string when not defined.
def resolve_bound_provider_id(llm_settings: Any, task_key: str) -> str:
    settings = normalize_llm_settings(llm_settings)
    candidate = settings.get("provider_bindings", {}).get(task_key)
    # A non-blank explicit binding wins over the global default provider.
    if isinstance(candidate, str) and candidate.strip():
        return candidate.strip()
    fallback = settings.get("default_provider", "")
    return fallback.strip() if isinstance(fallback, str) else ""
# [/DEF:resolve_bound_provider_id:Function]
# [DEF:render_prompt:Function]
# @TIER: STANDARD
# @PURPOSE: Render prompt template using deterministic placeholder replacement with graceful fallback.