chat worked
This commit is contained in:
62
backend/src/services/__tests__/test_llm_prompt_templates.py
Normal file
62
backend/src/services/__tests__/test_llm_prompt_templates.py
Normal file
@@ -0,0 +1,62 @@
|
||||
# [DEF:backend.src.services.__tests__.test_llm_prompt_templates:Module]
|
||||
# @TIER: STANDARD
|
||||
# @SEMANTICS: tests, llm, prompts, templates, settings
|
||||
# @PURPOSE: Validate normalization and rendering behavior for configurable LLM prompt templates.
|
||||
# @LAYER: Domain Tests
|
||||
# @RELATION: DEPENDS_ON -> backend.src.services.llm_prompt_templates
|
||||
# @INVARIANT: All required prompt keys remain available after normalization.
|
||||
|
||||
from src.services.llm_prompt_templates import (
|
||||
DEFAULT_LLM_PROMPTS,
|
||||
normalize_llm_settings,
|
||||
render_prompt,
|
||||
)
|
||||
|
||||
|
||||
# [DEF:test_normalize_llm_settings_adds_default_prompts:Function]
# @TIER: STANDARD
# @PURPOSE: Ensure legacy/partial llm settings are expanded with all prompt defaults.
# @PRE: Input llm settings do not contain complete prompts object.
# @POST: Returned structure includes required prompt templates with fallback defaults.
def test_normalize_llm_settings_adds_default_prompts():
    # Legacy-shaped settings: provider configured, but no "prompts" section at all.
    result = normalize_llm_settings({"default_provider": "x"})

    # The caller-provided field survives normalization untouched.
    assert result["default_provider"] == "x"

    # A prompts section is synthesized, and every default key is present
    # with a string template value.
    assert "prompts" in result
    prompts = result["prompts"]
    for template_name in DEFAULT_LLM_PROMPTS:
        assert template_name in prompts
        assert isinstance(prompts[template_name], str)
# [/DEF:test_normalize_llm_settings_adds_default_prompts:Function]
|
||||
|
||||
|
||||
# [DEF:test_normalize_llm_settings_keeps_custom_prompt_values:Function]
# @TIER: STANDARD
# @PURPOSE: Ensure user-customized prompt values are preserved during normalization.
# @PRE: Input llm settings contain custom prompt override.
# @POST: Custom prompt value remains unchanged in normalized output.
def test_normalize_llm_settings_keeps_custom_prompt_values():
    override = "Doc for {dataset_name} using {columns_json}"
    settings = {"prompts": {"documentation_prompt": override}}

    result = normalize_llm_settings(settings)

    # The user override must win over the built-in default template.
    assert result["prompts"]["documentation_prompt"] == override
# [/DEF:test_normalize_llm_settings_keeps_custom_prompt_values:Function]
|
||||
|
||||
|
||||
# [DEF:test_render_prompt_replaces_known_placeholders:Function]
# @TIER: STANDARD
# @PURPOSE: Ensure template placeholders are deterministically replaced.
# @PRE: Template contains placeholders matching provided variables.
# @POST: Rendered prompt string contains substituted values.
def test_render_prompt_replaces_known_placeholders():
    template = "Hello {name}, diff={diff}"
    values = {"name": "bot", "diff": "A->B"}

    output = render_prompt(template, values)

    assert output == "Hello bot, diff=A->B"
# [/DEF:test_render_prompt_replaces_known_placeholders:Function]
|
||||
|
||||
|
||||
# [/DEF:backend.src.services.__tests__.test_llm_prompt_templates:Module]
|
||||
94
backend/src/services/llm_prompt_templates.py
Normal file
94
backend/src/services/llm_prompt_templates.py
Normal file
@@ -0,0 +1,94 @@
|
||||
# [DEF:backend.src.services.llm_prompt_templates:Module]
|
||||
# @TIER: STANDARD
|
||||
# @SEMANTICS: llm, prompts, templates, settings
|
||||
# @PURPOSE: Provide default LLM prompt templates and normalization helpers for runtime usage.
|
||||
# @LAYER: Domain
|
||||
# @RELATION: DEPENDS_ON -> backend.src.core.config_manager
|
||||
# @INVARIANT: All required prompt template keys are always present after normalization.
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from copy import deepcopy
|
||||
from typing import Dict, Any
|
||||
|
||||
|
||||
# [DEF:DEFAULT_LLM_PROMPTS:Constant]
# @TIER: STANDARD
# @PURPOSE: Default prompt templates used by documentation, dashboard validation, and git commit generation.
#
# Placeholders use single-brace `{name}` tokens. The literal `{` / `}` braces in
# the embedded JSON examples are safe here because `render_prompt` below performs
# plain token replacement via `str.replace`, NOT `str.format` (which would choke
# on unpaired/literal braces).
# NOTE(review): internal whitespace of these templates is reproduced as seen in
# this view; confirm against the committed file if exact spacing matters.
DEFAULT_LLM_PROMPTS: Dict[str, str] = {
    # Vision/log analysis of a rendered dashboard; expects a `{logs}` substitution
    # and instructs the model to answer in a fixed JSON envelope.
    "dashboard_validation_prompt": (
        "Analyze the attached dashboard screenshot and the following execution logs for health and visual issues.\n\n"
        "Logs:\n"
        "{logs}\n\n"
        "Provide the analysis in JSON format with the following structure:\n"
        "{\n"
        ' "status": "PASS" | "WARN" | "FAIL",\n'
        ' "summary": "Short summary of findings",\n'
        ' "issues": [\n'
        " {\n"
        ' "severity": "WARN" | "FAIL",\n'
        ' "message": "Description of the issue",\n'
        ' "location": "Optional location info (e.g. chart name)"\n'
        " }\n"
        " ]\n"
        "}"
    ),
    # Dataset/column documentation generation; substitutes `{dataset_name}` and
    # `{columns_json}`, again requesting a fixed JSON response shape.
    "documentation_prompt": (
        "Generate professional documentation for the following dataset and its columns.\n"
        "Dataset: {dataset_name}\n"
        "Columns: {columns_json}\n\n"
        "Provide the documentation in JSON format:\n"
        "{\n"
        ' "dataset_description": "General description of the dataset",\n'
        ' "column_descriptions": [\n'
        " {\n"
        ' "name": "column_name",\n'
        ' "description": "Generated description"\n'
        " }\n"
        " ]\n"
        "}"
    ),
    # Conventional-Commits message generation from `{history}` and `{diff}`.
    "git_commit_prompt": (
        "Generate a concise and professional git commit message based on the following diff and recent history.\n"
        "Use Conventional Commits format (e.g., feat: ..., fix: ..., docs: ...).\n\n"
        "Recent History:\n"
        "{history}\n\n"
        "Diff:\n"
        "{diff}\n\n"
        "Commit Message:"
    ),
}
# [/DEF:DEFAULT_LLM_PROMPTS:Constant]
|
||||
|
||||
|
||||
# [DEF:normalize_llm_settings:Function]
# @TIER: STANDARD
# @PURPOSE: Ensure llm settings contain stable schema with prompts section and default templates.
# @PRE: llm_settings is dictionary-like value or None.
# @POST: Returned dict contains prompts with all required template keys.
def normalize_llm_settings(llm_settings: Any) -> Dict[str, Any]:
    # Start from the canonical empty schema; anything unrecognized in the
    # input (or a non-dict input entirely) is dropped.
    base: Dict[str, Any] = {"providers": [], "default_provider": "", "prompts": {}}
    if isinstance(llm_settings, dict):
        for field in ("providers", "default_provider", "prompts"):
            if field in llm_settings:
                base[field] = llm_settings[field]

    # Tolerate a malformed prompts section (e.g. None or a list) by treating
    # it as empty.
    raw_prompts = base["prompts"] if isinstance(base["prompts"], dict) else {}

    # Defaults first, then layer user overrides on top. Only non-empty string
    # overrides are honored; deepcopy keeps the module-level defaults pristine.
    merged: Dict[str, str] = deepcopy(DEFAULT_LLM_PROMPTS)
    for name, text in raw_prompts.items():
        if isinstance(text, str) and text.strip():
            merged[name] = text

    base["prompts"] = merged
    return base
# [/DEF:normalize_llm_settings:Function]
|
||||
|
||||
|
||||
# [DEF:render_prompt:Function]
# @TIER: STANDARD
# @PURPOSE: Render prompt template using deterministic placeholder replacement with graceful fallback.
# @PRE: template is a string and variables values are already stringifiable.
# @POST: Returns rendered prompt text with known placeholders substituted.
def render_prompt(template: str, variables: Dict[str, Any]) -> str:
    # Plain sequential token replacement (not str.format): literal braces in
    # the template survive, and placeholders with no matching variable are
    # left untouched.
    result = template
    for placeholder, replacement in variables.items():
        token = "{" + placeholder + "}"
        result = result.replace(token, str(replacement))
    return result
# [/DEF:render_prompt:Function]
|
||||
|
||||
|
||||
# [/DEF:backend.src.services.llm_prompt_templates:Module]
|
||||
Reference in New Issue
Block a user