semantic protocol condense + script update

This commit is contained in:
2026-01-28 15:49:39 +03:00
parent e7b31accd6
commit 18b42f8dd0
2 changed files with 577 additions and 364 deletions

View File

@@ -1,8 +1,10 @@
# [DEF:generate_semantic_map:Module]
#
# @SEMANTICS: semantic_analysis, parser, map_generator, compliance_checker
# @TIER: CRITICAL
# @SEMANTICS: semantic_analysis, parser, map_generator, compliance_checker, tier_validation, svelte_props, data_flow
# @PURPOSE: Scans the codebase to generate a Semantic Map and Compliance Report based on the System Standard.
# @LAYER: DevOps/Tooling
# @INVARIANT: All DEF anchors must have matching closing anchors; TIER determines validation strictness.
# @RELATION: READS -> FileSystem
# @RELATION: PRODUCES -> semantics/semantic_map.json
# @RELATION: PRODUCES -> specs/project_map.md
@@ -14,12 +16,15 @@ import re
import json
import datetime
import fnmatch
from enum import Enum
from dataclasses import dataclass, field
from typing import Dict, List, Optional, Any, Pattern, Tuple, Set
# Mock belief_scope for the script itself to avoid import issues
class belief_scope:
# [DEF:__init__:Function]
# @PURPOSE: Mock init.
# @TIER: TRIVIAL
# @PURPOSE: Mock init for self-containment.
# @PRE: name is a string.
# @POST: Instance initialized.
def __init__(self, name):
@@ -27,6 +32,7 @@ class belief_scope:
# [/DEF:__init__:Function]
# [DEF:__enter__:Function]
# @TIER: TRIVIAL
# @PURPOSE: Mock enter.
# @PRE: Instance initialized.
# @POST: Returns self.
@@ -35,6 +41,7 @@ class belief_scope:
# [/DEF:__enter__:Function]
# [DEF:__exit__:Function]
# @TIER: TRIVIAL
# @PURPOSE: Mock exit.
# @PRE: Context entered.
# @POST: Context exited.
@@ -44,10 +51,32 @@ class belief_scope:
# [/SECTION]
# [SECTION: CONFIGURATION]
class Tier(Enum):
    # [DEF:Tier:Class]
    # @TIER: TRIVIAL
    # @PURPOSE: Enumeration of semantic tiers defining validation strictness.
    # Values are the literal strings expected in @TIER tags; tier selects the
    # mandatory-tag set (TIER_MANDATORY_TAGS) and belief-logging requirement.
    CRITICAL = "CRITICAL"  # strictest: full tag set incl. @INVARIANT
    STANDARD = "STANDARD"  # default tier when no (or an unknown) @TIER tag is present
    TRIVIAL = "TRIVIAL"    # minimal tags; belief-state logging not required
    # [/DEF:Tier:Class]
class Severity(Enum):
    # [DEF:Severity:Class]
    # @TIER: TRIVIAL
    # @PURPOSE: Severity levels for compliance issues.
    # Only ERROR issues make an entity's compliance "valid" flag False;
    # WARNING and INFO are advisory (lighter score penalties / report icons).
    ERROR = "ERROR"
    WARNING = "WARNING"
    INFO = "INFO"
    # [/DEF:Severity:Class]
PROJECT_ROOT = "."
IGNORE_DIRS = {
".git", "__pycache__", "node_modules", "venv", ".pytest_cache",
".kilocode", "backups", "logs", "semantics", "specs"
".kilocode", "backups", "logs", "semantics", "specs", ".venv"
}
IGNORE_FILES = {
"package-lock.json", "poetry.lock", "yarn.lock"
@@ -56,19 +85,62 @@ OUTPUT_JSON = "semantics/semantic_map.json"
OUTPUT_COMPRESSED_MD = "specs/project_map.md"
REPORTS_DIR = "semantics/reports"
MANDATORY_TAGS = {
"Module": ["PURPOSE", "LAYER", "SEMANTICS"],
"Component": ["PURPOSE", "LAYER", "SEMANTICS"],
"Function": ["PURPOSE", "PRE", "POST"],
"Class": ["PURPOSE"]
# Tier-based mandatory tags
TIER_MANDATORY_TAGS = {
Tier.CRITICAL: {
"Module": ["PURPOSE", "LAYER", "SEMANTICS", "TIER", "INVARIANT"],
"Component": ["PURPOSE", "LAYER", "SEMANTICS", "TIER", "INVARIANT"],
"Function": ["PURPOSE", "PRE", "POST"],
"Class": ["PURPOSE", "TIER"]
},
Tier.STANDARD: {
"Module": ["PURPOSE", "LAYER", "SEMANTICS", "TIER"],
"Component": ["PURPOSE", "LAYER", "SEMANTICS", "TIER"],
"Function": ["PURPOSE", "PRE", "POST"],
"Class": ["PURPOSE", "TIER"]
},
Tier.TRIVIAL: {
"Module": ["PURPOSE", "TIER"],
"Component": ["PURPOSE", "TIER"],
"Function": ["PURPOSE"],
"Class": ["PURPOSE", "TIER"]
}
}
# Tier-based belief state requirements
TIER_BELIEF_REQUIRED = {
Tier.CRITICAL: True,
Tier.STANDARD: True,
Tier.TRIVIAL: False
}
# [/SECTION]
# [DEF:ComplianceIssue:Class]
# @TIER: TRIVIAL
# @PURPOSE: Represents a single compliance issue with severity.
@dataclass
class ComplianceIssue:
    message: str
    severity: Severity
    line_number: Optional[int] = None

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the issue for JSON output (severity as its string value)."""
        payload: Dict[str, Any] = {
            "message": self.message,
            "severity": self.severity.value,
        }
        payload["line_number"] = self.line_number
        return payload
# [/DEF:ComplianceIssue:Class]
# [DEF:SemanticEntity:Class]
# @PURPOSE: Represents a code entity (Module, Function, Component) found during parsing.
# @INVARIANT: start_line is always set; end_line is set upon closure.
# @TIER: CRITICAL
# @PURPOSE: Represents a code entity (Module, Function, Component) found during parsing.
# @INVARIANT: start_line is always set; end_line is set upon closure; tier defaults to STANDARD.
class SemanticEntity:
# [DEF:__init__:Function]
# @TIER: STANDARD
# @PURPOSE: Initializes a new SemanticEntity instance.
# @PRE: name, type_, start_line, file_path are provided.
# @POST: Instance is initialized with default values.
@@ -83,43 +155,80 @@ class SemanticEntity:
self.relations: List[Dict[str, str]] = []
self.children: List['SemanticEntity'] = []
self.parent: Optional['SemanticEntity'] = None
self.compliance_issues: List[str] = []
self.compliance_issues: List[ComplianceIssue] = []
self.has_belief_scope: bool = False
self.has_console_log: bool = False
# New fields for enhanced Svelte analysis
self.props: List[Dict[str, Any]] = []
self.events: List[str] = []
self.data_flow: List[Dict[str, str]] = []
# [/DEF:__init__:Function]
# [DEF:get_tier:Function]
# @TIER: STANDARD
# @PURPOSE: Returns the tier of the entity, defaulting to STANDARD.
# @PRE: tags dictionary is accessible.
# @POST: Returns Tier enum value.
def get_tier(self) -> Tier:
    with belief_scope("get_tier"):
        # Normalise the tag value; absent tag means the STANDARD tier.
        label = self.tags.get("TIER", Tier.STANDARD.value).upper()
        try:
            resolved = Tier(label)
        except ValueError:
            # Unknown tier strings degrade gracefully to STANDARD.
            resolved = Tier.STANDARD
        return resolved
# [/DEF:get_tier:Function]
# [DEF:to_dict:Function]
# @TIER: STANDARD
# @PURPOSE: Serializes the entity to a dictionary for JSON output.
# @PRE: Entity is fully populated.
# @POST: Returns a dictionary representation.
# @RETURN: Dict representation of the entity.
def to_dict(self) -> Dict[str, Any]:
with belief_scope("to_dict"):
return {
result = {
"name": self.name,
"type": self.type,
"tier": self.get_tier().value,
"start_line": self.start_line,
"end_line": self.end_line,
"tags": self.tags,
"relations": self.relations,
"children": [c.to_dict() for c in self.children],
"compliance": {
"valid": len(self.compliance_issues) == 0,
"issues": self.compliance_issues
"valid": len([i for i in self.compliance_issues if i.severity == Severity.ERROR]) == 0,
"issues": [i.to_dict() for i in self.compliance_issues],
"score": self.get_score()
}
}
if self.props:
result["props"] = self.props
if self.events:
result["events"] = self.events
if self.data_flow:
result["data_flow"] = self.data_flow
return result
# [/DEF:to_dict:Function]
# [DEF:validate:Function]
# @PURPOSE: Checks for semantic compliance (closure, mandatory tags, belief state).
# @PRE: Entity structure is complete.
# @POST: Populates self.compliance_issues.
# @TIER: CRITICAL
# @PURPOSE: Checks for semantic compliance based on TIER requirements.
# @PRE: Entity structure is complete; tier is determined.
# @POST: Populates self.compliance_issues with severity levels.
# @SIDE_EFFECT: Modifies self.compliance_issues list.
def validate(self):
with belief_scope("validate"):
# 1. Check Closure
if self.end_line is None:
self.compliance_issues.append(f"Unclosed Anchor: [DEF:{self.name}:{self.type}] started at line {self.start_line}")
tier = self.get_tier()
# 2. Check Mandatory Tags
required = MANDATORY_TAGS.get(self.type, [])
# 1. Check Closure (required for ALL tiers)
if self.end_line is None:
self.compliance_issues.append(ComplianceIssue(
f"Unclosed Anchor: [DEF:{self.name}:{self.type}] started at line {self.start_line}",
Severity.ERROR,
self.start_line
))
# 2. Check Mandatory Tags based on TIER
required = TIER_MANDATORY_TAGS.get(tier, {}).get(self.type, [])
for req_tag in required:
found = False
for existing_tag in self.tags:
@@ -127,13 +236,39 @@ class SemanticEntity:
found = True
break
if not found:
self.compliance_issues.append(f"Missing Mandatory Tag: @{req_tag}")
severity = Severity.ERROR if tier == Tier.CRITICAL else Severity.WARNING
self.compliance_issues.append(ComplianceIssue(
f"Missing Mandatory Tag: @{req_tag} (required for {tier.value} tier)",
severity,
self.start_line
))
# 3. Check for Belief State Logging (Python only)
# Skip check for logger.py to avoid circular dependencies
if self.type == "Function" and self.file_path.endswith(".py") and "backend/src/core/logger.py" not in self.file_path:
if not getattr(self, 'has_belief_scope', False):
self.compliance_issues.append("Missing Belief State Logging: Function should use belief_scope context manager.")
# 3. Check for Belief State Logging based on TIER
if self.type == "Function":
belief_required = TIER_BELIEF_REQUIRED.get(tier, False)
if belief_required:
is_python = self.file_path.endswith(".py")
has_belief = self.has_belief_scope if is_python else self.has_console_log
if not has_belief:
# Check if it's a special case (logger.py or mock functions)
if "logger.py" not in self.file_path and "__" not in self.name:
severity = Severity.ERROR if tier == Tier.CRITICAL else Severity.WARNING
log_type = "belief_scope" if is_python else "console.log with [ID][STATE]"
self.compliance_issues.append(ComplianceIssue(
f"Missing Belief State Logging: Function should use {log_type} (required for {tier.value} tier)",
severity,
self.start_line
))
# 4. Check for @INVARIANT in CRITICAL tier
if tier == Tier.CRITICAL and self.type in ["Module", "Component", "Class"]:
if "INVARIANT" not in [k.upper() for k in self.tags.keys()]:
self.compliance_issues.append(ComplianceIssue(
f"Missing @INVARIANT tag (required for CRITICAL tier)",
Severity.ERROR,
self.start_line
))
# Recursive validation
for child in self.children:
@@ -141,27 +276,37 @@ class SemanticEntity:
# [/DEF:validate:Function]
# [DEF:get_score:Function]
# @PURPOSE: Calculates a compliance score (0.0 to 1.0).
# @TIER: STANDARD
# @PURPOSE: Calculates a compliance score (0.0 to 1.0) based on tier requirements.
# @PRE: validate() has been called.
# @POST: Returns a float score.
# @RETURN: Float score.
def get_score(self) -> float:
with belief_scope("get_score"):
if self.end_line is None:
return 0.0
tier = self.get_tier()
score = 1.0
required = MANDATORY_TAGS.get(self.type, [])
# Count issues by severity
errors = len([i for i in self.compliance_issues if i.severity == Severity.ERROR])
warnings = len([i for i in self.compliance_issues if i.severity == Severity.WARNING])
# Penalties
score -= errors * 0.3
score -= warnings * 0.1
# Check mandatory tags
required = TIER_MANDATORY_TAGS.get(tier, {}).get(self.type, [])
if required:
found_count = 0
for req_tag in required:
for existing_tag in self.tags:
for existing_tag in self.tags:
if existing_tag.upper() == req_tag:
found_count += 1
break
if found_count < len(required):
# Penalty proportional to missing tags
score -= 0.5 * (1 - (found_count / len(required)))
score -= 0.2 * (1 - (found_count / len(required)))
return max(0.0, score)
# [/DEF:get_score:Function]
@@ -169,17 +314,17 @@ class SemanticEntity:
# [DEF:get_patterns:Function]
# @TIER: STANDARD
# @PURPOSE: Returns regex patterns for a specific language.
# @PRE: lang is either 'python' or 'svelte_js'.
# @POST: Returns a dictionary of compiled regex patterns.
# @PARAM: lang (str) - 'python' or 'svelte_js'
# @RETURN: Dict containing compiled regex patterns.
def get_patterns(lang: str) -> Dict[str, Pattern]:
with belief_scope("get_patterns"):
if lang == "python":
return {
"anchor_start": re.compile(r"#\s*\[DEF:(?P<name>[\w\.]+):(?P<type>\w+)\]"),
"anchor_end": re.compile(r"#\s*\[/DEF:(?P<name>[\w\.]+):(?P<type>\w+)\]"),
"anchor_end": re.compile(r"#\s*\[/DEF:(?P<name>[\w\.]+)\]"),
"tag": re.compile(r"#\s*@(?P<tag>[A-Z_]+):\s*(?P<value>.*)"),
"relation": re.compile(r"#\s*@RELATION:\s*(?P<type>\w+)\s*->\s*(?P<target>.*)"),
"func_def": re.compile(r"^\s*(async\s+)?def\s+(?P<name>\w+)"),
@@ -188,54 +333,189 @@ def get_patterns(lang: str) -> Dict[str, Pattern]:
else:
return {
"html_anchor_start": re.compile(r"<!--\s*\[DEF:(?P<name>[\w\.]+):(?P<type>\w+)\]\s*-->"),
"html_anchor_end": re.compile(r"<!--\s*\[/DEF:(?P<name>[\w\.]+):(?P<type>\w+)\]\s*-->"),
"html_anchor_end": re.compile(r"<!--\s*\[/DEF:(?P<name>[\w\.]+)\]\s*-->"),
"js_anchor_start": re.compile(r"//\s*\[DEF:(?P<name>[\w\.]+):(?P<type>\w+)\]"),
"js_anchor_end": re.compile(r"//\s*\[/DEF:(?P<name>[\w\.]+):(?P<type>\w+)\]"),
"js_anchor_end": re.compile(r"//\s*\[/DEF:(?P<name>[\w\.]+)\]"),
"html_tag": re.compile(r"@(?P<tag>[A-Z_]+):\s*(?P<value>.*)"),
"jsdoc_tag": re.compile(r"\*\s*@(?P<tag>[a-zA-Z]+)\s+(?P<value>.*)"),
"relation": re.compile(r"//\s*@RELATION:\s*(?P<type>\w+)\s*->\s*(?P<target>.*)"),
"func_def": re.compile(r"^\s*(export\s+)?(async\s+)?function\s+(?P<name>\w+)"),
"console_log": re.compile(r"console\.log\s*\(\s*['\"]\[[\w_]+\]\[[\w_]+\]"),
# Svelte-specific patterns
"export_let": re.compile(r"export\s+let\s+(?P<name>\w+)(?:\s*:\s*(?P<type>[\w\[\]|<>]+))?(?:\s*=\s*(?P<default>[^;]+))?"),
"create_event_dispatcher": re.compile(r"createEventDispatcher\s*<\s*\{\s*(?P<events>[^}]+)\s*\}\s*\>"),
"dispatch_call": re.compile(r"dispatch\s*\(\s*['\"](?P<event>\w+)['\"]"),
"store_subscription": re.compile(r"\$(?P<store>\w+)"),
"store_import": re.compile(r"import\s*\{[^}]*\b(?P<store>\w+Store|store)\b[^}]*\}\s*from\s*['\"][^'\"]*stores?[^'\"]*['\"]"),
}
# [/DEF:get_patterns:Function]
# [DEF:extract_svelte_props:Function]
# @TIER: STANDARD
# @PURPOSE: Extracts props from Svelte component script section.
# @PRE: lines is a list of file lines, start_idx is the starting line index.
# @POST: Returns list of prop definitions ({name, type, default}).
def extract_svelte_props(lines: List[str], start_idx: int) -> List[Dict[str, Any]]:
    with belief_scope("extract_svelte_props"):
        prop_re = re.compile(
            r"export\s+let\s+(?P<name>\w+)"
            r"(?:\s*:\s*(?P<type>[\w\[\]|<>\s]+))?"
            r"(?:\s*=\s*(?P<default>[^;]+))?;"
        )
        collected: List[Dict[str, Any]] = []
        # Bounded look-ahead of 100 lines from the component anchor.
        window_end = min(start_idx + 100, len(lines))
        for idx in range(start_idx, window_end):
            text = lines[idx].strip()
            # Heuristic stop: props are expected before functions/consts
            # or the end of the <script> section.
            if text == "</script>" or text.startswith(("function ", "const ")):
                break
            hit = prop_re.search(text)
            if hit is None:
                continue
            default_src = hit.group("default")
            collected.append({
                "name": hit.group("name"),
                "type": hit.group("type") or "any",  # untyped props default to 'any'
                "default": default_src.strip() if default_src else None,
            })
        return collected
# [/DEF:extract_svelte_props:Function]
# [DEF:extract_svelte_events:Function]
# @TIER: STANDARD
# @PURPOSE: Extracts dispatched events from Svelte component.
# @PRE: lines is a list of file lines.
# @POST: Returns sorted list of unique event names.
def extract_svelte_events(lines: List[str]) -> List[str]:
    with belief_scope("extract_svelte_events"):
        # Typed dispatcher: createEventDispatcher<{ submit: T; cancel: T }>
        typed_re = re.compile(r"createEventDispatcher\s*<\s*\{\s*([^}]+)\s*\}\s*\>")
        # Runtime call: dispatch('eventName')
        call_re = re.compile(r"dispatch\s*\(\s*['\"](\w+)['\"]")
        found: Set[str] = set()
        for raw in lines:
            text = raw.strip()
            typed = typed_re.search(text)
            if typed:
                # Each "name: Type" entry in the type literal yields one event.
                for chunk in typed.group(1).split(";"):
                    name, sep, _ = chunk.partition(":")
                    if sep and name.strip():
                        found.add(name.strip())
            called = call_re.search(text)
            if called:
                found.add(called.group(1))
        return sorted(found)
# [/DEF:extract_svelte_events:Function]
# [DEF:extract_data_flow:Function]
# @TIER: STANDARD
# @PURPOSE: Extracts store subscriptions and data flow from Svelte component.
# @PRE: lines is a list of file lines.
# @POST: Returns list of data flow descriptors ({store, type, line}).
# NOTE(review): fixed inverted read/write classification. In Svelte,
# "$store = x" assigns INTO the store (write) and "x = $store" reads from it;
# the previous check (line.index("$") > line.index("=")) labelled these the
# wrong way round and also misfired on "=="/"!=" comparisons.
def extract_data_flow(lines: List[str]) -> List[Dict[str, str]]:
    with belief_scope("extract_data_flow"):
        data_flow: List[Dict[str, str]] = []
        # Pattern for store subscriptions: $storeName
        subscription_pattern = re.compile(r"\$(?P<store>\w+)")
        # Pattern for store imports
        import_pattern = re.compile(r"import\s*\{[^}]*\}\s*from\s*['\"][^'\"]*stores?[^'\"]*['\"]")
        # A plain assignment '=' — not part of ==, ===, !=, <=, >=, or =>.
        assign_pattern = re.compile(r"(?<![=!<>])=(?![=>])")
        store_names = set()
        # First pass: find store imports
        for line in lines:
            if import_pattern.search(line):
                # Extract imported names
                match = re.search(r"import\s*\{([^}]+)\}", line)
                if match:
                    for imp in match.group(1).split(","):
                        parts = imp.strip().split()
                        if parts:  # guard: trailing comma yields an empty segment
                            store_names.add(parts[0])
        # Second pass: classify each subscription as a read or a write
        for i, line in enumerate(lines):
            line_stripped = line.strip()
            # Skip comments
            if line_stripped.startswith("//") or line_stripped.startswith("*"):
                continue
            assign = assign_pattern.search(line)
            for match in subscription_pattern.finditer(line):
                store_name = match.group("store")
                # Filter false positives from keywords (defensive; '$if' etc.)
                if store_name in ["if", "while", "for", "switch"]:
                    continue
                # A store reference left of a real '=' is an assignment target.
                if assign and match.start() < assign.start():
                    flow_type = "WRITES_TO"
                else:
                    flow_type = "READS_FROM"
                data_flow.append({
                    "store": store_name,
                    "type": flow_type,
                    "line": i + 1
                })
        return data_flow
# [/DEF:extract_data_flow:Function]
# [DEF:parse_file:Function]
# @PURPOSE: Parses a single file to extract semantic entities.
# @TIER: CRITICAL
# @PURPOSE: Parses a single file to extract semantic entities with tier awareness and enhanced Svelte analysis.
# @PRE: full_path, rel_path, lang are valid strings.
# @POST: Returns extracted entities and list of issues.
# @INVARIANT: Every opened anchor must have a matching closing anchor for valid compliance.
# @PARAM: full_path - Absolute path to file.
# @PARAM: rel_path - Relative path from project root.
# @PARAM: lang - Language identifier.
# @RETURN: Tuple[List[SemanticEntity], List[str]] - Entities found and global issues.
def parse_file(full_path: str, rel_path: str, lang: str) -> Tuple[List[SemanticEntity], List[str]]:
def parse_file(full_path: str, rel_path: str, lang: str) -> Tuple[List[SemanticEntity], List[ComplianceIssue]]:
with belief_scope("parse_file"):
issues: List[str] = []
issues: List[ComplianceIssue] = []
try:
with open(full_path, 'r', encoding='utf-8') as f:
lines = f.readlines()
except Exception as e:
return [], [f"Could not read file {rel_path}: {e}"]
return [], [ComplianceIssue(f"Could not read file {rel_path}: {e}", Severity.ERROR)]
stack: List[SemanticEntity] = []
file_entities: List[SemanticEntity] = []
orphan_functions: List[SemanticEntity] = []
patterns = get_patterns(lang)
# Track current module for grouping orphans
current_module: Optional[SemanticEntity] = None
for i, line in enumerate(lines):
lineno = i + 1
line = line.strip()
line_stripped = line.strip()
# 1. Check for Anchor Start
match_start = None
if lang == "python":
match_start = patterns["anchor_start"].search(line)
match_start = patterns["anchor_start"].search(line_stripped)
else:
match_start = patterns["html_anchor_start"].search(line) or patterns["js_anchor_start"].search(line)
match_start = patterns["html_anchor_start"].search(line_stripped) or patterns["js_anchor_start"].search(line_stripped)
if match_start:
name = match_start.group("name")
type_ = match_start.group("type")
entity = SemanticEntity(name, type_, lineno, rel_path)
# Track module-level entities
if type_ == "Module" and not stack:
current_module = entity
if stack:
parent = stack[-1]
parent.children.append(entity)
@@ -249,46 +529,57 @@ def parse_file(full_path: str, rel_path: str, lang: str) -> Tuple[List[SemanticE
# 2. Check for Anchor End
match_end = None
if lang == "python":
match_end = patterns["anchor_end"].search(line)
match_end = patterns["anchor_end"].search(line_stripped)
else:
match_end = patterns["html_anchor_end"].search(line) or patterns["js_anchor_end"].search(line)
match_end = patterns["html_anchor_end"].search(line_stripped) or patterns["js_anchor_end"].search(line_stripped)
if match_end:
name = match_end.group("name")
type_ = match_end.group("type")
if not stack:
issues.append(f"{rel_path}:{lineno} Found closing anchor [/DEF:{name}:{type_}] without opening anchor.")
issues.append(ComplianceIssue(
f"{rel_path}:{lineno} Found closing anchor [/DEF:{name}] without opening anchor.",
Severity.ERROR,
lineno
))
continue
top = stack[-1]
if top.name == name and top.type == type_:
if top.name == name:
top.end_line = lineno
stack.pop()
else:
issues.append(f"{rel_path}:{lineno} Mismatched closing anchor. Expected [/DEF:{top.name}:{top.type}], found [/DEF:{name}:{type_}].")
issues.append(ComplianceIssue(
f"{rel_path}:{lineno} Mismatched closing anchor. Expected [/DEF:{top.name}], found [/DEF:{name}].",
Severity.ERROR,
lineno
))
continue
# 3. Check for Naked Functions (Missing Contracts)
# 3. Check for Naked Functions (Missing Contracts) - track as orphans
if "func_def" in patterns:
match_func = patterns["func_def"].search(line)
match_func = patterns["func_def"].search(line_stripped)
if match_func:
func_name = match_func.group("name")
is_covered = False
if stack:
current = stack[-1]
# Check if we are inside a Function anchor that matches the name
if current.type == "Function" and current.name == func_name:
is_covered = True
if not is_covered:
issues.append(f"{rel_path}:{lineno} Function '{func_name}' implementation found without matching [DEF:{func_name}:Function] contract.")
# Create orphan function entity
orphan = SemanticEntity(func_name, "Function", lineno, rel_path)
orphan.tags["PURPOSE"] = f"Auto-detected function (orphan)"
orphan.tags["TIER"] = "TRIVIAL"
orphan.end_line = lineno # Mark as closed immediately
orphan_functions.append(orphan)
# 4. Check for Tags/Relations
if stack:
current = stack[-1]
match_rel = patterns["relation"].search(line)
match_rel = patterns["relation"].search(line_stripped)
if match_rel:
current.relations.append({
"type": match_rel.group("type"),
@@ -298,11 +589,11 @@ def parse_file(full_path: str, rel_path: str, lang: str) -> Tuple[List[SemanticE
match_tag = None
if lang == "python":
match_tag = patterns["tag"].search(line)
match_tag = patterns["tag"].search(line_stripped)
elif lang == "svelte_js":
match_tag = patterns["html_tag"].search(line)
if not match_tag and ("/*" in line or "*" in line or "//" in line):
match_tag = patterns["jsdoc_tag"].search(line)
match_tag = patterns["html_tag"].search(line_stripped)
if not match_tag and ("/*" in line_stripped or "*" in line_stripped or "//" in line_stripped):
match_tag = patterns["jsdoc_tag"].search(line_stripped)
if match_tag:
tag_name = match_tag.group("tag").upper()
@@ -314,21 +605,68 @@ def parse_file(full_path: str, rel_path: str, lang: str) -> Tuple[List[SemanticE
if patterns["belief_scope"].search(line):
current.has_belief_scope = True
# Check for console.log belief state in Svelte
if lang == "svelte_js" and "console_log" in patterns:
if patterns["console_log"].search(line):
current.has_console_log = True
# End of file check
if stack:
for unclosed in stack:
unclosed.compliance_issues.append(f"Unclosed Anchor at end of file (started line {unclosed.start_line})")
issues.append(ComplianceIssue(
f"{rel_path}: Unclosed Anchor [DEF:{unclosed.name}:{unclosed.type}] at end of file (started line {unclosed.start_line})",
Severity.ERROR,
unclosed.start_line
))
if unclosed.parent is None and unclosed not in file_entities:
file_entities.append(unclosed)
# Post-processing for Svelte files
if lang == "svelte_js":
for entity in file_entities:
if entity.type == "Component":
# Extract props, events, and data flow
entity.props = extract_svelte_props(lines, entity.start_line)
entity.events = extract_svelte_events(lines)
entity.data_flow = extract_data_flow(lines)
# Group orphan functions under their module
if orphan_functions:
if current_module:
# Add orphans as children of the module
for orphan in orphan_functions:
orphan.parent = current_module
current_module.children.append(orphan)
else:
# Create a synthetic module for orphans
synthetic_module = SemanticEntity(
os.path.splitext(os.path.basename(rel_path))[0],
"Module",
1,
rel_path
)
synthetic_module.tags["PURPOSE"] = f"Auto-generated module for {rel_path}"
synthetic_module.tags["TIER"] = "TRIVIAL"
synthetic_module.tags["LAYER"] = "Unknown"
synthetic_module.end_line = len(lines)
for orphan in orphan_functions:
orphan.parent = synthetic_module
synthetic_module.children.append(orphan)
file_entities.append(synthetic_module)
return file_entities, issues
# [/DEF:parse_file:Function]
# [DEF:SemanticMapGenerator:Class]
# @PURPOSE: Orchestrates the mapping process.
# @TIER: CRITICAL
# @PURPOSE: Orchestrates the mapping process with tier-based validation.
# @INVARIANT: All entities are validated according to their TIER requirements.
class SemanticMapGenerator:
# [DEF:__init__:Function]
# @TIER: STANDARD
# @PURPOSE: Initializes the generator with a root directory.
# @PRE: root_dir is a valid path string.
# @POST: Generator instance is ready.
@@ -337,15 +675,15 @@ class SemanticMapGenerator:
self.root_dir = root_dir
self.entities: List[SemanticEntity] = []
self.file_scores: Dict[str, float] = {}
self.global_issues: List[str] = []
self.global_issues: List[ComplianceIssue] = []
self.ignored_patterns = self._load_gitignore()
# [/DEF:__init__:Function]
# [DEF:_load_gitignore:Function]
# @TIER: STANDARD
# @PURPOSE: Loads patterns from .gitignore file.
# @PRE: .gitignore exists in root_dir.
# @POST: Returns set of ignore patterns.
# @RETURN: Set of patterns to ignore.
def _load_gitignore(self) -> Set[str]:
with belief_scope("_load_gitignore"):
patterns = set()
@@ -360,17 +698,14 @@ class SemanticMapGenerator:
# [/DEF:_load_gitignore:Function]
# [DEF:_is_ignored:Function]
# @TIER: STANDARD
# @PURPOSE: Checks if a path should be ignored based on .gitignore or hardcoded defaults.
# @PRE: rel_path is a valid relative path string.
# @POST: Returns True if the path should be ignored.
# @PARAM: rel_path (str) - Path relative to root.
# @RETURN: bool - True if ignored.
def _is_ignored(self, rel_path: str) -> bool:
with belief_scope("_is_ignored"):
# Normalize path for matching
rel_path = rel_path.replace(os.sep, '/')
# Check hardcoded defaults
parts = rel_path.split('/')
for part in parts:
if part in IGNORE_DIRS:
@@ -379,21 +714,17 @@ class SemanticMapGenerator:
if os.path.basename(rel_path) in IGNORE_FILES:
return True
# Check gitignore patterns
for pattern in self.ignored_patterns:
# Handle directory patterns like 'node_modules/'
if pattern.endswith('/'):
dir_pattern = pattern.rstrip('/')
if rel_path == dir_pattern or rel_path.startswith(pattern):
return True
# Check for patterns in frontend/ or backend/
if rel_path.startswith("frontend/") and fnmatch.fnmatch(rel_path[9:], pattern):
return True
if rel_path.startswith("backend/") and fnmatch.fnmatch(rel_path[8:], pattern):
return True
# Use fnmatch for glob patterns
if fnmatch.fnmatch(rel_path, pattern) or \
fnmatch.fnmatch(os.path.basename(rel_path), pattern) or \
any(fnmatch.fnmatch(part, pattern) for part in parts):
@@ -403,6 +734,7 @@ class SemanticMapGenerator:
# [/DEF:_is_ignored:Function]
# [DEF:run:Function]
# @TIER: CRITICAL
# @PURPOSE: Main execution flow.
# @PRE: Generator is initialized.
# @POST: Semantic map and reports are generated.
@@ -417,13 +749,13 @@ class SemanticMapGenerator:
# [/DEF:run:Function]
# [DEF:_walk_and_parse:Function]
# @TIER: CRITICAL
# @PURPOSE: Recursively walks directories and triggers parsing.
# @PRE: root_dir exists.
# @POST: All files are scanned and entities extracted.
def _walk_and_parse(self):
with belief_scope("_walk_and_parse"):
for root, dirs, files in os.walk(self.root_dir):
# Optimization: don't enter ignored directories
dirs[:] = [d for d in dirs if not self._is_ignored(os.path.relpath(os.path.join(root, d), self.root_dir) + "/")]
for file in files:
@@ -448,7 +780,8 @@ class SemanticMapGenerator:
# [/DEF:_walk_and_parse:Function]
# [DEF:_process_file_results:Function]
# @PURPOSE: Validates entities and calculates file scores.
# @TIER: STANDARD
# @PURPOSE: Validates entities and calculates file scores with tier awareness.
# @PRE: Entities have been parsed from the file.
# @POST: File score is calculated and issues collected.
def _process_file_results(self, rel_path: str, entities: List[SemanticEntity]):
@@ -457,6 +790,7 @@ class SemanticMapGenerator:
count = 0
# [DEF:validate_recursive:Function]
# @TIER: STANDARD
# @PURPOSE: Recursively validates a list of entities.
# @PRE: ent_list is a list of SemanticEntity objects.
# @POST: All entities and their children are validated.
@@ -477,12 +811,12 @@ class SemanticMapGenerator:
# [/DEF:_process_file_results:Function]
# [DEF:_generate_artifacts:Function]
# @PURPOSE: Writes output files.
# @TIER: CRITICAL
# @PURPOSE: Writes output files with tier-based compliance data.
# @PRE: Parsing and validation are complete.
# @POST: JSON and Markdown artifacts are written to disk.
def _generate_artifacts(self):
with belief_scope("_generate_artifacts"):
# 1. Full JSON Map
full_map = {
"project_root": self.root_dir,
"generated_at": datetime.datetime.now().isoformat(),
@@ -494,15 +828,13 @@ class SemanticMapGenerator:
json.dump(full_map, f, indent=2)
print(f"Generated {OUTPUT_JSON}")
# 2. Compliance Report
self._generate_report()
# 3. Compressed Map (Markdown)
self._generate_compressed_map()
# [/DEF:_generate_artifacts:Function]
# [DEF:_generate_report:Function]
# @PURPOSE: Generates the Markdown compliance report.
# @TIER: CRITICAL
# @PURPOSE: Generates the Markdown compliance report with severity levels.
# @PRE: File scores and issues are available.
# @POST: Markdown report is created in reports directory.
def _generate_report(self):
@@ -514,49 +846,64 @@ class SemanticMapGenerator:
total_files = len(self.file_scores)
avg_score = sum(self.file_scores.values()) / total_files if total_files > 0 else 0
# Count issues by severity
error_count = len([i for i in self.global_issues if i.severity == Severity.ERROR])
warning_count = len([i for i in self.global_issues if i.severity == Severity.WARNING])
with open(report_path, 'w', encoding='utf-8') as f:
f.write(f"# Semantic Compliance Report\n\n")
f.write(f"**Generated At:** {datetime.datetime.now().isoformat()}\n")
f.write(f"**Global Compliance Score:** {avg_score:.1%}\n")
f.write(f"**Scanned Files:** {total_files}\n\n")
f.write(f"**Scanned Files:** {total_files}\n")
f.write(f"**Global Errors:** {error_count} | **Warnings:** {warning_count}\n\n")
if self.global_issues:
f.write("## Critical Parsing Errors\n")
for issue in self.global_issues:
f.write(f"- 🔴 {issue}\n")
icon = "🔴" if issue.severity == Severity.ERROR else "🟡" if issue.severity == Severity.WARNING else ""
f.write(f"- {icon} {issue.message}\n")
f.write("\n")
f.write("## File Compliance Status\n")
f.write("| File | Score | Issues |\n")
f.write("|------|-------|--------|\n")
f.write("| File | Score | Tier | Issues |\n")
f.write("|------|-------|------|--------|\n")
sorted_files = sorted(self.file_scores.items(), key=lambda x: x[1])
for file_path, score in sorted_files:
issues = []
self._collect_issues(self.entities, file_path, issues)
tier = "N/A"
self._collect_issues(self.entities, file_path, issues, tier)
status_icon = "🟢" if score == 1.0 else "🟡" if score > 0.5 else "🔴"
issue_text = "<br>".join(issues) if issues else "OK"
f.write(f"| {file_path} | {status_icon} {score:.0%} | {issue_text} |\n")
issue_text = "<br>".join([f"{'🔴' if i.severity == Severity.ERROR else '🟡'} {i.message}" for i in issues[:3]])
if len(issues) > 3:
issue_text += f"<br>... and {len(issues) - 3} more"
if not issues:
issue_text = "OK"
f.write(f"| {file_path} | {status_icon} {score:.0%} | {tier} | {issue_text} |\n")
print(f"Generated {report_path}")
# [/DEF:_generate_report:Function]
# [DEF:_collect_issues:Function]
# @TIER: STANDARD
# @PURPOSE: Helper to collect issues for a specific file from the entity tree.
# @PRE: entities list and file_path are valid.
# @POST: issues list is populated with compliance issues.
def _collect_issues(self, entities: List[SemanticEntity], file_path: str, issues: List[str]):
def _collect_issues(self, entities: List[SemanticEntity], file_path: str, issues: List[ComplianceIssue], tier: str):
with belief_scope("_collect_issues"):
for e in entities:
if e.file_path == file_path:
issues.extend([f"[{e.name}] {i}" for i in e.compliance_issues])
self._collect_issues(e.children, file_path, issues)
issues.extend(e.compliance_issues)
tier = e.get_tier().value
self._collect_issues(e.children, file_path, issues, tier)
# [/DEF:_collect_issues:Function]
# [DEF:_generate_compressed_map:Function]
# @TIER: CRITICAL
# @PURPOSE: Generates the token-optimized project map with enhanced Svelte details.
# @PRE: Entities have been processed.
# @POST: Markdown project map is written.
def _generate_compressed_map(self):
    # NOTE(review): the implementation body is MISSING here — the line below is
    # unresolved diff-hunk residue ("@@ -574,7 +921,8 @@ ..."), not code, and
    # will not parse. Restore the body from version control before running.
    @@ -574,7 +921,8 @@ class SemanticMapGenerator:
# [/DEF:_generate_compressed_map:Function]
# [DEF:_write_entity_md:Function]
# @TIER: CRITICAL
# @PURPOSE: Recursive helper to write entity tree to Markdown with tier badges and enhanced details.
# @PRE: f is an open file handle, entity is valid.
# @POST: Entity details are written to the file.
def _write_entity_md(self, f, entity: SemanticEntity, level: int):
    # NOTE(review): the opening statements of this body were hidden by diff-hunk
    # residue ("@@ -585,28 ..."); the two-space indent step and the empty icon
    # default are assumptions — confirm against version control.
    indent = "  " * level
    # Pick a display icon per entity type.
    icon = ""
    if entity.type == "Component": icon = "🧩"
    elif entity.type == "Function": icon = "ƒ"
    elif entity.type == "Class": icon = ""  # NOTE(review): glyph appears lost in source — confirm intended icon.
    elif entity.type == "Store": icon = "🗄️"
    # Badge only the non-default tiers; STANDARD entities stay unbadged.
    tier_badge = ""
    tier = entity.get_tier()
    if tier == Tier.CRITICAL:
        tier_badge = " `[CRITICAL]`"
    elif tier == Tier.TRIVIAL:
        tier_badge = " `[TRIVIAL]`"
    f.write(f"{indent}- {icon} **{entity.name}** (`{entity.type}`){tier_badge}\n")
    # Tags may be recorded with either case by the parser, so check both.
    purpose = entity.tags.get("PURPOSE") or entity.tags.get("purpose")
    layer = entity.tags.get("LAYER") or entity.tags.get("layer")
    invariant = entity.tags.get("INVARIANT")
    if purpose:
        f.write(f"{indent} - 📝 {purpose}\n")
    if layer:
        f.write(f"{indent} - 🏗️ Layer: {layer}\n")
    if invariant:
        f.write(f"{indent} - 🔒 Invariant: {invariant}\n")
    # Write Props for Components — capped at 5 entries to keep the map token-cheap.
    if entity.props:
        props_str = ", ".join([f"{p['name']}: {p['type']}" for p in entity.props[:5]])
        if len(entity.props) > 5:
            props_str += f"... (+{len(entity.props) - 5})"
        f.write(f"{indent} - 📥 Props: {props_str}\n")
    # Write Events for Components — same cap.
    if entity.events:
        events_str = ", ".join(entity.events[:5])
        if len(entity.events) > 5:
            events_str += f"... (+{len(entity.events) - 5})"
        f.write(f"{indent} - ⚡ Events: {events_str}\n")
    # Write Data Flow — de-duplicate (type, store) pairs, show at most 3.
    if entity.data_flow:
        unique_flows = {}
        for flow in entity.data_flow:
            key = f"{flow['type']} -> {flow['store']}"
            unique_flows[key] = flow
        for flow_key, flow in list(unique_flows.items())[:3]:
            arrow = "⬅️" if flow['type'] == "READS_FROM" else "➡️"
            f.write(f"{indent} - {arrow} {flow['type']} `{flow['store']}`\n")
    # Write Relations — restricted to the graph-relevant predicates the
    # generator recognizes (matches the post-commit predicate set).
    for rel in entity.relations:
        if rel['type'] in ['DEPENDS_ON', 'CALLS', 'INHERITS', 'IMPLEMENTS', 'DISPATCHES']:
            f.write(f"{indent} - 🔗 {rel['type']} -> `{rel['target']}`\n")
    # Recurse into children, capping depth at 3 to bound the map's size.
    if level < 3:
        for child in entity.children:
            self._write_entity_md(f, child, level + 1)
# [/DEF:_write_entity_md:Function]
# [/DEF:SemanticMapGenerator:Class]
if __name__ == "__main__":
    # Script entry point: construct the generator over the project root and run it.
    SemanticMapGenerator(PROJECT_ROOT).run()

---

(Second file in this commit: the GRACE-Poly system standard / code-generation protocol document, shown below.)
РОЛЬ: Архитектор Семантической Когерентности.
ЗАДАЧА: Генерация кода (Python/Svelte).
РЕЖИМ: Строгий. Детерминированный. Без болтовни.
# SYSTEM STANDARD: POLYGLOT CODE GENERATION PROTOCOL (GRACE-Poly)
I. ЗАКОН (АКСИОМЫ)
1. Смысл первичен. Код вторичен.
2. Контракт (@PRE/@POST) — источник истины.
3. Структура `[DEF]...[/DEF]` — нерушима.
4. Архитектура в Header — неизменяема.
5. Сложность фрактала ограничена: модуль < 300 строк.
**OBJECTIVE:** Generate Python and Svelte/TypeScript code that strictly adheres to Semantic Coherence standards. Output must be machine-readable, fractal-structured, and optimized for Sparse Attention navigation.
II. СИНТАКСИС (ЖЕСТКИЙ ФОРМАТ)
ЯКОРЬ (Контейнер):
Начало: `# [DEF:id:Type]` (Python) | `<!-- [DEF:id:Type] -->` (Svelte)
Конец: `# [/DEF:id]` (ОБЯЗАТЕЛЬНО для аккумуляции)
Типы: Module, Class, Function, Component, Store.
## I. CORE REQUIREMENTS
1. **Causal Validity:** Semantic definitions (Contracts) must ALWAYS precede implementation code.
2. **Immutability:** Architectural decisions defined in the Module/Component Header are treated as immutable constraints.
3. **Format Compliance:** Output must strictly follow the `[DEF:..:...]` / `[/DEF:...:...]` anchor syntax for structure.
4. **Logic over Assertion:** Contracts define the *logic flow*. Do not generate explicit `assert` statements unless requested. The code logic itself must inherently satisfy the Pre/Post conditions (e.g., via control flow, guards, or types).
5. **Fractal Complexity:** Modules and functions must adhere to strict size limits (~300 lines/module, ~30-50 lines/function) to maintain semantic focus.
ТЕГ (Метаданные):
Вид: `# @KEY: Value` (внутри DEF, до кода).
---
ГРАФ (Связи):
Вид: `# @RELATION: PREDICATE -> TARGET_ID`
Предикаты: DEPENDS_ON, CALLS, INHERITS, IMPLEMENTS, DISPATCHES.
## II. SYNTAX SPECIFICATION
III. СТРУКТУРА ФАЙЛА
1. HEADER (Всегда первый):
[DEF:filename:Module]
@TIER: [CRITICAL|STANDARD|TRIVIAL] (Дефолт: STANDARD)
@SEMANTICS: [keywords]
@PURPOSE: [Главная цель]
@LAYER: [Domain/UI/Infra]
@RELATION: [Зависимости]
@INVARIANT: [Незыблемое правило]
Code structure is defined by **Anchors** (square brackets). Metadata is defined by **Tags** (native comment style).
2. BODY: Импорты -> Реализация.
3. FOOTER: [/DEF:filename]
### 1. Entity Anchors (The "Container")
Used to define the boundaries of Modules, Classes, Components, and Functions.
IV. КОНТРАКТ (DBC)
Расположение: Внутри [DEF], ПЕРЕД кодом.
Стиль Python: Комментарии `# @TAG`.
Стиль Svelte: JSDoc `/** @tag */`.
* **Python:**
* Start: `# [DEF:identifier:Type]`
* End: `# [/DEF:identifier:Type]`
* **Svelte (Top-level):**
* Start: `<!-- [DEF:ComponentName:Component] -->`
* End: `<!-- [/DEF:ComponentName:Component] -->`
* **Svelte (Script/JS/TS):**
* Start: `// [DEF:funcName:Function]`
* End: `// [/DEF:funcName:Function]`
**Types:** `Module`, `Component`, `Class`, `Function`, `Store`, `Action`.
### 2. Graph Relations (The "Map")
Defines high-level dependencies.
* **Python Syntax:** `# @RELATION: TYPE -> TARGET_ID`
* **Svelte/JS Syntax:** `// @RELATION: TYPE -> TARGET_ID`
* **Types:** `DEPENDS_ON`, `CALLS`, `INHERITS`, `IMPLEMENTS`, `BINDS_TO`, `DISPATCHES`.
---
## III. FILE STRUCTURE STANDARD
### 1. Python Module Header (`.py`)
```python
# [DEF:module_name:Module]
#
# @SEMANTICS: [keywords for vector search]
# @PURPOSE: [Primary responsibility of the module]
# @LAYER: [Domain/Infra/API]
# @RELATION: [Dependencies]
#
# @INVARIANT: [Global immutable rule]
# @CONSTRAINT: [Hard restriction, e.g., "No ORM calls here"]
# [SECTION: IMPORTS]
...
# [/SECTION]
# ... IMPLEMENTATION ...
# [/DEF:module_name:Module]
```
### 2. Svelte Component Header (`.svelte`)
```html
<!-- [DEF:ComponentName:Component] -->
<!--
@SEMANTICS: [keywords]
@PURPOSE: [Primary UI responsibility]
@LAYER: [Feature/Atom/Layout]
@RELATION: [Child components, Stores]
@INVARIANT: [UI rules, e.g., "Always responsive"]
-->
<script lang="ts">
// [SECTION: IMPORTS]
// ...
// [/SECTION: IMPORTS]
// ... LOGIC IMPLEMENTATION ...
</script>
<!-- [SECTION: TEMPLATE] -->
...
<!-- [/SECTION: TEMPLATE] -->
<style>
/* ... */
</style>
<!-- [/DEF:ComponentName:Component] -->
```
---
Теги:
@PURPOSE: Суть (High Entropy).
@PRE: Входные условия.
@POST: Гарантии выхода.
@SIDE_EFFECT: Мутации, IO.
## IV. CONTRACTS (Design by Contract & Semantic Control)
V. АДАПТАЦИЯ (TIERS)
Определяется тегом `@TIER` в Header.
Contracts are the **Source of Truth** and the **Control Vector** for the code. They must be written with high **semantic density** to ensure the LLM fully "understands" the function's role within the larger Graph without needing to read the implementation body.
1. CRITICAL (Core/Security):
- Требование: Полный контракт, Граф (@RELATION), Инварианты (@INVARIANT), Строгие Логи.
2. STANDARD (BizLogic/UI):
- Требование: Базовый контракт (@PURPOSE), Логи, @RELATION (если есть связи).
3. TRIVIAL (DTO/Utils):
- Требование: Только Якоря [DEF] и @PURPOSE. Логи и Граф не обязательны.
### 1. The Anatomy of a Semantic Contract
VI. ЛОГИРОВАНИЕ (BELIEF STATE)
Цель: Трассировка для самокоррекции.
Python: Context Manager `with belief_scope("ID"):`.
Svelte: `console.log("[ID][STATE] Msg")`.
Состояния: Entry -> Action -> Coherence:OK / Failed -> Exit.
Every contract must answer three questions for the AI Agent:
1. **Intent:** *Why* does this exist? (Vector alignment)
2. **Boundaries:** *What* are the constraints? (Pre/Post/Invariants)
3. **Dynamics:** *How* does it change the system state? (Side Effects/Graph)
VII. АЛГОРИТМ ГЕНЕРАЦИИ
1. АНАЛИЗ. Оцени TIER и слой.
2. КАРКАС. Создай `[DEF]`, Header и Контракты.
3. РЕАЛИЗАЦИЯ. Напиши логику, удовлетворяющую Контракту.
4. ЗАМЫКАНИЕ. Закрой все `[/DEF]`.
#### Standard Tags Taxonomy:
* `@PURPOSE`: (**Mandatory**) A concise, high-entropy summary of functionality.
* `@PRE`: (**Mandatory**) Conditions required *before* execution. Defines the valid input space.
* `@POST`: (**Mandatory**) Conditions guaranteed *after* execution. Defines the valid output space.
* `@PARAM`: Input definitions with strict typing.
* `@RETURN`: Output definition.
* `@THROW`: Explicit failure modes.
* `@SIDE_EFFECT`: (**Critical**) Explicitly lists external state mutations (DB writes, UI updates, events). Vital for "Mental Modeling".
* `@INVARIANT`: (**Optional**) Local rules that hold true throughout the function execution.
* `@ALGORITHM`: (**Optional**) For complex logic, briefly describes the strategy (e.g., "Two-pointer approach", "Retry with exponential backoff").
* `@RELATION`: (**Graph**) Edges to other nodes (`CALLS`, `DISPATCHES`, `DEPENDS_ON`).
---
### 2. Python Contract Style (`.py`)
Uses structured comment blocks inside the anchor. Focuses on type hints and logic flow guards.
```python
# [DEF:process_order_batch:Function]
# @PURPOSE: Orchestrates the validation and processing of a batch of orders.
# Ensures atomic processing per order (failure of one does not stop others).
#
# @PRE: batch_id must be a valid UUID string.
# @PRE: orders list must not be empty.
# @POST: Returns a dict mapping order_ids to their processing status (Success/Failed).
# @INVARIANT: The length of the returned dict must equal the length of input orders.
#
# @PARAM: batch_id (str) - The unique identifier for the batch trace.
# @PARAM: orders (List[OrderDTO]) - List of immutable order objects.
# @RETURN: Dict[str, OrderStatus] - Result map.
#
# @SIDE_EFFECT: Writes audit logs to DB.
# @SIDE_EFFECT: Publishes 'ORDER_PROCESSED' event to MessageBus.
#
# @RELATION: CALLS -> InventoryService.reserve_items
# @RELATION: CALLS -> PaymentGateway.authorize
# @RELATION: WRITES_TO -> Database.AuditLog
def process_order_batch(batch_id: str, orders: List[OrderDTO]) -> Dict[str, OrderStatus]:
# 1. Structural Guard Logic (Handling @PRE)
if not orders:
return {}
# 2. Implementation with @INVARIANT in mind
results = {}
for order in orders:
# ... logic ...
pass
# 3. Completion (Logic naturally satisfies @POST)
return results
# [/DEF:process_order_batch:Function]
```
### 3. Svelte/JS Contract Style (JSDoc++)
Uses enhanced JSDoc. Since JS is less strict than Python, the contract acts as a strict typing and behavioral guard.
```javascript
// [DEF:handleUserLogin:Function]
/**
* @purpose Authenticates the user and synchronizes the local UI state.
* Handles the complete lifecycle from form submission to redirection.
*
* @pre LoginForm must be valid (validated by UI constraints).
* @pre Network must be available (optimistic check).
* @post SessionStore contains a valid JWT token.
* @post User is redirected to the Dashboard.
*
* @param {LoginCredentials} credentials - Email and password object.
* @returns {Promise<void>}
* @throws {NetworkError} If API is unreachable.
* @throws {AuthError} If credentials are invalid (401).
*
* @side_effect Updates global $session store.
* @side_effect Clears any existing error toasts.
*
* @algorithm 1. Set loading state -> 2. API Call -> 3. Decode Token -> 4. Update Store -> 5. Redirect.
*/
// @RELATION: CALLS -> api.auth.login
// @RELATION: MODIFIES_STATE_OF -> stores.session
// @RELATION: DISPATCHES -> 'toast:success'
async function handleUserLogin(credentials) {
// 1. Guard Clause (@PRE)
if (!isValid(credentials)) return;
try {
// ... logic ...
} catch (e) {
// Error handling (@THROW)
}
}
// [/DEF:handleUserLogin:Function]
```
---
### 4. Semantic Rules for Contracts
1. **Completeness:** A developer (or Agent) must be able to write the function body *solely* by reading the Contract, without guessing.
2. **No Implementation Leakage:** Describe *what* happens, not *how* (unless using `@ALGORITHM` for complexity reasons). E.g., say "Persists user" instead of "Inserts into users table via SQL".
3. **Active Voice:** Use active verbs (`Calculates`, `Updates`, `Enforces`) for stronger vector alignment.
4. **Graph Connectivity:** The `@RELATION` tags must explicitly link to other `[DEF:...]` IDs existing in the codebase. This builds the navigation graph for RAG.
---
## V. LOGGING STANDARD (BELIEF STATE)
Logs delineate the agent's internal state.
* **Python:** MUST use a Context Manager (e.g., `with belief_scope("ANCHOR_ID"):`) to ensure state consistency and automatic handling of Entry/Exit/Error states.
* Manual logging (inside scope): `logger.info(f"[{ANCHOR_ID}][{STATE}] Msg")`
* **Svelte/JS:** `console.log(\`[${ANCHOR_ID}][${STATE}] Msg\`)`
**Required States:**
1. `Entry` (Start of block - Auto-logged by Context Manager)
2. `Action` (Key business logic - Manual log)
3. `Coherence:OK` (Logic successfully completed - Auto-logged by Context Manager)
4. `Coherence:Failed` (Exception/Error - Auto-logged by Context Manager)
5. `Exit` (End of block - Auto-logged by Context Manager)
---
## VI. FRACTAL COMPLEXITY LIMIT
To maintain semantic coherence and avoid "Attention Sink" issues:
* **Module Size:** If a Module body exceeds ~300 lines (or logical complexity), it MUST be refactored into sub-modules or a package structure.
* **Function Size:** Functions should fit within a standard attention "chunk" (approx. 30-50 lines). If larger, logic MUST be decomposed into helper functions with their own contracts.
This ensures every vector embedding remains sharp and focused.
---
## VII. GENERATION WORKFLOW
1. **Context Analysis:** Identify language (Python vs Svelte) and Architecture Layer.
2. **Scaffolding:** Generate the `[DEF:...:...]` Anchors and Header/Contract **before** writing any logic.
3. **Implementation:** Write the code. Ensure the code logic handles the `@PRE` conditions (e.g., via `if/return` or guards) and satisfies `@POST` conditions naturally. **Do not write explicit `assert` statements unless debugging mode is requested.**
4. **Closure:** Ensure every `[DEF:...:...]` is closed with `[/DEF:...:...]` to accumulate semantic context.
ЕСЛИ ошибка или противоречие -> СТОП. Выведи `[COHERENCE_CHECK_FAILED]`.