feat(clean-release): complete and verify backend test suite (33 passing tests)
- Relocated and standardized tests for the clean_release subsystem into __tests__ sub-packages.
- Implemented missing unit tests for preparation_service, audit_service, and stages.
- Enhanced API contract tests for candidate preparation and compliance reporting.
- Updated the 023-clean-repo-enterprise coverage matrix with final verification results.
- Fixed relative-import issues and model-validation mismatches during test migration.
This commit is contained in:
@@ -19,6 +19,8 @@ from src.models.clean_release import (
|
||||
ReleaseCandidateStatus,
|
||||
ResourceSourceEntry,
|
||||
ResourceSourceRegistry,
|
||||
ComplianceReport,
|
||||
CheckFinalStatus,
|
||||
)
|
||||
from src.services.clean_release.repository import CleanReleaseRepository
|
||||
|
||||
@@ -107,5 +109,49 @@ def test_get_report_not_found_returns_404():
|
||||
client = TestClient(app)
|
||||
resp = client.get("/api/clean-release/reports/unknown-report")
|
||||
assert resp.status_code == 404
|
||||
finally:
|
||||
app.dependency_overrides.clear()
|
||||
|
||||
def test_get_report_success():
    """GET /reports/{id} returns 200 with the stored report payload."""
    seeded_repo = _repo_with_seed_data()
    seeded_repo.save_report(
        ComplianceReport(
            report_id="rep-1",
            check_run_id="run-1",
            candidate_id="2026.03.03-rc1",
            generated_at=datetime.now(timezone.utc),
            final_status=CheckFinalStatus.COMPLIANT,
            operator_summary="all systems go",
            structured_payload_ref="manifest-1",
            violations_count=0,
            blocking_violations_count=0,
        )
    )
    app.dependency_overrides[get_clean_release_repository] = lambda: seeded_repo
    try:
        response = TestClient(app).get("/api/clean-release/reports/rep-1")
        assert response.status_code == 200
        assert response.json()["report_id"] == "rep-1"
    finally:
        # Always restore the real dependency wiring for subsequent tests.
        app.dependency_overrides.clear()
|
||||
|
||||
def test_prepare_candidate_api_success():
    """POST /candidates/prepare returns 200 with a 'prepared' status and a manifest id."""
    seeded_repo = _repo_with_seed_data()
    app.dependency_overrides[get_clean_release_repository] = lambda: seeded_repo
    request_body = {
        "candidate_id": "2026.03.03-rc1",
        "artifacts": [{"path": "file1.txt", "category": "system-init", "reason": "core"}],
        "sources": ["repo.intra.company.local"],
        "operator_id": "operator-1",
    }
    try:
        response = TestClient(app).post(
            "/api/clean-release/candidates/prepare",
            json=request_body,
        )
        assert response.status_code == 200
        body = response.json()
        assert body["status"] == "prepared"
        assert "manifest_id" in body
    finally:
        # Always restore the real dependency wiring for subsequent tests.
        app.dependency_overrides.clear()
|
||||
@@ -182,6 +182,16 @@ async def get_task(
|
||||
# @POST: Returns a list of log entries or raises 404.
|
||||
# @RETURN: List[LogEntry] - List of log entries.
|
||||
# @TIER: CRITICAL
|
||||
# @TEST_CONTRACT get_task_logs_api ->
|
||||
# {
|
||||
# required_params: {task_id: str},
|
||||
# optional_params: {level: str, source: str, search: str},
|
||||
# invariants: ["returns 404 for non-existent task", "applies filters correctly"]
|
||||
# }
|
||||
# @TEST_FIXTURE valid_task_logs_request -> {"task_id": "test_1", "level": "INFO"}
|
||||
# @TEST_EDGE task_not_found -> raises 404
|
||||
# @TEST_EDGE invalid_limit -> Query(limit=0) returns 422
|
||||
# @TEST_INVARIANT response_purity -> verifies: [valid_task_logs_request]
|
||||
# @TEST_CONTRACT: TaskLogQueryInput -> List[LogEntry]
|
||||
# @TEST_SCENARIO: existing_task_logs_filtered -> Returns filtered logs by level/source/search with pagination.
|
||||
# @TEST_FIXTURE: valid_task_with_mixed_logs -> backend/tests/fixtures/task_logs/valid_task_with_mixed_logs.json
|
||||
|
||||
149
backend/src/models/__tests__/test_clean_release.py
Normal file
149
backend/src/models/__tests__/test_clean_release.py
Normal file
@@ -0,0 +1,149 @@
|
||||
# [DEF:__tests__/test_clean_release:Module]
|
||||
# @RELATION: VERIFIES -> ../clean_release.py
|
||||
# @PURPOSE: Contract testing for Clean Release models
|
||||
# [/DEF:__tests__/test_clean_release:Module]
|
||||
|
||||
import pytest
|
||||
from datetime import datetime
|
||||
from pydantic import ValidationError
|
||||
from src.models.clean_release import (
|
||||
ReleaseCandidate,
|
||||
ReleaseCandidateStatus,
|
||||
ProfileType,
|
||||
CleanProfilePolicy,
|
||||
DistributionManifest,
|
||||
ManifestItem,
|
||||
ManifestSummary,
|
||||
ClassificationType,
|
||||
ComplianceCheckRun,
|
||||
CheckFinalStatus,
|
||||
CheckStageResult,
|
||||
CheckStageName,
|
||||
CheckStageStatus,
|
||||
ComplianceReport,
|
||||
ExecutionMode
|
||||
)
|
||||
|
||||
# @TEST_FIXTURE: valid_enterprise_candidate
@pytest.fixture
def valid_candidate_data():
    """Baseline payload for a valid enterprise-clean release candidate."""
    return {
        "candidate_id": "RC-001",
        "version": "1.0.0",
        "profile": ProfileType.ENTERPRISE_CLEAN,
        "created_at": datetime.now(),
        "created_by": "admin",
        "source_snapshot_ref": "v1.0.0-snapshot",
    }


def test_release_candidate_valid(valid_candidate_data):
    """A valid payload yields a DRAFT candidate with the given id."""
    candidate = ReleaseCandidate(**valid_candidate_data)
    assert candidate.candidate_id == "RC-001"
    assert candidate.status == ReleaseCandidateStatus.DRAFT


def test_release_candidate_empty_id(valid_candidate_data):
    """A blank candidate_id is rejected by model validation."""
    payload = {**valid_candidate_data, "candidate_id": " "}
    with pytest.raises(ValueError, match="candidate_id must be non-empty"):
        ReleaseCandidate(**payload)
|
||||
|
||||
# @TEST_FIXTURE: valid_enterprise_policy
@pytest.fixture
def valid_policy_data():
    """Baseline payload for a valid enterprise-clean policy."""
    return {
        "policy_id": "POL-001",
        "policy_version": "1",
        "active": True,
        "prohibited_artifact_categories": ["test-data"],
        "required_system_categories": ["core"],
        "internal_source_registry_ref": "REG-1",
        "effective_from": datetime.now(),
        "profile": ProfileType.ENTERPRISE_CLEAN,
    }


# @TEST_INVARIANT: policy_purity
def test_enterprise_policy_valid(valid_policy_data):
    """Enterprise-clean policies default to forbidding external sources."""
    assert CleanProfilePolicy(**valid_policy_data).external_source_forbidden is True


# @TEST_EDGE: enterprise_policy_missing_prohibited
def test_enterprise_policy_missing_prohibited(valid_policy_data):
    """An empty prohibited-categories list is rejected for enterprise-clean."""
    payload = {**valid_policy_data, "prohibited_artifact_categories": []}
    with pytest.raises(ValueError, match="enterprise-clean policy requires prohibited_artifact_categories"):
        CleanProfilePolicy(**payload)


# @TEST_EDGE: enterprise_policy_external_allowed
def test_enterprise_policy_external_allowed(valid_policy_data):
    """external_source_forbidden=False is rejected for enterprise-clean."""
    payload = {**valid_policy_data, "external_source_forbidden": False}
    with pytest.raises(ValueError, match="enterprise-clean policy requires external_source_forbidden=true"):
        CleanProfilePolicy(**payload)
|
||||
|
||||
# @TEST_INVARIANT: manifest_consistency
# @TEST_EDGE: manifest_count_mismatch
def test_manifest_count_mismatch():
    """Manifest summary counts must agree with the number of items."""
    summary = ManifestSummary(included_count=1, excluded_count=0, prohibited_detected_count=0)
    item = ManifestItem(path="p", category="c", classification=ClassificationType.ALLOWED, reason="r")

    def build_manifest():
        # Shared builder so both the valid and invalid cases use identical inputs.
        return DistributionManifest(
            manifest_id="m1",
            candidate_id="rc1",
            policy_id="p1",
            generated_at=datetime.now(),
            generated_by="u",
            items=[item],
            summary=summary,
            deterministic_hash="h",
        )

    # Consistent counts: construction succeeds.
    build_manifest()

    # Inconsistent counts: construction is rejected.
    summary.included_count = 2
    with pytest.raises(ValueError, match="manifest summary counts must match items size"):
        build_manifest()
|
||||
|
||||
# @TEST_INVARIANT: run_integrity
# @TEST_EDGE: compliant_run_stage_fail
def test_compliant_run_validation():
    """A COMPLIANT run requires every mandatory stage present and passing."""
    all_pass = [
        CheckStageResult(stage=CheckStageName.DATA_PURITY, status=CheckStageStatus.PASS),
        CheckStageResult(stage=CheckStageName.INTERNAL_SOURCES_ONLY, status=CheckStageStatus.PASS),
        CheckStageResult(stage=CheckStageName.NO_EXTERNAL_ENDPOINTS, status=CheckStageStatus.PASS),
        CheckStageResult(stage=CheckStageName.MANIFEST_CONSISTENCY, status=CheckStageStatus.PASS),
    ]
    base_run = {
        "check_run_id": "run1",
        "candidate_id": "rc1",
        "policy_id": "p1",
        "started_at": datetime.now(),
        "triggered_by": "u",
        "execution_mode": ExecutionMode.TUI,
        "final_status": CheckFinalStatus.COMPLIANT,
        "checks": all_pass,
    }

    # All mandatory stages pass: accepted.
    ComplianceCheckRun(**base_run)

    # A failed stage contradicts COMPLIANT.
    base_run["checks"][0].status = CheckStageStatus.FAIL
    with pytest.raises(ValueError, match="compliant run requires PASS on all mandatory stages"):
        ComplianceCheckRun(**base_run)

    # Dropping the (now failed) first stage leaves a missing mandatory stage,
    # which also contradicts COMPLIANT.
    base_run["checks"] = base_run["checks"][1:]
    with pytest.raises(ValueError, match="compliant run requires all mandatory stages"):
        ComplianceCheckRun(**base_run)
|
||||
|
||||
def test_report_validation():
    """A BLOCKED report must carry at least one blocking violation."""
    common = dict(
        report_id="rep1",
        check_run_id="run1",
        candidate_id="rc1",
        generated_at=datetime.now(),
        final_status=CheckFinalStatus.BLOCKED,
        operator_summary="Blocked",
        structured_payload_ref="ref",
        violations_count=2,
    )

    # BLOCKED with blocking violations: accepted.
    ComplianceReport(**common, blocking_violations_count=2)

    # BLOCKED with zero blocking violations: rejected.
    with pytest.raises(ValueError, match="blocked report requires blocking violations"):
        ComplianceReport(**common, blocking_violations_count=0)
|
||||
@@ -5,6 +5,28 @@
|
||||
# @LAYER: Domain
|
||||
# @RELATION: BINDS_TO -> specs/023-clean-repo-enterprise/data-model.md
|
||||
# @INVARIANT: Enterprise-clean policy always forbids external sources.
|
||||
#
|
||||
# @TEST_CONTRACT CleanReleaseModels ->
|
||||
# {
|
||||
# required_fields: {
|
||||
# ReleaseCandidate: [candidate_id, version, profile, source_snapshot_ref],
|
||||
# CleanProfilePolicy: [policy_id, policy_version, internal_source_registry_ref]
|
||||
# },
|
||||
# invariants: [
|
||||
# "enterprise-clean profile enforces external_source_forbidden=True",
|
||||
# "manifest summary counts are consistent with items",
|
||||
# "compliant run requires all mandatory stages to pass"
|
||||
# ]
|
||||
# }
|
||||
# @TEST_FIXTURE valid_enterprise_candidate -> {"candidate_id": "RC-001", "version": "1.0.0", "profile": "enterprise-clean", "source_snapshot_ref": "v1.0.0-snapshot"}
|
||||
# @TEST_FIXTURE valid_enterprise_policy -> {"policy_id": "POL-001", "policy_version": "1", "internal_source_registry_ref": "REG-1", "prohibited_artifact_categories": ["test-data"]}
|
||||
# @TEST_EDGE enterprise_policy_missing_prohibited -> profile=enterprise-clean with empty prohibited_artifact_categories raises ValueError
|
||||
# @TEST_EDGE enterprise_policy_external_allowed -> profile=enterprise-clean with external_source_forbidden=False raises ValueError
|
||||
# @TEST_EDGE manifest_count_mismatch -> included + excluded != len(items) raises ValueError
|
||||
# @TEST_EDGE compliant_run_stage_fail -> COMPLIANT run with failed stage raises ValueError
|
||||
# @TEST_INVARIANT policy_purity -> verifies: [valid_enterprise_policy, enterprise_policy_external_allowed]
|
||||
# @TEST_INVARIANT manifest_consistency -> verifies: [manifest_count_mismatch]
|
||||
# @TEST_INVARIANT run_integrity -> verifies: [compliant_run_stage_fail]
|
||||
# @TEST_CONTRACT: CleanReleaseModelPayload -> ValidatedCleanReleaseModel | ValidationError
|
||||
# @TEST_SCENARIO: valid_enterprise_models -> CRITICAL entities validate and preserve lifecycle/compliance invariants.
|
||||
# @TEST_FIXTURE: clean_release_models_baseline -> backend/tests/fixtures/clean_release/fixtures_clean_release.json
|
||||
|
||||
@@ -0,0 +1,24 @@
|
||||
# [DEF:backend.tests.services.clean_release.test_audit_service:Module]
|
||||
# @TIER: STANDARD
|
||||
# @SEMANTICS: tests, clean-release, audit, logging
|
||||
# @PURPOSE: Validate audit hooks emit expected log patterns for clean release lifecycle.
|
||||
# @LAYER: Infra
|
||||
# @RELATION: TESTS -> backend.src.services.clean_release.audit_service
|
||||
|
||||
from unittest.mock import patch
|
||||
from src.services.clean_release.audit_service import audit_preparation, audit_check_run, audit_report
|
||||
|
||||
@patch("src.services.clean_release.audit_service.logger")
def test_audit_preparation(logger_mock):
    """audit_preparation logs a [REASON]-tagged line with candidate and status."""
    audit_preparation("cand-1", "PREPARED")
    expected = "[REASON] clean-release preparation candidate=cand-1 status=PREPARED"
    logger_mock.info.assert_called_with(expected)


@patch("src.services.clean_release.audit_service.logger")
def test_audit_check_run(logger_mock):
    """audit_check_run logs a [REFLECT]-tagged line with run id and final status."""
    audit_check_run("check-1", "COMPLIANT")
    expected = "[REFLECT] clean-release check_run=check-1 final_status=COMPLIANT"
    logger_mock.info.assert_called_with(expected)


@patch("src.services.clean_release.audit_service.logger")
def test_audit_report(logger_mock):
    """audit_report logs an [EXPLORE]-tagged line with report and candidate ids."""
    audit_report("rep-1", "cand-1")
    expected = "[EXPLORE] clean-release report_id=rep-1 candidate=cand-1"
    logger_mock.info.assert_called_with(expected)
|
||||
@@ -0,0 +1,114 @@
|
||||
# [DEF:__tests__/test_policy_engine:Module]
|
||||
# @RELATION: VERIFIES -> ../policy_engine.py
|
||||
# @PURPOSE: Contract testing for CleanPolicyEngine
|
||||
# [/DEF:__tests__/test_policy_engine:Module]
|
||||
|
||||
import pytest
|
||||
from datetime import datetime
|
||||
from src.models.clean_release import (
|
||||
CleanProfilePolicy,
|
||||
ResourceSourceRegistry,
|
||||
ResourceSourceEntry,
|
||||
ProfileType,
|
||||
RegistryStatus
|
||||
)
|
||||
from src.services.clean_release.policy_engine import CleanPolicyEngine
|
||||
|
||||
# @TEST_FIXTURE: policy_enterprise_clean
@pytest.fixture
def enterprise_clean_setup():
    """Build a matching enterprise-clean policy and active internal registry pair."""
    policy = CleanProfilePolicy(
        policy_id="POL-1",
        policy_version="1",
        active=True,
        prohibited_artifact_categories=["demo", "test"],
        required_system_categories=["core"],
        internal_source_registry_ref="REG-1",
        effective_from=datetime.now(),
        profile=ProfileType.ENTERPRISE_CLEAN,
    )
    internal_source = ResourceSourceEntry(
        source_id="S1", host="internal.com", protocol="https", purpose="p1", enabled=True
    )
    registry = ResourceSourceRegistry(
        registry_id="REG-1",
        name="Internal Registry",
        entries=[internal_source],
        updated_at=datetime.now(),
        updated_by="admin",
        status=RegistryStatus.ACTIVE,
    )
    return policy, registry
|
||||
|
||||
# @TEST_SCENARIO: policy_valid
def test_policy_valid(enterprise_clean_setup):
    """A consistent policy/registry pair validates with no blocking reasons."""
    policy, registry = enterprise_clean_setup
    outcome = CleanPolicyEngine(policy, registry).validate_policy()
    assert outcome.ok is True
    assert not outcome.blocking_reasons


# @TEST_EDGE: missing_registry_ref
def test_missing_registry_ref(enterprise_clean_setup):
    """A blank registry ref on the policy blocks validation."""
    policy, registry = enterprise_clean_setup
    policy.internal_source_registry_ref = " "
    outcome = CleanPolicyEngine(policy, registry).validate_policy()
    assert outcome.ok is False
    assert "Policy missing internal_source_registry_ref" in outcome.blocking_reasons


# @TEST_EDGE: conflicting_registry
def test_conflicting_registry(enterprise_clean_setup):
    """A registry whose id differs from the policy's ref blocks validation."""
    policy, registry = enterprise_clean_setup
    registry.registry_id = "WRONG-REG"
    outcome = CleanPolicyEngine(policy, registry).validate_policy()
    assert outcome.ok is False
    assert "Policy registry ref does not match provided registry" in outcome.blocking_reasons
|
||||
|
||||
# @TEST_INVARIANT: deterministic_classification
def test_classify_artifact(enterprise_clean_setup):
    """Artifact classification is driven purely by category membership."""
    policy, registry = enterprise_clean_setup
    engine = CleanPolicyEngine(policy, registry)

    # Category in required_system_categories -> required-system.
    assert engine.classify_artifact({"category": "core", "path": "p1"}) == "required-system"
    # Category in prohibited_artifact_categories -> excluded-prohibited.
    assert engine.classify_artifact({"category": "demo", "path": "p2"}) == "excluded-prohibited"
    # Anything else -> allowed.
    assert engine.classify_artifact({"category": "others", "path": "p3"}) == "allowed"
|
||||
|
||||
# @TEST_EDGE: external_endpoint
def test_validate_resource_source(enterprise_clean_setup):
    """Registered internal hosts pass; unknown hosts are blocking external-source violations."""
    policy, registry = enterprise_clean_setup
    engine = CleanPolicyEngine(policy, registry)

    # Host present in the registry is accepted.
    assert engine.validate_resource_source("internal.com").ok is True

    # Host absent from the registry is rejected with a release-blocking violation.
    rejected = engine.validate_resource_source("external.evil")
    assert rejected.ok is False
    assert rejected.violation["category"] == "external-source"
    assert rejected.violation["blocked_release"] is True
|
||||
|
||||
def test_evaluate_candidate(enterprise_clean_setup):
    """Candidate evaluation classifies every artifact and collects all violations."""
    policy, registry = enterprise_clean_setup
    engine = CleanPolicyEngine(policy, registry)

    classified, violations = engine.evaluate_candidate(
        [
            {"path": "core.js", "category": "core"},
            {"path": "demo.sql", "category": "demo"},
        ],
        ["internal.com", "google.com"],
    )

    assert len(classified) == 2
    assert [entry["classification"] for entry in classified] == [
        "required-system",
        "excluded-prohibited",
    ]

    # One violation for the prohibited artifact, one for the external source.
    assert len(violations) == 2
    assert violations[0]["category"] == "data-purity"
    assert violations[1]["category"] == "external-source"
|
||||
@@ -0,0 +1,127 @@
|
||||
# [DEF:backend.tests.services.clean_release.test_preparation_service:Module]
|
||||
# @TIER: STANDARD
|
||||
# @SEMANTICS: tests, clean-release, preparation, flow
|
||||
# @PURPOSE: Validate release candidate preparation flow, including policy evaluation and manifest persisting.
|
||||
# @LAYER: Domain
|
||||
# @RELATION: TESTS -> backend.src.services.clean_release.preparation_service
|
||||
# @INVARIANT: Candidate preparation always persists manifest and candidate status deterministically.
|
||||
|
||||
import pytest
|
||||
from unittest.mock import MagicMock, patch
|
||||
from datetime import datetime, timezone
|
||||
|
||||
from src.models.clean_release import (
|
||||
CleanProfilePolicy,
|
||||
ResourceSourceRegistry,
|
||||
ResourceSourceEntry,
|
||||
ReleaseCandidate,
|
||||
ReleaseCandidateStatus,
|
||||
ProfileType,
|
||||
DistributionManifest
|
||||
)
|
||||
from src.services.clean_release.preparation_service import prepare_candidate
|
||||
|
||||
def _mock_policy() -> CleanProfilePolicy:
    """Active enterprise-clean policy referencing registry 'reg-1'."""
    return CleanProfilePolicy(
        policy_id="pol-1",
        policy_version="1.0.0",
        active=True,
        prohibited_artifact_categories=["prohibited"],
        required_system_categories=["system"],
        external_source_forbidden=True,
        internal_source_registry_ref="reg-1",
        effective_from=datetime.now(timezone.utc),
        profile=ProfileType.ENTERPRISE_CLEAN,
    )


def _mock_registry() -> ResourceSourceRegistry:
    """Registry 'reg-1' with a single enabled internal source."""
    internal_source = ResourceSourceEntry(
        source_id="s1", host="nexus.internal", protocol="https", purpose="pkg", enabled=True
    )
    return ResourceSourceRegistry(
        registry_id="reg-1",
        name="Reg",
        entries=[internal_source],
        updated_at=datetime.now(timezone.utc),
        updated_by="tester",
    )


def _mock_candidate(candidate_id: str) -> ReleaseCandidate:
    """DRAFT enterprise-clean candidate with the given id."""
    return ReleaseCandidate(
        candidate_id=candidate_id,
        version="1.0.0",
        profile=ProfileType.ENTERPRISE_CLEAN,
        created_at=datetime.now(timezone.utc),
        status=ReleaseCandidateStatus.DRAFT,
        created_by="tester",
        source_snapshot_ref="v1.0.0-snapshot",
    )
|
||||
|
||||
def test_prepare_candidate_success():
    """A clean evaluation marks the candidate PREPARED and persists the manifest."""
    repository = MagicMock()
    candidate = _mock_candidate("cand-1")
    repository.get_candidate.return_value = candidate
    repository.get_active_policy.return_value = _mock_policy()
    repository.get_registry.return_value = _mock_registry()

    # The policy engine is stubbed so the test only exercises the preparation flow.
    with patch("src.services.clean_release.preparation_service.CleanPolicyEngine") as engine_cls:
        engine = engine_cls.return_value
        engine.validate_policy.return_value.ok = True
        engine.evaluate_candidate.return_value = (
            [
                {
                    "path": "file1.txt",
                    "category": "system",
                    "classification": "required-system",
                    "reason": "system-core",
                }
            ],
            [],  # no violations
        )

        result = prepare_candidate(
            repository,
            "cand-1",
            [{"path": "file1.txt", "category": "system"}],
            ["nexus.internal"],
            "operator-1",
        )

    assert result["status"] == ReleaseCandidateStatus.PREPARED.value
    assert candidate.status == ReleaseCandidateStatus.PREPARED
    repository.save_manifest.assert_called_once()
    repository.save_candidate.assert_called_with(candidate)
|
||||
|
||||
def test_prepare_candidate_with_violations():
    """A blocking violation marks the candidate BLOCKED and is surfaced in the result."""
    repository = MagicMock()
    candidate = _mock_candidate("cand-1")
    repository.get_candidate.return_value = candidate
    repository.get_active_policy.return_value = _mock_policy()
    repository.get_registry.return_value = _mock_registry()

    # The policy engine is stubbed to report a single blocking violation.
    with patch("src.services.clean_release.preparation_service.CleanPolicyEngine") as engine_cls:
        engine = engine_cls.return_value
        engine.validate_policy.return_value.ok = True
        engine.evaluate_candidate.return_value = (
            [
                {
                    "path": "bad.txt",
                    "category": "prohibited",
                    "classification": "excluded-prohibited",
                    "reason": "test-data",
                }
            ],
            [{"category": "data-purity", "blocked_release": True}],
        )

        result = prepare_candidate(
            repository,
            "cand-1",
            [{"path": "bad.txt", "category": "prohibited"}],
            [],
            "operator-1",
        )

    assert result["status"] == ReleaseCandidateStatus.BLOCKED.value
    assert candidate.status == ReleaseCandidateStatus.BLOCKED
    assert len(result["violations"]) == 1
|
||||
|
||||
def test_prepare_candidate_not_found():
    """An unknown candidate id raises a ValueError."""
    repository = MagicMock()
    repository.get_candidate.return_value = None

    with pytest.raises(ValueError, match="Candidate not found"):
        prepare_candidate(repository, "non-existent", [], [], "op")


def test_prepare_candidate_no_active_policy():
    """A missing active policy raises a ValueError."""
    repository = MagicMock()
    repository.get_candidate.return_value = _mock_candidate("cand-1")
    repository.get_active_policy.return_value = None

    with pytest.raises(ValueError, match="Active clean policy not found"):
        prepare_candidate(repository, "cand-1", [], [], "op")
|
||||
27
backend/src/services/clean_release/__tests__/test_stages.py
Normal file
27
backend/src/services/clean_release/__tests__/test_stages.py
Normal file
@@ -0,0 +1,27 @@
|
||||
# [DEF:backend.tests.services.clean_release.test_stages:Module]
|
||||
# @TIER: STANDARD
|
||||
# @SEMANTICS: tests, clean-release, compliance, stages
|
||||
# @PURPOSE: Validate final status derivation logic from stage results.
|
||||
# @LAYER: Domain
|
||||
# @RELATION: TESTS -> backend.src.services.clean_release.stages
|
||||
|
||||
from src.models.clean_release import CheckFinalStatus, CheckStageName, CheckStageResult, CheckStageStatus
|
||||
from src.services.clean_release.stages import derive_final_status, MANDATORY_STAGE_ORDER
|
||||
|
||||
def _all_pass_results():
    """One PASS result per mandatory stage, in canonical order."""
    return [
        CheckStageResult(stage=stage, status=CheckStageStatus.PASS, details="ok")
        for stage in MANDATORY_STAGE_ORDER
    ]


def test_derive_final_status_compliant():
    """All mandatory stages passing derives COMPLIANT."""
    assert derive_final_status(_all_pass_results()) == CheckFinalStatus.COMPLIANT


def test_derive_final_status_blocked():
    """Any FAIL among the mandatory stages derives BLOCKED."""
    results = _all_pass_results()
    results[1].status = CheckStageStatus.FAIL
    assert derive_final_status(results) == CheckFinalStatus.BLOCKED


def test_derive_final_status_failed_missing():
    """A missing mandatory stage derives FAILED."""
    only_first = [
        CheckStageResult(stage=MANDATORY_STAGE_ORDER[0], status=CheckStageStatus.PASS, details="ok")
    ]
    assert derive_final_status(only_first) == CheckFinalStatus.FAILED


def test_derive_final_status_failed_skipped():
    """A SKIPPED mandatory stage derives FAILED."""
    results = _all_pass_results()
    results[2].status = CheckStageStatus.SKIPPED
    assert derive_final_status(results) == CheckFinalStatus.FAILED
|
||||
@@ -1,144 +0,0 @@
|
||||
# [DEF:backend.tests.services.clean_release.test_policy_engine:Module]
|
||||
# @TIER: CRITICAL
|
||||
# @SEMANTICS: tests, clean-release, policy-engine, deterministic
|
||||
# @PURPOSE: Validate policy model contracts and deterministic classification prerequisites for US1.
|
||||
# @LAYER: Domain
|
||||
# @RELATION: VERIFIES -> backend.src.models.clean_release.CleanProfilePolicy
|
||||
# @INVARIANT: Enterprise policy rejects invalid activation states.
|
||||
|
||||
import pytest
|
||||
from datetime import datetime, timezone
|
||||
|
||||
from src.models.clean_release import CleanProfilePolicy, ProfileType
|
||||
|
||||
|
||||
# [DEF:test_policy_enterprise_clean_valid:Function]
# @PURPOSE: Ensure valid enterprise policy payload is accepted.
# @PRE: Fixture-like payload contains prohibited categories and registry ref.
# @POST: Model is created with external_source_forbidden=True.
def test_policy_enterprise_clean_valid():
    payload = dict(
        policy_id="policy-enterprise-clean-v1",
        policy_version="1.0.0",
        active=True,
        prohibited_artifact_categories=["test-data", "demo-data"],
        required_system_categories=["system-init"],
        external_source_forbidden=True,
        internal_source_registry_ref="registry-internal-v1",
        effective_from=datetime.now(timezone.utc),
        profile=ProfileType.ENTERPRISE_CLEAN,
    )
    policy = CleanProfilePolicy(**payload)
    assert policy.external_source_forbidden is True
    assert policy.prohibited_artifact_categories == ["test-data", "demo-data"]
# [/DEF:test_policy_enterprise_clean_valid:Function]
|
||||
|
||||
|
||||
# [DEF:test_policy_missing_registry_fails:Function]
# @PURPOSE: Verify missing registry ref violates policy contract.
# @PRE: enterprise-clean policy payload has blank registry ref.
# @POST: Validation error is raised.
def test_policy_missing_registry_fails():
    payload = dict(
        policy_id="policy-enterprise-clean-v1",
        policy_version="1.0.0",
        active=True,
        prohibited_artifact_categories=["test-data"],
        required_system_categories=["system-init"],
        external_source_forbidden=True,
        internal_source_registry_ref="",  # blank ref violates the contract
        effective_from=datetime.now(timezone.utc),
        profile=ProfileType.ENTERPRISE_CLEAN,
    )
    with pytest.raises(ValueError):
        CleanProfilePolicy(**payload)
# [/DEF:test_policy_missing_registry_fails:Function]
|
||||
|
||||
|
||||
# [DEF:test_policy_empty_prohibited_categories_fails:Function]
# @PURPOSE: Verify enterprise policy cannot activate without prohibited categories.
# @PRE: enterprise-clean policy payload has empty prohibited categories.
# @POST: Validation error is raised.
def test_policy_empty_prohibited_categories_fails():
    payload = dict(
        policy_id="policy-enterprise-clean-v1",
        policy_version="1.0.0",
        active=True,
        prohibited_artifact_categories=[],  # empty list violates the contract
        required_system_categories=["system-init"],
        external_source_forbidden=True,
        internal_source_registry_ref="registry-internal-v1",
        effective_from=datetime.now(timezone.utc),
        profile=ProfileType.ENTERPRISE_CLEAN,
    )
    with pytest.raises(ValueError):
        CleanProfilePolicy(**payload)
# [/DEF:test_policy_empty_prohibited_categories_fails:Function]
|
||||
|
||||
|
||||
# [DEF:test_policy_conflicting_external_forbidden_flag_fails:Function]
# @PURPOSE: Verify enterprise policy enforces external_source_forbidden=true.
# @PRE: enterprise-clean policy payload sets external_source_forbidden to false.
# @POST: Validation error is raised.
def test_policy_conflicting_external_forbidden_flag_fails():
    payload = dict(
        policy_id="policy-enterprise-clean-v1",
        policy_version="1.0.0",
        active=True,
        prohibited_artifact_categories=["test-data"],
        required_system_categories=["system-init"],
        external_source_forbidden=False,  # conflicts with the enterprise-clean profile
        internal_source_registry_ref="registry-internal-v1",
        effective_from=datetime.now(timezone.utc),
        profile=ProfileType.ENTERPRISE_CLEAN,
    )
    with pytest.raises(ValueError):
        CleanProfilePolicy(**payload)
# [/DEF:test_policy_conflicting_external_forbidden_flag_fails:Function]
|
||||
# [/DEF:backend.tests.services.clean_release.test_policy_engine:Module]
|
||||
from src.models.clean_release import ResourceSourceRegistry, ResourceSourceEntry, RegistryStatus
|
||||
from src.services.clean_release.policy_engine import CleanPolicyEngine
|
||||
|
||||
def _policy_enterprise_clean() -> CleanProfilePolicy:
    """Valid enterprise-clean policy referencing the internal registry."""
    return CleanProfilePolicy(
        policy_id="policy-enterprise-clean-v1",
        policy_version="1.0.0",
        active=True,
        prohibited_artifact_categories=["test-data"],
        required_system_categories=["system-init"],
        external_source_forbidden=True,
        internal_source_registry_ref="registry-internal-v1",
        effective_from=datetime.now(timezone.utc),
        profile=ProfileType.ENTERPRISE_CLEAN,
    )


def _registry() -> ResourceSourceRegistry:
    """Internal registry whose id matches the policy's registry ref."""
    sources = [
        ResourceSourceEntry(
            source_id="1", host="nexus.internal", protocol="https", purpose="pkg", enabled=True
        )
    ]
    return ResourceSourceRegistry(
        registry_id="registry-internal-v1",
        name="Internal",
        entries=sources,
        updated_at=datetime.now(timezone.utc),
        updated_by="tester",
    )
|
||||
|
||||
# [DEF:test_policy_valid:Function]
# @PURPOSE: Validate policy valid scenario
def test_policy_valid():
    outcome = CleanPolicyEngine(_policy_enterprise_clean(), _registry()).validate_policy()
    assert outcome.ok is True


# [DEF:test_conflicting_registry:Function]
# @PURPOSE: Validate policy conflicting registry edge
def test_conflicting_registry():
    registry = _registry()
    registry.registry_id = "other-registry"
    outcome = CleanPolicyEngine(_policy_enterprise_clean(), registry).validate_policy()
    assert outcome.ok is False
    assert "Policy registry ref does not match provided registry" in outcome.blocking_reasons


# [DEF:test_external_endpoint:Function]
# @PURPOSE: Validate policy external endpoint edge
def test_external_endpoint():
    engine = CleanPolicyEngine(_policy_enterprise_clean(), _registry())
    outcome = engine.validate_resource_source("external.org")
    assert outcome.ok is False
    assert outcome.violation["category"] == "external-source"
|
||||
|
||||
Reference in New Issue
Block a user