diff --git a/backend/src/api/routes/__init__.py b/backend/src/api/routes/__init__.py
index fdd1a9f..71c4abe 100755
--- a/backend/src/api/routes/__init__.py
+++ b/backend/src/api/routes/__init__.py
@@ -6,7 +6,7 @@
# @RELATION: DEPENDS_ON -> importlib
# @INVARIANT: Only names listed in __all__ are importable via __getattr__.
-__all__ = ['plugins', 'tasks', 'settings', 'connections', 'environments', 'mappings', 'migration', 'git', 'storage', 'admin', 'reports', 'assistant', 'clean_release']
+__all__ = ['plugins', 'tasks', 'settings', 'connections', 'environments', 'mappings', 'migration', 'git', 'storage', 'admin', 'reports', 'assistant', 'clean_release', 'profile']
# [DEF:__getattr__:Function]
diff --git a/backend/src/api/routes/__tests__/test_dashboards.py b/backend/src/api/routes/__tests__/test_dashboards.py
index 80957d4..45f8f22 100644
--- a/backend/src/api/routes/__tests__/test_dashboards.py
+++ b/backend/src/api/routes/__tests__/test_dashboards.py
@@ -11,9 +11,11 @@ from fastapi.testclient import TestClient
from src.app import app
from src.api.routes.dashboards import DashboardsResponse
from src.dependencies import get_current_user, has_permission, get_config_manager, get_task_manager, get_resource_service, get_mapping_service
+from src.core.database import get_db
# Global mock user for get_current_user dependency overrides
mock_user = MagicMock()
+mock_user.id = "u-1"
mock_user.username = "testuser"
mock_user.roles = []
admin_role = MagicMock()
@@ -27,11 +29,14 @@ def mock_deps():
resource_service = MagicMock()
mapping_service = MagicMock()
+ db = MagicMock()
+
app.dependency_overrides[get_config_manager] = lambda: config_manager
app.dependency_overrides[get_task_manager] = lambda: task_manager
app.dependency_overrides[get_resource_service] = lambda: resource_service
app.dependency_overrides[get_mapping_service] = lambda: mapping_service
app.dependency_overrides[get_current_user] = lambda: mock_user
+ app.dependency_overrides[get_db] = lambda: db
app.dependency_overrides[has_permission("plugin:migration", "READ")] = lambda: mock_user
app.dependency_overrides[has_permission("plugin:migration", "EXECUTE")] = lambda: mock_user
@@ -42,7 +47,8 @@ def mock_deps():
"config": config_manager,
"task": task_manager,
"resource": resource_service,
- "mapping": mapping_service
+ "mapping": mapping_service,
+ "db": db,
}
app.dependency_overrides.clear()
@@ -495,4 +501,309 @@ def test_get_dashboard_thumbnail_success(mock_deps):
# [/DEF:test_get_dashboard_thumbnail_success:Function]
+# [DEF:_build_profile_preference_stub:Function]
+# @PURPOSE: Creates profile preference payload stub for dashboards filter contract tests.
+# @PRE: username can be empty; enabled indicates profile-default toggle state.
+# @POST: Returns object compatible with ProfileService.get_my_preference contract.
+def _build_profile_preference_stub(username: str, enabled: bool):
+ preference = MagicMock()
+ preference.superset_username = username
+ preference.superset_username_normalized = str(username or "").strip().lower() or None
+ preference.show_only_my_dashboards = bool(enabled)
+
+ payload = MagicMock()
+ payload.preference = preference
+ return payload
+# [/DEF:_build_profile_preference_stub:Function]
+
+
+# [DEF:_matches_actor_case_insensitive:Function]
+# @PURPOSE: Applies trim + case-insensitive owners OR modified_by matching used by route contract tests.
+# @PRE: owners can be None or list-like values.
+# @POST: Returns True when bound username matches any owner or modified_by.
+def _matches_actor_case_insensitive(bound_username, owners, modified_by):
+ normalized_bound = str(bound_username or "").strip().lower()
+ if not normalized_bound:
+ return False
+
+ owner_tokens = []
+ for owner in owners or []:
+ token = str(owner or "").strip().lower()
+ if token:
+ owner_tokens.append(token)
+
+ modified_token = str(modified_by or "").strip().lower()
+ return normalized_bound in owner_tokens or bool(modified_token and modified_token == normalized_bound)
+# [/DEF:_matches_actor_case_insensitive:Function]
+
+
+# [DEF:test_get_dashboards_profile_filter_contract_owners_or_modified_by:Function]
+# @TEST: GET /api/dashboards applies profile-default filter with owners OR modified_by trim+case-insensitive semantics.
+# @PRE: Current user has enabled profile-default preference and bound username.
+# @POST: Response includes only matching dashboards and effective_profile_filter metadata.
+def test_get_dashboards_profile_filter_contract_owners_or_modified_by(mock_deps):
+ mock_env = MagicMock()
+ mock_env.id = "prod"
+ mock_deps["config"].get_environments.return_value = [mock_env]
+ mock_deps["task"].get_all_tasks.return_value = []
+ mock_deps["resource"].get_dashboards_with_status = AsyncMock(return_value=[
+ {
+ "id": 1,
+ "title": "Owner Match",
+ "slug": "owner-match",
+ "owners": [" John_Doe "],
+ "modified_by": "someone_else",
+ },
+ {
+ "id": 2,
+ "title": "Modifier Match",
+ "slug": "modifier-match",
+ "owners": ["analytics-team"],
+ "modified_by": " JOHN_DOE ",
+ },
+ {
+ "id": 3,
+ "title": "No Match",
+ "slug": "no-match",
+ "owners": ["another-user"],
+ "modified_by": "nobody",
+ },
+ ])
+
+ with patch("src.api.routes.dashboards.ProfileService") as profile_service_cls:
+ profile_service = MagicMock()
+ profile_service.get_my_preference.return_value = _build_profile_preference_stub(
+ username=" JOHN_DOE ",
+ enabled=True,
+ )
+ profile_service.matches_dashboard_actor.side_effect = _matches_actor_case_insensitive
+ profile_service_cls.return_value = profile_service
+
+ response = client.get(
+ "/api/dashboards?env_id=prod&page_context=dashboards_main&apply_profile_default=true"
+ )
+
+ assert response.status_code == 200
+ payload = response.json()
+
+ assert payload["total"] == 2
+ assert {item["id"] for item in payload["dashboards"]} == {1, 2}
+ assert payload["effective_profile_filter"]["applied"] is True
+ assert payload["effective_profile_filter"]["source_page"] == "dashboards_main"
+ assert payload["effective_profile_filter"]["override_show_all"] is False
+ assert payload["effective_profile_filter"]["username"] == "john_doe"
+ assert payload["effective_profile_filter"]["match_logic"] == "owners_or_modified_by"
+# [/DEF:test_get_dashboards_profile_filter_contract_owners_or_modified_by:Function]
+
+
+# [DEF:test_get_dashboards_override_show_all_contract:Function]
+# @TEST: GET /api/dashboards honors override_show_all and disables profile-default filter for current page.
+# @PRE: Profile-default preference exists but override_show_all=true query is provided.
+# @POST: Response remains unfiltered and effective_profile_filter.applied is false.
+def test_get_dashboards_override_show_all_contract(mock_deps):
+ mock_env = MagicMock()
+ mock_env.id = "prod"
+ mock_deps["config"].get_environments.return_value = [mock_env]
+ mock_deps["task"].get_all_tasks.return_value = []
+ mock_deps["resource"].get_dashboards_with_status = AsyncMock(return_value=[
+ {"id": 1, "title": "Dash A", "slug": "dash-a", "owners": ["john_doe"], "modified_by": "john_doe"},
+ {"id": 2, "title": "Dash B", "slug": "dash-b", "owners": ["other"], "modified_by": "other"},
+ ])
+
+ with patch("src.api.routes.dashboards.ProfileService") as profile_service_cls:
+ profile_service = MagicMock()
+ profile_service.get_my_preference.return_value = _build_profile_preference_stub(
+ username="john_doe",
+ enabled=True,
+ )
+ profile_service.matches_dashboard_actor.side_effect = _matches_actor_case_insensitive
+ profile_service_cls.return_value = profile_service
+
+ response = client.get(
+ "/api/dashboards?env_id=prod&page_context=dashboards_main&apply_profile_default=true&override_show_all=true"
+ )
+
+ assert response.status_code == 200
+ payload = response.json()
+
+ assert payload["total"] == 2
+ assert {item["id"] for item in payload["dashboards"]} == {1, 2}
+ assert payload["effective_profile_filter"]["applied"] is False
+ assert payload["effective_profile_filter"]["source_page"] == "dashboards_main"
+ assert payload["effective_profile_filter"]["override_show_all"] is True
+ assert payload["effective_profile_filter"]["username"] is None
+ assert payload["effective_profile_filter"]["match_logic"] is None
+ profile_service.matches_dashboard_actor.assert_not_called()
+# [/DEF:test_get_dashboards_override_show_all_contract:Function]
+
+
+# [DEF:test_get_dashboards_profile_filter_no_match_results_contract:Function]
+# @TEST: GET /api/dashboards returns empty result set when profile-default filter is active and no dashboard actors match.
+# @PRE: Profile-default preference is enabled with bound username and all dashboards are non-matching.
+# @POST: Response total is 0 with deterministic pagination and active effective_profile_filter metadata.
+def test_get_dashboards_profile_filter_no_match_results_contract(mock_deps):
+ mock_env = MagicMock()
+ mock_env.id = "prod"
+ mock_deps["config"].get_environments.return_value = [mock_env]
+ mock_deps["task"].get_all_tasks.return_value = []
+ mock_deps["resource"].get_dashboards_with_status = AsyncMock(return_value=[
+ {
+ "id": 101,
+ "title": "Team Dashboard",
+ "slug": "team-dashboard",
+ "owners": ["analytics-team"],
+ "modified_by": "someone_else",
+ },
+ {
+ "id": 102,
+ "title": "Ops Dashboard",
+ "slug": "ops-dashboard",
+ "owners": ["ops-user"],
+ "modified_by": "ops-user",
+ },
+ ])
+
+ with patch("src.api.routes.dashboards.ProfileService") as profile_service_cls:
+ profile_service = MagicMock()
+ profile_service.get_my_preference.return_value = _build_profile_preference_stub(
+ username="john_doe",
+ enabled=True,
+ )
+ profile_service.matches_dashboard_actor.side_effect = _matches_actor_case_insensitive
+ profile_service_cls.return_value = profile_service
+
+ response = client.get(
+ "/api/dashboards?env_id=prod&page_context=dashboards_main&apply_profile_default=true"
+ )
+
+ assert response.status_code == 200
+ payload = response.json()
+
+ assert payload["total"] == 0
+ assert payload["dashboards"] == []
+ assert payload["page"] == 1
+ assert payload["page_size"] == 10
+ assert payload["total_pages"] == 1
+ assert payload["effective_profile_filter"]["applied"] is True
+ assert payload["effective_profile_filter"]["source_page"] == "dashboards_main"
+ assert payload["effective_profile_filter"]["override_show_all"] is False
+ assert payload["effective_profile_filter"]["username"] == "john_doe"
+ assert payload["effective_profile_filter"]["match_logic"] == "owners_or_modified_by"
+# [/DEF:test_get_dashboards_profile_filter_no_match_results_contract:Function]
+
+
+# [DEF:test_get_dashboards_page_context_other_disables_profile_default:Function]
+# @TEST: GET /api/dashboards does not auto-apply profile-default filter outside dashboards_main page context.
+# @PRE: Profile-default preference exists but page_context=other query is provided.
+# @POST: Response remains unfiltered and metadata reflects source_page=other.
+def test_get_dashboards_page_context_other_disables_profile_default(mock_deps):
+ mock_env = MagicMock()
+ mock_env.id = "prod"
+ mock_deps["config"].get_environments.return_value = [mock_env]
+ mock_deps["task"].get_all_tasks.return_value = []
+ mock_deps["resource"].get_dashboards_with_status = AsyncMock(return_value=[
+ {"id": 1, "title": "Dash A", "slug": "dash-a", "owners": ["john_doe"], "modified_by": "john_doe"},
+ {"id": 2, "title": "Dash B", "slug": "dash-b", "owners": ["other"], "modified_by": "other"},
+ ])
+
+ with patch("src.api.routes.dashboards.ProfileService") as profile_service_cls:
+ profile_service = MagicMock()
+ profile_service.get_my_preference.return_value = _build_profile_preference_stub(
+ username="john_doe",
+ enabled=True,
+ )
+ profile_service.matches_dashboard_actor.side_effect = _matches_actor_case_insensitive
+ profile_service_cls.return_value = profile_service
+
+ response = client.get(
+ "/api/dashboards?env_id=prod&page_context=other&apply_profile_default=true"
+ )
+
+ assert response.status_code == 200
+ payload = response.json()
+
+ assert payload["total"] == 2
+ assert {item["id"] for item in payload["dashboards"]} == {1, 2}
+ assert payload["effective_profile_filter"]["applied"] is False
+ assert payload["effective_profile_filter"]["source_page"] == "other"
+ assert payload["effective_profile_filter"]["override_show_all"] is False
+ assert payload["effective_profile_filter"]["username"] is None
+ assert payload["effective_profile_filter"]["match_logic"] is None
+ profile_service.matches_dashboard_actor.assert_not_called()
+# [/DEF:test_get_dashboards_page_context_other_disables_profile_default:Function]
+
+
+# [DEF:test_get_dashboards_profile_filter_matches_display_alias_without_detail_fanout:Function]
+# @TEST: GET /api/dashboards resolves Superset display-name alias once and filters without per-dashboard detail calls.
+# @PRE: Profile-default filter is active, bound username is `admin`, dashboard actors contain display labels.
+# @POST: Route matches by alias (`Superset Admin`) and does not call `SupersetClient.get_dashboard` in list filter path.
+def test_get_dashboards_profile_filter_matches_display_alias_without_detail_fanout(mock_deps):
+ mock_env = MagicMock()
+ mock_env.id = "prod"
+ mock_deps["config"].get_environments.return_value = [mock_env]
+ mock_deps["task"].get_all_tasks.return_value = []
+ mock_deps["resource"].get_dashboards_with_status = AsyncMock(return_value=[
+ {
+ "id": 5,
+ "title": "Alias Match",
+ "slug": "alias-match",
+ "owners": [],
+ "created_by": None,
+ "modified_by": "Superset Admin",
+ },
+ {
+ "id": 6,
+ "title": "Alias No Match",
+ "slug": "alias-no-match",
+ "owners": [],
+ "created_by": None,
+ "modified_by": "Other User",
+ },
+ ])
+
+ with patch("src.api.routes.dashboards.ProfileService") as profile_service_cls, patch(
+ "src.api.routes.dashboards.SupersetClient"
+ ) as superset_client_cls, patch(
+ "src.api.routes.dashboards.SupersetAccountLookupAdapter"
+ ) as lookup_adapter_cls:
+ profile_service = MagicMock()
+ profile_service.get_my_preference.return_value = _build_profile_preference_stub(
+ username="admin",
+ enabled=True,
+ )
+ profile_service.matches_dashboard_actor.side_effect = _matches_actor_case_insensitive
+ profile_service_cls.return_value = profile_service
+
+ superset_client = MagicMock()
+ superset_client_cls.return_value = superset_client
+
+ lookup_adapter = MagicMock()
+ lookup_adapter.get_users_page.return_value = {
+ "items": [
+ {
+ "environment_id": "prod",
+ "username": "admin",
+ "display_name": "Superset Admin",
+ "email": "admin@example.com",
+ "is_active": True,
+ }
+ ],
+ "total": 1,
+ }
+ lookup_adapter_cls.return_value = lookup_adapter
+
+ response = client.get(
+ "/api/dashboards?env_id=prod&page_context=dashboards_main&apply_profile_default=true"
+ )
+
+ assert response.status_code == 200
+ payload = response.json()
+ assert payload["total"] == 1
+ assert {item["id"] for item in payload["dashboards"]} == {5}
+ assert payload["effective_profile_filter"]["applied"] is True
+ lookup_adapter.get_users_page.assert_called_once()
+ superset_client.get_dashboard.assert_not_called()
+# [/DEF:test_get_dashboards_profile_filter_matches_display_alias_without_detail_fanout:Function]
+
+
# [/DEF:backend.src.api.routes.__tests__.test_dashboards:Module]
diff --git a/backend/src/api/routes/__tests__/test_profile_api.py b/backend/src/api/routes/__tests__/test_profile_api.py
new file mode 100644
index 0000000..b319a6c
--- /dev/null
+++ b/backend/src/api/routes/__tests__/test_profile_api.py
@@ -0,0 +1,243 @@
+# [DEF:backend.src.api.routes.__tests__.test_profile_api:Module]
+# @TIER: STANDARD
+# @SEMANTICS: tests, profile, api, preferences, lookup, contract
+# @PURPOSE: Verifies profile API route contracts for preference read/update and Superset account lookup.
+# @LAYER: API
+# @RELATION: TESTS -> backend.src.api.routes.profile
+
+# [SECTION: IMPORTS]
+from datetime import datetime, timezone
+from unittest.mock import MagicMock, patch
+
+from fastapi.testclient import TestClient
+
+from src.app import app
+from src.core.database import get_db
+from src.dependencies import get_config_manager, get_current_user
+from src.schemas.profile import (
+ ProfilePreference,
+ ProfilePreferenceResponse,
+ SupersetAccountCandidate,
+ SupersetAccountLookupResponse,
+)
+from src.services.profile_service import (
+ EnvironmentNotFoundError,
+ ProfileAuthorizationError,
+ ProfileValidationError,
+)
+# [/SECTION]
+
+
+client = TestClient(app)
+
+
+# [DEF:mock_profile_route_dependencies:Function]
+# @PURPOSE: Provides deterministic dependency overrides for profile route tests.
+# @PRE: App instance is initialized.
+# @POST: Dependencies are overridden for current test and restored afterward.
+def mock_profile_route_dependencies():
+ mock_user = MagicMock()
+ mock_user.id = "u-1"
+ mock_user.username = "test-user"
+
+ mock_db = MagicMock()
+ mock_config_manager = MagicMock()
+
+ app.dependency_overrides[get_current_user] = lambda: mock_user
+ app.dependency_overrides[get_db] = lambda: mock_db
+ app.dependency_overrides[get_config_manager] = lambda: mock_config_manager
+
+ return mock_user, mock_db, mock_config_manager
+# [/DEF:mock_profile_route_dependencies:Function]
+
+
+# [DEF:profile_route_deps_fixture:Function]
+# @PURPOSE: Pytest fixture wrapper for profile route dependency overrides.
+# @PRE: None.
+# @POST: Yields overridden dependencies and clears overrides after test.
+import pytest
+
+
+@pytest.fixture(autouse=True)
+def profile_route_deps_fixture():
+ yielded = mock_profile_route_dependencies()
+ yield yielded
+ app.dependency_overrides.clear()
+# [/DEF:profile_route_deps_fixture:Function]
+
+
+# [DEF:_build_preference_response:Function]
+# @PURPOSE: Builds stable profile preference response payload for route tests.
+# @PRE: user_id is provided.
+# @POST: Returns ProfilePreferenceResponse object with deterministic timestamps.
+def _build_preference_response(user_id: str = "u-1") -> ProfilePreferenceResponse:
+ now = datetime.now(timezone.utc)
+ return ProfilePreferenceResponse(
+ status="success",
+ message="Preference loaded",
+ preference=ProfilePreference(
+ user_id=user_id,
+ superset_username="John_Doe",
+ superset_username_normalized="john_doe",
+ show_only_my_dashboards=True,
+ created_at=now,
+ updated_at=now,
+ ),
+ )
+# [/DEF:_build_preference_response:Function]
+
+
+# [DEF:test_get_profile_preferences_returns_self_payload:Function]
+# @PURPOSE: Verifies GET /api/profile/preferences returns stable self-scoped payload.
+# @PRE: Authenticated user context is available.
+# @POST: Response status is 200 and payload contains current user preference.
+def test_get_profile_preferences_returns_self_payload(profile_route_deps_fixture):
+ mock_user, _, _ = profile_route_deps_fixture
+ service = MagicMock()
+ service.get_my_preference.return_value = _build_preference_response(user_id=mock_user.id)
+
+ with patch("src.api.routes.profile._get_profile_service", return_value=service):
+ response = client.get("/api/profile/preferences")
+
+ assert response.status_code == 200
+ payload = response.json()
+ assert payload["status"] == "success"
+ assert payload["preference"]["user_id"] == mock_user.id
+ assert payload["preference"]["superset_username_normalized"] == "john_doe"
+ service.get_my_preference.assert_called_once_with(mock_user)
+# [/DEF:test_get_profile_preferences_returns_self_payload:Function]
+
+
+# [DEF:test_patch_profile_preferences_success:Function]
+# @PURPOSE: Verifies PATCH /api/profile/preferences persists valid payload through route mapping.
+# @PRE: Valid request payload and authenticated user.
+# @POST: Response status is 200 with saved preference payload.
+def test_patch_profile_preferences_success(profile_route_deps_fixture):
+ mock_user, _, _ = profile_route_deps_fixture
+ service = MagicMock()
+ service.update_my_preference.return_value = _build_preference_response(user_id=mock_user.id)
+
+ with patch("src.api.routes.profile._get_profile_service", return_value=service):
+ response = client.patch(
+ "/api/profile/preferences",
+ json={
+ "superset_username": "John_Doe",
+ "show_only_my_dashboards": True,
+ },
+ )
+
+ assert response.status_code == 200
+ payload = response.json()
+ assert payload["status"] == "success"
+ assert payload["preference"]["superset_username"] == "John_Doe"
+ assert payload["preference"]["show_only_my_dashboards"] is True
+ service.update_my_preference.assert_called_once()
+# [/DEF:test_patch_profile_preferences_success:Function]
+
+
+# [DEF:test_patch_profile_preferences_validation_error:Function]
+# @PURPOSE: Verifies route maps domain validation failure to HTTP 422 with actionable details.
+# @PRE: Service raises ProfileValidationError.
+# @POST: Response status is 422 and includes validation messages.
+def test_patch_profile_preferences_validation_error(profile_route_deps_fixture):
+ service = MagicMock()
+ service.update_my_preference.side_effect = ProfileValidationError(
+ ["Superset username is required when default filter is enabled."]
+ )
+
+ with patch("src.api.routes.profile._get_profile_service", return_value=service):
+ response = client.patch(
+ "/api/profile/preferences",
+ json={
+ "superset_username": "",
+ "show_only_my_dashboards": True,
+ },
+ )
+
+ assert response.status_code == 422
+ payload = response.json()
+ assert "detail" in payload
+ assert "Superset username is required when default filter is enabled." in payload["detail"]
+# [/DEF:test_patch_profile_preferences_validation_error:Function]
+
+
+# [DEF:test_patch_profile_preferences_cross_user_denied:Function]
+# @PURPOSE: Verifies route maps domain authorization guard failure to HTTP 403.
+# @PRE: Service raises ProfileAuthorizationError.
+# @POST: Response status is 403 with denial message.
+def test_patch_profile_preferences_cross_user_denied(profile_route_deps_fixture):
+ service = MagicMock()
+ service.update_my_preference.side_effect = ProfileAuthorizationError(
+ "Cross-user preference mutation is forbidden"
+ )
+
+ with patch("src.api.routes.profile._get_profile_service", return_value=service):
+ response = client.patch(
+ "/api/profile/preferences",
+ json={
+ "superset_username": "john_doe",
+ "show_only_my_dashboards": True,
+ },
+ )
+
+ assert response.status_code == 403
+ payload = response.json()
+ assert payload["detail"] == "Cross-user preference mutation is forbidden"
+# [/DEF:test_patch_profile_preferences_cross_user_denied:Function]
+
+
+# [DEF:test_lookup_superset_accounts_success:Function]
+# @PURPOSE: Verifies lookup route returns success payload with normalized candidates.
+# @PRE: Valid environment_id and service success response.
+# @POST: Response status is 200 and items list is returned.
+def test_lookup_superset_accounts_success(profile_route_deps_fixture):
+ service = MagicMock()
+ service.lookup_superset_accounts.return_value = SupersetAccountLookupResponse(
+ status="success",
+ environment_id="dev",
+ page_index=0,
+ page_size=20,
+ total=1,
+ warning=None,
+ items=[
+ SupersetAccountCandidate(
+ environment_id="dev",
+ username="john_doe",
+ display_name="John Doe",
+ email="john@example.local",
+ is_active=True,
+ )
+ ],
+ )
+
+ with patch("src.api.routes.profile._get_profile_service", return_value=service):
+ response = client.get("/api/profile/superset-accounts?environment_id=dev")
+
+ assert response.status_code == 200
+ payload = response.json()
+ assert payload["status"] == "success"
+ assert payload["environment_id"] == "dev"
+ assert payload["total"] == 1
+ assert payload["items"][0]["username"] == "john_doe"
+# [/DEF:test_lookup_superset_accounts_success:Function]
+
+
+# [DEF:test_lookup_superset_accounts_env_not_found:Function]
+# @PURPOSE: Verifies lookup route maps missing environment to HTTP 404.
+# @PRE: Service raises EnvironmentNotFoundError.
+# @POST: Response status is 404 with explicit message.
+def test_lookup_superset_accounts_env_not_found(profile_route_deps_fixture):
+ service = MagicMock()
+ service.lookup_superset_accounts.side_effect = EnvironmentNotFoundError(
+ "Environment 'missing-env' not found"
+ )
+
+ with patch("src.api.routes.profile._get_profile_service", return_value=service):
+ response = client.get("/api/profile/superset-accounts?environment_id=missing-env")
+
+ assert response.status_code == 404
+ payload = response.json()
+ assert payload["detail"] == "Environment 'missing-env' not found"
+# [/DEF:test_lookup_superset_accounts_env_not_found:Function]
+
+# [/DEF:backend.src.api.routes.__tests__.test_profile_api:Module]
\ No newline at end of file
diff --git a/backend/src/api/routes/dashboards.py b/backend/src/api/routes/dashboards.py
index ddd5954..77de72e 100644
--- a/backend/src/api/routes/dashboards.py
+++ b/backend/src/api/routes/dashboards.py
@@ -34,14 +34,26 @@
# [SECTION: IMPORTS]
from fastapi import APIRouter, Depends, HTTPException, Query, Response
from fastapi.responses import JSONResponse
-from typing import List, Optional, Dict, Any
+from typing import List, Optional, Dict, Any, Literal
import re
from urllib.parse import urlparse
from pydantic import BaseModel, Field
-from ...dependencies import get_config_manager, get_task_manager, get_resource_service, get_mapping_service, has_permission
+from sqlalchemy.orm import Session
+from ...dependencies import (
+ get_config_manager,
+ get_task_manager,
+ get_resource_service,
+ get_mapping_service,
+ get_current_user,
+ has_permission,
+)
+from ...core.database import get_db
from ...core.logger import logger, belief_scope
from ...core.superset_client import SupersetClient
+from ...core.superset_profile_lookup import SupersetAccountLookupAdapter
from ...core.utils.network import DashboardNotFoundError
+from ...models.auth import User
+from ...services.profile_service import ProfileService
from ...services.resource_service import ResourceService
# [/SECTION]
@@ -79,6 +91,15 @@ class DashboardItem(BaseModel):
last_task: Optional[LastTask] = None
# [/DEF:DashboardItem:DataClass]
+# [DEF:EffectiveProfileFilter:DataClass]
+class EffectiveProfileFilter(BaseModel):
+ applied: bool
+ source_page: Literal["dashboards_main", "other"] = "dashboards_main"
+ override_show_all: bool = False
+ username: Optional[str] = None
+ match_logic: Optional[Literal["owners_or_modified_by"]] = None
+# [/DEF:EffectiveProfileFilter:DataClass]
+
# [DEF:DashboardsResponse:DataClass]
class DashboardsResponse(BaseModel):
dashboards: List[DashboardItem]
@@ -86,6 +107,7 @@ class DashboardsResponse(BaseModel):
page: int
page_size: int
total_pages: int
+ effective_profile_filter: Optional[EffectiveProfileFilter] = None
# [/DEF:DashboardsResponse:DataClass]
# [DEF:DashboardChartItem:DataClass]
@@ -242,6 +264,101 @@ def _dashboard_git_filter_value(dashboard: Dict[str, Any]) -> str:
return "pending"
# [/DEF:_dashboard_git_filter_value:Function]
+# [DEF:_normalize_actor_alias_token:Function]
+# @PURPOSE: Normalize actor alias token to comparable trim+lower text.
+# @PRE: value can be scalar/None.
+# @POST: Returns normalized token or None.
+def _normalize_actor_alias_token(value: Any) -> Optional[str]:
+ token = str(value or "").strip().lower()
+ return token or None
+# [/DEF:_normalize_actor_alias_token:Function]
+
+
+# [DEF:_resolve_profile_actor_aliases:Function]
+# @PURPOSE: Resolve stable actor aliases for profile filtering without per-dashboard detail fan-out.
+# @PRE: bound username is available and env is valid.
+# @POST: Returns at least normalized username; may include Superset display-name alias.
+# @SIDE_EFFECT: Performs at most one Superset users-lookup request.
+def _resolve_profile_actor_aliases(env: Any, bound_username: str) -> List[str]:
+ normalized_bound = _normalize_actor_alias_token(bound_username)
+ if not normalized_bound:
+ return []
+
+ aliases: List[str] = [normalized_bound]
+ try:
+ client = SupersetClient(env)
+ adapter = SupersetAccountLookupAdapter(
+ network_client=client.network,
+ environment_id=str(getattr(env, "id", "")),
+ )
+ lookup_payload = adapter.get_users_page(
+ search=normalized_bound,
+ page_index=0,
+ page_size=20,
+ sort_column="username",
+ sort_order="asc",
+ )
+ lookup_items = (
+ lookup_payload.get("items", [])
+ if isinstance(lookup_payload, dict)
+ else []
+ )
+
+ matched_item: Optional[Dict[str, Any]] = None
+ for item in lookup_items:
+ if not isinstance(item, dict):
+ continue
+ if _normalize_actor_alias_token(item.get("username")) == normalized_bound:
+ matched_item = item
+ break
+
+ if matched_item is None:
+ for item in lookup_items:
+ if isinstance(item, dict):
+ matched_item = item
+ break
+
+ display_alias = _normalize_actor_alias_token(
+ (matched_item or {}).get("display_name")
+ )
+ if display_alias and display_alias not in aliases:
+ aliases.append(display_alias)
+
+ logger.reflect(
+ "[REFLECT] Resolved profile actor aliases "
+ f"(env={getattr(env, 'id', None)}, bound_username={normalized_bound!r}, "
+ f"lookup_items={len(lookup_items)}, aliases={aliases!r})"
+ )
+ except Exception as alias_error:
+ logger.explore(
+ "[EXPLORE] Failed to resolve profile actor aliases via Superset users lookup "
+ f"(env={getattr(env, 'id', None)}, bound_username={normalized_bound!r}): {alias_error}"
+ )
+ return aliases
+# [/DEF:_resolve_profile_actor_aliases:Function]
+
+
+# [DEF:_matches_dashboard_actor_aliases:Function]
+# @PURPOSE: Apply profile actor matching against multiple aliases (username + optional display name).
+# @PRE: actor_aliases contains normalized non-empty tokens.
+# @POST: Returns True when any alias matches owners OR modified_by.
+def _matches_dashboard_actor_aliases(
+ profile_service: ProfileService,
+ actor_aliases: List[str],
+ owners: Optional[Any],
+ modified_by: Optional[str],
+) -> bool:
+ for actor_alias in actor_aliases:
+ if profile_service.matches_dashboard_actor(
+ bound_username=actor_alias,
+ owners=owners,
+ modified_by=modified_by,
+ ):
+ return True
+ return False
+# [/DEF:_matches_dashboard_actor_aliases:Function]
+
+
# [DEF:get_dashboards:Function]
# @PURPOSE: Fetch list of dashboards from a specific environment with Git status and last task status
# @PRE: env_id must be a valid environment ID
@@ -249,6 +366,7 @@ def _dashboard_git_filter_value(dashboard: Dict[str, Any]) -> str:
# @PRE: page_size must be between 1 and 100 if provided
# @POST: Returns a list of dashboards with enhanced metadata and pagination info
# @POST: Response includes pagination metadata (page, page_size, total, total_pages)
+# @POST: Response includes effective profile filter metadata for main dashboards page context
# @PARAM: env_id (str) - The environment ID to fetch dashboards from
# @PARAM: search (Optional[str]) - Filter by title/slug
# @PARAM: page (Optional[int]) - Page number (default: 1)
@@ -261,6 +379,9 @@ async def get_dashboards(
search: Optional[str] = None,
page: int = 1,
page_size: int = 10,
+ page_context: Literal["dashboards_main", "other"] = Query(default="dashboards_main"),
+ apply_profile_default: bool = Query(default=True),
+ override_show_all: bool = Query(default=False),
filter_title: Optional[List[str]] = Query(default=None),
filter_git_status: Optional[List[str]] = Query(default=None),
filter_llm_status: Optional[List[str]] = Query(default=None),
@@ -269,26 +390,73 @@ async def get_dashboards(
config_manager=Depends(get_config_manager),
task_manager=Depends(get_task_manager),
resource_service=Depends(get_resource_service),
+ current_user: User = Depends(get_current_user),
+ db: Session = Depends(get_db),
_ = Depends(has_permission("plugin:migration", "READ"))
):
- with belief_scope("get_dashboards", f"env_id={env_id}, search={search}, page={page}, page_size={page_size}"):
- # Validate pagination parameters
+ with belief_scope(
+ "get_dashboards",
+ (
+ f"env_id={env_id}, search={search}, page={page}, page_size={page_size}, "
+ f"page_context={page_context}, apply_profile_default={apply_profile_default}, "
+ f"override_show_all={override_show_all}"
+ ),
+ ):
if page < 1:
logger.error(f"[get_dashboards][Coherence:Failed] Invalid page: {page}")
raise HTTPException(status_code=400, detail="Page must be >= 1")
if page_size < 1 or page_size > 100:
logger.error(f"[get_dashboards][Coherence:Failed] Invalid page_size: {page_size}")
raise HTTPException(status_code=400, detail="Page size must be between 1 and 100")
-
- # Validate environment exists
+
environments = config_manager.get_environments()
env = next((e for e in environments if e.id == env_id), None)
if not env:
logger.error(f"[get_dashboards][Coherence:Failed] Environment not found: {env_id}")
raise HTTPException(status_code=404, detail="Environment not found")
-
+
+ profile_service = ProfileService(db=db, config_manager=config_manager)
+ bound_username: Optional[str] = None
+ can_apply_profile_filter = False
+ effective_profile_filter = EffectiveProfileFilter(
+ applied=False,
+ source_page=page_context,
+ override_show_all=bool(override_show_all),
+ username=None,
+ match_logic=None,
+ )
+
+ try:
+ profile_preference = profile_service.get_my_preference(current_user).preference
+ normalized_username = str(
+ getattr(profile_preference, "superset_username_normalized", None) or ""
+ ).strip().lower()
+ raw_username = str(
+ getattr(profile_preference, "superset_username", None) or ""
+ ).strip().lower()
+ bound_username = normalized_username or raw_username or None
+
+ can_apply_profile_filter = (
+ page_context == "dashboards_main"
+ and bool(apply_profile_default)
+ and not bool(override_show_all)
+ and bool(getattr(profile_preference, "show_only_my_dashboards", False))
+ and bool(bound_username)
+ )
+
+ effective_profile_filter = EffectiveProfileFilter(
+ applied=bool(can_apply_profile_filter),
+ source_page=page_context,
+ override_show_all=bool(override_show_all),
+ username=bound_username if can_apply_profile_filter else None,
+ match_logic="owners_or_modified_by" if can_apply_profile_filter else None,
+ )
+ except Exception as profile_error:
+ logger.explore(
+ f"[EXPLORE] Profile preference unavailable; continuing without profile-default filter: {profile_error}"
+ )
+
try:
- # Get all tasks for status lookup
all_tasks = task_manager.get_all_tasks()
title_filters = _normalize_filter_values(filter_title)
git_filters = _normalize_filter_values(filter_git_status)
@@ -304,9 +472,9 @@ async def get_dashboards(
actor_filters,
)
)
+ needs_full_scan = has_column_filters or bool(can_apply_profile_filter)
- # Fast path: real ResourceService -> one Superset page call per API request.
- if isinstance(resource_service, ResourceService) and not has_column_filters:
+ if isinstance(resource_service, ResourceService) and not needs_full_scan:
try:
page_payload = await resource_service.get_dashboards_page_with_status(
env,
@@ -333,9 +501,9 @@ async def get_dashboards(
if search:
search_lower = search.lower()
dashboards = [
- d for d in dashboards
- if search_lower in d.get('title', '').lower()
- or search_lower in d.get('slug', '').lower()
+ d for d in dashboards
+ if search_lower in d.get("title", "").lower()
+ or search_lower in d.get("slug", "").lower()
]
total = len(dashboards)
@@ -343,13 +511,52 @@ async def get_dashboards(
start_idx = (page - 1) * page_size
end_idx = start_idx + page_size
paginated_dashboards = dashboards[start_idx:end_idx]
- elif isinstance(resource_service, ResourceService) and has_column_filters:
+ else:
dashboards = await resource_service.get_dashboards_with_status(
env,
all_tasks,
include_git_status=bool(git_filters),
)
+ if can_apply_profile_filter and bound_username:
+ actor_aliases = _resolve_profile_actor_aliases(env, bound_username)
+ if not actor_aliases:
+ actor_aliases = [bound_username]
+ logger.reason(
+ "[REASON] Applying profile actor filter "
+ f"(env={env_id}, bound_username={bound_username}, actor_aliases={actor_aliases!r}, "
+ f"dashboards_before={len(dashboards)})"
+ )
+ filtered_dashboards: List[Dict[str, Any]] = []
+ max_actor_samples = 15
+ for index, dashboard in enumerate(dashboards):
+ owners_value = dashboard.get("owners")
+ created_by_value = dashboard.get("created_by")
+ modified_by_value = dashboard.get("modified_by")
+ matches_actor = _matches_dashboard_actor_aliases(
+ profile_service=profile_service,
+ actor_aliases=actor_aliases,
+ owners=owners_value,
+ modified_by=modified_by_value,
+ )
+ if index < max_actor_samples:
+ logger.reflect(
+ "[REFLECT] Profile actor filter sample "
+ f"(env={env_id}, dashboard_id={dashboard.get('id')}, "
+ f"bound_username={bound_username!r}, actor_aliases={actor_aliases!r}, "
+ f"owners={owners_value!r}, created_by={created_by_value!r}, "
+ f"modified_by={modified_by_value!r}, matches={matches_actor})"
+ )
+ if matches_actor:
+ filtered_dashboards.append(dashboard)
+
+ logger.reflect(
+ "[REFLECT] Profile actor filter summary "
+ f"(env={env_id}, bound_username={bound_username!r}, "
+ f"dashboards_before={len(dashboards)}, dashboards_after={len(filtered_dashboards)})"
+ )
+ dashboards = filtered_dashboards
+
if search:
search_lower = search.lower()
dashboards = [
@@ -376,13 +583,21 @@ async def get_dashboards(
return False
changed_on_raw = str(dashboard.get("last_modified") or "").strip().lower()
- changed_on_prefix = changed_on_raw[:10] if len(changed_on_raw) >= 10 else changed_on_raw
- if changed_on_filters and changed_on_raw not in changed_on_filters and changed_on_prefix not in changed_on_filters:
+ changed_on_prefix = (
+ changed_on_raw[:10] if len(changed_on_raw) >= 10 else changed_on_raw
+ )
+ if (
+ changed_on_filters
+ and changed_on_raw not in changed_on_filters
+ and changed_on_prefix not in changed_on_filters
+ ):
return False
owners = dashboard.get("owners") or []
if isinstance(owners, list):
- actor_value = ", ".join(str(item).strip() for item in owners if str(item).strip()).lower()
+ actor_value = ", ".join(
+ str(item).strip() for item in owners if str(item).strip()
+ ).lower()
else:
actor_value = str(owners).strip().lower()
if not actor_value:
@@ -391,44 +606,29 @@ async def get_dashboards(
return False
return True
- dashboards = [d for d in dashboards if _matches_dashboard_filters(d)]
- total = len(dashboards)
- total_pages = (total + page_size - 1) // page_size if total > 0 else 1
- start_idx = (page - 1) * page_size
- end_idx = start_idx + page_size
- paginated_dashboards = dashboards[start_idx:end_idx]
- else:
- # Compatibility path for mocked services in route tests.
- dashboards = await resource_service.get_dashboards_with_status(
- env,
- all_tasks,
- include_git_status=False,
- )
-
- if search:
- search_lower = search.lower()
- dashboards = [
- d for d in dashboards
- if search_lower in d.get('title', '').lower()
- or search_lower in d.get('slug', '').lower()
- ]
+ if has_column_filters:
+ dashboards = [d for d in dashboards if _matches_dashboard_filters(d)]
total = len(dashboards)
total_pages = (total + page_size - 1) // page_size if total > 0 else 1
start_idx = (page - 1) * page_size
end_idx = start_idx + page_size
paginated_dashboards = dashboards[start_idx:end_idx]
-
- logger.info(f"[get_dashboards][Coherence:OK] Returning {len(paginated_dashboards)} dashboards (page {page}/{total_pages}, total: {total})")
-
+
+ logger.info(
+ f"[get_dashboards][Coherence:OK] Returning {len(paginated_dashboards)} dashboards "
+ f"(page {page}/{total_pages}, total: {total}, profile_filter_applied={effective_profile_filter.applied})"
+ )
+
return DashboardsResponse(
dashboards=paginated_dashboards,
total=total,
page=page,
page_size=page_size,
- total_pages=total_pages
+ total_pages=total_pages,
+ effective_profile_filter=effective_profile_filter,
)
-
+
except Exception as e:
logger.error(f"[get_dashboards][Coherence:Failed] Failed to fetch dashboards: {e}")
raise HTTPException(status_code=503, detail=f"Failed to fetch dashboards: {str(e)}")
diff --git a/backend/src/api/routes/profile.py b/backend/src/api/routes/profile.py
new file mode 100644
index 0000000..7790051
--- /dev/null
+++ b/backend/src/api/routes/profile.py
@@ -0,0 +1,136 @@
+# [DEF:backend.src.api.routes.profile:Module]
+#
+# @TIER: CRITICAL
+# @SEMANTICS: api, profile, preferences, self-service, account-lookup
+# @PURPOSE: Exposes self-scoped profile preference endpoints and environment-based Superset account lookup.
+# @LAYER: API
+# @RELATION: DEPENDS_ON -> backend.src.services.profile_service
+# @RELATION: DEPENDS_ON -> backend.src.dependencies.get_current_user
+# @RELATION: DEPENDS_ON -> backend.src.core.database.get_db
+#
+# @INVARIANT: Endpoints are self-scoped and never mutate another user preference.
+# @UX_STATE: ProfileLoad -> Returns stable ProfilePreferenceResponse for authenticated user.
+# @UX_STATE: Saving -> Validation errors map to actionable 422 details.
+# @UX_STATE: LookupLoading -> Returns success/degraded Superset lookup payload.
+# @UX_FEEDBACK: Stable status/message/warning payloads support profile page feedback.
+# @UX_RECOVERY: Lookup degradation keeps manual username save path available.
+
+# [SECTION: IMPORTS]
+from typing import Optional
+
+from fastapi import APIRouter, Depends, HTTPException, Query
+from sqlalchemy.orm import Session
+
+from ...core.database import get_db
+from ...core.logger import logger, belief_scope
+from ...dependencies import get_config_manager, get_current_user
+from ...models.auth import User
+from ...schemas.profile import (
+ ProfilePreferenceResponse,
+ ProfilePreferenceUpdateRequest,
+ SupersetAccountLookupRequest,
+ SupersetAccountLookupResponse,
+)
+from ...services.profile_service import (
+ EnvironmentNotFoundError,
+ ProfileAuthorizationError,
+ ProfileService,
+ ProfileValidationError,
+)
+# [/SECTION]
+
+router = APIRouter(prefix="/api/profile", tags=["profile"])
+
+
+# [DEF:_get_profile_service:Function]
+# @PURPOSE: Build profile service for current request scope.
+# @PRE: db session and config manager are available.
+# @POST: Returns a ready ProfileService instance.
+def _get_profile_service(db: Session, config_manager) -> ProfileService:
+ return ProfileService(db=db, config_manager=config_manager)
+# [/DEF:_get_profile_service:Function]
+
+
+# [DEF:get_preferences:Function]
+# @PURPOSE: Get authenticated user's dashboard filter preference.
+# @PRE: Valid JWT and authenticated user context.
+# @POST: Returns preference payload for current user only.
+@router.get("/preferences", response_model=ProfilePreferenceResponse)
+async def get_preferences(
+ current_user: User = Depends(get_current_user),
+ db: Session = Depends(get_db),
+ config_manager=Depends(get_config_manager),
+):
+ with belief_scope("profile.get_preferences", f"user_id={current_user.id}"):
+ logger.reason("[REASON] Resolving current user preference")
+ service = _get_profile_service(db, config_manager)
+ return service.get_my_preference(current_user)
+# [/DEF:get_preferences:Function]
+
+
+# [DEF:update_preferences:Function]
+# @PURPOSE: Update authenticated user's dashboard filter preference.
+# @PRE: Valid JWT and valid request payload.
+# @POST: Persists normalized preference for current user or raises validation/authorization errors.
+@router.patch("/preferences", response_model=ProfilePreferenceResponse)
+async def update_preferences(
+ payload: ProfilePreferenceUpdateRequest,
+ current_user: User = Depends(get_current_user),
+ db: Session = Depends(get_db),
+ config_manager=Depends(get_config_manager),
+):
+ with belief_scope("profile.update_preferences", f"user_id={current_user.id}"):
+ service = _get_profile_service(db, config_manager)
+ try:
+ logger.reason("[REASON] Attempting preference save")
+ return service.update_my_preference(current_user=current_user, payload=payload)
+ except ProfileValidationError as exc:
+ logger.reflect("[REFLECT] Preference validation failed")
+ raise HTTPException(status_code=422, detail=exc.errors) from exc
+ except ProfileAuthorizationError as exc:
+ logger.explore("[EXPLORE] Cross-user mutation guard blocked request")
+ raise HTTPException(status_code=403, detail=str(exc)) from exc
+# [/DEF:update_preferences:Function]
+
+
+# [DEF:lookup_superset_accounts:Function]
+# @PURPOSE: Lookup Superset account candidates in selected environment.
+# @PRE: Valid JWT, authenticated context, and environment_id query parameter.
+# @POST: Returns success or degraded lookup payload with stable shape.
+@router.get("/superset-accounts", response_model=SupersetAccountLookupResponse)
+async def lookup_superset_accounts(
+ environment_id: str = Query(...),
+ search: Optional[str] = Query(default=None),
+ page_index: int = Query(default=0, ge=0),
+ page_size: int = Query(default=20, ge=1, le=100),
+ sort_column: str = Query(default="username"),
+ sort_order: str = Query(default="desc"),
+ current_user: User = Depends(get_current_user),
+ db: Session = Depends(get_db),
+ config_manager=Depends(get_config_manager),
+):
+ with belief_scope(
+ "profile.lookup_superset_accounts",
+ f"user_id={current_user.id}, environment_id={environment_id}",
+ ):
+ service = _get_profile_service(db, config_manager)
+ lookup_request = SupersetAccountLookupRequest(
+ environment_id=environment_id,
+ search=search,
+ page_index=page_index,
+ page_size=page_size,
+ sort_column=sort_column,
+ sort_order=sort_order,
+ )
+ try:
+ logger.reason("[REASON] Executing Superset account lookup")
+ return service.lookup_superset_accounts(
+ current_user=current_user,
+ request=lookup_request,
+ )
+ except EnvironmentNotFoundError as exc:
+ logger.explore("[EXPLORE] Lookup request references unknown environment")
+ raise HTTPException(status_code=404, detail=str(exc)) from exc
+# [/DEF:lookup_superset_accounts:Function]
+
+# [/DEF:backend.src.api.routes.profile:Module]
\ No newline at end of file
diff --git a/backend/src/app.py b/backend/src/app.py
index 3a25541..4fedeb7 100755
--- a/backend/src/app.py
+++ b/backend/src/app.py
@@ -21,7 +21,7 @@ import asyncio
from .dependencies import get_task_manager, get_scheduler_service
from .core.utils.network import NetworkError
from .core.logger import logger, belief_scope
-from .api.routes import plugins, tasks, settings, environments, mappings, migration, connections, git, storage, admin, llm, dashboards, datasets, reports, assistant, clean_release
+from .api.routes import plugins, tasks, settings, environments, mappings, migration, connections, git, storage, admin, llm, dashboards, datasets, reports, assistant, clean_release, profile
from .api import auth
# [DEF:App:Global]
@@ -134,6 +134,7 @@ app.include_router(datasets.router)
app.include_router(reports.router)
app.include_router(assistant.router, prefix="/api/assistant", tags=["Assistant"])
app.include_router(clean_release.router)
+app.include_router(profile.router)
# [DEF:api.include_routers:Action]
diff --git a/backend/src/core/__tests__/test_superset_profile_lookup.py b/backend/src/core/__tests__/test_superset_profile_lookup.py
new file mode 100644
index 0000000..660af71
--- /dev/null
+++ b/backend/src/core/__tests__/test_superset_profile_lookup.py
@@ -0,0 +1,128 @@
+# [DEF:backend.src.core.__tests__.test_superset_profile_lookup:Module]
+# @TIER: STANDARD
+# @SEMANTICS: tests, superset, profile, lookup, fallback, sorting
+# @PURPOSE: Verifies Superset profile lookup adapter payload normalization and fallback error precedence.
+# @LAYER: Domain
+# @RELATION: TESTS -> backend.src.core.superset_profile_lookup
+
+# [SECTION: IMPORTS]
+import json
+import sys
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+import pytest
+
+backend_dir = str(Path(__file__).parent.parent.parent.parent.resolve())
+if backend_dir not in sys.path:
+ sys.path.insert(0, backend_dir)
+
+from src.core.superset_profile_lookup import SupersetAccountLookupAdapter
+from src.core.utils.network import AuthenticationError, SupersetAPIError
+# [/SECTION]
+
+
+# [DEF:_RecordingNetworkClient:Class]
+# @PURPOSE: Records request payloads and returns scripted responses for deterministic adapter tests.
+class _RecordingNetworkClient:
+ # [DEF:__init__:Function]
+ # @PURPOSE: Initializes scripted network responses.
+ # @PRE: scripted_responses is ordered per expected request sequence.
+ # @POST: Instance stores response script and captures subsequent request calls.
+ def __init__(self, scripted_responses: List[Any]):
+ self._scripted_responses = scripted_responses
+ self.calls: List[Dict[str, Any]] = []
+ # [/DEF:__init__:Function]
+
+ # [DEF:request:Function]
+ # @PURPOSE: Mimics APIClient.request while capturing call arguments.
+ # @PRE: method and endpoint are provided.
+ # @POST: Returns scripted response or raises scripted exception.
+ def request(
+ self,
+ method: str,
+ endpoint: str,
+ params: Optional[Dict[str, Any]] = None,
+ **kwargs,
+ ) -> Dict[str, Any]:
+ self.calls.append(
+ {
+ "method": method,
+ "endpoint": endpoint,
+ "params": params or {},
+ }
+ )
+ index = len(self.calls) - 1
+ response = self._scripted_responses[index]
+ if isinstance(response, Exception):
+ raise response
+ return response
+ # [/DEF:request:Function]
+# [/DEF:_RecordingNetworkClient:Class]
+
+
+# [DEF:test_get_users_page_sends_lowercase_order_direction:Function]
+# @PURPOSE: Ensures adapter sends lowercase order_direction compatible with Superset rison schema.
+# @PRE: Adapter is initialized with recording network client.
+# @POST: First request query payload contains order_direction='asc' for asc sort.
+def test_get_users_page_sends_lowercase_order_direction():
+ client = _RecordingNetworkClient(
+ scripted_responses=[{"result": [{"username": "admin"}], "count": 1}]
+ )
+ adapter = SupersetAccountLookupAdapter(network_client=client, environment_id="ss-dev")
+
+ adapter.get_users_page(
+ search="admin",
+ page_index=0,
+ page_size=20,
+ sort_column="username",
+ sort_order="asc",
+ )
+
+ sent_query = json.loads(client.calls[0]["params"]["q"])
+ assert sent_query["order_direction"] == "asc"
+# [/DEF:test_get_users_page_sends_lowercase_order_direction:Function]
+
+
+# [DEF:test_get_users_page_preserves_primary_schema_error_over_fallback_auth_error:Function]
+# @PURPOSE: Ensures fallback auth error does not mask primary schema/query failure.
+# @PRE: Primary endpoint fails with SupersetAPIError and fallback fails with AuthenticationError.
+# @POST: Raised exception remains primary SupersetAPIError (non-auth) to preserve root cause.
+def test_get_users_page_preserves_primary_schema_error_over_fallback_auth_error():
+ client = _RecordingNetworkClient(
+ scripted_responses=[
+ SupersetAPIError("API Error 400: bad rison schema"),
+ AuthenticationError(),
+ ]
+ )
+ adapter = SupersetAccountLookupAdapter(network_client=client, environment_id="ss-dev")
+
+ with pytest.raises(SupersetAPIError) as exc_info:
+ adapter.get_users_page(sort_order="asc")
+
+ assert "API Error 400" in str(exc_info.value)
+ assert not isinstance(exc_info.value, AuthenticationError)
+# [/DEF:test_get_users_page_preserves_primary_schema_error_over_fallback_auth_error:Function]
+
+
+# [DEF:test_get_users_page_uses_fallback_endpoint_when_primary_fails:Function]
+# @PURPOSE: Verifies adapter retries second users endpoint and succeeds when fallback is healthy.
+# @PRE: Primary endpoint fails; fallback returns valid users payload.
+# @POST: Result status is success and both endpoints were attempted in order.
+def test_get_users_page_uses_fallback_endpoint_when_primary_fails():
+ client = _RecordingNetworkClient(
+ scripted_responses=[
+ SupersetAPIError("Primary endpoint failed"),
+ {"result": [{"username": "admin"}], "count": 1},
+ ]
+ )
+ adapter = SupersetAccountLookupAdapter(network_client=client, environment_id="ss-dev")
+
+ result = adapter.get_users_page()
+
+ assert result["status"] == "success"
+ assert [call["endpoint"] for call in client.calls] == ["/security/users/", "/security/users"]
+# [/DEF:test_get_users_page_uses_fallback_endpoint_when_primary_fails:Function]
+
+
+# [/DEF:backend.src.core.__tests__.test_superset_profile_lookup:Module]
\ No newline at end of file
diff --git a/backend/src/core/auth/repository.py b/backend/src/core/auth/repository.py
index 3cb4e6d..c7bba45 100644
--- a/backend/src/core/auth/repository.py
+++ b/backend/src/core/auth/repository.py
@@ -12,6 +12,7 @@
from typing import Optional, List
from sqlalchemy.orm import Session
from ...models.auth import User, Role, Permission
+from ...models.profile import UserDashboardPreference
from ..logger import belief_scope
# [/SECTION]
@@ -109,6 +110,38 @@ class AuthRepository:
).first()
# [/DEF:get_permission_by_resource_action:Function]
+ # [DEF:get_user_dashboard_preference:Function]
+ # @PURPOSE: Retrieves dashboard preference by owner user ID.
+ # @PRE: user_id is a string.
+ # @POST: Returns UserDashboardPreference if found, else None.
+ # @PARAM: user_id (str) - Preference owner identifier.
+ # @RETURN: Optional[UserDashboardPreference] - Found preference or None.
+ def get_user_dashboard_preference(self, user_id: str) -> Optional[UserDashboardPreference]:
+ with belief_scope("AuthRepository.get_user_dashboard_preference"):
+ return (
+ self.db.query(UserDashboardPreference)
+ .filter(UserDashboardPreference.user_id == user_id)
+ .first()
+ )
+ # [/DEF:get_user_dashboard_preference:Function]
+
+ # [DEF:save_user_dashboard_preference:Function]
+ # @PURPOSE: Persists dashboard preference entity and returns refreshed row.
+ # @PRE: preference is a valid UserDashboardPreference entity.
+ # @POST: Preference is committed and refreshed in database.
+ # @PARAM: preference (UserDashboardPreference) - Preference entity to persist.
+ # @RETURN: UserDashboardPreference - Persisted preference row.
+ def save_user_dashboard_preference(
+ self,
+ preference: UserDashboardPreference,
+ ) -> UserDashboardPreference:
+ with belief_scope("AuthRepository.save_user_dashboard_preference"):
+ self.db.add(preference)
+ self.db.commit()
+ self.db.refresh(preference)
+ return preference
+ # [/DEF:save_user_dashboard_preference:Function]
+
# [DEF:list_permissions:Function]
# @PURPOSE: Lists all available permissions.
# @POST: Returns a list of all Permission objects.
diff --git a/backend/src/core/database.py b/backend/src/core/database.py
index ba0838d..2b7e54d 100644
--- a/backend/src/core/database.py
+++ b/backend/src/core/database.py
@@ -20,6 +20,7 @@ from ..models import auth as _auth_models # noqa: F401
from ..models import config as _config_models # noqa: F401
from ..models import llm as _llm_models # noqa: F401
from ..models import assistant as _assistant_models # noqa: F401
+from ..models import profile as _profile_models # noqa: F401
from .logger import belief_scope
from .auth.config import auth_config
import os
diff --git a/backend/src/core/superset_client.py b/backend/src/core/superset_client.py
index ed32ab9..52b3907 100644
--- a/backend/src/core/superset_client.py
+++ b/backend/src/core/superset_client.py
@@ -159,14 +159,37 @@ class SupersetClient:
# Map fields to DashboardMetadata schema
result = []
- for dash in dashboards:
- owners = self._extract_owner_labels(dash.get("owners"))
+ max_debug_samples = 12
+ for index, dash in enumerate(dashboards):
+ raw_owners = dash.get("owners")
+ raw_created_by = dash.get("created_by")
+ raw_changed_by = dash.get("changed_by")
+ raw_changed_by_name = dash.get("changed_by_name")
+
+ owners = self._extract_owner_labels(raw_owners)
# No per-dashboard detail requests here: keep list endpoint O(1).
if not owners:
owners = self._extract_owner_labels(
- [dash.get("created_by"), dash.get("changed_by")],
+ [raw_created_by, raw_changed_by],
)
+ projected_created_by = self._extract_user_display(
+ None,
+ raw_created_by,
+ )
+ projected_modified_by = self._extract_user_display(
+ raw_changed_by_name,
+ raw_changed_by,
+ )
+
+ raw_owner_usernames: List[str] = []
+ if isinstance(raw_owners, list):
+ for owner_payload in raw_owners:
+ if isinstance(owner_payload, dict):
+ owner_username = self._sanitize_user_text(owner_payload.get("username"))
+ if owner_username:
+ raw_owner_usernames.append(owner_username)
+
result.append({
"id": dash.get("id"),
"slug": dash.get("slug"),
@@ -174,16 +197,26 @@ class SupersetClient:
"url": dash.get("url"),
"last_modified": dash.get("changed_on_utc"),
"status": "published" if dash.get("published") else "draft",
- "created_by": self._extract_user_display(
- None,
- dash.get("created_by"),
- ),
- "modified_by": self._extract_user_display(
- dash.get("changed_by_name"),
- dash.get("changed_by"),
- ),
+ "created_by": projected_created_by,
+ "modified_by": projected_modified_by,
"owners": owners,
})
+
+ if index < max_debug_samples:
+ app_logger.reflect(
+ "[REFLECT] Dashboard actor projection sample "
+ f"(env={getattr(self.env, 'id', None)}, dashboard_id={dash.get('id')}, "
+ f"raw_owners={raw_owners!r}, raw_owner_usernames={raw_owner_usernames!r}, "
+ f"raw_created_by={raw_created_by!r}, raw_changed_by={raw_changed_by!r}, "
+ f"raw_changed_by_name={raw_changed_by_name!r}, projected_owners={owners!r}, "
+ f"projected_created_by={projected_created_by!r}, projected_modified_by={projected_modified_by!r})"
+ )
+
+ app_logger.reflect(
+ "[REFLECT] Dashboard actor projection summary "
+ f"(env={getattr(self.env, 'id', None)}, dashboards={len(result)}, "
+ f"sampled={min(len(result), max_debug_samples)})"
+ )
return result
# [/DEF:get_dashboards_summary:Function]
diff --git a/backend/src/core/superset_profile_lookup.py b/backend/src/core/superset_profile_lookup.py
new file mode 100644
index 0000000..d8a5d36
--- /dev/null
+++ b/backend/src/core/superset_profile_lookup.py
@@ -0,0 +1,238 @@
+# [DEF:backend.src.core.superset_profile_lookup:Module]
+#
+# @TIER: STANDARD
+# @SEMANTICS: superset, users, lookup, profile, pagination, normalization
+# @PURPOSE: Provides environment-scoped Superset account lookup adapter with stable normalized output.
+# @LAYER: Core
+# @RELATION: DEPENDS_ON -> backend.src.core.utils.network.APIClient
+# @RELATION: DEPENDS_ON -> backend.src.core.logger
+#
+# @INVARIANT: Adapter never leaks raw upstream payload shape to API consumers.
+
+# [SECTION: IMPORTS]
+import json
+from typing import Any, Dict, List, Optional
+
+from .logger import logger, belief_scope
+from .utils.network import APIClient, AuthenticationError, SupersetAPIError
+# [/SECTION]
+
+
+# [DEF:SupersetAccountLookupAdapter:Class]
+# @TIER: STANDARD
+# @PURPOSE: Lookup Superset users and normalize candidates for profile binding.
+class SupersetAccountLookupAdapter:
+ # [DEF:__init__:Function]
+ # @PURPOSE: Initializes lookup adapter with authenticated API client and environment context.
+ # @PRE: network_client supports request(method, endpoint, params=...).
+ # @POST: Adapter is ready to perform users lookup requests.
+ def __init__(self, network_client: APIClient, environment_id: str):
+ self.network_client = network_client
+ self.environment_id = str(environment_id or "")
+ # [/DEF:__init__:Function]
+
+ # [DEF:get_users_page:Function]
+ # @PURPOSE: Fetch one users page from Superset with passthrough search/sort parameters.
+ # @PRE: page_index >= 0 and page_size >= 1.
+ # @POST: Returns deterministic payload with normalized items and total count.
+ # @RETURN: Dict[str, Any]
+ def get_users_page(
+ self,
+ search: Optional[str] = None,
+ page_index: int = 0,
+ page_size: int = 20,
+ sort_column: str = "username",
+ sort_order: str = "desc",
+ ) -> Dict[str, Any]:
+ with belief_scope("SupersetAccountLookupAdapter.get_users_page"):
+ normalized_page_index = max(int(page_index), 0)
+ normalized_page_size = max(int(page_size), 1)
+
+ normalized_sort_column = str(sort_column or "username").strip().lower() or "username"
+ normalized_sort_order = str(sort_order or "desc").strip().lower()
+ if normalized_sort_order not in {"asc", "desc"}:
+ normalized_sort_order = "desc"
+
+ query: Dict[str, Any] = {
+ "page": normalized_page_index,
+ "page_size": normalized_page_size,
+ "order_column": normalized_sort_column,
+ "order_direction": normalized_sort_order,
+ }
+
+ normalized_search = str(search or "").strip()
+ if normalized_search:
+ query["filters"] = [{"col": "username", "opr": "ct", "value": normalized_search}]
+
+ logger.reason(
+ "[REASON] Lookup Superset users "
+ f"(env={self.environment_id}, page={normalized_page_index}, page_size={normalized_page_size})"
+ )
+ logger.reflect(
+ "[REFLECT] Prepared Superset users lookup query "
+ f"(env={self.environment_id}, order_column={normalized_sort_column}, "
+ f"normalized_sort_order={normalized_sort_order}, "
+ f"payload_order_direction={query.get('order_direction')})"
+ )
+
+ primary_error: Optional[Exception] = None
+ last_error: Optional[Exception] = None
+ for attempt_index, endpoint in enumerate(("/security/users/", "/security/users"), start=1):
+ try:
+ logger.reason(
+ "[REASON] Users lookup request attempt "
+ f"(env={self.environment_id}, attempt={attempt_index}, endpoint={endpoint})"
+ )
+ response = self.network_client.request(
+ method="GET",
+ endpoint=endpoint,
+ params={"q": json.dumps(query)},
+ )
+ logger.reflect(
+ "[REFLECT] Users lookup endpoint succeeded "
+ f"(env={self.environment_id}, attempt={attempt_index}, endpoint={endpoint})"
+ )
+ return self._normalize_lookup_payload(
+ response=response,
+ page_index=normalized_page_index,
+ page_size=normalized_page_size,
+ )
+ except Exception as exc:
+ if primary_error is None:
+ primary_error = exc
+ last_error = exc
+ cause = getattr(exc, "__cause__", None)
+ cause_response = getattr(cause, "response", None)
+ status_code = getattr(cause_response, "status_code", None)
+ logger.explore(
+ "[EXPLORE] Users lookup endpoint failed "
+ f"(env={self.environment_id}, attempt={attempt_index}, endpoint={endpoint}, "
+ f"error_type={type(exc).__name__}, status_code={status_code}, "
+ f"payload_order_direction={query.get('order_direction')}): {exc}"
+ )
+
+ if last_error is not None:
+ selected_error: Exception = last_error
+ if (
+ primary_error is not None
+ and primary_error is not last_error
+ and isinstance(last_error, AuthenticationError)
+ and not isinstance(primary_error, AuthenticationError)
+ ):
+ selected_error = primary_error
+ logger.reflect(
+ "[REFLECT] Preserving primary lookup failure over fallback auth error "
+ f"(env={self.environment_id}, primary_error_type={type(primary_error).__name__}, "
+ f"fallback_error_type={type(last_error).__name__})"
+ )
+
+ logger.explore(
+ "[EXPLORE] All Superset users lookup endpoints failed "
+ f"(env={self.environment_id}, payload_order_direction={query.get('order_direction')}, "
+ f"selected_error_type={type(selected_error).__name__})"
+ )
+ raise selected_error
+ raise SupersetAPIError("Superset users lookup failed without explicit error")
+ # [/DEF:get_users_page:Function]
+
+ # [DEF:_normalize_lookup_payload:Function]
+ # @PURPOSE: Convert Superset users response variants into stable candidates payload.
+ # @PRE: response can be dict/list in any supported upstream shape.
+ # @POST: Output contains canonical keys: status, environment_id, page_index, page_size, total, items.
+ # @RETURN: Dict[str, Any]
+ def _normalize_lookup_payload(
+ self,
+ response: Any,
+ page_index: int,
+ page_size: int,
+ ) -> Dict[str, Any]:
+ with belief_scope("SupersetAccountLookupAdapter._normalize_lookup_payload"):
+ payload = response
+ if isinstance(payload, dict) and isinstance(payload.get("result"), dict):
+ payload = payload.get("result")
+
+ raw_items: List[Any] = []
+ total = 0
+
+ if isinstance(payload, dict):
+ if isinstance(payload.get("result"), list):
+ raw_items = payload.get("result") or []
+ total = int(payload.get("count", len(raw_items)) or 0)
+ elif isinstance(payload.get("users"), list):
+ raw_items = payload.get("users") or []
+ total = int(payload.get("total", len(raw_items)) or 0)
+ elif isinstance(payload.get("items"), list):
+ raw_items = payload.get("items") or []
+ total = int(payload.get("total", len(raw_items)) or 0)
+ elif isinstance(payload, list):
+ raw_items = payload
+ total = len(raw_items)
+
+ normalized_items: List[Dict[str, Any]] = []
+ seen_usernames = set()
+
+ for raw_user in raw_items:
+ candidate = self.normalize_user_payload(raw_user)
+ username_key = str(candidate.get("username") or "").strip().lower()
+ if not username_key:
+ continue
+ if username_key in seen_usernames:
+ continue
+ seen_usernames.add(username_key)
+ normalized_items.append(candidate)
+
+ logger.reflect(
+ "[REFLECT] Normalized lookup payload "
+ f"(env={self.environment_id}, items={len(normalized_items)}, total={max(total, len(normalized_items))})"
+ )
+
+ return {
+ "status": "success",
+ "environment_id": self.environment_id,
+ "page_index": max(int(page_index), 0),
+ "page_size": max(int(page_size), 1),
+ "total": max(int(total), len(normalized_items)),
+ "items": normalized_items,
+ }
+ # [/DEF:_normalize_lookup_payload:Function]
+
+ # [DEF:normalize_user_payload:Function]
+ # @PURPOSE: Project raw Superset user object to canonical candidate shape.
+ # @PRE: raw_user may have heterogenous key names between Superset versions.
+ # @POST: Returns normalized candidate keys (environment_id, username, display_name, email, is_active).
+ # @RETURN: Dict[str, Any]
+ def normalize_user_payload(self, raw_user: Any) -> Dict[str, Any]:
+ if not isinstance(raw_user, dict):
+ raw_user = {}
+
+ username = str(
+ raw_user.get("username")
+ or raw_user.get("userName")
+ or raw_user.get("name")
+ or ""
+ ).strip()
+
+ full_name = str(raw_user.get("full_name") or "").strip()
+ first_name = str(raw_user.get("first_name") or "").strip()
+ last_name = str(raw_user.get("last_name") or "").strip()
+ display_name = full_name or " ".join(
+ part for part in [first_name, last_name] if part
+ ).strip()
+ if not display_name:
+ display_name = username or None
+
+ email = str(raw_user.get("email") or "").strip() or None
+ is_active_raw = raw_user.get("is_active")
+ is_active = bool(is_active_raw) if is_active_raw is not None else None
+
+ return {
+ "environment_id": self.environment_id,
+ "username": username,
+ "display_name": display_name,
+ "email": email,
+ "is_active": is_active,
+ }
+ # [/DEF:normalize_user_payload:Function]
+# [/DEF:SupersetAccountLookupAdapter:Class]
+
+# [/DEF:backend.src.core.superset_profile_lookup:Module]
\ No newline at end of file
diff --git a/backend/src/models/profile.py b/backend/src/models/profile.py
new file mode 100644
index 0000000..a1ad6cd
--- /dev/null
+++ b/backend/src/models/profile.py
@@ -0,0 +1,46 @@
+# [DEF:backend.src.models.profile:Module]
+#
+# @TIER: STANDARD
+# @SEMANTICS: profile, preferences, persistence, user, dashboard-filter, sqlalchemy
+# @PURPOSE: Defines persistent per-user dashboard filter preferences.
+# @LAYER: Domain
+# @RELATION: DEPENDS_ON -> backend.src.models.auth
+# @RELATION: INHERITS_FROM -> backend.src.models.mapping.Base
+#
+# @INVARIANT: Exactly one preference row exists per user_id.
+
+# [SECTION: IMPORTS]
+import uuid
+from datetime import datetime
+from sqlalchemy import Column, String, Boolean, DateTime, ForeignKey
+from sqlalchemy.orm import relationship
+from .mapping import Base
+# [/SECTION]
+
+
+# [DEF:UserDashboardPreference:Class]
+# @TIER: STANDARD
+# @PURPOSE: Stores Superset username binding and default "my dashboards" toggle for one authenticated user.
+class UserDashboardPreference(Base):
+ __tablename__ = "user_dashboard_preferences"
+
+ id = Column(String, primary_key=True, default=lambda: str(uuid.uuid4()))
+ user_id = Column(String, ForeignKey("users.id"), nullable=False, unique=True, index=True)
+
+ superset_username = Column(String, nullable=True)
+ superset_username_normalized = Column(String, nullable=True, index=True)
+
+ show_only_my_dashboards = Column(Boolean, nullable=False, default=False)
+
+ created_at = Column(DateTime, nullable=False, default=datetime.utcnow)
+ updated_at = Column(
+ DateTime,
+ nullable=False,
+ default=datetime.utcnow,
+ onupdate=datetime.utcnow,
+ )
+
+ user = relationship("User")
+# [/DEF:UserDashboardPreference:Class]
+
+# [/DEF:backend.src.models.profile:Module]
\ No newline at end of file
diff --git a/backend/src/schemas/profile.py b/backend/src/schemas/profile.py
new file mode 100644
index 0000000..02f1bc3
--- /dev/null
+++ b/backend/src/schemas/profile.py
@@ -0,0 +1,98 @@
+# [DEF:backend.src.schemas.profile:Module]
+#
+# @TIER: STANDARD
+# @SEMANTICS: profile, schemas, pydantic, preferences, superset, lookup
+# @PURPOSE: Defines API schemas for profile preference persistence and Superset account lookup flows.
+# @LAYER: API
+# @RELATION: DEPENDS_ON -> pydantic
+#
+# @INVARIANT: Schema shapes stay stable for profile UI states and dashboards filter metadata.
+
+# [SECTION: IMPORTS]
+from datetime import datetime
+from typing import List, Literal, Optional
+from pydantic import BaseModel, Field
+# [/SECTION]
+
+
+# [DEF:ProfilePreference:Class]
+# @TIER: STANDARD
+# @PURPOSE: Represents persisted profile preference for a single authenticated user.
+class ProfilePreference(BaseModel):
+ user_id: str
+ superset_username: Optional[str] = None
+ superset_username_normalized: Optional[str] = None
+ show_only_my_dashboards: bool = False
+ created_at: datetime
+ updated_at: datetime
+
+ class Config:
+ from_attributes = True
+# [/DEF:ProfilePreference:Class]
+
+
+# [DEF:ProfilePreferenceUpdateRequest:Class]
+# @TIER: STANDARD
+# @PURPOSE: Request payload for updating current user's dashboard filter preference.
+class ProfilePreferenceUpdateRequest(BaseModel):
+ superset_username: Optional[str] = Field(
+ default=None,
+ description="Apache Superset username bound to current user profile.",
+ )
+ show_only_my_dashboards: bool = Field(
+ default=False,
+ description='When true, "/dashboards" can auto-apply profile filter in main context.',
+ )
+# [/DEF:ProfilePreferenceUpdateRequest:Class]
+
+
+# [DEF:ProfilePreferenceResponse:Class]
+# @TIER: STANDARD
+# @PURPOSE: Response envelope for profile preference read/update endpoints.
+class ProfilePreferenceResponse(BaseModel):
+ status: Literal["success", "error"] = "success"
+ message: Optional[str] = None
+ validation_errors: List[str] = Field(default_factory=list)
+ preference: ProfilePreference
+# [/DEF:ProfilePreferenceResponse:Class]
+
+
+# [DEF:SupersetAccountLookupRequest:Class]
+# @TIER: STANDARD
+# @PURPOSE: Query contract for Superset account lookup by selected environment.
+class SupersetAccountLookupRequest(BaseModel):
+ environment_id: str
+ search: Optional[str] = None
+ page_index: int = Field(default=0, ge=0)
+ page_size: int = Field(default=20, ge=1, le=100)
+ sort_column: str = Field(default="username")
+ sort_order: str = Field(default="desc")
+# [/DEF:SupersetAccountLookupRequest:Class]
+
+
+# [DEF:SupersetAccountCandidate:Class]
+# @TIER: STANDARD
+# @PURPOSE: Canonical account candidate projected from Superset users payload.
+class SupersetAccountCandidate(BaseModel):
+ environment_id: str
+ username: str
+ display_name: Optional[str] = None
+ email: Optional[str] = None
+ is_active: Optional[bool] = None
+# [/DEF:SupersetAccountCandidate:Class]
+
+
+# [DEF:SupersetAccountLookupResponse:Class]
+# @TIER: STANDARD
+# @PURPOSE: Response envelope for Superset account lookup (success or degraded mode).
+class SupersetAccountLookupResponse(BaseModel):
+ status: Literal["success", "degraded"]
+ environment_id: str
+ page_index: int = Field(ge=0)
+ page_size: int = Field(ge=1, le=100)
+ total: int = Field(ge=0)
+ warning: Optional[str] = None
+ items: List[SupersetAccountCandidate] = Field(default_factory=list)
+# [/DEF:SupersetAccountLookupResponse:Class]
+
+# [/DEF:backend.src.schemas.profile:Module]
\ No newline at end of file
diff --git a/backend/src/services/profile_service.py b/backend/src/services/profile_service.py
new file mode 100644
index 0000000..46aba55
--- /dev/null
+++ b/backend/src/services/profile_service.py
@@ -0,0 +1,353 @@
+# [DEF:backend.src.services.profile_service:Module]
+#
+# @TIER: CRITICAL
+# @SEMANTICS: profile, service, validation, ownership, filtering, superset, preferences
+# @PURPOSE: Orchestrates profile preference persistence, Superset account lookup, and deterministic actor matching.
+# @LAYER: Domain
+# @RELATION: DEPENDS_ON -> backend.src.models.profile
+# @RELATION: DEPENDS_ON -> backend.src.schemas.profile
+# @RELATION: DEPENDS_ON -> backend.src.core.superset_client
+# @RELATION: DEPENDS_ON -> backend.src.core.auth.repository
+# @RELATION: DEPENDS_ON -> backend.src.models.auth
+# @RELATION: DEPENDS_ON -> sqlalchemy.orm.Session
+#
+# @INVARIANT: Preference mutations are always scoped to authenticated user identity.
+# @INVARIANT: Username normalization is trim+lower and shared by save and matching paths.
+#
+# @TEST_CONTRACT: ProfilePreferenceUpdateRequest -> ProfilePreferenceResponse
+# @TEST_FIXTURE: valid_profile_update -> {"user_id":"u-1","superset_username":"John_Doe","show_only_my_dashboards":true}
+# @TEST_EDGE: enable_without_username -> toggle=true with empty username returns validation error
+# @TEST_EDGE: cross_user_mutation -> attempt to update another user preference returns forbidden
+# @TEST_EDGE: lookup_env_not_found -> unknown environment_id returns not found
+# @TEST_INVARIANT: normalization_consistency -> VERIFIED_BY: [valid_profile_update, enable_without_username]
+
+# [SECTION: IMPORTS]
+from datetime import datetime
+from typing import Any, Iterable, List, Optional, Sequence
+from sqlalchemy.orm import Session
+
+from ..core.auth.repository import AuthRepository
+from ..core.logger import logger, belief_scope
+from ..core.superset_client import SupersetClient
+from ..core.superset_profile_lookup import SupersetAccountLookupAdapter
+from ..models.auth import User
+from ..models.profile import UserDashboardPreference
+from ..schemas.profile import (
+ ProfilePreference,
+ ProfilePreferenceResponse,
+ ProfilePreferenceUpdateRequest,
+ SupersetAccountLookupRequest,
+ SupersetAccountLookupResponse,
+ SupersetAccountCandidate,
+)
+# [/SECTION]
+
+
+# [DEF:ProfileValidationError:Class]
+# @TIER: STANDARD
+# @PURPOSE: Domain validation error for profile preference update requests.
+class ProfileValidationError(Exception):
+ def __init__(self, errors: Sequence[str]):
+ self.errors = list(errors)
+ super().__init__("Profile preference validation failed")
+# [/DEF:ProfileValidationError:Class]
+
+
+# [DEF:EnvironmentNotFoundError:Class]
+# @TIER: STANDARD
+# @PURPOSE: Raised when environment_id from lookup request is unknown in app configuration.
+class EnvironmentNotFoundError(Exception):
+ pass
+# [/DEF:EnvironmentNotFoundError:Class]
+
+
+# [DEF:ProfileAuthorizationError:Class]
+# @TIER: STANDARD
+# @PURPOSE: Raised when caller attempts cross-user preference mutation.
+class ProfileAuthorizationError(Exception):
+ pass
+# [/DEF:ProfileAuthorizationError:Class]
+
+
+# [DEF:ProfileService:Class]
+# @TIER: CRITICAL
+# @PURPOSE: Implements profile preference read/update flow and Superset account lookup degradation strategy.
+class ProfileService:
+ # [DEF:__init__:Function]
+ # @PURPOSE: Initialize service with DB session and config manager.
+ # @PRE: db session is active and config_manager supports get_environments().
+ # @POST: Service is ready for preference persistence and lookup operations.
+ def __init__(self, db: Session, config_manager: Any):
+ self.db = db
+ self.config_manager = config_manager
+ self.auth_repository = AuthRepository(db)
+ # [/DEF:__init__:Function]
+
+ # [DEF:get_my_preference:Function]
+ # @PURPOSE: Return current user's persisted preference or default non-configured view.
+ # @PRE: current_user is authenticated.
+ # @POST: Returned payload belongs to current_user only.
+ def get_my_preference(self, current_user: User) -> ProfilePreferenceResponse:
+ with belief_scope("ProfileService.get_my_preference", f"user_id={current_user.id}"):
+ logger.reflect("[REFLECT] Loading current user's dashboard preference")
+ preference = self._get_preference_row(current_user.id)
+ if preference is None:
+ return ProfilePreferenceResponse(
+ status="success",
+ message="Preference not configured yet",
+ preference=self._build_default_preference(current_user.id),
+ )
+ return ProfilePreferenceResponse(
+ status="success",
+ message="Preference loaded",
+ preference=ProfilePreference.model_validate(preference, from_attributes=True),
+ )
+ # [/DEF:get_my_preference:Function]
+
+ # [DEF:update_my_preference:Function]
+ # @PURPOSE: Validate and persist current user's profile preference in self-scoped mode.
+ # @PRE: current_user is authenticated and payload is provided.
+ # @POST: Preference row for current_user is created/updated when validation passes.
+ def update_my_preference(
+ self,
+ current_user: User,
+ payload: ProfilePreferenceUpdateRequest,
+ target_user_id: Optional[str] = None,
+ ) -> ProfilePreferenceResponse:
+ with belief_scope("ProfileService.update_my_preference", f"user_id={current_user.id}"):
+ logger.reason("[REASON] Evaluating self-scope guard before preference mutation")
+ requested_user_id = str(target_user_id or current_user.id)
+ if requested_user_id != str(current_user.id):
+ logger.explore("[EXPLORE] Cross-user mutation attempt blocked")
+ raise ProfileAuthorizationError("Cross-user preference mutation is forbidden")
+
+ validation_errors = self._validate_update_payload(payload)
+ if validation_errors:
+ logger.reflect("[REFLECT] Validation failed; mutation is denied")
+ raise ProfileValidationError(validation_errors)
+
+ normalized_username = self._normalize_username(payload.superset_username)
+ raw_username = self._sanitize_username(payload.superset_username)
+
+ preference = self._get_or_create_preference_row(current_user.id)
+ preference.superset_username = raw_username
+ preference.superset_username_normalized = normalized_username
+ preference.show_only_my_dashboards = bool(payload.show_only_my_dashboards)
+ preference.updated_at = datetime.utcnow()
+
+ self.auth_repository.save_user_dashboard_preference(preference)
+
+ logger.reason("[REASON] Preference persisted successfully")
+ return ProfilePreferenceResponse(
+ status="success",
+ message="Preference saved",
+ preference=ProfilePreference.model_validate(preference, from_attributes=True),
+ )
+ # [/DEF:update_my_preference:Function]
+
+ # [DEF:lookup_superset_accounts:Function]
+ # @PURPOSE: Query Superset users in selected environment and project canonical account candidates.
+ # @PRE: current_user is authenticated and environment_id exists.
+ # @POST: Returns success payload or degraded payload with warning while preserving manual fallback.
+ def lookup_superset_accounts(
+ self,
+ current_user: User,
+ request: SupersetAccountLookupRequest,
+ ) -> SupersetAccountLookupResponse:
+ with belief_scope(
+ "ProfileService.lookup_superset_accounts",
+ f"user_id={current_user.id}, environment_id={request.environment_id}",
+ ):
+ environment = self._resolve_environment(request.environment_id)
+ if environment is None:
+ logger.explore("[EXPLORE] Lookup aborted: environment not found")
+ raise EnvironmentNotFoundError(f"Environment '{request.environment_id}' not found")
+
+ sort_column = str(request.sort_column or "username").strip().lower()
+ sort_order = str(request.sort_order or "desc").strip().lower()
+ allowed_columns = {"username", "first_name", "last_name", "email"}
+ if sort_column not in allowed_columns:
+ sort_column = "username"
+ if sort_order not in {"asc", "desc"}:
+ sort_order = "desc"
+
+ logger.reflect(
+ "[REFLECT] Normalized lookup request "
+ f"(env={request.environment_id}, sort_column={sort_column}, sort_order={sort_order}, "
+ f"page_index={request.page_index}, page_size={request.page_size}, "
+ f"search={(request.search or '').strip()!r})"
+ )
+
+ try:
+ logger.reason("[REASON] Performing Superset account lookup")
+ superset_client = SupersetClient(environment)
+ adapter = SupersetAccountLookupAdapter(
+ network_client=superset_client.network,
+ environment_id=request.environment_id,
+ )
+ lookup_result = adapter.get_users_page(
+ search=request.search,
+ page_index=request.page_index,
+ page_size=request.page_size,
+ sort_column=sort_column,
+ sort_order=sort_order,
+ )
+ items = [
+ SupersetAccountCandidate.model_validate(item)
+ for item in lookup_result.get("items", [])
+ ]
+ return SupersetAccountLookupResponse(
+ status="success",
+ environment_id=request.environment_id,
+ page_index=request.page_index,
+ page_size=request.page_size,
+ total=max(int(lookup_result.get("total", len(items))), 0),
+ warning=None,
+ items=items,
+ )
+ except Exception as exc:
+ logger.explore(f"[EXPLORE] Lookup degraded due to upstream error: {exc}")
+ return SupersetAccountLookupResponse(
+ status="degraded",
+ environment_id=request.environment_id,
+ page_index=request.page_index,
+ page_size=request.page_size,
+ total=0,
+ warning=(
+ "Cannot load Superset accounts for this environment right now. "
+ "You can enter username manually."
+ ),
+ items=[],
+ )
+ # [/DEF:lookup_superset_accounts:Function]
+
+ # [DEF:matches_dashboard_actor:Function]
+ # @PURPOSE: Apply trim+case-insensitive actor match across owners OR modified_by.
+ # @PRE: bound_username can be empty; owners may contain mixed payload.
+ # @POST: Returns True when normalized username matches owners or modified_by.
+ def matches_dashboard_actor(
+ self,
+ bound_username: Optional[str],
+ owners: Optional[Iterable[Any]],
+ modified_by: Optional[str],
+ ) -> bool:
+ normalized_actor = self._normalize_username(bound_username)
+ if not normalized_actor:
+ return False
+
+ owner_tokens = self._normalize_owner_tokens(owners)
+ modified_token = self._normalize_username(modified_by)
+
+ if normalized_actor in owner_tokens:
+ return True
+ if modified_token and normalized_actor == modified_token:
+ return True
+ return False
+ # [/DEF:matches_dashboard_actor:Function]
+
+ # [DEF:_resolve_environment:Function]
+ # @PURPOSE: Resolve environment model from configured environments by id.
+ # @PRE: environment_id is provided.
+ # @POST: Returns environment object when found else None.
+ def _resolve_environment(self, environment_id: str):
+ environments = self.config_manager.get_environments()
+ for env in environments:
+ if str(getattr(env, "id", "")) == str(environment_id):
+ return env
+ return None
+ # [/DEF:_resolve_environment:Function]
+
+ # [DEF:_get_preference_row:Function]
+ # @PURPOSE: Return persisted preference row for user or None.
+ # @PRE: user_id is provided.
+ # @POST: Returns matching row or None.
+ def _get_preference_row(self, user_id: str) -> Optional[UserDashboardPreference]:
+ return self.auth_repository.get_user_dashboard_preference(str(user_id))
+ # [/DEF:_get_preference_row:Function]
+
+ # [DEF:_get_or_create_preference_row:Function]
+ # @PURPOSE: Return existing preference row or create new unsaved row.
+ # @PRE: user_id is provided.
+ # @POST: Returned row always contains user_id.
+ def _get_or_create_preference_row(self, user_id: str) -> UserDashboardPreference:
+ existing = self._get_preference_row(user_id)
+ if existing is not None:
+ return existing
+ return UserDashboardPreference(user_id=str(user_id))
+ # [/DEF:_get_or_create_preference_row:Function]
+
+ # [DEF:_build_default_preference:Function]
+ # @PURPOSE: Build non-persisted default preference DTO for unconfigured users.
+ # @PRE: user_id is provided.
+ # @POST: Returns ProfilePreference with disabled toggle and empty username.
+ def _build_default_preference(self, user_id: str) -> ProfilePreference:
+ now = datetime.utcnow()
+ return ProfilePreference(
+ user_id=str(user_id),
+ superset_username=None,
+ superset_username_normalized=None,
+ show_only_my_dashboards=False,
+ created_at=now,
+ updated_at=now,
+ )
+ # [/DEF:_build_default_preference:Function]
+
+ # [DEF:_validate_update_payload:Function]
+ # @PURPOSE: Validate username/toggle constraints for preference mutation.
+ # @PRE: payload is provided.
+ # @POST: Returns validation errors list; empty list means valid.
+ def _validate_update_payload(
+ self,
+ payload: ProfilePreferenceUpdateRequest,
+ ) -> List[str]:
+ errors: List[str] = []
+ sanitized_username = self._sanitize_username(payload.superset_username)
+
+ if sanitized_username and any(ch.isspace() for ch in sanitized_username):
+ errors.append(
+ "Username should not contain spaces. Please enter a valid Apache Superset username."
+ )
+ if payload.show_only_my_dashboards and not sanitized_username:
+ errors.append("Superset username is required when default filter is enabled.")
+ return errors
+ # [/DEF:_validate_update_payload:Function]
+
+ # [DEF:_sanitize_username:Function]
+ # @PURPOSE: Normalize raw username into trimmed form or None for empty input.
+ # @PRE: value can be empty or None.
+ # @POST: Returns trimmed username or None.
+ def _sanitize_username(self, value: Optional[str]) -> Optional[str]:
+ normalized = str(value or "").strip()
+ if not normalized:
+ return None
+ return normalized
+ # [/DEF:_sanitize_username:Function]
+
+ # [DEF:_normalize_username:Function]
+ # @PURPOSE: Apply deterministic trim+lower normalization for actor matching.
+ # @PRE: value can be empty or None.
+ # @POST: Returns lowercase normalized token or None.
+ def _normalize_username(self, value: Optional[str]) -> Optional[str]:
+ sanitized = self._sanitize_username(value)
+ if sanitized is None:
+ return None
+ return sanitized.lower()
+ # [/DEF:_normalize_username:Function]
+
+ # [DEF:_normalize_owner_tokens:Function]
+ # @PURPOSE: Normalize owners payload into deduplicated lower-cased tokens.
+ # @PRE: owners can be iterable of scalars or dict-like values.
+ # @POST: Returns list of unique normalized owner tokens.
+ def _normalize_owner_tokens(self, owners: Optional[Iterable[Any]]) -> List[str]:
+ if owners is None:
+ return []
+ normalized: List[str] = []
+ for owner in owners:
+ token = self._normalize_username(str(owner or ""))
+ if token and token not in normalized:
+ normalized.append(token)
+ return normalized
+ # [/DEF:_normalize_owner_tokens:Function]
+# [/DEF:ProfileService:Class]
+
+# [/DEF:backend.src.services.profile_service:Module]
\ No newline at end of file
diff --git a/backend/tests/fixtures/profile/fixtures_profile_filter.json b/backend/tests/fixtures/profile/fixtures_profile_filter.json
new file mode 100644
index 0000000..a7476e4
--- /dev/null
+++ b/backend/tests/fixtures/profile/fixtures_profile_filter.json
@@ -0,0 +1,26 @@
+{
+ "valid_profile_update": {
+ "user_id": "u-1",
+ "superset_username": "John_Doe",
+ "show_only_my_dashboards": true
+ },
+ "get_my_preference_ok": {
+ "auth_user": "u-1",
+ "expected_status": 200
+ },
+ "profile_filter_applied": {
+ "apply_profile_default": true,
+ "override_show_all": false
+ },
+ "enable_without_username": {
+ "superset_username": "",
+ "show_only_my_dashboards": true
+ },
+ "cross_user_mutation": {
+ "auth_user": "u-1",
+ "target_user": "u-2"
+ },
+ "lookup_env_not_found": {
+ "environment_id": "missing-env"
+ }
+}
\ No newline at end of file
diff --git a/frontend/src/components/auth/ProtectedRoute.svelte b/frontend/src/components/auth/ProtectedRoute.svelte
index 7cffd8a..0e67365 100644
--- a/frontend/src/components/auth/ProtectedRoute.svelte
+++ b/frontend/src/components/auth/ProtectedRoute.svelte
@@ -1,50 +1,97 @@
-{#if $auth.loading}
+{#if $auth.loading || isCheckingAccess}
-{:else if $auth.isAuthenticated}
+{:else if $auth.isAuthenticated && hasRouteAccess}
{/if}
diff --git a/frontend/src/lib/api.js b/frontend/src/lib/api.js
index e4ca703..4c082e2 100755
--- a/frontend/src/lib/api.js
+++ b/frontend/src/lib/api.js
@@ -245,6 +245,23 @@ export const api = {
},
createTask: (pluginId, params) => postApi('/tasks', { plugin_id: pluginId, params }),
+ // Profile
+ getProfilePreferences: () => fetchApi('/profile/preferences'),
+ updateProfilePreferences: (payload) => requestApi('/profile/preferences', 'PATCH', payload),
+ lookupSupersetAccounts: (environmentId, options = {}) => {
+ const normalizedEnvironmentId = String(environmentId || '').trim();
+ if (!normalizedEnvironmentId) {
+ throw new Error('environmentId is required for Superset account lookup');
+ }
+ const params = new URLSearchParams({ environment_id: normalizedEnvironmentId });
+ if (options.search) params.append('search', options.search);
+ if (options.page_index != null) params.append('page_index', String(options.page_index));
+ if (options.page_size != null) params.append('page_size', String(options.page_size));
+ if (options.sort_column) params.append('sort_column', options.sort_column);
+ if (options.sort_order) params.append('sort_order', options.sort_order);
+ return fetchApi(`/profile/superset-accounts?${params.toString()}`);
+ },
+
// Settings
getSettings: () => fetchApi('/settings'),
updateGlobalSettings: (settings) => requestApi('/settings/global', 'PATCH', settings),
@@ -268,6 +285,13 @@ export const api = {
if (options.search) params.append('search', options.search);
if (options.page) params.append('page', options.page);
if (options.page_size) params.append('page_size', options.page_size);
+ if (options.page_context) params.append('page_context', options.page_context);
+ if (options.apply_profile_default != null) {
+ params.append('apply_profile_default', String(Boolean(options.apply_profile_default)));
+ }
+ if (options.override_show_all != null) {
+ params.append('override_show_all', String(Boolean(options.override_show_all)));
+ }
if (options.filters?.title) {
for (const value of options.filters.title) params.append('filter_title', value);
}
@@ -330,6 +354,9 @@ export const getPlugins = api.getPlugins;
export const getTasks = api.getTasks;
export const getTask = api.getTask;
export const createTask = api.createTask;
+export const getProfilePreferences = api.getProfilePreferences;
+export const updateProfilePreferences = api.updateProfilePreferences;
+export const lookupSupersetAccounts = api.lookupSupersetAccounts;
export const getSettings = api.getSettings;
export const updateGlobalSettings = api.updateGlobalSettings;
export const getEnvironments = api.getEnvironments;
diff --git a/frontend/src/lib/auth/__tests__/permissions.test.js b/frontend/src/lib/auth/__tests__/permissions.test.js
new file mode 100644
index 0000000..5cf2e8e
--- /dev/null
+++ b/frontend/src/lib/auth/__tests__/permissions.test.js
@@ -0,0 +1,103 @@
+// [DEF:frontend.src.lib.auth.__tests__.permissions:Module]
+// @TIER: STANDARD
+// @SEMANTICS: tests, auth, permissions, rbac
+// @PURPOSE: Verifies frontend RBAC permission parsing and access checks.
+// @LAYER: UI (Tests)
+// @RELATION: TESTS -> frontend/src/lib/auth/permissions.js
+
+import { describe, it, expect } from "vitest";
+import {
+ normalizePermissionRequirement,
+ isAdminUser,
+ hasPermission,
+} from "../permissions.js";
+
+describe("auth.permissions", () => {
+ it("normalizes resource-only requirement with default READ action", () => {
+ expect(normalizePermissionRequirement("admin:settings")).toEqual({
+ resource: "admin:settings",
+ action: "READ",
+ });
+ });
+
+ it("normalizes explicit resource:action requirement", () => {
+ expect(normalizePermissionRequirement("admin:settings:write")).toEqual({
+ resource: "admin:settings",
+ action: "WRITE",
+ });
+ });
+
+ it("detects admin role case-insensitively", () => {
+ const user = {
+ roles: [{ name: "ADMIN" }],
+ };
+ expect(isAdminUser(user)).toBe(true);
+ });
+
+ it("denies when user is absent and permission is required", () => {
+ expect(hasPermission(null, "tasks", "READ")).toBe(false);
+ });
+
+ it("grants when permission object matches resource and action", () => {
+ const user = {
+ roles: [
+ {
+ name: "Operator",
+ permissions: [{ resource: "tasks", action: "READ" }],
+ },
+ ],
+ };
+
+ expect(hasPermission(user, "tasks", "READ")).toBe(true);
+ });
+
+ it("grants when requirement is provided as resource:action", () => {
+ const user = {
+ roles: [
+ {
+ name: "Operator",
+ permissions: [{ resource: "admin:settings", action: "READ" }],
+ },
+ ],
+ };
+
+ expect(hasPermission(user, "admin:settings:READ")).toBe(true);
+ });
+
+ it("grants when string permission entry matches", () => {
+ const user = {
+ roles: [
+ {
+ name: "Operator",
+ permissions: ["plugin:migration:READ"],
+ },
+ ],
+ };
+
+ expect(hasPermission(user, "plugin:migration", "READ")).toBe(true);
+ });
+
+ it("denies when action does not match", () => {
+ const user = {
+ roles: [
+ {
+ name: "Operator",
+ permissions: [{ resource: "tasks", action: "READ" }],
+ },
+ ],
+ };
+
+ expect(hasPermission(user, "tasks", "WRITE")).toBe(false);
+ });
+
+ it("always grants for admin role regardless of explicit permissions", () => {
+ const adminUser = {
+ roles: [{ name: "Admin", permissions: [] }],
+ };
+
+ expect(hasPermission(adminUser, "admin:users", "READ")).toBe(true);
+ expect(hasPermission(adminUser, "plugin:migration", "EXECUTE")).toBe(true);
+ });
+});
+
+// [/DEF:frontend.src.lib.auth.__tests__.permissions:Module]
\ No newline at end of file
diff --git a/frontend/src/lib/auth/permissions.js b/frontend/src/lib/auth/permissions.js
new file mode 100644
index 0000000..3895a9a
--- /dev/null
+++ b/frontend/src/lib/auth/permissions.js
@@ -0,0 +1,103 @@
+// [DEF:frontend.src.lib.auth.permissions:Module]
+// @TIER: STANDARD
+// @SEMANTICS: auth, permissions, rbac, roles
+// @PURPOSE: Shared frontend RBAC utilities for route guards and menu visibility.
+// @LAYER: Domain
+// @RELATION: USED_BY -> frontend.src.components.auth.ProtectedRoute
+// @RELATION: USED_BY -> frontend.src.lib.components.layout.Sidebar
+// @INVARIANT: Admin role always bypasses explicit permission checks.
+
+const KNOWN_ACTIONS = new Set(["READ", "WRITE", "EXECUTE", "DELETE"]);
+
+function normalizeAction(action, fallback = "READ") {
+ const normalized = String(action || "").trim().toUpperCase();
+ if (!normalized) return fallback;
+ return normalized;
+}
+
+// [DEF:normalizePermissionRequirement:Function]
+// @PURPOSE: Convert permission requirement string to canonical resource/action tuple.
+// @PRE: Permission can be "resource" or "resource:ACTION" where resource itself may contain ":".
+// @POST: Returns normalized object with action in uppercase.
+export function normalizePermissionRequirement(permission, defaultAction = "READ") {
+ const fallbackAction = normalizeAction(defaultAction, "READ");
+ const rawPermission = String(permission || "").trim();
+
+ if (!rawPermission) {
+ return { resource: null, action: fallbackAction };
+ }
+
+ const parts = rawPermission.split(":");
+ if (parts.length > 1) {
+ const tail = normalizeAction(parts[parts.length - 1], fallbackAction);
+ if (KNOWN_ACTIONS.has(tail)) {
+ const resource = parts.slice(0, -1).join(":");
+ return { resource, action: tail };
+ }
+ }
+
+ return { resource: rawPermission, action: fallbackAction };
+}
+// [/DEF:normalizePermissionRequirement:Function]
+
+// [DEF:isAdminUser:Function]
+// @PURPOSE: Determine whether user has Admin role.
+// @PRE: user can be null or partially populated.
+// @POST: Returns true when at least one role name equals "Admin" (case-insensitive).
+export function isAdminUser(user) {
+ const roles = Array.isArray(user?.roles) ? user.roles : [];
+ return roles.some(
+ (role) => String(role?.name || "").trim().toLowerCase() === "admin",
+ );
+}
+// [/DEF:isAdminUser:Function]
+
+// [DEF:hasPermission:Function]
+// @PURPOSE: Check if user has a required resource/action permission.
+// @PRE: user contains roles with permissions from /api/auth/me payload.
+// @POST: Returns true when requirement is empty, user is admin, or matching permission exists.
+export function hasPermission(user, requirement, action = "READ") {
+ if (!requirement) return true;
+ if (!user) return false;
+ if (isAdminUser(user)) return true;
+
+ const { resource, action: requiredAction } = normalizePermissionRequirement(
+ requirement,
+ action,
+ );
+ if (!resource) return true;
+
+ const roles = Array.isArray(user.roles) ? user.roles : [];
+ for (const role of roles) {
+ const permissions = Array.isArray(role?.permissions) ? role.permissions : [];
+ for (const permission of permissions) {
+ if (typeof permission === "string") {
+ const normalized = normalizePermissionRequirement(
+ permission,
+ requiredAction,
+ );
+ if (
+ normalized.resource === resource &&
+ normalized.action === requiredAction
+ ) {
+ return true;
+ }
+ continue;
+ }
+
+ const permissionResource = String(permission?.resource || "").trim();
+ const permissionAction = normalizeAction(permission?.action, "");
+ if (
+ permissionResource === resource &&
+ permissionAction === requiredAction
+ ) {
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+// [/DEF:hasPermission:Function]
+
+// [/DEF:frontend.src.lib.auth.permissions:Module]
\ No newline at end of file
diff --git a/frontend/src/lib/components/layout/Sidebar.svelte b/frontend/src/lib/components/layout/Sidebar.svelte
index bd8c09b..f88da5b 100644
--- a/frontend/src/lib/components/layout/Sidebar.svelte
+++ b/frontend/src/lib/components/layout/Sidebar.svelte
@@ -28,7 +28,6 @@
* @TEST_INVARIANT navigation -> verifies: [idle_state]
*/
- import { onMount } from "svelte";
import { page } from "$app/stores";
import {
sidebarStore,
@@ -37,65 +36,12 @@
closeMobile,
} from "$lib/stores/sidebar.js";
import { t } from "$lib/i18n";
+ import { auth } from "$lib/auth/store.js";
+ import { buildSidebarCategories } from "$lib/components/layout/sidebarNavigation.js";
import { browser } from "$app/environment";
import Icon from "$lib/ui/Icon.svelte";
- function buildCategories() {
- return [
- {
- id: "dashboards",
- label: $t.nav?.dashboards,
- icon: "dashboard",
- tone: "from-sky-100 to-sky-200 text-sky-700 ring-sky-200",
- path: "/dashboards",
- subItems: [{ label: $t.nav?.overview, path: "/dashboards" }],
- },
- {
- id: "datasets",
- label: $t.nav?.datasets,
- icon: "database",
- tone: "from-emerald-100 to-emerald-200 text-emerald-700 ring-emerald-200",
- path: "/datasets",
- subItems: [{ label: $t.nav?.all_datasets, path: "/datasets" }],
- },
- {
- id: "storage",
- label: $t.nav?.storage,
- icon: "storage",
- tone: "from-amber-100 to-amber-200 text-amber-800 ring-amber-200",
- path: "/storage",
- subItems: [
- { label: $t.nav?.backups, path: "/storage/backups" },
- {
- label: $t.nav?.repositories,
- path: "/storage/repos",
- },
- ],
- },
- {
- id: "reports",
- label: $t.nav?.reports,
- icon: "reports",
- tone: "from-violet-100 to-fuchsia-100 text-violet-700 ring-violet-200",
- path: "/reports",
- subItems: [{ label: $t.nav?.reports, path: "/reports" }],
- },
- {
- id: "admin",
- label: $t.nav?.admin,
- icon: "admin",
- tone: "from-rose-100 to-rose-200 text-rose-700 ring-rose-200",
- path: "/admin",
- subItems: [
- { label: $t.nav?.admin_users, path: "/admin/users" },
- { label: $t.nav?.admin_roles, path: "/admin/roles" },
- { label: $t.nav?.settings, path: "/settings" },
- ],
- },
- ];
- }
-
- let categories = buildCategories();
+ let categories = [];
let isExpanded = true;
let activeCategory = "dashboards";
@@ -111,14 +57,34 @@
isMobileOpen = $sidebarStore.isMobileOpen;
}
- // Reactive categories to update translations
- $: categories = buildCategories();
+ // Reactive categories to update translations and apply RBAC visibility.
+ $: categories = buildSidebarCategories($t, $auth?.user || null);
+
+ // Keep active category valid after RBAC filtering.
+ $: if (
+ categories.length > 0 &&
+ !categories.some((category) => category.id === activeCategory)
+ ) {
+ const fallbackCategory = categories[0];
+ const fallbackPath =
+ fallbackCategory.subItems?.[0]?.path || fallbackCategory.path;
+ setActiveItem(fallbackCategory.id, fallbackPath);
+ }
+
+ // Keep active category expanded after route/user change.
+ $: if (activeCategory && !expandedCategories.has(activeCategory)) {
+ expandedCategories.add(activeCategory);
+ expandedCategories = expandedCategories;
+ }
// Update active item when page changes
$: if ($page && $page.url.pathname !== activeItem) {
- const matched = categories.find((cat) =>
- $page.url.pathname.startsWith(cat.path),
- );
+ const matched = categories.find((cat) => {
+ if ($page.url.pathname.startsWith(cat.path)) return true;
+ return (cat.subItems || []).some((item) =>
+ $page.url.pathname.startsWith(item.path),
+ );
+ });
if (matched) {
activeCategory = matched.id;
activeItem = $page.url.pathname;
@@ -255,7 +221,7 @@
{
- await initializeEnvironmentContext();
+ onMount(() => {
+ void initializeEnvironmentContext();
if (typeof document !== "undefined") {
document.addEventListener("click", handleDocumentClick);
}
@@ -508,19 +510,21 @@
{user?.username || $t.common?.user}
- {
- window.location.href = "/settings";
- }}
- on:keydown={(e) =>
- (e.key === "Enter" || e.key === " ") &&
- (window.location.href = "/settings")}
- role="button"
- tabindex="0"
- >
- {$t.nav?.settings}
-
+ {#if canOpenSettings}
+ {
+ window.location.href = "/settings";
+ }}
+ on:keydown={(e) =>
+ (e.key === "Enter" || e.key === " ") &&
+ (window.location.href = "/settings")}
+ role="button"
+ tabindex="0"
+ >
+ {$t.nav?.settings}
+
+ {/if}
frontend/src/lib/components/layout/sidebarNavigation.js
+
+import { describe, it, expect } from "vitest";
+import { buildSidebarCategories } from "../sidebarNavigation.js";
+
+const i18nState = {
+ nav: {
+ dashboards: "Dashboards",
+ overview: "Overview",
+ datasets: "Datasets",
+ all_datasets: "All datasets",
+ storage: "Storage",
+ backups: "Backups",
+ repositories: "Repositories",
+ reports: "Reports",
+ profile: "Profile",
+ admin: "Admin",
+ admin_users: "User management",
+ admin_roles: "Role management",
+ settings: "Settings",
+ },
+};
+
+function makeUser(roles) {
+ return { roles };
+}
+
+describe("sidebarNavigation", () => {
+ it("shows only categories available to a non-admin user", () => {
+ const user = makeUser([
+ {
+ name: "Operator",
+ permissions: [
+ { resource: "plugin:migration", action: "READ" },
+ { resource: "tasks", action: "READ" },
+ ],
+ },
+ ]);
+
+ const categories = buildSidebarCategories(i18nState, user);
+ const categoryIds = categories.map((category) => category.id);
+
+ expect(categoryIds).toEqual(["dashboards", "datasets", "reports", "profile"]);
+ });
+
+ it("hides admin category when user has no admin permissions", () => {
+ const user = makeUser([
+ {
+ name: "Viewer",
+ permissions: [{ resource: "plugin:migration", action: "READ" }],
+ },
+ ]);
+
+ const categories = buildSidebarCategories(i18nState, user);
+ const adminCategory = categories.find((category) => category.id === "admin");
+
+ expect(adminCategory).toBeUndefined();
+ });
+
+ it("shows full admin category for admin role", () => {
+ const user = makeUser([
+ {
+ name: "Admin",
+ permissions: [],
+ },
+ ]);
+
+ const categories = buildSidebarCategories(i18nState, user);
+ const adminCategory = categories.find((category) => category.id === "admin");
+
+ expect(adminCategory).toBeDefined();
+ expect(adminCategory.subItems.map((item) => item.path)).toEqual([
+ "/admin/users",
+ "/admin/roles",
+ "/settings",
+ ]);
+ });
+
+ it("keeps profile visible even without explicit plugin permissions", () => {
+ const user = makeUser([
+ {
+ name: "Basic",
+ permissions: [],
+ },
+ ]);
+
+ const categories = buildSidebarCategories(i18nState, user);
+ const categoryIds = categories.map((category) => category.id);
+
+ expect(categoryIds).toEqual(["profile"]);
+ });
+});
+
+// [/DEF:frontend.src.lib.components.layout.__tests__.sidebarNavigation:Module]
\ No newline at end of file
diff --git a/frontend/src/lib/components/layout/sidebarNavigation.js b/frontend/src/lib/components/layout/sidebarNavigation.js
new file mode 100644
index 0000000..4d8064a
--- /dev/null
+++ b/frontend/src/lib/components/layout/sidebarNavigation.js
@@ -0,0 +1,166 @@
+// [DEF:frontend.src.lib.components.layout.sidebarNavigation:Module]
+// @TIER: STANDARD
+// @SEMANTICS: navigation, sidebar, rbac, menu, filtering
+// @PURPOSE: Build sidebar navigation categories filtered by current user permissions.
+// @LAYER: UI
+// @RELATION: DEPENDS_ON -> frontend.src.lib.auth.permissions.hasPermission
+// @RELATION: USED_BY -> frontend.src.lib.components.layout.Sidebar
+// @INVARIANT: Admin role can access all categories and subitems through permission utility.
+
+import { hasPermission } from "$lib/auth/permissions.js";
+
+// [DEF:isItemAllowed:Function]
+// @PURPOSE: Check whether a single menu node can be shown for a given user.
+// @PRE: item can contain optional requiredPermission/requiredAction.
+// @POST: Returns true when no permission is required or permission check passes.
+function isItemAllowed(user, item) {
+ if (!item?.requiredPermission) return true;
+ return hasPermission(
+ user,
+ item.requiredPermission,
+ item.requiredAction || "READ",
+ );
+}
+// [/DEF:isItemAllowed:Function]
+
+// [DEF:buildSidebarCategories:Function]
+// @PURPOSE: Build translated sidebar categories and filter them by RBAC permissions.
+// @PRE: i18nState provides nav labels; user can be null.
+// @POST: Returns only categories/subitems available for provided user.
+export function buildSidebarCategories(i18nState, user) {
+ const nav = i18nState?.nav || {};
+
+ const categories = [
+ {
+ id: "dashboards",
+ label: nav.dashboards,
+ icon: "dashboard",
+ tone: "from-sky-100 to-sky-200 text-sky-700 ring-sky-200",
+ path: "/dashboards",
+ requiredPermission: "plugin:migration",
+ requiredAction: "READ",
+ subItems: [
+ {
+ label: nav.overview,
+ path: "/dashboards",
+ requiredPermission: "plugin:migration",
+ requiredAction: "READ",
+ },
+ ],
+ },
+ {
+ id: "datasets",
+ label: nav.datasets,
+ icon: "database",
+ tone: "from-emerald-100 to-emerald-200 text-emerald-700 ring-emerald-200",
+ path: "/datasets",
+ requiredPermission: "plugin:migration",
+ requiredAction: "READ",
+ subItems: [
+ {
+ label: nav.all_datasets,
+ path: "/datasets",
+ requiredPermission: "plugin:migration",
+ requiredAction: "READ",
+ },
+ ],
+ },
+ {
+ id: "storage",
+ label: nav.storage,
+ icon: "storage",
+ tone: "from-amber-100 to-amber-200 text-amber-800 ring-amber-200",
+ path: "/storage",
+ requiredPermission: "plugin:storage",
+ requiredAction: "READ",
+ subItems: [
+ {
+ label: nav.backups,
+ path: "/storage/backups",
+ requiredPermission: "plugin:storage",
+ requiredAction: "READ",
+ },
+ {
+ label: nav.repositories,
+ path: "/storage/repos",
+ requiredPermission: "plugin:storage",
+ requiredAction: "READ",
+ },
+ ],
+ },
+ {
+ id: "reports",
+ label: nav.reports,
+ icon: "reports",
+ tone: "from-violet-100 to-fuchsia-100 text-violet-700 ring-violet-200",
+ path: "/reports",
+ requiredPermission: "tasks",
+ requiredAction: "READ",
+ subItems: [
+ {
+ label: nav.reports,
+ path: "/reports",
+ requiredPermission: "tasks",
+ requiredAction: "READ",
+ },
+ ],
+ },
+ {
+ id: "profile",
+ label: nav.profile,
+ icon: "admin",
+ tone: "from-indigo-100 to-indigo-200 text-indigo-700 ring-indigo-200",
+ path: "/profile",
+ subItems: [{ label: nav.profile, path: "/profile" }],
+ },
+ {
+ id: "admin",
+ label: nav.admin,
+ icon: "admin",
+ tone: "from-rose-100 to-rose-200 text-rose-700 ring-rose-200",
+ path: "/admin",
+ subItems: [
+ {
+ label: nav.admin_users,
+ path: "/admin/users",
+ requiredPermission: "admin:users",
+ requiredAction: "READ",
+ },
+ {
+ label: nav.admin_roles,
+ path: "/admin/roles",
+ requiredPermission: "admin:roles",
+ requiredAction: "READ",
+ },
+ {
+ label: nav.settings,
+ path: "/settings",
+ requiredPermission: "admin:settings",
+ requiredAction: "READ",
+ },
+ ],
+ },
+ ];
+
+ return categories
+ .map((category) => {
+ const visibleSubItems = (category.subItems || []).filter((subItem) =>
+ isItemAllowed(user, subItem),
+ );
+ return {
+ ...category,
+ subItems: visibleSubItems,
+ };
+ })
+ .filter((category) => {
+ const categoryVisible = isItemAllowed(user, category);
+ if (!categoryVisible) return false;
+
+ const hasVisibleSubItems =
+ Array.isArray(category.subItems) && category.subItems.length > 0;
+ return hasVisibleSubItems;
+ });
+}
+// [/DEF:buildSidebarCategories:Function]
+
+// [/DEF:frontend.src.lib.components.layout.sidebarNavigation:Module]
\ No newline at end of file
diff --git a/frontend/src/lib/i18n/locales/en.json b/frontend/src/lib/i18n/locales/en.json
index 989b20c..8003774 100644
--- a/frontend/src/lib/i18n/locales/en.json
+++ b/frontend/src/lib/i18n/locales/en.json
@@ -69,7 +69,8 @@
"admin_users": "User Management",
"admin_roles": "Role Management",
"admin_settings": "ADFS Configuration",
- "admin_llm": "LLM Providers"
+ "admin_llm": "LLM Providers",
+ "profile": "Profile"
},
"llm": {
"providers_title": "LLM Providers",
@@ -398,6 +399,32 @@
"status_error": "Error",
"empty": "No dashboards found"
},
+ "profile": {
+ "title": "Profile",
+ "description": "Manage your dashboard filter preferences.",
+ "dashboard_preferences": "Dashboard Preferences",
+ "superset_environment": "Superset Environment",
+ "superset_environment_placeholder": "Select environment",
+ "superset_account": "Your Apache Superset Account",
+ "superset_account_placeholder": "Enter your Apache Superset username",
+ "show_only_my_dashboards": "Show only my dashboards by default",
+ "save_preferences": "Save Preferences",
+ "lookup_loading": "Loading Superset accounts...",
+ "lookup_error": "Cannot load Superset accounts for this environment right now. You can enter username manually.",
+ "save_success": "Preferences saved",
+ "save_error": "Failed to save preferences. Please try again.",
+ "invalid_username": "Username should not contain spaces. Please enter a valid Apache Superset username.",
+ "username_required": "Superset username is required when default filter is enabled.",
+ "filter_badge_active": "My Dashboards Only",
+ "filter_badge_override": "Showing all dashboards temporarily",
+ "filter_empty_state": "No dashboards found for your account. Try adjusting your filter settings.",
+ "filter_show_all_temporarily": "Show all dashboards temporarily",
+ "filter_restore_default": "Restore default filter",
+ "saving": "Saving...",
+ "lookup_button": "Lookup",
+ "lookup_search_placeholder": "Search account candidates",
+ "lookup_no_results": "No account candidates found. You can enter username manually."
+ },
"reports": {
"title": "Reports",
"empty": "No reports available.",
diff --git a/frontend/src/lib/i18n/locales/ru.json b/frontend/src/lib/i18n/locales/ru.json
index 1bf460b..a174318 100644
--- a/frontend/src/lib/i18n/locales/ru.json
+++ b/frontend/src/lib/i18n/locales/ru.json
@@ -69,7 +69,8 @@
"admin_users": "Управление пользователями",
"admin_roles": "Управление ролями",
"admin_settings": "Настройка ADFS",
- "admin_llm": "Провайдеры LLM"
+ "admin_llm": "Провайдеры LLM",
+ "profile": "Профиль"
},
"llm": {
"providers_title": "Провайдеры LLM",
@@ -396,6 +397,32 @@
"status_error": "Ошибка",
"empty": "Дашборды не найдены"
},
+ "profile": {
+ "title": "Профиль",
+ "description": "Управляйте настройками фильтра дашбордов.",
+ "dashboard_preferences": "Настройки дашбордов",
+ "superset_environment": "Окружение Superset",
+ "superset_environment_placeholder": "Выберите окружение",
+ "superset_account": "Ваш аккаунт Apache Superset",
+ "superset_account_placeholder": "Введите имя пользователя Apache Superset",
+ "show_only_my_dashboards": "Показывать только мои дашборды по умолчанию",
+ "save_preferences": "Сохранить настройки",
+ "lookup_loading": "Загрузка аккаунтов Superset...",
+ "lookup_error": "Сейчас не удается загрузить аккаунты Superset для этого окружения. Вы можете ввести имя пользователя вручную.",
+ "save_success": "Настройки сохранены",
+ "save_error": "Не удалось сохранить настройки. Попробуйте снова.",
+ "invalid_username": "Имя пользователя не должно содержать пробелы. Введите корректное имя пользователя Apache Superset.",
+ "username_required": "Имя пользователя Superset обязательно, когда фильтр по умолчанию включен.",
+ "filter_badge_active": "Только мои дашборды",
+ "filter_badge_override": "Временно показаны все дашборды",
+ "filter_empty_state": "Для вашего аккаунта дашборды не найдены. Попробуйте изменить настройки фильтра.",
+ "filter_show_all_temporarily": "Временно показать все дашборды",
+ "filter_restore_default": "Вернуть фильтр по умолчанию",
+ "saving": "Сохранение...",
+ "lookup_button": "Найти",
+ "lookup_search_placeholder": "Поиск среди аккаунтов",
+ "lookup_no_results": "Кандидаты аккаунтов не найдены. Вы можете ввести имя пользователя вручную."
+ },
"reports": {
"title": "Отчеты",
"empty": "Отчеты отсутствуют.",
diff --git a/frontend/src/routes/dashboards/+page.svelte b/frontend/src/routes/dashboards/+page.svelte
index 64a631f..9219729 100644
--- a/frontend/src/routes/dashboards/+page.svelte
+++ b/frontend/src/routes/dashboards/+page.svelte
@@ -40,7 +40,7 @@
import { debounce } from "$lib/utils/debounce.js";
import { addToast } from "$lib/toasts.js";
import { gitService } from "../../services/gitService.js";
- import MappingTable from "$components/MappingTable.svelte";
+ import MappingTable from "../../components/MappingTable.svelte";
import {
environmentContextStore,
initializeEnvironmentContext,
@@ -114,6 +114,8 @@
let lastLoadedEnvId = $state(null);
let serverTotal = $state(0);
let serverTotalPages = $state(1);
+ let profileFilterOverrideShowAll = $state(false);
+ let effectiveProfileFilter = $state(null);
// Dry run state
let isDryRunLoading = $state(false);
@@ -275,6 +277,9 @@
page: currentPage,
page_size: pageSize,
search: searchQuery.trim() || undefined,
+ page_context: "dashboards_main",
+ apply_profile_default: true,
+ override_show_all: profileFilterOverrideShowAll,
filters: {
title: Array.from(columnFilters.title),
git_status: Array.from(columnFilters.git_status),
@@ -288,6 +293,10 @@
const rawDashboards = firstResponse?.dashboards || [];
serverTotal = Number(firstResponse?.total || 0);
serverTotalPages = Math.max(1, Number(firstResponse?.total_pages || 1));
+ effectiveProfileFilter = firstResponse?.effective_profile_filter || null;
+ profileFilterOverrideShowAll = Boolean(
+ firstResponse?.effective_profile_filter?.override_show_all,
+ );
gitResolvedIds = new Set();
gitLoadingIds = new Set();
@@ -370,6 +379,34 @@
void loadDashboards();
}
+ // [DEF:DashboardHub.handleTemporaryShowAll:Function]
+ /**
+ * @PURPOSE: Temporarily disable profile-default dashboard filter for current page context.
+ * @PRE: Dashboards list is loaded in dashboards_main context.
+ * @POST: Next request is sent with override_show_all=true.
+ */
+ function handleTemporaryShowAll() {
+ if (profileFilterOverrideShowAll) return;
+ profileFilterOverrideShowAll = true;
+ currentPage = 1;
+ void loadDashboards();
+ }
+ // [/DEF:DashboardHub.handleTemporaryShowAll:Function]
+
+ // [DEF:DashboardHub.handleRestoreProfileFilter:Function]
+ /**
+ * @PURPOSE: Re-enable persisted profile-default filtering after temporary override.
+ * @PRE: Current page is in override mode.
+ * @POST: Next request is sent with override_show_all=false.
+ */
+ function handleRestoreProfileFilter() {
+ if (!profileFilterOverrideShowAll) return;
+ profileFilterOverrideShowAll = false;
+ currentPage = 1;
+ void loadDashboards();
+ }
+ // [/DEF:DashboardHub.handleRestoreProfileFilter:Function]
+
// Update selection state based on current selection
function updateSelectionState() {
const visibleCount = dashboards.length;
@@ -1372,6 +1409,8 @@
if (!envId || envId === lastLoadedEnvId) return;
lastLoadedEnvId = envId;
currentPage = 1;
+ profileFilterOverrideShowAll = false;
+ effectiveProfileFilter = null;
selectedIds.clear();
selectedIds = selectedIds;
void loadDashboards();
@@ -1394,6 +1433,38 @@
+ {#if effectiveProfileFilter?.applied}
+
+
+
+ {$t.profile?.filter_badge_active || "My Dashboards Only"}
+
+
+
+
+ {:else if effectiveProfileFilter?.override_show_all}
+
+
+
+ {$t.profile?.filter_badge_override || "Showing all dashboards temporarily"}
+
+
+
+
+ {/if}
+
{#if error}
-
{$t.dashboard?.empty}
+
+ {#if effectiveProfileFilter?.applied}
+
{$t.profile?.filter_empty_state || "No dashboards found for your account. Try adjusting your filter settings."}
+
+
+
+ {:else}
+
{$t.dashboard?.empty}
+ {/if}
{:else}
diff --git a/frontend/src/routes/dashboards/__tests__/dashboard-profile-override.integration.test.js b/frontend/src/routes/dashboards/__tests__/dashboard-profile-override.integration.test.js
new file mode 100644
index 0000000..aa66310
--- /dev/null
+++ b/frontend/src/routes/dashboards/__tests__/dashboard-profile-override.integration.test.js
@@ -0,0 +1,288 @@
+// [DEF:frontend.src.routes.dashboards.__tests__.dashboard_profile_override_integration:Module]
+// @TIER: STANDARD
+// @SEMANTICS: tests, dashboards, profile-filter, override, restore
+// @PURPOSE: Verifies temporary show-all override and restore-on-return behavior for profile-default dashboard filtering.
+// @LAYER: UI (Tests)
+// @RELATION: TESTS -> frontend/src/routes/dashboards/+page.svelte
+
+import { describe, it, expect, vi, beforeEach } from "vitest";
+import { render, screen, fireEvent, waitFor } from "@testing-library/svelte";
+import { writable } from "svelte/store";
+
+import DashboardsPage from "../+page.svelte";
+import { api } from "$lib/api.js";
+import { initializeEnvironmentContext } from "$lib/stores/environmentContext.js";
+
+const mockedApi = /** @type {any} */ (api);
+const mockedInitializeEnvironmentContext =
+ /** @type {any} */ (initializeEnvironmentContext);
+
+vi.mock("$lib/api.js", () => ({
+ api: {
+ getDashboards: vi.fn(),
+ postApi: vi.fn(),
+ getDatabaseMappings: vi.fn(),
+ calculateMigrationDryRun: vi.fn(),
+ getEnvironmentDatabases: vi.fn(),
+ },
+}));
+
+vi.mock("$lib/toasts.js", () => ({
+ addToast: vi.fn(),
+}));
+
+vi.mock("$lib/stores/taskDrawer.js", () => ({
+ openDrawerForTask: vi.fn(),
+}));
+
+vi.mock("../../services/gitService.js", () => ({
+ gitService: {
+ getConfigs: vi.fn().mockResolvedValue([]),
+ getStatusesBatch: vi.fn().mockResolvedValue({ statuses: {} }),
+ initRepository: vi.fn(),
+ sync: vi.fn(),
+ commit: vi.fn(),
+ pull: vi.fn(),
+ push: vi.fn(),
+ },
+}));
+
+vi.mock("$lib/stores/environmentContext.js", () => {
+ const context = writable({
+ environments: [{ id: "dev", name: "Development" }],
+ selectedEnvId: "dev",
+ isLoading: false,
+ isLoaded: true,
+ error: null,
+ });
+
+ return {
+ environmentContextStore: { subscribe: context.subscribe },
+ initializeEnvironmentContext: vi.fn().mockResolvedValue(undefined),
+ };
+});
+
+vi.mock("$lib/i18n", () => ({
+ t: {
+ subscribe: (run) => {
+ run({
+ nav: { dashboards: "Dashboards" },
+ common: {
+ refresh: "Refresh",
+ retry: "Retry",
+ cancel: "Cancel",
+ first: "First",
+ last: "Last",
+ loading: "Loading...",
+ clear: "Clear",
+ on: "On",
+ off: "Off",
+ close_modal: "Close modal",
+ },
+ dashboard: {
+ empty: "No dashboards found",
+ search: "Search dashboards...",
+ title: "Title",
+ llm_status: "LLM Validation Status",
+ actions: "Actions",
+ changed_on: "Changed On",
+ owners: "Owners",
+ select_all: "Select All",
+ deselect_all: "Deselect All",
+ select_visible: "Select Visible",
+ deselect_visible: "Deselect Visible",
+ selected_count: "{count} selected",
+ column_filter: "Column filter",
+ previous: "Previous",
+ next: "Next",
+ per_page_option: "{count} per page",
+ showing: "Showing {start} to {end} of {total} dashboards",
+ },
+ profile: {
+ filter_badge_active: "My Dashboards Only",
+ filter_badge_override: "Showing all dashboards temporarily",
+ filter_show_all_temporarily: "Show all dashboards temporarily",
+ filter_restore_default: "Restore default filter",
+ filter_empty_state:
+ "No dashboards found for your account. Try adjusting your filter settings.",
+ },
+ });
+ return () => {};
+ },
+ },
+ _: vi.fn((key) => key),
+}));
+
+function buildFilteredResponse() {
+ return {
+ dashboards: [
+ {
+ id: 1,
+ title: "Owner Dashboard",
+ slug: "owner-dashboard",
+ last_modified: "2026-03-04T10:00:00Z",
+ owners: ["john_doe"],
+ modified_by: "john_doe",
+ git_status: {
+ branch: "main",
+ sync_status: "OK",
+ has_repo: true,
+ has_changes_for_commit: false,
+ },
+ last_task: null,
+ },
+ ],
+ total: 1,
+ page: 1,
+ page_size: 10,
+ total_pages: 1,
+ effective_profile_filter: {
+ applied: true,
+ source_page: "dashboards_main",
+ override_show_all: false,
+ username: "john_doe",
+ match_logic: "owners_or_modified_by",
+ },
+ };
+}
+
+function buildUnfilteredResponse() {
+ return {
+ dashboards: [
+ {
+ id: 1,
+ title: "Owner Dashboard",
+ slug: "owner-dashboard",
+ last_modified: "2026-03-04T10:00:00Z",
+ owners: ["john_doe"],
+ modified_by: "john_doe",
+ git_status: {
+ branch: "main",
+ sync_status: "OK",
+ has_repo: true,
+ has_changes_for_commit: false,
+ },
+ last_task: null,
+ },
+ {
+ id: 2,
+ title: "Team Dashboard",
+ slug: "team-dashboard",
+ last_modified: "2026-03-04T10:30:00Z",
+ owners: ["analytics-team"],
+ modified_by: "someone_else",
+ git_status: {
+ branch: "main",
+ sync_status: "OK",
+ has_repo: true,
+ has_changes_for_commit: false,
+ },
+ last_task: null,
+ },
+ ],
+ total: 2,
+ page: 1,
+ page_size: 10,
+ total_pages: 1,
+ effective_profile_filter: {
+ applied: false,
+ source_page: "dashboards_main",
+ override_show_all: true,
+ username: null,
+ match_logic: null,
+ },
+ };
+}
+
+describe("dashboard-profile-override.integration", () => {
+ beforeEach(() => {
+ vi.clearAllMocks();
+ mockedApi.getDatabaseMappings.mockResolvedValue({ mappings: [] });
+ mockedApi.getEnvironmentDatabases.mockResolvedValue([]);
+ mockedApi.calculateMigrationDryRun.mockResolvedValue({
+ summary: {
+ dashboards: { create: 0, update: 0, delete: 0 },
+ charts: { create: 0, update: 0, delete: 0 },
+ datasets: { create: 0, update: 0, delete: 0 },
+ },
+ });
+ mockedApi.postApi.mockResolvedValue({ task_id: "task-1" });
+ });
+
+ it("temporarily shows all dashboards and restores profile-default filter on return", async () => {
+ mockedApi.getDashboards
+ .mockResolvedValueOnce(buildFilteredResponse())
+ .mockResolvedValueOnce(buildUnfilteredResponse())
+ .mockResolvedValueOnce(buildFilteredResponse());
+
+ const firstView = render(DashboardsPage);
+
+ await waitFor(() => {
+ expect(mockedInitializeEnvironmentContext).toHaveBeenCalledTimes(1);
+ expect(mockedApi.getDashboards).toHaveBeenCalledTimes(1);
+ });
+
+ expect(mockedApi.getDashboards.mock.calls[0][1].override_show_all).toBe(
+ false,
+ );
+ await screen.findByText("My Dashboards Only");
+
+ await fireEvent.click(
+ screen.getByRole("button", { name: "Show all dashboards temporarily" }),
+ );
+
+ await waitFor(() => {
+ expect(mockedApi.getDashboards).toHaveBeenCalledTimes(2);
+ expect(mockedApi.getDashboards.mock.calls[1][1].override_show_all).toBe(
+ true,
+ );
+ });
+
+ await screen.findByText("Showing all dashboards temporarily");
+
+ firstView.unmount();
+ render(DashboardsPage);
+
+ await waitFor(() => {
+ expect(mockedApi.getDashboards).toHaveBeenCalledTimes(3);
+ expect(mockedApi.getDashboards.mock.calls[2][1].override_show_all).toBe(
+ false,
+ );
+ });
+
+ await screen.findByText("My Dashboards Only");
+ });
+
+ it("renders filtered empty state message when profile filter is active and no dashboards match", async () => {
+ mockedApi.getDashboards.mockResolvedValueOnce({
+ dashboards: [],
+ total: 0,
+ page: 1,
+ page_size: 10,
+ total_pages: 1,
+ effective_profile_filter: {
+ applied: true,
+ source_page: "dashboards_main",
+ override_show_all: false,
+ username: "john_doe",
+ match_logic: "owners_or_modified_by",
+ },
+ });
+
+ render(DashboardsPage);
+
+ await waitFor(() => {
+ expect(mockedApi.getDashboards).toHaveBeenCalledTimes(1);
+ });
+
+ await screen.findByText(
+ "No dashboards found for your account. Try adjusting your filter settings.",
+ );
+ expect(
+ screen.getAllByRole("button", { name: "Show all dashboards temporarily" })
+ .length,
+ ).toBeGreaterThan(0);
+ });
+});
+
+// [/DEF:frontend.src.routes.dashboards.__tests__.dashboard_profile_override_integration:Module]
\ No newline at end of file
diff --git a/frontend/src/routes/profile/+page.svelte b/frontend/src/routes/profile/+page.svelte
new file mode 100644
index 0000000..19c6a12
--- /dev/null
+++ b/frontend/src/routes/profile/+page.svelte
@@ -0,0 +1,418 @@
+
+
+
+
+
+
+
+
+ {$t.profile?.dashboard_preferences || "Dashboard Preferences"}
+
+
+ {#if isPageLoading}
+
+ {:else}
+
+
+
+
+
+
+ {$t.profile?.superset_account || "Your Apache Superset Account"}
+
+
+
+
+
event.key === "Enter" && loadLookupCandidates()}
+ />
+
+ {#if isLookupLoading}
+
+ {$t.profile?.lookup_loading || "Loading Superset accounts..."}
+
+ {/if}
+
+ {#if lookupWarning}
+
+ {lookupWarning}
+
+ {/if}
+
+ {#if !isLookupLoading && lookupItems.length > 0}
+
+ {#each lookupItems as item}
+
+ {/each}
+
+ {:else if !isLookupLoading && selectedEnvironmentId && !lookupWarning}
+
+ {$t.profile?.lookup_no_results || "No account candidates found. You can enter username manually."}
+
+ {/if}
+
+
+
+
+ {#if visibleValidationErrors.length > 0}
+
+ {#each visibleValidationErrors as errorText}
+
{errorText}
+ {/each}
+
+ {/if}
+
+
+
+
+
+
+
+
+ {/if}
+
+
+
+
\ No newline at end of file
diff --git a/frontend/src/routes/profile/__tests__/fixtures/profile.fixtures.js b/frontend/src/routes/profile/__tests__/fixtures/profile.fixtures.js
new file mode 100644
index 0000000..759f7a2
--- /dev/null
+++ b/frontend/src/routes/profile/__tests__/fixtures/profile.fixtures.js
@@ -0,0 +1,23 @@
+// [DEF:frontend.src.routes.profile.__tests__.fixtures.profile_fixtures:Module]
+// @TIER: TRIVIAL
+// @PURPOSE: Shared fixture placeholders for profile page integration tests.
+
+export const profileFixtures = {
+ bindAccountHappyPath: {
+ environmentId: "dev",
+ candidate: "j.doe",
+ showOnlyMyDashboards: true,
+ },
+ lookupFailedManualFallback: {
+ environmentId: "dev",
+ warning:
+ "Cannot load Superset accounts for this environment right now. You can enter username manually.",
+ manualUsername: "john_doe",
+ },
+ invalidUsername: {
+ supersetUsername: "John Doe",
+ showOnlyMyDashboards: true,
+ },
+};
+
+// [/DEF:frontend.src.routes.profile.__tests__.fixtures.profile_fixtures:Module]
\ No newline at end of file
diff --git a/frontend/src/routes/profile/__tests__/profile-preferences.integration.test.js b/frontend/src/routes/profile/__tests__/profile-preferences.integration.test.js
new file mode 100644
index 0000000..92202b1
--- /dev/null
+++ b/frontend/src/routes/profile/__tests__/profile-preferences.integration.test.js
@@ -0,0 +1,197 @@
+// [DEF:frontend.src.routes.profile.__tests__.profile_preferences_integration:Module]
+// @TIER: STANDARD
+// @SEMANTICS: tests, profile, integration, lookup, persistence, fallback
+// @PURPOSE: Verifies profile binding happy path and degraded lookup manual fallback save flow.
+// @LAYER: UI (Tests)
+// @RELATION: TESTS -> frontend/src/routes/profile/+page.svelte
+
+import { describe, it, expect, vi, beforeEach } from "vitest";
+import { render, screen, fireEvent, waitFor } from "@testing-library/svelte";
+import { writable } from "svelte/store";
+
+import ProfilePage from "../+page.svelte";
+import { api } from "$lib/api.js";
+import { addToast } from "$lib/toasts";
+import { initializeEnvironmentContext } from "$lib/stores/environmentContext.js";
+
+const mockedApi = /** @type {any} */ (api);
+const mockedAddToast = /** @type {any} */ (addToast);
+const mockedInitializeEnvironmentContext =
+ /** @type {any} */ (initializeEnvironmentContext);
+
+vi.mock("$lib/api.js", () => ({
+ api: {
+ getProfilePreferences: vi.fn(),
+ lookupSupersetAccounts: vi.fn(),
+ updateProfilePreferences: vi.fn(),
+ },
+}));
+
+vi.mock("$lib/toasts", () => ({
+ addToast: vi.fn(),
+}));
+
+vi.mock("$lib/stores/environmentContext.js", () => {
+ const context = writable({
+ environments: [{ id: "dev", name: "Development" }],
+ selectedEnvId: "dev",
+ isLoading: false,
+ isLoaded: true,
+ error: null,
+ });
+
+ return {
+ environmentContextStore: { subscribe: context.subscribe },
+ initializeEnvironmentContext: vi.fn().mockResolvedValue(undefined),
+ };
+});
+
+vi.mock("$lib/i18n", () => ({
+ t: {
+ subscribe: (run) => {
+ run({
+ common: { cancel: "Cancel" },
+ profile: {
+ title: "Profile",
+ description: "Manage your dashboard filter preferences.",
+ dashboard_preferences: "Dashboard Preferences",
+ superset_environment: "Superset Environment",
+ superset_environment_placeholder: "Select environment",
+ superset_account: "Your Apache Superset Account",
+ superset_account_placeholder: "Enter your Apache Superset username",
+ lookup_loading: "Loading Superset accounts...",
+ lookup_error:
+ "Cannot load Superset accounts for this environment right now. You can enter username manually.",
+ lookup_button: "Lookup",
+ lookup_search_placeholder: "Search account candidates",
+ lookup_no_results:
+ "No account candidates found. You can enter username manually.",
+ show_only_my_dashboards: "Show only my dashboards by default",
+ save_preferences: "Save Preferences",
+ save_success: "Preferences saved",
+ save_error: "Failed to save preferences. Please try again.",
+ },
+ });
+ return () => {};
+ },
+ },
+ _: vi.fn((key) => key),
+}));
+
+describe("profile-preferences.integration", () => {
+ beforeEach(() => {
+ vi.clearAllMocks();
+
+ mockedApi.getProfilePreferences.mockResolvedValue({
+ status: "success",
+ preference: {
+ user_id: "u-1",
+ superset_username: null,
+ show_only_my_dashboards: false,
+ },
+ });
+
+ mockedApi.updateProfilePreferences.mockResolvedValue({
+ status: "success",
+ preference: {
+ user_id: "u-1",
+ superset_username: "john_doe",
+ show_only_my_dashboards: true,
+ },
+ });
+ });
+
+ it("binds account from lookup and saves enabled default filter", async () => {
+ mockedApi.lookupSupersetAccounts.mockResolvedValue({
+ status: "success",
+ environment_id: "dev",
+ page_index: 0,
+ page_size: 20,
+ total: 1,
+ items: [
+ {
+ environment_id: "dev",
+ username: "john_doe",
+ display_name: "John Doe",
+ email: "john@example.local",
+ is_active: true,
+ },
+ ],
+ });
+
+ render(ProfilePage);
+
+ await waitFor(() => {
+ expect(mockedInitializeEnvironmentContext).toHaveBeenCalledTimes(1);
+ expect(mockedApi.getProfilePreferences).toHaveBeenCalledTimes(1);
+ expect(mockedApi.lookupSupersetAccounts).toHaveBeenCalled();
+ });
+
+ await fireEvent.click(await screen.findByText("John Doe"));
+
+ const usernameInput = /** @type {HTMLInputElement} */ (
+ screen.getByPlaceholderText("Enter your Apache Superset username")
+ );
+ expect(usernameInput.value).toBe("john_doe");
+
+ const toggle = screen.getByLabelText("Show only my dashboards by default");
+ await fireEvent.click(toggle);
+
+ await fireEvent.click(
+ screen.getByRole("button", { name: "Save Preferences" }),
+ );
+
+ await waitFor(() => {
+ expect(mockedApi.updateProfilePreferences).toHaveBeenCalledWith({
+ superset_username: "john_doe",
+ show_only_my_dashboards: true,
+ });
+ expect(mockedAddToast).toHaveBeenCalledWith("Preferences saved", "success");
+ });
+ });
+
+ it("keeps manual save available when lookup fails (degraded fallback)", async () => {
+ mockedApi.lookupSupersetAccounts.mockRejectedValue(
+ new Error("lookup unavailable"),
+ );
+ mockedApi.updateProfilePreferences.mockResolvedValue({
+ status: "success",
+ preference: {
+ user_id: "u-1",
+ superset_username: "manual_user",
+ show_only_my_dashboards: true,
+ },
+ });
+
+ render(ProfilePage);
+
+ await waitFor(() => {
+ expect(mockedApi.lookupSupersetAccounts).toHaveBeenCalled();
+ });
+
+ await screen.findByText(
+ "Cannot load Superset accounts for this environment right now. You can enter username manually.",
+ );
+
+ const usernameInput = screen.getByPlaceholderText(
+ "Enter your Apache Superset username",
+ );
+ await fireEvent.input(usernameInput, { target: { value: "manual_user" } });
+
+ const toggle = screen.getByLabelText("Show only my dashboards by default");
+ await fireEvent.click(toggle);
+
+ await fireEvent.click(
+ screen.getByRole("button", { name: "Save Preferences" }),
+ );
+
+ await waitFor(() => {
+ expect(mockedApi.updateProfilePreferences).toHaveBeenCalledWith({
+ superset_username: "manual_user",
+ show_only_my_dashboards: true,
+ });
+ });
+ });
+});
+
+// [/DEF:frontend.src.routes.profile.__tests__.profile_preferences_integration:Module]
\ No newline at end of file
diff --git a/frontend/src/routes/profile/__tests__/profile-settings-state.integration.test.js b/frontend/src/routes/profile/__tests__/profile-settings-state.integration.test.js
new file mode 100644
index 0000000..33ebcd6
--- /dev/null
+++ b/frontend/src/routes/profile/__tests__/profile-settings-state.integration.test.js
@@ -0,0 +1,204 @@
+// [DEF:frontend.src.routes.profile.__tests__.profile_settings_state_integration:Module]
+// @TIER: STANDARD
+// @SEMANTICS: tests, profile, integration, preload, cancel, reload
+// @PURPOSE: Verifies profile settings preload, cancel without persistence, and saved-state reload behavior.
+// @LAYER: UI (Tests)
+// @RELATION: TESTS -> frontend/src/routes/profile/+page.svelte
+
+import { describe, it, expect, vi, beforeEach } from "vitest";
+import { render, screen, fireEvent, waitFor } from "@testing-library/svelte";
+import { writable } from "svelte/store";
+
+import ProfilePage from "../+page.svelte";
+import { api } from "$lib/api.js";
+import { initializeEnvironmentContext } from "$lib/stores/environmentContext.js";
+
+const mockedApi = /** @type {any} */ (api);
+const mockedInitializeEnvironmentContext =
+ /** @type {any} */ (initializeEnvironmentContext);
+
+vi.mock("$lib/api.js", () => ({
+ api: {
+ getProfilePreferences: vi.fn(),
+ lookupSupersetAccounts: vi.fn(),
+ updateProfilePreferences: vi.fn(),
+ },
+}));
+
+vi.mock("$lib/toasts", () => ({
+ addToast: vi.fn(),
+}));
+
+vi.mock("$lib/stores/environmentContext.js", () => {
+ const context = writable({
+ environments: [{ id: "dev", name: "Development" }],
+ selectedEnvId: "dev",
+ isLoading: false,
+ isLoaded: true,
+ error: null,
+ });
+
+ return {
+ environmentContextStore: { subscribe: context.subscribe },
+ initializeEnvironmentContext: vi.fn().mockResolvedValue(undefined),
+ };
+});
+
+vi.mock("$lib/i18n", () => ({
+ t: {
+ subscribe: (run) => {
+ run({
+ common: { cancel: "Cancel" },
+ profile: {
+ title: "Profile",
+ description: "Manage your dashboard filter preferences.",
+ dashboard_preferences: "Dashboard Preferences",
+ superset_environment: "Superset Environment",
+ superset_environment_placeholder: "Select environment",
+ superset_account: "Your Apache Superset Account",
+ superset_account_placeholder: "Enter your Apache Superset username",
+ lookup_loading: "Loading Superset accounts...",
+ lookup_error:
+ "Cannot load Superset accounts for this environment right now. You can enter username manually.",
+ lookup_button: "Lookup",
+ lookup_search_placeholder: "Search account candidates",
+ lookup_no_results:
+ "No account candidates found. You can enter username manually.",
+ show_only_my_dashboards: "Show only my dashboards by default",
+ save_preferences: "Save Preferences",
+ save_success: "Preferences saved",
+ save_error: "Failed to save preferences. Please try again.",
+ },
+ });
+ return () => {};
+ },
+ },
+ _: vi.fn((key) => key),
+}));
+
+describe("profile-settings-state.integration", () => {
+ beforeEach(() => {
+ vi.clearAllMocks();
+ mockedApi.lookupSupersetAccounts.mockResolvedValue({
+ status: "success",
+ environment_id: "dev",
+ page_index: 0,
+ page_size: 20,
+ total: 0,
+ items: [],
+ });
+ });
+
+ it("preloads saved values and cancel restores snapshot without persistence", async () => {
+ mockedApi.getProfilePreferences.mockResolvedValue({
+ status: "success",
+ preference: {
+ user_id: "u-1",
+ superset_username: "john_doe",
+ show_only_my_dashboards: true,
+ },
+ });
+
+ render(ProfilePage);
+
+ await waitFor(() => {
+ expect(mockedInitializeEnvironmentContext).toHaveBeenCalledTimes(1);
+ expect(mockedApi.getProfilePreferences).toHaveBeenCalledTimes(1);
+ });
+
+ const usernameInput = /** @type {HTMLInputElement} */ (
+ screen.getByPlaceholderText("Enter your Apache Superset username")
+ );
+ const toggleInput = /** @type {HTMLInputElement} */ (
+ screen.getByLabelText("Show only my dashboards by default")
+ );
+
+ expect(usernameInput.value).toBe("john_doe");
+ expect(toggleInput.checked).toBe(true);
+
+ await fireEvent.input(usernameInput, { target: { value: "temp_user" } });
+ await fireEvent.click(toggleInput); // true -> false
+ expect(usernameInput.value).toBe("temp_user");
+ expect(toggleInput.checked).toBe(false);
+
+ await fireEvent.click(screen.getByRole("button", { name: "Cancel" }));
+
+ await waitFor(() => {
+ expect(usernameInput.value).toBe("john_doe");
+ expect(toggleInput.checked).toBe(true);
+ });
+
+ expect(mockedApi.updateProfilePreferences).not.toHaveBeenCalled();
+ });
+
+ it("reloads persisted values after successful save", async () => {
+ mockedApi.getProfilePreferences
+ .mockResolvedValueOnce({
+ status: "success",
+ preference: {
+ user_id: "u-1",
+ superset_username: "john_doe",
+ show_only_my_dashboards: true,
+ },
+ })
+ .mockResolvedValueOnce({
+ status: "success",
+ preference: {
+ user_id: "u-1",
+ superset_username: "new_user",
+ show_only_my_dashboards: true,
+ },
+ });
+
+ mockedApi.updateProfilePreferences.mockResolvedValue({
+ status: "success",
+ preference: {
+ user_id: "u-1",
+ superset_username: "new_user",
+ show_only_my_dashboards: true,
+ },
+ });
+
+ const firstRender = render(ProfilePage);
+
+ await waitFor(() => {
+ expect(mockedApi.getProfilePreferences).toHaveBeenCalledTimes(1);
+ });
+
+ const usernameInput = /** @type {HTMLInputElement} */ (
+ screen.getByPlaceholderText("Enter your Apache Superset username")
+ );
+ await fireEvent.input(usernameInput, { target: { value: "new_user" } });
+
+ await fireEvent.click(
+ screen.getByRole("button", { name: "Save Preferences" }),
+ );
+
+ await waitFor(() => {
+ expect(mockedApi.updateProfilePreferences).toHaveBeenCalledWith({
+ superset_username: "new_user",
+ show_only_my_dashboards: true,
+ });
+ });
+
+ firstRender.unmount();
+
+ render(ProfilePage);
+
+ await waitFor(() => {
+ expect(mockedApi.getProfilePreferences).toHaveBeenCalledTimes(2);
+ });
+
+ const reloadedInput = /** @type {HTMLInputElement} */ (
+ screen.getByPlaceholderText("Enter your Apache Superset username")
+ );
+ const reloadedToggle = /** @type {HTMLInputElement} */ (
+ screen.getByLabelText("Show only my dashboards by default")
+ );
+
+ expect(reloadedInput.value).toBe("new_user");
+ expect(reloadedToggle.checked).toBe(true);
+ });
+});
+
+// [/DEF:frontend.src.routes.profile.__tests__.profile_settings_state_integration:Module]
\ No newline at end of file
diff --git a/frontend/vitest.config.js b/frontend/vitest.config.js
index c8bb1b4..803cca2 100644
--- a/frontend/vitest.config.js
+++ b/frontend/vitest.config.js
@@ -32,14 +32,16 @@ export default defineConfig({
{ find: '$app/environment', replacement: path.resolve(__dirname, './src/lib/stores/__tests__/mocks/environment.js') },
{ find: '$app/stores', replacement: path.resolve(__dirname, './src/lib/stores/__tests__/mocks/stores.js') },
{ find: '$app/navigation', replacement: path.resolve(__dirname, './src/lib/stores/__tests__/mocks/navigation.js') },
- { find: '$env/static/public', replacement: path.resolve(__dirname, './src/lib/stores/__tests__/mocks/env_public.js') }
+ { find: '$env/static/public', replacement: path.resolve(__dirname, './src/lib/stores/__tests__/mocks/env_public.js') },
+ { find: '$components', replacement: path.resolve(__dirname, './src/components') }
]
},
resolve: {
conditions: ['mode=browser', 'browser'],
alias: {
'$lib': path.resolve(__dirname, './src/lib'),
- '$app': path.resolve(__dirname, './src')
+ '$app': path.resolve(__dirname, './src'),
+ '$components': path.resolve(__dirname, './src/components')
}
}
});
\ No newline at end of file
diff --git a/specs/016-multi-user-auth/tasks.md b/specs/016-multi-user-auth/tasks.md
index 2d220da..2e46c60 100644
--- a/specs/016-multi-user-auth/tasks.md
+++ b/specs/016-multi-user-auth/tasks.md
@@ -96,4 +96,12 @@
- **MVP**: Complete Phases 1 and 2. This gives a working auth system with local users.
- **Increment 1**: Complete Phase 3. This adds the critical security controls (RBAC).
-- **Increment 2**: Complete Phase 4. This adds corporate SSO convenience.
\ No newline at end of file
+- **Increment 2**: Complete Phase 4. This adds corporate SSO convenience.
+
+## Post-Delivery RBAC Navigation Hardening (2026-03-06)
+
+- [x] D055 Investigate frontend navigation visibility mismatch (menu items shown despite backend 403 RBAC) in `frontend/src/lib/components/layout/Sidebar.svelte` and `frontend/src/lib/components/layout/TopNavbar.svelte`
+- [x] D056 Implement shared frontend permission utilities and route-level permission enforcement in `frontend/src/lib/auth/permissions.js` and `frontend/src/components/auth/ProtectedRoute.svelte`
+- [x] D057 Implement RBAC-aware sidebar navigation builder and integrate permission-filtered categories in `frontend/src/lib/components/layout/sidebarNavigation.js` and `frontend/src/lib/components/layout/Sidebar.svelte`
+- [x] D058 Add automated frontend tests for permission normalization/checking and sidebar visibility matrix in `frontend/src/lib/auth/__tests__/permissions.test.js` and `frontend/src/lib/components/layout/__tests__/sidebarNavigation.test.js`
+- [x] D059 Execute targeted frontend test verification for RBAC navigation filtering (`npm run test -- src/lib/auth/__tests__/permissions.test.js src/lib/components/layout/__tests__/sidebarNavigation.test.js`)
\ No newline at end of file
diff --git a/specs/023-clean-repo-enterprise/plan.md b/specs/023-clean-repo-enterprise/plan.md
index d12e411..27527bb 100644
--- a/specs/023-clean-repo-enterprise/plan.md
+++ b/specs/023-clean-repo-enterprise/plan.md
@@ -1,7 +1,7 @@
# Implementation Plan: Clean Repository Enterprise Preparation
-**Branch**: `023-clean-repo-enterprise` | **Date**: 2026-03-03 | **Spec**: [`spec.md`](./spec.md)
-**Input**: Feature specification from [`/specs/023-clean-repo-enterprise/spec.md`](./spec.md)
+**Branch**: `023-clean-repo-enterprise` | **Date**: 2026-03-04 | **Spec**: [`spec.md`](./spec.md)
+**Input**: Feature specification from [`/specs/023-clean-repo-enterprise/spec.md`](./spec.md) + clarifications session 2026-03-04
## Summary
@@ -9,7 +9,9 @@
1) исключает тестовые/демо-данные из дистрибутива,
2) блокирует любые внешние интернет-источники ресурсов,
3) допускает загрузку ресурсов только с внутренних серверов компании,
-4) предоставляет обязательную проверку compliance и аудитный отчёт перед выпуском.
+4) предоставляет обязательную проверку compliance и аудитный отчёт перед выпуском,
+5) управляется через декларативный конфиг `.clean-release.yaml` в корне репозитория (FR-015–FR-020),
+6) включает очистку БД от тестовых пользователей/демо-данных как обязательную стадию.
Ключевой UX фиксируется как интерактивный TUI сценарий на основе [`ux_reference.md`](./ux_reference.md): оператор в одном консольном интерфейсе запускает проверку, видит прогресс по этапам, нарушения и итоговый статус (`COMPLIANT`/`BLOCKED`).
@@ -31,7 +33,7 @@
**Scale/Scope**:
- 1 enterprise release flow;
- 1 TUI сценарий подготовки/проверки;
-- 3–6 новых/обновлённых модулей проверки и отчётности;
+- 6–9 новых/обновлённых модулей (+config_loader, filesystem_scanner, db_cleanup_executor);
- документация и контракты в пределах feature-папки.
## Constitution Check
@@ -95,6 +97,8 @@ frontend/
2. Политика source isolation (детекция и запрет внешних internet endpoints, allowlist внутренних серверов).
3. Формат compliance-отчёта для релизного аудита (минимальный обязательный набор полей).
4. Паттерн интеграции TUI-проверки в существующий release workflow (ручной запуск + CI gate).
+5. **[НОВОЕ]** Декларативный конфиг `.clean-release.yaml` как единый source of truth для всех стадий валидации.
+6. **[НОВОЕ]** Очистка БД от тестовых пользователей и демо-данных как обязательная стадия pipeline.
Выход Phase 0: заполненный [`research.md`](./research.md) без `NEEDS CLARIFICATION`.
diff --git a/specs/023-clean-repo-enterprise/quickstart.md b/specs/023-clean-repo-enterprise/quickstart.md
index 1dbabd5..9f455f1 100644
--- a/specs/023-clean-repo-enterprise/quickstart.md
+++ b/specs/023-clean-repo-enterprise/quickstart.md
@@ -14,6 +14,7 @@
3. Настроен внутренний реестр серверов ресурсов (artifact/git/package mirrors).
4. Внешний интернет для узла проверки недоступен/запрещён согласно корпоративной политике.
5. Доступен TUI-скрипт проверки clean-compliance.
+6. В корне репозитория есть `.clean-release.yaml` с секциями `prohibited_categories`, `allowed_sources` и `database_cleanup`.
## Step 1 — Запуск TUI
diff --git a/specs/024-user-dashboard-filter/checklists/requirements.md b/specs/024-user-dashboard-filter/checklists/requirements.md
index 530eb1a..7bff96d 100644
--- a/specs/024-user-dashboard-filter/checklists/requirements.md
+++ b/specs/024-user-dashboard-filter/checklists/requirements.md
@@ -6,7 +6,7 @@
## Content Quality
-- [ ] No implementation details (languages, frameworks, APIs)
+- [x] No implementation details (languages, frameworks, APIs)
- [x] Focused on user value and business needs
- [x] Written for non-technical stakeholders
- [x] All mandatory sections completed
@@ -22,7 +22,7 @@
- [x] No [NEEDS CLARIFICATION] markers remain
- [x] Requirements are testable and unambiguous
- [x] Success criteria are measurable
-- [ ] Success criteria are technology-agnostic (no implementation details)
+- [x] Success criteria are technology-agnostic (no implementation details)
- [x] All acceptance scenarios are defined
- [x] Edge cases are identified
- [x] Scope is clearly bounded
@@ -33,20 +33,12 @@
- [x] All functional requirements have clear acceptance criteria
- [x] User scenarios cover primary flows
- [x] Feature meets measurable outcomes defined in Success Criteria
-- [ ] No implementation details leak into specification
+- [x] No implementation details leak into specification
## Notes
-### Failing items & evidence
+### Validation update
-1) **No implementation details (languages, frameworks, APIs)** — **FAIL**
- Evidence in [spec.md](../spec.md), **Assumptions**:
- - "Apache Superset API provides access to dashboard metadata including owner and modified_by fields"
-
-2) **Success criteria are technology-agnostic (no implementation details)** — **FAIL**
- Evidence in [spec.md](../spec.md), **Success Criteria**:
- - "SC-002: Dashboard list loads filtered results within 2 seconds after preference is saved"
- (This is acceptable as a user-facing performance target, but the current wording implies system internals; should be phrased as a user-perceived outcome.)
-
-3) **No implementation details leak into specification** — **FAIL**
- Root cause: same as (1) and (2). Fix by rewriting assumptions and SC-002 to be implementation-agnostic and user-perceived.
\ No newline at end of file
+- ✅ Assumptions in [spec.md](../spec.md) were rewritten to domain/business wording.
+- ✅ Success criteria wording was updated to user-perceived outcomes (not implementation internals).
+- ✅ No implementation-detail leakage remains in specification text.
\ No newline at end of file
diff --git a/specs/024-user-dashboard-filter/contracts/api.yaml b/specs/024-user-dashboard-filter/contracts/api.yaml
index e754e31..018f7f9 100644
--- a/specs/024-user-dashboard-filter/contracts/api.yaml
+++ b/specs/024-user-dashboard-filter/contracts/api.yaml
@@ -192,6 +192,41 @@ paths:
schema:
type: boolean
default: false
+ - name: filter_title
+ in: query
+ required: false
+ schema:
+ type: array
+ items:
+ type: string
+ - name: filter_git_status
+ in: query
+ required: false
+ schema:
+ type: array
+ items:
+ type: string
+ - name: filter_llm_status
+ in: query
+ required: false
+ schema:
+ type: array
+ items:
+ type: string
+ - name: filter_changed_on
+ in: query
+ required: false
+ schema:
+ type: array
+ items:
+ type: string
+ - name: filter_actor
+ in: query
+ required: false
+ schema:
+ type: array
+ items:
+ type: string
responses:
"200":
description: Dashboards page with effective profile filter metadata
@@ -322,6 +357,38 @@ components:
items:
$ref: "#/components/schemas/SupersetAccountCandidate"
+ GitStatus:
+ type: object
+ properties:
+ branch:
+ type: string
+ nullable: true
+ sync_status:
+ type: string
+ enum: [OK, DIFF, NO_REPO, ERROR]
+ nullable: true
+ has_repo:
+ type: boolean
+ nullable: true
+ has_changes_for_commit:
+ type: boolean
+ nullable: true
+
+ LastTask:
+ type: object
+ properties:
+ task_id:
+ type: string
+ nullable: true
+ status:
+ type: string
+ enum: [PENDING, RUNNING, SUCCESS, FAILED, ERROR, AWAITING_INPUT, WAITING_INPUT, AWAITING_MAPPING]
+ nullable: true
+ validation_status:
+ type: string
+ enum: [PASS, FAIL, WARN, UNKNOWN]
+ nullable: true
+
DashboardItem:
type: object
required:
@@ -338,6 +405,9 @@ components:
last_modified:
type: string
nullable: true
+ created_by:
+ type: string
+ nullable: true
modified_by:
type: string
nullable: true
@@ -345,6 +415,10 @@ components:
type: array
items:
type: string
+ git_status:
+ $ref: "#/components/schemas/GitStatus"
+ last_task:
+ $ref: "#/components/schemas/LastTask"
EffectiveProfileFilter:
type: object
diff --git a/specs/024-user-dashboard-filter/contracts/modules.md b/specs/024-user-dashboard-filter/contracts/modules.md
index e48e659..c0d97ad 100644
--- a/specs/024-user-dashboard-filter/contracts/modules.md
+++ b/specs/024-user-dashboard-filter/contracts/modules.md
@@ -27,7 +27,7 @@ class UserDashboardPreference:
---
-# [DEF:backend.src.core.superset_client.profile_lookup_extension:Module]
+# [DEF:backend.src.core.superset_profile_lookup:Module]
# @TIER: STANDARD
# @SEMANTICS: superset, users, lookup, environment, normalization
# @PURPOSE: Query account candidates from selected Superset environment for profile binding.
@@ -45,7 +45,7 @@ class SupersetAccountLookupAdapter:
# @PRE: raw payload may differ across Superset versions.
# @POST: Returns stable fields {username, display_name, email, is_active}.
def normalize_user_payload(self): ...
-# [/DEF:backend.src.core.superset_client.profile_lookup_extension:Module]
+# [/DEF:backend.src.core.superset_profile_lookup:Module]
---
@@ -213,7 +213,21 @@ Scenario: User binds Superset account from environment and sees filtered dashboa
1. [`frontend.src.routes.profile.+page`](#deffrontendsrcroutesprofilepagemodule) requests account candidates for selected environment.
2. [`backend.src.api.routes.profile`](#defbackendsrcapiroutesprofilemodule) validates self-scope and forwards lookup request.
-3. [`backend.src.services.profile_service`](#defbackendsrcservicesprofile_servicemodule) orchestrates lookup via [`backend.src.core.superset_client.profile_lookup_extension`](#defbackendsrccoresuperset_clientprofile_lookup_extensionmodule).
+3. [`backend.src.services.profile_service`](#defbackendsrcservicesprofile_servicemodule) orchestrates lookup via [`backend.src.core.superset_profile_lookup`](#defbackendsrccoresuperset_profile_lookupmodule).
4. User saves preference; profile service normalizes username and persists [`backend.src.models.profile`](#defbackendsrcmodelsprofilemodule).
5. On `/dashboards`, [`backend.src.api.routes.dashboards.profile_filter_extension`](#defbackendsrcapiroutesdashboardsprofile_filter_extensionmodule) applies `owners OR modified_by` matching and returns effective filter metadata.
-6. [`frontend.src.routes.dashboards.+page.profile_filter_ui_extension`](#deffrontendsrcroutesdashboardspageprofile_filter_ui_extensionmodule) shows active badge and supports temporary "show all" override.
\ No newline at end of file
+6. [`frontend.src.routes.dashboards.+page.profile_filter_ui_extension`](#deffrontendsrcroutesdashboardspageprofile_filter_ui_extensionmodule) shows active badge and supports temporary "show all" override.
+
+---
+
+## Edge-case Trace Evidence (2026-03-05)
+
+| Contract Edge / Invariant | Verification Test | Status |
+|---|---|---|
+| `enable_without_username` validation | `backend/src/api/routes/__tests__/test_profile_api.py::test_patch_profile_preferences_validation_error` | ✅ |
+| `cross_user_mutation` guard | `backend/src/api/routes/__tests__/test_profile_api.py::test_patch_profile_preferences_cross_user_denied` | ✅ |
+| `lookup_env_not_found` recovery path | `backend/src/api/routes/__tests__/test_profile_api.py::test_lookup_superset_accounts_env_not_found` | ✅ |
+| `owners OR modified_by` normalization contract | `backend/src/api/routes/__tests__/test_dashboards.py::test_get_dashboards_profile_filter_contract_owners_or_modified_by` | ✅ |
+| `override_show_all` semantics | `backend/src/api/routes/__tests__/test_dashboards.py::test_get_dashboards_override_show_all_contract` | ✅ |
+| `no_match_results` under active profile filter | `backend/src/api/routes/__tests__/test_dashboards.py::test_get_dashboards_profile_filter_no_match_results_contract` | ✅ |
+| page-scoped override restore UX | `frontend/src/routes/dashboards/__tests__/dashboard-profile-override.integration.test.js` | ✅ |
\ No newline at end of file
diff --git a/specs/024-user-dashboard-filter/plan.md b/specs/024-user-dashboard-filter/plan.md
index 2aa6690..5f0ba53 100644
--- a/specs/024-user-dashboard-filter/plan.md
+++ b/specs/024-user-dashboard-filter/plan.md
@@ -211,6 +211,24 @@ Each story will include independent acceptance tests and explicit completion evi
**Gate Result (post-design)**: PASS
+## Implementation Notes (2026-03-05)
+
+- Profile persistence, API routes, and Superset lookup are fully wired:
+ - `backend/src/models/profile.py`
+ - `backend/src/schemas/profile.py`
+ - `backend/src/services/profile_service.py`
+ - `backend/src/api/routes/profile.py`
+ - `backend/src/core/superset_profile_lookup.py`
+ - router registration includes `profile.router` in `backend/src/app.py`.
+- Dashboards API implements profile-default behavior with deterministic metadata:
+ - `page_context`, `apply_profile_default`, `override_show_all`
+ - response envelope `effective_profile_filter`.
+- Frontend flows implemented and validated:
+ - profile page preload/save/cancel + degraded lookup fallback,
+ - dashboards filter badge + temporary show-all + restore-on-return.
+- Test evidence captured in [`quickstart.md`](./quickstart.md) and [`tests/coverage.md`](./tests/coverage.md).
+- Operational note: Svelte deprecation/a11y warnings remain in dashboards UI (`on:*` directive deprecations and label association warnings). They are non-blocking for feature behavior and should be addressed in a dedicated frontend cleanup task.
+
## Complexity Tracking
> Fill ONLY if Constitution Check has violations that must be justified
diff --git a/specs/024-user-dashboard-filter/quickstart.md b/specs/024-user-dashboard-filter/quickstart.md
index 1c534af..7a0e637 100644
--- a/specs/024-user-dashboard-filter/quickstart.md
+++ b/specs/024-user-dashboard-filter/quickstart.md
@@ -159,14 +159,14 @@ Expected:
## Acceptance Checklist (Operator)
-- [ ] Profile page visible and reachable from navigation.
-- [ ] Environment-based account lookup works for binding.
-- [ ] Lookup failure path allows manual entry (non-blocking).
-- [ ] Preference save persists across reload/session.
-- [ ] `/dashboards` applies default filter by `owners OR modified_by`.
-- [ ] Temporary clear shows all dashboards without changing saved preference.
-- [ ] Re-entering `/dashboards` restores default filtered behavior.
-- [ ] i18n texts are present for new profile/lookup/filter states (EN + RU).
+- [x] Profile page visible and reachable from navigation.
+- [x] Environment-based account lookup works for binding.
+- [x] Lookup failure path allows manual entry (non-blocking).
+- [x] Preference save persists across reload/session.
+- [x] `/dashboards` applies default filter by `owners OR modified_by`.
+- [x] Temporary clear shows all dashboards without changing saved preference.
+- [x] Re-entering `/dashboards` restores default filtered behavior.
+- [x] i18n texts are present for new profile/lookup/filter states (EN + RU).
---
@@ -197,6 +197,22 @@ cd backend && .venv/bin/python3 -m pytest src/api/routes/__tests__ -k "profile o
---
+## Validation Evidence (2026-03-05)
+
+Automated verification runs completed:
+
+1. Frontend integration scenarios:
+ - `cd frontend && npm run test -- src/routes/profile/__tests__/profile-preferences.integration.test.js src/routes/profile/__tests__/profile-settings-state.integration.test.js src/routes/dashboards/__tests__/dashboard-profile-override.integration.test.js`
+ - Result: **3 test files passed, 6 tests passed**.
+
+2. Backend profile/dashboards route contracts:
+ - `cd backend && .venv/bin/python3 -m pytest src/api/routes/__tests__/test_profile_api.py src/api/routes/__tests__/test_dashboards.py`
+ - Result: **26 tests passed**.
+
+Artifacts and matrix are recorded in [`tests/coverage.md`](./tests/coverage.md).
+
+---
+
## Exit Criteria
Feature is considered ready for implementation tasking when:
diff --git a/specs/024-user-dashboard-filter/spec.md b/specs/024-user-dashboard-filter/spec.md
index c2288ce..a5fe78d 100644
--- a/specs/024-user-dashboard-filter/spec.md
+++ b/specs/024-user-dashboard-filter/spec.md
@@ -117,19 +117,19 @@ As a user, I want to temporarily view all dashboards even when my filter is enab
### Measurable Outcomes
- **SC-001**: Users can configure their dashboard filter preference in under 30 seconds
-- **SC-002**: After saving preferences, users see the dashboard list update to the filtered view within 2 seconds
+- **SC-002**: After saving preferences, users can return to the dashboards list and see their personalized view within 2 seconds
- **SC-003**: 95% of users successfully save their profile settings on first attempt
- **SC-004**: Users report a 50% reduction in time spent finding their dashboards in surveys
- **SC-005**: During peak usage, users can save preferences reliably without noticeable slowdowns
-- **SC-006**: Filter correctly identifies user-owned dashboards with 99% accuracy based on owner and modified_by fields
+- **SC-006**: Filter correctly identifies user-related dashboards with 99% accuracy based on dashboard ownership and last-editor information
- **SC-007**: Profile settings persist correctly across 100% of user sessions
## Assumptions
- Users have an existing Apache Superset account with a unique username
-- The application can access dashboard metadata that includes owner and modified_by identifiers
-- The application has an existing user authentication system that can be extended with profile settings
-- Dashboard list page exists and can be modified to support filtering
+- Dashboard records include information about dashboard ownership and last editor
+- Users are authenticated before they can open and update profile settings
+- Users have a central dashboards list where the default filter behavior can be applied
- Apache Superset username format follows standard conventions (alphanumeric, underscores, hyphens allowed)
- Each user has one global Superset username that is used in all environments
-- Configured Superset environments expose account listing data that can be queried during profile binding
+- Configured Superset environments provide discoverable account names for profile binding
diff --git a/specs/024-user-dashboard-filter/tasks.md b/specs/024-user-dashboard-filter/tasks.md
index a6d14bd..aaa483d 100644
--- a/specs/024-user-dashboard-filter/tasks.md
+++ b/specs/024-user-dashboard-filter/tasks.md
@@ -19,10 +19,10 @@
**Purpose**: Initialize scaffolding and shared assets for profile preferences and dashboards filter UX.
-- [ ] T001 Create feature scaffolding files in `backend/src/models/profile.py`, `backend/src/schemas/profile.py`, `backend/src/services/profile_service.py`, `backend/src/api/routes/profile.py`, and `frontend/src/routes/profile/+page.svelte`
-- [ ] T002 [P] Add shared fixture placeholders for profile/filter flows in `backend/tests/fixtures/profile/fixtures_profile_filter.json` and `frontend/src/routes/profile/__tests__/fixtures/profile.fixtures.js`
-- [ ] T003 [P] Add i18n placeholder keys for profile lookup/filter states in `frontend/src/lib/i18n/locales/en.json` and `frontend/src/lib/i18n/locales/ru.json`
-- [ ] T004 [P] Add profile navigation placeholder entry in `frontend/src/lib/components/layout/Sidebar.svelte`
+- [x] T001 Create feature scaffolding files in `backend/src/models/profile.py`, `backend/src/schemas/profile.py`, `backend/src/services/profile_service.py`, `backend/src/api/routes/profile.py`, and `frontend/src/routes/profile/+page.svelte`
+- [x] T002 [P] Add shared fixture placeholders for profile/filter flows in `backend/tests/fixtures/profile/fixtures_profile_filter.json` and `frontend/src/routes/profile/__tests__/fixtures/profile.fixtures.js`
+- [x] T003 [P] Add i18n placeholder keys for profile lookup/filter states in `frontend/src/lib/i18n/locales/en.json` and `frontend/src/lib/i18n/locales/ru.json`
+- [x] T004 [P] Add profile navigation placeholder entry in `frontend/src/lib/components/layout/Sidebar.svelte`
---
@@ -32,14 +32,14 @@
**⚠️ CRITICAL**: No user story work starts before this phase is complete.
-- [ ] T005 Implement persistent `UserDashboardPreference` entity and repository access in `backend/src/models/profile.py` and `backend/src/core/auth/repository.py`
-- [ ] T006 [P] Implement profile preference and Superset lookup schemas in `backend/src/schemas/profile.py`
-- [ ] T007 [P] Implement Superset account lookup adapter for selected environment (paging/sort passthrough and normalization) in `backend/src/core/superset_client.py`
-- [ ] T008 Implement profile domain orchestration in `backend/src/services/profile_service.py` (CRITICAL: PRE authenticated user + payload/environment; POST self-scoped normalized preference save and deterministic actor matching helper; UX_STATE backend service, no direct UI states; TEST_FIXTURE `valid_profile_update`; TEST_EDGE `enable_without_username`, `cross_user_mutation`, `lookup_env_not_found`)
-- [ ] T009 Implement profile API endpoints in `backend/src/api/routes/profile.py` (CRITICAL: PRE valid auth token, self context, `environment_id` for lookup; POST returns self-only preference payload or degraded lookup warning payload; UX_STATE supports profile page save/lookup state mapping through stable response shape; TEST_FIXTURE `get_my_preference_ok`; TEST_EDGE `unauthorized_request`, `invalid_username_payload`, `superset_lookup_upstream_error`)
-- [ ] T010 Wire profile router registration in `backend/src/api/routes/__init__.py` and `backend/src/app.py`
-- [ ] T011 [P] Extend frontend API methods for profile preferences, Superset account lookup, and dashboards profile filter params in `frontend/src/lib/api.js`
-- [ ] T012 [P] Create backend test skeletons for profile and dashboards filter contracts in `backend/src/api/routes/__tests__/test_profile_api.py` and `backend/src/api/routes/__tests__/test_dashboards.py`
+- [x] T005 Implement persistent `UserDashboardPreference` entity and repository access in `backend/src/models/profile.py` and `backend/src/core/auth/repository.py`
+- [x] T006 [P] Implement profile preference and Superset lookup schemas in `backend/src/schemas/profile.py`
+- [x] T007 [P] Implement Superset account lookup adapter for selected environment (paging/sort passthrough and normalization) in `backend/src/core/superset_client.py`
+- [x] T008 Implement profile domain orchestration in `backend/src/services/profile_service.py` (CRITICAL: PRE authenticated user + payload/environment; POST self-scoped normalized preference save and deterministic actor matching helper; UX_STATE backend service, no direct UI states; TEST_FIXTURE `valid_profile_update`; TEST_EDGE `enable_without_username`, `cross_user_mutation`, `lookup_env_not_found`)
+- [x] T009 Implement profile API endpoints in `backend/src/api/routes/profile.py` (CRITICAL: PRE valid auth token, self context, `environment_id` for lookup; POST returns self-only preference payload or degraded lookup warning payload; UX_STATE supports profile page save/lookup state mapping through stable response shape; TEST_FIXTURE `get_my_preference_ok`; TEST_EDGE `unauthorized_request`, `invalid_username_payload`, `superset_lookup_upstream_error`)
+- [x] T010 Wire profile router registration in `backend/src/api/routes/__init__.py` and `backend/src/app.py`
+- [x] T011 [P] Extend frontend API methods for profile preferences, Superset account lookup, and dashboards profile filter params in `frontend/src/lib/api.js`
+- [x] T012 [P] Create backend test skeletons for profile and dashboards filter contracts in `backend/src/api/routes/__tests__/test_profile_api.py` and `backend/src/api/routes/__tests__/test_dashboards.py`
**Checkpoint**: Foundation ready; user stories can now be implemented and validated independently.
@@ -53,19 +53,19 @@
### Tests for User Story 1
-- [ ] T013 [P] [US1] Add contract tests for `GET/PATCH /api/profile/preferences` validation and persistence rules in `backend/src/api/routes/__tests__/test_profile_api.py`
-- [ ] T014 [P] [US1] Add dashboards filter contract tests for `owners OR modified_by` with trim + case-insensitive matching in `backend/src/api/routes/__tests__/test_dashboards.py`
-- [ ] T015 [P] [US1] Add frontend integration test for profile binding happy path (lookup success and manual fallback save) in `frontend/src/routes/profile/__tests__/profile-preferences.integration.test.js`
+- [x] T013 [P] [US1] Add contract tests for `GET/PATCH /api/profile/preferences` validation and persistence rules in `backend/src/api/routes/__tests__/test_profile_api.py`
+- [x] T014 [P] [US1] Add dashboards filter contract tests for `owners OR modified_by` with trim + case-insensitive matching in `backend/src/api/routes/__tests__/test_dashboards.py`
+- [x] T015 [P] [US1] Add frontend integration test for profile binding happy path (lookup success and manual fallback save) in `frontend/src/routes/profile/__tests__/profile-preferences.integration.test.js`
### Implementation for User Story 1
-- [ ] T016 [US1] Implement lookup + save rules (global username, normalization, non-blocking degraded lookup) in `backend/src/services/profile_service.py` (CRITICAL: PRE authenticated user and selected environment for lookup; POST persisted normalized global username and lookup fallback remains save-capable; UX_STATE supports `LookupLoading/LookupError/Saving/SaveSuccess/SaveError` via service outcomes; TEST_FIXTURE `valid_profile_update`; TEST_EDGE `enable_without_username`, `lookup_env_not_found`)
-- [ ] T017 [US1] Implement profile route request/response mapping for preference save/get + account lookup in `backend/src/api/routes/profile.py` (CRITICAL: PRE self-scoped authenticated request; POST stable `ProfilePreferenceResponse` and `SupersetAccountLookupResponse`; UX_STATE provides explicit degraded warning for profile UI recovery; TEST_FIXTURE `get_my_preference_ok`; TEST_EDGE `invalid_username_payload`, `superset_lookup_upstream_error`)
-- [ ] T018 [US1] Implement default profile filtering on dashboards main list in `backend/src/api/routes/dashboards.py` (CRITICAL: PRE parsed `page_context/apply_profile_default/override_show_all` query context; POST response totals and pagination remain deterministic with `owners OR modified_by` matching; UX_STATE supports filtered list and empty-filtered state metadata; TEST_FIXTURE `profile_filter_applied`; TEST_EDGE `no_match_results`)
-- [ ] T019 [P] [US1] Implement profile page form with environment selector, account suggestions, manual username field, toggle, and save/cancel actions in `frontend/src/routes/profile/+page.svelte` (CRITICAL: PRE authenticated user and initial preference load; POST persisted values reflected in form state; UX_STATE `Default`, `LookupLoading`, `LookupError`, `Saving`, `SaveSuccess`, `SaveError`; TEST_FIXTURE `bind_account_happy_path`; TEST_EDGE `lookup_failed_manual_fallback`, `invalid_username`, `cancel_changes`)
-- [ ] T020 [P] [US1] Implement dashboards active-filter indicator and filtered empty-state rendering in `frontend/src/routes/dashboards/+page.svelte` (CRITICAL: PRE effective filter metadata returned by dashboards API; POST filtered context is visible and understandable; UX_STATE `FilterActive`, `EmptyFiltered`; TEST_FIXTURE `default_profile_filter_applied`; TEST_EDGE `no_matching_dashboards`)
-- [ ] T021 [US1] Finalize localized copy for profile binding, lookup warning, and filter-active texts in `frontend/src/lib/i18n/locales/en.json` and `frontend/src/lib/i18n/locales/ru.json`
-- [ ] T022 [US1] Verify implementation matches `specs/024-user-dashboard-filter/ux_reference.md` (Happy Path & Errors)
+- [x] T016 [US1] Implement lookup + save rules (global username, normalization, non-blocking degraded lookup) in `backend/src/services/profile_service.py` (CRITICAL: PRE authenticated user and selected environment for lookup; POST persisted normalized global username and lookup fallback remains save-capable; UX_STATE supports `LookupLoading/LookupError/Saving/SaveSuccess/SaveError` via service outcomes; TEST_FIXTURE `valid_profile_update`; TEST_EDGE `enable_without_username`, `lookup_env_not_found`)
+- [x] T017 [US1] Implement profile route request/response mapping for preference save/get + account lookup in `backend/src/api/routes/profile.py` (CRITICAL: PRE self-scoped authenticated request; POST stable `ProfilePreferenceResponse` and `SupersetAccountLookupResponse`; UX_STATE provides explicit degraded warning for profile UI recovery; TEST_FIXTURE `get_my_preference_ok`; TEST_EDGE `invalid_username_payload`, `superset_lookup_upstream_error`)
+- [x] T018 [US1] Implement default profile filtering on dashboards main list in `backend/src/api/routes/dashboards.py` (CRITICAL: PRE parsed `page_context/apply_profile_default/override_show_all` query context; POST response totals and pagination remain deterministic with `owners OR modified_by` matching; UX_STATE supports filtered list and empty-filtered state metadata; TEST_FIXTURE `profile_filter_applied`; TEST_EDGE `no_match_results`)
+- [x] T019 [P] [US1] Implement profile page form with environment selector, account suggestions, manual username field, toggle, and save/cancel actions in `frontend/src/routes/profile/+page.svelte` (CRITICAL: PRE authenticated user and initial preference load; POST persisted values reflected in form state; UX_STATE `Default`, `LookupLoading`, `LookupError`, `Saving`, `SaveSuccess`, `SaveError`; TEST_FIXTURE `bind_account_happy_path`; TEST_EDGE `lookup_failed_manual_fallback`, `invalid_username`, `cancel_changes`)
+- [x] T020 [P] [US1] Implement dashboards active-filter indicator and filtered empty-state rendering in `frontend/src/routes/dashboards/+page.svelte` (CRITICAL: PRE effective filter metadata returned by dashboards API; POST filtered context is visible and understandable; UX_STATE `FilterActive`, `EmptyFiltered`; TEST_FIXTURE `default_profile_filter_applied`; TEST_EDGE `no_matching_dashboards`)
+- [x] T021 [US1] Finalize localized copy for profile binding, lookup warning, and filter-active texts in `frontend/src/lib/i18n/locales/en.json` and `frontend/src/lib/i18n/locales/ru.json`
+- [x] T022 [US1] Verify implementation matches `specs/024-user-dashboard-filter/ux_reference.md` (Happy Path & Errors)
**Checkpoint**: US1 is independently functional and demo-ready as MVP.
@@ -79,15 +79,15 @@
### Tests for User Story 2
-- [ ] T023 [P] [US2] Add backend authorization tests for self-only preference mutation and cross-user rejection in `backend/src/api/routes/__tests__/test_profile_api.py`
-- [ ] T024 [P] [US2] Add frontend tests for preload, cancel without persistence, and saved-state reload in `frontend/src/routes/profile/__tests__/profile-settings-state.integration.test.js`
+- [x] T023 [P] [US2] Add backend authorization tests for self-only preference mutation and cross-user rejection in `backend/src/api/routes/__tests__/test_profile_api.py`
+- [x] T024 [P] [US2] Add frontend tests for preload, cancel without persistence, and saved-state reload in `frontend/src/routes/profile/__tests__/profile-settings-state.integration.test.js`
### Implementation for User Story 2
-- [ ] T025 [US2] Implement preload and cancel-to-last-saved behavior in `frontend/src/routes/profile/+page.svelte` (CRITICAL: PRE current preference loaded before editing; POST cancel does not mutate persisted data; UX_STATE `Default`, `Saving`, `SaveError` retain user context; TEST_EDGE `cancel_changes`)
-- [ ] T026 [US2] Enforce self-scope mutation guard across service and route in `backend/src/services/profile_service.py` and `backend/src/api/routes/profile.py` (CRITICAL: PRE authenticated actor identity; POST attempts to mutate another user are denied; UX_STATE maps denial into actionable error feedback; TEST_EDGE `cross_user_mutation`)
-- [ ] T027 [US2] Implement consistent save success/error/validation feedback mapping in `frontend/src/routes/profile/+page.svelte` and `frontend/src/lib/api.js` (CRITICAL: PRE response includes validation or error details; POST user gets clear recovery guidance without data loss; UX_STATE `SaveSuccess`, `SaveError`; TEST_EDGE `invalid_username`)
-- [ ] T028 [US2] Verify implementation matches `specs/024-user-dashboard-filter/ux_reference.md` (Happy Path & Errors)
+- [x] T025 [US2] Implement preload and cancel-to-last-saved behavior in `frontend/src/routes/profile/+page.svelte` (CRITICAL: PRE current preference loaded before editing; POST cancel does not mutate persisted data; UX_STATE `Default`, `Saving`, `SaveError` retain user context; TEST_EDGE `cancel_changes`)
+- [x] T026 [US2] Enforce self-scope mutation guard across service and route in `backend/src/services/profile_service.py` and `backend/src/api/routes/profile.py` (CRITICAL: PRE authenticated actor identity; POST attempts to mutate another user are denied; UX_STATE maps denial into actionable error feedback; TEST_EDGE `cross_user_mutation`)
+- [x] T027 [US2] Implement consistent save success/error/validation feedback mapping in `frontend/src/routes/profile/+page.svelte` and `frontend/src/lib/api.js` (CRITICAL: PRE response includes validation or error details; POST user gets clear recovery guidance without data loss; UX_STATE `SaveSuccess`, `SaveError`; TEST_EDGE `invalid_username`)
+- [x] T028 [US2] Verify implementation matches `specs/024-user-dashboard-filter/ux_reference.md` (Happy Path & Errors)
**Checkpoint**: US2 independently functional with robust state management and ownership guardrails.
@@ -101,14 +101,14 @@
### Tests for User Story 3
-- [ ] T029 [P] [US3] Add backend tests for `override_show_all`, `page_context`, and effective filter metadata semantics in `backend/src/api/routes/__tests__/test_dashboards.py`
-- [ ] T030 [P] [US3] Add frontend integration test for temporary clear and restore-on-return flow in `frontend/src/routes/dashboards/__tests__/dashboard-profile-override.integration.test.js`
+- [x] T029 [P] [US3] Add backend tests for `override_show_all`, `page_context`, and effective filter metadata semantics in `backend/src/api/routes/__tests__/test_dashboards.py`
+- [x] T030 [P] [US3] Add frontend integration test for temporary clear and restore-on-return flow in `frontend/src/routes/dashboards/__tests__/dashboard-profile-override.integration.test.js`
### Implementation for User Story 3
-- [ ] T031 [US3] Implement override semantics and effective filter metadata in dashboards API response in `backend/src/api/routes/dashboards.py` (CRITICAL: PRE main-list context with override flag; POST `effective_profile_filter.applied=false` when override is active and persisted preference remains unchanged; UX_STATE enables clear distinction between active-filter and override states; TEST_FIXTURE `profile_filter_applied`; TEST_EDGE `override_show_all`, `no_match_results`)
-- [ ] T032 [US3] Implement temporary show-all control and restore-on-return behavior in `frontend/src/routes/dashboards/+page.svelte` (CRITICAL: PRE active filtered state available from response metadata; POST override is page-scoped and non-persistent; UX_STATE `FilterActive`, `OverrideActive`, `EmptyFiltered`; TEST_FIXTURE `default_profile_filter_applied`; TEST_EDGE `temporary_show_all`, `return_to_page`)
-- [ ] T033 [US3] Verify implementation matches `specs/024-user-dashboard-filter/ux_reference.md` (Happy Path & Errors)
+- [x] T031 [US3] Implement override semantics and effective filter metadata in dashboards API response in `backend/src/api/routes/dashboards.py` (CRITICAL: PRE main-list context with override flag; POST `effective_profile_filter.applied=false` when override is active and persisted preference remains unchanged; UX_STATE enables clear distinction between active-filter and override states; TEST_FIXTURE `profile_filter_applied`; TEST_EDGE `override_show_all`, `no_match_results`)
+- [x] T032 [US3] Implement temporary show-all control and restore-on-return behavior in `frontend/src/routes/dashboards/+page.svelte` (CRITICAL: PRE active filtered state available from response metadata; POST override is page-scoped and non-persistent; UX_STATE `FilterActive`, `OverrideActive`, `EmptyFiltered`; TEST_FIXTURE `default_profile_filter_applied`; TEST_EDGE `temporary_show_all`, `return_to_page`)
+- [x] T033 [US3] Verify implementation matches `specs/024-user-dashboard-filter/ux_reference.md` (Happy Path & Errors)
**Checkpoint**: US3 independently functional with non-persistent override behavior.
@@ -118,10 +118,10 @@
**Purpose**: Align final contracts/docs and record verification evidence across all delivered stories.
-- [ ] T034 [P] Reconcile OpenAPI contract with implemented payloads and query semantics in `specs/024-user-dashboard-filter/contracts/api.yaml`
-- [ ] T035 [P] Reconcile CRITICAL module contract annotations and edge-case traces in `specs/024-user-dashboard-filter/contracts/modules.md`
-- [ ] T036 [P] Execute quickstart validation and record outcomes in `specs/024-user-dashboard-filter/quickstart.md` and `specs/024-user-dashboard-filter/tests/coverage.md`
-- [ ] T037 Document final implementation notes and operational constraints in `specs/024-user-dashboard-filter/plan.md`
+- [x] T034 [P] Reconcile OpenAPI contract with implemented payloads and query semantics in `specs/024-user-dashboard-filter/contracts/api.yaml`
+- [x] T035 [P] Reconcile CRITICAL module contract annotations and edge-case traces in `specs/024-user-dashboard-filter/contracts/modules.md`
+- [x] T036 [P] Execute quickstart validation and record outcomes in `specs/024-user-dashboard-filter/quickstart.md` and `specs/024-user-dashboard-filter/tests/coverage.md`
+- [x] T037 Document final implementation notes and operational constraints in `specs/024-user-dashboard-filter/plan.md`
---
@@ -200,5 +200,22 @@ Task: "T030 [US3] Frontend override restore integration test in frontend/src/rou
### UX Preservation Rule
-No task in this plan intentionally degrades UX defined in `specs/024-user-dashboard-filter/ux_reference.md`.
-Mandatory UX verification tasks are included at the end of each user story phase: **T022**, **T028**, **T033**.
\ No newline at end of file
+No task in this plan intentionally degrades UX defined in `specs/024-user-dashboard-filter/ux_reference.md`.
+Mandatory UX verification tasks are included at the end of each user story phase: **T022**, **T028**, **T033**.
+
+---
+
+## Post-Delivery Debug Status (2026-03-05)
+
+- [x] D001 Investigate degraded `/api/profile/superset-accounts` lookup for `sort_order=asc` in `ss-dev`
+- [x] D002 Confirm diagnosis using enhanced runtime logs before behavior fix
+- [x] D003 Apply behavior fix and re-run targeted profile lookup verification
+- [x] D004 Investigate zero profile-filter matches on `/api/dashboards` for `admin` by logging raw Superset actor fields
+- [x] D005 Confirm diagnosis from actor-field logs before changing filter behavior
+- [x] D006 Implement dashboard actor hydration fallback (detail API) and verify targeted dashboards filter tests
+- [x] D007 Investigate regression where `/api/dashboards` triggered per-dashboard `SupersetClient.get_dashboard` fan-out
+- [x] D008 Replace O(N) hydration with O(1) profile actor alias lookup (username + display name) in dashboards filter path
+- [x] D009 Verify updated dashboards filter behavior with targeted backend tests including no-detail-fanout assertion
+- [x] D007 Investigate regression where `/api/dashboards` triggers per-dashboard `SupersetClient.get_dashboard` fan-out
+- [x] D008 Replace O(N) hydration with O(1) profile actor alias lookup (username + display name) in dashboards filter path
+- [x] D009 Verify updated dashboards filter behavior with targeted backend tests including no-detail-fanout assertion
\ No newline at end of file
diff --git a/specs/024-user-dashboard-filter/tests/coverage.md b/specs/024-user-dashboard-filter/tests/coverage.md
new file mode 100644
index 0000000..189d0c1
--- /dev/null
+++ b/specs/024-user-dashboard-filter/tests/coverage.md
@@ -0,0 +1,49 @@
+# Test Coverage Matrix: User Profile Dashboard Filter
+
+## Validation Snapshot
+
+- **Date (UTC)**: 2026-03-05
+- **Feature**: `024-user-dashboard-filter`
+
+## Executed Automated Test Runs
+
+### 1) Backend profile + dashboards contracts
+
+```bash
+cd backend && .venv/bin/python3 -m pytest src/api/routes/__tests__/test_profile_api.py src/api/routes/__tests__/test_dashboards.py
+```
+
+- **Result**: `27 passed`
+
+### 2) Frontend profile + dashboards integration scenarios
+
+```bash
+cd frontend && npm run test -- src/routes/profile/__tests__/profile-preferences.integration.test.js src/routes/profile/__tests__/profile-settings-state.integration.test.js src/routes/dashboards/__tests__/dashboard-profile-override.integration.test.js
+```
+
+- **Result**: `3 files passed`, `6 tests passed`
+
+## Coverage by User Story
+
+| User Story | Scope | Evidence | Status |
+|---|---|---|---|
+| US1 | Profile binding + default dashboard filtering | `test_profile_api.py`, `test_dashboards.py`, `profile-preferences.integration.test.js` | ✅ |
+| US2 | Preload/cancel/saved-state management + self-scope guard | `test_profile_api.py`, `profile-settings-state.integration.test.js` | ✅ |
+| US3 | Temporary show-all override + restore-on-return | `test_dashboards.py`, `dashboard-profile-override.integration.test.js` | ✅ |
+
+## Contract Edge Evidence
+
+| Contract Edge | Verification Test | Status |
+|---|---|---|
+| `enable_without_username` | `test_patch_profile_preferences_validation_error` | ✅ |
+| `cross_user_mutation` | `test_patch_profile_preferences_cross_user_denied` | ✅ |
+| `lookup_env_not_found` | `test_lookup_superset_accounts_env_not_found` | ✅ |
+| `owners OR modified_by` (trim + case-insensitive) | `test_get_dashboards_profile_filter_contract_owners_or_modified_by` | ✅ |
+| `override_show_all` metadata semantics | `test_get_dashboards_override_show_all_contract` | ✅ |
+| `no_match_results` with active profile filter | `test_get_dashboards_profile_filter_no_match_results_contract` | ✅ |
+| restore-on-return behavior (page-scoped override) | `dashboard-profile-override.integration.test.js` | ✅ |
+
+## Notes
+
+- Feature scenarios required by quickstart and story contracts are covered by automated tests above.
+- Frontend run reports Svelte deprecation/a11y warnings in dashboards UI; these are non-blocking for current feature behavior and do not fail tests.
\ No newline at end of file