# cortex-hub / ai-hub / integration_tests / test_system_config_v2.py
import os
import httpx
import pytest
from conftest import BASE_URL

def _headers(user_id=None):
    uid = user_id or os.getenv("SYNC_TEST_USER_ID", "")
    return {"X-User-ID": uid}

def test_system_config_decoupling_flow():
    """
    E2E flow: global (admin) provider config is decoupled from per-user
    personal preferences.

    0. Reset the admin's personal overrides and widen the group policy
       so it allows both 'gemini' and 'openai'.
    1. Admin sets the global LLM default provider to 'openai'.
    2. Admin reads the global config back and verifies it.
    3. A non-admin caller is rejected by the admin config endpoint.
    4. With no personal override, /users/me/config reports 'openai' as
       the effective provider (merged down from the global default).
    5. The same user sets a personal override of 'gemini'.
    6. Personal preferences and effective config now show 'gemini'...
    7. ...while the global config still reports 'openai' (decoupled).
    8. Cleanup: global config is reset to 'gemini' for later tests.

    NOTE(review): every /users/me call below uses the admin's ID as
    "me" — presumably SYNC_TEST_USER_ID is the only user guaranteed to
    exist in the test environment; confirm.
    """
    # If SYNC_TEST_USER_ID is unset this is None and _headers() falls back
    # to the same (missing) env var, sending an empty X-User-ID header.
    admin_id = os.getenv("SYNC_TEST_USER_ID")
    with httpx.Client(timeout=10.0) as client:
        # 0. Clear personal overrides and update group policy to allow 'openai'
        r_groups = client.get(f"{BASE_URL}/users/admin/groups", headers=_headers(admin_id))
        groups = r_groups.json()
        for g in groups:
            if g["name"] == "Integration Default Group":
                client.put(f"{BASE_URL}/users/admin/groups/{g['id']}", json={"name": g["name"], "policy": {"llm": ["gemini", "openai"]}}, headers=_headers(admin_id))
                break

        # Wipe the admin's personal config so step 4 sees the pure global default.
        client.put(f"{BASE_URL}/users/me/config", json={"llm": {}, "tts": {}, "stt": {}, "statuses": {}}, headers=_headers(admin_id))

        # 1. Admin sets global LLM default
        global_payload = {
            "llm": {
                "active_provider": "openai",
                "providers": {
                    "openai": {"model": "gpt-4", "api_key": "sk-global-key"}
                }
            },
            "tts": {}, "stt": {}, "statuses": {}
        }
        r = client.put(f"{BASE_URL}/admin/config/providers", json=global_payload, headers=_headers(admin_id))
        assert r.status_code == 200, f"Admin failed to update global config: {r.text}"

        # 2. Verify global config round-trips
        r = client.get(f"{BASE_URL}/admin/config/providers", headers=_headers(admin_id))
        assert r.status_code == 200
        assert r.json()["llm"]["active_provider"] == "openai"

        # 3. A non-admin user ID must be blocked from the admin config endpoint
        #    (any of 401/403/404 is accepted — the API may hide the route entirely).
        r = client.get(f"{BASE_URL}/admin/config/providers", headers=_headers("some-random-user"))
        assert r.status_code in (401, 403, 404), f"Normal user should be blocked from admin config: {r.status_code}"

        # 4. With no personal override (cleared in step 0), the user's
        #    effective config merges down from the global default.
        r = client.get(f"{BASE_URL}/users/me/config", headers=_headers(admin_id)) # Still using admin_id as 'me'
        assert r.status_code == 200
        data = r.json()
        # If the user has no personal overrides, they get the global default
        assert data["effective"]["llm"]["active_provider"] == "openai"

        # 5. Set a personal override (even admins have personal prefs decoupled from global)
        personal_payload = {
            "llm": {
                "active_provider": "gemini",
                "providers": {
                    "gemini": {"model": "gemini-pro", "api_key": "sk-personal-key"}
                }
            },
            "tts": {}, "stt": {}, "statuses": {}
        }
        r = client.put(f"{BASE_URL}/users/me/config", json=personal_payload, headers=_headers(admin_id))
        assert r.status_code == 200

        # 6. Personal override wins in both 'preferences' and 'effective'
        r = client.get(f"{BASE_URL}/users/me/config", headers=_headers(admin_id))
        data = r.json()
        assert data["preferences"]["llm"]["active_provider"] == "gemini"
        assert data["effective"]["llm"]["active_provider"] == "gemini"

        # 7. Global config is untouched by the personal override
        r = client.get(f"{BASE_URL}/admin/config/providers", headers=_headers(admin_id))
        assert r.json()["llm"]["active_provider"] == "openai"

        # 8. Cleanup: reset global config to gemini for other tests
        #    (response deliberately unchecked — best-effort cleanup).
        global_payload_reset = {
            "llm": {
                "active_provider": "gemini",
                "providers": {
                    "gemini": {"model": "gemini-3-flash-preview", "api_key": os.getenv("GEMINI_API_KEY", "")}
                }
            },
            "tts": {}, "stt": {}, "statuses": {}
        }
        client.put(f"{BASE_URL}/admin/config/providers", json=global_payload_reset, headers=_headers(admin_id))

def test_statuses_persistence():
    """Provider statuses written to the global config must round-trip
    through the admin endpoint and surface in a user's effective config."""
    admin_id = os.getenv("SYNC_TEST_USER_ID")
    headers = _headers(admin_id)
    with httpx.Client(timeout=10.0) as client:
        # 1. Push a config whose only populated section is `statuses`.
        payload = {
            "llm": {}, "tts": {}, "stt": {},
            "statuses": {"llm_openai": "success", "llm_gemini": "error"}
        }
        resp = client.put(f"{BASE_URL}/admin/config/providers", json=payload, headers=headers)
        assert resp.status_code == 200

        # 2. The admin view must echo the stored statuses back.
        resp = client.get(f"{BASE_URL}/admin/config/providers", headers=headers)
        assert resp.json()["statuses"]["llm_openai"] == "success"

        # 3. A user's effective config inherits the same global statuses.
        resp = client.get(f"{BASE_URL}/users/me/config", headers=headers)
        assert resp.json()["effective"]["statuses"]["llm_openai"] == "success"