import os
import httpx
import pytest
import json

BASE_URL = os.getenv("SYNC_TEST_BASE_URL", "http://127.0.0.1:8002/api/v1")

def _headers():
    return {
        "X-User-ID": os.environ.get("SYNC_TEST_USER_ID", "")
    }

@pytest.mark.skipif(os.getenv("SKIP_DOCKER_NODES", "false").lower() == "true", reason="Browser skill requires a fully-loaded Docker container environment to access Chromium.")
def test_browser_skill_weather():
    """
    End-to-end check that the LLM invokes its browser skill when instructed.

    Creates a Gemini-backed session, asks the model to browse example.com and
    report the page heading, then verifies via the SSE stream that the
    ``browser_automation_agent`` tool was invoked and that the heading
    ("Example Domain") appears in the streamed answer.

    NOTE(review): the function name mentions "weather" but the assertions
    target the static example.com heading — presumably a historical name;
    confirm before renaming since pytest selects tests by name.
    """
    user_id = os.environ.get("SYNC_TEST_USER_ID", "")
    assert user_id, "User ID not found in environment from conftest."

    with httpx.Client(timeout=45.0) as client:
        # Step 1: Create a new session bound to Gemini.
        session_payload = {
            "user_id": user_id,
            "provider_name": "gemini",
            "feature_name": "agent_harness",
        }
        r_sess = client.post(f"{BASE_URL}/sessions/", headers=_headers(), json=session_payload)
        assert r_sess.status_code == 200, f"Failed to create session: {r_sess.text}"

        session_id = r_sess.json()["id"]

        # Step 2: Ask a question that requires the browser to take a snapshot/screenshot.
        # We explicitly ask it to navigate and snapshot to guarantee image output.
        chat_payload = {
            "prompt": "Use your browser_automation_agent tool to navigate to https://example.com, take a screenshot of the page, and tell me the heading you see on the page.",
            "provider_name": "gemini"
        }

        # We expect a tool call block to occur indicating success.
        with client.stream("POST", f"{BASE_URL}/sessions/{session_id}/chat", headers=_headers(), json=chat_payload) as r_chat:
            assert r_chat.status_code == 200, "Chat request failed to initialize."
            full_response, tool_invoked = _consume_sse_stream(r_chat)

        full_response = full_response.strip().lower()
        assert len(full_response) > 0, "LLM returned an entirely silent response."
        assert tool_invoked, "The LLM didn't attempt to invoke the browser tool as instructed."

        # The prompt asked for example.com heading ("Example Domain").
        assert "example domain" in full_response, f"LLM did not identify the correct heading. Response: {full_response}"


def _consume_sse_stream(response):
    """Drain an SSE chat response and collect the model's output.

    Parameters:
        response: an open streaming httpx response whose lines follow the
            ``data: <json>`` server-sent-events convention.

    Returns:
        (full_response, tool_invoked) — the concatenated ``content`` events
        and whether a ``tool_start`` event named ``browser_automation_agent``
        was observed.

    Fails the test immediately if the backend emits an ``error`` event.
    """
    full_response = ""
    tool_invoked = False
    for line in response.iter_lines():
        if not line.startswith("data: "):
            continue
        data_str = line[len("data: "):]
        # Terminal sentinel used by the SSE protocol.
        if data_str == "[DONE]":
            break
        try:
            event = json.loads(data_str)
        except json.JSONDecodeError:
            # Best-effort: tolerate non-JSON keep-alive/heartbeat lines.
            continue
        event_type = event.get("type")
        if event_type == "content":
            full_response += event.get("content", "")
        elif event_type == "tool_start":
            if event.get("name") == "browser_automation_agent":
                tool_invoked = True
        elif event_type == "error":
            pytest.fail(f"LLM backend emitted an error: {event.get('content')}")
        elif event_type == "done":
            break
    return full_response, tool_invoked

