from typing import List, Dict, Any, Optional
from sqlalchemy.orm import Session
from app.db import models
from app.core.skills.base import BaseSkill
import logging
import time
import os

from app.core.tools.registry import tool_registry
import time

logger = logging.getLogger(__name__)

class ToolService:
    """
    Orchestrates AI tools (Skills) available to users.
    Handles discovery, permission checks, and execution routing.

    Tools come from two sources:
      * local/native skills (BaseSkill instances passed at construction), and
      * DB-defined skills (system skills or skills owned by the requesting user).
    """

    def __init__(self, services: Any = None, local_skills: Optional[List[BaseSkill]] = None):
        """
        Args:
            services: Optional service container; ``orchestrator`` and
                ``user_service`` attributes are resolved lazily when needed.
            local_skills: Native in-process skills. Defaults to none.
                (Fixed from a mutable default argument ``[]`` to avoid the
                shared-mutable-default pitfall.)
        """
        self._services = services
        # Index by name for O(1) lookup in call_tool().
        self._local_skills = {s.name: s for s in (local_skills or [])}
        tool_registry.load_plugins()

    def get_available_tools(self, db: Session, user_id: str, feature: Optional[str] = None) -> List[Dict[str, Any]]:
        """
        Retrieves all tools the user is authorized to use, optionally filtered by feature.

        Args:
            db: Active SQLAlchemy session.
            user_id: Used to include skills owned by this user.
            feature: When given, only tools advertising this feature are
                returned. Local skills with no ``features`` attribute are
                treated as ``["chat"]``.

        Returns:
            A list of OpenAI-style tool definitions
            (``{"type": "function", "function": {...}}``).
        """
        # 1. Fetch system/local skills and filter by feature if requested
        local_skills = self._local_skills.values()
        if feature:
            local_skills = [s for s in local_skills if feature in getattr(s, "features", ["chat"])]

        tools = [s.to_tool_definition() for s in local_skills]

        # 2. Add DB-defined skills (System skills or user-owned)
        query = db.query(models.Skill).filter(
            (models.Skill.is_system == True) |
            (models.Skill.owner_id == user_id)
        ).filter(models.Skill.is_enabled == True)

        db_skills = query.all()
        if feature:
            # SQLAlchemy JSON containment checks are driver-specific
            # (SQLite vs others), so we filter in Python for portability.
            db_skills = [ds for ds in db_skills if feature in (ds.features or [])]

        for ds in db_skills:
            # Prevent duplicates if name overlaps with local
            if any(t["function"]["name"] == ds.name for t in tools):
                continue

            # M3: Use the public description, but append internal AI instructions if available
            # This makes the "system prompt" invisible to end users but fully visible to the Orchestrator.
            description = ds.description or ""
            if ds.system_prompt:
                description += f"\n\nInternal Intelligence Protocol:\n{ds.system_prompt}"

            tools.append({
                "type": "function",
                "function": {
                    "name": ds.name,
                    "description": description,
                    # Guard against a NULL config column (mirrors the
                    # `ds.features or []` defensive pattern above).
                    "parameters": (ds.config or {}).get("parameters", {})
                }
            })

        return tools

    async def call_tool(self, tool_name: str, arguments: Dict[str, Any], db: Optional[Session] = None, user_id: Optional[str] = None, session_id: Optional[str] = None, session_db_id: Optional[int] = None, on_event=None) -> Any:
        """
        Executes a registered skill by name.

        Resolution order:
          1. Local/native skill — executed directly.
          2. DB-defined *system* skill — routed to ``_execute_system_skill``.

        Returns:
            The skill result, or ``{"success": False, "error": ...}`` when
            the tool cannot be resolved.
        """
        # 1. Try local/native skill first
        if tool_name in self._local_skills:
            skill = self._local_skills[tool_name]
            result = await skill.execute(**arguments)
            return result.dict()

        # 2. Handle System / DB Skills
        if db:
            db_skill = db.query(models.Skill).filter(models.Skill.name == tool_name).first()
            if db_skill and db_skill.is_system:
                return await self._execute_system_skill(db_skill, arguments, user_id=user_id, db=db, session_id=session_id, session_db_id=session_db_id, on_event=on_event)

        logger.error(f"Tool '{tool_name}' not found or handled yet.")
        return {"success": False, "error": "Tool not found"}

    async def _execute_system_skill(self, skill: models.Skill, args: Dict[str, Any], user_id: Optional[str] = None, db: Optional[Session] = None, session_id: Optional[str] = None, session_db_id: Optional[int] = None, on_event=None) -> Any:
        """Routes system skill execution to a stateful SubAgent.

        Pipeline:
          1. Security — verify any targeted mesh node(s) are actually
             attached to the session in the DB.
          2. Resolve the session id (the AI may send ``"current"``).
          3. Build an LLM provider from user preferences (falling back to
             system defaults) so the SubAgent can be AI-powered.
          4. Ask the registered plugin to prepare the task, then run it
             inside a retrying SubAgent.
          5. Persist browser artifacts (screenshots, metadata, a11y
             summaries) into the session workspace.

        Returns:
            The SubAgent result (string report or dict), or a
            ``{"success": False, "error": ...}`` dict on any failure.
        """
        from app.core.services.sub_agent import SubAgent
        from app.core.providers.factory import get_llm_provider

        # --- Programmatic Access Control (M3/M6) ---
        # If targeting a mesh node, we MUST ensure it's actually attached to this session in the DB.
        # This prevents AI from 'guessing' node IDs and executing on unauthorized infrastructure.
        node_id = args.get("node_id")
        node_ids = args.get("node_ids")

        if db and session_db_id:
            session = db.query(models.Session).filter(models.Session.id == session_db_id).first()
            if session:
                attached = session.attached_node_ids or []

                # Allow virtual node IDs for system maintenance
                allowed_ids = attached + ["hub", "server", "local"]

                # Check single node target
                if node_id and node_id not in allowed_ids:
                    logger.warning(f"[Security] AI attempted to access unattached node '{node_id}' in session {session_db_id}")
                    return {"success": False, "error": f"Node '{node_id}' is NOT attached to this session. Access denied."}

                # Check swarm target
                if node_ids:
                    illegal = [nid for nid in node_ids if nid not in allowed_ids]
                    if illegal:
                        logger.warning(f"[Security] AI attempted to access unattached nodes {illegal} in swarm call")
                        return {"success": False, "error": f"Nodes {illegal} are NOT attached to this session. Access denied."}

        # --- Standard Preparation ---
        llm_provider = None
        orchestrator = getattr(self._services, "orchestrator", None)
        if not orchestrator:
            return {"success": False, "error": "Orchestrator not available"}

        assistant = orchestrator.assistant

        # M3: Resolve session_id from either arguments OR the passed session_id context
        # (AI might use placeholders like 'current' which we resolve here)
        session_id_arg = args.get("session_id")
        if not session_id_arg or session_id_arg == "current":
            resolved_sid = session_id or "__fs_explorer__"
        else:
            resolved_sid = session_id_arg

        logger.info(f"[ToolService] Executing {skill.name} on {node_id or 'swarm'} (Resolved Session: {resolved_sid})")

        if db and user_id:
            user = db.query(models.User).filter(models.User.id == user_id).first()
            if user:
                # Use user's preferred model, or fallback to system default
                p_name = user.preferences.get("llm_provider", "gemini")
                m_name = user.preferences.get("llm_model", "")

                # Fetch provider-specific keys from user or system defaults
                llm_prefs = user.preferences.get("llm", {}).get("providers", {}).get(p_name, {})
                user_service = getattr(self._services, "user_service", None)

                # Fall back to system settings when the user has no usable
                # key ("*" indicates a masked/redacted key from the UI).
                if (not llm_prefs or not llm_prefs.get("api_key") or "*" in str(llm_prefs.get("api_key"))) and user_service:
                    system_prefs = user_service.get_system_settings(db)
                    system_provider_prefs = system_prefs.get("llm", {}).get("providers", {}).get(p_name, {})
                    if system_provider_prefs:
                        # Merge: system defaults overlaid with any non-empty user values.
                        merged = system_provider_prefs.copy()
                        if llm_prefs:
                            merged.update({k: v for k, v in llm_prefs.items() if v})
                        llm_prefs = merged

                api_key_override = llm_prefs.get("api_key")
                actual_m_name = m_name or llm_prefs.get("model", "")
                kwargs = {k: v for k, v in llm_prefs.items() if k not in ["api_key", "model"]}

                try:
                    llm_provider = get_llm_provider(p_name, model_name=actual_m_name, api_key_override=api_key_override, **kwargs)
                    logger.info(f"[ToolService] AI Sub-Agent enabled using {p_name}/{actual_m_name}")
                except Exception as e:
                    # Non-fatal: the SubAgent still runs, just without LLM support.
                    logger.warning(f"[ToolService] Could not init LLM for sub-agent: {e}")

        # Resolve the plugin implementation and let it prepare the task.
        plugin = tool_registry.get_plugin(skill.name)
        if not plugin:
            return {"success": False, "error": f"Tool implementation '{skill.name}' not found in registry"}

        context = {
            "db": db,
            "user_id": user_id,
            "session_id": resolved_sid,
            "node_id": node_id,
            "node_ids": node_ids,
            "assistant": assistant,
            "orchestrator": orchestrator,
            "services": self._services,
            "on_event": on_event
        }

        task_fn, task_args = plugin.prepare_task(args, context)
        if not task_fn:
            return task_args  # error dict returned by prepare_task

        try:
            # Create and run the SubAgent (potentially AI-powered)
            sub_agent = SubAgent(
                name=f"{skill.name}_{node_id or 'swarm'}",
                task_fn=task_fn,
                args=task_args,
                retries=plugin.retries,
                llm_provider=llm_provider,
                assistant=assistant,
                on_event=on_event
            )
            res = await sub_agent.run()

            # Standardize output for AI:
            # If it's a string (our new Intelligence Report), pass it through directly.
            # If it's a dict, only wrap as failure if a non-None error exists.
            if isinstance(res, dict) and res.get("error"):
                return {"success": False, "error": res["error"], "sub_agent_status": sub_agent.status}

            # M6: Post-processing for Binary Artifacts (Screenshots, etc.)
            if skill.name == "browser_automation_agent" and isinstance(res, dict):
                # Organise browser data by session for better UX
                if resolved_sid and resolved_sid != "__fs_explorer__":
                    try:
                        abs_workspace = assistant.mirror.get_workspace_path(resolved_sid)
                        # M6: Use .browser_data (ignored from node sync)
                        base_dir = os.path.join(abs_workspace, ".browser_data")
                        os.makedirs(base_dir, exist_ok=True)

                        timestamp = int(time.time())
                        action = args.get("action", "unknown").lower()

                        # Clean filename for the image: {timestamp}_{action}.png
                        # This allows better "next/prev" sorting in the parent gallery
                        ss_filename = f"{timestamp}_{action}.png"

                        # Save Screenshot if available
                        if "_screenshot_bytes" in res:
                            bits = res.pop("_screenshot_bytes")
                            if bits:
                                ss_path = os.path.join(base_dir, ss_filename)
                                with open(ss_path, "wb") as f:
                                    f.write(bits)
                                res["screenshot_url"] = f"/.browser_data/{resolved_sid}/{ss_filename}"
                                res["_visual_feedback"] = f"Action screenshot captured: {res['screenshot_url']}"

                        # Save Metadata/A11y into a hidden or specific sub-folder if needed,
                        # but keep images in the root of the session for quick gallery view.
                        action_dir = os.path.join(base_dir, ".metadata", f"{timestamp}_{action}")
                        os.makedirs(action_dir, exist_ok=True)

                        # Save Metadata/Result for easy debugging in file explorer
                        import json
                        meta = {
                            "timestamp": timestamp,
                            "action": action,
                            "url": res.get("url"),
                            "title": res.get("title"),
                            "success": res.get("success"),
                            "error": res.get("error"),
                            "eval_result": res.get("eval_result")
                        }
                        with open(os.path.join(action_dir, "metadata.json"), "w") as f:
                            json.dump(meta, f, indent=2)

                        # Optional: Save A11y summary for quick viewing
                        if "a11y_summary" in res:
                            with open(os.path.join(action_dir, "a11y_summary.txt"), "w") as f:
                                f.write(res["a11y_summary"])

                        logger.info(f"[ToolService] Browser artifacts saved to: {action_dir}")
                    except Exception as sse:
                        # Best-effort persistence: never fail the tool call
                        # because artifact storage failed.
                        logger.warning(f"Failed to persist browser data to workspace: {sse}")

            logger.info(f"[ToolService] System skill '{skill.name}' completed (Status: {sub_agent.status}).")
            return res

        except Exception as e:
            logger.exception(f"System skill execution failed: {e}")
            return {"success": False, "error": str(e)}

