diff --git a/agent-node/VERSION b/agent-node/VERSION new file mode 100644 index 0000000..7dea76e --- /dev/null +++ b/agent-node/VERSION @@ -0,0 +1 @@ +1.0.1 diff --git a/agent-node/agent_node/config.py b/agent-node/agent_node/config.py index de9e64f..f321761 100644 --- a/agent-node/agent_node/config.py +++ b/agent-node/agent_node/config.py @@ -16,6 +16,8 @@ "tls": True, "max_skill_workers": 10, "health_report_interval": 10, + "auto_update": True, + "update_check_interval": 300, } # 1. Load from YAML if present @@ -43,6 +45,11 @@ DEBUG_GRPC = os.getenv("DEBUG_GRPC", "false").lower() == "true" SECRET_KEY = os.getenv("AGENT_SECRET_KEY", _config.get("secret_key", "dev-secret-key-1337")) +# Auto-update settings +HUB_URL = os.getenv("AGENT_HUB_URL", _config.get("hub_url", "https://ai.jerxie.com")) +AUTO_UPDATE = os.getenv("AGENT_AUTO_UPDATE", str(_config.get("auto_update", True))).lower() == "true" +UPDATE_CHECK_INTERVAL = int(os.getenv("AGENT_UPDATE_CHECK_INTERVAL", _config.get("update_check_interval", 300))) + # These are still available but likely replaced by AUTH_TOKEN / TLS_ENABLED logic CERT_CA = os.getenv("CERT_CA", "certs/ca.crt") CERT_CLIENT_CRT = os.getenv("CERT_CLIENT_CRT", "certs/client.crt") diff --git a/agent-node/agent_node/core/updater.py b/agent-node/agent_node/core/updater.py new file mode 100644 index 0000000..7b9562b --- /dev/null +++ b/agent-node/agent_node/core/updater.py @@ -0,0 +1,134 @@ +""" +Auto-Update Trigger for Cortex Agent Node. + +Detects when the running agent is behind the hub's version and +delegates to bootstrap_installer.py to perform the update — the same +program used for Day 0 installation. + +Both bootstrap and version bump follow the exact same code path: + bootstrap_installer.py → download → extract → install deps → launch + +Channel: Stable HTTP REST only. No gRPC/proto. This contract is frozen. 
+""" + +import os +import sys +import time +import json +import logging +import threading +import subprocess +import urllib.request + +logger = logging.getLogger(__name__) + +_HUB_HTTP_URL: str = "" +_AUTH_TOKEN: str = "" +_CHECK_INTERVAL_SECS: int = 300 + +# bootstrap_installer.py lives at the agent-node root (two levels up from here) +_AGENT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..")) +_VERSION_FILE = os.path.join(_AGENT_ROOT, "VERSION") +_BOOTSTRAPPER = os.path.join(_AGENT_ROOT, "bootstrap_installer.py") + + +def _read_local_version() -> str: + try: + with open(_VERSION_FILE) as f: + return f.read().strip() + except FileNotFoundError: + return "0.0.0" + + +def _fetch_remote_version() -> str | None: + url = f"{_HUB_HTTP_URL}/api/v1/agent/version" + try: + req = urllib.request.Request(url, headers={"X-Agent-Token": _AUTH_TOKEN}) + with urllib.request.urlopen(req, timeout=10) as resp: + return json.loads(resp.read().decode()).get("version") + except Exception as e: + logger.warning(f"[Updater] Version check failed: {e}") + return None + + +def _version_tuple(v: str): + try: + return tuple(int(x) for x in v.split(".")) + except Exception: + return (0, 0, 0) + + +def _apply_update_via_bootstrapper(): + """ + Delegates to bootstrap_installer.py --update-only — the same code path + as Day 0 installation — then restarts this process. + Does not return on success. + """ + if not os.path.exists(_BOOTSTRAPPER): + logger.error(f"[Updater] bootstrap_installer.py not found at {_BOOTSTRAPPER}") + return False + + logger.info("[Updater] ⬇️ Delegating update to bootstrap_installer.py ...") + result = subprocess.run( + [sys.executable, _BOOTSTRAPPER, + "--hub", _HUB_HTTP_URL, + "--token", _AUTH_TOKEN, + "--update-only", + "--install-dir", _AGENT_ROOT], + cwd=_AGENT_ROOT + ) + + if result.returncode == 0: + logger.info("[Updater] ✅ Update applied. 
Restarting agent process...") + sys.stdout.flush() + sys.stderr.flush() + os.execv(sys.executable, [sys.executable] + sys.argv) # in-place restart, no return + else: + logger.error(f"[Updater] bootstrap_installer.py failed (exit {result.returncode}). Continuing with current version.") + return False + + +def check_and_update_once(): + """ + Single version check against the hub. If a newer version is available, + triggers bootstrap_installer.py and restarts (does not return if applied). + """ + local = _read_local_version() + logger.info(f"[Updater] Local version: {local}") + + remote = _fetch_remote_version() + if remote is None: + logger.info("[Updater] Hub unreachable — skipping update check.") + return + + logger.info(f"[Updater] Remote version: {remote}") + + if _version_tuple(remote) <= _version_tuple(local): + logger.info("[Updater] ✅ Already up to date.") + return + + logger.info(f"[Updater] 🆕 Update available: {local} → {remote}") + _apply_update_via_bootstrapper() # does not return on success + + +def start_background_updater(): + """Starts a daemon thread that periodically checks for new versions.""" + def _loop(): + while True: + time.sleep(_CHECK_INTERVAL_SECS) + try: + check_and_update_once() + except Exception as e: + logger.error(f"[Updater] Background check error: {e}") + + t = threading.Thread(target=_loop, daemon=True, name="AutoUpdater") + t.start() + logger.info(f"[Updater] Background updater started (interval: {_CHECK_INTERVAL_SECS}s)") + + +def init(hub_http_url: str, auth_token: str, check_interval_secs: int = 300): + """Initialize with hub connection details. 
Call before any other function.""" + global _HUB_HTTP_URL, _AUTH_TOKEN, _CHECK_INTERVAL_SECS + _HUB_HTTP_URL = hub_http_url.rstrip("/") + _AUTH_TOKEN = auth_token + _CHECK_INTERVAL_SECS = check_interval_secs diff --git a/agent-node/agent_node/main.py b/agent-node/agent_node/main.py index 6fa4eb0..02faf4d 100644 --- a/agent-node/agent_node/main.py +++ b/agent-node/agent_node/main.py @@ -6,11 +6,17 @@ import signal from agent_node.node import AgentNode -from agent_node.config import NODE_ID +from agent_node.config import NODE_ID, HUB_URL, AUTH_TOKEN, SECRET_KEY, AUTO_UPDATE, UPDATE_CHECK_INTERVAL +from agent_node.core import updater def main(): - print(f"[*] Starting Antigravity Agent Node: {NODE_ID}...") - + print(f"[*] Starting Agent Node: {NODE_ID}...") + + # 0. Auto-Update Check (before anything else — if we're behind, restart now) + if AUTO_UPDATE: + updater.init(hub_http_url=HUB_URL, auth_token=SECRET_KEY, check_interval_secs=UPDATE_CHECK_INTERVAL) + updater.check_and_update_once() # May restart process — does not return if update applied + # 1. Initialization node = AgentNode() @@ -27,8 +33,12 @@ # 3. Background: Start health reporting (Heartbeats) node.start_health_reporting() - - # 4. Foreground: Run Persistent Task Stream + + # 4. Background: Periodic auto-update checks + if AUTO_UPDATE: + updater.start_background_updater() + + # 5. Foreground: Run Persistent Task Stream node.run_task_stream() if __name__ == '__main__': diff --git a/agent-node/bootstrap_installer.py b/agent-node/bootstrap_installer.py new file mode 100644 index 0000000..de27f46 --- /dev/null +++ b/agent-node/bootstrap_installer.py @@ -0,0 +1,215 @@ +#!/usr/bin/env python3 +""" +Cortex Agent Node — Bootstrap Installer +======================================== +This is the ONLY file a new user needs to download. +It handles everything else automatically: + + 1. Connects to the Cortex Hub and downloads the full agent-node code + 2. Installs Python dependencies + 3. 
Launches the agent (which then handles all future self-updates)
+
+Usage:
+    python3 bootstrap_installer.py --hub https://ai.jerxie.com --token <TOKEN> --node-id my-node
+
+Or with an agent_config.yaml in the same directory — see docs.
+"""
+
+import os
+import sys
+import json
+import shutil
+import tarfile
+import tempfile
+import argparse
+import subprocess
+import urllib.request
+import urllib.error
+
+# ── Minimal defaults — overridden by CLI args or agent_config.yaml ────────────
+DEFAULT_HUB = "https://ai.jerxie.com"
+INSTALL_DIR = os.path.join(os.path.expanduser("~"), ".cortex", "agent-node")
+
+
+def _print(msg: str):
+    print(f"[Cortex Bootstrap] {msg}", flush=True)
+
+
+def _fetch_version(hub_url: str, token: str) -> str:
+    url = f"{hub_url.rstrip('/')}/api/v1/agent/version"
+    try:
+        req = urllib.request.Request(url, headers={"X-Agent-Token": token})
+        with urllib.request.urlopen(req, timeout=15) as resp:
+            return json.loads(resp.read().decode()).get("version", "unknown")
+    except Exception as e:
+        _print(f"ERROR: Could not reach hub at {url}: {e}")
+        sys.exit(1)
+
+
+def _download_tarball(hub_url: str, token: str, dest_path: str):
+    url = f"{hub_url.rstrip('/')}/api/v1/agent/download"
+    _print(f"Downloading agent from {url} ...")
+    try:
+        req = urllib.request.Request(url, headers={"X-Agent-Token": token})
+        with urllib.request.urlopen(req, timeout=120) as resp, open(dest_path, "wb") as f:
+            total = 0
+            while True:
+                chunk = resp.read(65536)
+                if not chunk:
+                    break
+                f.write(chunk)
+                total += len(chunk)
+                print(f"\r {total // 1024} KB downloaded...", end="", flush=True)
+        print()
+        _print("Download complete.")
+    except Exception as e:
+        _print(f"ERROR: Download failed: {e}")
+        sys.exit(1)
+
+
+def _install(hub_url: str, token: str, install_dir: str):
+    """Downloads and installs the agent-node into install_dir."""
+    tmp_dir = tempfile.mkdtemp(prefix="cortex_bootstrap_")
+    try:
+        tarball = os.path.join(tmp_dir, "agent.tar.gz")
+        _download_tarball(hub_url, token, 
tarball)
+
+        _print(f"Extracting to {install_dir} ...")
+        if os.path.exists(install_dir):
+            shutil.rmtree(install_dir)
+        os.makedirs(install_dir, exist_ok=True)
+
+        with tarfile.open(tarball, "r:gz") as tar:
+            # Strip the top-level 'agent-node/' prefix from paths
+            for member in tar.getmembers():
+                parts = member.name.split("/", 1)
+                if len(parts) > 1:
+                    member.name = parts[1]
+                else:
+                    member.name = parts[0]
+                if member.name:
+                    tar.extract(member, install_dir)
+
+        _print("Extraction complete.")
+    finally:
+        shutil.rmtree(tmp_dir, ignore_errors=True)
+
+
+def _install_deps(install_dir: str):
+    req_file = os.path.join(install_dir, "requirements.txt")
+    if not os.path.exists(req_file):
+        _print("No requirements.txt found — skipping dependency install.")
+        return
+    _print("Installing Python dependencies ...")
+    subprocess.check_call(
+        [sys.executable, "-m", "pip", "install", "-r", req_file, "--quiet"],
+        cwd=install_dir
+    )
+    _print("Dependencies installed.")
+
+
+def _write_config(install_dir: str, node_id: str, hub_url: str, token: str, grpc_endpoint: str):
+    """Writes an agent_config.yaml into the install directory."""
+    config = {
+        "node_id": node_id,
+        "hub_url": hub_url,
+        "grpc_endpoint": grpc_endpoint,
+        "auth_token": token,
+        "auto_update": True,
+        "update_check_interval": 300,
+    }
+    config_path = os.path.join(install_dir, "agent_config.yaml")
+    try:
+        import yaml
+        with open(config_path, "w") as f:
+            yaml.dump(config, f, default_flow_style=False)
+        _print(f"Config written to {config_path}")
+    except ImportError:
+        # yaml not yet installed — write manually
+        lines = [f"{k}: {v}\n" for k, v in config.items()]
+        with open(config_path, "w") as f:
+            f.writelines(lines)
+        _print(f"Config written (raw) to {config_path}")
+
+
+def _launch(install_dir: str):
+    """Launches the agent in-place, replacing the bootstrapper process."""
+    entry = os.path.join(install_dir, "agent_node", "main.py")
+    _print(f"Launching agent: {sys.executable} {entry}")
+    sys.stdout.flush()
+ sys.stderr.flush() + os.chdir(install_dir) + # Add install_dir to path so imports resolve + sys.path.insert(0, install_dir) + os.execv(sys.executable, [sys.executable, entry]) + + +def main(): + parser = argparse.ArgumentParser( + description="Cortex Agent Node Bootstrap Installer", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + python3 bootstrap_installer.py --hub https://ai.jerxie.com --token abc123 --node-id my-laptop + python3 bootstrap_installer.py # reads from agent_config.yaml in current dir + """ + ) + parser.add_argument("--hub", default=None, help=f"Hub URL (default: {DEFAULT_HUB})") + parser.add_argument("--token", default=None, help="Agent auth token") + parser.add_argument("--node-id", default=None, help="Unique node identifier") + parser.add_argument("--grpc", default=None, help="gRPC endpoint (default: derived from hub URL)") + parser.add_argument("--install-dir", default=INSTALL_DIR, help=f"Install path (default: {INSTALL_DIR})") + parser.add_argument("--update-only", action="store_true", help="Only pull latest code, don't re-launch") + args = parser.parse_args() + + # Try loading existing config for defaults + config_path = "agent_config.yaml" + existing_config = {} + if os.path.exists(config_path): + try: + import yaml + with open(config_path) as f: + existing_config = yaml.safe_load(f) or {} + _print(f"Loaded existing config from {config_path}") + except Exception: + pass + + hub_url = args.hub or existing_config.get("hub_url", DEFAULT_HUB) + token = args.token or existing_config.get("auth_token") or os.getenv("AGENT_AUTH_TOKEN", "") + node_id = args.node_id or existing_config.get("node_id", "cortex-node-001") + grpc = args.grpc or existing_config.get("grpc_endpoint") or hub_url.replace("https://", "").replace("http://", "") + ":50051" + install_dir = args.install_dir + + if not token: + _print("ERROR: --token is required (or set AGENT_AUTH_TOKEN env var)") + sys.exit(1) + + _print(f"Hub: {hub_url}") + 
remote_version = _fetch_version(hub_url, token) + _print(f"Remote agent version: {remote_version}") + + # Check if already installed and up to date + local_version_file = os.path.join(install_dir, "VERSION") + if os.path.exists(local_version_file): + with open(local_version_file) as f: + local_version = f.read().strip() + if local_version == remote_version and not args.update_only: + _print(f"Already at {local_version} — launching existing installation.") + _launch(install_dir) + return # unreachable + _print(f"Updating {local_version} → {remote_version}") + + _install(hub_url, token, install_dir) + _install_deps(install_dir) + _write_config(install_dir, node_id, hub_url, token, grpc) + + if args.update_only: + _print(f"✅ Updated to v{remote_version}. Not launching (--update-only).") + return + + _print(f"✅ Agent v{remote_version} installed at {install_dir}") + _launch(install_dir) # replaces this process + + +if __name__ == "__main__": + main() diff --git a/ai-hub/app/api/routes/agent_update.py b/ai-hub/app/api/routes/agent_update.py new file mode 100644 index 0000000..b86d4a4 --- /dev/null +++ b/ai-hub/app/api/routes/agent_update.py @@ -0,0 +1,128 @@ +""" +Agent Update Distribution Endpoint. + +Stable, frozen HTTP API — this contract NEVER changes shape. + GET /api/v1/agent/version → {"version": "x.y.z"} + GET /api/v1/agent/download → application/gzip (agent-node tarball) + +Auth: X-Agent-Token header must match the hub's secret key. +""" + +import os +import io +import tarfile +import logging +from fastapi import APIRouter, HTTPException, Request +from fastapi.responses import StreamingResponse, JSONResponse + +logger = logging.getLogger(__name__) + +# Path to the agent-node source tree on the hub container. +# In Docker: mounted at /app/agent-node-source +# Overridable via env var for flexibility in other deployments. 
+_AGENT_NODE_DIR = os.environ.get( + "AGENT_NODE_SRC_DIR", + os.path.join(os.path.dirname(__file__), "..", "..", "..", "agent-node-source") +) +_AGENT_NODE_DIR = os.path.abspath(_AGENT_NODE_DIR) +_VERSION_FILE = os.path.join(_AGENT_NODE_DIR, "VERSION") + +# Directories/files to exclude from the distributed tarball +_EXCLUDE_PATTERNS = { + "__pycache__", ".git", "*.pyc", "*.pyo", + "sync-node-1", "sync-node-2", # Test workspace dirs + "docker-compose.yml", # Deployment-specific, not for generic clients +} + + +def _read_version() -> str: + try: + with open(_VERSION_FILE, "r") as f: + return f.read().strip() + except FileNotFoundError: + return "0.0.0" + + +def _auth_ok(request: Request) -> bool: + """Validates the agent auth token from the request header.""" + from app.config import settings + token = request.headers.get("X-Agent-Token", "") + return token == settings.SECRET_KEY + + +def _should_exclude(path: str) -> bool: + """Returns True if the path should be excluded from the tarball.""" + parts = path.replace("\\", "/").split("/") + for part in parts: + if part in _EXCLUDE_PATTERNS: + return True + if part.endswith(".pyc") or part.endswith(".pyo"): + return True + return False + + +def _build_tarball() -> bytes: + """ + Builds an in-memory gzipped tarball of the agent-node directory. + The tarball root is 'agent-node/' so extraction is self-contained. 
+ """ + buf = io.BytesIO() + with tarfile.open(fileobj=buf, mode="w:gz") as tar: + for root, dirs, files in os.walk(_AGENT_NODE_DIR): + # Prune excluded dirs in-place so os.walk doesn't descend into them + dirs[:] = [d for d in dirs if not _should_exclude(os.path.join(root, d))] + + for filename in files: + abs_path = os.path.join(root, filename) + rel_path = os.path.relpath(abs_path, os.path.dirname(_AGENT_NODE_DIR)) + if not _should_exclude(rel_path): + tar.add(abs_path, arcname=rel_path) + + return buf.getvalue() + + +def create_agent_update_router() -> APIRouter: + router = APIRouter(prefix="/agent", tags=["Agent Update"]) + + @router.get("/version", summary="Get current agent version") + def get_agent_version(request: Request): + """ + Returns the current agent-node version. Called by agent nodes at startup + and periodically to detect if they need to self-update. + """ + if not _auth_ok(request): + raise HTTPException(status_code=401, detail="Unauthorized") + + version = _read_version() + logger.info(f"[AgentUpdate] Version check → {version}") + return JSONResponse({"version": version}) + + @router.get("/download", summary="Download agent node tarball") + def download_agent(request: Request): + """ + Streams the current agent-node source as a gzipped tarball. + Only called when an agent detects it is behind the hub's version. 
+        """
+        if not _auth_ok(request):
+            raise HTTPException(status_code=401, detail="Unauthorized")
+
+        if not os.path.isdir(_AGENT_NODE_DIR):
+            raise HTTPException(status_code=503, detail="Agent source not available on this hub.")
+
+        version = _read_version()
+        logger.info(f"[AgentUpdate] Serving agent tarball v{version}")
+
+        try:
+            tarball_bytes = _build_tarball()
+        except Exception as e:
+            logger.error(f"[AgentUpdate] Failed to build tarball: {e}")
+            raise HTTPException(status_code=500, detail="Failed to package agent.")
+
+        filename = f"cortex-agent-node-{version}.tar.gz"
+        return StreamingResponse(
+            iter([tarball_bytes]),
+            media_type="application/gzip",
+            headers={"Content-Disposition": f"attachment; filename={filename}"}
+        )
+
+    return router
diff --git a/ai-hub/app/api/routes/api.py b/ai-hub/app/api/routes/api.py
index 7161f26..529e771 100644
--- a/ai-hub/app/api/routes/api.py
+++ b/ai-hub/app/api/routes/api.py
@@ -10,6 +10,7 @@
 from .user import create_users_router
 from .nodes import create_nodes_router
 from .skills import create_skills_router
+from .agent_update import create_agent_update_router
 
 def create_api_router(services: ServiceContainer) -> APIRouter:
     """
@@ -27,5 +28,6 @@
     router.include_router(create_users_router(services))
     router.include_router(create_nodes_router(services))
     router.include_router(create_skills_router(services))
+    router.include_router(create_agent_update_router())
 
     return router
\ No newline at end of file
diff --git a/ai-hub/app/api/routes/sessions.py b/ai-hub/app/api/routes/sessions.py
index 9595739..f931ac8 100644
--- a/ai-hub/app/api/routes/sessions.py
+++ b/ai-hub/app/api/routes/sessions.py
@@ -143,7 +143,29 @@
             raise
         except Exception as e:
             raise HTTPException(status_code=500, detail=f"An error occurred: {e}")
+
+    @router.post("/{session_id}/clear-history", summary="Clear Chat History (Preserve Session)")
+    def clear_session_history(session_id: int, db: Session = Depends(get_db)):
+        """
+        Deletes all messages for a session but 
preserves the session itself + (node attachments, workspace ID, sync config, title all remain intact). + Useful in Swarm Control to start a fresh chat without losing the file sync workspace. + """ + try: + session = db.query(models.Session).filter(models.Session.id == session_id).first() + if not session: + raise HTTPException(status_code=404, detail="Session not found.") + deleted = db.query(models.Message).filter(models.Message.session_id == session_id).delete() + db.commit() + return {"message": f"Cleared {deleted} messages. Session and workspace preserved."} + except HTTPException: + raise + except Exception as e: + db.rollback() + raise HTTPException(status_code=500, detail=f"Failed to clear history: {e}") + + @router.get("/{session_id}/tokens", response_model=schemas.SessionTokenUsageResponse, summary="Get Session Token Usage") def get_session_token_usage(session_id: int, db: Session = Depends(get_db)): try: diff --git a/ai-hub/app/core/orchestration/architect.py b/ai-hub/app/core/orchestration/architect.py index f5f03dd..c125b1b 100644 --- a/ai-hub/app/core/orchestration/architect.py +++ b/ai-hub/app/core/orchestration/architect.py @@ -126,12 +126,16 @@ self._accumulate_tool_calls(delta, tool_calls_map) # End Stream & Flush buffers + # IMPORTANT: In buffered (voice) mode, do NOT yield end_stream events directly. + # They get accumulated into accumulated_content and then yielded ONCE by the + # buffer_content block below. Yielding here AND there causes the response to repeat twice. async for event in self.stream.end_stream(turn): if event["type"] == "content": accumulated_content += event["content"] if event["type"] == "reasoning": accumulated_reasoning += event["content"] - yield event + # Only forward events immediately in non-buffered (chat) mode + if not profile.buffer_content: + yield event - # E. Branch: Tools or Exit? 
# Heartbeat Fallback: If no content was sent but tools are being called, force a bridge sentence if tool_calls_map and not self.stream.header_sent and not profile.silent_stream: fallback_text = f"Strategy: Executing orchestrated tasks in progress..." @@ -139,7 +143,7 @@ if event["type"] == "content": accumulated_content += event["content"] yield event - # E. Branch: Tools or Exit? + # Branch: Tools or Exit? if not tool_calls_map: # Final Turn: Yield the accumulated content if it was empty if not accumulated_content.strip(): @@ -156,7 +160,7 @@ # In chat mode, just send the fallback if no content ever came through yield {"type": "content", "content": fallback} elif profile.buffer_content: - # Standard buffered yield + # Standard buffered yield — yield the full accumulated content ONCE import re content_to_yield = accumulated_content for pattern in profile.strip_headers: diff --git a/ai-hub/app/core/orchestration/memory.py b/ai-hub/app/core/orchestration/memory.py index 97d49df..d1ddf10 100644 --- a/ai-hub/app/core/orchestration/memory.py +++ b/ai-hub/app/core/orchestration/memory.py @@ -64,10 +64,13 @@ # Handle escaped session_id marker in DEFAULT_PROMPT_TEMPLATE system_prompt = system_prompt.replace("{{session_id}}", "{session_id}") - # Enforce Tool-belt awareness - system_prompt += f"\n\n## 🛠️ ACTIVE TOOL-BELT (FORBIDDEN to mention others):\n" - system_prompt += f"You have ONLY these {len(available_skills)} calibrated tools: [{skill_list_str}].\n" - system_prompt += "If a user asks about your capabilities, you MUST only list these specific tools. DO NOT promise Google Calendar, Wolfram Alpha, or any external integrations not in this list." + # Enforce Tool-belt awareness — only inject when tools actually exist + # With zero tools, appending "You have 0 tools: [NONE]" causes the AI to refuse + # all creative/conversational requests thinking it has no capabilities. 
+ if available_skills: + system_prompt += f"\n\n## 🛠️ ACTIVE TOOL-BELT (FORBIDDEN to mention others):\n" + system_prompt += f"You have ONLY these {len(available_skills)} calibrated tools: [{skill_list_str}].\n" + system_prompt += "If a user asks about your capabilities, you MUST only list these specific tools. DO NOT promise Google Calendar, Wolfram Alpha, or any external integrations not in this list." return [ {"role": "system", "content": system_prompt}, diff --git a/ai-hub/app/core/orchestration/profiles.py b/ai-hub/app/core/orchestration/profiles.py index 92ca1dd..1a1e076 100644 --- a/ai-hub/app/core/orchestration/profiles.py +++ b/ai-hub/app/core/orchestration/profiles.py @@ -47,18 +47,13 @@ Answer:""" -VOICE_PROMPT_TEMPLATE = """You are a conversational voice assistant. -Keep your responses short, natural, and friendly. +VOICE_PROMPT_TEMPLATE = """You are Cortex, a friendly and helpful voice assistant. Respond naturally and conversationally — like talking to a knowledgeable friend. -## 🎤 Voice Interaction Rules: -- **Final Result Only**: You are a voice assistant. ONLY the final, direct answer will be vocalized. Avoid speaking during intermediate tool-calling turns. -- **Direct Responses**: Do NOT provide any "Master-Architect" analysis, bridging sentences, or internal strategy. JUST provide the final content for the user. -- **Fixed Tool-belt**: You ONLY have access to the tools literally provided to you. DO NOT hallucinate external 'Agents' or 'Tool Explorers' (e.g. `mesh_tool_explorer`). -- **Final Spoken Result**: In your final turn (after any tool calls), you MUST provide a direct, concise summary or answer for the user to hear (e.g., "I've checked the servers; all systems are green."). -- **Outside Thinking Tags**: You MUST provide your final spoken answer OUTSIDE of any `` tags. Anything inside tags is used for internal processing and will NOT be spoken to the user. -- **Conversational Tone**: Focus on rhythm and prosody. 
- -Current Infrastructure: {mesh_context} +## Guidelines: +- **Be helpful with anything**: Answer questions, tell stories, explain concepts, have casual conversations — there are no topic restrictions. +- **Keep it speakable**: Responses should sound natural when read aloud. Avoid markdown, bullet points, code blocks, or heavy formatting — use plain flowing sentences instead. +- **Appropriate length**: Match your response length to the request. A casual question gets a short answer. A request for a story or explanation gets a fuller response — but stay engaging, not exhaustive. +- **Warm tone**: Be friendly, direct, and personable. No corporate stiffness. Conversation History: {chat_history} @@ -110,29 +105,6 @@ name="swarm_control", template=DEFAULT_PROMPT_TEMPLATE ), - "swarm": FeatureProfile( - name="swarm_control", - template=DEFAULT_PROMPT_TEMPLATE - ), - "workflow": FeatureProfile( - name="workflow", - template=DEFAULT_PROMPT_TEMPLATE - ), - "voice": FeatureProfile( - name="voice", - template=VOICE_PROMPT_TEMPLATE, - silent_stream=True, - show_heartbeat=False, - buffer_content=True, - strip_headers=[ - r"###\s+🛰️\s+\*\*\[Turn\s+\d+\]\s+Master-Architect\s+Analysis\*\*", - r"🛰️\s+\[Turn\s+\d+\]\s+Master-Architect\s+Analysis", - r"Turn\s+\d+:\s+architecting\s+next\s+step\.\.\." 
- ], - default_prompt_slug="voice-pipeline", - include_mesh_context=False, - autonomous_limit=10 - ), "voice_chat": FeatureProfile( name="voice", template=VOICE_PROMPT_TEMPLATE, diff --git a/deployment/test-nodes/docker-compose.test-nodes.yml b/deployment/test-nodes/docker-compose.test-nodes.yml index 45ad0d0..58d20ee 100644 --- a/deployment/test-nodes/docker-compose.test-nodes.yml +++ b/deployment/test-nodes/docker-compose.test-nodes.yml @@ -10,6 +10,7 @@ - AGENT_NODE_ID=test-node-1 - AGENT_NODE_DESC=Primary Test Node - GRPC_ENDPOINT=ai_hub_service:50051 + - AGENT_HUB_URL=http://ai_hub_service:8000 - AGENT_SECRET_KEY=aYc2j1lYUUZXkBFFUndnleZI - AGENT_AUTH_TOKEN=cortex-secret-shared-key - AGENT_TLS_ENABLED=false @@ -29,6 +30,7 @@ - AGENT_NODE_ID=test-node-2 - AGENT_NODE_DESC=Secondary Test Node - GRPC_ENDPOINT=ai_hub_service:50051 + - AGENT_HUB_URL=http://ai_hub_service:8000 - AGENT_SECRET_KEY=aYc2j1lYUUZXkBFFUndnleZI - AGENT_AUTH_TOKEN=ysHjZIRXeWo-YYK6EWtBsIgJ4uNBihSnZMtt0BQW3eI - AGENT_TLS_ENABLED=false diff --git a/skills/browser-automation-agent/SKILL.md b/skills/browser-automation-agent/SKILL.md index 30968ee..8f18b86 100644 --- a/skills/browser-automation-agent/SKILL.md +++ b/skills/browser-automation-agent/SKILL.md @@ -7,7 +7,6 @@ is_enabled: true features: - chat -- workflow - swarm_control config: service: BrowserService diff --git a/skills/mesh-file-explorer/SKILL.md b/skills/mesh-file-explorer/SKILL.md index a6b05ca..1c7f064 100644 --- a/skills/mesh-file-explorer/SKILL.md +++ b/skills/mesh-file-explorer/SKILL.md @@ -7,7 +7,6 @@ is_enabled: true features: - chat -- workflow - swarm_control config: internal_module: app.core.grpc.core.mirror diff --git a/skills/mesh-inspect-drift/SKILL.md b/skills/mesh-inspect-drift/SKILL.md index bfd8d1f..0ee9f22 100644 --- a/skills/mesh-inspect-drift/SKILL.md +++ b/skills/mesh-inspect-drift/SKILL.md @@ -7,7 +7,6 @@ is_enabled: true features: - chat -- workflow - swarm_control config: parameters: diff --git 
a/skills/mesh-sync-control/SKILL.md b/skills/mesh-sync-control/SKILL.md index 5d9e2c5..7481c3e 100644 --- a/skills/mesh-sync-control/SKILL.md +++ b/skills/mesh-sync-control/SKILL.md @@ -7,7 +7,6 @@ is_enabled: true features: - chat -- workflow - swarm_control config: actions: diff --git a/skills/voice-interaction-handler/SKILL.md b/skills/voice-interaction-handler/SKILL.md deleted file mode 100644 index d06f903..0000000 --- a/skills/voice-interaction-handler/SKILL.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -name: voice_interaction_handler -emoji: "🎤" -description: Handle real-time voice interruptions, tone analysis, and speech-to-speech - feedback loops. -skill_type: local -is_enabled: true -features: -- voice -config: - interaction_mode: speech-to-speech - latency_target: 300 - parameters: - type: object - properties: - mode: - type: string - enum: - - active - - passive - description: Voice interaction mode. -is_system: true ---- - -# Voice Interaction Handler - -You are a voice-first AI. Keep your responses concise and conversational. Focus on natural prosody and handle interruptions gracefully. 
diff --git a/ui/client-app/src/pages/SwarmControlPage.js b/ui/client-app/src/pages/SwarmControlPage.js index e4a4999..fef05b2 100644 --- a/ui/client-app/src/pages/SwarmControlPage.js +++ b/ui/client-app/src/pages/SwarmControlPage.js @@ -5,7 +5,8 @@ import useSwarmControl from "../hooks/useSwarmControl"; import { updateSession, getSessionNodeStatus, attachNodesToSession, - detachNodeFromSession, getUserAccessibleNodes, getUserNodePreferences, nodeFsList + detachNodeFromSession, getUserAccessibleNodes, getUserNodePreferences, nodeFsList, + clearSessionHistory } from "../services/apiService"; import FileSystemNavigator from "../components/FileSystemNavigator"; @@ -68,6 +69,22 @@ } = useSwarmControl({ pageContainerRef, onNewSessionCreated }); const [showConfigModal, setShowConfigModal] = useState(false); + const [isClearingHistory, setIsClearingHistory] = useState(false); + + const handleClearHistory = async () => { + if (!sessionId) return; + if (!window.confirm("Clear all chat messages? Your nodes, workspace, and file sync will be preserved.")) return; + setIsClearingHistory(true); + try { + await clearSessionHistory(sessionId); + // Reload the page to refresh chat history from the server + window.location.reload(); + } catch (e) { + alert(`Failed to clear history: ${e.message}`); + } finally { + setIsClearingHistory(false); + } + }; const [showNodeSelector, setShowNodeSelector] = useState(false); const isEditingMeshRef = useRef(false); useEffect(() => { @@ -474,6 +491,19 @@ +
+ +
+ Clear Chat History
+ Nodes & workspace sync are preserved +
+