diff --git a/.agent/workflows/remote_db_query.md b/.agent/workflows/remote_db_query.md index 25fb37f..d4d1e9d 100644 --- a/.agent/workflows/remote_db_query.md +++ b/.agent/workflows/remote_db_query.md @@ -37,7 +37,24 @@ SELECT id, email, role FROM users; ``` +### Promote User to Admin +```sql +UPDATE users SET role = 'admin' WHERE email = 'jerxie.app@gmail.com'; +``` + +### Check Agent Mesh Status +```sql +SELECT node_id, display_name, last_status, is_active FROM agent_nodes; +``` + +### Bootstrap Test Node (Manual) +```sql +INSERT INTO agent_nodes (node_id, display_name, description, registered_by, skill_config, invite_token, last_status, is_active, created_at) +VALUES ('test-node-01', 'Test Node 01', 'Manual entry', 'ADMIN_USER_ID', '{}', 'manual-invite-token', 'offline', 1, CURRENT_TIMESTAMP); +``` + # Rules for Use - Always use `python3 -c` with `sqlite3` to ensure compatibility inside the container. - Use `sudo -S` with `echo "$SUDO_PASSWORD" |` to handle remote authentication; read the password from the environment — never commit a plaintext password to this document. -- Prefer `SELECT` statements for inspection. Avoid mutating production data unless explicitly requested. +- Prefer `SELECT` statements for inspection. Use `UPDATE`/`INSERT` only for manual maintenance or recovery. +- **IMPORTANT**: If quoting becomes complex, use `\\\"` for double quotes inside the python command string. 
diff --git a/agent-node/agent_node/__init__.py b/agent-node/agent_node/__init__.py new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/agent-node/agent_node/__init__.py diff --git a/agent-node/agent_node/config.py b/agent-node/agent_node/config.py new file mode 100644 index 0000000..cdc367c --- /dev/null +++ b/agent-node/agent_node/config.py @@ -0,0 +1,49 @@ +import os +import platform +import yaml + +# Path to the generated config file in the bundled distribution +CONFIG_PATH = "agent_config.yaml" + +# Default values +_defaults = { + "node_id": "agent-node-007", + "node_description": "Modular Stateful Node", + "hub_url": "https://ai.jerxie.com", + "grpc_endpoint": "localhost:50051", + "auth_token": os.getenv("AGENT_AUTH_TOKEN", "cortex-secret-shared-key"), + "sync_root": "/tmp/cortex-sync", + "tls": True, + "max_skill_workers": 5, + "health_report_interval": 10, +} + +# 1. Load from YAML if present +_config = _defaults.copy() +if os.path.exists(CONFIG_PATH): + try: + with open(CONFIG_PATH, 'r') as f: + yaml_config = yaml.safe_load(f) or {} + _config.update(yaml_config) + print(f"[*] Loaded node configuration from {CONFIG_PATH}") + except Exception as e: + print(f"[!] Error loading {CONFIG_PATH}: {e}") + +# 2. Override with Environment Variables (12-Factor style) +NODE_ID = os.getenv("AGENT_NODE_ID", _config["node_id"]) +NODE_DESC = os.getenv("AGENT_NODE_DESC", _config["node_description"]) +SERVER_HOST_PORT = os.getenv("GRPC_ENDPOINT", _config["grpc_endpoint"]) # e.g. 
"ai.jerxie.com:50051" +AUTH_TOKEN = os.getenv("AGENT_AUTH_TOKEN", _config["auth_token"]) +SYNC_DIR = os.getenv("CORTEX_SYNC_DIR", _config["sync_root"]) +TLS_ENABLED = os.getenv("AGENT_TLS_ENABLED", str(_config["tls"])).lower() == 'true' + +HEALTH_REPORT_INTERVAL = int(os.getenv("HEALTH_REPORT_INTERVAL", _config["health_report_interval"])) +MAX_SKILL_WORKERS = int(os.getenv("MAX_SKILL_WORKERS", _config["max_skill_workers"])) + +SECRET_KEY = os.getenv("AGENT_SECRET_KEY", _config.get("secret_key", "dev-secret-key-1337")) + +# These are still available but likely replaced by AUTH_TOKEN / TLS_ENABLED logic +CERT_CA = os.getenv("CERT_CA", "certs/ca.crt") +CERT_CLIENT_CRT = os.getenv("CERT_CLIENT_CRT", "certs/client.crt") +CERT_CLIENT_KEY = os.getenv("CERT_CLIENT_KEY", "certs/client.key") + diff --git a/agent-node/agent_node/core/__init__.py b/agent-node/agent_node/core/__init__.py new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/agent-node/agent_node/core/__init__.py diff --git a/agent-node/agent_node/core/sandbox.py b/agent-node/agent_node/core/sandbox.py new file mode 100644 index 0000000..9f9390c --- /dev/null +++ b/agent-node/agent_node/core/sandbox.py @@ -0,0 +1,31 @@ +from protos import agent_pb2 + +class SandboxEngine: + """Core Security Engine for Local Command Verification.""" + def __init__(self): + self.policy = None + + def sync(self, p): + """Syncs the latest policy from the Orchestrator.""" + self.policy = { + "MODE": "STRICT" if p.mode == agent_pb2.SandboxPolicy.STRICT else "PERMISSIVE", + "ALLOWED": list(p.allowed_commands), + "DENIED": list(p.denied_commands), + "SENSITIVE": list(p.sensitive_commands) + } + + def verify(self, command_str): + """Verifies if a command string is allowed under the current policy.""" + if not self.policy: return False, "No Policy" + + parts = (command_str or "").strip().split() + if not parts: return False, "Empty" + + base_cmd = parts[0] + if base_cmd in self.policy["DENIED"]: + return False, f"Forbidden 
command: {base_cmd}" + + if self.policy["MODE"] == "STRICT" and base_cmd not in self.policy["ALLOWED"]: + return False, f"Command '{base_cmd}' not whitelisted" + + return True, "OK" diff --git a/agent-node/agent_node/core/sync.py b/agent-node/agent_node/core/sync.py new file mode 100644 index 0000000..887c284 --- /dev/null +++ b/agent-node/agent_node/core/sync.py @@ -0,0 +1,69 @@ +import os +import hashlib +from agent_node.config import SYNC_DIR +from protos import agent_pb2 + +class NodeSyncManager: + """Handles local filesystem synchronization on the Agent Node.""" + def __init__(self, base_sync_dir=SYNC_DIR): + self.base_sync_dir = base_sync_dir + if not os.path.exists(self.base_sync_dir): + os.makedirs(self.base_sync_dir, exist_ok=True) + + def get_session_dir(self, session_id: str) -> str: + """Returns the unique identifier directory for this session's sync.""" + path = os.path.join(self.base_sync_dir, session_id) + os.makedirs(path, exist_ok=True) + return path + + def handle_manifest(self, session_id: str, manifest: agent_pb2.DirectoryManifest) -> list: + """Compares local files with the server manifest and returns paths needing update.""" + session_dir = self.get_session_dir(session_id) + print(f"[📁] Reconciling Sync Directory: {session_dir}") + + needs_update = [] + for file_info in manifest.files: + target_path = os.path.join(session_dir, file_info.path) + + if file_info.is_dir: + os.makedirs(target_path, exist_ok=True) + continue + + # File Check + if not os.path.exists(target_path): + needs_update.append(file_info.path) + else: + # Hash comparison + with open(target_path, "rb") as f: + actual_hash = hashlib.sha256(f.read()).hexdigest() + if actual_hash != file_info.hash: + print(f" [⚠️] Drift Detected: {file_info.path} (Local: {actual_hash[:8]} vs Remote: {file_info.hash[:8]})") + needs_update.append(file_info.path) + + return needs_update + + def write_chunk(self, session_id: str, payload: agent_pb2.FilePayload) -> bool: + """Writes a file chunk to the 
local session directory.""" + session_dir = self.get_session_dir(session_id) + target_path = os.path.normpath(os.path.join(session_dir, payload.path)) + + if not target_path.startswith(session_dir): + return False # Path traversal guard + + os.makedirs(os.path.dirname(target_path), exist_ok=True) + + mode = "ab" if payload.chunk_index > 0 else "wb" + with open(target_path, mode) as f: + f.write(payload.chunk) + + if payload.is_final and payload.hash: + return self._verify(target_path, payload.hash) + return True + + def _verify(self, path, expected_hash): + with open(path, "rb") as f: + actual = hashlib.sha256(f.read()).hexdigest() + if actual != expected_hash: + print(f"[⚠️] Sync Hash Mismatch for {path}") + return False + return True diff --git a/agent-node/agent_node/core/watcher.py b/agent-node/agent_node/core/watcher.py new file mode 100644 index 0000000..285ba53 --- /dev/null +++ b/agent-node/agent_node/core/watcher.py @@ -0,0 +1,110 @@ + +import time +import os +import hashlib +from watchdog.observers import Observer +from watchdog.events import FileSystemEventHandler +from shared_core.ignore import CortexIgnore +from protos import agent_pb2 + +class SyncEventHandler(FileSystemEventHandler): + """Listens for FS events and triggers gRPC delta pushes.""" + def __init__(self, session_id, root_path, callback): + self.session_id = session_id + self.root_path = root_path + self.callback = callback + self.ignore_filter = CortexIgnore(root_path) + self.last_sync = {} # path -> last_hash + self.locked = False + + def on_modified(self, event): + if not event.is_directory: + self._process_change(event.src_path) + + def on_created(self, event): + if not event.is_directory: + self._process_change(event.src_path) + + def on_moved(self, event): + # Simplification: treat move as a delete and create, or just process the dest + self._process_change(event.dest_path) + + def _process_change(self, abs_path): + if self.locked: + return # Block all user edits when session is 
locked + + rel_path = os.path.normpath(os.path.relpath(abs_path, self.root_path)) + + # Phase 3: Dynamic reload if .cortexignore / .gitignore changed + if rel_path in [".cortexignore", ".gitignore"]: + print(f" [*] Reloading Ignore Filter for {self.session_id}") + self.ignore_filter = CortexIgnore(self.root_path) + + if self.ignore_filter.is_ignored(rel_path): + return + + try: + with open(abs_path, "rb") as f: + content = f.read() + file_hash = hashlib.sha256(content).hexdigest() + + if self.last_sync.get(rel_path) == file_hash: + return # No actual change + + self.last_sync[rel_path] = file_hash + print(f" [📁📤] Detected Change: {rel_path}") + + # Chunk and Send + chunk_size = 64 * 1024 + for i in range(0, len(content), chunk_size): + chunk = content[i:i + chunk_size] + is_final = i + chunk_size >= len(content) + payload = agent_pb2.FilePayload( + path=rel_path, + chunk=chunk, + chunk_index=i // chunk_size, + is_final=is_final, + hash=file_hash if is_final else "" + ) + self.callback(self.session_id, payload) + except Exception as e: + print(f" [!] 
Watcher Error for {rel_path}: {e}") + +class WorkspaceWatcher: + """Manages FS observers for active synchronization.""" + def __init__(self, callback): + self.callback = callback + self.observers = {} # session_id -> (observer, handler) + + def set_lock(self, session_id, locked=True): + if session_id in self.observers: + print(f"[*] Workspace LOCK for {session_id}: {locked}") + self.observers[session_id][1].locked = locked + + def start_watching(self, session_id, root_path): + if session_id in self.observers: + self.stop_watching(session_id) + + print(f"[*] Starting Watcher for Session {session_id} at {root_path}") + handler = SyncEventHandler(session_id, root_path, self.callback) + observer = Observer() + observer.schedule(handler, root_path, recursive=True) + observer.start() + self.observers[session_id] = (observer, handler) + + def stop_watching(self, session_id): + if session_id in self.observers: + print(f"[*] Stopping Watcher for Session {session_id}") + obs, _ = self.observers[session_id] + obs.stop() + obs.join() + del self.observers[session_id] + + def get_watch_path(self, session_id): + if session_id in self.observers: + return self.observers[session_id][1].root_path + return None + + def shutdown(self): + for sid in list(self.observers.keys()): + self.stop_watching(sid) diff --git a/agent-node/agent_node/main.py b/agent-node/agent_node/main.py new file mode 100644 index 0000000..6fa4eb0 --- /dev/null +++ b/agent-node/agent_node/main.py @@ -0,0 +1,35 @@ +import sys +import os + +# Add root to path to find protos and other packages +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) + +import signal +from agent_node.node import AgentNode +from agent_node.config import NODE_ID + +def main(): + print(f"[*] Starting Antigravity Agent Node: {NODE_ID}...") + + # 1. Initialization + node = AgentNode() + + # 2. 
Signal Handling for Graceful Shutdown + def handle_exit(sig, frame): + node.stop() + sys.exit(0) + + signal.signal(signal.SIGINT, handle_exit) + signal.signal(signal.SIGTERM, handle_exit) + + # Handshake: Sync configuration and Sandbox Policy + node.sync_configuration() + + # 3. Background: Start health reporting (Heartbeats) + node.start_health_reporting() + + # 4. Foreground: Run Persistent Task Stream + node.run_task_stream() + +if __name__ == '__main__': + main() diff --git a/agent-node/agent_node/node.py b/agent-node/agent_node/node.py new file mode 100644 index 0000000..a815240 --- /dev/null +++ b/agent-node/agent_node/node.py @@ -0,0 +1,305 @@ +import threading +import queue +import time +import sys +import os +import hashlib +import psutil +from protos import agent_pb2, agent_pb2_grpc +from agent_node.skills.manager import SkillManager +from agent_node.core.sandbox import SandboxEngine +from agent_node.core.sync import NodeSyncManager +from agent_node.core.watcher import WorkspaceWatcher +from agent_node.utils.auth import verify_task_signature +from agent_node.utils.network import get_secure_stub +from agent_node.config import NODE_ID, NODE_DESC, AUTH_TOKEN, HEALTH_REPORT_INTERVAL, MAX_SKILL_WORKERS + + +class AgentNode: + """The 'Agent Core': Orchestrates Local Skills and Maintains gRPC Connection.""" + def __init__(self, node_id=NODE_ID): + self.node_id = node_id + self.sandbox = SandboxEngine() + self.sync_mgr = NodeSyncManager() + self.skills = SkillManager(max_workers=MAX_SKILL_WORKERS, sync_mgr=self.sync_mgr) + self.watcher = WorkspaceWatcher(self._on_sync_delta) + self.task_queue = queue.Queue() + self.stub = get_secure_stub() + + def sync_configuration(self): + """Initial handshake to retrieve policy and metadata.""" + print(f"[*] Handshake with Orchestrator: {self.node_id}") + reg_req = agent_pb2.RegistrationRequest( + node_id=self.node_id, + auth_token=AUTH_TOKEN, + node_description=NODE_DESC, + capabilities={"shell": "v1", "browser": 
"playwright-sync-bridge"} + ) + + + try: + res = self.stub.SyncConfiguration(reg_req) + if res.success: + self.sandbox.sync(res.policy) + print("[OK] Sandbox Policy Synced.") + else: + print(f"[!] Rejection: {res.error_message}") + sys.exit(1) + except Exception as e: + print(f"[!] Connection Fail: {e}") + sys.exit(1) + + def start_health_reporting(self): + """Streaming node metrics to the orchestrator for load balancing.""" + def _gen(): + while True: + ids = self.skills.get_active_ids() + cpu = psutil.cpu_percent(interval=None) + mem = psutil.virtual_memory().percent + yield agent_pb2.Heartbeat( + node_id=self.node_id, + cpu_usage_percent=cpu, + memory_usage_percent=mem, + active_worker_count=len(ids), + max_worker_capacity=MAX_SKILL_WORKERS, + running_task_ids=ids + ) + time.sleep(HEALTH_REPORT_INTERVAL) + + # Non-blocking thread for health heartbeat + threading.Thread( + target=lambda: list(self.stub.ReportHealth(_gen())), + daemon=True, name=f"Health-{self.node_id}" + ).start() + + def run_task_stream(self): + """Main Persistent Bi-directional Stream for Task Management.""" + def _gen(): + # Initial announcement for routing identity + yield agent_pb2.ClientTaskMessage( + announce=agent_pb2.NodeAnnounce(node_id=self.node_id) + ) + while True: + yield self.task_queue.get() + + responses = self.stub.TaskStream(_gen()) + print(f"[*] Task Stream Online: {self.node_id}", flush=True) + + try: + for msg in responses: + kind = msg.WhichOneof('payload') + print(f" [📥] Received from Stream: {kind}", flush=True) + self._process_server_message(msg) + except Exception as e: + print(f"[!] 
Task Stream Failure: {e}", flush=True) + + def _process_server_message(self, msg): + kind = msg.WhichOneof('payload') + print(f"[*] Inbound: {kind}", flush=True) + + if kind == 'task_request': + self._handle_task(msg.task_request) + + elif kind == 'task_cancel': + if self.skills.cancel(msg.task_cancel.task_id): + self._send_response(msg.task_cancel.task_id, None, agent_pb2.TaskResponse.CANCELLED) + + elif kind == 'work_pool_update': + # Claim logical idle tasks from global pool with slight randomized jitter + # to prevent thundering herd where every node claims the same task at the exact same ms. + if len(self.skills.get_active_ids()) < MAX_SKILL_WORKERS: + for tid in msg.work_pool_update.available_task_ids: + # Deterministic delay based on node_id to distribute claims + import random + time.sleep(random.uniform(0.1, 0.5)) + + self.task_queue.put(agent_pb2.ClientTaskMessage( + task_claim=agent_pb2.TaskClaimRequest(task_id=tid, node_id=self.node_id) + )) + + elif kind == 'claim_status': + status = "GRANTED" if msg.claim_status.granted else "DENIED" + print(f" [📦] Claim {msg.claim_status.task_id}: {status} ({msg.claim_status.reason})", flush=True) + + elif kind == 'file_sync': + self._handle_file_sync(msg.file_sync) + + def _on_sync_delta(self, session_id, file_payload): + """Callback from watcher to push local changes to server.""" + self.task_queue.put(agent_pb2.ClientTaskMessage( + file_sync=agent_pb2.FileSyncMessage( + session_id=session_id, + file_data=file_payload + ) + )) + + def _handle_file_sync(self, fs): + """Processes inbound file synchronization messages from the Orchestrator.""" + sid = fs.session_id + if fs.HasField("manifest"): + needs_update = self.sync_mgr.handle_manifest(sid, fs.manifest) + if needs_update: + print(f" [📁⚠️] Drift Detected for {sid}: {len(needs_update)} files need sync") + self.task_queue.put(agent_pb2.ClientTaskMessage( + file_sync=agent_pb2.FileSyncMessage( + session_id=sid, + status=agent_pb2.SyncStatus( + 
code=agent_pb2.SyncStatus.RECONCILE_REQUIRED, + message=f"Drift detected in {len(needs_update)} files", + reconcile_paths=needs_update + ) + ) + )) + else: + self.task_queue.put(agent_pb2.ClientTaskMessage( + file_sync=agent_pb2.FileSyncMessage( + session_id=sid, + status=agent_pb2.SyncStatus(code=agent_pb2.SyncStatus.OK, message="Synchronized") + ) + )) + elif fs.HasField("file_data"): + success = self.sync_mgr.write_chunk(sid, fs.file_data) + if fs.file_data.is_final: + print(f" [📁] File Received: {fs.file_data.path} (Verified: {success})") + status = agent_pb2.SyncStatus.OK if success else agent_pb2.SyncStatus.ERROR + self.task_queue.put(agent_pb2.ClientTaskMessage( + file_sync=agent_pb2.FileSyncMessage( + session_id=sid, + status=agent_pb2.SyncStatus(code=status, message=f"File {fs.file_data.path} synced") + ) + )) + elif fs.HasField("control"): + ctrl = fs.control + if ctrl.action == agent_pb2.SyncControl.START_WATCHING: + # Path relative to sync dir or absolute + watch_path = ctrl.path if os.path.isabs(ctrl.path) else os.path.join(self.sync_mgr.get_session_dir(sid), ctrl.path) + self.watcher.start_watching(sid, watch_path) + elif ctrl.action == agent_pb2.SyncControl.STOP_WATCHING: + self.watcher.stop_watching(sid) + elif ctrl.action == agent_pb2.SyncControl.LOCK: + self.watcher.set_lock(sid, True) + elif ctrl.action == agent_pb2.SyncControl.UNLOCK: + self.watcher.set_lock(sid, False) + elif ctrl.action == agent_pb2.SyncControl.REFRESH_MANIFEST: + if ctrl.request_paths: + print(f" [📁📤] Pushing {len(ctrl.request_paths)} Requested Files for {sid}") + for path in ctrl.request_paths: + self._push_file(sid, path) + else: + # Node -> Server Manifest Push + self._push_full_manifest(sid, ctrl.path) + elif ctrl.action == agent_pb2.SyncControl.RESYNC: + # Server -> Node asks for a check, but Node only has its own manifest? 
+ # Actually RESYNC usually comes with a manifest or implies "send me yours so I can check" + # Here we'll treat RESYNC as "Send me your manifest" + self._push_full_manifest(sid, ctrl.path) + + def _push_full_manifest(self, session_id, rel_path="."): + """Pushes the current local manifest back to the server.""" + print(f" [📁📤] Pushing Full Manifest for {session_id}") + watch_path = rel_path if os.path.isabs(rel_path) else os.path.join(self.sync_mgr.get_session_dir(session_id), rel_path) + + # We need a manifest generator similar to GhostMirrorManager but on the node + # For Phase 3, we'll implement a simple one here + files = [] + for root, dirs, filenames in os.walk(watch_path): + for filename in filenames: + abs_path = os.path.join(root, filename) + r_path = os.path.relpath(abs_path, watch_path) + with open(abs_path, "rb") as f: + h = hashlib.sha256(f.read()).hexdigest() + files.append(agent_pb2.FileInfo(path=r_path, size=os.path.getsize(abs_path), hash=h)) + + self.task_queue.put(agent_pb2.ClientTaskMessage( + file_sync=agent_pb2.FileSyncMessage( + session_id=session_id, + manifest=agent_pb2.DirectoryManifest(root_path=rel_path, files=files) + ) + )) + + def _push_file(self, session_id, rel_path): + """Pushes a specific file from node to server.""" + watch_path = self.watcher.get_watch_path(session_id) + if not watch_path: + # Fallback to sync dir if watcher not started + watch_path = self.sync_mgr.get_session_dir(session_id) + + abs_path = os.path.join(watch_path, rel_path) + if not os.path.exists(abs_path): + print(f" [📁❓] Requested file {rel_path} not found on node") + return + + with open(abs_path, "rb") as f: + full_data = f.read() + full_hash = hashlib.sha256(full_data).hexdigest() + f.seek(0) + + index = 0 + while True: + chunk = f.read(1024 * 1024) # 1MB chunks + is_final = len(chunk) < 1024 * 1024 + + self.task_queue.put(agent_pb2.ClientTaskMessage( + file_sync=agent_pb2.FileSyncMessage( + session_id=session_id, + file_data=agent_pb2.FilePayload( + 
path=rel_path, + chunk=chunk, + chunk_index=index, + is_final=is_final, + hash=full_hash if is_final else "" + ) + ) + )) + + if is_final or not chunk: + break + index += 1 + + def _handle_task(self, task): + print(f"[*] Task Launch: {task.task_id}", flush=True) + # 1. Cryptographic Signature Verification + if not verify_task_signature(task): + print(f"[!] Signature Validation Failed for {task.task_id}", flush=True) + return + + print(f"[✅] Validated task {task.task_id}", flush=True) + + # 2. Skill Manager Submission + success, reason = self.skills.submit(task, self.sandbox, self._on_finish, self._on_event) + if not success: + print(f"[!] Execution Rejected: {reason}", flush=True) + + def _on_event(self, event): + """Live Event Tunneler: Routes browser/skill events into the main stream.""" + self.task_queue.put(agent_pb2.ClientTaskMessage(browser_event=event)) + + def _on_finish(self, tid, res, trace): + """Final Completion Callback: Routes task results back to server.""" + print(f"[*] Completion: {tid}", flush=True) + status = agent_pb2.TaskResponse.SUCCESS if res['status'] == 1 else agent_pb2.TaskResponse.ERROR + + tr = agent_pb2.TaskResponse( + task_id=tid, status=status, + stdout=res.get('stdout',''), + stderr=res.get('stderr',''), + trace_id=trace, + browser_result=res.get("browser_result") + ) + self._send_response(tid, tr) + + def _send_response(self, tid, tr=None, status=None): + """Utility for placing response messages into the gRPC outbound queue.""" + if tr: + self.task_queue.put(agent_pb2.ClientTaskMessage(task_response=tr)) + else: + self.task_queue.put(agent_pb2.ClientTaskMessage( + task_response=agent_pb2.TaskResponse(task_id=tid, status=status) + )) + + def stop(self): + """Gracefully stops all background services and skills.""" + print(f"\n[🛑] Stopping Agent Node: {self.node_id}") + self.skills.shutdown() + # Optionally close gRPC channel if we want to be very clean + # self.channel.close() diff --git a/agent-node/agent_node/skills/__init__.py 
b/agent-node/agent_node/skills/__init__.py new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/agent-node/agent_node/skills/__init__.py diff --git a/agent-node/agent_node/skills/base.py b/agent-node/agent_node/skills/base.py new file mode 100644 index 0000000..33c88ec --- /dev/null +++ b/agent-node/agent_node/skills/base.py @@ -0,0 +1,13 @@ +class BaseSkill: + """Abstract interface for all Node capabilities (Shell, Browser, etc.).""" + def execute(self, task, sandbox, on_complete, on_event=None): + """Processes the given task and notifies results via callbacks.""" + raise NotImplementedError + + def cancel(self, task_id: str) -> bool: + """Attempts to cancel the task and returns success status.""" + return False + + def shutdown(self): + """Cleanup resources on node exit.""" + pass diff --git a/agent-node/agent_node/skills/browser.py b/agent-node/agent_node/skills/browser.py new file mode 100644 index 0000000..3205b7d --- /dev/null +++ b/agent-node/agent_node/skills/browser.py @@ -0,0 +1,148 @@ +import threading +import queue +import time +import json +from playwright.sync_api import sync_playwright +from agent_node.skills.base import BaseSkill +from protos import agent_pb2 + +class BrowserSkill(BaseSkill): + """The 'Antigravity Bridge': Persistent Browser Skill using a dedicated Actor thread.""" + def __init__(self, sync_mgr=None): + self.task_queue = queue.Queue() + self.sessions = {} # session_id -> { "context": Context, "page": Page } + self.sync_mgr = sync_mgr + self.lock = threading.Lock() + threading.Thread(target=self._browser_actor, daemon=True, name="BrowserActor").start() + + def _setup_listeners(self, sid, page, on_event): + """Tunnels browser internal events back to the Orchestrator.""" + if not on_event: return + + # Live Console Redirector + page.on("console", lambda msg: on_event(agent_pb2.BrowserEvent( + session_id=sid, console_msg=agent_pb2.ConsoleMessage( + level=msg.type, text=msg.text, timestamp_ms=int(time.time()*1000) + ) + ))) + 
+ # Live Network Redirector + page.on("requestfinished", lambda req: on_event(agent_pb2.BrowserEvent( + session_id=sid, network_req=agent_pb2.NetworkRequest( + method=req.method, url=req.url, status=req.response().status if req.response() else 0, + resource_type=req.resource_type, latency_ms=0 + ) + ))) + + # Live Download Redirector + page.on("download", lambda download: self._handle_download(sid, download)) + + def _handle_download(self, sid, download): + """Saves browser downloads directly into the synchronized session workspace.""" + with self.lock: + sess = self.sessions.get(sid) + if sess and sess.get("download_dir"): + target = os.path.join(sess["download_dir"], download.suggested_filename) + print(f" [🌐📥] Browser Download Sync: {download.suggested_filename} -> {target}") + download.save_as(target) + + def _browser_actor(self): + """Serializes all Playwright operations on a single dedicated thread.""" + print("[🌐] Browser Actor Starting...", flush=True) + pw = None + browser = None + try: + pw = sync_playwright().start() + # 12-Factor/Container Optimization: Standard non-sandbox arguments + browser = pw.chromium.launch(headless=True, args=[ + '--no-sandbox', '--disable-setuid-sandbox', '--disable-dev-shm-usage', '--disable-gpu' + ]) + print("[🌐] Browser Engine Online.", flush=True) + except Exception as e: + print(f"[!] 
Browser Actor Startup Fail: {e}", flush=True) + if pw: pw.stop() + return + + while True: + try: + item = self.task_queue.get() + if item is None: # Sentinel for shutdown + print("[🌐] Browser Actor Shutting Down...", flush=True) + break + + task, sandbox, on_complete, on_event = item + action = task.browser_action + sid = action.session_id or "default" + + with self.lock: + if sid not in self.sessions: + # Phase 4: Mount workspace for downloads/uploads + download_dir = None + if self.sync_mgr and task.session_id: + download_dir = self.sync_mgr.get_session_dir(task.session_id) + print(f" [🌐📁] Mapping Browser Context to: {download_dir}") + + ctx = browser.new_context(accept_downloads=True) + pg = ctx.new_page() + self._setup_listeners(sid, pg, on_event) + self.sessions[sid] = {"context": ctx, "page": pg, "download_dir": download_dir} + + page = self.sessions[sid]["page"] + print(f" [🌐] Browser Actor Processing: {agent_pb2.BrowserAction.ActionType.Name(action.action)} | Session: {sid}", flush=True) + + res_data = {} + # State-Machine Logic for Actions + if action.action == agent_pb2.BrowserAction.NAVIGATE: + page.goto(action.url, wait_until="commit") + elif action.action == agent_pb2.BrowserAction.CLICK: + page.click(action.selector) + elif action.action == agent_pb2.BrowserAction.TYPE: + page.fill(action.selector, action.text) + elif action.action == agent_pb2.BrowserAction.SCREENSHOT: + res_data["snapshot"] = page.screenshot() + elif action.action == agent_pb2.BrowserAction.GET_DOM: + res_data["dom_content"] = page.content() + elif action.action == agent_pb2.BrowserAction.HOVER: + page.hover(action.selector) + elif action.action == agent_pb2.BrowserAction.SCROLL: + page.mouse.wheel(x=0, y=action.y) + elif action.action == agent_pb2.BrowserAction.EVAL: + res_data["eval_result"] = str(page.evaluate(action.text)) + elif action.action == agent_pb2.BrowserAction.GET_A11Y: + res_data["a11y_tree"] = json.dumps(page.accessibility.snapshot()) + elif action.action == 
agent_pb2.BrowserAction.CLOSE: + with self.lock: + sess = self.sessions.pop(sid, None) + if sess: sess["context"].close() + + # Results Construction + br_res = agent_pb2.BrowserResponse( + url=page.url, title=page.title(), + snapshot=res_data.get("snapshot", b""), + dom_content=res_data.get("dom_content", ""), + a11y_tree=res_data.get("a11y_tree", ""), + eval_result=res_data.get("eval_result", "") + ) + on_complete(task.task_id, {"status": 1, "browser_result": br_res}, task.trace_id) + except Exception as e: + print(f" [!] Browser Actor Error: {e}", flush=True) + on_complete(task.task_id, {"stderr": str(e), "status": 2}, task.trace_id) + + # Cleanup on loop exit + print("[🌐] Cleaning up Browser Engine...", flush=True) + with self.lock: + for s in self.sessions.values(): + try: s["context"].close() + except: pass + self.sessions.clear() + if browser: browser.close() + if pw: pw.stop() + + def execute(self, task, sandbox, on_complete, on_event=None): + self.task_queue.put((task, sandbox, on_complete, on_event)) + + def cancel(self, task_id): return False + + def shutdown(self): + """Triggers graceful shutdown of the browser engine.""" + self.task_queue.put(None) diff --git a/agent-node/agent_node/skills/manager.py b/agent-node/agent_node/skills/manager.py new file mode 100644 index 0000000..f5a85b8 --- /dev/null +++ b/agent-node/agent_node/skills/manager.py @@ -0,0 +1,64 @@ +import threading +from concurrent import futures +from agent_node.skills.shell import ShellSkill +from agent_node.skills.browser import BrowserSkill +from agent_node.config import MAX_SKILL_WORKERS + +class SkillManager: + """Orchestrates multiple modular skills and manages the task worker pool.""" + def __init__(self, max_workers=MAX_SKILL_WORKERS, sync_mgr=None): + self.executor = futures.ThreadPoolExecutor(max_workers=max_workers, thread_name_prefix="skill-worker") + self.active_tasks = {} # task_id -> future + self.sync_mgr = sync_mgr + self.skills = { + "shell": 
ShellSkill(sync_mgr=sync_mgr), + "browser": BrowserSkill(sync_mgr=sync_mgr) + } + self.max_workers = max_workers + self.lock = threading.Lock() + + def submit(self, task, sandbox, on_complete, on_event=None): + """Routes a task to the appropriate skill and submits it to the thread pool.""" + with self.lock: + if len(self.active_tasks) >= self.max_workers: + return False, "Node Capacity Reached" + + # 1. Routing Engine + if task.HasField("browser_action"): + skill = self.skills["browser"] + else: + skill = self.skills["shell"] + + # 2. Execution submission + future = self.executor.submit(skill.execute, task, sandbox, on_complete, on_event) + self.active_tasks[task.task_id] = future + + # Cleanup hook + future.add_done_callback(lambda f: self._cleanup(task.task_id)) + return True, "Accepted" + + def cancel(self, task_id): + """Attempts to cancel an active task through all registered skills.""" + with self.lock: + cancelled = any(s.cancel(task_id) for s in self.skills.values()) + return cancelled + + def get_active_ids(self): + """Returns the list of currently running task IDs.""" + with self.lock: + return list(self.active_tasks.keys()) + + def _cleanup(self, task_id): + """Internal callback to release capacity when a task finishes.""" + with self.lock: + self.active_tasks.pop(task_id, None) + + def shutdown(self): + """Triggers shutdown for all skills and the worker pool.""" + print("[🔧] Shutting down Skill Manager...") + with self.lock: + for name, skill in self.skills.items(): + print(f" [🔧] Shutting down skill: {name}") + skill.shutdown() + # Shutdown thread pool + self.executor.shutdown(wait=True) diff --git a/agent-node/agent_node/skills/shell.py b/agent-node/agent_node/skills/shell.py new file mode 100644 index 0000000..9d17464 --- /dev/null +++ b/agent-node/agent_node/skills/shell.py @@ -0,0 +1,72 @@ +import subprocess +import threading +from .base import BaseSkill + +class ShellSkill(BaseSkill): + """Default Skill: Executing shell commands with sandbox 
safety.""" + def __init__(self, sync_mgr=None): + self.processes = {} # task_id -> Popen + self.sync_mgr = sync_mgr + self.lock = threading.Lock() + + def execute(self, task, sandbox, on_complete, on_event=None): + """Processes shell-based commands for the Node.""" + try: + cmd = task.payload_json + + # 1. Verification Logic + allowed, status_msg = sandbox.verify(cmd) + if not allowed: + err_msg = f"SANDBOX_VIOLATION: {status_msg}" + return on_complete(task.task_id, {"stderr": err_msg, "status": 2}, task.trace_id) + + # 2. Sequential Execution + print(f" [🐚] Executing Shell: {cmd}", flush=True) + + # Resolve CWD for the skill based on session_id + cwd = None + if self.sync_mgr and task.session_id: + cwd = self.sync_mgr.get_session_dir(task.session_id) + print(f" [📁] Setting CWD to {cwd}") + + p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, cwd=cwd) + + with self.lock: + self.processes[task.task_id] = p + + # 3. Timeout Handling + timeout = task.timeout_ms / 1000.0 if task.timeout_ms > 0 else None + stdout, stderr = p.communicate(timeout=timeout) + + print(f" [🐚] Shell Done: {cmd} | Stdout Size: {len(stdout)}", flush=True) + on_complete(task.task_id, { + "stdout": stdout, "stderr": stderr, + "status": 1 if p.returncode == 0 else 2 + }, task.trace_id) + + except subprocess.TimeoutExpired: + self.cancel(task.task_id) + on_complete(task.task_id, {"stderr": "TASK_TIMEOUT", "status": 2}, task.trace_id) + except Exception as e: + on_complete(task.task_id, {"stderr": str(e), "status": 2}, task.trace_id) + finally: + with self.lock: + self.processes.pop(task.task_id, None) + + def cancel(self, task_id: str): + """Standard process termination for shell tasks.""" + with self.lock: + p = self.processes.get(task_id) + if p: + print(f"[🛑] Killing Shell Task: {task_id}") + p.kill() + return True + return False + def shutdown(self): + """Standard cleanup: Terminates all active shell processes.""" + with self.lock: + for tid, p in 
list(self.processes.items()): + print(f"[🛑] Killing Orphan Shell Task: {tid}") + try: p.kill() + except: pass + self.processes.clear() diff --git a/agent-node/agent_node/utils/__init__.py b/agent-node/agent_node/utils/__init__.py new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/agent-node/agent_node/utils/__init__.py diff --git a/agent-node/agent_node/utils/auth.py b/agent-node/agent_node/utils/auth.py new file mode 100644 index 0000000..202fd4c --- /dev/null +++ b/agent-node/agent_node/utils/auth.py @@ -0,0 +1,28 @@ +import jwt +import datetime +import hmac +import hashlib +from protos import agent_pb2 +from agent_node.config import SECRET_KEY + +def create_auth_token(node_id: str) -> str: + """Creates a JWT for node authentication.""" + payload = { + "sub": node_id, + "iat": datetime.datetime.utcnow(), + "exp": datetime.datetime.utcnow() + datetime.timedelta(minutes=10) + } + return jwt.encode(payload, SECRET_KEY, algorithm="HS256") + +def verify_task_signature(task, secret=SECRET_KEY) -> bool: + """Verifies HMAC signature for shell or browser tasks.""" + if task.HasField("browser_action"): + a = task.browser_action + # Aligned with orchestrator's sign_browser_action using the string Name + kind = agent_pb2.BrowserAction.ActionType.Name(a.action) + sign_base = f"{kind}:{a.url}:{a.session_id}" + else: + sign_base = task.payload_json + + expected_sig = hmac.new(secret.encode(), sign_base.encode(), hashlib.sha256).hexdigest() + return hmac.compare_digest(task.signature, expected_sig) diff --git a/agent-node/agent_node/utils/network.py b/agent-node/agent_node/utils/network.py new file mode 100644 index 0000000..3eac1c6 --- /dev/null +++ b/agent-node/agent_node/utils/network.py @@ -0,0 +1,27 @@ +import grpc +import os +from protos import agent_pb2_grpc +from agent_node.config import SERVER_HOST_PORT, TLS_ENABLED, CERT_CA, CERT_CLIENT_CRT, CERT_CLIENT_KEY + +def get_secure_stub(): + """Initializes a gRPC channel (Secure or Insecure) and returns the 
orchestrator stub.""" + + if not TLS_ENABLED: + print(f"[!] TLS is disabled. Connecting via insecure channel to {SERVER_HOST_PORT}") + channel = grpc.insecure_channel(SERVER_HOST_PORT) + return agent_pb2_grpc.AgentOrchestratorStub(channel) + + print(f"[*] Connecting via secure (mTLS) channel to {SERVER_HOST_PORT}") + try: + with open(CERT_CLIENT_KEY, 'rb') as f: pkey = f.read() + with open(CERT_CLIENT_CRT, 'rb') as f: cert = f.read() + with open(CERT_CA, 'rb') as f: ca = f.read() + + creds = grpc.ssl_channel_credentials(ca, pkey, cert) + channel = grpc.secure_channel(SERVER_HOST_PORT, creds) + return agent_pb2_grpc.AgentOrchestratorStub(channel) + except FileNotFoundError as e: + print(f"[!] Certificate files not found: {e}. Falling back to insecure channel...") + channel = grpc.insecure_channel(SERVER_HOST_PORT) + return agent_pb2_grpc.AgentOrchestratorStub(channel) + diff --git a/agent-node/agent_pb2.py b/agent-node/agent_pb2.py new file mode 100644 index 0000000..0747c70 --- /dev/null +++ b/agent-node/agent_pb2.py @@ -0,0 +1,94 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: agent.proto +# Protobuf Python Version: 4.25.1 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0b\x61gent.proto\x12\x05\x61gent\"\xde\x01\n\x13RegistrationRequest\x12\x0f\n\x07node_id\x18\x01 \x01(\t\x12\x0f\n\x07version\x18\x02 \x01(\t\x12\x12\n\nauth_token\x18\x03 \x01(\t\x12\x18\n\x10node_description\x18\x04 \x01(\t\x12\x42\n\x0c\x63\x61pabilities\x18\x05 \x03(\x0b\x32,.agent.RegistrationRequest.CapabilitiesEntry\x1a\x33\n\x11\x43\x61pabilitiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xc5\x01\n\rSandboxPolicy\x12\'\n\x04mode\x18\x01 \x01(\x0e\x32\x19.agent.SandboxPolicy.Mode\x12\x18\n\x10\x61llowed_commands\x18\x02 \x03(\t\x12\x17\n\x0f\x64\x65nied_commands\x18\x03 \x03(\t\x12\x1a\n\x12sensitive_commands\x18\x04 \x03(\t\x12\x18\n\x10working_dir_jail\x18\x05 \x01(\t\"\"\n\x04Mode\x12\n\n\x06STRICT\x10\x00\x12\x0e\n\nPERMISSIVE\x10\x01\"x\n\x14RegistrationResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\x12\x15\n\rerror_message\x18\x02 \x01(\t\x12\x12\n\nsession_id\x18\x03 \x01(\t\x12$\n\x06policy\x18\x04 \x01(\x0b\x32\x14.agent.SandboxPolicy\"\xff\x01\n\x11\x43lientTaskMessage\x12,\n\rtask_response\x18\x01 \x01(\x0b\x32\x13.agent.TaskResponseH\x00\x12-\n\ntask_claim\x18\x02 \x01(\x0b\x32\x17.agent.TaskClaimRequestH\x00\x12,\n\rbrowser_event\x18\x03 \x01(\x0b\x32\x13.agent.BrowserEventH\x00\x12\'\n\x08\x61nnounce\x18\x04 \x01(\x0b\x32\x13.agent.NodeAnnounceH\x00\x12+\n\tfile_sync\x18\x05 \x01(\x0b\x32\x16.agent.FileSyncMessageH\x00\x42\t\n\x07payload\"\x1f\n\x0cNodeAnnounce\x12\x0f\n\x07node_id\x18\x01 
\x01(\t\"\x87\x01\n\x0c\x42rowserEvent\x12\x12\n\nsession_id\x18\x01 \x01(\t\x12,\n\x0b\x63onsole_msg\x18\x02 \x01(\x0b\x32\x15.agent.ConsoleMessageH\x00\x12,\n\x0bnetwork_req\x18\x03 \x01(\x0b\x32\x15.agent.NetworkRequestH\x00\x42\x07\n\x05\x65vent\"\x8d\x02\n\x11ServerTaskMessage\x12*\n\x0ctask_request\x18\x01 \x01(\x0b\x32\x12.agent.TaskRequestH\x00\x12\x31\n\x10work_pool_update\x18\x02 \x01(\x0b\x32\x15.agent.WorkPoolUpdateH\x00\x12\x30\n\x0c\x63laim_status\x18\x03 \x01(\x0b\x32\x18.agent.TaskClaimResponseH\x00\x12/\n\x0btask_cancel\x18\x04 \x01(\x0b\x32\x18.agent.TaskCancelRequestH\x00\x12+\n\tfile_sync\x18\x05 \x01(\x0b\x32\x16.agent.FileSyncMessageH\x00\x42\t\n\x07payload\"$\n\x11TaskCancelRequest\x12\x0f\n\x07task_id\x18\x01 \x01(\t\"\xd1\x01\n\x0bTaskRequest\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x11\n\ttask_type\x18\x02 \x01(\t\x12\x16\n\x0cpayload_json\x18\x03 \x01(\tH\x00\x12.\n\x0e\x62rowser_action\x18\x07 \x01(\x0b\x32\x14.agent.BrowserActionH\x00\x12\x12\n\ntimeout_ms\x18\x04 \x01(\x05\x12\x10\n\x08trace_id\x18\x05 \x01(\t\x12\x11\n\tsignature\x18\x06 \x01(\t\x12\x12\n\nsession_id\x18\x08 \x01(\tB\t\n\x07payload\"\xa0\x02\n\rBrowserAction\x12/\n\x06\x61\x63tion\x18\x01 \x01(\x0e\x32\x1f.agent.BrowserAction.ActionType\x12\x0b\n\x03url\x18\x02 \x01(\t\x12\x10\n\x08selector\x18\x03 \x01(\t\x12\x0c\n\x04text\x18\x04 \x01(\t\x12\x12\n\nsession_id\x18\x05 \x01(\t\x12\t\n\x01x\x18\x06 \x01(\x05\x12\t\n\x01y\x18\x07 \x01(\x05\"\x86\x01\n\nActionType\x12\x0c\n\x08NAVIGATE\x10\x00\x12\t\n\x05\x43LICK\x10\x01\x12\x08\n\x04TYPE\x10\x02\x12\x0e\n\nSCREENSHOT\x10\x03\x12\x0b\n\x07GET_DOM\x10\x04\x12\t\n\x05HOVER\x10\x05\x12\n\n\x06SCROLL\x10\x06\x12\t\n\x05\x43LOSE\x10\x07\x12\x08\n\x04\x45VAL\x10\x08\x12\x0c\n\x08GET_A11Y\x10\t\"\xe0\x02\n\x0cTaskResponse\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12*\n\x06status\x18\x02 \x01(\x0e\x32\x1a.agent.TaskResponse.Status\x12\x0e\n\x06stdout\x18\x03 \x01(\t\x12\x0e\n\x06stderr\x18\x04 \x01(\t\x12\x10\n\x08trace_id\x18\x05 
\x01(\t\x12\x35\n\tartifacts\x18\x06 \x03(\x0b\x32\".agent.TaskResponse.ArtifactsEntry\x12\x30\n\x0e\x62rowser_result\x18\x07 \x01(\x0b\x32\x16.agent.BrowserResponseH\x00\x1a\x30\n\x0e\x41rtifactsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x0c:\x02\x38\x01\"<\n\x06Status\x12\x0b\n\x07SUCCESS\x10\x00\x12\t\n\x05\x45RROR\x10\x01\x12\x0b\n\x07TIMEOUT\x10\x02\x12\r\n\tCANCELLED\x10\x03\x42\x08\n\x06result\"\xdc\x01\n\x0f\x42rowserResponse\x12\x0b\n\x03url\x18\x01 \x01(\t\x12\r\n\x05title\x18\x02 \x01(\t\x12\x10\n\x08snapshot\x18\x03 \x01(\x0c\x12\x13\n\x0b\x64om_content\x18\x04 \x01(\t\x12\x11\n\ta11y_tree\x18\x05 \x01(\t\x12\x13\n\x0b\x65val_result\x18\x06 \x01(\t\x12.\n\x0f\x63onsole_history\x18\x07 \x03(\x0b\x32\x15.agent.ConsoleMessage\x12.\n\x0fnetwork_history\x18\x08 \x03(\x0b\x32\x15.agent.NetworkRequest\"C\n\x0e\x43onsoleMessage\x12\r\n\x05level\x18\x01 \x01(\t\x12\x0c\n\x04text\x18\x02 \x01(\t\x12\x14\n\x0ctimestamp_ms\x18\x03 \x01(\x03\"h\n\x0eNetworkRequest\x12\x0e\n\x06method\x18\x01 \x01(\t\x12\x0b\n\x03url\x18\x02 \x01(\t\x12\x0e\n\x06status\x18\x03 \x01(\x05\x12\x15\n\rresource_type\x18\x04 \x01(\t\x12\x12\n\nlatency_ms\x18\x05 \x01(\x03\",\n\x0eWorkPoolUpdate\x12\x1a\n\x12\x61vailable_task_ids\x18\x01 \x03(\t\"4\n\x10TaskClaimRequest\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x0f\n\x07node_id\x18\x02 \x01(\t\"E\n\x11TaskClaimResponse\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x0f\n\x07granted\x18\x02 \x01(\x08\x12\x0e\n\x06reason\x18\x03 \x01(\t\"\xc1\x01\n\tHeartbeat\x12\x0f\n\x07node_id\x18\x01 \x01(\t\x12\x19\n\x11\x63pu_usage_percent\x18\x02 \x01(\x02\x12\x1c\n\x14memory_usage_percent\x18\x03 \x01(\x02\x12\x1b\n\x13\x61\x63tive_worker_count\x18\x04 \x01(\x05\x12\x1b\n\x13max_worker_capacity\x18\x05 \x01(\x05\x12\x16\n\x0estatus_message\x18\x06 \x01(\t\x12\x18\n\x10running_task_ids\x18\x07 \x03(\t\"-\n\x13HealthCheckResponse\x12\x16\n\x0eserver_time_ms\x18\x01 
\x01(\x03\"\xd3\x01\n\x0f\x46ileSyncMessage\x12\x12\n\nsession_id\x18\x01 \x01(\t\x12,\n\x08manifest\x18\x02 \x01(\x0b\x32\x18.agent.DirectoryManifestH\x00\x12\'\n\tfile_data\x18\x03 \x01(\x0b\x32\x12.agent.FilePayloadH\x00\x12#\n\x06status\x18\x04 \x01(\x0b\x32\x11.agent.SyncStatusH\x00\x12%\n\x07\x63ontrol\x18\x05 \x01(\x0b\x32\x12.agent.SyncControlH\x00\x42\t\n\x07payload\"\xc6\x01\n\x0bSyncControl\x12)\n\x06\x61\x63tion\x18\x01 \x01(\x0e\x32\x19.agent.SyncControl.Action\x12\x0c\n\x04path\x18\x02 \x01(\t\x12\x15\n\rrequest_paths\x18\x03 \x03(\t\"g\n\x06\x41\x63tion\x12\x12\n\x0eSTART_WATCHING\x10\x00\x12\x11\n\rSTOP_WATCHING\x10\x01\x12\x08\n\x04LOCK\x10\x02\x12\n\n\x06UNLOCK\x10\x03\x12\x14\n\x10REFRESH_MANIFEST\x10\x04\x12\n\n\x06RESYNC\x10\x05\"F\n\x11\x44irectoryManifest\x12\x11\n\troot_path\x18\x01 \x01(\t\x12\x1e\n\x05\x66iles\x18\x02 \x03(\x0b\x32\x0f.agent.FileInfo\"D\n\x08\x46ileInfo\x12\x0c\n\x04path\x18\x01 \x01(\t\x12\x0c\n\x04size\x18\x02 \x01(\x03\x12\x0c\n\x04hash\x18\x03 \x01(\t\x12\x0e\n\x06is_dir\x18\x04 \x01(\x08\"_\n\x0b\x46ilePayload\x12\x0c\n\x04path\x18\x01 \x01(\t\x12\r\n\x05\x63hunk\x18\x02 \x01(\x0c\x12\x13\n\x0b\x63hunk_index\x18\x03 \x01(\x05\x12\x10\n\x08is_final\x18\x04 \x01(\x08\x12\x0c\n\x04hash\x18\x05 \x01(\t\"\xa0\x01\n\nSyncStatus\x12$\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x16.agent.SyncStatus.Code\x12\x0f\n\x07message\x18\x02 \x01(\t\x12\x17\n\x0freconcile_paths\x18\x03 \x03(\t\"B\n\x04\x43ode\x12\x06\n\x02OK\x10\x00\x12\t\n\x05\x45RROR\x10\x01\x12\x16\n\x12RECONCILE_REQUIRED\x10\x02\x12\x0f\n\x0bIN_PROGRESS\x10\x03\x32\xe9\x01\n\x11\x41gentOrchestrator\x12L\n\x11SyncConfiguration\x12\x1a.agent.RegistrationRequest\x1a\x1b.agent.RegistrationResponse\x12\x44\n\nTaskStream\x12\x18.agent.ClientTaskMessage\x1a\x18.agent.ServerTaskMessage(\x01\x30\x01\x12@\n\x0cReportHealth\x12\x10.agent.Heartbeat\x1a\x1a.agent.HealthCheckResponse(\x01\x30\x01\x62\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, 
_globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'agent_pb2', _globals) +if _descriptor._USE_C_DESCRIPTORS == False: + DESCRIPTOR._options = None + _globals['_REGISTRATIONREQUEST_CAPABILITIESENTRY']._options = None + _globals['_REGISTRATIONREQUEST_CAPABILITIESENTRY']._serialized_options = b'8\001' + _globals['_TASKRESPONSE_ARTIFACTSENTRY']._options = None + _globals['_TASKRESPONSE_ARTIFACTSENTRY']._serialized_options = b'8\001' + _globals['_REGISTRATIONREQUEST']._serialized_start=23 + _globals['_REGISTRATIONREQUEST']._serialized_end=245 + _globals['_REGISTRATIONREQUEST_CAPABILITIESENTRY']._serialized_start=194 + _globals['_REGISTRATIONREQUEST_CAPABILITIESENTRY']._serialized_end=245 + _globals['_SANDBOXPOLICY']._serialized_start=248 + _globals['_SANDBOXPOLICY']._serialized_end=445 + _globals['_SANDBOXPOLICY_MODE']._serialized_start=411 + _globals['_SANDBOXPOLICY_MODE']._serialized_end=445 + _globals['_REGISTRATIONRESPONSE']._serialized_start=447 + _globals['_REGISTRATIONRESPONSE']._serialized_end=567 + _globals['_CLIENTTASKMESSAGE']._serialized_start=570 + _globals['_CLIENTTASKMESSAGE']._serialized_end=825 + _globals['_NODEANNOUNCE']._serialized_start=827 + _globals['_NODEANNOUNCE']._serialized_end=858 + _globals['_BROWSEREVENT']._serialized_start=861 + _globals['_BROWSEREVENT']._serialized_end=996 + _globals['_SERVERTASKMESSAGE']._serialized_start=999 + _globals['_SERVERTASKMESSAGE']._serialized_end=1268 + _globals['_TASKCANCELREQUEST']._serialized_start=1270 + _globals['_TASKCANCELREQUEST']._serialized_end=1306 + _globals['_TASKREQUEST']._serialized_start=1309 + _globals['_TASKREQUEST']._serialized_end=1518 + _globals['_BROWSERACTION']._serialized_start=1521 + _globals['_BROWSERACTION']._serialized_end=1809 + _globals['_BROWSERACTION_ACTIONTYPE']._serialized_start=1675 + _globals['_BROWSERACTION_ACTIONTYPE']._serialized_end=1809 + _globals['_TASKRESPONSE']._serialized_start=1812 + _globals['_TASKRESPONSE']._serialized_end=2164 + 
_globals['_TASKRESPONSE_ARTIFACTSENTRY']._serialized_start=2044 + _globals['_TASKRESPONSE_ARTIFACTSENTRY']._serialized_end=2092 + _globals['_TASKRESPONSE_STATUS']._serialized_start=2094 + _globals['_TASKRESPONSE_STATUS']._serialized_end=2154 + _globals['_BROWSERRESPONSE']._serialized_start=2167 + _globals['_BROWSERRESPONSE']._serialized_end=2387 + _globals['_CONSOLEMESSAGE']._serialized_start=2389 + _globals['_CONSOLEMESSAGE']._serialized_end=2456 + _globals['_NETWORKREQUEST']._serialized_start=2458 + _globals['_NETWORKREQUEST']._serialized_end=2562 + _globals['_WORKPOOLUPDATE']._serialized_start=2564 + _globals['_WORKPOOLUPDATE']._serialized_end=2608 + _globals['_TASKCLAIMREQUEST']._serialized_start=2610 + _globals['_TASKCLAIMREQUEST']._serialized_end=2662 + _globals['_TASKCLAIMRESPONSE']._serialized_start=2664 + _globals['_TASKCLAIMRESPONSE']._serialized_end=2733 + _globals['_HEARTBEAT']._serialized_start=2736 + _globals['_HEARTBEAT']._serialized_end=2929 + _globals['_HEALTHCHECKRESPONSE']._serialized_start=2931 + _globals['_HEALTHCHECKRESPONSE']._serialized_end=2976 + _globals['_FILESYNCMESSAGE']._serialized_start=2979 + _globals['_FILESYNCMESSAGE']._serialized_end=3190 + _globals['_SYNCCONTROL']._serialized_start=3193 + _globals['_SYNCCONTROL']._serialized_end=3391 + _globals['_SYNCCONTROL_ACTION']._serialized_start=3288 + _globals['_SYNCCONTROL_ACTION']._serialized_end=3391 + _globals['_DIRECTORYMANIFEST']._serialized_start=3393 + _globals['_DIRECTORYMANIFEST']._serialized_end=3463 + _globals['_FILEINFO']._serialized_start=3465 + _globals['_FILEINFO']._serialized_end=3533 + _globals['_FILEPAYLOAD']._serialized_start=3535 + _globals['_FILEPAYLOAD']._serialized_end=3630 + _globals['_SYNCSTATUS']._serialized_start=3633 + _globals['_SYNCSTATUS']._serialized_end=3793 + _globals['_SYNCSTATUS_CODE']._serialized_start=3727 + _globals['_SYNCSTATUS_CODE']._serialized_end=3793 + _globals['_AGENTORCHESTRATOR']._serialized_start=3796 + 
_globals['_AGENTORCHESTRATOR']._serialized_end=4029 +# @@protoc_insertion_point(module_scope) diff --git a/agent-node/agent_pb2_grpc.py b/agent-node/agent_pb2_grpc.py new file mode 100644 index 0000000..932d45e --- /dev/null +++ b/agent-node/agent_pb2_grpc.py @@ -0,0 +1,138 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + +import agent_pb2 as agent__pb2 + + +class AgentOrchestratorStub(object): + """The Cortex Server exposes this service + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.SyncConfiguration = channel.unary_unary( + '/agent.AgentOrchestrator/SyncConfiguration', + request_serializer=agent__pb2.RegistrationRequest.SerializeToString, + response_deserializer=agent__pb2.RegistrationResponse.FromString, + ) + self.TaskStream = channel.stream_stream( + '/agent.AgentOrchestrator/TaskStream', + request_serializer=agent__pb2.ClientTaskMessage.SerializeToString, + response_deserializer=agent__pb2.ServerTaskMessage.FromString, + ) + self.ReportHealth = channel.stream_stream( + '/agent.AgentOrchestrator/ReportHealth', + request_serializer=agent__pb2.Heartbeat.SerializeToString, + response_deserializer=agent__pb2.HealthCheckResponse.FromString, + ) + + +class AgentOrchestratorServicer(object): + """The Cortex Server exposes this service + """ + + def SyncConfiguration(self, request, context): + """1. Control Channel: Sync policies and settings (Unary) + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def TaskStream(self, request_iterator, context): + """2. 
Task Channel: Bidirectional work dispatch and reporting (Persistent) + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ReportHealth(self, request_iterator, context): + """3. Health Channel: Dedicated Ping-Pong / Heartbeat (Persistent) + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_AgentOrchestratorServicer_to_server(servicer, server): + rpc_method_handlers = { + 'SyncConfiguration': grpc.unary_unary_rpc_method_handler( + servicer.SyncConfiguration, + request_deserializer=agent__pb2.RegistrationRequest.FromString, + response_serializer=agent__pb2.RegistrationResponse.SerializeToString, + ), + 'TaskStream': grpc.stream_stream_rpc_method_handler( + servicer.TaskStream, + request_deserializer=agent__pb2.ClientTaskMessage.FromString, + response_serializer=agent__pb2.ServerTaskMessage.SerializeToString, + ), + 'ReportHealth': grpc.stream_stream_rpc_method_handler( + servicer.ReportHealth, + request_deserializer=agent__pb2.Heartbeat.FromString, + response_serializer=agent__pb2.HealthCheckResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'agent.AgentOrchestrator', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + + # This class is part of an EXPERIMENTAL API. 
+class AgentOrchestrator(object): + """The Cortex Server exposes this service + """ + + @staticmethod + def SyncConfiguration(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/agent.AgentOrchestrator/SyncConfiguration', + agent__pb2.RegistrationRequest.SerializeToString, + agent__pb2.RegistrationResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def TaskStream(request_iterator, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.stream_stream(request_iterator, target, '/agent.AgentOrchestrator/TaskStream', + agent__pb2.ClientTaskMessage.SerializeToString, + agent__pb2.ServerTaskMessage.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def ReportHealth(request_iterator, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.stream_stream(request_iterator, target, '/agent.AgentOrchestrator/ReportHealth', + agent__pb2.Heartbeat.SerializeToString, + agent__pb2.HealthCheckResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) diff --git a/agent-node/protos/__init__.py b/agent-node/protos/__init__.py new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/agent-node/protos/__init__.py diff --git a/agent-node/protos/agent.proto b/agent-node/protos/agent.proto new file mode 100644 index 0000000..e751062 --- /dev/null +++ 
b/agent-node/protos/agent.proto @@ -0,0 +1,247 @@ +syntax = "proto3"; + +package agent; + +// The Cortex Server exposes this service +service AgentOrchestrator { + // 1. Control Channel: Sync policies and settings (Unary) + rpc SyncConfiguration(RegistrationRequest) returns (RegistrationResponse); + + // 2. Task Channel: Bidirectional work dispatch and reporting (Persistent) + rpc TaskStream(stream ClientTaskMessage) returns (stream ServerTaskMessage); + + // 3. Health Channel: Dedicated Ping-Pong / Heartbeat (Persistent) + rpc ReportHealth(stream Heartbeat) returns (stream HealthCheckResponse); +} + +// --- Channel 1: Registration & Policy --- +message RegistrationRequest { + string node_id = 1; + string version = 2; + string auth_token = 3; + string node_description = 4; // AI-readable description of this node's role + map capabilities = 5; // e.g. "gpu": "nvidia-3080", "os": "ubuntu-22.04" +} + +message SandboxPolicy { + enum Mode { + STRICT = 0; + PERMISSIVE = 1; + } + Mode mode = 1; + repeated string allowed_commands = 2; + repeated string denied_commands = 3; + repeated string sensitive_commands = 4; + string working_dir_jail = 5; +} + +message RegistrationResponse { + bool success = 1; + string error_message = 2; + string session_id = 3; + SandboxPolicy policy = 4; +} + +// --- Channel 2: Tasks & Collaboration --- +message ClientTaskMessage { + oneof payload { + TaskResponse task_response = 1; + TaskClaimRequest task_claim = 2; + BrowserEvent browser_event = 3; + NodeAnnounce announce = 4; // NEW: Identification on stream connect + FileSyncMessage file_sync = 5; // NEW: Ghost Mirror Sync + } +} + +message NodeAnnounce { + string node_id = 1; +} + +message BrowserEvent { + string session_id = 1; + oneof event { + ConsoleMessage console_msg = 2; + NetworkRequest network_req = 3; + } +} + +message ServerTaskMessage { + oneof payload { + TaskRequest task_request = 1; + WorkPoolUpdate work_pool_update = 2; + TaskClaimResponse claim_status = 3; + TaskCancelRequest 
task_cancel = 4; + FileSyncMessage file_sync = 5; // NEW: Ghost Mirror Sync + } +} + +message TaskCancelRequest { + string task_id = 1; +} + +message TaskRequest { + string task_id = 1; + string task_type = 2; + oneof payload { + string payload_json = 3; // For legacy shell/fallback + BrowserAction browser_action = 7; // NEW: Structured Browser Skill + } + int32 timeout_ms = 4; + string trace_id = 5; + string signature = 6; + string session_id = 8; // NEW: Map execution to a sync workspace +} + +message BrowserAction { + enum ActionType { + NAVIGATE = 0; + CLICK = 1; + TYPE = 2; + SCREENSHOT = 3; + GET_DOM = 4; + HOVER = 5; + SCROLL = 6; + CLOSE = 7; + EVAL = 8; + GET_A11Y = 9; + } + ActionType action = 1; + string url = 2; + string selector = 3; + string text = 4; + string session_id = 5; + int32 x = 6; + int32 y = 7; +} + +message TaskResponse { + string task_id = 1; + enum Status { + SUCCESS = 0; + ERROR = 1; + TIMEOUT = 2; + CANCELLED = 3; + } + Status status = 2; + string stdout = 3; + string stderr = 4; + string trace_id = 5; + map artifacts = 6; + + // NEW: Structured Skill Results + oneof result { + BrowserResponse browser_result = 7; + } +} + +message BrowserResponse { + string url = 1; + string title = 2; + bytes snapshot = 3; + string dom_content = 4; + string a11y_tree = 5; + string eval_result = 6; + repeated ConsoleMessage console_history = 7; + repeated NetworkRequest network_history = 8; +} + +message ConsoleMessage { + string level = 1; + string text = 2; + int64 timestamp_ms = 3; +} + +message NetworkRequest { + string method = 1; + string url = 2; + int32 status = 3; + string resource_type = 4; + int64 latency_ms = 5; +} + +message WorkPoolUpdate { + repeated string available_task_ids = 1; +} + +message TaskClaimRequest { + string task_id = 1; + string node_id = 2; +} + +message TaskClaimResponse { + string task_id = 1; + bool granted = 2; + string reason = 3; +} + +// --- Channel 3: Health & Observation --- +message Heartbeat { + string node_id 
= 1; + float cpu_usage_percent = 2; + float memory_usage_percent = 3; + int32 active_worker_count = 4; + int32 max_worker_capacity = 5; + string status_message = 6; + repeated string running_task_ids = 7; +} + +message HealthCheckResponse { + int64 server_time_ms = 1; +} + +// --- Channel 4: Ghost Mirror File Sync --- +message FileSyncMessage { + string session_id = 1; + oneof payload { + DirectoryManifest manifest = 2; + FilePayload file_data = 3; + SyncStatus status = 4; + SyncControl control = 5; + } +} + +message SyncControl { + enum Action { + START_WATCHING = 0; + STOP_WATCHING = 1; + LOCK = 2; // Server -> Node: Disable user-side edits + UNLOCK = 3; // Server -> Node: Enable user-side edits + REFRESH_MANIFEST = 4; // Server -> Node: Request a full manifest from node + RESYNC = 5; // Server -> Node: Force a hash-based reconciliation + } + Action action = 1; + string path = 2; + repeated string request_paths = 3; // NEW: Specific files requested for pull +} + +message DirectoryManifest { + string root_path = 1; + repeated FileInfo files = 2; +} + +message FileInfo { + string path = 1; + int64 size = 2; + string hash = 3; // For drift detection + bool is_dir = 4; +} + +message FilePayload { + string path = 1; + bytes chunk = 2; + int32 chunk_index = 3; + bool is_final = 4; + string hash = 5; // Full file hash for verification on final chunk +} + +message SyncStatus { + enum Code { + OK = 0; + ERROR = 1; + RECONCILE_REQUIRED = 2; + IN_PROGRESS = 3; + } + Code code = 1; + string message = 2; + repeated string reconcile_paths = 3; // NEW: Files needing immediate re-sync +} diff --git a/agent-node/protos/agent_pb2.py b/agent-node/protos/agent_pb2.py new file mode 100644 index 0000000..3472d01 --- /dev/null +++ b/agent-node/protos/agent_pb2.py @@ -0,0 +1,94 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: protos/agent.proto +# Protobuf Python Version: 4.25.1 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x12protos/agent.proto\x12\x05\x61gent\"\xde\x01\n\x13RegistrationRequest\x12\x0f\n\x07node_id\x18\x01 \x01(\t\x12\x0f\n\x07version\x18\x02 \x01(\t\x12\x12\n\nauth_token\x18\x03 \x01(\t\x12\x18\n\x10node_description\x18\x04 \x01(\t\x12\x42\n\x0c\x63\x61pabilities\x18\x05 \x03(\x0b\x32,.agent.RegistrationRequest.CapabilitiesEntry\x1a\x33\n\x11\x43\x61pabilitiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xc5\x01\n\rSandboxPolicy\x12\'\n\x04mode\x18\x01 \x01(\x0e\x32\x19.agent.SandboxPolicy.Mode\x12\x18\n\x10\x61llowed_commands\x18\x02 \x03(\t\x12\x17\n\x0f\x64\x65nied_commands\x18\x03 \x03(\t\x12\x1a\n\x12sensitive_commands\x18\x04 \x03(\t\x12\x18\n\x10working_dir_jail\x18\x05 \x01(\t\"\"\n\x04Mode\x12\n\n\x06STRICT\x10\x00\x12\x0e\n\nPERMISSIVE\x10\x01\"x\n\x14RegistrationResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\x12\x15\n\rerror_message\x18\x02 \x01(\t\x12\x12\n\nsession_id\x18\x03 \x01(\t\x12$\n\x06policy\x18\x04 \x01(\x0b\x32\x14.agent.SandboxPolicy\"\xff\x01\n\x11\x43lientTaskMessage\x12,\n\rtask_response\x18\x01 \x01(\x0b\x32\x13.agent.TaskResponseH\x00\x12-\n\ntask_claim\x18\x02 \x01(\x0b\x32\x17.agent.TaskClaimRequestH\x00\x12,\n\rbrowser_event\x18\x03 \x01(\x0b\x32\x13.agent.BrowserEventH\x00\x12\'\n\x08\x61nnounce\x18\x04 \x01(\x0b\x32\x13.agent.NodeAnnounceH\x00\x12+\n\tfile_sync\x18\x05 \x01(\x0b\x32\x16.agent.FileSyncMessageH\x00\x42\t\n\x07payload\"\x1f\n\x0cNodeAnnounce\x12\x0f\n\x07node_id\x18\x01 
\x01(\t\"\x87\x01\n\x0c\x42rowserEvent\x12\x12\n\nsession_id\x18\x01 \x01(\t\x12,\n\x0b\x63onsole_msg\x18\x02 \x01(\x0b\x32\x15.agent.ConsoleMessageH\x00\x12,\n\x0bnetwork_req\x18\x03 \x01(\x0b\x32\x15.agent.NetworkRequestH\x00\x42\x07\n\x05\x65vent\"\x8d\x02\n\x11ServerTaskMessage\x12*\n\x0ctask_request\x18\x01 \x01(\x0b\x32\x12.agent.TaskRequestH\x00\x12\x31\n\x10work_pool_update\x18\x02 \x01(\x0b\x32\x15.agent.WorkPoolUpdateH\x00\x12\x30\n\x0c\x63laim_status\x18\x03 \x01(\x0b\x32\x18.agent.TaskClaimResponseH\x00\x12/\n\x0btask_cancel\x18\x04 \x01(\x0b\x32\x18.agent.TaskCancelRequestH\x00\x12+\n\tfile_sync\x18\x05 \x01(\x0b\x32\x16.agent.FileSyncMessageH\x00\x42\t\n\x07payload\"$\n\x11TaskCancelRequest\x12\x0f\n\x07task_id\x18\x01 \x01(\t\"\xd1\x01\n\x0bTaskRequest\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x11\n\ttask_type\x18\x02 \x01(\t\x12\x16\n\x0cpayload_json\x18\x03 \x01(\tH\x00\x12.\n\x0e\x62rowser_action\x18\x07 \x01(\x0b\x32\x14.agent.BrowserActionH\x00\x12\x12\n\ntimeout_ms\x18\x04 \x01(\x05\x12\x10\n\x08trace_id\x18\x05 \x01(\t\x12\x11\n\tsignature\x18\x06 \x01(\t\x12\x12\n\nsession_id\x18\x08 \x01(\tB\t\n\x07payload\"\xa0\x02\n\rBrowserAction\x12/\n\x06\x61\x63tion\x18\x01 \x01(\x0e\x32\x1f.agent.BrowserAction.ActionType\x12\x0b\n\x03url\x18\x02 \x01(\t\x12\x10\n\x08selector\x18\x03 \x01(\t\x12\x0c\n\x04text\x18\x04 \x01(\t\x12\x12\n\nsession_id\x18\x05 \x01(\t\x12\t\n\x01x\x18\x06 \x01(\x05\x12\t\n\x01y\x18\x07 \x01(\x05\"\x86\x01\n\nActionType\x12\x0c\n\x08NAVIGATE\x10\x00\x12\t\n\x05\x43LICK\x10\x01\x12\x08\n\x04TYPE\x10\x02\x12\x0e\n\nSCREENSHOT\x10\x03\x12\x0b\n\x07GET_DOM\x10\x04\x12\t\n\x05HOVER\x10\x05\x12\n\n\x06SCROLL\x10\x06\x12\t\n\x05\x43LOSE\x10\x07\x12\x08\n\x04\x45VAL\x10\x08\x12\x0c\n\x08GET_A11Y\x10\t\"\xe0\x02\n\x0cTaskResponse\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12*\n\x06status\x18\x02 \x01(\x0e\x32\x1a.agent.TaskResponse.Status\x12\x0e\n\x06stdout\x18\x03 \x01(\t\x12\x0e\n\x06stderr\x18\x04 \x01(\t\x12\x10\n\x08trace_id\x18\x05 
\x01(\t\x12\x35\n\tartifacts\x18\x06 \x03(\x0b\x32\".agent.TaskResponse.ArtifactsEntry\x12\x30\n\x0e\x62rowser_result\x18\x07 \x01(\x0b\x32\x16.agent.BrowserResponseH\x00\x1a\x30\n\x0e\x41rtifactsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x0c:\x02\x38\x01\"<\n\x06Status\x12\x0b\n\x07SUCCESS\x10\x00\x12\t\n\x05\x45RROR\x10\x01\x12\x0b\n\x07TIMEOUT\x10\x02\x12\r\n\tCANCELLED\x10\x03\x42\x08\n\x06result\"\xdc\x01\n\x0f\x42rowserResponse\x12\x0b\n\x03url\x18\x01 \x01(\t\x12\r\n\x05title\x18\x02 \x01(\t\x12\x10\n\x08snapshot\x18\x03 \x01(\x0c\x12\x13\n\x0b\x64om_content\x18\x04 \x01(\t\x12\x11\n\ta11y_tree\x18\x05 \x01(\t\x12\x13\n\x0b\x65val_result\x18\x06 \x01(\t\x12.\n\x0f\x63onsole_history\x18\x07 \x03(\x0b\x32\x15.agent.ConsoleMessage\x12.\n\x0fnetwork_history\x18\x08 \x03(\x0b\x32\x15.agent.NetworkRequest\"C\n\x0e\x43onsoleMessage\x12\r\n\x05level\x18\x01 \x01(\t\x12\x0c\n\x04text\x18\x02 \x01(\t\x12\x14\n\x0ctimestamp_ms\x18\x03 \x01(\x03\"h\n\x0eNetworkRequest\x12\x0e\n\x06method\x18\x01 \x01(\t\x12\x0b\n\x03url\x18\x02 \x01(\t\x12\x0e\n\x06status\x18\x03 \x01(\x05\x12\x15\n\rresource_type\x18\x04 \x01(\t\x12\x12\n\nlatency_ms\x18\x05 \x01(\x03\",\n\x0eWorkPoolUpdate\x12\x1a\n\x12\x61vailable_task_ids\x18\x01 \x03(\t\"4\n\x10TaskClaimRequest\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x0f\n\x07node_id\x18\x02 \x01(\t\"E\n\x11TaskClaimResponse\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x0f\n\x07granted\x18\x02 \x01(\x08\x12\x0e\n\x06reason\x18\x03 \x01(\t\"\xc1\x01\n\tHeartbeat\x12\x0f\n\x07node_id\x18\x01 \x01(\t\x12\x19\n\x11\x63pu_usage_percent\x18\x02 \x01(\x02\x12\x1c\n\x14memory_usage_percent\x18\x03 \x01(\x02\x12\x1b\n\x13\x61\x63tive_worker_count\x18\x04 \x01(\x05\x12\x1b\n\x13max_worker_capacity\x18\x05 \x01(\x05\x12\x16\n\x0estatus_message\x18\x06 \x01(\t\x12\x18\n\x10running_task_ids\x18\x07 \x03(\t\"-\n\x13HealthCheckResponse\x12\x16\n\x0eserver_time_ms\x18\x01 
\x01(\x03\"\xd3\x01\n\x0f\x46ileSyncMessage\x12\x12\n\nsession_id\x18\x01 \x01(\t\x12,\n\x08manifest\x18\x02 \x01(\x0b\x32\x18.agent.DirectoryManifestH\x00\x12\'\n\tfile_data\x18\x03 \x01(\x0b\x32\x12.agent.FilePayloadH\x00\x12#\n\x06status\x18\x04 \x01(\x0b\x32\x11.agent.SyncStatusH\x00\x12%\n\x07\x63ontrol\x18\x05 \x01(\x0b\x32\x12.agent.SyncControlH\x00\x42\t\n\x07payload\"\xaf\x01\n\x0bSyncControl\x12)\n\x06\x61\x63tion\x18\x01 \x01(\x0e\x32\x19.agent.SyncControl.Action\x12\x0c\n\x04path\x18\x02 \x01(\t\"g\n\x06\x41\x63tion\x12\x12\n\x0eSTART_WATCHING\x10\x00\x12\x11\n\rSTOP_WATCHING\x10\x01\x12\x08\n\x04LOCK\x10\x02\x12\n\n\x06UNLOCK\x10\x03\x12\x14\n\x10REFRESH_MANIFEST\x10\x04\x12\n\n\x06RESYNC\x10\x05\"F\n\x11\x44irectoryManifest\x12\x11\n\troot_path\x18\x01 \x01(\t\x12\x1e\n\x05\x66iles\x18\x02 \x03(\x0b\x32\x0f.agent.FileInfo\"D\n\x08\x46ileInfo\x12\x0c\n\x04path\x18\x01 \x01(\t\x12\x0c\n\x04size\x18\x02 \x01(\x03\x12\x0c\n\x04hash\x18\x03 \x01(\t\x12\x0e\n\x06is_dir\x18\x04 \x01(\x08\"_\n\x0b\x46ilePayload\x12\x0c\n\x04path\x18\x01 \x01(\t\x12\r\n\x05\x63hunk\x18\x02 \x01(\x0c\x12\x13\n\x0b\x63hunk_index\x18\x03 \x01(\x05\x12\x10\n\x08is_final\x18\x04 \x01(\x08\x12\x0c\n\x04hash\x18\x05 \x01(\t\"\xa0\x01\n\nSyncStatus\x12$\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x16.agent.SyncStatus.Code\x12\x0f\n\x07message\x18\x02 \x01(\t\x12\x17\n\x0freconcile_paths\x18\x03 \x03(\t\"B\n\x04\x43ode\x12\x06\n\x02OK\x10\x00\x12\t\n\x05\x45RROR\x10\x01\x12\x16\n\x12RECONCILE_REQUIRED\x10\x02\x12\x0f\n\x0bIN_PROGRESS\x10\x03\x32\xe9\x01\n\x11\x41gentOrchestrator\x12L\n\x11SyncConfiguration\x12\x1a.agent.RegistrationRequest\x1a\x1b.agent.RegistrationResponse\x12\x44\n\nTaskStream\x12\x18.agent.ClientTaskMessage\x1a\x18.agent.ServerTaskMessage(\x01\x30\x01\x12@\n\x0cReportHealth\x12\x10.agent.Heartbeat\x1a\x1a.agent.HealthCheckResponse(\x01\x30\x01\x62\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) 
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'protos.agent_pb2', _globals) +if _descriptor._USE_C_DESCRIPTORS == False: + DESCRIPTOR._options = None + _globals['_REGISTRATIONREQUEST_CAPABILITIESENTRY']._options = None + _globals['_REGISTRATIONREQUEST_CAPABILITIESENTRY']._serialized_options = b'8\001' + _globals['_TASKRESPONSE_ARTIFACTSENTRY']._options = None + _globals['_TASKRESPONSE_ARTIFACTSENTRY']._serialized_options = b'8\001' + _globals['_REGISTRATIONREQUEST']._serialized_start=30 + _globals['_REGISTRATIONREQUEST']._serialized_end=252 + _globals['_REGISTRATIONREQUEST_CAPABILITIESENTRY']._serialized_start=201 + _globals['_REGISTRATIONREQUEST_CAPABILITIESENTRY']._serialized_end=252 + _globals['_SANDBOXPOLICY']._serialized_start=255 + _globals['_SANDBOXPOLICY']._serialized_end=452 + _globals['_SANDBOXPOLICY_MODE']._serialized_start=418 + _globals['_SANDBOXPOLICY_MODE']._serialized_end=452 + _globals['_REGISTRATIONRESPONSE']._serialized_start=454 + _globals['_REGISTRATIONRESPONSE']._serialized_end=574 + _globals['_CLIENTTASKMESSAGE']._serialized_start=577 + _globals['_CLIENTTASKMESSAGE']._serialized_end=832 + _globals['_NODEANNOUNCE']._serialized_start=834 + _globals['_NODEANNOUNCE']._serialized_end=865 + _globals['_BROWSEREVENT']._serialized_start=868 + _globals['_BROWSEREVENT']._serialized_end=1003 + _globals['_SERVERTASKMESSAGE']._serialized_start=1006 + _globals['_SERVERTASKMESSAGE']._serialized_end=1275 + _globals['_TASKCANCELREQUEST']._serialized_start=1277 + _globals['_TASKCANCELREQUEST']._serialized_end=1313 + _globals['_TASKREQUEST']._serialized_start=1316 + _globals['_TASKREQUEST']._serialized_end=1525 + _globals['_BROWSERACTION']._serialized_start=1528 + _globals['_BROWSERACTION']._serialized_end=1816 + _globals['_BROWSERACTION_ACTIONTYPE']._serialized_start=1682 + _globals['_BROWSERACTION_ACTIONTYPE']._serialized_end=1816 + _globals['_TASKRESPONSE']._serialized_start=1819 + _globals['_TASKRESPONSE']._serialized_end=2171 + 
_globals['_TASKRESPONSE_ARTIFACTSENTRY']._serialized_start=2051 + _globals['_TASKRESPONSE_ARTIFACTSENTRY']._serialized_end=2099 + _globals['_TASKRESPONSE_STATUS']._serialized_start=2101 + _globals['_TASKRESPONSE_STATUS']._serialized_end=2161 + _globals['_BROWSERRESPONSE']._serialized_start=2174 + _globals['_BROWSERRESPONSE']._serialized_end=2394 + _globals['_CONSOLEMESSAGE']._serialized_start=2396 + _globals['_CONSOLEMESSAGE']._serialized_end=2463 + _globals['_NETWORKREQUEST']._serialized_start=2465 + _globals['_NETWORKREQUEST']._serialized_end=2569 + _globals['_WORKPOOLUPDATE']._serialized_start=2571 + _globals['_WORKPOOLUPDATE']._serialized_end=2615 + _globals['_TASKCLAIMREQUEST']._serialized_start=2617 + _globals['_TASKCLAIMREQUEST']._serialized_end=2669 + _globals['_TASKCLAIMRESPONSE']._serialized_start=2671 + _globals['_TASKCLAIMRESPONSE']._serialized_end=2740 + _globals['_HEARTBEAT']._serialized_start=2743 + _globals['_HEARTBEAT']._serialized_end=2936 + _globals['_HEALTHCHECKRESPONSE']._serialized_start=2938 + _globals['_HEALTHCHECKRESPONSE']._serialized_end=2983 + _globals['_FILESYNCMESSAGE']._serialized_start=2986 + _globals['_FILESYNCMESSAGE']._serialized_end=3197 + _globals['_SYNCCONTROL']._serialized_start=3200 + _globals['_SYNCCONTROL']._serialized_end=3375 + _globals['_SYNCCONTROL_ACTION']._serialized_start=3272 + _globals['_SYNCCONTROL_ACTION']._serialized_end=3375 + _globals['_DIRECTORYMANIFEST']._serialized_start=3377 + _globals['_DIRECTORYMANIFEST']._serialized_end=3447 + _globals['_FILEINFO']._serialized_start=3449 + _globals['_FILEINFO']._serialized_end=3517 + _globals['_FILEPAYLOAD']._serialized_start=3519 + _globals['_FILEPAYLOAD']._serialized_end=3614 + _globals['_SYNCSTATUS']._serialized_start=3617 + _globals['_SYNCSTATUS']._serialized_end=3777 + _globals['_SYNCSTATUS_CODE']._serialized_start=3711 + _globals['_SYNCSTATUS_CODE']._serialized_end=3777 + _globals['_AGENTORCHESTRATOR']._serialized_start=3780 + 
_globals['_AGENTORCHESTRATOR']._serialized_end=4013 +# @@protoc_insertion_point(module_scope) diff --git a/agent-node/protos/agent_pb2_grpc.py b/agent-node/protos/agent_pb2_grpc.py new file mode 100644 index 0000000..f551b0b --- /dev/null +++ b/agent-node/protos/agent_pb2_grpc.py @@ -0,0 +1,138 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + +from protos import agent_pb2 as protos_dot_agent__pb2 + + +class AgentOrchestratorStub(object): + """The Cortex Server exposes this service + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.SyncConfiguration = channel.unary_unary( + '/agent.AgentOrchestrator/SyncConfiguration', + request_serializer=protos_dot_agent__pb2.RegistrationRequest.SerializeToString, + response_deserializer=protos_dot_agent__pb2.RegistrationResponse.FromString, + ) + self.TaskStream = channel.stream_stream( + '/agent.AgentOrchestrator/TaskStream', + request_serializer=protos_dot_agent__pb2.ClientTaskMessage.SerializeToString, + response_deserializer=protos_dot_agent__pb2.ServerTaskMessage.FromString, + ) + self.ReportHealth = channel.stream_stream( + '/agent.AgentOrchestrator/ReportHealth', + request_serializer=protos_dot_agent__pb2.Heartbeat.SerializeToString, + response_deserializer=protos_dot_agent__pb2.HealthCheckResponse.FromString, + ) + + +class AgentOrchestratorServicer(object): + """The Cortex Server exposes this service + """ + + def SyncConfiguration(self, request, context): + """1. Control Channel: Sync policies and settings (Unary) + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def TaskStream(self, request_iterator, context): + """2. 
Task Channel: Bidirectional work dispatch and reporting (Persistent) + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ReportHealth(self, request_iterator, context): + """3. Health Channel: Dedicated Ping-Pong / Heartbeat (Persistent) + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_AgentOrchestratorServicer_to_server(servicer, server): + rpc_method_handlers = { + 'SyncConfiguration': grpc.unary_unary_rpc_method_handler( + servicer.SyncConfiguration, + request_deserializer=protos_dot_agent__pb2.RegistrationRequest.FromString, + response_serializer=protos_dot_agent__pb2.RegistrationResponse.SerializeToString, + ), + 'TaskStream': grpc.stream_stream_rpc_method_handler( + servicer.TaskStream, + request_deserializer=protos_dot_agent__pb2.ClientTaskMessage.FromString, + response_serializer=protos_dot_agent__pb2.ServerTaskMessage.SerializeToString, + ), + 'ReportHealth': grpc.stream_stream_rpc_method_handler( + servicer.ReportHealth, + request_deserializer=protos_dot_agent__pb2.Heartbeat.FromString, + response_serializer=protos_dot_agent__pb2.HealthCheckResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'agent.AgentOrchestrator', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + + # This class is part of an EXPERIMENTAL API. 
+class AgentOrchestrator(object): + """The Cortex Server exposes this service + """ + + @staticmethod + def SyncConfiguration(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/agent.AgentOrchestrator/SyncConfiguration', + protos_dot_agent__pb2.RegistrationRequest.SerializeToString, + protos_dot_agent__pb2.RegistrationResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def TaskStream(request_iterator, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.stream_stream(request_iterator, target, '/agent.AgentOrchestrator/TaskStream', + protos_dot_agent__pb2.ClientTaskMessage.SerializeToString, + protos_dot_agent__pb2.ServerTaskMessage.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def ReportHealth(request_iterator, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.stream_stream(request_iterator, target, '/agent.AgentOrchestrator/ReportHealth', + protos_dot_agent__pb2.Heartbeat.SerializeToString, + protos_dot_agent__pb2.HealthCheckResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) diff --git a/agent-node/requirements.txt b/agent-node/requirements.txt new file mode 100644 index 0000000..5647eb6 --- /dev/null +++ b/agent-node/requirements.txt @@ -0,0 +1,7 @@ +grpcio==1.62.1 +grpcio-tools==1.62.1 +PyJWT==2.8.0 +playwright==1.42.0 
+watchdog==4.0.0 +PyYAML==6.0.1 +psutil==5.9.8 diff --git a/agent-node/scripts/build.sh b/agent-node/scripts/build.sh new file mode 100644 index 0000000..792bdac --- /dev/null +++ b/agent-node/scripts/build.sh @@ -0,0 +1,18 @@ +#!/bin/bash +# Agent Node Distribution Build Script +set -e + +echo "[*] Building Cortex Agent Node distribution package..." + +# 1. Compile Protos (ensure we have latest stubs) +python3 -m grpc_tools.protoc -I./protos --python_out=. --grpc_python_out=. ./protos/agent.proto +echo "[+] Protobuf compiled." + +# 2. Cleanup +find . -type d -name "__pycache__" -exec rm -rf {} + +echo "[+] Cleanup complete." + +# 3. Validation (Optional: run unit tests) +# python3 -m unittest discover tests + +echo "[✅] Build successful. Source is ready for distribution." diff --git a/agent-node/scripts/compile_protos.sh b/agent-node/scripts/compile_protos.sh new file mode 100755 index 0000000..1af7aa3 --- /dev/null +++ b/agent-node/scripts/compile_protos.sh @@ -0,0 +1,3 @@ +#!/bin/bash +python -m grpc_tools.protoc -I./protos --python_out=. --grpc_python_out=. ./protos/agent.proto +echo "Protobuf compiled successfully." 
diff --git a/agent-node/shared_core/__init__.py b/agent-node/shared_core/__init__.py new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/agent-node/shared_core/__init__.py diff --git a/agent-node/shared_core/ignore.py b/agent-node/shared_core/ignore.py new file mode 100644 index 0000000..c3f0cb5 --- /dev/null +++ b/agent-node/shared_core/ignore.py @@ -0,0 +1,38 @@ +import os +import fnmatch + +class CortexIgnore: + """Handles .cortexignore (and .gitignore) pattern matching.""" + def __init__(self, root_path): + self.root_path = root_path + self.patterns = self._load_patterns() + + def _load_patterns(self): + patterns = [".git", "node_modules", ".cortex_sync", "__pycache__", "*.pyc"] # Default ignores + ignore_file = os.path.join(self.root_path, ".cortexignore") + if not os.path.exists(ignore_file): + ignore_file = os.path.join(self.root_path, ".gitignore") + + if os.path.exists(ignore_file): + with open(ignore_file, "r") as f: + for line in f: + line = line.strip() + if line and not line.startswith("#"): + patterns.append(line) + return patterns + + def is_ignored(self, rel_path): + """Returns True if the path matches any ignore pattern.""" + for pattern in self.patterns: + # Handle directory patterns + if pattern.endswith("/"): + if rel_path.startswith(pattern) or f"/{pattern}" in f"/{rel_path}": + return True + # Standard glob matching + if fnmatch.fnmatch(rel_path, pattern) or fnmatch.fnmatch(os.path.basename(rel_path), pattern): + return True + # Handle nested matches + for part in rel_path.split(os.sep): + if fnmatch.fnmatch(part, pattern): + return True + return False diff --git a/ai-hub/app/api/routes/nodes.py b/ai-hub/app/api/routes/nodes.py index bac3b15..273ed85 100644 --- a/ai-hub/app/api/routes/nodes.py +++ b/ai-hub/app/api/routes/nodes.py @@ -383,7 +383,7 @@ # 2. 
Build the ZIP in-memory buf = io.BytesIO() - source_root = "/app/poc-grpc-agent" + source_root = "/app/agent-node" with zipfile.ZipFile(buf, "w", zipfile.ZIP_DEFLATED) as zip_file: # Helper to add a directory @@ -594,6 +594,8 @@ status = live._compute_status() if live else node.last_status or "offline" skill_cfg = node.skill_config or {} available = [skill for skill, cfg in skill_cfg.items() if cfg.get("enabled", True)] + stats = live.stats if live else {} + return schemas.AgentNodeUserView( node_id=node.node_id, display_name=node.display_name, @@ -602,6 +604,7 @@ available_skills=available, last_status=status, last_seen_at=node.last_seen_at, + stats=schemas.AgentNodeStats(**stats) if stats else schemas.AgentNodeStats() ) diff --git a/ai-hub/app/api/routes/sessions.py b/ai-hub/app/api/routes/sessions.py index 6cb2095..222fa3d 100644 --- a/ai-hub/app/api/routes/sessions.py +++ b/ai-hub/app/api/routes/sessions.py @@ -313,6 +313,37 @@ db.commit() db.refresh(session) + # Trigger actual workspace sync commands via gRPC + # We need access to the orchestrator instance from app.state + from fastapi import Request + # Note: In a real app we'd use a cleaner injection, but here we'll grab from Request if available + # or globally if it's a singleton. In this project, it's stored in app.state. + orchestrator = getattr(db, "_request_app", None) # This depends on how GetDB is implemented + # Better: just use the registry to get the assistant if possible, + # but the assistant lives in the Orchestrator. + + # Let's try to get the orchestrator from the registry if we can't get it from the app. + # Actually, let's just use a global reference or pass it in. 
+ # For this implementation, I'll use a safer approach: + try: + from app.main import app + assistant = app.state.orchestrator.assistant + + config = request.config or schemas.NodeWorkspaceConfig(source="empty") + + for nid in new_nodes: + if config.source == "server": + # Server -> Node: Push everything from workspace + assistant.push_workspace(nid, session.sync_workspace_id) + elif config.source == "node_local": + # Node -> Server: Request manifest to start syncing from Node + assistant.request_manifest(nid, session.sync_workspace_id, path=config.path or ".") + # Also tell it to start watching + assistant.control_sync(nid, session.sync_workspace_id, action="START", path=config.path or ".") + + except Exception as e: + print(f"[⚠️] Failed to trigger session node sync: {e}") + return schemas.SessionNodeStatusResponse( session_id=session_id, sync_workspace_id=session.sync_workspace_id, diff --git a/ai-hub/app/api/schemas.py b/ai-hub/app/api/schemas.py index 421fba8..c7b734b 100644 --- a/ai-hub/app/api/schemas.py +++ b/ai-hub/app/api/schemas.py @@ -152,15 +152,21 @@ created_at: datetime # M3: Node attachment sync_workspace_id: Optional[str] = None - attached_node_ids: List[str] = [] - node_sync_status: dict = {} + attached_node_ids: Optional[List[str]] = Field(default_factory=list) + node_sync_status: Optional[dict] = Field(default_factory=dict) model_config = ConfigDict(from_attributes=True) # --- M3: Session Node Attachment Schemas --- +class NodeWorkspaceConfig(BaseModel): + """How a node should seed its workspace for a session.""" + source: Literal["empty", "server", "node_local"] = "empty" + path: Optional[str] = None # root path on node when source='node_local' + class NodeAttachRequest(BaseModel): """Attach one or more nodes to a session.""" node_ids: List[str] + config: Optional[NodeWorkspaceConfig] = None class NodeSyncStatusEntry(BaseModel): """Per-node sync status within a session.""" @@ -322,6 +328,7 @@ available_skills: List[str] = [] last_status: str # 
'online' | 'offline' | 'stale' last_seen_at: Optional[datetime] = None + stats: AgentNodeStats = AgentNodeStats() model_config = ConfigDict(from_attributes=True) class AgentNodeStatusResponse(BaseModel): diff --git a/ai-hub/app/app.py b/ai-hub/app/app.py index 2e2ee06..d49b894 100644 --- a/ai-hub/app/app.py +++ b/ai-hub/app/app.py @@ -47,7 +47,9 @@ try: from app.core.grpc.services.grpc_server import serve_grpc registry = app.state.services.node_registry_service - app.state.grpc_server = serve_grpc(registry, port=50051) + server, orchestrator = serve_grpc(registry, port=50051) + app.state.grpc_server = server + app.state.orchestrator = orchestrator logger.info("[M6] Agent Orchestrator gRPC server started on port 50051.") except Exception as e: logger.error(f"[M6] Failed to start gRPC server: {e}") diff --git a/ai-hub/app/config.py b/ai-hub/app/config.py index 0b1837b..58c34cf 100644 --- a/ai-hub/app/config.py +++ b/ai-hub/app/config.py @@ -110,6 +110,10 @@ self.OIDC_REDIRECT_URI: str = os.getenv("OIDC_REDIRECT_URI") or \ get_from_yaml(["oidc", "redirect_uri"]) or \ config_from_pydantic.oidc.redirect_uri + + self.SECRET_KEY: str = os.getenv("SECRET_KEY") or \ + get_from_yaml(["application", "secret_key"]) or \ + self.OIDC_CLIENT_SECRET or "dev-secret-key-1337" # --- Database Settings --- self.DB_MODE: str = os.getenv("DB_MODE") or \ diff --git a/ai-hub/app/core/grpc/core/mirror.py b/ai-hub/app/core/grpc/core/mirror.py index 6054d62..6006241 100644 --- a/ai-hub/app/core/grpc/core/mirror.py +++ b/ai-hub/app/core/grpc/core/mirror.py @@ -74,8 +74,26 @@ return agent_pb2.DirectoryManifest(root_path=workspace, files=files) def _verify_hash(self, path: str, expected_hash: str): + if not os.path.exists(path): return content = open(path, "rb").read() actual_hash = hashlib.sha256(content).hexdigest() if actual_hash != expected_hash: print(f"[⚠️] Hash Mismatch for {path}: expected {expected_hash}, got {actual_hash}") - # In a real system, we'd trigger a re-download/re-sync + + 
def reconcile(self, session_id: str, remote_manifest: agent_pb2.DirectoryManifest) -> List[str]: + """Compares remote manifest with local mirror and returns list of paths missing/changed.""" + workspace = self.get_workspace_path(session_id) + needs_update = [] + for remote_file in remote_manifest.files: + if remote_file.is_dir: continue + + local_path = os.path.join(workspace, remote_file.path) + if not os.path.exists(local_path): + needs_update.append(remote_file.path) + continue + + with open(local_path, "rb") as f: + local_hash = hashlib.sha256(f.read()).hexdigest() + if local_hash != remote_file.hash: + needs_update.append(remote_file.path) + return needs_update diff --git a/ai-hub/app/core/grpc/services/grpc_server.py b/ai-hub/app/core/grpc/services/grpc_server.py index e4f3511..6f8ddc6 100644 --- a/ai-hub/app/core/grpc/services/grpc_server.py +++ b/ai-hub/app/core/grpc/services/grpc_server.py @@ -22,14 +22,10 @@ logger = logging.getLogger(__name__) # M4: Hub HTTP API for invite-token validation +# Calls POST /api/v1/nodes/validate-token before accepting any SyncConfiguration. +PATH_PREFIX = os.getenv("PATH_PREFIX", "/api/v1") HUB_API_URL = os.getenv("HUB_API_URL", "http://localhost:8000") -HUB_API_PATH = "/nodes/validate-token" - -# M4: Hub HTTP API for invite-token validation -# Calls POST /nodes/validate-token before accepting any SyncConfiguration. -# Set HUB_API_URL=http://localhost:8000 (or 0 to skip validation in dev mode). 
-HUB_API_URL = os.getenv("HUB_API_URL", "") # empty = skip validation (dev) -HUB_API_PATH = "/nodes/validate-token" +HUB_API_PATH = f"{PATH_PREFIX}/nodes/validate-token" class AgentOrchestrator(agent_pb2_grpc.AgentOrchestratorServicer): """Integrated gRPC Servicer for Agent Orchestration within AI Hub.""" @@ -47,21 +43,23 @@ def _monitor_mesh(self): """Periodically prints status of all nodes in the mesh.""" while True: - time.sleep(10) - active_nodes = self.registry.list_nodes() - print("\n" + "="*50) - print(f"📡 CORTEX MESH DASHBOARD | {len(active_nodes)} Nodes Online") - print("-" * 50) - if not active_nodes: - print(" No nodes currently connected.") - for nid in active_nodes: - node = self.registry.get_node(nid) - stats = node.get("stats", {}) - tasks = stats.get("running", []) - capability = node.get("metadata", {}).get("caps", {}) - print(f" 🟢 {nid:15} | Workers: {stats.get('active_worker_count', 0)} | Running: {len(tasks)} tasks") - print(f" Capabilities: {capability}") - print("="*50 + "\n", flush=True) + try: + time.sleep(10) + active_nodes = self.registry.list_nodes() + print("\n" + "="*50) + print(f"📡 CORTEX MESH DASHBOARD | {len(active_nodes)} Nodes Online") + print("-" * 50) + if not active_nodes: + print(" No nodes currently connected.") + for node in active_nodes: + stats = node.stats + tasks = stats.get("running", []) + capability = node.metadata.get("caps", {}) + print(f" 🟢 {node.node_id:20} | Workers: {stats.get('active_worker_count', 0)} | Running: {len(tasks)} tasks") + print(f" Capabilities: {capability}") + print("="*50 + "\n", flush=True) + except Exception as e: + logger.error(f"[MeshMonitor] Error: {e}") def _broadcast_work(self, _): """Pushes work notifications to all active nodes.""" @@ -230,6 +228,25 @@ # M6: Emit sync progress (rarely to avoid flood, but good for large pushes) if fs.file_data.chunk_index % 10 == 0: self.registry.emit(node_id, "sync_progress", {"path": fs.file_data.path, "chunk": fs.file_data.chunk_index}) + elif 
fs.HasField("manifest"): + print(f" [📁📥] Received Manifest from {node_id} for {fs.session_id}") + drifts = self.mirror.reconcile(fs.session_id, fs.manifest) + if drifts: + print(f" [📁🏃] Drift Detected (Node -> Server): Requesting {len(drifts)} files") + # Request node to push these specific files + node.queue.put(agent_pb2.ServerTaskMessage( + file_sync=agent_pb2.FileSyncMessage( + session_id=fs.session_id, + control=agent_pb2.SyncControl( + action=agent_pb2.SyncControl.REFRESH_MANIFEST, + path=fs.manifest.root_path, + request_paths=drifts + ) + ) + )) + else: + self.registry.emit(node_id, "sync_status", {"message": "Synchronized (Node -> Server)", "code": 0}) + elif fs.HasField("status"): print(f" [📁] Sync Status from {node_id}: {fs.status.message}") self.registry.emit(node_id, "sync_status", {"message": fs.status.message, "code": fs.status.code}) @@ -242,6 +259,8 @@ for hb in request_iterator: self.registry.update_stats(hb.node_id, { "active_worker_count": hb.active_worker_count, + "cpu_usage_percent": hb.cpu_usage_percent, + "memory_usage_percent": hb.memory_usage_percent, "running": list(hb.running_task_ids) }) yield agent_pb2.HealthCheckResponse(server_time_ms=int(time.time()*1000)) @@ -261,5 +280,5 @@ logger.info(f"🚀 CORTEX gRPC Orchestrator starting on {addr}") server.start() - return server + return server, orchestrator diff --git a/ai-hub/app/core/services/node_registry.py b/ai-hub/app/core/services/node_registry.py index 2ba9aa3..c6f88c4 100644 --- a/ai-hub/app/core/services/node_registry.py +++ b/ai-hub/app/core/services/node_registry.py @@ -105,28 +105,19 @@ # ------------------------------------------------------------------ # def _db_upsert_node(self, node_id: str, user_id: str, metadata: dict): - """Create or update the AgentNode DB record (on connect).""" + """Update the AgentNode DB record on connect. 
The admin must pre-create the node.""" from app.db.models import AgentNode from app.db.session import get_db_session try: with get_db_session() as db: record = db.query(AgentNode).filter(AgentNode.node_id == node_id).first() if record: - record.user_id = user_id - record.description = metadata.get("desc", "") record.capabilities = metadata.get("caps", {}) record.last_status = "online" record.last_seen_at = datetime.utcnow() else: - record = AgentNode( - node_id=node_id, - user_id=user_id, - description=metadata.get("desc", ""), - capabilities=metadata.get("caps", {}), - last_status="online", - last_seen_at=datetime.utcnow(), - ) - db.add(record) + # Node not pre-registered by admin — log warning but don't crash + print(f"[NodeRegistry] WARNING: Node '{node_id}' connected but has no DB record. Admin must register it first.") except Exception as e: print(f"[NodeRegistry] DB upsert failed for {node_id}: {e}") diff --git a/ai-hub/app/protos/agent.proto b/ai-hub/app/protos/agent.proto index 5e3932d..e751062 100644 --- a/ai-hub/app/protos/agent.proto +++ b/ai-hub/app/protos/agent.proto @@ -211,6 +211,7 @@ } Action action = 1; string path = 2; + repeated string request_paths = 3; // NEW: Specific files requested for pull } message DirectoryManifest { diff --git a/ai-hub/app/protos/agent_pb2.py b/ai-hub/app/protos/agent_pb2.py index 3472d01..0747c70 100644 --- a/ai-hub/app/protos/agent_pb2.py +++ b/ai-hub/app/protos/agent_pb2.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: protos/agent.proto +# source: agent.proto # Protobuf Python Version: 4.25.1 """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor @@ -14,81 +14,81 @@ -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x12protos/agent.proto\x12\x05\x61gent\"\xde\x01\n\x13RegistrationRequest\x12\x0f\n\x07node_id\x18\x01 \x01(\t\x12\x0f\n\x07version\x18\x02 \x01(\t\x12\x12\n\nauth_token\x18\x03 \x01(\t\x12\x18\n\x10node_description\x18\x04 \x01(\t\x12\x42\n\x0c\x63\x61pabilities\x18\x05 \x03(\x0b\x32,.agent.RegistrationRequest.CapabilitiesEntry\x1a\x33\n\x11\x43\x61pabilitiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xc5\x01\n\rSandboxPolicy\x12\'\n\x04mode\x18\x01 \x01(\x0e\x32\x19.agent.SandboxPolicy.Mode\x12\x18\n\x10\x61llowed_commands\x18\x02 \x03(\t\x12\x17\n\x0f\x64\x65nied_commands\x18\x03 \x03(\t\x12\x1a\n\x12sensitive_commands\x18\x04 \x03(\t\x12\x18\n\x10working_dir_jail\x18\x05 \x01(\t\"\"\n\x04Mode\x12\n\n\x06STRICT\x10\x00\x12\x0e\n\nPERMISSIVE\x10\x01\"x\n\x14RegistrationResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\x12\x15\n\rerror_message\x18\x02 \x01(\t\x12\x12\n\nsession_id\x18\x03 \x01(\t\x12$\n\x06policy\x18\x04 \x01(\x0b\x32\x14.agent.SandboxPolicy\"\xff\x01\n\x11\x43lientTaskMessage\x12,\n\rtask_response\x18\x01 \x01(\x0b\x32\x13.agent.TaskResponseH\x00\x12-\n\ntask_claim\x18\x02 \x01(\x0b\x32\x17.agent.TaskClaimRequestH\x00\x12,\n\rbrowser_event\x18\x03 \x01(\x0b\x32\x13.agent.BrowserEventH\x00\x12\'\n\x08\x61nnounce\x18\x04 \x01(\x0b\x32\x13.agent.NodeAnnounceH\x00\x12+\n\tfile_sync\x18\x05 \x01(\x0b\x32\x16.agent.FileSyncMessageH\x00\x42\t\n\x07payload\"\x1f\n\x0cNodeAnnounce\x12\x0f\n\x07node_id\x18\x01 \x01(\t\"\x87\x01\n\x0c\x42rowserEvent\x12\x12\n\nsession_id\x18\x01 \x01(\t\x12,\n\x0b\x63onsole_msg\x18\x02 \x01(\x0b\x32\x15.agent.ConsoleMessageH\x00\x12,\n\x0bnetwork_req\x18\x03 
\x01(\x0b\x32\x15.agent.NetworkRequestH\x00\x42\x07\n\x05\x65vent\"\x8d\x02\n\x11ServerTaskMessage\x12*\n\x0ctask_request\x18\x01 \x01(\x0b\x32\x12.agent.TaskRequestH\x00\x12\x31\n\x10work_pool_update\x18\x02 \x01(\x0b\x32\x15.agent.WorkPoolUpdateH\x00\x12\x30\n\x0c\x63laim_status\x18\x03 \x01(\x0b\x32\x18.agent.TaskClaimResponseH\x00\x12/\n\x0btask_cancel\x18\x04 \x01(\x0b\x32\x18.agent.TaskCancelRequestH\x00\x12+\n\tfile_sync\x18\x05 \x01(\x0b\x32\x16.agent.FileSyncMessageH\x00\x42\t\n\x07payload\"$\n\x11TaskCancelRequest\x12\x0f\n\x07task_id\x18\x01 \x01(\t\"\xd1\x01\n\x0bTaskRequest\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x11\n\ttask_type\x18\x02 \x01(\t\x12\x16\n\x0cpayload_json\x18\x03 \x01(\tH\x00\x12.\n\x0e\x62rowser_action\x18\x07 \x01(\x0b\x32\x14.agent.BrowserActionH\x00\x12\x12\n\ntimeout_ms\x18\x04 \x01(\x05\x12\x10\n\x08trace_id\x18\x05 \x01(\t\x12\x11\n\tsignature\x18\x06 \x01(\t\x12\x12\n\nsession_id\x18\x08 \x01(\tB\t\n\x07payload\"\xa0\x02\n\rBrowserAction\x12/\n\x06\x61\x63tion\x18\x01 \x01(\x0e\x32\x1f.agent.BrowserAction.ActionType\x12\x0b\n\x03url\x18\x02 \x01(\t\x12\x10\n\x08selector\x18\x03 \x01(\t\x12\x0c\n\x04text\x18\x04 \x01(\t\x12\x12\n\nsession_id\x18\x05 \x01(\t\x12\t\n\x01x\x18\x06 \x01(\x05\x12\t\n\x01y\x18\x07 \x01(\x05\"\x86\x01\n\nActionType\x12\x0c\n\x08NAVIGATE\x10\x00\x12\t\n\x05\x43LICK\x10\x01\x12\x08\n\x04TYPE\x10\x02\x12\x0e\n\nSCREENSHOT\x10\x03\x12\x0b\n\x07GET_DOM\x10\x04\x12\t\n\x05HOVER\x10\x05\x12\n\n\x06SCROLL\x10\x06\x12\t\n\x05\x43LOSE\x10\x07\x12\x08\n\x04\x45VAL\x10\x08\x12\x0c\n\x08GET_A11Y\x10\t\"\xe0\x02\n\x0cTaskResponse\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12*\n\x06status\x18\x02 \x01(\x0e\x32\x1a.agent.TaskResponse.Status\x12\x0e\n\x06stdout\x18\x03 \x01(\t\x12\x0e\n\x06stderr\x18\x04 \x01(\t\x12\x10\n\x08trace_id\x18\x05 \x01(\t\x12\x35\n\tartifacts\x18\x06 \x03(\x0b\x32\".agent.TaskResponse.ArtifactsEntry\x12\x30\n\x0e\x62rowser_result\x18\x07 
\x01(\x0b\x32\x16.agent.BrowserResponseH\x00\x1a\x30\n\x0e\x41rtifactsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x0c:\x02\x38\x01\"<\n\x06Status\x12\x0b\n\x07SUCCESS\x10\x00\x12\t\n\x05\x45RROR\x10\x01\x12\x0b\n\x07TIMEOUT\x10\x02\x12\r\n\tCANCELLED\x10\x03\x42\x08\n\x06result\"\xdc\x01\n\x0f\x42rowserResponse\x12\x0b\n\x03url\x18\x01 \x01(\t\x12\r\n\x05title\x18\x02 \x01(\t\x12\x10\n\x08snapshot\x18\x03 \x01(\x0c\x12\x13\n\x0b\x64om_content\x18\x04 \x01(\t\x12\x11\n\ta11y_tree\x18\x05 \x01(\t\x12\x13\n\x0b\x65val_result\x18\x06 \x01(\t\x12.\n\x0f\x63onsole_history\x18\x07 \x03(\x0b\x32\x15.agent.ConsoleMessage\x12.\n\x0fnetwork_history\x18\x08 \x03(\x0b\x32\x15.agent.NetworkRequest\"C\n\x0e\x43onsoleMessage\x12\r\n\x05level\x18\x01 \x01(\t\x12\x0c\n\x04text\x18\x02 \x01(\t\x12\x14\n\x0ctimestamp_ms\x18\x03 \x01(\x03\"h\n\x0eNetworkRequest\x12\x0e\n\x06method\x18\x01 \x01(\t\x12\x0b\n\x03url\x18\x02 \x01(\t\x12\x0e\n\x06status\x18\x03 \x01(\x05\x12\x15\n\rresource_type\x18\x04 \x01(\t\x12\x12\n\nlatency_ms\x18\x05 \x01(\x03\",\n\x0eWorkPoolUpdate\x12\x1a\n\x12\x61vailable_task_ids\x18\x01 \x03(\t\"4\n\x10TaskClaimRequest\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x0f\n\x07node_id\x18\x02 \x01(\t\"E\n\x11TaskClaimResponse\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x0f\n\x07granted\x18\x02 \x01(\x08\x12\x0e\n\x06reason\x18\x03 \x01(\t\"\xc1\x01\n\tHeartbeat\x12\x0f\n\x07node_id\x18\x01 \x01(\t\x12\x19\n\x11\x63pu_usage_percent\x18\x02 \x01(\x02\x12\x1c\n\x14memory_usage_percent\x18\x03 \x01(\x02\x12\x1b\n\x13\x61\x63tive_worker_count\x18\x04 \x01(\x05\x12\x1b\n\x13max_worker_capacity\x18\x05 \x01(\x05\x12\x16\n\x0estatus_message\x18\x06 \x01(\t\x12\x18\n\x10running_task_ids\x18\x07 \x03(\t\"-\n\x13HealthCheckResponse\x12\x16\n\x0eserver_time_ms\x18\x01 \x01(\x03\"\xd3\x01\n\x0f\x46ileSyncMessage\x12\x12\n\nsession_id\x18\x01 \x01(\t\x12,\n\x08manifest\x18\x02 \x01(\x0b\x32\x18.agent.DirectoryManifestH\x00\x12\'\n\tfile_data\x18\x03 
\x01(\x0b\x32\x12.agent.FilePayloadH\x00\x12#\n\x06status\x18\x04 \x01(\x0b\x32\x11.agent.SyncStatusH\x00\x12%\n\x07\x63ontrol\x18\x05 \x01(\x0b\x32\x12.agent.SyncControlH\x00\x42\t\n\x07payload\"\xaf\x01\n\x0bSyncControl\x12)\n\x06\x61\x63tion\x18\x01 \x01(\x0e\x32\x19.agent.SyncControl.Action\x12\x0c\n\x04path\x18\x02 \x01(\t\"g\n\x06\x41\x63tion\x12\x12\n\x0eSTART_WATCHING\x10\x00\x12\x11\n\rSTOP_WATCHING\x10\x01\x12\x08\n\x04LOCK\x10\x02\x12\n\n\x06UNLOCK\x10\x03\x12\x14\n\x10REFRESH_MANIFEST\x10\x04\x12\n\n\x06RESYNC\x10\x05\"F\n\x11\x44irectoryManifest\x12\x11\n\troot_path\x18\x01 \x01(\t\x12\x1e\n\x05\x66iles\x18\x02 \x03(\x0b\x32\x0f.agent.FileInfo\"D\n\x08\x46ileInfo\x12\x0c\n\x04path\x18\x01 \x01(\t\x12\x0c\n\x04size\x18\x02 \x01(\x03\x12\x0c\n\x04hash\x18\x03 \x01(\t\x12\x0e\n\x06is_dir\x18\x04 \x01(\x08\"_\n\x0b\x46ilePayload\x12\x0c\n\x04path\x18\x01 \x01(\t\x12\r\n\x05\x63hunk\x18\x02 \x01(\x0c\x12\x13\n\x0b\x63hunk_index\x18\x03 \x01(\x05\x12\x10\n\x08is_final\x18\x04 \x01(\x08\x12\x0c\n\x04hash\x18\x05 \x01(\t\"\xa0\x01\n\nSyncStatus\x12$\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x16.agent.SyncStatus.Code\x12\x0f\n\x07message\x18\x02 \x01(\t\x12\x17\n\x0freconcile_paths\x18\x03 \x03(\t\"B\n\x04\x43ode\x12\x06\n\x02OK\x10\x00\x12\t\n\x05\x45RROR\x10\x01\x12\x16\n\x12RECONCILE_REQUIRED\x10\x02\x12\x0f\n\x0bIN_PROGRESS\x10\x03\x32\xe9\x01\n\x11\x41gentOrchestrator\x12L\n\x11SyncConfiguration\x12\x1a.agent.RegistrationRequest\x1a\x1b.agent.RegistrationResponse\x12\x44\n\nTaskStream\x12\x18.agent.ClientTaskMessage\x1a\x18.agent.ServerTaskMessage(\x01\x30\x01\x12@\n\x0cReportHealth\x12\x10.agent.Heartbeat\x1a\x1a.agent.HealthCheckResponse(\x01\x30\x01\x62\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0b\x61gent.proto\x12\x05\x61gent\"\xde\x01\n\x13RegistrationRequest\x12\x0f\n\x07node_id\x18\x01 \x01(\t\x12\x0f\n\x07version\x18\x02 \x01(\t\x12\x12\n\nauth_token\x18\x03 \x01(\t\x12\x18\n\x10node_description\x18\x04 
\x01(\t\x12\x42\n\x0c\x63\x61pabilities\x18\x05 \x03(\x0b\x32,.agent.RegistrationRequest.CapabilitiesEntry\x1a\x33\n\x11\x43\x61pabilitiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xc5\x01\n\rSandboxPolicy\x12\'\n\x04mode\x18\x01 \x01(\x0e\x32\x19.agent.SandboxPolicy.Mode\x12\x18\n\x10\x61llowed_commands\x18\x02 \x03(\t\x12\x17\n\x0f\x64\x65nied_commands\x18\x03 \x03(\t\x12\x1a\n\x12sensitive_commands\x18\x04 \x03(\t\x12\x18\n\x10working_dir_jail\x18\x05 \x01(\t\"\"\n\x04Mode\x12\n\n\x06STRICT\x10\x00\x12\x0e\n\nPERMISSIVE\x10\x01\"x\n\x14RegistrationResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\x12\x15\n\rerror_message\x18\x02 \x01(\t\x12\x12\n\nsession_id\x18\x03 \x01(\t\x12$\n\x06policy\x18\x04 \x01(\x0b\x32\x14.agent.SandboxPolicy\"\xff\x01\n\x11\x43lientTaskMessage\x12,\n\rtask_response\x18\x01 \x01(\x0b\x32\x13.agent.TaskResponseH\x00\x12-\n\ntask_claim\x18\x02 \x01(\x0b\x32\x17.agent.TaskClaimRequestH\x00\x12,\n\rbrowser_event\x18\x03 \x01(\x0b\x32\x13.agent.BrowserEventH\x00\x12\'\n\x08\x61nnounce\x18\x04 \x01(\x0b\x32\x13.agent.NodeAnnounceH\x00\x12+\n\tfile_sync\x18\x05 \x01(\x0b\x32\x16.agent.FileSyncMessageH\x00\x42\t\n\x07payload\"\x1f\n\x0cNodeAnnounce\x12\x0f\n\x07node_id\x18\x01 \x01(\t\"\x87\x01\n\x0c\x42rowserEvent\x12\x12\n\nsession_id\x18\x01 \x01(\t\x12,\n\x0b\x63onsole_msg\x18\x02 \x01(\x0b\x32\x15.agent.ConsoleMessageH\x00\x12,\n\x0bnetwork_req\x18\x03 \x01(\x0b\x32\x15.agent.NetworkRequestH\x00\x42\x07\n\x05\x65vent\"\x8d\x02\n\x11ServerTaskMessage\x12*\n\x0ctask_request\x18\x01 \x01(\x0b\x32\x12.agent.TaskRequestH\x00\x12\x31\n\x10work_pool_update\x18\x02 \x01(\x0b\x32\x15.agent.WorkPoolUpdateH\x00\x12\x30\n\x0c\x63laim_status\x18\x03 \x01(\x0b\x32\x18.agent.TaskClaimResponseH\x00\x12/\n\x0btask_cancel\x18\x04 \x01(\x0b\x32\x18.agent.TaskCancelRequestH\x00\x12+\n\tfile_sync\x18\x05 
\x01(\x0b\x32\x16.agent.FileSyncMessageH\x00\x42\t\n\x07payload\"$\n\x11TaskCancelRequest\x12\x0f\n\x07task_id\x18\x01 \x01(\t\"\xd1\x01\n\x0bTaskRequest\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x11\n\ttask_type\x18\x02 \x01(\t\x12\x16\n\x0cpayload_json\x18\x03 \x01(\tH\x00\x12.\n\x0e\x62rowser_action\x18\x07 \x01(\x0b\x32\x14.agent.BrowserActionH\x00\x12\x12\n\ntimeout_ms\x18\x04 \x01(\x05\x12\x10\n\x08trace_id\x18\x05 \x01(\t\x12\x11\n\tsignature\x18\x06 \x01(\t\x12\x12\n\nsession_id\x18\x08 \x01(\tB\t\n\x07payload\"\xa0\x02\n\rBrowserAction\x12/\n\x06\x61\x63tion\x18\x01 \x01(\x0e\x32\x1f.agent.BrowserAction.ActionType\x12\x0b\n\x03url\x18\x02 \x01(\t\x12\x10\n\x08selector\x18\x03 \x01(\t\x12\x0c\n\x04text\x18\x04 \x01(\t\x12\x12\n\nsession_id\x18\x05 \x01(\t\x12\t\n\x01x\x18\x06 \x01(\x05\x12\t\n\x01y\x18\x07 \x01(\x05\"\x86\x01\n\nActionType\x12\x0c\n\x08NAVIGATE\x10\x00\x12\t\n\x05\x43LICK\x10\x01\x12\x08\n\x04TYPE\x10\x02\x12\x0e\n\nSCREENSHOT\x10\x03\x12\x0b\n\x07GET_DOM\x10\x04\x12\t\n\x05HOVER\x10\x05\x12\n\n\x06SCROLL\x10\x06\x12\t\n\x05\x43LOSE\x10\x07\x12\x08\n\x04\x45VAL\x10\x08\x12\x0c\n\x08GET_A11Y\x10\t\"\xe0\x02\n\x0cTaskResponse\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12*\n\x06status\x18\x02 \x01(\x0e\x32\x1a.agent.TaskResponse.Status\x12\x0e\n\x06stdout\x18\x03 \x01(\t\x12\x0e\n\x06stderr\x18\x04 \x01(\t\x12\x10\n\x08trace_id\x18\x05 \x01(\t\x12\x35\n\tartifacts\x18\x06 \x03(\x0b\x32\".agent.TaskResponse.ArtifactsEntry\x12\x30\n\x0e\x62rowser_result\x18\x07 \x01(\x0b\x32\x16.agent.BrowserResponseH\x00\x1a\x30\n\x0e\x41rtifactsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x0c:\x02\x38\x01\"<\n\x06Status\x12\x0b\n\x07SUCCESS\x10\x00\x12\t\n\x05\x45RROR\x10\x01\x12\x0b\n\x07TIMEOUT\x10\x02\x12\r\n\tCANCELLED\x10\x03\x42\x08\n\x06result\"\xdc\x01\n\x0f\x42rowserResponse\x12\x0b\n\x03url\x18\x01 \x01(\t\x12\r\n\x05title\x18\x02 \x01(\t\x12\x10\n\x08snapshot\x18\x03 \x01(\x0c\x12\x13\n\x0b\x64om_content\x18\x04 
\x01(\t\x12\x11\n\ta11y_tree\x18\x05 \x01(\t\x12\x13\n\x0b\x65val_result\x18\x06 \x01(\t\x12.\n\x0f\x63onsole_history\x18\x07 \x03(\x0b\x32\x15.agent.ConsoleMessage\x12.\n\x0fnetwork_history\x18\x08 \x03(\x0b\x32\x15.agent.NetworkRequest\"C\n\x0e\x43onsoleMessage\x12\r\n\x05level\x18\x01 \x01(\t\x12\x0c\n\x04text\x18\x02 \x01(\t\x12\x14\n\x0ctimestamp_ms\x18\x03 \x01(\x03\"h\n\x0eNetworkRequest\x12\x0e\n\x06method\x18\x01 \x01(\t\x12\x0b\n\x03url\x18\x02 \x01(\t\x12\x0e\n\x06status\x18\x03 \x01(\x05\x12\x15\n\rresource_type\x18\x04 \x01(\t\x12\x12\n\nlatency_ms\x18\x05 \x01(\x03\",\n\x0eWorkPoolUpdate\x12\x1a\n\x12\x61vailable_task_ids\x18\x01 \x03(\t\"4\n\x10TaskClaimRequest\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x0f\n\x07node_id\x18\x02 \x01(\t\"E\n\x11TaskClaimResponse\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x0f\n\x07granted\x18\x02 \x01(\x08\x12\x0e\n\x06reason\x18\x03 \x01(\t\"\xc1\x01\n\tHeartbeat\x12\x0f\n\x07node_id\x18\x01 \x01(\t\x12\x19\n\x11\x63pu_usage_percent\x18\x02 \x01(\x02\x12\x1c\n\x14memory_usage_percent\x18\x03 \x01(\x02\x12\x1b\n\x13\x61\x63tive_worker_count\x18\x04 \x01(\x05\x12\x1b\n\x13max_worker_capacity\x18\x05 \x01(\x05\x12\x16\n\x0estatus_message\x18\x06 \x01(\t\x12\x18\n\x10running_task_ids\x18\x07 \x03(\t\"-\n\x13HealthCheckResponse\x12\x16\n\x0eserver_time_ms\x18\x01 \x01(\x03\"\xd3\x01\n\x0f\x46ileSyncMessage\x12\x12\n\nsession_id\x18\x01 \x01(\t\x12,\n\x08manifest\x18\x02 \x01(\x0b\x32\x18.agent.DirectoryManifestH\x00\x12\'\n\tfile_data\x18\x03 \x01(\x0b\x32\x12.agent.FilePayloadH\x00\x12#\n\x06status\x18\x04 \x01(\x0b\x32\x11.agent.SyncStatusH\x00\x12%\n\x07\x63ontrol\x18\x05 \x01(\x0b\x32\x12.agent.SyncControlH\x00\x42\t\n\x07payload\"\xc6\x01\n\x0bSyncControl\x12)\n\x06\x61\x63tion\x18\x01 \x01(\x0e\x32\x19.agent.SyncControl.Action\x12\x0c\n\x04path\x18\x02 \x01(\t\x12\x15\n\rrequest_paths\x18\x03 
\x03(\t\"g\n\x06\x41\x63tion\x12\x12\n\x0eSTART_WATCHING\x10\x00\x12\x11\n\rSTOP_WATCHING\x10\x01\x12\x08\n\x04LOCK\x10\x02\x12\n\n\x06UNLOCK\x10\x03\x12\x14\n\x10REFRESH_MANIFEST\x10\x04\x12\n\n\x06RESYNC\x10\x05\"F\n\x11\x44irectoryManifest\x12\x11\n\troot_path\x18\x01 \x01(\t\x12\x1e\n\x05\x66iles\x18\x02 \x03(\x0b\x32\x0f.agent.FileInfo\"D\n\x08\x46ileInfo\x12\x0c\n\x04path\x18\x01 \x01(\t\x12\x0c\n\x04size\x18\x02 \x01(\x03\x12\x0c\n\x04hash\x18\x03 \x01(\t\x12\x0e\n\x06is_dir\x18\x04 \x01(\x08\"_\n\x0b\x46ilePayload\x12\x0c\n\x04path\x18\x01 \x01(\t\x12\r\n\x05\x63hunk\x18\x02 \x01(\x0c\x12\x13\n\x0b\x63hunk_index\x18\x03 \x01(\x05\x12\x10\n\x08is_final\x18\x04 \x01(\x08\x12\x0c\n\x04hash\x18\x05 \x01(\t\"\xa0\x01\n\nSyncStatus\x12$\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x16.agent.SyncStatus.Code\x12\x0f\n\x07message\x18\x02 \x01(\t\x12\x17\n\x0freconcile_paths\x18\x03 \x03(\t\"B\n\x04\x43ode\x12\x06\n\x02OK\x10\x00\x12\t\n\x05\x45RROR\x10\x01\x12\x16\n\x12RECONCILE_REQUIRED\x10\x02\x12\x0f\n\x0bIN_PROGRESS\x10\x03\x32\xe9\x01\n\x11\x41gentOrchestrator\x12L\n\x11SyncConfiguration\x12\x1a.agent.RegistrationRequest\x1a\x1b.agent.RegistrationResponse\x12\x44\n\nTaskStream\x12\x18.agent.ClientTaskMessage\x1a\x18.agent.ServerTaskMessage(\x01\x30\x01\x12@\n\x0cReportHealth\x12\x10.agent.Heartbeat\x1a\x1a.agent.HealthCheckResponse(\x01\x30\x01\x62\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) -_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'protos.agent_pb2', _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'agent_pb2', _globals) if _descriptor._USE_C_DESCRIPTORS == False: DESCRIPTOR._options = None _globals['_REGISTRATIONREQUEST_CAPABILITIESENTRY']._options = None _globals['_REGISTRATIONREQUEST_CAPABILITIESENTRY']._serialized_options = b'8\001' _globals['_TASKRESPONSE_ARTIFACTSENTRY']._options = None _globals['_TASKRESPONSE_ARTIFACTSENTRY']._serialized_options = b'8\001' - 
_globals['_REGISTRATIONREQUEST']._serialized_start=30 - _globals['_REGISTRATIONREQUEST']._serialized_end=252 - _globals['_REGISTRATIONREQUEST_CAPABILITIESENTRY']._serialized_start=201 - _globals['_REGISTRATIONREQUEST_CAPABILITIESENTRY']._serialized_end=252 - _globals['_SANDBOXPOLICY']._serialized_start=255 - _globals['_SANDBOXPOLICY']._serialized_end=452 - _globals['_SANDBOXPOLICY_MODE']._serialized_start=418 - _globals['_SANDBOXPOLICY_MODE']._serialized_end=452 - _globals['_REGISTRATIONRESPONSE']._serialized_start=454 - _globals['_REGISTRATIONRESPONSE']._serialized_end=574 - _globals['_CLIENTTASKMESSAGE']._serialized_start=577 - _globals['_CLIENTTASKMESSAGE']._serialized_end=832 - _globals['_NODEANNOUNCE']._serialized_start=834 - _globals['_NODEANNOUNCE']._serialized_end=865 - _globals['_BROWSEREVENT']._serialized_start=868 - _globals['_BROWSEREVENT']._serialized_end=1003 - _globals['_SERVERTASKMESSAGE']._serialized_start=1006 - _globals['_SERVERTASKMESSAGE']._serialized_end=1275 - _globals['_TASKCANCELREQUEST']._serialized_start=1277 - _globals['_TASKCANCELREQUEST']._serialized_end=1313 - _globals['_TASKREQUEST']._serialized_start=1316 - _globals['_TASKREQUEST']._serialized_end=1525 - _globals['_BROWSERACTION']._serialized_start=1528 - _globals['_BROWSERACTION']._serialized_end=1816 - _globals['_BROWSERACTION_ACTIONTYPE']._serialized_start=1682 - _globals['_BROWSERACTION_ACTIONTYPE']._serialized_end=1816 - _globals['_TASKRESPONSE']._serialized_start=1819 - _globals['_TASKRESPONSE']._serialized_end=2171 - _globals['_TASKRESPONSE_ARTIFACTSENTRY']._serialized_start=2051 - _globals['_TASKRESPONSE_ARTIFACTSENTRY']._serialized_end=2099 - _globals['_TASKRESPONSE_STATUS']._serialized_start=2101 - _globals['_TASKRESPONSE_STATUS']._serialized_end=2161 - _globals['_BROWSERRESPONSE']._serialized_start=2174 - _globals['_BROWSERRESPONSE']._serialized_end=2394 - _globals['_CONSOLEMESSAGE']._serialized_start=2396 - _globals['_CONSOLEMESSAGE']._serialized_end=2463 - 
_globals['_NETWORKREQUEST']._serialized_start=2465 - _globals['_NETWORKREQUEST']._serialized_end=2569 - _globals['_WORKPOOLUPDATE']._serialized_start=2571 - _globals['_WORKPOOLUPDATE']._serialized_end=2615 - _globals['_TASKCLAIMREQUEST']._serialized_start=2617 - _globals['_TASKCLAIMREQUEST']._serialized_end=2669 - _globals['_TASKCLAIMRESPONSE']._serialized_start=2671 - _globals['_TASKCLAIMRESPONSE']._serialized_end=2740 - _globals['_HEARTBEAT']._serialized_start=2743 - _globals['_HEARTBEAT']._serialized_end=2936 - _globals['_HEALTHCHECKRESPONSE']._serialized_start=2938 - _globals['_HEALTHCHECKRESPONSE']._serialized_end=2983 - _globals['_FILESYNCMESSAGE']._serialized_start=2986 - _globals['_FILESYNCMESSAGE']._serialized_end=3197 - _globals['_SYNCCONTROL']._serialized_start=3200 - _globals['_SYNCCONTROL']._serialized_end=3375 - _globals['_SYNCCONTROL_ACTION']._serialized_start=3272 - _globals['_SYNCCONTROL_ACTION']._serialized_end=3375 - _globals['_DIRECTORYMANIFEST']._serialized_start=3377 - _globals['_DIRECTORYMANIFEST']._serialized_end=3447 - _globals['_FILEINFO']._serialized_start=3449 - _globals['_FILEINFO']._serialized_end=3517 - _globals['_FILEPAYLOAD']._serialized_start=3519 - _globals['_FILEPAYLOAD']._serialized_end=3614 - _globals['_SYNCSTATUS']._serialized_start=3617 - _globals['_SYNCSTATUS']._serialized_end=3777 - _globals['_SYNCSTATUS_CODE']._serialized_start=3711 - _globals['_SYNCSTATUS_CODE']._serialized_end=3777 - _globals['_AGENTORCHESTRATOR']._serialized_start=3780 - _globals['_AGENTORCHESTRATOR']._serialized_end=4013 + _globals['_REGISTRATIONREQUEST']._serialized_start=23 + _globals['_REGISTRATIONREQUEST']._serialized_end=245 + _globals['_REGISTRATIONREQUEST_CAPABILITIESENTRY']._serialized_start=194 + _globals['_REGISTRATIONREQUEST_CAPABILITIESENTRY']._serialized_end=245 + _globals['_SANDBOXPOLICY']._serialized_start=248 + _globals['_SANDBOXPOLICY']._serialized_end=445 + _globals['_SANDBOXPOLICY_MODE']._serialized_start=411 + 
_globals['_SANDBOXPOLICY_MODE']._serialized_end=445 + _globals['_REGISTRATIONRESPONSE']._serialized_start=447 + _globals['_REGISTRATIONRESPONSE']._serialized_end=567 + _globals['_CLIENTTASKMESSAGE']._serialized_start=570 + _globals['_CLIENTTASKMESSAGE']._serialized_end=825 + _globals['_NODEANNOUNCE']._serialized_start=827 + _globals['_NODEANNOUNCE']._serialized_end=858 + _globals['_BROWSEREVENT']._serialized_start=861 + _globals['_BROWSEREVENT']._serialized_end=996 + _globals['_SERVERTASKMESSAGE']._serialized_start=999 + _globals['_SERVERTASKMESSAGE']._serialized_end=1268 + _globals['_TASKCANCELREQUEST']._serialized_start=1270 + _globals['_TASKCANCELREQUEST']._serialized_end=1306 + _globals['_TASKREQUEST']._serialized_start=1309 + _globals['_TASKREQUEST']._serialized_end=1518 + _globals['_BROWSERACTION']._serialized_start=1521 + _globals['_BROWSERACTION']._serialized_end=1809 + _globals['_BROWSERACTION_ACTIONTYPE']._serialized_start=1675 + _globals['_BROWSERACTION_ACTIONTYPE']._serialized_end=1809 + _globals['_TASKRESPONSE']._serialized_start=1812 + _globals['_TASKRESPONSE']._serialized_end=2164 + _globals['_TASKRESPONSE_ARTIFACTSENTRY']._serialized_start=2044 + _globals['_TASKRESPONSE_ARTIFACTSENTRY']._serialized_end=2092 + _globals['_TASKRESPONSE_STATUS']._serialized_start=2094 + _globals['_TASKRESPONSE_STATUS']._serialized_end=2154 + _globals['_BROWSERRESPONSE']._serialized_start=2167 + _globals['_BROWSERRESPONSE']._serialized_end=2387 + _globals['_CONSOLEMESSAGE']._serialized_start=2389 + _globals['_CONSOLEMESSAGE']._serialized_end=2456 + _globals['_NETWORKREQUEST']._serialized_start=2458 + _globals['_NETWORKREQUEST']._serialized_end=2562 + _globals['_WORKPOOLUPDATE']._serialized_start=2564 + _globals['_WORKPOOLUPDATE']._serialized_end=2608 + _globals['_TASKCLAIMREQUEST']._serialized_start=2610 + _globals['_TASKCLAIMREQUEST']._serialized_end=2662 + _globals['_TASKCLAIMRESPONSE']._serialized_start=2664 + _globals['_TASKCLAIMRESPONSE']._serialized_end=2733 + 
_globals['_HEARTBEAT']._serialized_start=2736 + _globals['_HEARTBEAT']._serialized_end=2929 + _globals['_HEALTHCHECKRESPONSE']._serialized_start=2931 + _globals['_HEALTHCHECKRESPONSE']._serialized_end=2976 + _globals['_FILESYNCMESSAGE']._serialized_start=2979 + _globals['_FILESYNCMESSAGE']._serialized_end=3190 + _globals['_SYNCCONTROL']._serialized_start=3193 + _globals['_SYNCCONTROL']._serialized_end=3391 + _globals['_SYNCCONTROL_ACTION']._serialized_start=3288 + _globals['_SYNCCONTROL_ACTION']._serialized_end=3391 + _globals['_DIRECTORYMANIFEST']._serialized_start=3393 + _globals['_DIRECTORYMANIFEST']._serialized_end=3463 + _globals['_FILEINFO']._serialized_start=3465 + _globals['_FILEINFO']._serialized_end=3533 + _globals['_FILEPAYLOAD']._serialized_start=3535 + _globals['_FILEPAYLOAD']._serialized_end=3630 + _globals['_SYNCSTATUS']._serialized_start=3633 + _globals['_SYNCSTATUS']._serialized_end=3793 + _globals['_SYNCSTATUS_CODE']._serialized_start=3727 + _globals['_SYNCSTATUS_CODE']._serialized_end=3793 + _globals['_AGENTORCHESTRATOR']._serialized_start=3796 + _globals['_AGENTORCHESTRATOR']._serialized_end=4029 # @@protoc_insertion_point(module_scope) diff --git a/ai-hub/app/protos/agent_pb2_grpc.py b/ai-hub/app/protos/agent_pb2_grpc.py index f551b0b..932d45e 100644 --- a/ai-hub/app/protos/agent_pb2_grpc.py +++ b/ai-hub/app/protos/agent_pb2_grpc.py @@ -2,7 +2,7 @@ """Client and server classes corresponding to protobuf-defined services.""" import grpc -from protos import agent_pb2 as protos_dot_agent__pb2 +import agent_pb2 as agent__pb2 class AgentOrchestratorStub(object): @@ -17,18 +17,18 @@ """ self.SyncConfiguration = channel.unary_unary( '/agent.AgentOrchestrator/SyncConfiguration', - request_serializer=protos_dot_agent__pb2.RegistrationRequest.SerializeToString, - response_deserializer=protos_dot_agent__pb2.RegistrationResponse.FromString, + request_serializer=agent__pb2.RegistrationRequest.SerializeToString, + 
response_deserializer=agent__pb2.RegistrationResponse.FromString, ) self.TaskStream = channel.stream_stream( '/agent.AgentOrchestrator/TaskStream', - request_serializer=protos_dot_agent__pb2.ClientTaskMessage.SerializeToString, - response_deserializer=protos_dot_agent__pb2.ServerTaskMessage.FromString, + request_serializer=agent__pb2.ClientTaskMessage.SerializeToString, + response_deserializer=agent__pb2.ServerTaskMessage.FromString, ) self.ReportHealth = channel.stream_stream( '/agent.AgentOrchestrator/ReportHealth', - request_serializer=protos_dot_agent__pb2.Heartbeat.SerializeToString, - response_deserializer=protos_dot_agent__pb2.HealthCheckResponse.FromString, + request_serializer=agent__pb2.Heartbeat.SerializeToString, + response_deserializer=agent__pb2.HealthCheckResponse.FromString, ) @@ -62,18 +62,18 @@ rpc_method_handlers = { 'SyncConfiguration': grpc.unary_unary_rpc_method_handler( servicer.SyncConfiguration, - request_deserializer=protos_dot_agent__pb2.RegistrationRequest.FromString, - response_serializer=protos_dot_agent__pb2.RegistrationResponse.SerializeToString, + request_deserializer=agent__pb2.RegistrationRequest.FromString, + response_serializer=agent__pb2.RegistrationResponse.SerializeToString, ), 'TaskStream': grpc.stream_stream_rpc_method_handler( servicer.TaskStream, - request_deserializer=protos_dot_agent__pb2.ClientTaskMessage.FromString, - response_serializer=protos_dot_agent__pb2.ServerTaskMessage.SerializeToString, + request_deserializer=agent__pb2.ClientTaskMessage.FromString, + response_serializer=agent__pb2.ServerTaskMessage.SerializeToString, ), 'ReportHealth': grpc.stream_stream_rpc_method_handler( servicer.ReportHealth, - request_deserializer=protos_dot_agent__pb2.Heartbeat.FromString, - response_serializer=protos_dot_agent__pb2.HealthCheckResponse.SerializeToString, + request_deserializer=agent__pb2.Heartbeat.FromString, + response_serializer=agent__pb2.HealthCheckResponse.SerializeToString, ), } generic_handler = 
grpc.method_handlers_generic_handler( @@ -98,8 +98,8 @@ timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/agent.AgentOrchestrator/SyncConfiguration', - protos_dot_agent__pb2.RegistrationRequest.SerializeToString, - protos_dot_agent__pb2.RegistrationResponse.FromString, + agent__pb2.RegistrationRequest.SerializeToString, + agent__pb2.RegistrationResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @@ -115,8 +115,8 @@ timeout=None, metadata=None): return grpc.experimental.stream_stream(request_iterator, target, '/agent.AgentOrchestrator/TaskStream', - protos_dot_agent__pb2.ClientTaskMessage.SerializeToString, - protos_dot_agent__pb2.ServerTaskMessage.FromString, + agent__pb2.ClientTaskMessage.SerializeToString, + agent__pb2.ServerTaskMessage.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @@ -132,7 +132,7 @@ timeout=None, metadata=None): return grpc.experimental.stream_stream(request_iterator, target, '/agent.AgentOrchestrator/ReportHealth', - protos_dot_agent__pb2.Heartbeat.SerializeToString, - protos_dot_agent__pb2.HealthCheckResponse.FromString, + agent__pb2.Heartbeat.SerializeToString, + agent__pb2.HealthCheckResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) diff --git a/poc-grpc-agent/agent_node/config.py b/poc-grpc-agent/agent_node/config.py index 6ed6746..cdc367c 100644 --- a/poc-grpc-agent/agent_node/config.py +++ b/poc-grpc-agent/agent_node/config.py @@ -1,21 +1,49 @@ import os import platform +import yaml -# 12-Factor Config: Environment variables with defaults -SECRET_KEY = os.getenv("AGENT_SECRET_KEY", "cortex-secret-shared-key") -NODE_ID = os.getenv("AGENT_NODE_ID", "agent-node-007") -NODE_DESC = os.getenv("AGENT_NODE_DESC", "Modular Stateful Node") +# Path to the generated config file in the bundled 
distribution +CONFIG_PATH = "agent_config.yaml" -# Orchestrator Connection -SERVER_HOST = os.getenv("SERVER_HOST", "localhost") -SERVER_PORT = os.getenv("SERVER_PORT", "50051") +# Default values +_defaults = { + "node_id": "agent-node-007", + "node_description": "Modular Stateful Node", + "hub_url": "https://ai.jerxie.com", + "grpc_endpoint": "localhost:50051", + "auth_token": os.getenv("AGENT_AUTH_TOKEN", "cortex-secret-shared-key"), + "sync_root": "/tmp/cortex-sync", + "tls": True, + "max_skill_workers": 5, + "health_report_interval": 10, +} -# Certificate Paths +# 1. Load from YAML if present +_config = _defaults.copy() +if os.path.exists(CONFIG_PATH): + try: + with open(CONFIG_PATH, 'r') as f: + yaml_config = yaml.safe_load(f) or {} + _config.update(yaml_config) + print(f"[*] Loaded node configuration from {CONFIG_PATH}") + except Exception as e: + print(f"[!] Error loading {CONFIG_PATH}: {e}") + +# 2. Override with Environment Variables (12-Factor style) +NODE_ID = os.getenv("AGENT_NODE_ID", _config["node_id"]) +NODE_DESC = os.getenv("AGENT_NODE_DESC", _config["node_description"]) +SERVER_HOST_PORT = os.getenv("GRPC_ENDPOINT", _config["grpc_endpoint"]) # e.g. 
"ai.jerxie.com:50051" +AUTH_TOKEN = os.getenv("AGENT_AUTH_TOKEN", _config["auth_token"]) +SYNC_DIR = os.getenv("CORTEX_SYNC_DIR", _config["sync_root"]) +TLS_ENABLED = os.getenv("AGENT_TLS_ENABLED", str(_config["tls"])).lower() == 'true' + +HEALTH_REPORT_INTERVAL = int(os.getenv("HEALTH_REPORT_INTERVAL", _config["health_report_interval"])) +MAX_SKILL_WORKERS = int(os.getenv("MAX_SKILL_WORKERS", _config["max_skill_workers"])) + +SECRET_KEY = os.getenv("AGENT_SECRET_KEY", _config.get("secret_key", "dev-secret-key-1337")) + +# These are still available but likely replaced by AUTH_TOKEN / TLS_ENABLED logic CERT_CA = os.getenv("CERT_CA", "certs/ca.crt") CERT_CLIENT_CRT = os.getenv("CERT_CLIENT_CRT", "certs/client.crt") CERT_CLIENT_KEY = os.getenv("CERT_CLIENT_KEY", "certs/client.key") -# Resource Limits -MAX_SKILL_WORKERS = int(os.getenv("MAX_SKILL_WORKERS", "5")) -HEALTH_REPORT_INTERVAL = int(os.getenv("HEALTH_REPORT_INTERVAL", "10")) -SYNC_DIR = os.getenv("CORTEX_SYNC_DIR", "/tmp/cortex-sync") diff --git a/poc-grpc-agent/agent_node/node.py b/poc-grpc-agent/agent_node/node.py index 8f6718b..6b04881 100644 --- a/poc-grpc-agent/agent_node/node.py +++ b/poc-grpc-agent/agent_node/node.py @@ -9,9 +9,10 @@ from agent_node.core.sandbox import SandboxEngine from agent_node.core.sync import NodeSyncManager from agent_node.core.watcher import WorkspaceWatcher -from agent_node.utils.auth import create_auth_token, verify_task_signature +from agent_node.utils.auth import verify_task_signature from agent_node.utils.network import get_secure_stub -from agent_node.config import NODE_ID, NODE_DESC, HEALTH_REPORT_INTERVAL, MAX_SKILL_WORKERS +from agent_node.config import NODE_ID, NODE_DESC, AUTH_TOKEN, HEALTH_REPORT_INTERVAL, MAX_SKILL_WORKERS + class AgentNode: """The 'Agent Core': Orchestrates Local Skills and Maintains gRPC Connection.""" @@ -29,10 +30,11 @@ print(f"[*] Handshake with Orchestrator: {self.node_id}") reg_req = agent_pb2.RegistrationRequest( node_id=self.node_id, - 
auth_token=create_auth_token(self.node_id), + auth_token=AUTH_TOKEN, node_description=NODE_DESC, capabilities={"shell": "v1", "browser": "playwright-sync-bridge"} ) + try: res = self.stub.SyncConfiguration(reg_req) diff --git a/poc-grpc-agent/agent_node/utils/network.py b/poc-grpc-agent/agent_node/utils/network.py index 6ba7cc2..3eac1c6 100644 --- a/poc-grpc-agent/agent_node/utils/network.py +++ b/poc-grpc-agent/agent_node/utils/network.py @@ -1,13 +1,27 @@ import grpc +import os from protos import agent_pb2_grpc -from agent_node.config import SERVER_HOST, SERVER_PORT, CERT_CA, CERT_CLIENT_CRT, CERT_CLIENT_KEY +from agent_node.config import SERVER_HOST_PORT, TLS_ENABLED, CERT_CA, CERT_CLIENT_CRT, CERT_CLIENT_KEY def get_secure_stub(): - """Initializes a gRPC secure channel and returns the orchestrator stub.""" - with open(CERT_CLIENT_KEY, 'rb') as f: pkey = f.read() - with open(CERT_CLIENT_CRT, 'rb') as f: cert = f.read() - with open(CERT_CA, 'rb') as f: ca = f.read() + """Initializes a gRPC channel (Secure or Insecure) and returns the orchestrator stub.""" - creds = grpc.ssl_channel_credentials(ca, pkey, cert) - channel = grpc.secure_channel(f'{SERVER_HOST}:{SERVER_PORT}', creds) - return agent_pb2_grpc.AgentOrchestratorStub(channel) + if not TLS_ENABLED: + print(f"[!] TLS is disabled. Connecting via insecure channel to {SERVER_HOST_PORT}") + channel = grpc.insecure_channel(SERVER_HOST_PORT) + return agent_pb2_grpc.AgentOrchestratorStub(channel) + + print(f"[*] Connecting via secure (mTLS) channel to {SERVER_HOST_PORT}") + try: + with open(CERT_CLIENT_KEY, 'rb') as f: pkey = f.read() + with open(CERT_CLIENT_CRT, 'rb') as f: cert = f.read() + with open(CERT_CA, 'rb') as f: ca = f.read() + + creds = grpc.ssl_channel_credentials(ca, pkey, cert) + channel = grpc.secure_channel(SERVER_HOST_PORT, creds) + return agent_pb2_grpc.AgentOrchestratorStub(channel) + except FileNotFoundError as e: + print(f"[!] Certificate files not found: {e}. 
Falling back to insecure channel...") + channel = grpc.insecure_channel(SERVER_HOST_PORT) + return agent_pb2_grpc.AgentOrchestratorStub(channel) + diff --git a/poc-grpc-agent/orchestrator/core/registry.py b/poc-grpc-agent/orchestrator/core/registry.py index 71a0fc9..509f412 100644 --- a/poc-grpc-agent/orchestrator/core/registry.py +++ b/poc-grpc-agent/orchestrator/core/registry.py @@ -1,5 +1,6 @@ import threading import queue +import time class AbstractNodeRegistry: """Interface for finding and tracking Agent Nodes.""" @@ -13,6 +14,7 @@ def __init__(self): self.lock = threading.Lock() self.nodes = {} # node_id -> { stats: {}, queue: queue, metadata: {} } + self.subscribers = set() # WebSocket connection objects def register(self, node_id, q, metadata): with self.lock: @@ -38,3 +40,40 @@ def list_nodes(self): with self.lock: return list(self.nodes.keys()) + + def subscribe(self, websocket): + with self.lock: + self.subscribers.add(websocket) + + def unsubscribe(self, websocket): + with self.lock: + if websocket in self.subscribers: + self.subscribers.remove(websocket) + + def emit(self, node_id, event, data, task_id=None): + """Broadcasts an event to all attached UI clients.""" + msg = { + "node_id": node_id, + "event": event, + "data": data, + "task_id": task_id, + "timestamp": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) + } + # In a real app, this would use an async-friendly event loop or Redis PUB/SUB. + # Here we just iterate. Note: caller is usually the gRPC thread. + import json + payload = json.dumps(msg) + + # We need to be careful with async WebSockets from a sync gRPC thread. + # This implementation assumes the WebSocket handler will poll a queue + # or we use a separate 'Bridge' to push from sync to async. + # For the POC, we'll log it; the AI Hub bridge will handle the actual WS push. 
+ print(f"[📡 EventBus] {node_id} -> {event}: {payload[:100]}...") + + # Internal registry record (optional) + if "events" not in self.nodes.get(node_id, {}): + if node_id in self.nodes: + self.nodes[node_id]["events"] = [] + else: return + self.nodes[node_id]["events"] = ([msg] + self.nodes[node_id]["events"])[:50] + diff --git a/poc-grpc-agent/orchestrator/services/grpc_server.py b/poc-grpc-agent/orchestrator/services/grpc_server.py index de92314..023599b 100644 --- a/poc-grpc-agent/orchestrator/services/grpc_server.py +++ b/poc-grpc-agent/orchestrator/services/grpc_server.py @@ -177,6 +177,8 @@ reason="Task successfully claimed" if success else "Task already claimed by another node" ) )) + # M6: Notify UI that a node is claiming a global task + self.registry.emit(node_id, "task_claim", {"task_id": task_id, "granted": success}) if success: sig = sign_payload(payload) @@ -189,34 +191,43 @@ )) elif kind == 'task_response': - res_obj = {"stdout": msg.task_response.stdout, "status": msg.task_response.status} - if msg.task_response.HasField("browser_result"): - br = msg.task_response.browser_result + tr = msg.task_response + res_obj = {"stdout": tr.stdout, "status": tr.status} + if tr.HasField("browser_result"): + br = tr.browser_result res_obj["browser"] = { "url": br.url, "title": br.title, "has_snapshot": len(br.snapshot) > 0, - "a11y": br.a11y_tree[:100] + "..." 
if br.a11y_tree else None, "eval": br.eval_result } - self.journal.fulfill(msg.task_response.task_id, res_obj) + self.journal.fulfill(tr.task_id, res_obj) + + # M6: Emit to EventBus for UI streaming + event_type = "task_complete" if tr.status == agent_pb2.TaskResponse.SUCCESS else "task_error" + self.registry.emit(node_id, event_type, res_obj, task_id=tr.task_id) elif kind == 'browser_event': e = msg.browser_event - prefix = "[🖥️] Live Console" if e.HasField("console_msg") else "[🌐] Net Inspect" - content = e.console_msg.text if e.HasField("console_msg") else f"{e.network_req.method} {e.network_req.url}" - print(f" {prefix}: {content}", flush=True) + event_data = {} + if e.HasField("console_msg"): + event_data = {"type": "console", "text": e.console_msg.text, "level": e.console_msg.level} + elif e.HasField("network_req"): + event_data = {"type": "network", "method": e.network_req.method, "url": e.network_req.url} + + # M6: Stream live browser logs to UI + self.registry.emit(node_id, "browser_event", event_data) elif kind == 'file_sync': - # Handle inbound file data from nodes (Node-Primary model) fs = msg.file_sync if fs.HasField("file_data"): - print(f" [📁📥] Mirroring {fs.file_data.path} (chunk {fs.file_data.chunk_index})") self.mirror.write_file_chunk(fs.session_id, fs.file_data) - # BROADCAST to other nodes in the mesh self.assistant.broadcast_file_chunk(fs.session_id, node_id, fs.file_data) + # M6: Emit sync progress (rarely to avoid flood, but good for large pushes) + if fs.file_data.chunk_index % 10 == 0: + self.registry.emit(node_id, "sync_progress", {"path": fs.file_data.path, "chunk": fs.file_data.chunk_index}) elif fs.HasField("status"): print(f" [📁] Sync Status from {node_id}: {fs.status.message}") + self.registry.emit(node_id, "sync_status", {"message": fs.status.message, "code": fs.status.code}) if fs.status.code == agent_pb2.SyncStatus.RECONCILE_REQUIRED: - print(f" [📁🔄] Server triggering recovery sync for {len(fs.status.reconcile_paths)} files to 
{node_id}") for path in fs.status.reconcile_paths: self.assistant.push_file(node_id, fs.session_id, path) diff --git a/poc-grpc-agent/requirements.txt b/poc-grpc-agent/requirements.txt index 3e04bee..5644c64 100644 --- a/poc-grpc-agent/requirements.txt +++ b/poc-grpc-agent/requirements.txt @@ -3,3 +3,4 @@ PyJWT==2.8.0 playwright==1.42.0 watchdog==4.0.0 +PyYAML==6.0.1 diff --git a/ui/client-app/src/pages/CodingAssistantPage.js b/ui/client-app/src/pages/CodingAssistantPage.js index 03767d3..fc7f0ef 100644 --- a/ui/client-app/src/pages/CodingAssistantPage.js +++ b/ui/client-app/src/pages/CodingAssistantPage.js @@ -38,6 +38,7 @@ const [attachedNodeIds, setAttachedNodeIds] = useState([]); const [workspaceId, setWorkspaceId] = useState(""); const [showConsole, setShowConsole] = useState(false); + const [syncConfig, setSyncConfig] = useState({ source: 'server', path: '' }); const fetchNodeInfo = async () => { if (!sessionId) return; @@ -66,7 +67,7 @@ if (isAttached) { await detachNodeFromSession(sessionId, nodeId); } else { - await attachNodesToSession(sessionId, [nodeId]); + await attachNodesToSession(sessionId, [nodeId], syncConfig); } fetchNodeInfo(); } catch (err) { @@ -125,22 +126,31 @@
Coding Assistant - Mesh: {attachedNodeIds.length} Nodes Active + Mesh: {accessibleNodes.filter(n => n.last_status === 'online' || n.last_status === 'idle').length} Online / {accessibleNodes.length} Total
{/* Nodes Indicator Bar (M3/M6) */} -
- {attachedNodeIds.length === 0 ? ( - No nodes attached +
+ {accessibleNodes.length === 0 ? ( + No nodes found in mesh ) : ( - attachedNodeIds.map(nid => { - const status = sessionNodeStatus[nid]?.status || 'pending'; + accessibleNodes.map(node => { + const isAttached = attachedNodeIds.includes(node.node_id); + const isOnline = node.last_status === 'online' || node.last_status === 'idle' || node.last_status === 'busy'; + return ( -
-
+
setShowNodeSelector(true)}> +
+
+ + {node.display_name} + +
- {nid}: {status.toUpperCase()} + {isAttached ? 'Attached to Session' : 'Click to Attach'} | {node.last_status.toUpperCase()}
) @@ -248,10 +258,41 @@
-

+

Select agent nodes to attach to this session. Attached nodes share the workspace {workspaceId}.

+
+

Initialization Strategy

+
+ + + +
+ + {syncConfig.source === 'node_local' && ( +
+ + setSyncConfig({ ...syncConfig, path: e.target.value })} + /> +
+ )} +
+
{accessibleNodes.length === 0 && No nodes available for your account.} {accessibleNodes.map(node => { diff --git a/ui/client-app/src/pages/NodesPage.js b/ui/client-app/src/pages/NodesPage.js index adb4762..9ca848c 100644 --- a/ui/client-app/src/pages/NodesPage.js +++ b/ui/client-app/src/pages/NodesPage.js @@ -69,7 +69,7 @@ }; return () => ws.close(); - }, [userId]); // Wait, I don't have userId here, I'll use user.id + }, [user?.id]); const handleCreateNode = async (e) => { e.preventDefault(); @@ -241,7 +241,7 @@
Tasks Running
-
{live?.stats?.active_tasks || 0}
+
{live?.stats?.active_worker_count || 0}
Uptime Score
@@ -253,13 +253,13 @@
CPU
-
+
MEM
-
+
diff --git a/ui/client-app/src/services/apiService.js b/ui/client-app/src/services/apiService.js index 98a66be..ea05042 100644 --- a/ui/client-app/src/services/apiService.js +++ b/ui/client-app/src/services/apiService.js @@ -883,7 +883,7 @@ /** * [USER] Attach more nodes to an active session. */ -export const attachNodesToSession = async (sessionId, nodeIds) => { +export const attachNodesToSession = async (sessionId, nodeIds, config = null) => { const userId = getUserId(); const response = await fetch(`${API_BASE_URL}/sessions/${sessionId}/nodes`, { method: "POST", @@ -891,7 +891,7 @@ "Content-Type": "application/json", "X-User-ID": userId, }, - body: JSON.stringify({ node_ids: nodeIds }), + body: JSON.stringify({ node_ids: nodeIds, config }), }); if (!response.ok) throw new Error("Failed to attach nodes to session"); return await response.json();