diff --git a/.agent/workflows/deploy_to_production.md b/.agent/workflows/deploy_to_production.md
index 7926209..a45e955 100644
--- a/.agent/workflows/deploy_to_production.md
+++ b/.agent/workflows/deploy_to_production.md
@@ -7,7 +7,7 @@
 **MAIN KNOWLEDGE POINT:** Agents and Users should refer to `.agent/workflows/deployment_reference.md` to understand the full proxy and architecture layout prior to running production debugging.
-1. **Automated Secret Fetching**: The `deploy_remote.sh` script will automatically pull the production password from the GitBucket Secret Vault if the `GITBUCKET_TOKEN` is available in `/app/.env.gitbucket`.
+1. **Automated Secret Fetching**: The `deploy_prod.sh` script will automatically pull the production password from the GitBucket Secret Vault if the `GITBUCKET_TOKEN` is available in `/app/.env.gitbucket`.
 2. **Sync**: Sync local codebase to `/tmp/cortex-hub/` on the server.
 3. **Proto Regeneration**: If `ai-hub/app/protos/agent.proto` has changed, the agent must regenerate the Python stubs:
 ```bash
@@ -15,17 +15,32 @@
 # And for the agent node
 cd /app/agent-node && python3 -m grpc_tools.protoc -Iprotos --python_out=. --grpc_python_out=. protos/agent.proto
 ```
-4. **Migrate & Rebuild**: Overwrite production files and run `bash deploy_local.sh` on the server.
-5. **Post-Deployment Health Check**: Run the `/frontend_tester` check to verify the UI and AI engine are still functional.
+4. **Migrate & Rebuild**: Overwrite production files and run `bash local_rebuild.sh` on the server.
+5. **Post-Deployment Health Check**: Perform a backend connectivity check (a short Python script run inside the production container; see below). Only use `/frontend_tester` as a last resort if UI behavior is suspect.
 ### Automated Command
 ```bash
 # This script handles authentication, syncing, and remote rebuilding.
-bash /app/deploy_remote.sh
+bash /app/deploy_prod.sh
 ```
 ### Post-Deployment (MANDATORY)
-After the script completes, the agent MUST run the `frontend_tester` workflow:
+After the script completes, the agent MUST run a basic connectivity check directly inside the production container:
+```bash
+# SSH into the host and run (you may need to provide the SSH password or already be logged in)
+docker exec ai_hub_service python3 -c "
+import requests
+# Ensure you use a valid user_id
+headers = {'X-User-ID': '9a333ccd-9c3f-432f-a030-7b1e1284a436'}
+r = requests.get('http://localhost:8000/api/v1/nodes/test-prod-node/fs/ls?path=.', headers=headers)
+print(f'Status: {r.status_code}')
+print(r.text)
+"
+```
+**Expected Outcome**: A `200 OK` status and a JSON body.
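+
+If this check needs to run unattended (e.g., from a deploy pipeline), the same request can be wrapped so a failure is unmistakable. This is a minimal sketch only, not part of `deploy_prod.sh`; it reuses the hub URL, `test-prod-node` node id, and user id from the check above and exits non-zero when the expected `200 OK` + JSON outcome is not met:
+```python
+# check_prod_api.py (hypothetical helper) - run inside the ai_hub_service container
+import sys
+import requests
+
+HUB = "http://localhost:8000/api/v1"
+headers = {"X-User-ID": "9a333ccd-9c3f-432f-a030-7b1e1284a436"}  # must be a valid user_id
+
+r = requests.get(f"{HUB}/nodes/test-prod-node/fs/ls", params={"path": "."}, headers=headers)
+print(f"Status: {r.status_code}")
+if r.status_code != 200:
+    sys.exit(1)  # non-200: backend connectivity check failed
+try:
+    print(r.json())  # expected: a JSON body (directory listing)
+except ValueError:
+    sys.exit(1)  # the hub answered, but not with JSON
+```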
+ +### Visual Verification (Worst Case) +If the backend check passes but UI behaviors are suspect or need deeper frontend validation, use the `/frontend_tester` workflow: ```bash /frontend_tester ``` diff --git a/agent-node/agent_node/node.py b/agent-node/agent_node/node.py index 8ef16a0..e7a2e74 100644 --- a/agent-node/agent_node/node.py +++ b/agent-node/agent_node/node.py @@ -215,6 +215,10 @@ def _handle_file_sync(self, fs): """Processes inbound file synchronization messages from the Orchestrator.""" sid = fs.session_id + # LOGGING + type_str = fs.WhichOneof('payload') + print(f" [📁] Sync MSG: {type_str} | Session: {sid}") + if fs.HasField("manifest"): needs_update = self.sync_mgr.handle_manifest(sid, fs.manifest) if needs_update: @@ -249,9 +253,11 @@ )) elif fs.HasField("control"): ctrl = fs.control + print(f" [📁] Control Action: {ctrl.action} (Path: {ctrl.path})") if ctrl.action == agent_pb2.SyncControl.START_WATCHING: # Path relative to sync dir or absolute watch_path = ctrl.path if os.path.isabs(ctrl.path) else os.path.join(self.sync_mgr.get_session_dir(sid), ctrl.path) + print(f" [📁👁️] Starting Watcher on: {watch_path}") self.watcher.start_watching(sid, watch_path) elif ctrl.action == agent_pb2.SyncControl.STOP_WATCHING: self.watcher.stop_watching(sid) @@ -284,27 +290,45 @@ print(f" [📁🗑️] Delete Fragment: {ctrl.path}") self._handle_fs_delete(sid, ctrl.path, task_id=fs.task_id) + def _get_base_dir(self, session_id, create=False): + """Helper to resolve the effective root for a session (Watcher > SyncDir).""" + if session_id == "__fs_explorer__": + root = os.getenv("CORTEX_FS_ROOT", "/") + print(f" [📁] Explorer Root: {root}") + return root + + # Priority 1: If we have an active watcher, use its root (e.g. Seed from Local) + watched = self.watcher.get_watch_path(session_id) + if watched: + print(f" [📁] Using Watched Path as Base: {watched}") + return watched + + # Priority 2: Standard session-scoped sync directory + fallback = self.sync_mgr.get_session_dir(session_id, create=create) + print(f" [📁] Falling back to SyncDir: {fallback}") + return fallback + def _push_full_manifest(self, session_id, rel_path=".", task_id="", shallow=False): """Pushes the current local manifest back to the server.""" print(f" [📁📤] Pushing {'Shallow' if shallow else 'Full'} Manifest for {session_id}") - # M6: If __fs_explorer__, we are browsing the root filesystem (terminal-like), otherwise session-scoped - if session_id == "__fs_explorer__": - base_dir = os.getenv("CORTEX_FS_ROOT", "/") - else: - base_dir = self.sync_mgr.get_session_dir(session_id) + base_dir = self._get_base_dir(session_id, create=True) - watch_path = rel_path if os.path.isabs(rel_path) else os.path.normpath(os.path.join(base_dir, rel_path)) + watch_path = os.path.normpath(os.path.join(base_dir, rel_path)) if not os.path.exists(watch_path): - self.task_queue.put(agent_pb2.ClientTaskMessage( - file_sync=agent_pb2.FileSyncMessage( - session_id=session_id, - task_id=task_id, - status=agent_pb2.SyncStatus(code=agent_pb2.SyncStatus.ERROR, message=f"Path {rel_path} not found") - ) - )) - return + # If the specific sub-path doesn't exist, try to create it if it's within the session dir + if session_id != "__fs_explorer__": + os.makedirs(watch_path, exist_ok=True) + else: + self.task_queue.put(agent_pb2.ClientTaskMessage( + file_sync=agent_pb2.FileSyncMessage( + session_id=session_id, + task_id=task_id, + status=agent_pb2.SyncStatus(code=agent_pb2.SyncStatus.ERROR, message=f"Path {rel_path} not found") + ) + )) + return files = [] try: @@ -364,14 
+388,12 @@ def _handle_fs_write(self, session_id, rel_path, content, is_dir, task_id=""): """Modular FS Write/Create.""" try: - if session_id == "__fs_explorer__": - base_dir = os.getenv("CORTEX_FS_ROOT", "/") - else: - base_dir = self.sync_mgr.get_session_dir(session_id) - + base_dir = self._get_base_dir(session_id, create=True) target_path = os.path.normpath(os.path.join(base_dir, rel_path)) + print(f" [📁💾] target_path: {target_path} (base_dir: {base_dir})") + if not target_path.startswith(base_dir): - raise Exception("Path traversal attempt blocked") + raise Exception(f"Path traversal attempt blocked: {target_path} is outside {base_dir}") if is_dir: os.makedirs(target_path, exist_ok=True) @@ -402,10 +424,7 @@ def _handle_fs_delete(self, session_id, rel_path, task_id=""): """Modular FS Delete.""" try: - if session_id == "__fs_explorer__": - base_dir = os.getenv("CORTEX_FS_ROOT", "/") - else: - base_dir = self.sync_mgr.get_session_dir(session_id) + base_dir = self._get_base_dir(session_id) target_path = os.path.normpath(os.path.join(base_dir, rel_path)) if not target_path.startswith(base_dir): @@ -441,13 +460,7 @@ def _push_file(self, session_id, rel_path, task_id=""): """Pushes a specific file from node to server.""" - if session_id == "__fs_explorer__": - watch_path = os.getenv("CORTEX_FS_ROOT", "/") - else: - watch_path = self.watcher.get_watch_path(session_id) - if not watch_path: - # Fallback to sync dir if watcher not started - watch_path = self.sync_mgr.get_session_dir(session_id) + watch_path = self._get_base_dir(session_id, create=False) abs_path = os.path.normpath(os.path.join(watch_path, rel_path)) if not abs_path.startswith(watch_path): diff --git a/agent-node/agent_node/utils/network.py b/agent-node/agent_node/utils/network.py index 3ef8bda..04b97c3 100644 --- a/agent-node/agent_node/utils/network.py +++ b/agent-node/agent_node/utils/network.py @@ -10,7 +10,9 @@ ('grpc.keepalive_time_ms', 30000), # Send keepalive ping every 30s ('grpc.keepalive_timeout_ms', 10000), # Wait 10s for pong ('grpc.keepalive_permit_without_calls', True), - ('grpc.http2.max_pings_without_data', 0) # Allow infinite pings + ('grpc.http2.max_pings_without_data', 0), # Allow infinite pings + ('grpc.max_receive_message_length', 128 * 1024 * 1024), + ('grpc.max_send_message_length', 128 * 1024 * 1024), ] if not TLS_ENABLED: diff --git a/ai-hub/app/api/routes/nodes.py b/ai-hub/app/api/routes/nodes.py index 371207f..2558af5 100644 --- a/ai-hub/app/api/routes/nodes.py +++ b/ai-hub/app/api/routes/nodes.py @@ -21,6 +21,7 @@ WS /nodes/stream/all?user_id=... — All-nodes global bus (multi-pane UI) """ import asyncio +import os import json import queue import uuid @@ -665,7 +666,7 @@ raise HTTPException(status_code=500, detail=str(e)) @router.post("/{node_id}/fs/touch", summary="Create File or Directory") - def fs_touch(node_id: str, req: schemas.FileWriteRequest, session_id: str = "__fs_explorer__"): + def fs_touch(node_id: str, req: schemas.FileWriteRequest): """ Create a new file or directory on the node. 
""" @@ -676,7 +677,7 @@ req.path, req.content.encode('utf-8'), req.is_dir, - session_id=session_id + session_id=req.session_id ) if not res: raise HTTPException(status_code=500, detail="Node returned an empty response.") @@ -689,23 +690,19 @@ logger.error(f"[FS] Touch error: {e}") raise HTTPException(status_code=500, detail=str(e)) - @router.delete("/{node_id}/fs/rm", summary="Delete File/Directory") - def fs_rm(node_id: str, path: str, session_id: str = "__fs_explorer__"): + @router.post("/{node_id}/fs/rm", summary="Delete File/Directory") + def fs_rm(node_id: str, req: schemas.FileDeleteRequest): """ Delete a file or directory from a remote node. """ try: orchestrator = services.orchestrator - res = orchestrator.assistant.rm(node_id, path, session_id=session_id) + res = orchestrator.assistant.rm(node_id, req.path, session_id=req.session_id) if not res: raise HTTPException(status_code=500, detail="Node returned an empty response.") - if isinstance(res, dict) and "error" in res: - raise HTTPException(status_code=500, detail=res["error"]) - return res # Expecting {"success": bool, "message": str} - except AttributeError: - raise HTTPException(status_code=500, detail="Orchestrator unavailable.") + return res except Exception as e: - logger.error(f"[FS] Rm error: {e}") + logger.error(f"[FS] Delete error: {e}") raise HTTPException(status_code=500, detail=str(e)) return router diff --git a/ai-hub/app/api/routes/sessions.py b/ai-hub/app/api/routes/sessions.py index 5855fb5..bc007f5 100644 --- a/ai-hub/app/api/routes/sessions.py +++ b/ai-hub/app/api/routes/sessions.py @@ -336,35 +336,47 @@ db.refresh(session) # Trigger actual workspace sync commands via gRPC - # We need access to the orchestrator instance from app.state - from fastapi import Request - # Note: In a real app we'd use a cleaner injection, but here we'll grab from Request if available - # or globally if it's a singleton. In this project, it's stored in app.state. - orchestrator = getattr(db, "_request_app", None) # This depends on how GetDB is implemented - # Better: just use the registry to get the assistant if possible, - # but the assistant lives in the Orchestrator. - - # Let's try to get the orchestrator from the registry if we can't get it from the app. - # Actually, let's just use a global reference or pass it in. 
- # For this implementation, I'll use a safer approach: - try: - from app.main import app - assistant = app.state.orchestrator.assistant - - config = request.config or schemas.NodeWorkspaceConfig(source="empty") - - for nid in new_nodes: - if config.source == "server": - # Server -> Node: Push everything from workspace - assistant.push_workspace(nid, session.sync_workspace_id) - elif config.source == "node_local": - # Node -> Server: Request manifest to start syncing from Node - assistant.request_manifest(nid, session.sync_workspace_id, path=config.path or ".") - # Also tell it to start watching - assistant.control_sync(nid, session.sync_workspace_id, action="START", path=config.path or ".") - - except Exception as e: - print(f"[⚠️] Failed to trigger session node sync: {e}") + # We use the injected orchestrator service from the container + orchestrator = getattr(services, "orchestrator", None) + if not orchestrator: + print("[⚠️] Orchestrator not found in ServiceContainer; cannot trigger sync.") + else: + try: + assistant = orchestrator.assistant + config = request.config or schemas.NodeWorkspaceConfig(source="empty") + + print(f"[📁] Triggering sync for session {session_id} (strategy: {config.source})") + session.sync_config = config.model_dump() + db.commit() + + # M3: Loop through request.node_ids instead of just new_nodes + # so the 'Initiate Sync' button can re-trigger sync for existing nodes + for nid in request.node_ids: + if config.source == "server": + # Server -> Node: Push everything from workspace + print(f" [📁📤] Pushing workspace {session.sync_workspace_id} to {nid}") + assistant.push_workspace(nid, session.sync_workspace_id) + assistant.control_sync(nid, session.sync_workspace_id, action="LOCK") + elif config.source == "node_local": + # Only the designated source node pulls from local disk + if config.source_node_id == nid: + print(f" [📁📥] Seeding from local disk on {nid}: {config.path}") + assistant.request_manifest(nid, session.sync_workspace_id, path=config.path or ".") + assistant.control_sync(nid, session.sync_workspace_id, action="START", path=config.path or ".") + assistant.control_sync(nid, session.sync_workspace_id, action="UNLOCK") + else: + # Other nodes in a 'node_local' initialization should start in receiver mode + print(f" [📁👀] Starting receiver mode on {nid}") + assistant.control_sync(nid, session.sync_workspace_id, action="START") + assistant.control_sync(nid, session.sync_workspace_id, action="LOCK") + + # Apply manual Read-Only Override if specifically requested (for flexibility) + if config.read_only_node_ids and nid in config.read_only_node_ids: + print(f" [🔒] Locking node {nid} (Manual Read-Only Overlay)") + assistant.control_sync(nid, session.sync_workspace_id, action="LOCK") + + except Exception as e: + print(f"[⚠️] Failed to trigger session node sync: {e}") return schemas.SessionNodeStatusResponse( session_id=session_id, @@ -376,7 +388,8 @@ last_sync=sync_status.get(nid, {}).get("last_sync"), ) for nid in session.attached_node_ids - ] + ], + sync_config=session.sync_config or {} ) @router.delete("/{session_id}/nodes/{node_id}", summary="Detach Node from Session") @@ -443,6 +456,7 @@ session_id=session_id, sync_workspace_id=session.sync_workspace_id, nodes=entries, + sync_config=session.sync_config or {}, ) return router \ No newline at end of file diff --git a/ai-hub/app/api/schemas.py b/ai-hub/app/api/schemas.py index 18e4b5b..7d8b7af 100644 --- a/ai-hub/app/api/schemas.py +++ b/ai-hub/app/api/schemas.py @@ -154,6 +154,7 @@ sync_workspace_id: 
Optional[str] = None attached_node_ids: Optional[List[str]] = Field(default_factory=list) node_sync_status: Optional[dict] = Field(default_factory=dict) + sync_config: Optional[dict] = Field(default_factory=dict) model_config = ConfigDict(from_attributes=True) # --- M3: Session Node Attachment Schemas --- @@ -162,6 +163,8 @@ """How a node should seed its workspace for a session.""" source: Literal["empty", "server", "node_local"] = "empty" path: Optional[str] = None # root path on node when source='node_local' + source_node_id: Optional[str] = None # which node to pull from if source='node_local' + read_only_node_ids: List[str] = Field(default_factory=list) # M3: Nodes that should not push changes back class NodeAttachRequest(BaseModel): """Attach one or more nodes to a session.""" @@ -180,6 +183,7 @@ session_id: int sync_workspace_id: Optional[str] = None nodes: List[NodeSyncStatusEntry] = [] + sync_config: Optional[dict] = Field(default_factory=dict) # --- M4: Node Config YAML --- @@ -409,9 +413,11 @@ path: str content: str = "" # Plain text content is_dir: bool = False + session_id: Optional[str] = "__fs_explorer__" class FileDeleteRequest(BaseModel): path: str + session_id: Optional[str] = "__fs_explorer__" # Keep backward-compat alias AgentNodeSummary = AgentNodeUserView diff --git a/ai-hub/app/core/grpc/services/assistant.py b/ai-hub/app/core/grpc/services/assistant.py index c9f6d01..bc6312c 100644 --- a/ai-hub/app/core/grpc/services/assistant.py +++ b/ai-hub/app/core/grpc/services/assistant.py @@ -94,11 +94,13 @@ def broadcast_file_chunk(self, session_id: str, sender_node_id: str, file_payload): """Broadcasts a file chunk received from one node to all other nodes in the mesh.""" - print(f" [📁📢] Broadcasting {file_payload.path} from {sender_node_id} to other nodes...") - for node_id in self.registry.list_nodes(): - if node_id == sender_node_id: - continue - + session_members = self.memberships.get(session_id, []) + destinations = [n for n in session_members if n != sender_node_id] + + if destinations: + print(f" [📁📢] Broadcasting {file_payload.path} from {sender_node_id} to: {', '.join(destinations)}") + + for node_id in destinations: node = self.registry.get_node(node_id) if not node: continue @@ -142,6 +144,12 @@ "UNLOCK": agent_pb2.SyncControl.UNLOCK } proto_action = action_map.get(action, agent_pb2.SyncControl.START_WATCHING) + + # Track for recovery & broadcast + if session_id not in self.memberships: + self.memberships[session_id] = [] + if node_id not in self.memberships[session_id]: + self.memberships[session_id].append(node_id) node.queue.put(agent_pb2.ServerTaskMessage( file_sync=agent_pb2.FileSyncMessage( @@ -243,7 +251,7 @@ self.journal.pop(tid) # M6: Update mirror locally on hub so ls sees it as synced (Only for real sessions) - if self.mirror and res.get("status") == "OK" and session_id != "__fs_explorer__": + if self.mirror and res.get("success") and session_id != "__fs_explorer__": workspace_mirror = self.mirror.get_workspace_path(session_id) dest = os.path.join(workspace_mirror, path) if is_dir: @@ -278,7 +286,7 @@ self.journal.pop(tid) # M6: remove from mirror if successful (Only for real sessions) - if self.mirror and res.get("status") == "OK" and session_id != "__fs_explorer__": + if self.mirror and res.get("success") and session_id != "__fs_explorer__": import shutil dest = os.path.join(self.mirror.get_workspace_path(session_id), path) if os.path.isdir(dest): shutil.rmtree(dest) diff --git a/ai-hub/app/core/grpc/services/grpc_server.py 
b/ai-hub/app/core/grpc/services/grpc_server.py index 0f92fd6..1b1b0d4 100644 --- a/ai-hub/app/core/grpc/services/grpc_server.py +++ b/ai-hub/app/core/grpc/services/grpc_server.py @@ -344,7 +344,15 @@ from concurrent import futures from app.protos import agent_pb2_grpc - server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) + # M6: Increase message size limits to 128MB (prevents manifest sync drops) + options = [ + ('grpc.max_receive_message_length', 128 * 1024 * 1024), + ('grpc.max_send_message_length', 128 * 1024 * 1024), + ('grpc.keepalive_time_ms', 30000), # Send keepalive ping every 30s + ('grpc.keepalive_timeout_ms', 10000), # Wait 10s for pong + ('grpc.keepalive_permit_without_calls', True), + ] + server = grpc.server(futures.ThreadPoolExecutor(max_workers=10), options=options) orchestrator = AgentOrchestrator(registry) agent_pb2_grpc.add_AgentOrchestratorServicer_to_server(orchestrator, server) diff --git a/ai-hub/app/core/services/node_registry.py b/ai-hub/app/core/services/node_registry.py index 86537bf..fdbb790 100644 --- a/ai-hub/app/core/services/node_registry.py +++ b/ai-hub/app/core/services/node_registry.py @@ -122,6 +122,7 @@ record.capabilities = metadata.get("caps", {}) record.last_status = "online" record.last_seen_at = datetime.utcnow() + db.commit() else: # Node not pre-registered by admin — log warning but don't crash print(f"[NodeRegistry] WARNING: Node '{node_id}' connected but has no DB record. Admin must register it first.") @@ -138,6 +139,7 @@ if record: record.last_status = "offline" record.last_seen_at = datetime.utcnow() + db.commit() except Exception as e: print(f"[NodeRegistry] DB mark-offline failed for {node_id}: {e}") diff --git a/ai-hub/app/db/migrate.py b/ai-hub/app/db/migrate.py index e098f0a..1f6e174 100644 --- a/ai-hub/app/db/migrate.py +++ b/ai-hub/app/db/migrate.py @@ -48,6 +48,7 @@ ("sync_workspace_id", "TEXT"), ("attached_node_ids", "TEXT"), ("node_sync_status", "TEXT"), + ("sync_config", "TEXT"), ] for col_name, col_type in session_required_columns: if col_name not in session_columns: diff --git a/ai-hub/app/db/models.py b/ai-hub/app/db/models.py index 294d38b..140ff74 100644 --- a/ai-hub/app/db/models.py +++ b/ai-hub/app/db/models.py @@ -104,6 +104,8 @@ attached_node_ids = Column(JSON, default=[], nullable=True) # Per-node sync status snapshot: {"node-alpha": {"status": "synced", "last_sync": "..."}} node_sync_status = Column(JSON, default={}, nullable=True) + # M6: Store the synchronization configuration for the session + sync_config = Column(JSON, default={}, nullable=True) messages = relationship("Message", back_populates="session", cascade="all, delete-orphan") user = relationship("User", back_populates="sessions") diff --git a/docs/features/coding_assistant.md b/docs/features/coding_assistant.md index 0cd80af..8a9b4ec 100644 --- a/docs/features/coding_assistant.md +++ b/docs/features/coding_assistant.md @@ -15,11 +15,10 @@ ### B. Workspace Header - **Title Block**: Displays the "Coding Assistant" title and an at-a-glance mesh summary (e.g., `Mesh: 2 Online / 5 Total`). - **Node Attachment Bar (M3/M6)**: - - Horizontally scrollable list of nodes accessible to the user. - - Color-coded badges with pulsing green status indicators for live nodes. - - Hovering over a node displays its current status (`IDLE`, `BUSY`) and attachment state (`Attached to Session`). 
+  - **Compact Display**: Instead of listing all accessible nodes, the UI displays a compact visualization of currently *attached* nodes (e.g., overlapping indicators or a single summary pill).
+  - **File Sync Indicator**: If nodes are attached and a sync strategy is active (i.e., not 'empty'), an icon indicates that file sync is enabled. Hovering over this indicator reveals a tooltip with the exact mount source (e.g., 'Server Sync' or 'Local Node Path: /workspace').
 - **Quick Action Icons**:
-  - **Manage Mesh Nodes**: Opens the detailed Node Selector modal.
+  - **Manage Mesh Nodes**: Opens the detailed Node Selector modal to attach/detach compute resources.
   - **Toggle Execution Console**: Reveals/hides the bottom `MultiNodeConsole` (only visible when nodes are attached).
   - **Session Engine (Gear)**: Opens LLM provider configurations.
 - **Resource Monitors**:
diff --git a/local_rebuild.sh b/local_rebuild.sh
index d5dca87..a394718 100755
--- a/local_rebuild.sh
+++ b/local_rebuild.sh
@@ -41,18 +41,18 @@
 # echo "📥 Pulling latest Docker images..."
 # sudo $DOCKER_CMD pull
-# Build new images and start the services. The `--build` flag ensures
-# the images are re-built from their respective Dockerfiles.
-# The `--remove-orphans` flag ensures old service containers are cleaned up.
-echo "🏗️ Building and starting new containers (Hub & Frontend)..."
-sudo $DOCKER_CMD up -d --build --remove-orphans
+# Build new images and start the services.
+echo "🏗️ Building and starting new containers..."
+COMPOSE_FILES="-f docker-compose.yml"
 if [ -f "docker-compose.test-nodes.yml" ]; then
-    echo "🏗️ Building and starting Internal Test Nodes..."
-    # IMPORTANT: DO NOT use --remove-orphans here, as it will kill the Hub/Frontend
-    sudo $DOCKER_CMD -f docker-compose.test-nodes.yml up -d --build
+    echo "🔗 Including Internal Test Nodes in deployment..."
+    COMPOSE_FILES="$COMPOSE_FILES -f docker-compose.test-nodes.yml"
 fi
+# NOTE: --remove-orphans removes every container not defined in the compose files above; keep it only if that cleanup is intended.
+sudo $DOCKER_CMD $COMPOSE_FILES up -d --build --remove-orphans
+
 echo "✅ Containers started! Checking status..."
 sudo docker ps --filter "name=ai_"
 sudo docker ps --filter "name=cortex-"
diff --git a/ui/client-app/src/components/ChatArea.js b/ui/client-app/src/components/ChatArea.js
index 3080fea..73396c3 100644
--- a/ui/client-app/src/components/ChatArea.js
+++ b/ui/client-app/src/components/ChatArea.js
@@ -2,14 +2,22 @@
 import ChatWindow from "./ChatWindow";
 import './ChatArea.css';
-const ChatArea = ({ chatHistory, onSendMessage, isProcessing, featureName = "default" }) => {
+const ChatArea = ({
+  chatHistory,
+  onSendMessage,
+  isProcessing,
+  featureName = "default",
+  workspaceId = null,
+  syncConfig = null,
+  isSourceDisconnected = false
+}) => {
   const [inputValue, setInputValue] = useState("");
   const inputRef = useRef(null);
   const chatScrollRef = useRef(null);
   const handleSendMessage = (e) => {
     e.preventDefault();
-    if (inputValue.trim() !== "") {
+    if (inputValue.trim() !== "" && !isSourceDisconnected) {
       onSendMessage(inputValue);
       setInputValue("");
     }
@@ -38,20 +46,72 @@
       {/* Sticky Input */}
+ {featureName === "coding_assistant" && workspaceId && ( +
+
+
+
+
+
+
+ + {isSourceDisconnected ? 'Source Node Disconnected' : 'Workspace Sync Active'} + +
+ + {workspaceId} + +
+ + {syncConfig && ( +
+
+ Source: + {syncConfig.source === 'node_local' ? 'Node Local' : syncConfig.source === 'server' ? 'Hub' : 'Empty'} +
+ {syncConfig.source === 'node_local' && ( + <> +
+
+ ⚠️ SOURCE NODE: + {syncConfig.source_node_id} +
+
+
+ Path: + {syncConfig.path} +
+ + )} +
+ )} +
+ )}
{attachedNodeIds.length > 0 && ( - +
+ + {workspaceId && ( + + )} +
)}
@@ -246,6 +413,16 @@ onSendMessage={handleSendChat} isProcessing={isProcessing} featureName="coding_assistant" + workspaceId={workspaceId} + syncConfig={activeSyncConfig} + isSourceDisconnected={ + activeSyncConfig?.source === 'node_local' && + activeSyncConfig?.source_node_id && + (() => { + const sourceNode = accessibleNodes.find(n => n.node_id === activeSyncConfig.source_node_id); + return !sourceNode || (sourceNode.last_status !== 'online' && sourceNode.last_status !== 'idle'); + })() + } /> @@ -260,6 +437,18 @@ + {/* Workspace File Explorer (M6) */} + {showFileExplorer && workspaceId && attachedNodeIds.length > 0 && ( +
+ +
+ )} + {/* Node Selector Modal (M3) */} {showNodeSelector && (
@@ -280,31 +469,96 @@

Initialization Strategy

-
-
- +
+ {isAttached && ( + + )} + +
); })} -
+
+ diff --git a/ui/client-app/src/services/apiService.js b/ui/client-app/src/services/apiService.js index 41e1d17..cee4c76 100644 --- a/ui/client-app/src/services/apiService.js +++ b/ui/client-app/src/services/apiService.js @@ -870,9 +870,10 @@ /** * [FS] List directory contents on an agent node. */ -export const nodeFsList = async (nodeId, path = ".") => { +export const nodeFsList = async (nodeId, path = ".", sessionId = null) => { const userId = getUserId(); const params = new URLSearchParams({ path }); + if (sessionId) params.append("session_id", sessionId); const response = await fetch(`${NODES_BASE_ENDPOINT}/${nodeId}/fs/ls?${params.toString()}`, { method: "GET", headers: { "X-User-ID": userId }, @@ -891,9 +892,10 @@ /** * [FS] Read file content from an agent node. */ -export const nodeFsCat = async (nodeId, path) => { +export const nodeFsCat = async (nodeId, path, sessionId = null) => { const userId = getUserId(); const params = new URLSearchParams({ path }); + if (sessionId) params.append("session_id", sessionId); const response = await fetch(`${NODES_BASE_ENDPOINT}/${nodeId}/fs/cat?${params.toString()}`, { method: "GET", headers: { "X-User-ID": userId }, @@ -905,7 +907,7 @@ /** * [FS] Create or update a file or directory on an agent node. */ -export const nodeFsTouch = async (nodeId, path, content = "", isDir = false) => { +export const nodeFsTouch = async (nodeId, path, content = "", isDir = false, sessionId = null) => { const userId = getUserId(); const response = await fetch(`${NODES_BASE_ENDPOINT}/${nodeId}/fs/touch`, { method: "POST", @@ -913,7 +915,7 @@ "Content-Type": "application/json", "X-User-ID": userId, }, - body: JSON.stringify({ path, content, is_dir: isDir }), + body: JSON.stringify({ path, content, is_dir: isDir, session_id: sessionId }), }); if (!response.ok) throw new Error("Failed to create file/directory"); return await response.json(); @@ -922,12 +924,15 @@ /** * [FS] Delete a file or directory from an agent node. */ -export const nodeFsRm = async (nodeId, path) => { +export const nodeFsRm = async (nodeId, path, sessionId = null) => { const userId = getUserId(); - const params = new URLSearchParams({ path }); - const response = await fetch(`${NODES_BASE_ENDPOINT}/${nodeId}/fs/rm?${params.toString()}`, { - method: "DELETE", - headers: { "X-User-ID": userId }, + const response = await fetch(`${NODES_BASE_ENDPOINT}/${nodeId}/fs/rm`, { + method: "POST", + headers: { + "Content-Type": "application/json", + "X-User-ID": userId, + }, + body: JSON.stringify({ path, session_id: sessionId }), }); if (!response.ok) throw new Error("Failed to delete file/directory"); return await response.json();
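With this change, `fs/rm` is a `POST` carrying a JSON body rather than a `DELETE` with query parameters, and both `fs/touch` and `fs/rm` accept an optional `session_id` (defaulting to `__fs_explorer__`). A minimal sketch of exercising the new contract directly with `requests`, assuming the hub is reachable at `localhost:8000` as in the health check above; the node id and user id reuse the health-check values, while the workspace id is a placeholder:

```python
# Hypothetical smoke test for the updated fs endpoints; "ws-demo" is a placeholder
# workspace id, the node id and user id come from the health check above.
import requests

HUB = "http://localhost:8000/api/v1"
NODE = "test-prod-node"
HEADERS = {"X-User-ID": "9a333ccd-9c3f-432f-a030-7b1e1284a436"}

# Create a file scoped to a session workspace (omit session_id to target "__fs_explorer__").
touch = requests.post(
    f"{HUB}/nodes/{NODE}/fs/touch",
    headers=HEADERS,
    json={"path": "scratch/hello.txt", "content": "hi", "is_dir": False, "session_id": "ws-demo"},
)
print("touch:", touch.status_code, touch.text)

# Delete it again: rm is now POST + JSON body instead of DELETE + query string.
rm = requests.post(
    f"{HUB}/nodes/{NODE}/fs/rm",
    headers=HEADERS,
    json={"path": "scratch/hello.txt", "session_id": "ws-demo"},
)
print("rm:", rm.status_code, rm.text)
```

Moving the delete to a POST keeps the path and session scoping in the request body, mirroring `fs/touch`.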