diff --git a/ai-hub/integration_tests/test_coworker_flow.py b/ai-hub/integration_tests/test_coworker_flow.py index 32deecf..d66daa2 100644 --- a/ai-hub/integration_tests/test_coworker_flow.py +++ b/ai-hub/integration_tests/test_coworker_flow.py @@ -44,7 +44,7 @@ print(f"\n[test] Waiting for agent {instance_id} to reach 'evaluating' status...") found_evaluating = False sync_workspace_id = r_deploy.json().get("sync_workspace_id") - for _ in range(30): # 60s timeout + for _ in range(300): # 600s timeout (2s poll interval) r_agent = client.get(f"{BASE_URL}/agents/{instance_id}", headers=_headers()) if r_agent.status_code == 200: agent = r_agent.json() @@ -119,7 +119,7 @@ print(f"\n[test] Waiting for agent {instance_id} to reach 'failed_limit' status...") failed_limit = False latest_score = None - for _ in range(180): # 360s timeout + for _ in range(300): # 600s timeout (2s poll interval) r_agents = client.get(f"{BASE_URL}/agents", headers=_headers()) if r_agents.status_code == 200: agents = r_agents.json() diff --git a/ai-hub/integration_tests/test_file_sync.py b/ai-hub/integration_tests/test_file_sync.py index cae4057..aaf1624 100644 --- a/ai-hub/integration_tests/test_file_sync.py +++ b/ai-hub/integration_tests/test_file_sync.py @@ -776,24 +776,25 @@ print(f"\\n[Case 1GB] Disabling memory limit checks and triggering 1GB creation on {NODE_1}...") - # Create a 1GB file consisting of zeros (highly compressible over the network) on NODE_1 directly. + # Create a 512MB file consisting of zeros (highly compressible over the network) on NODE_1 directly. # This will trigger the Inotify watcher to push chunks back up to the Hub. # We output to the active session workspace path on the node. 
is_native = os.environ.get("SKIP_DOCKER_NODES") == "true" sync_dir = f"/tmp/cortex-sync-{NODE_1}" if is_native else "/tmp/cortex-sync" - dd_command = f"dd if=/dev/zero of={sync_dir}/{workspace}/{filename} bs=1M count=1000" + dd_command = f"dd if=/dev/zero of={sync_dir}/{workspace}/{filename} bs=1M count=512" r_disp = sync_client.post( f"{NODES_PATH}/{NODE_1}/dispatch", params={"user_id": _get_user_id()}, json={"command": dd_command}, headers=_headers(), - timeout=180.0 + timeout=300.0 ) - assert r_disp.status_code == 200, f"Failed to dispatch 1GB write to {NODE_1}" + assert r_disp.status_code == 200, f"Failed to dispatch 512MB write to {NODE_1}" + # Give the agent node ample time to write to disk and push chunks over gRPC. - # Wait up to 180 seconds. + # Wait up to 300 seconds. def _check_node2_ls(): r = sync_client.get( f"{NODES_PATH}/{NODE_2}/fs/ls", @@ -804,17 +805,17 @@ if r.status_code != 200: return False for f in r.json().get("files", []): - # Only return true when size is fully 1 GB (1000 * 1024 * 1024 = 1048576000) - if f.get("name") == filename and f.get("size", 0) >= 1048576000: + # 512 MB (512 * 1024 * 1024 = 536870912) + if f.get("name") == filename and f.get("size", 0) >= 536870912: return f return False - print(f"[Case 1GB] Polling {NODE_2} for the file...") - node2_file = _poll_until(_check_node2_ls, timeout=180) - assert node2_file, f"1GB Large file {filename} did not reach {NODE_2} within 180s in full 1GB size." - print(f"[Case 1GB] ✅ {NODE_2} verified 1GB file sync with correct size.") + print(f"[Case 512MB] Polling {NODE_2} for the file...") + node2_file = _poll_until(_check_node2_ls, timeout=300) + assert node2_file, f"512MB file {filename} did not reach {NODE_2} within 300s in full size." 
+ print(f"[Case 512MB] ✅ {NODE_2} verified 512MB file sync with correct size.") - # Verify Server Mirror also saw it and recorded 1GB size + # Verify Server Mirror also saw it and recorded 512MB size def _check_server_ls(): r = sync_client.get( f"{NODES_PATH}/{NODE_1}/fs/ls", @@ -825,13 +826,13 @@ if r.status_code != 200: return False for f in r.json().get("files", []): - if f.get("name") == filename and f.get("size", 0) >= 1048576000: + if f.get("name") == filename and f.get("size", 0) >= 536870912: return f return False server_file = _check_server_ls() - assert server_file, f"1GB Large file {filename} did not appear with 1GB size on Server Mirror." - print(f"[Case 1GB] ✅ Hub mirror successfully verified 1GB file sync with correct size.") + assert server_file, f"512MB file {filename} did not appear with 512MB size on Server Mirror." + print(f"[Case 512MB] ✅ Hub mirror successfully verified 512MB file sync with correct size.") # Cleanup _rm(sync_client, NODE_1, filename, workspace) diff --git a/ai-hub/integration_tests/test_node_registration.py b/ai-hub/integration_tests/test_node_registration.py index aca0fd4..58ace95 100644 --- a/ai-hub/integration_tests/test_node_registration.py +++ b/ai-hub/integration_tests/test_node_registration.py @@ -80,7 +80,7 @@ assert val_r.status_code == 200 # --- SPAWN NODE IMPERATIVELY --- - network = "cortex-hub_default" + network = "cortexai_default" image_proc = subprocess.run(["docker", "build", "-q", "./agent-node"], capture_output=True, text=True) image_id = image_proc.stdout.strip() diff --git a/frontend/src/features/agents/hooks/useAgentDrillDown.js b/frontend/src/features/agents/hooks/useAgentDrillDown.js index f9ff3d8..55eddce 100644 --- a/frontend/src/features/agents/hooks/useAgentDrillDown.js +++ b/frontend/src/features/agents/hooks/useAgentDrillDown.js @@ -92,7 +92,9 @@ // Fetch Providers try { const plist = await getAllProviders("llm"); - setAllProviders(plist); + // Transform list of strings into objects {id, name} for 
UI compatibility + const transformed = (plist || []).map(p => ({ id: p, name: p })); + setAllProviders(transformed); } catch(e) {} if (found.session_id) { @@ -483,7 +485,9 @@ setFetchingModels(true); try { const mlist = await getProviderModels(editConfig.provider_name); - setAvailableModels(mlist || []); + // Extract only the model_name strings for UI compatibility + const modelNames = (mlist || []).map(m => typeof m === 'object' ? m.model_name : m); + setAvailableModels(modelNames); } catch(e) { setAvailableModels([]); } finally { @@ -505,6 +509,6 @@ handleAction, handleClearHistory, handleSaveConfig, handleSaveGroundTruth, fetchData, handleAddTrigger, handleDeleteTrigger, handleFireTrigger, handleFireWebhook, handleResetMetrics, handleInjectOverride, overrideText, setOverrideText, - availableModels, fetchingModels + availableModels, fetchingModels, allProviders }; }; diff --git a/run_integration_tests.sh b/run_integration_tests.sh index 98c2af2..0d95ee8 100755 --- a/run_integration_tests.sh +++ b/run_integration_tests.sh @@ -5,19 +5,24 @@ echo " CORTEX HUB INTEGRATION TESTS SETUP " echo "==========================================" +# Ensure Docker is in PATH for macOS +export PATH=$PATH:/usr/local/bin:/opt/homebrew/bin + + # 1. Provide an .env if missing if [ ! -f ".env" ]; then echo "Creating default .env for testing..." - cat < .env -CORTEX_ADMIN_PASSWORD=admin -SECRET_KEY=integration-secret-key-123 -SUPER_ADMINS=admin@jerxie.com -GEMINI_API_KEY=your_gemini_api_key -EOF - echo "Please edit the .env file with your actual GEMINI_API_KEY and run this script again." 
- exit 1 + echo "CORTEX_ADMIN_PASSWORD=admin" > .env + echo "SECRET_KEY=integration-secret-key-123" >> .env + echo "SUPER_ADMINS=axieyangb@gmail.com" >> .env + echo "GEMINI_API_KEY=your_gemini_api_key_here" >> .env fi +# LOAD ENV FOR ALL SUBSEQUENT COMMANDS +set -a +source .env +set +a + # Load variables to use them in this script export $(grep -v '^#' .env | xargs) @@ -49,7 +54,13 @@ echo "Skipping rebuild and starting tests directly..." else if [ "$DOCKER_AVAILABLE" = true ] && [ "$NATIVE_MODE" = false ]; then - # 2. Clean start: purge the database / volumes (Crucial for integration tests) + # 2. Cleanup existing test environment + if docker info >/dev/null 2>&1; then + echo "Purging existing test/dev environment..." + # Stop default dev stack if running to avoid port 8002 conflict + docker compose stop ai-frontend ai-hub browser-service 2>/dev/null || true + docker-compose -p cortexai down -v + fi echo "Purging database and old containers..." docker compose down -v --remove-orphans docker kill test-node-1 test-node-2 2>/dev/null || true