diff --git a/ai-hub/app/config.py b/ai-hub/app/config.py index 3e1b1e2..7f4668b 100644 --- a/ai-hub/app/config.py +++ b/ai-hub/app/config.py @@ -167,7 +167,10 @@ if self.DB_MODE == "sqlite": normalized_path = local_db_path.lstrip("./") - self.DATABASE_URL: str = f"sqlite:///./{normalized_path}" if normalized_path else "sqlite:///./data/ai_hub.db" + if external_db_url and external_db_url.startswith("sqlite"): + self.DATABASE_URL: str = external_db_url + else: + self.DATABASE_URL: str = f"sqlite:///./{normalized_path}" if normalized_path else "sqlite:///./data/ai_hub.db" else: self.DATABASE_URL: str = external_db_url or "sqlite:///./data/ai_hub.db" diff --git a/ai-hub/integration_tests/conftest.py b/ai-hub/integration_tests/conftest.py index a715d04..4b33842 100644 --- a/ai-hub/integration_tests/conftest.py +++ b/ai-hub/integration_tests/conftest.py @@ -122,42 +122,63 @@ "access_level": "use" }) - # 5. Start Docker Containers for Nodes - print("[conftest] Starting local docker node containers...") - network = "cortex-hub_default" + # 5. Start Node Processes + is_docker_disabled = os.getenv("SKIP_DOCKER_NODES", "false").lower() == "true" + node_processes = [] - # We must dynamically detect network if needed, but cortex-hub_default is expected. 
- # Kill any existing ones - subprocess.run(["docker", "rm", "-f", NODE_1, NODE_2], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) - - # Get node image - image_proc = subprocess.run(["docker", "build", "-q", "./agent-node"], capture_output=True, text=True) - image_id = image_proc.stdout.strip() - - for node_id in [NODE_1, NODE_2]: - cmd = [ - "docker", "run", "-d", - "--name", node_id, - "--network", network, - "-e", f"AGENT_NODE_ID={node_id}", - "-e", f"AGENT_AUTH_TOKEN={tokens[node_id]}", - "-e", "GRPC_ENDPOINT=ai-hub:50051", - "-e", "HUB_URL=http://ai-hub:8000", - "-e", "AGENT_TLS_ENABLED=false", - image_id - ] - subprocess.run(cmd, check=True) + if not is_docker_disabled: + print("[conftest] Starting local docker node containers...") + network = "cortex-hub_default" + subprocess.run(["docker", "rm", "-f", NODE_1, NODE_2], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) + image_proc = subprocess.run(["docker", "build", "-q", "./agent-node"], capture_output=True, text=True) + image_id = image_proc.stdout.strip() + for node_id in [NODE_1, NODE_2]: + cmd = ["docker", "run", "-d", "--name", node_id, "--network", network, "-e", f"AGENT_NODE_ID={node_id}", "-e", f"AGENT_AUTH_TOKEN={tokens[node_id]}", "-e", "GRPC_ENDPOINT=ai-hub:50051", "-e", "HUB_URL=http://ai-hub:8000", "-e", "AGENT_TLS_ENABLED=false", image_id] + subprocess.run(cmd, check=True) + else: + print("[conftest] Starting nodes as local Python background processes...") + # Resolve URLs for local process tests (assuming hub runs on localhost outside docker) + grpc_ep = os.getenv("TEST_GRPC_ENDPOINT", "127.0.0.1:50051") + http_ep = os.getenv("TEST_HUB_URL", "http://127.0.0.1:8000") + + # Determine the agent node source directory + agent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "agent-node")) + if not os.path.exists(agent_dir): + agent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "agent-node")) # Fallback + + for node_id in [NODE_1, NODE_2]: 
+ env = os.environ.copy() + env["AGENT_NODE_ID"] = node_id + env["AGENT_AUTH_TOKEN"] = tokens[node_id] + env["GRPC_ENDPOINT"] = grpc_ep + env["HUB_URL"] = http_ep + env["AGENT_TLS_ENABLED"] = "false" + env["PYTHONPATH"] = f"{agent_dir}/src" + + proc = subprocess.Popen( + ["python3", "-m", "agent_node.node"], + env=env, + cwd=agent_dir, + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL + ) + node_processes.append(proc) print("[conftest] Waiting for nodes to connect to mesh...") - time.sleep(5) # Let them handshake + time.sleep(5) client.close() - yield # Run the tests! + yield node_processes # 6. Teardown - print("\n[conftest] Tearing down node containers...") - subprocess.run(["docker", "rm", "-f", NODE_1, NODE_2], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) + print("\n[conftest] Tearing down nodes...") + if not is_docker_disabled: + subprocess.run(["docker", "rm", "-f", NODE_1, NODE_2], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) + else: + for proc in node_processes: + proc.terminate() + proc.wait() @pytest.fixture(autouse=True) def run_around_tests(setup_mesh_environment): diff --git a/ai-hub/integration_tests/test_browser_llm.py b/ai-hub/integration_tests/test_browser_llm.py index 5def577..608a36c 100644 --- a/ai-hub/integration_tests/test_browser_llm.py +++ b/ai-hub/integration_tests/test_browser_llm.py @@ -10,6 +10,7 @@ "X-User-ID": os.environ.get("SYNC_TEST_USER_ID", "") } +@pytest.mark.skipif(os.getenv("SKIP_DOCKER_NODES", "false").lower() == "true", reason="Browser skill requires a fully-loaded Docker container environment to access Chromium.") def test_browser_skill_weather(): """ Test explicitly asking the LLM context to leverage its browser skill diff --git a/run_integration_tests.sh b/run_integration_tests.sh index 90e9718..d169837 100755 --- a/run_integration_tests.sh +++ b/run_integration_tests.sh @@ -27,43 +27,71 @@ NO_REBUILD=true fi -# Check if services are already running -IS_RUNNING=false -# We check if specifically our 
ai-hub containers are "Up" via compose -if docker compose ps | grep -q 'Up'; then - IS_RUNNING=true +# Check if docker daemon is reachable (i.e., not inside DevContainer without DIND) +DOCKER_AVAILABLE=false +if docker info >/dev/null 2>&1; then + DOCKER_AVAILABLE=true; docker compose ps | grep -q 'Up' && IS_RUNNING=true || IS_RUNNING=false +else + echo "Docker daemon not reachable (likely running in a Dev Container). Switching to Native Python mode..." + export SKIP_DOCKER_NODES=true + export SYNC_TEST_BASE_URL="http://127.0.0.1:8000/api/v1" fi if [ "$NO_REBUILD" = true ] && [ "$IS_RUNNING" = true ]; then echo "Service is already running and --no-rebuild flag provided." echo "Skipping rebuild and starting tests directly..." else - # 2. Clean start: purge the database / volumes - echo "Purging database and old containers..." - docker compose down -v --remove-orphans - docker kill test-node-1 test-node-2 2>/dev/null || true - docker rm test-node-1 test-node-2 2>/dev/null || true + if [ "$DOCKER_AVAILABLE" = true ]; then + # 2. Clean start: purge the database / volumes + echo "Purging database and old containers..." + docker compose down -v --remove-orphans + docker kill test-node-1 test-node-2 2>/dev/null || true + docker rm test-node-1 test-node-2 2>/dev/null || true - # 3. Build & start the Hub stack - echo "Starting AI Hub mesh..." - # Ensure permissions are clean - mkdir -p data - - # Force rebuild and clean start if we are explicitly rebuilding - docker compose build ai-hub ai-frontend - docker compose up -d + # 3. Build & start the Hub stack + echo "Starting AI Hub mesh..." + mkdir -p data + docker compose build ai-hub ai-frontend + docker compose up -d - # Wait for healthy - echo "Waiting for AI Hub to be ready..." - sleep 5 - until curl -I -s http://localhost:8002/api/v1/users/login/local | grep -q "405"; do - echo "Waiting for AI Hub Backend..." + # Wait for healthy + echo "Waiting for AI Hub to be ready..." 
+ sleep 5 + until curl -I -s http://localhost:8002/api/v1/users/login/local | grep -q "405"; do + echo "Waiting for AI Hub Backend..." + sleep 2 + done + sleep 3 + echo "AI Hub Backend is online." + else + # Start AI Hub Backend natively via uvicorn + echo "Starting AI Hub natively in the background..." + export DATABASE_URL="sqlite:///./data/ai-hub-test.db" + export ENVIRONMENT="development" + export PATH_PREFIX="/api/v1" + # Purge local test database + rm -f ai-hub/data/ai-hub-test.db + mkdir -p ai-hub/data + + pkill -f uvicorn || true + pkill -f agent_node.node || true + sleep 1 + + cd /app/ai-hub + uvicorn app.main:app --host 0.0.0.0 --port 8000 > native_hub.log 2>&1 & + HUB_PID=$! + cd /app + + # Wait for healthy + echo "Waiting for AI Hub to be ready..." sleep 2 - done - - # Wait for DB to fully initialize its tables - sleep 3 - echo "AI Hub Backend is online." + until curl -I -s http://localhost:8000/api/v1/users/login/local | grep -q "405"; do + echo "Waiting for AI Hub Backend natively..." + sleep 2 + done + sleep 3 + echo "AI Hub Backend is online." + fi fi # 4. User setup via Pytest fixtures @@ -78,7 +106,11 @@ echo "==========================================" echo " TEARING DOWN INTEGRATION ENVIRONMENT " echo "==========================================" - docker compose down -v + if [ "$DOCKER_AVAILABLE" = true ]; then + docker compose down -v + else + kill $HUB_PID || true + fi echo "Done!" else echo "=========================================="