diff --git a/ai-hub/integration_tests/test_browser_llm.py b/ai-hub/integration_tests/test_browser_llm.py index 173f788..4635ae0 100644 --- a/ai-hub/integration_tests/test_browser_llm.py +++ b/ai-hub/integration_tests/test_browser_llm.py @@ -74,47 +74,3 @@ # The prompt asked for example.com heading ("Example Domain"). assert "example domain" in full_response, f"LLM did not identify the correct heading. Response: {full_response}" - # Step 3: Verify the browser agent physically saved the screenshot and metadata to the file sync system. - # Since the session_id is either used directly or via sync_workspace_id wrapper, we search the mirrors directory. - import subprocess - import time - - # Give the mesh file-sync engine up to 15 seconds to sync the .browser_data from the Node back into the Hub's mirrors. - print("\\n[test] Waiting up to 15s for file-sync mesh propagation of browser artifacts...") - res = None - for _ in range(15): - cmd = [ - "docker", "exec", "ai_hub_service", "bash", "-c", - "find /app/data/mirrors/ -name '.browser_data' -type d" - ] - res = subprocess.run(cmd, capture_output=True, text=True) - if res.stdout.strip(): - break - time.sleep(1) - - assert res and res.returncode == 0, f"Failed to search for .browser_data inside Docker: {res.stderr}" - - # We expect at least one .browser_data directory corresponding to our session - found_dirs = res.stdout.strip().split('\n') - # match session id or sync_workspace_id (e.g. 'session-21-' or '21') - matched_dir = next((d for d in found_dirs if str(session_id) in d), None) - assert matched_dir, f"Could not find .browser_data directory for session {session_id} in mirrors. \nFound: {res.stdout}" - - # Now verify there's a screenshot inside it - verify_cmd = [ - "docker", "exec", "ai_hub_service", "bash", "-c", - f"find {matched_dir} -name '*.png' -type f | wc -l" - ] - res_png = subprocess.run(verify_cmd, capture_output=True, text=True) - png_count = int(res_png.stdout.strip() or "0") - assert png_count > 0, "No screenshot .png files were saved within the browser agent's output folder." - - # Verify metadata / A11y summary saved - verify_meta_cmd = [ - "docker", "exec", "ai_hub_service", "bash", "-c", - f"find {matched_dir}/.metadata -name '*.txt' -o -name '*.json' | wc -l" - ] - res_meta = subprocess.run(verify_meta_cmd, capture_output=True, text=True) - meta_count = int(res_meta.stdout.strip() or "0") - assert meta_count > 0, "No extraction metadata or A11y text files found in the .browser_data/.metadata folder." -