diff --git a/ReferenceSurfaceGenerator/backend/app/features/__init__.py b/ReferenceSurfaceGenerator/backend/app/features/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/ReferenceSurfaceGenerator/backend/app/features/__init__.py
diff --git a/ReferenceSurfaceGenerator/backend/app/features/dxf_layered_curves/__init__.py b/ReferenceSurfaceGenerator/backend/app/features/dxf_layered_curves/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/ReferenceSurfaceGenerator/backend/app/features/dxf_layered_curves/__init__.py
diff --git a/ReferenceSurfaceGenerator/backend/app/features/dxf_layered_curves/main.py b/ReferenceSurfaceGenerator/backend/app/features/dxf_layered_curves/main.py
new file mode 100644
index 0000000..fe53ed5
--- /dev/null
+++ b/ReferenceSurfaceGenerator/backend/app/features/dxf_layered_curves/main.py
@@ -0,0 +1,59 @@
+import os
+import uuid
+from fastapi import APIRouter, File, UploadFile, Depends
+from ...models import Job, JobStatus
+from .models import JobParams
+from ...job_manager import save_job_metadata
+
+router = APIRouter()
+
+UPLOAD_DIR = "/app/data/uploads"
+OUTPUT_DIR = "/app/data/outputs"
+JOB_QUEUE_DIR = "/app/data/job_queue"
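+
+# Ensure the shared data directories exist before any uploads are handled
+# (the FastAPI entrypoint no longer creates them at startup).
+for _data_dir in (UPLOAD_DIR, OUTPUT_DIR, JOB_QUEUE_DIR):
+    os.makedirs(_data_dir, exist_ok=True)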
+
+
+def get_job_manager():
+ # This is a placeholder for a more robust job manager
+ # For now, it just provides the necessary directories
+ return {
+ "upload_dir": UPLOAD_DIR,
+ "output_dir": OUTPUT_DIR,
+ "job_queue_dir": JOB_QUEUE_DIR
+ }
+
+@router.post("/upload/")
+async def upload_mesh_file(
+ file: UploadFile = File(...),
+ params: JobParams = Depends(),
+ job_manager: dict = Depends(get_job_manager)
+):
+ """
+ Accepts a file upload and saves it to a temporary location.
+ Creates a new job and returns its ID.
+ """
+ job_id = uuid.uuid4()
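+    # Prefix stored filenames with the job ID so concurrent uploads with the same name cannot collide.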
+ input_path = os.path.join(job_manager["upload_dir"], f"{job_id}_{file.filename}")
+ output_path = os.path.join(job_manager["output_dir"], f"{job_id}_curves.dxf")
+
+ # Save the uploaded file
+ with open(input_path, "wb") as buffer:
+ buffer.write(await file.read())
+
+ # Create and save the initial job metadata
+ job = Job(
+ id=job_id,
+ feature_id="dxf_layered_curves",
+ filename=file.filename,
+ input_path=input_path,
+ output_path=output_path,
+        params=params.model_dump(),
+ status=JobStatus.QUEUED,
+ message=f"File ''{file.filename}'' uploaded, job queued."
+ )
+ save_job_metadata(job)
+
+ # Create a trigger file for the worker
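+    # The worker polls JOB_QUEUE_DIR and starts processing when it sees this trigger.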
+ with open(os.path.join(job_manager["job_queue_dir"], f"{job_id}.trigger"), "w") as f:
+ f.write(str(job_id))
+
+ return {"job_id": str(job.id), "filename": job.filename, "status": job.status.value}
\ No newline at end of file
diff --git a/ReferenceSurfaceGenerator/backend/app/features/dxf_layered_curves/models.py b/ReferenceSurfaceGenerator/backend/app/features/dxf_layered_curves/models.py
new file mode 100644
index 0000000..d4431d3
--- /dev/null
+++ b/ReferenceSurfaceGenerator/backend/app/features/dxf_layered_curves/models.py
@@ -0,0 +1,28 @@
+from pydantic import BaseModel, Field
+from typing import Optional
+import uuid
+from enum import Enum
+import datetime
+
+class JobStatus(str, Enum):
+ PENDING = "PENDING"
+ QUEUED = "QUEUED"
+ PROCESSING = "PROCESSING"
+ COMPLETE = "COMPLETE"
+ FAILED = "FAILED"
+
+class JobParams(BaseModel):
+ num_layers: int = Field(20, description="Number of layers to slice the mesh into.")
+ num_points_per_layer: int = Field(30, description="Number of points to define the curve of each layer.")
+
+class Job(BaseModel):
+ id: uuid.UUID
+ filename: str
+ status: JobStatus = JobStatus.PENDING
+ progress: int = 0
+ message: str = "Job created, awaiting processing."
+ input_path: str
+ output_path: str
+ download_url: Optional[str] = None
+ params: JobParams
+ timestamp: datetime.datetime = Field(default_factory=lambda: datetime.datetime.now(datetime.timezone.utc))
diff --git a/ReferenceSurfaceGenerator/backend/app/features/dxf_layered_curves/processing.py b/ReferenceSurfaceGenerator/backend/app/features/dxf_layered_curves/processing.py
new file mode 100644
index 0000000..3b7f93e
--- /dev/null
+++ b/ReferenceSurfaceGenerator/backend/app/features/dxf_layered_curves/processing.py
@@ -0,0 +1,127 @@
+import trimesh
+import numpy as np
+import ezdxf
+from scipy.spatial import ConvexHull
+from scipy.interpolate import splprep, splev
+import os
+import logging
+from ...models import Job
+
+# Configure logging
+logging.basicConfig(level=logging.INFO, format='[%(levelname)s] %(message)s')
+
+async def process(job: Job):
+ """
+ Processes a mesh file to create layered curves and saves it as a DXF file.
+ This is a generator function that yields progress updates.
+ """
+ input_file = job.input_path
+ output_file = job.output_path
+ num_layers = job.params.get("num_layers", 20)
+ num_points_per_layer = job.params.get("num_points_per_layer", 30)
+ layer_errors = []
+
+ try:
+ yield {"status": "processing", "progress": 5, "message": "Loading and repairing mesh..."}
+ loaded = trimesh.load(input_file)
+
+ if isinstance(loaded, trimesh.Scene):
+ meshes = [g for g in loaded.geometry.values() if isinstance(g, trimesh.Trimesh)]
+ if not meshes:
+ raise ValueError("No valid mesh geometry found in scene.")
+ mesh = trimesh.util.concatenate(meshes)
+ else:
+ mesh = loaded
+
+ mesh.update_faces(mesh.nondegenerate_faces())
+ mesh.fill_holes()
+
+ if len(mesh.vertices) == 0:
+ raise ValueError("Mesh has no vertices after processing.")
+
+ yield {"status": "processing", "progress": 15, "message": "Optimizing orientation..."}
+ extents = mesh.extents
+ longest_axis_index = np.argmax(extents)
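+        # Rotate the mesh so its longest extent lies along +Z, the slicing axis.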
+ if longest_axis_index != 2:
+ source_vec = np.eye(3)[longest_axis_index]
+ transform = trimesh.geometry.align_vectors(source_vec, [0, 0, 1])
+ mesh.apply_transform(transform)
+
+ mesh.vertices -= mesh.center_mass
+
+ bounds = mesh.bounds
+ z_min, z_max = bounds[:, 2]
+ z_height = z_max - z_min
+ safe_buffer = min(z_height * 0.01, 0.05)
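+        # Inset the first and last slice planes slightly so sections taken exactly at the end caps do not degenerate.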
+ z_levels = np.linspace(z_min + safe_buffer, z_max - safe_buffer, num_layers)
+
+ all_profiles_3d = []
+ yield {"status": "processing", "progress": 30, "message": "Starting slice generation..."}
+
+ for i, z in enumerate(z_levels):
+ section = mesh.section(plane_origin=[0, 0, z], plane_normal=[0, 0, 1])
+
+ if section is None:
+ logging.warning(f"Layer {i+1}/{num_layers}: No section found at z={z:.2f}.")
+ layer_errors.append(f"Layer {i+1} (z={z:.2f}): No section.")
+ continue
+
+ try:
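+                # to_planar() returns (Path2D, transform); .discrete is a list of (n, 2) vertex arrays.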
+ paths = section.to_planar()[0].discrete
+ if not paths:
+ layer_errors.append(f"Layer {i+1}: No discrete paths.")
+ continue
+
+ slice_points = np.vstack(paths)
+
+ if len(slice_points) < 5:
+ logging.warning(f"Layer {i+1}: Insufficient points ({len(slice_points)}).")
+ layer_errors.append(f"Layer {i+1}: Insufficient points.")
+ continue
+
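+                # The 2D convex hull gives an ordered outer boundary, discarding interior points.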
+ hull = ConvexHull(slice_points)
+ hull_pts = slice_points[hull.vertices]
+
+                # Fit a closed periodic cubic spline (per=True) through the ordered hull points.
+                tck, u = splprep(
+                    [hull_pts[:, 0], hull_pts[:, 1]], s=0, per=True)
+ # Generate N points over the interval [0, 1), excluding the endpoint because it's a closed loop
+ u_new = np.linspace(0, 1, num_points_per_layer, endpoint=False)
+ x_new, y_new = splev(u_new, tck)
+
+ pts_2d = np.column_stack((x_new, y_new))
+ start_idx = np.argmax(pts_2d[:, 0])
+ pts_2d = np.roll(pts_2d, -start_idx, axis=0)
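+                # Roll each profile so it starts at its maximum-X point, giving every layer a consistent seam.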
+
+ profile_3d = np.column_stack((pts_2d, np.full(len(pts_2d), z)))
+ all_profiles_3d.append(profile_3d)
+
+ except Exception as e:
+ logging.warning(f"Layer {i+1}: Error: {e}")
+ layer_errors.append(f"Layer {i+1}: {str(e)[:50]}")
+ continue
+
+ yield {"status": "processing", "progress": 30 + int((i/num_layers)*50), "message": f"Layer {i+1}/{num_layers}"}
+
+ if not all_profiles_3d:
+ raise ValueError("Could not generate any valid profiles. Result would be empty.")
+
+ yield {"status": "processing", "progress": 90, "message": "Writing DXF..."}
+ doc = ezdxf.new()
+ msp = doc.modelspace()
+ for poly in all_profiles_3d:
+ msp.add_polyline3d(poly, close=True)
+
+ doc.saveas(output_file)
+
+ if os.path.exists(output_file):
+ msg = "DXF created successfully."
+ if layer_errors:
+ msg += f" (with {len(layer_errors)} skipped layers)"
+ yield {"status": "complete", "progress": 100, "message": msg}
+ else:
+ raise IOError(f"File not found after save: {output_file}")
+
+ except Exception as e:
+ logging.error(f"Job failed: {e}")
+ yield {"status": "failed", "progress": 0, "message": str(e)}
diff --git a/ReferenceSurfaceGenerator/backend/app/job_manager.py b/ReferenceSurfaceGenerator/backend/app/job_manager.py
new file mode 100644
index 0000000..30304fd
--- /dev/null
+++ b/ReferenceSurfaceGenerator/backend/app/job_manager.py
@@ -0,0 +1,60 @@
+import os
+import uuid
+import json
+from typing import List, Optional
+from .models import Job
+
+JOBS_METADATA_DIR = "/app/data/jobs_metadata"
+os.makedirs(JOBS_METADATA_DIR, exist_ok=True)
+
+def _get_job_metadata_path(job_id: uuid.UUID) -> str:
+ """
+ Returns the filesystem path for a job's metadata file.
+ """
+ return os.path.join(JOBS_METADATA_DIR, f"{job_id}.json")
+
+def save_job_metadata(job: Job):
+ """
+ Saves a Job object's metadata to a JSON file atomically.
+ """
+ path = _get_job_metadata_path(job.id)
+ temp_path = f"{path}.tmp"
+ with open(temp_path, "w") as f:
+ json.dump(job.model_dump(mode='json'), f, indent=4)
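+    # os.rename within the same directory is atomic on POSIX, so readers never see a half-written file.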
+ os.rename(temp_path, path)
+
+def load_job_metadata(job_id: uuid.UUID) -> Optional[Job]:
+ """
+ Loads a Job object's metadata from a JSON file.
+ """
+ path = _get_job_metadata_path(job_id)
+ if os.path.exists(path):
+ try:
+ with open(path, "r") as f:
+ data = json.load(f)
+ return Job(**data)
+ except json.JSONDecodeError:
+ print(f"Error: Corrupt job metadata file: {path}")
+ os.remove(path) # Clean up corrupt file
+ return None
+ return None
+
+def load_all_job_metadata() -> List[Job]:
+ """
+ Loads metadata for all jobs from the jobs_metadata directory.
+ """
+ jobs = []
+ for filename in os.listdir(JOBS_METADATA_DIR):
+ if filename.endswith(".json"):
+ job_id_str = filename.replace(".json", "")
+ try:
+ job_id = uuid.UUID(job_id_str)
+ job = load_job_metadata(job_id)
+ if job:
+ jobs.append(job)
+ except ValueError:
+ # Skip invalid filenames
+ continue
+ # Sort by timestamp, newest first
+ jobs.sort(key=lambda j: j.timestamp, reverse=True)
+ return jobs
diff --git a/ReferenceSurfaceGenerator/backend/app/main.py b/ReferenceSurfaceGenerator/backend/app/main.py
index a5f7c99..0d27d57 100644
--- a/ReferenceSurfaceGenerator/backend/app/main.py
+++ b/ReferenceSurfaceGenerator/backend/app/main.py
@@ -1,24 +1,19 @@
import asyncio
import os
import uuid
-import json
-import datetime
-from typing import Dict, List, Optional
-
-from fastapi import FastAPI, File, UploadFile, WebSocket, WebSocketDisconnect, HTTPException, status, Form
+import importlib
+from typing import List
+from fastapi import FastAPI, WebSocket, WebSocketDisconnect, HTTPException, status
from fastapi.responses import FileResponse
from starlette.websockets import WebSocketState
from fastapi.middleware.cors import CORSMiddleware
from fastapi.staticfiles import StaticFiles
-import ezdxf
-
-# Import the core processing logic and data models
+from .job_manager import load_job_metadata, load_all_job_metadata, save_job_metadata, _get_job_metadata_path
from .dxf_parser import parse_dxf_for_viewing
-from .models import Job, JobStatus
+from .models import Job
app = FastAPI()
-# Allow all origins for simplicity, can be locked down in production
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
@@ -27,206 +22,87 @@
allow_headers=["*"],
)
-# Create directories for uploads, outputs, and job metadata
-UPLOAD_DIR = "/app/data/uploads"
-OUTPUT_DIR = "/app/data/outputs"
-JOBS_METADATA_DIR = "/app/data/jobs_metadata"
-JOB_QUEUE_DIR = "/app/data/job_queue"
-os.makedirs(UPLOAD_DIR, exist_ok=True)
-os.makedirs(OUTPUT_DIR, exist_ok=True)
-os.makedirs(JOBS_METADATA_DIR, exist_ok=True)
-os.makedirs(JOB_QUEUE_DIR, exist_ok=True)
-
-# --- Helper Functions for Job Metadata Persistence ---
-
-def _get_job_metadata_path(job_id: uuid.UUID) -> str:
- """
- Returns the filesystem path for a job's metadata file.
- """
- return os.path.join(JOBS_METADATA_DIR, f"{job_id}.json")
-
-def _save_job_metadata(job: Job):
- """
- Saves a Job object's metadata to a JSON file atomically.
- """
- path = _get_job_metadata_path(job.id)
- temp_path = f"{path}.tmp"
- with open(temp_path, "w") as f:
- # Using model_dump for Pydantic v2
- json.dump(job.model_dump(mode='json'), f, indent=4)
- os.rename(temp_path, path)
-
-def _load_job_metadata(job_id: uuid.UUID) -> Optional[Job]:
- """
- Loads a Job object's metadata from a JSON file.
- """
- path = _get_job_metadata_path(job_id)
- if os.path.exists(path):
- try:
- with open(path, "r") as f:
- data = json.load(f)
- return Job(**data)
- except json.JSONDecodeError:
- print(f"Error: Corrupt job metadata file: {path}")
- os.remove(path) # Clean up corrupt file
- return None
- return None
-
-def _load_all_job_metadata() -> List[Job]:
- """
- Loads metadata for all jobs from the jobs_metadata directory.
- """
- jobs = []
- for filename in os.listdir(JOBS_METADATA_DIR):
- if filename.endswith(".json"):
- job_id_str = filename.replace(".json", "")
+# --- Feature Loading ---
+def load_features():
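+    # Convention: each feature package exposes an APIRouter named "router" in its main.py;
+    # the worker separately looks for an async "process" generator in its processing.py.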
+ features_dir = os.path.join(os.path.dirname(__file__), "features")
+ for feature_name in os.listdir(features_dir):
+        if os.path.isdir(os.path.join(features_dir, feature_name)) and not feature_name.startswith("__"):
try:
- job_id = uuid.UUID(job_id_str)
- job = _load_job_metadata(job_id)
- if job:
- jobs.append(job)
- except ValueError:
- # Skip invalid filenames
- continue
- # Sort by timestamp, newest first
- jobs.sort(key=lambda j: j.timestamp, reverse=True)
- return jobs
+ module = importlib.import_module(f".features.{feature_name}.main", package=__package__)
+ if hasattr(module, "router"):
+ app.include_router(module.router, prefix=f"/api/features/{feature_name}", tags=[feature_name])
+ print(f"Successfully loaded feature: {feature_name}")
+ except ImportError as e:
+ print(f"Failed to load feature {feature_name}: {e}")
-@app.post("/upload/")
-async def upload_mesh_file(
- file: UploadFile = File(...),
- num_layers: int = Form(20),
- num_points_per_layer: int = Form(30)
-):
- """
- Accepts a file upload and saves it to a temporary location.
- Creates a new job and returns its ID.
- """
- job_id = uuid.uuid4()
- input_path = os.path.join(UPLOAD_DIR, f"{job_id}_{file.filename}")
- output_path = os.path.join(OUTPUT_DIR, f"{job_id}_curves.dxf")
- # Save the uploaded file
- with open(input_path, "wb") as buffer:
- buffer.write(await file.read())
- # Create and save the initial job metadata
- job = Job(
- id=job_id,
- filename=file.filename,
- input_path=input_path,
- output_path=output_path,
- num_layers=num_layers,
- num_points_per_layer=num_points_per_layer,
- status=JobStatus.QUEUED, # Initial status is now QUEUED
- message=f"File ''{file.filename}'' uploaded, job queued."
- )
- _save_job_metadata(job)
- # Create a trigger file for the worker
- with open(os.path.join(JOB_QUEUE_DIR, f"{job_id}.trigger"), "w") as f:
- f.write(str(job_id))
- return {"job_id": str(job.id), "filename": job.filename, "status": job.status.value}
+load_features()
-async def track_job_progress(websocket: WebSocket, initial_job: Job):
- """
- Monitors a job's metadata file and sends updates over a WebSocket.
- Uses the initially provided job object for the first status check.
- """
+# --- WebSocket Job Tracking ---
+async def track_job_progress(websocket: WebSocket, job_id: uuid.UUID):
+ initial_job = load_job_metadata(job_id)
+ if not initial_job:
+ await websocket.send_json({"status": "error", "message": "Job not found."})
+ return
+
last_update_content = initial_job.model_dump(mode='json')
- job_id = initial_job.id
+ await websocket.send_json(last_update_content)
+
while websocket.client_state == WebSocketState.CONNECTED:
- job = _load_job_metadata(job_id)
+ job = load_job_metadata(job_id)
if not job:
await websocket.send_json({"status": "error", "message": "Job disappeared or was deleted."})
break
+
update_content = job.model_dump(mode='json')
if update_content != last_update_content:
await websocket.send_json(update_content)
last_update_content = update_content
- # Stop tracking if the job is in a terminal state
- if job.status in [JobStatus.COMPLETE, JobStatus.FAILED]:
+
+ if job.status in ["COMPLETE", "FAILED"]:
break
- await asyncio.sleep(0.5) # Check for updates every 500ms
+
+ await asyncio.sleep(0.5)
@app.websocket("/ws/{job_id}")
async def websocket_endpoint(websocket: WebSocket, job_id: uuid.UUID):
- """
- Handles the WebSocket connection for processing the file and sending progress.
- """
await websocket.accept()
- job = _load_job_metadata(job_id)
- if not job:
- await websocket.send_json({"status": "error", "message": "Job not found."})
- await websocket.close()
- return
try:
- # Send the initial status and then start tracking for updates
- await websocket.send_json(job.model_dump(mode='json'))
- await track_job_progress(websocket, job)
+ await track_job_progress(websocket, job_id)
except WebSocketDisconnect:
print(f"Client disconnected from job {job_id}")
finally:
- # The connection is automatically closed by Starlette when the endpoint function returns.
- # No need to call websocket.close() manually, as it can lead to race conditions
- # where both client and server try to close the connection simultaneously.
print(f"WebSocket connection handler finished for job {job_id}")
-# The async_generator_wrapper is no longer needed as processing is fully offloaded
-# to the worker process.
-@app.get("/api/download/{filename}")
-async def download_file(filename: str):
- """
- Serves the generated DXF file for download.
- """
- path = os.path.join(OUTPUT_DIR, filename)
- if os.path.exists(path):
- return FileResponse(path, media_type='application/vnd.dxf', filename=filename)
- return {"error": "File not found"}
-
-# --- New API Endpoints for Job Management ---
-
+# --- Generic Job and File Management API Endpoints ---
@app.get("/api/jobs", response_model=List[Job])
async def get_all_jobs():
- """
- Retrieves a list of all processing jobs.
- """
- return _load_all_job_metadata()
+ return load_all_job_metadata()
@app.get("/api/jobs/{job_id}", response_model=Job)
async def get_job_status(job_id: uuid.UUID):
- """
- Retrieves the status and details for a specific job.
- """
- job = _load_job_metadata(job_id)
+ job = load_job_metadata(job_id)
if not job:
- raise HTTPException(
- status_code=status.HTTP_404_NOT_FOUND, detail="Job not found")
+ raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Job not found")
return job
@app.delete("/api/jobs/{job_id}", status_code=status.HTTP_204_NO_CONTENT)
async def delete_job(job_id: uuid.UUID):
- """
- Deletes a specific job's metadata and associated output file.
- """
- job = _load_job_metadata(job_id)
+ job = load_job_metadata(job_id)
if not job:
- raise HTTPException(
- status_code=status.HTTP_404_NOT_FOUND, detail="Job not found")
- # Delete input file if it exists
+ raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Job not found")
+
if job.input_path and os.path.exists(job.input_path):
os.remove(job.input_path)
- # Delete output file if it exists
if job.output_path and os.path.exists(job.output_path):
os.remove(job.output_path)
- # Delete metadata file
+
os.remove(_get_job_metadata_path(job_id))
- return # 24 No Content
-
-
@app.get("/api/jobs/{job_id}/view")
async def get_job_output_for_viewing(job_id: uuid.UUID):
"""
Retrieves the geometric data from a job's output DXF file in a web-friendly JSON format.
"""
- job = _load_job_metadata(job_id)
+ job = load_job_metadata(job_id)
if not job:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Job not found")
@@ -242,11 +118,26 @@
raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=f"An unexpected error occurred while parsing the DXF file: {e}")
-# Mount the static directory to serve the frontend if it exists
-# This is necessary because in local dev, the frontend is served by `npm start`
-# and the `/app/static` directory (from the Docker build) won't exist.
+@app.get("/api/download/{job_id}")
+async def download_file(job_id: uuid.UUID):
+ job = load_job_metadata(job_id)
+ if not job or not job.output_path or not os.path.exists(job.output_path):
+ raise HTTPException(status_code=404, detail="File not found")
+
+ filename = os.path.basename(job.output_path)
+ return FileResponse(job.output_path, media_type='application/octet-stream', filename=filename)
+
+@app.get("/api/features")
+async def get_features():
+    """
+    Returns a list of available features.
+    """
+    features_dir = os.path.join(os.path.dirname(__file__), "features")
+    return [
+        f for f in os.listdir(features_dir)
+        if os.path.isdir(os.path.join(features_dir, f)) and not f.startswith("__")
+    ]
+
+# --- Static Files Hosting ---
+# Mounted last so the API routes above are matched before this catch-all mount.
if os.path.isdir("/app/static"):
    app.mount("/", StaticFiles(directory="/app/static", html=True), name="static")
-
-
diff --git a/ReferenceSurfaceGenerator/backend/app/models.py b/ReferenceSurfaceGenerator/backend/app/models.py
index d167c82..af8998f 100644
--- a/ReferenceSurfaceGenerator/backend/app/models.py
+++ b/ReferenceSurfaceGenerator/backend/app/models.py
@@ -1,9 +1,9 @@
import datetime
import uuid
from enum import Enum
-from typing import Optional
+from typing import Optional, Dict, Any
-from pydantic import BaseModel
+from pydantic import BaseModel, Field
class JobStatus(str, Enum):
PENDING = "PENDING"
@@ -14,14 +14,15 @@
class Job(BaseModel):
id: uuid.UUID
+ feature_id: str
filename: str
status: JobStatus = JobStatus.PENDING
- progress: int = 0 # Percentage from 0 to 100
+ progress: int = 0
message: str = "Job created, awaiting processing."
# Paths on the server filesystem
input_path: str
- output_path: str
+ output_path: Optional[str] = None
# URL for downloading the output
download_url: Optional[str] = None
@@ -29,7 +30,6 @@
view_url: Optional[str] = None
# Processing parameters
- num_layers: int = 20
- num_points_per_layer: int = 30
+ params: Dict[str, Any] = {}
- timestamp: datetime.datetime = datetime.datetime.now(datetime.timezone.utc)
+ timestamp: datetime.datetime = Field(default_factory=lambda: datetime.datetime.now(datetime.timezone.utc))
\ No newline at end of file
diff --git a/ReferenceSurfaceGenerator/backend/app/processing.py b/ReferenceSurfaceGenerator/backend/app/processing.py
deleted file mode 100644
index 87b4858..0000000
--- a/ReferenceSurfaceGenerator/backend/app/processing.py
+++ /dev/null
@@ -1,120 +0,0 @@
-import trimesh
-import numpy as np
-import ezdxf
-from scipy.spatial import ConvexHull
-from scipy.interpolate import splprep, splev
-import os
-import logging
-
-# Configure logging
-logging.basicConfig(level=logging.INFO, format='[%(levelname)s] %(message)s')
-
-def create_layered_curves_dxf(input_file, output_file, num_layers=20, num_points_per_layer=30):
- layer_errors = []
- try:
- yield {"status": "processing", "progress": 5, "message": "Loading and repairing mesh..."}
- loaded = trimesh.load(input_file)
-
- if isinstance(loaded, trimesh.Scene):
- meshes = [g for g in loaded.geometry.values() if isinstance(g, trimesh.Trimesh)]
- if not meshes:
- raise ValueError("No valid mesh geometry found in scene.")
- mesh = trimesh.util.concatenate(meshes)
- else:
- mesh = loaded
-
- mesh.update_faces(mesh.nondegenerate_faces())
- mesh.fill_holes()
-
- if len(mesh.vertices) == 0:
- raise ValueError("Mesh has no vertices after processing.")
-
- yield {"status": "processing", "progress": 15, "message": "Optimizing orientation..."}
- extents = mesh.extents
- longest_axis_index = np.argmax(extents)
- if longest_axis_index != 2:
- source_vec = np.eye(3)[longest_axis_index]
- transform = trimesh.geometry.align_vectors(source_vec, [0, 0, 1])
- mesh.apply_transform(transform)
-
- mesh.vertices -= mesh.center_mass
-
- bounds = mesh.bounds
- z_min, z_max = bounds[:, 2]
- z_height = z_max - z_min
- safe_buffer = min(z_height * 0.01, 0.05)
- z_levels = np.linspace(z_min + safe_buffer, z_max - safe_buffer, num_layers)
-
- all_profiles_3d = []
- yield {"status": "processing", "progress": 30, "message": "Starting slice generation..."}
-
- for i, z in enumerate(z_levels):
- section = mesh.section(plane_origin=[0, 0, z], plane_normal=[0, 0, 1])
-
- if section is None:
- logging.warning(f"Layer {i+1}/{num_layers}: No section found at z={z:.2f}.")
- layer_errors.append(f"Layer {i+1} (z={z:.2f}): No section.")
- continue
-
- try:
- # Use to_planar() to get 2D representation
- # Note: to_2D() is the newer recommended method in trimesh
- paths = section.to_planar()[0].discrete
- if not paths:
- layer_errors.append(f"Layer {i+1}: No discrete paths.")
- continue
-
- slice_points = np.vstack(paths)
-
- # Splprep requires more points than the degree of the spline (k=3 by default)
- if len(slice_points) < 5:
- logging.warning(f"Layer {i+1}: Insufficient points ({len(slice_points)}).")
- layer_errors.append(f"Layer {i+1}: Insufficient points.")
- continue
-
- hull = ConvexHull(slice_points)
- hull_pts = slice_points[hull.vertices]
-
- # splprep needs unique points, so we add a tiny bit of noise if needed
- tck, u = splprep(
- [hull_pts[:, 0], hull_pts[:, 1]], s=0, per=True)
- # Generate N points over the interval [0, 1), excluding the endpoint because it's a closed loop
- u_new = np.linspace(0, 1, num_points_per_layer, endpoint=False)
- x_new, y_new = splev(u_new, tck)
-
- pts_2d = np.column_stack((x_new, y_new))
- start_idx = np.argmax(pts_2d[:, 0])
- pts_2d = np.roll(pts_2d, -start_idx, axis=0)
-
- profile_3d = np.column_stack((pts_2d, np.full(len(pts_2d), z)))
- all_profiles_3d.append(profile_3d)
-
- except Exception as e:
- logging.warning(f"Layer {i+1}: Error: {e}")
- layer_errors.append(f"Layer {i+1}: {str(e)[:50]}")
- continue
-
- yield {"status": "processing", "progress": 30 + int((i/num_layers)*50), "message": f"Layer {i+1}/{num_layers}"}
-
- if not all_profiles_3d:
- raise ValueError("Could not generate any valid profiles. Result would be empty.")
-
- yield {"status": "processing", "progress": 90, "message": "Writing DXF..."}
- doc = ezdxf.new()
- msp = doc.modelspace()
- for poly in all_profiles_3d:
- msp.add_polyline3d(poly, close=True)
-
- doc.saveas(output_file)
-
- if os.path.exists(output_file):
- msg = "DXF created successfully."
- if layer_errors:
- msg += f" (with {len(layer_errors)} skipped layers)"
- yield {"status": "complete", "progress": 100, "message": msg}
- else:
- raise IOError(f"File not found after save: {output_file}")
-
- except Exception as e:
- logging.error(f"Job failed: {e}")
- yield {"status": "failed", "progress": 0, "message": str(e)}
\ No newline at end of file
diff --git a/ReferenceSurfaceGenerator/backend/app/tests/test_api.py b/ReferenceSurfaceGenerator/backend/app/tests/test_api.py
index 9ade3a1..18bbd80 100644
--- a/ReferenceSurfaceGenerator/backend/app/tests/test_api.py
+++ b/ReferenceSurfaceGenerator/backend/app/tests/test_api.py
@@ -21,7 +21,7 @@
# --- Test Cases ---
-@patch('app.main._load_job_metadata')
+@patch('app.main.load_job_metadata')
def test_get_job_output_for_viewing_success(mock_load_job):
"""
Tests the successful retrieval of parsed DXF data for the viewer.
@@ -30,17 +30,31 @@
job_id = uuid.uuid4()
# In a real scenario, the file would be created by the worker.
# For this test, we create it manually from a known good source.
- from app.processing import create_layered_curves_dxf
+ from app.features.dxf_layered_curves.processing import process as create_layered_curves_dxf
output_dxf_path = os.path.join(TEST_OUTPUT_DIR, f"{job_id}_test.dxf")
# Run the generator to create the file
- generator = create_layered_curves_dxf(VALID_FILE, output_dxf_path, num_layers=5)
- for _ in generator:
- pass # Consume the generator
+ import asyncio
+ job = Job(
+ id=job_id,
+ feature_id="dxf_layered_curves",
+ filename="cube.obj",
+ status=JobStatus.QUEUED,
+ input_path=VALID_FILE,
+ output_path=output_dxf_path,
+ params={"num_layers": 5, "num_points_per_layer": 10},
+ )
+
+ async def run_generator():
+ async for _ in create_layered_curves_dxf(job):
+ pass
+
+ asyncio.run(run_generator())
# Mock the job that the endpoint will load
mock_job = Job(
id=job_id,
+ feature_id="dxf_layered_curves",
filename="cube.obj",
status=JobStatus.COMPLETE,
input_path="dummy",
@@ -60,7 +74,7 @@
assert isinstance(data["polylines"][0][0], list) # Check for list of points
assert len(data["polylines"][0][0]) == 3 # Check for [x, y, z] coordinates
-@patch('app.main._load_job_metadata')
+@patch('app.main.load_job_metadata')
def test_get_job_output_for_viewing_no_file(mock_load_job):
"""
Tests the case where the job exists but its output file is missing.
@@ -68,6 +82,7 @@
job_id = uuid.uuid4()
mock_job = Job(
id=job_id,
+ feature_id="dxf_layered_curves",
filename="test.obj",
status=JobStatus.COMPLETE,
input_path="dummy",
diff --git a/ReferenceSurfaceGenerator/backend/app/tests/test_main.py b/ReferenceSurfaceGenerator/backend/app/tests/test_main.py
new file mode 100644
index 0000000..18e2bf4
--- /dev/null
+++ b/ReferenceSurfaceGenerator/backend/app/tests/test_main.py
@@ -0,0 +1,9 @@
+from fastapi.testclient import TestClient
+from ..main import app
+
+client = TestClient(app)
+
+def test_read_features():
+ response = client.get("/api/features")
+ assert response.status_code == 200
+ assert "dxf_layered_curves" in response.json()
diff --git a/ReferenceSurfaceGenerator/backend/app/tests/test_processing.py b/ReferenceSurfaceGenerator/backend/app/tests/test_processing.py
index f2883cd..1ee56ee 100644
--- a/ReferenceSurfaceGenerator/backend/app/tests/test_processing.py
+++ b/ReferenceSurfaceGenerator/backend/app/tests/test_processing.py
@@ -1,21 +1,30 @@
import os
import shutil
import pytest
+import uuid
from unittest.mock import patch, MagicMock
-import trimesh
import numpy as np
-import sys
-
-import ezdxf
-
-sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
-from processing import create_layered_curves_dxf
+from app.features.dxf_layered_curves.processing import process
+from app.models import Job, JobStatus
ASSETS_DIR = os.path.join(os.path.dirname(__file__), "assets")
TEST_OUTPUT_DIR = os.path.join(os.path.dirname(__file__), "test_outputs")
VALID_FILE = os.path.join(ASSETS_DIR, "cube.obj")
EMPTY_FILE = os.path.join(ASSETS_DIR, "empty.obj")
+@pytest.fixture
+def job():
+ job_id = uuid.uuid4()
+ output_file = os.path.join(TEST_OUTPUT_DIR, f"{job_id}_test.dxf")
+ return Job(
+ id=job_id,
+ feature_id="dxf_layered_curves",
+ filename="cube.obj",
+ input_path=VALID_FILE,
+ output_path=output_file,
+ params={"num_layers": 5, "num_points_per_layer": 10},
+ )
+
@pytest.fixture(autouse=True)
def setup_and_teardown():
if os.path.exists(TEST_OUTPUT_DIR):
@@ -23,18 +32,23 @@
os.makedirs(TEST_OUTPUT_DIR)
yield
-def run_generator_to_completion(generator):
+async def run_generator_to_completion(generator):
final_result = None
- for result in generator:
+ async for result in generator:
final_result = result
return final_result
+@pytest.mark.asyncio
+async def test_happy_path_successful_processing(job):
+ final_status = await run_generator_to_completion(process(job))
+ assert final_status["status"] == "complete"
+ assert os.path.exists(job.output_path)
+
+@pytest.mark.asyncio
@patch('trimesh.Trimesh.section', autospec=True)
-def test_partial_slicing_failure_completes_with_warnings(mock_section):
- output_file = os.path.join(TEST_OUTPUT_DIR, "partial.dxf")
+async def test_partial_slicing_failure_completes_with_warnings(mock_section, job):
call_counter = {"count": 0}
- # FIX: Use 5+ points so scipy's splprep (m > k) doesn't fail
fake_path = MagicMock()
fake_path.discrete = [np.array([[0,0], [1,0], [1,1], [0,1], [0,0]])]
@@ -49,17 +63,14 @@
mock_section.side_effect = side_effect
- generator = create_layered_curves_dxf(VALID_FILE, output_file, num_layers=10)
- final_status = run_generator_to_completion(generator)
-
+ final_status = await run_generator_to_completion(process(job))
assert final_status["status"] == "complete"
assert "skipped" in final_status["message"] or "successfully" in final_status["message"]
- assert os.path.exists(output_file)
+ assert os.path.exists(job.output_path)
+@pytest.mark.asyncio
@patch('trimesh.Trimesh.section', return_value=None)
-def test_total_slicing_failure_fails_job(mock_section):
- output_file = os.path.join(TEST_OUTPUT_DIR, "total_fail.dxf")
- generator = create_layered_curves_dxf(VALID_FILE, output_file, num_layers=5)
- final_status = run_generator_to_completion(generator)
+async def test_total_slicing_failure_fails_job(mock_section, job):
+ final_status = await run_generator_to_completion(process(job))
assert final_status["status"] == "failed"
- assert not os.path.exists(output_file)
\ No newline at end of file
+ assert not os.path.exists(job.output_path)
diff --git a/ReferenceSurfaceGenerator/backend/app/worker.py b/ReferenceSurfaceGenerator/backend/app/worker.py
index 39c0371..cce0471 100644
--- a/ReferenceSurfaceGenerator/backend/app/worker.py
+++ b/ReferenceSurfaceGenerator/backend/app/worker.py
@@ -2,111 +2,74 @@
import time
import uuid
import json
-from typing import Optional
import sys
-import datetime
+import importlib
+import asyncio
-# Add the app directory to the Python path to allow imports from .models and .processing
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from app.models import Job, JobStatus
-from app.processing import create_layered_curves_dxf
+from app.job_manager import save_job_metadata, load_job_metadata
-# Define paths - ensure these match main.py
-UPLOAD_DIR = "/app/data/uploads"
-OUTPUT_DIR = "/app/data/outputs"
-JOBS_METADATA_DIR = "/app/data/jobs_metadata"
JOB_QUEUE_DIR = "/app/data/job_queue"
+JOBS_METADATA_DIR = "/app/data/jobs_metadata"
-# Helper to save job metadata (duplicate from main.py for worker self-sufficiency)
-def _save_job_metadata(job: Job):
- path = os.path.join(JOBS_METADATA_DIR, f"{job.id}.json")
- temp_path = f"{path}.tmp"
- with open(temp_path, "w") as f:
- # Use model_dump(mode='json') for Pydantic v2 to ensure correct serialization of types like UUID and enums
- json.dump(job.model_dump(mode='json'), f, indent=4)
- os.rename(temp_path, path)
-
-# Helper to load job metadata (duplicate from main.py for worker self-sufficiency)
-def _load_job_metadata(job_id: uuid.UUID) -> Optional[Job]:
- path = os.path.join(JOBS_METADATA_DIR, f"{job_id}.json")
- if os.path.exists(path):
- try:
- with open(path, "r") as f:
- data = json.load(f)
- return Job(**data)
- except json.JSONDecodeError:
- print(f"[WORKER] Error: Corrupt job metadata file: {path}")
- os.remove(path)
- return None
- return None
+def get_processing_function(feature_id: str):
+ """
+ Dynamically imports the processing function for a given feature.
+ """
+ try:
+ module = importlib.import_module(f"app.features.{feature_id}.processing")
+ return getattr(module, "process")
+ except (ImportError, AttributeError) as e:
+ print(f"[WORKER] Error importing processing function for feature '{feature_id}': {e}")
+ return None
async def process_job(job_id: uuid.UUID):
- job = _load_job_metadata(job_id)
+ job = load_job_metadata(job_id)
if not job:
print(f"[WORKER] Job {job_id} not found in metadata, skipping.")
return
- print(f"[WORKER] Starting processing for job {job.id} (File: {job.filename})...")
+ print(f"[WORKER] Starting processing for job {job.id} (Feature: {job.feature_id}, File: {job.filename})...")
- # Update job status to PROCESSING
job.status = JobStatus.PROCESSING
job.message = "Processing started by worker."
- _save_job_metadata(job)
+ save_job_metadata(job)
+
+ processing_function = get_processing_function(job.feature_id)
+ if not processing_function:
+ job.status = JobStatus.FAILED
+ job.message = f"Could not find processing function for feature '{job.feature_id}'."
+ save_job_metadata(job)
+ return
try:
- # Execute the processing generator and capture the final state
- final_update = None
- for progress_update in create_layered_curves_dxf(
- job.input_path,
- job.output_path,
- num_layers=job.num_layers,
- num_points_per_layer=job.num_points_per_layer
- ):
- # While processing, update status and save
- job.status = JobStatus(progress_update["status"].upper())
- job.progress = progress_update["progress"]
- job.message = progress_update["message"]
- _save_job_metadata(job)
- final_update = progress_update
+ async for progress_update in processing_function(job):
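+            # Persist every yielded update so the API's WebSocket poller can stream progress to clients.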
+ job.status = JobStatus(progress_update.get("status", job.status.value).upper())
+ job.progress = progress_update.get("progress", job.progress)
+ job.message = progress_update.get("message", job.message)
+ save_job_metadata(job)
- # After the loop, perform the single, definitive final update
- if final_update and final_update.get("status") == "complete":
- job.status = JobStatus.COMPLETE
- job.progress = 100
- job.message = final_update.get("message", "Processing complete! DXF generated.")
- job.download_url = f"/api/download/{os.path.basename(job.output_path)}"
- job.view_url = f"/api/jobs/{job.id}/view"
- print(f"[WORKER] Job {job.id} completed successfully.")
- else:
- # If the loop finishes without a 'complete' status, it must have failed.
- job.status = JobStatus.FAILED
- if final_update:
- job.message = final_update.get("message", "Job failed during processing.")
- else:
- job.message = "Job failed with no processing updates."
- print(f"[WORKER] Job {job.id} failed.")
-
- _save_job_metadata(job)
+        # Only mark the job COMPLETE if the processing generator did not report a failure.
+        if job.status != JobStatus.FAILED:
+            job.status = JobStatus.COMPLETE
+            job.progress = 100
+            job.message = "Processing complete!"
+            job.download_url = f"/api/download/{job.id}"
+            job.view_url = f"/api/jobs/{job.id}/view"
+            print(f"[WORKER] Job {job.id} completed successfully.")
+        else:
+            print(f"[WORKER] Job {job.id} failed during processing.")
+        save_job_metadata(job)
except Exception as e:
error_message = f"An error occurred during job {job.id} processing: {str(e)}"
print(f"[WORKER] ERROR: {error_message}")
job.status = JobStatus.FAILED
job.message = error_message
- _save_job_metadata(job)
+ save_job_metadata(job)
finally:
- # Clean up the trigger file from the queue
trigger_file_path = os.path.join(JOB_QUEUE_DIR, f"{job.id}.trigger")
if os.path.exists(trigger_file_path):
os.remove(trigger_file_path)
print(f"[WORKER] Cleaned up trigger file for job {job.id}.")
-
- # Clean up the input file (uploaded mesh)
- if os.path.exists(job.input_path):
- os.remove(job.input_path)
- print(f"[WORKER] Cleaned up input file for job {job.id}.")
async def main():
print(f"[WORKER] Worker started. Monitoring {JOB_QUEUE_DIR} for new jobs...")
@@ -121,17 +84,9 @@
except ValueError:
print(f"[WORKER] Invalid trigger filename: {filename}, skipping.")
continue
-
- time.sleep(1) # Check for new jobs every second
+ await asyncio.sleep(1)
if __name__ == "__main__":
- # Ensure directories exist (they should be created by main.py on startup)
- os.makedirs(UPLOAD_DIR, exist_ok=True)
- os.makedirs(OUTPUT_DIR, exist_ok=True)
- os.makedirs(JOBS_METADATA_DIR, exist_ok=True)
os.makedirs(JOB_QUEUE_DIR, exist_ok=True)
-
- # Run the worker's main loop
- # Need to use asyncio.run to run an async main function
- import asyncio
- asyncio.run(main())
+ os.makedirs(JOBS_METADATA_DIR, exist_ok=True)
+ asyncio.run(main())
\ No newline at end of file
diff --git a/ReferenceSurfaceGenerator/frontend/package-lock.json b/ReferenceSurfaceGenerator/frontend/package-lock.json
index 79c8309..e05579d 100644
--- a/ReferenceSurfaceGenerator/frontend/package-lock.json
+++ b/ReferenceSurfaceGenerator/frontend/package-lock.json
@@ -19,6 +19,7 @@
"react": "^19.2.4",
"react-bootstrap": "^2.10.10",
"react-dom": "^19.2.4",
+ "react-router-dom": "^7.13.0",
"react-scripts": "5.0.1",
"three": "^0.182.0",
"web-vitals": "^2.1.4"
@@ -13257,6 +13258,54 @@
"node": ">=0.10.0"
}
},
+ "node_modules/react-router": {
+ "version": "7.13.0",
+ "resolved": "https://registry.npmjs.org/react-router/-/react-router-7.13.0.tgz",
+ "integrity": "sha512-PZgus8ETambRT17BUm/LL8lX3Of+oiLaPuVTRH3l1eLvSPpKO3AvhAEb5N7ihAFZQrYDqkvvWfFh9p0z9VsjLw==",
+ "dependencies": {
+ "cookie": "^1.0.1",
+ "set-cookie-parser": "^2.6.0"
+ },
+ "engines": {
+ "node": ">=20.0.0"
+ },
+ "peerDependencies": {
+ "react": ">=18",
+ "react-dom": ">=18"
+ },
+ "peerDependenciesMeta": {
+ "react-dom": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/react-router-dom": {
+ "version": "7.13.0",
+ "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-7.13.0.tgz",
+ "integrity": "sha512-5CO/l5Yahi2SKC6rGZ+HDEjpjkGaG/ncEP7eWFTvFxbHP8yeeI0PxTDjimtpXYlR3b3i9/WIL4VJttPrESIf2g==",
+ "dependencies": {
+ "react-router": "7.13.0"
+ },
+ "engines": {
+ "node": ">=20.0.0"
+ },
+ "peerDependencies": {
+ "react": ">=18",
+ "react-dom": ">=18"
+ }
+ },
+ "node_modules/react-router/node_modules/cookie": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/cookie/-/cookie-1.1.1.tgz",
+ "integrity": "sha512-ei8Aos7ja0weRpFzJnEA9UHJ/7XQmqglbRwnf2ATjcB9Wq874VKH9kfjjirM6UhU2/E5fFYadylyhFldcqSidQ==",
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/express"
+ }
+ },
"node_modules/react-scripts": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/react-scripts/-/react-scripts-5.0.1.tgz",
@@ -14099,6 +14148,11 @@
"node": ">= 0.8.0"
}
},
+ "node_modules/set-cookie-parser": {
+ "version": "2.7.2",
+ "resolved": "https://registry.npmjs.org/set-cookie-parser/-/set-cookie-parser-2.7.2.tgz",
+ "integrity": "sha512-oeM1lpU/UvhTxw+g3cIfxXHyJRc/uidd3yK1P242gzHds0udQBYzs3y8j4gCCW+ZJ7ad0yctld8RYO+bdurlvw=="
+ },
"node_modules/set-function-length": {
"version": "1.2.2",
"resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz",
diff --git a/ReferenceSurfaceGenerator/frontend/package.json b/ReferenceSurfaceGenerator/frontend/package.json
index 47906ba..f324c32 100644
--- a/ReferenceSurfaceGenerator/frontend/package.json
+++ b/ReferenceSurfaceGenerator/frontend/package.json
@@ -14,6 +14,7 @@
"react": "^19.2.4",
"react-bootstrap": "^2.10.10",
"react-dom": "^19.2.4",
+ "react-router-dom": "^7.13.0",
"react-scripts": "5.0.1",
"three": "^0.182.0",
"web-vitals": "^2.1.4"
diff --git a/ReferenceSurfaceGenerator/frontend/src/App.js b/ReferenceSurfaceGenerator/frontend/src/App.js
index c08dd40..5032ae7 100644
--- a/ReferenceSurfaceGenerator/frontend/src/App.js
+++ b/ReferenceSurfaceGenerator/frontend/src/App.js
@@ -1,295 +1,65 @@
-import React, { useState, useEffect } from 'react';
+import React, { useState, useEffect, Suspense } from 'react';
+import { BrowserRouter as Router, Routes, Route, Link } from 'react-router-dom';
+import { Container, Navbar, Nav } from 'react-bootstrap';
+import { features } from './features';
import axios from 'axios';
-import { Container, Navbar, Card, ProgressBar, Alert, Button, Form, Row, Col, ListGroup } from 'react-bootstrap';
-import JobItem from './JobItem';
-import DxfViewer from './DxfViewer';
const API_URL = process.env.NODE_ENV === 'development' ? 'http://localhost:8000' : '';
-const WS_URL = process.env.NODE_ENV === 'development'
- ? 'ws://localhost:8000'
- : window.location.protocol.replace('http', 'ws') + '//' + window.location.host;
-// --- Upload Component (modified) ---
-const UploadComponent = ({ setJobs }) => {
- const [file, setFile] = useState(null);
- const [numLayers, setNumLayers] = useState(20);
- const [numPoints, setNumPoints] = useState(30);
- const [uploadProgress, setUploadProgress] = useState(0); // For file upload progress
- const [uploadStatusMessage, setUploadStatusMessage] = useState('Select a file to begin.');
- const [error, setError] = useState(null);
- const [isUploading, setIsUploading] = useState(false);
+const App = () => {
+ const [availableFeatures, setAvailableFeatures] = useState([]);
- const handleFileChange = (event) => {
- setFile(event.target.files[0]);
- setUploadStatusMessage(event.target.files[0] ? event.target.files[0].name : 'Select a file to begin.');
- setUploadProgress(0);
- setError(null);
- };
+ useEffect(() => {
+ const fetchFeatures = async () => {
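+      // Ask the backend which feature modules are enabled so the navigation can be built dynamically.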
+ try {
+ const response = await axios.get(`${API_URL}/api/features`);
+ setAvailableFeatures(response.data);
+      } catch (error) {
+ console.error("Error fetching features:", error);
+ }
+ };
+ fetchFeatures();
+ }, []);
- const handleUpload = async () => {
- if (!file) return;
-
- setIsUploading(true);
- setUploadProgress(0);
- setError(null);
- setUploadStatusMessage('Uploading file...');
-
- const formData = new FormData();
- formData.append('file', file);
- formData.append('num_layers', numLayers);
- formData.append('num_points_per_layer', numPoints);
-
- try {
- const uploadResponse = await axios.post(`${API_URL}/upload/`, formData, {
- onUploadProgress: (progressEvent) => {
- const percentCompleted = Math.round((progressEvent.loaded * 100) / progressEvent.total);
- setUploadProgress(percentCompleted);
- setUploadStatusMessage(`Uploading... ${percentCompleted}%`);
- },
- });
-
- const { job_id, filename, status } = uploadResponse.data;
- // Add the new job to the global state immediately
- setJobs(prevJobs => [{
- id: job_id,
- filename: filename,
- status: status,
- progress: 0,
- message: `File '${filename}' uploaded, awaiting processing.`,
- num_layers: numLayers,
- num_points_per_layer: numPoints,
- timestamp: new Date().toISOString() // Use ISO string for consistency
- }, ...prevJobs]);
-
- setUploadStatusMessage(`Job ${job_id.substring(0, 8)}... created. Waiting for WebSocket updates.`);
- setUploadProgress(0); // Reset upload progress for next file
- setFile(null); // Clear the file input
-
- // No longer open WebSocket here. WebSocket will be opened by JobList for ongoing jobs.
-
- } catch (err) {
- console.error(err);
- setError(err.response?.data?.detail || 'File upload failed. Please try again.');
- } finally {
- setIsUploading(false);
- }
- };
-
-  return (
-    [legacy UploadComponent JSX: {uploadStatusMessage}, ".obj, .stl, or .3mf file to process" hint, "Upload Status" heading, upload progress bar]
-  );
+          [new upload UI JSX: ".obj, .stl, or .3mf file to process" hint, {uploadStatusMessage}, progress bar while 0 < uploadProgress < 100]