diff --git a/.gitignore b/.gitignore index 6dd29b7..2a8ac54 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1,2 @@ -bin/ \ No newline at end of file +bin/ +.vscode/ diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..7ee72b6 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,54 @@ +FROM golang:1.24-alpine AS builder + +# Install necessary runtime dependencies for static binaries if needed +# For standard Go apps, this is usually just ca-certificates for HTTPS/TLS. +RUN apk add --no-cache ca-certificates + +# Set the working directory for the build +WORKDIR /app + +# Copy the dependency files first for better build caching +# If go.mod/go.sum don't change, this layer is reused, speeding up subsequent builds. +COPY go.mod go.sum ./ + +# Download all dependencies +RUN go mod download + +# Copy the rest of the source code, including internal/ +COPY . . + +# Build the final executable +# CGO_ENABLED=0 creates a statically linked binary (no libc dependency). +# -ldflags="-s -w" strips debug information to minimize the binary size. +RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags="-s -w" -o /xds-server . + + +# --- STAGE 2: Create the minimal runtime image --- +# Use a minimal base image, like alpine or gcr.io/distroless/static, for the final image. +# Alpine is a good choice as it includes basic shell commands (useful for debugging). +FROM alpine:latest + +# Install ca-certificates again for the final image to handle HTTPS/TLS connections +RUN apk add --no-cache ca-certificates + +# Set the working directory for the final application +WORKDIR /app + +# Create a non-root user and group for security best practice +RUN addgroup -S appuser && adduser -S -G appuser appuser +USER appuser + +# Copy the built binary from the 'builder' stage +# The binary is the only thing needed to run the Go application. 
+COPY --from=builder --chown=appuser:appuser /xds-server /usr/local/bin/xds-server + +# Expose the ports for the xDS server (18000) and the REST API (8080) +EXPOSE 18000 +EXPOSE 8080 + +# Define the command to run the application +# We use the new flags to listen on all interfaces and point to a config directory. +ENTRYPOINT ["/usr/local/bin/xds-server"] +# CMD is for default arguments. Here, we specify the default configuration to load. +# The container will run with nodeID 'proxy', listening on 18000/8080, and loading configs from the /configs directory inside the container. +CMD ["--nodeID", "proxy", "--config-dir", "/app/configs"] \ No newline at end of file diff --git a/Makefile b/Makefile index e44694b..40f503b 100644 --- a/Makefile +++ b/Makefile @@ -4,6 +4,12 @@ BIN_DIR := bin BIN := $(BIN_DIR)/$(APP_NAME) +# Docker settings +DOCKER_REGISTRY ?= docker.jerxie.com/ +DOCKER_TAG ?= latest +DOCKER_IMAGE := $(DOCKER_REGISTRY)$(APP_NAME) +DOCKER_FULL_TAG := $(DOCKER_IMAGE):$(DOCKER_TAG) + # Go build settings GO ?= go GOFLAGS ?= @@ -13,6 +19,8 @@ .PHONY: all all: build +# -------------------------------------------------------------------------------------------------- + ## Build binary .PHONY: build build: @@ -41,7 +49,7 @@ ## Clean up build artifacts .PHONY: clean -clean: +clean: docker-clean @echo "==> Cleaning build artifacts..." rm -rf $(BIN_DIR) @@ -50,3 +58,23 @@ deps: @echo "==> Downloading dependencies..." $(GO) mod tidy + +# -------------------------------------------------------------------------------------------------- + +## Build Docker image +.PHONY: dockerbuild +dockerbuild: + @echo "==> Building Docker image: $(DOCKER_FULL_TAG)" + docker build -t $(DOCKER_FULL_TAG) . 
+ +## Push Docker image +.PHONY: dockerpush +dockerpush: + @echo "==> Pushing Docker image: $(DOCKER_FULL_TAG)" + sudo docker push $(DOCKER_FULL_TAG) + +## Clean up Docker images (removes the latest tag of the built image) +.PHONY: docker-clean +docker-clean: + @echo "==> Removing local Docker image: $(DOCKER_IMAGE):latest" + -docker rmi $(DOCKER_IMAGE):latest 2> /dev/null || true \ No newline at end of file diff --git a/data/cds.yaml b/data/cds.yaml new file mode 100644 index 0000000..f7b686d --- /dev/null +++ b/data/cds.yaml @@ -0,0 +1,449 @@ +resources: +- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster + name: _acme_renewer + connect_timeout: 0.2s + type: STATIC + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: acme_renewer + endpoints: + - lb_endpoints: + - endpoint: + health_check_config: + port_value: 8888 + address: + socket_address: + address: 172.17.0.1 #docker bridge + port_value: 8888 +- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster + name: _git_bucket + connect_timeout: 0.2s + type: STATIC + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: git_bucket + endpoints: + - lb_endpoints: + - endpoint: + health_check_config: + port_value: 8088 + address: + socket_address: + address: 172.17.0.1 #docker bridge + port_value: 8088 +- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster + name: _code_server + connect_timeout: 0.2s + type: STATIC + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: code_server + endpoints: + - lb_endpoints: + - endpoint: + health_check_config: + port_value: 8080 + address: + socket_address: + address: 192.168.68.113 #docker bridge + port_value: 8080 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + validation_context: + trusted_ca: + filename: /etc/certs/upstream/vscode/root.crt +- "@type": 
type.googleapis.com/envoy.config.cluster.v3.Cluster + name: _docker_registry + connect_timeout: 0.2s + type: STATIC + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: docker_registry + endpoints: + - lb_endpoints: + - endpoint: + health_check_config: + port_value: 5555 + address: + socket_address: + address: 172.17.0.1 #docker bridge + port_value: 5555 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: { filename: "/etc/certs/downstream/docker.jerxie.com/fullchain.pem" } + private_key: { filename: "/etc/certs/downstream/docker.jerxie.com/privkey.pem" } + # validation_context: + # trusted_ca: + # filename: "/etc/certs/docker.jerxie.com/chain1.pem" +- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster + name: _nas_service + connect_timeout: 0.2s + type: STATIC + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: nas + endpoints: + - lb_endpoints: + - endpoint: + health_check_config: + port_value: 5000 + address: + socket_address: + address: 172.17.0.1 #docker bridge + port_value: 5000 +- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster + name: _plex_server + connect_timeout: 0.2s + type: STATIC + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: nas + endpoints: + - lb_endpoints: + - endpoint: + health_check_config: + port_value: 32400 + address: + socket_address: + address: 192.168.68.113 + port_value: 32400 +- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster + name: _nas_video + connect_timeout: 0.2s + type: STATIC + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: nas + endpoints: + - lb_endpoints: + - endpoint: + health_check_config: + port_value: 9007 + address: + socket_address: + address: 127.0.0.1 #localhost + port_value: 9007 +- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster + name: _nas_audio + 
connect_timeout: 0.2s + type: STATIC + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: nas + endpoints: + - lb_endpoints: + - endpoint: + health_check_config: + port_value: 8800 + address: + socket_address: + address: 127.0.0.1 #localhost + port_value: 8800 +- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster + name: _nas_note + connect_timeout: 0.2s + type: STATIC + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: nas + endpoints: + - lb_endpoints: + - endpoint: + health_check_config: + port_value: 9350 + address: + socket_address: + address: 127.0.0.1 #localhost + port_value: 9350 +- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster + name: _nas_camera + connect_timeout: 0.2s + type: STATIC + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: camera + endpoints: + - lb_endpoints: + - endpoint: + health_check_config: + port_value: 9900 + address: + socket_address: + address: 127.0.0.1 #localhost + port_value: 9900 +- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster + name: _nas_photo + connect_timeout: 0.2s + type: STATIC + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: nas + endpoints: + - lb_endpoints: + - endpoint: + health_check_config: + port_value: 5080 + address: + socket_address: + address: 127.0.0.1 #localhost + port_value: 5080 +- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster + name: _k8s_apiserver + connect_timeout: 1s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: apiserver + endpoints: + - lb_endpoints: + - endpoint: {health_check_config: { port_value: 16443}, address: { socket_address: { address: 192.168.68.139, port_value: 16443 }}} #192.168.68.254 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + validation_context: + trusted_ca: + filename: /etc/certs/upstream/kubernetes/root.crt +- "@type": 
type.googleapis.com/envoy.config.cluster.v3.Cluster + name: _k8s_router + connect_timeout: 1s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: nginx + endpoints: + - lb_endpoints: + - endpoint: {health_check_config: { port_value: 32704}, address: { socket_address: { address: 192.168.68.139, port_value: 32704 }}} + # - endpoint: { address: { socket_address: { address: 192.168.68.114, port_value: 32542 }}} + # transport_socket: + # name: envoy.transport_sockets.tls + # typed_config: + # "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + # common_tls_context: + # validation_context: + # trusted_ca: + # filename: /etc/certs/kubernetes/root.crt +- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster + name: _3d_printer_console + connect_timeout: 2s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: printer + endpoints: + - lb_endpoints: + - endpoint: + health_check_config: + port_value: 5000 + address: + socket_address: + address: octoprint + port_value: 5000 +- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster + name: _3d_printer_camera + connect_timeout: 2s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: camera + endpoints: + - lb_endpoints: + - endpoint: + health_check_config: + port_value: 8080 + address: + socket_address: + address: octoprint + port_value: 8080 +- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster + name: _bitwarden_service + connect_timeout: 0.2s + type: STATIC + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: pwassword_manager + endpoints: + - lb_endpoints: + - endpoint: + health_check_config: + port_value: 10010 + address: + socket_address: + address: 172.17.0.1 + port_value: 10010 +- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster + name: _homeassistant_service + connect_timeout: 0.2s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: 
homeassistant_manager + endpoints: + - lb_endpoints: + - endpoint: + health_check_config: + port_value: 8123 + address: + socket_address: + address: 192.168.68.133 + port_value: 8123 +- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster + name: _portainer_ui + connect_timeout: 0.2s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: portainer_ui + endpoints: + - lb_endpoints: + - endpoint: + health_check_config: + port_value: 9000 + address: + socket_address: + address: 192.168.68.161 + port_value: 9000 + # transport_socket: + # name: envoy.transport_sockets.tls + # typed_config: + # "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + # common_tls_context: + # validation_context: + # trusted_ca: + # filename: /etc/certs/upstream/portainer/root.crt +# - "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster +# name: _baby_buddy +# connect_timeout: 0.2s +# type: STRICT_DNS +# lb_policy: ROUND_ROBIN +# load_assignment: +# cluster_name: baby_buddy +# endpoints: +# - lb_endpoints: +# - endpoint: +# health_check_config: +# port_value: 8555 +# address: +# socket_address: +# address: 192.168.68.106 +# port_value: 8555 +- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster + name: _grafana_ui + connect_timeout: 0.2s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: _grafana_ui + endpoints: + - lb_endpoints: + - endpoint: + health_check_config: + port_value: 3000 + address: + socket_address: + address: 192.168.68.106 + port_value: 3000 +- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster + name: _auth_server + connect_timeout: 0.2s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: _auth_server + endpoints: + - lb_endpoints: + - endpoint: + health_check_config: + port_value: 5556 + address: + socket_address: + address: 192.168.68.113 + port_value: 5557 +- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster + 
name: _ai_server + connect_timeout: 0.2s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: _ai_server + endpoints: + - lb_endpoints: + - endpoint: + health_check_config: + port_value: 3000 + address: + socket_address: + address: 192.168.68.113 + port_value: 3000 +- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster + name: _pcb_server + connect_timeout: 0.2s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: _pcb_server + endpoints: + - lb_endpoints: + - endpoint: + health_check_config: + port_value: 8088 + address: + socket_address: + address: 192.168.68.113 + port_value: 8088 +- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster + name: _ai_api_server + connect_timeout: 0.2s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: _ai_api_server + endpoints: + - lb_endpoints: + - endpoint: + health_check_config: + port_value: 8002 + address: + socket_address: + address: 192.168.68.113 + port_value: 8002 +- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster + name: _ai_ui_server + connect_timeout: 0.2s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: _ai_ui_server + endpoints: + - lb_endpoints: + - endpoint: + health_check_config: + port_value: 8003 + address: + socket_address: + address: 192.168.68.113 + port_value: 8003 +- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster + name: _monitor_server + connect_timeout: 0.2s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: _monitor_server + endpoints: + - lb_endpoints: + - endpoint: + health_check_config: + port_value: 9090 + address: + socket_address: + address: 192.168.68.113 + port_value: 9090 \ No newline at end of file diff --git a/data/lds.yaml b/data/lds.yaml new file mode 100644 index 0000000..81b5897 --- /dev/null +++ b/data/lds.yaml @@ -0,0 +1,980 @@ +resources: +- "@type": type.googleapis.com/envoy.config.listener.v3.Listener + 
name: http_listener + address: + socket_address: { address: 0.0.0.0, port_value: 10000 } + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + codec_type: AUTO + route_config: + name: ingress_generic_insecure + virtual_hosts: + - name: http_to_https + domains: ["*"] + routes: + - match: { prefix : "/.well-known/acme-challenge"} + route: { cluster: _acme_renewer } + - match: { prefix: "/" } + redirect: { https_redirect: true } + - name: video_insecure + domains: ["video.jerxie.com" , "video.local:10000"] + routes: + - match: { prefix : "/.well-known/acme-challenge"} + route: { cluster: _acme_renewer } + - match: { prefix : "/"} + route: { cluster: _nas_video } + http_filters: + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router +- "@type": type.googleapis.com/envoy.config.listener.v3.Listener + name: https_listener + address: + socket_address: { address: 0.0.0.0, port_value: 10001 } + listener_filters: + - name: "envoy.filters.listener.tls_inspector" + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.listener.tls_inspector.v3.TlsInspector + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + normalize_path: true + merge_slashes: true + upgrade_configs: + - upgrade_type: websocket + codec_type: AUTO + stream_idle_timeout: 300s + request_timeout: 300s + route_config: + virtual_hosts: + - name: home_service + domains: ["home.jerxie.com"] + routes: + - match: { prefix: "/" } + route: { cluster: "_homeassistant_service"} + # - match: { path: "/printer"} + # redirect: { path_redirect: "/printer/" } + 
# - match: { prefix: "/printer/webcam" } + # route: { prefix_rewrite: "/", cluster: _3d_printer_camera, idle_timeout: 0s } + # - match: { prefix: "/printer/" } + # route: { prefix_rewrite: "/", cluster: _3d_printer_console } + http_filters: + - name: envoy.filters.http.lua + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.lua.v3.Lua + inline_code: | + require "/etc/envoy/filter" + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + filter_chain_match: + server_names: ["home.jerxie.com"] + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + # - certificate_chain: { filename: "/etc/certs/home_domain/certificate.crt" } + # private_key: { filename: "/etc/certs/home_domain/private.key" } + - certificate_chain: { filename: "/etc/certs/downstream/home.jerxie.com/fullchain.pem" } + private_key: { filename: "/etc/certs/downstream/home.jerxie.com/privkey.pem" } + # validation_context: + # trusted_ca: + # filename: /etc/certs/ca_bundle.crt + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + codec_type: AUTO + route_config: + virtual_hosts: + - name: docker_service + domains: ["docker.jerxie.com"] + routes: + - match: { prefix: "/" } + route: { cluster: "_docker_registry", timeout: 0s} + http_filters: + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + filter_chain_match: + server_names: ["docker.jerxie.com", "docker.local"] + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": 
type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: { filename: "/etc/certs/downstream/docker.jerxie.com/fullchain.pem" } + private_key: { filename: "/etc/certs/downstream/docker.jerxie.com/privkey.pem" } + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + codec_type: AUTO + upgrade_configs: + - upgrade_type: websocket + route_config: + virtual_hosts: + - name: docker_service + domains: ["nas.jerxie.com", "nas:10001"] + routes: + - match: { prefix: "/" } + route: { cluster: "_nas_service", timeout: 0s} + http_filters: + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + filter_chain_match: + server_names: ["nas.jerxie.com", "nas"] + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: { filename: "/etc/certs/downstream/nas.jerxie.com/fullchain.pem" } + private_key: { filename: "/etc/certs/downstream/nas.jerxie.com/privkey.pem" } + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + codec_type: AUTO + route_config: + virtual_hosts: + - name: docker_service + domains: ["video.jerxie.com"] + routes: + - match: { prefix: "/" } + route: { cluster: "_nas_video", timeout: 0s} + http_filters: + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + filter_chain_match: + server_names: ["video.jerxie.com"] + 
transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: { filename: "/etc/certs/downstream/video.jerxie.com/fullchain.pem" } + private_key: { filename: "/etc/certs/downstream/video.jerxie.com/privkey.pem" } + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + codec_type: AUTO + route_config: + virtual_hosts: + - name: plex_server + domains: ["plex.jerxie.com"] + routes: + - match: { prefix: "/" } + route: { cluster: "_plex_server", timeout: 0s} + http_filters: + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + filter_chain_match: + server_names: ["plex.jerxie.com"] + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: { filename: "/etc/certs/downstream/plex.jerxie.com/fullchain.pem" } + private_key: { filename: "/etc/certs/downstream/plex.jerxie.com/privkey.pem" } + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + codec_type: AUTO + normalize_path: true + merge_slashes: true + route_config: + virtual_hosts: + - name: kubernetes_service + domains: ["kubernetes.jerxie.com"] + routes: + - match: { path: "/apiserver"} + route: { prefix_rewrite: "/" , cluster: _k8s_apiserver } + - match: { prefix: "/apiserver/" } + route: { prefix_rewrite: "/" , cluster: _k8s_apiserver } + - match: { prefix: "/" } + 
route: { cluster: "_k8s_router"} + http_filters: + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + filter_chain_match: + server_names: ["kubernetes.jerxie.com", "kubernetes.local"] + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: { filename: "/etc/certs/downstream/kubernetes.jerxie.com/fullchain.pem" } + private_key: { filename: "/etc/certs/downstream/kubernetes.jerxie.com/privkey.pem" } + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + codec_type: AUTO + normalize_path: true + merge_slashes: true + route_config: + virtual_hosts: + - name: kubernetes_dashboard_service + domains: ["kubernetes.dashboard.jerxie.com"] + routes: + - match: { prefix: "/" } + route: { cluster: "_k8s_router"} + http_filters: + - name: envoy.filters.http.oauth2 + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.oauth2.v3.OAuth2 + config: + token_endpoint: + cluster: _auth_server + uri: auth.jerxie.com/token + timeout: 3s + authorization_endpoint: https://auth.jerxie.com/auth + redirect_uri: "%REQ(x-forwarded-proto)%://%REQ(:authority)%/callback" + redirect_path_matcher: + path: + exact: /callback + signout_path: + path: + exact: /signout + forward_bearer_token: true + credentials: + client_id: kubernetes-dashboard + token_secret: + name: token + sds_config: + path: "/etc/envoy/token-secret.yaml" + hmac_secret: + name: hmac + sds_config: + path: "/etc/envoy/hmac-secret.yaml" + # (Optional): defaults to 'user' scope if not provided + auth_scopes: + - openid + - email + # (Optional): set resource parameter for Authorization request + 
- name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + filter_chain_match: + server_names: ["kubernetes.dashboard.jerxie.com", "kubernetes.dashboard.local"] + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: { filename: "/etc/certs/downstream/kubernetes.dashboard.jerxie.com/fullchain.pem" } + private_key: { filename: "/etc/certs/downstream/kubernetes.dashboard.jerxie.com/privkey.pem" } + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + codec_type: AUTO + route_config: + virtual_hosts: + - name: kubernetes_blog_service + domains: ["blog.jerxie.com"] + routes: + - match: { prefix: "/" } + route: { cluster: "_k8s_router"} + http_filters: + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + filter_chain_match: + server_names: ["blog.jerxie.com"] + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: { filename: "/etc/certs/downstream/blog.jerxie.com/fullchain.pem" } + private_key: { filename: "/etc/certs/downstream/blog.jerxie.com/privkey.pem" } + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + codec_type: AUTO + route_config: + virtual_hosts: + - name: kubernetes_blog_service + domains: ["argocd.jerxie.com"] + routes: + 
- match: { prefix: "/" } + route: { cluster: "_k8s_router"} + http_filters: + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + filter_chain_match: + server_names: ["argocd.jerxie.com"] + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: { filename: "/etc/certs/downstream/argocd.jerxie.com/fullchain.pem" } + private_key: { filename: "/etc/certs/downstream/argocd.jerxie.com/privkey.pem" } + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + codec_type: AUTO + upgrade_configs: + - upgrade_type: websocket + stream_idle_timeout: 0s + normalize_path: true + merge_slashes: true + route_config: + virtual_hosts: + - name: meet_service + domains: ["meet.jerxie.com"] + routes: + - match: { prefix: "/" } + route: { cluster: "_k8s_router"} + http_filters: + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + filter_chain_match: + server_names: ["meet.jerxie.com"] + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: { filename: "/etc/certs/downstream/meet.jerxie.com/fullchain.pem" } + private_key: { filename: "/etc/certs/downstream/meet.jerxie.com/privkey.pem" } + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + codec_type: 
AUTO + route_config: + virtual_hosts: + - name: docker_service + domains: ["audio.jerxie.com", "audio.local"] + routes: + - match: { prefix: "/" } + route: { cluster: "_nas_audio"} + http_filters: + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + filter_chain_match: + server_names: ["audio.jerxie.com", "audio.local"] + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: { filename: "/etc/certs/downstream/audio.jerxie.com/fullchain.pem" } + private_key: { filename: "/etc/certs/downstream/audio.jerxie.com/privkey.pem" } + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + codec_type: AUTO + upgrade_configs: + - upgrade_type: websocket + route_config: + virtual_hosts: + - name: code_service + domains: ["code.jerxie.com"] + routes: + - match: { prefix: "/" } + route: { cluster: "_code_server"} + http_filters: + - name: envoy.filters.http.oauth2 + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.oauth2.v3.OAuth2 + config: + token_endpoint: + cluster: _auth_server + uri: auth.jerxie.com/token + timeout: 3s + authorization_endpoint: https://auth.jerxie.com/auth + redirect_uri: "%REQ(x-forwarded-proto)%://%REQ(:authority)%/callback" + forward_bearer_token: true + redirect_path_matcher: + path: + exact: /callback + signout_path: + path: + exact: /signout + credentials: + client_id: code-server + token_secret: + name: token + sds_config: + path: "/etc/envoy/token-secret.yaml" + hmac_secret: + name: hmac + sds_config: + path: "/etc/envoy/hmac-secret.yaml" + # (Optional): defaults to 'user' scope if not provided + 
auth_scopes: + - openid + - email + # (Optional): set resource parameter for Authorization request + - name: envoy.filters.http.jwt_authn + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.jwt_authn.v3.JwtAuthentication + providers: + provider1: + remote_jwks: + http_uri: + uri: "https://auth.jerxie.com/keys" + cluster: _auth_server + timeout: 5s + cache_duration: 600s + from_headers: + - name: Authorization + value_prefix: "Bearer " + from_cookies: + - BearerToken + payload_in_metadata: jwt_payload + rules: + - match: + prefix: / + requires: + provider_name: provider1 + - name: envoy.filters.http.lua + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.lua.v3.Lua + inline_code: | + email = "" + function envoy_on_request(request_handle) + email = "" + local meta = request_handle:streamInfo():dynamicMetadata() + for key, value in pairs(meta:get("envoy.filters.http.jwt_authn")) do + if key == "jwt_payload" then + for k, v in pairs(value) do + if k == "email" then + request_handle:logInfo("login codeserver: " ..v) + email = v + end + end + end + end + end + + function envoy_on_response(response_handle) + if email ~="" and email ~= "axieyangb@gmail.com" then + response_handle:logInfo("Got unauthorized user, return 403 for user " ..email) + response_handle:headers():add("set-cookie", "BearerToken=deleted; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT") + response_handle:headers():add("set-cookie", "OauthHMAC=deleted; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT") + response_handle:headers():add("set-cookie", "IdToken=deleted; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT") + response_handle:headers():add("set-cookie", "OauthExpires=deleted; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT") + end + email = "" + end + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + filter_chain_match: + server_names: ["code.jerxie.com"] + transport_socket: + 
name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: { filename: "/etc/certs/downstream/code.jerxie.com/fullchain.pem" } + private_key: { filename: "/etc/certs/downstream/code.jerxie.com/privkey.pem" } + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + codec_type: AUTO + route_config: + virtual_hosts: + - name: photo_service + domains: ["photo.jerxie.com"] + routes: + - match: { prefix: "/" } + route: { cluster: "_nas_photo", timeout: 0s} + http_filters: + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + filter_chain_match: + server_names: ["photo.jerxie.com"] + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: { filename: "/etc/certs/downstream/photo.jerxie.com/fullchain.pem" } + private_key: { filename: "/etc/certs/downstream/photo.jerxie.com/privkey.pem" } + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + codec_type: AUTO + route_config: + virtual_hosts: + - name: password_service + domains: ["password.jerxie.com"] + routes: + - match: { prefix: "/" } + route: { cluster: "_bitwarden_service"} + http_filters: + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + filter_chain_match: + server_names: ["password.jerxie.com"] + 
transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: { filename: "/etc/certs/downstream/password.jerxie.com/fullchain.pem" } + private_key: { filename: "/etc/certs/downstream/password.jerxie.com/privkey.pem" } + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + codec_type: AUTO + route_config: + virtual_hosts: + - name: gitbucket_service + domains: ["gitbucket.jerxie.com"] + routes: + - match: { prefix: "/" } + route: { cluster: "_git_bucket"} + http_filters: + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + filter_chain_match: + server_names: ["gitbucket.jerxie.com"] + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: { filename: "/etc/certs/downstream/gitbucket.jerxie.com/fullchain.pem" } + private_key: { filename: "/etc/certs/downstream/gitbucket.jerxie.com/privkey.pem" } + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + codec_type: AUTO + upgrade_configs: + - upgrade_type: websocket + stream_idle_timeout: 0s + normalize_path: true + merge_slashes: true + route_config: + virtual_hosts: + - name: printer_service + domains: ["printer.jerxie.com"] + routes: + - match: { prefix: "/webcam" } + route: { prefix_rewrite: "/", cluster: "_3d_printer_camera", max_stream_duration: 
{grpc_timeout_header_max: 0s} } + - match: { prefix: "/" } + route: { cluster: "_3d_printer_console"} + http_filters: + - name: envoy.filters.http.oauth2 + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.oauth2.v3.OAuth2 + config: + token_endpoint: + cluster: _auth_server + uri: auth.jerxie.com/token + timeout: 3s + authorization_endpoint: https://auth.jerxie.com/auth + redirect_uri: "%REQ(x-forwarded-proto)%://%REQ(:authority)%/callback" + redirect_path_matcher: + path: + exact: /callback + signout_path: + path: + exact: /signout + forward_bearer_token: true + credentials: + client_id: octoprint-portal + token_secret: + name: token + sds_config: + path: "/etc/envoy/token-secret.yaml" + hmac_secret: + name: hmac + sds_config: + path: "/etc/envoy/hmac-secret.yaml" + # (Optional): defaults to 'user' scope if not provided + auth_scopes: + - openid + - email + # (Optional): set resource parameter for Authorization request + - name: envoy.filters.http.jwt_authn + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.jwt_authn.v3.JwtAuthentication + providers: + provider1: + remote_jwks: + http_uri: + uri: "https://auth.jerxie.com/keys" + cluster: _auth_server + timeout: 5s + cache_duration: 600s + from_headers: + - name: Authorization + value_prefix: "Bearer " + # from_cookies: + # - BearerToken + payload_in_metadata: jwt_payload + rules: + - match: + prefix: / + requires: + provider_name: provider1 + - name: envoy.filters.http.lua + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.lua.v3.Lua + inline_code: | + email = "" + function envoy_on_request(request_handle) + email = "" + local meta = request_handle:streamInfo():dynamicMetadata() + for key, value in pairs(meta:get("envoy.filters.http.jwt_authn")) do + if key == "jwt_payload" then + for k, v in pairs(value) do + if k == "email" then + print("login octoprint: "..v) + email = v + request_handle:headers():add("ENVOY_AUTHENTICATED_USER", v) + 
end + end + end + end + end + + function envoy_on_response(response_handle) + if email ~="" and email ~= "axieyangb@gmail.com" then + response_handle:logInfo("Got unauthorized user, return 403 for user " ..email) + response_handle:headers():add("set-cookie", "BearerToken=deleted; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT") + response_handle:headers():add("set-cookie", "OauthHMAC=deleted; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT") + response_handle:headers():add("set-cookie", "IdToken=deleted; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT") + response_handle:headers():add("set-cookie", "OauthExpires=deleted; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT") + end + email = "" + end + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + filter_chain_match: + server_names: ["printer.jerxie.com", "printer.local"] + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: { filename: "/etc/certs/downstream/printer.jerxie.com/fullchain.pem" } + private_key: { filename: "/etc/certs/downstream/printer.jerxie.com/privkey.pem" } + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + codec_type: AUTO + upgrade_configs: + - upgrade_type: websocket + route_config: + virtual_hosts: + - name: camera_service + domains: ["camera.jerxie.com"] + routes: + - match: { prefix: "/" } + route: { cluster: "_nas_camera"} + http_filters: + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + filter_chain_match: + server_names: ["camera.jerxie.com"] + transport_socket: + name: 
envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: { filename: "/etc/certs/downstream/camera.jerxie.com/fullchain.pem" } + private_key: { filename: "/etc/certs/downstream/camera.jerxie.com/privkey.pem" } + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + codec_type: AUTO + route_config: + virtual_hosts: + - name: note_service + domains: ["note.jerxie.com"] + routes: + - match: { prefix: "/" } + route: { cluster: "_nas_note"} + http_filters: + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + filter_chain_match: + server_names: ["note.jerxie.com"] + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: { filename: "/etc/certs/downstream/note.jerxie.com/fullchain.pem" } + private_key: { filename: "/etc/certs/downstream/note.jerxie.com/privkey.pem" } + # - filters: + # - name: envoy.filters.network.http_connection_manager + # typed_config: + # "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + # stat_prefix: ingress_http + # codec_type: AUTO + # route_config: + # virtual_hosts: + # - name: baby_service + # domains: ["baby.jerxie.com"] + # routes: + # - match: { prefix: "/" } + # route: { cluster: "_baby_buddy"} + # http_filters: + # - name: envoy.filters.http.router + # typed_config: + # "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + # filter_chain_match: + # server_names: ["baby.jerxie.com"] + # 
transport_socket: + # name: envoy.transport_sockets.tls + # typed_config: + # "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + # common_tls_context: + # tls_certificates: + # - certificate_chain: { filename: "/etc/certs/downstream/baby.jerxie.com/fullchain.pem" } + # private_key: { filename: "/etc/certs/downstream/baby.jerxie.com/privkey.pem" } + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + upgrade_configs: + - upgrade_type: websocket + codec_type: AUTO + route_config: + virtual_hosts: + - name: container_service + domains: ["container.jerxie.com"] + routes: + - match: { prefix: "/" } + route: { cluster: "_portainer_ui"} + http_filters: + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + filter_chain_match: + server_names: ["container.jerxie.com"] + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: { filename: "/etc/certs/downstream/container.jerxie.com/fullchain.pem" } + private_key: { filename: "/etc/certs/downstream/container.jerxie.com/privkey.pem" } + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + upgrade_configs: + - upgrade_type: websocket + codec_type: AUTO + route_config: + virtual_hosts: + - name: grafana_service + domains: ["grafana.jerxie.com"] + routes: + - match: { prefix: "/" } + route: { cluster: "_grafana_ui"} + http_filters: + - name: envoy.filters.http.router + typed_config: + "@type": 
type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + filter_chain_match: + server_names: ["grafana.jerxie.com"] + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: { filename: "/etc/certs/downstream/grafana.jerxie.com/fullchain.pem" } + private_key: { filename: "/etc/certs/downstream/grafana.jerxie.com/privkey.pem" } + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + upgrade_configs: + - upgrade_type: websocket + codec_type: AUTO + route_config: + virtual_hosts: + - name: auth_service + domains: ["auth.jerxie.com"] + routes: + - match: { prefix: "/" } + route: { cluster: "_auth_server"} + http_filters: + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + filter_chain_match: + server_names: ["auth.jerxie.com"] + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: { filename: "/etc/certs/downstream/auth.jerxie.com/fullchain.pem" } + private_key: { filename: "/etc/certs/downstream/auth.jerxie.com/privkey.pem" } + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + upgrade_configs: + - upgrade_type: websocket + codec_type: AUTO + route_config: + virtual_hosts: + - name: ai_service + domains: ["ai.jerxie.com"] + routes: + - match: { prefix: "/api" } + route: { cluster: 
"_ai_api_server", timeout: 0s} + - match: { prefix: "/" } + route: { cluster: "_ai_ui_server", timeout: 0s} + http_filters: + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + filter_chain_match: + server_names: ["ai.jerxie.com"] + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: { filename: "/etc/certs/downstream/ai.jerxie.com/fullchain.pem" } + private_key: { filename: "/etc/certs/downstream/ai.jerxie.com/privkey.pem" } + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + upgrade_configs: + - upgrade_type: websocket + codec_type: AUTO + route_config: + virtual_hosts: + - name: pcb_service + domains: ["pcb.jerxie.com"] + routes: + - match: { prefix: "/" } + route: { cluster: "_pcb_server", timeout: 0s} + http_filters: + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + filter_chain_match: + server_names: ["pcb.jerxie.com"] + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: { filename: "/etc/certs/downstream/pcb.jerxie.com/fullchain.pem" } + private_key: { filename: "/etc/certs/downstream/pcb.jerxie.com/privkey.pem" } + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + upgrade_configs: + - upgrade_type: 
websocket + codec_type: AUTO + route_config: + virtual_hosts: + - name: monitor_service + domains: ["monitor.jerxie.com"] + routes: + - match: { prefix: "/" } + route: { cluster: "_monitor_server", timeout: 0s} + http_filters: + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + filter_chain_match: + server_names: ["monitor.jerxie.com"] + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: { filename: "/etc/certs/downstream/monitor.jerxie.com/fullchain.pem" } + private_key: { filename: "/etc/certs/downstream/monitor.jerxie.com/privkey.pem" } \ No newline at end of file diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..0997c5e --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,31 @@ +version: "3.9" + +services: + envoy-control-plane: + # Use the prebuilt xds-server image published to the private registry + image: docker.jerxie.com/xds-server:latest + # Set a custom container name for easier management + container_name: envoy-control-plane + # Restart policy to ensure the service comes back up unless manually stopped + restart: unless-stopped + # Mount the named volume (defined below) to /app/data in the container + ports: + - "8888:8080" + - "18000:18000" + volumes: + - data_volume:/app/data:rw + command: ["--nodeID", "proxy", "--config-dir", "/app/data/config"] +# Define the volumes used by the services +volumes: + # Define a named volume for your code + data_volume: + # Use the 'local' driver which supports mounting remote filesystems + driver: local + # Specify the options for the driver + driver_opts: + # Set the filesystem type to NFS + type: "nfs" + # Set the NFS options, including the server address + o: "addr=192.168.68.90,rw" + # Specify the remote path (device) on
the NFS server to mount + device: ":/volume1/docker/envoy-control-plane/data" \ No newline at end of file diff --git a/go.mod b/go.mod index be4474c..19d5ad9 100644 --- a/go.mod +++ b/go.mod @@ -15,10 +15,13 @@ github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 // indirect github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 // indirect github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect + github.com/go-logr/logr v1.4.3 // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect golang.org/x/net v0.41.0 // indirect golang.org/x/sys v0.33.0 // indirect golang.org/x/text v0.26.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/klog/v2 v2.130.1 // indirect ) diff --git a/go.sum b/go.sum index 997d5f7..50c4b3f 100644 --- a/go.sum +++ b/go.sum @@ -58,5 +58,8 @@ google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= diff --git a/internal/log.go b/internal/log.go new file mode 100644 index 0000000..50a57de --- /dev/null +++ b/internal/log.go @@ -0,0 +1,41 @@ +package internal + +import ( + "k8s.io/klog/v2" // Import klog +) + +// DefaultLogger is enabled when no consuming clients provide +// a logger to the server/cache 
subsystem. +type DefaultLogger struct { +} + +// NewDefaultLogger creates a DefaultLogger. +func NewDefaultLogger() *DefaultLogger { + // klog is globally initialized. You might call klog.InitFlags(nil) + // and flag.Parse() earlier in your main function to configure it. + // We don't do it here as it would conflict with other flag parsing. + return &DefaultLogger{} +} + +// Debugf logs a message at level debug. +// klog's standard Verbosity (V) is used for debugging/info levels. +// V(0) is typically equivalent to Infof, V(1) or higher is for debugging. +func (l *DefaultLogger) Debugf(format string, args ...interface{}) { + // Using V(2) for typical debug output + klog.V(2).Infof(format, args...) +} + +// Infof logs a message at level info. +func (l *DefaultLogger) Infof(format string, args ...interface{}) { + klog.Infof(format, args...) +} + +// Warnf logs a message at level warn. +func (l *DefaultLogger) Warnf(format string, args ...interface{}) { + klog.Warningf(format, args...) +} + +// Errorf logs a message at level error. +func (l *DefaultLogger) Errorf(format string, args ...interface{}) { + klog.Errorf(format, args...) 
+} diff --git a/internal/rest_api.go b/internal/rest_api.go index 5d195b2..8ed8d2b 100644 --- a/internal/rest_api.go +++ b/internal/rest_api.go @@ -2,11 +2,14 @@ import ( "encoding/json" + "fmt" "net/http" - "os" - cachev3 "github.com/envoyproxy/go-control-plane/pkg/cache/v3" + "github.com/envoyproxy/go-control-plane/pkg/cache/types" + resourcev3 "github.com/envoyproxy/go-control-plane/pkg/resource/v3" "github.com/google/uuid" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/reflect/protoreflect" ) // API holds reference to snapshot manager @@ -41,131 +44,205 @@ Path string `json:"path"` } +// AddListenerRequest defines payload to add a listener +type AddListenerRequest struct { + Name string `json:"name"` + Port uint32 `json:"port"` +} + +// RemoveListenerRequest defines payload to remove a listener +type RemoveListenerRequest struct { + Name string `json:"name"` +} + // NewAPI returns a new REST API handler -func NewAPI(cache cachev3.SnapshotCache, nodeID string) *API { +func NewAPI(sm *SnapshotManager) *API { return &API{ - Manager: NewSnapshotManager(cache, nodeID), + Manager: sm, } } // RegisterRoutes mounts REST handlers func (api *API) RegisterRoutes(mux *http.ServeMux) { + // Management Handlers (Add / Remove) mux.HandleFunc("/add-cluster", api.addCluster) - mux.HandleFunc("/remove-cluster", api.removeCluster) + mux.HandleFunc("/remove-cluster", func(w http.ResponseWriter, r *http.Request) { + api.removeResourceHandler(w, r, resourcev3.ClusterType) + }) mux.HandleFunc("/add-route", api.addRoute) - mux.HandleFunc("/remove-route", api.removeRoute) - mux.HandleFunc("/load-snapshot", api.loadSnapshot) - mux.HandleFunc("/save-snapshot", api.saveSnapshot) + mux.HandleFunc("/remove-route", func(w http.ResponseWriter, r *http.Request) { + api.removeResourceHandler(w, r, resourcev3.RouteType) + }) + mux.HandleFunc("/add-listener", api.addListener) + mux.HandleFunc("/remove-listener", func(w http.ResponseWriter, r *http.Request) { + 
api.removeResourceHandler(w, r, resourcev3.ListenerType) + }) + + // Query / List Handlers + mux.HandleFunc("/list-clusters", func(w http.ResponseWriter, r *http.Request) { + api.listResourceHandler(w, r, resourcev3.ClusterType) + }) + mux.HandleFunc("/get-cluster", func(w http.ResponseWriter, r *http.Request) { + api.getResourceHandler(w, r, resourcev3.ClusterType) + }) + + mux.HandleFunc("/list-routes", func(w http.ResponseWriter, r *http.Request) { + api.listResourceHandler(w, r, resourcev3.RouteType) + }) + mux.HandleFunc("/get-route", func(w http.ResponseWriter, r *http.Request) { + api.getResourceHandler(w, r, resourcev3.RouteType) + }) + + mux.HandleFunc("/list-listeners", func(w http.ResponseWriter, r *http.Request) { + api.listResourceHandler(w, r, resourcev3.ListenerType) + }) + mux.HandleFunc("/get-listener", func(w http.ResponseWriter, r *http.Request) { + api.getResourceHandler(w, r, resourcev3.ListenerType) + }) } -// ---------------- Cluster Handlers ---------------- +// ---------------- Cluster / Route / Listener Handlers Using Generic ---------------- func (api *API) addCluster(w http.ResponseWriter, r *http.Request) { - var req AddClusterRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + api.addResourceHandler(w, r, resourcev3.ClusterType, func(req interface{}) types.Resource { + cr := req.(*AddClusterRequest) + name := cr.Name + if name == "" { + name = uuid.NewString() + } + return NewCluster(name) + }) +} + +func (api *API) addRoute(w http.ResponseWriter, r *http.Request) { + api.addResourceHandler(w, r, resourcev3.RouteType, func(req interface{}) types.Resource { + rr := req.(*AddRouteRequest) + return NewRoute(rr.Name, rr.Cluster, rr.PathPrefix) + }) +} + +func (api *API) addListener(w http.ResponseWriter, r *http.Request) { + api.addResourceHandler(w, r, resourcev3.ListenerType, func(req interface{}) types.Resource { + lr := req.(*AddListenerRequest) + return NewListener(lr.Name, lr.Port) + }) +} + +// ---------------- 
Generic REST Handlers ---------------- + +// createFn returns a types.Resource +func (api *API) addResourceHandler(w http.ResponseWriter, r *http.Request, typ resourcev3.Type, createFn func(interface{}) types.Resource) { + if r.Method != http.MethodPost { + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + return + } + w.Header().Set("Content-Type", "application/json") + + var req interface{} + switch typ { + case resourcev3.ClusterType: + req = &AddClusterRequest{} + case resourcev3.RouteType: + req = &AddRouteRequest{} + case resourcev3.ListenerType: + req = &AddListenerRequest{} + default: + http.Error(w, "unsupported type", http.StatusBadRequest) + return + } + + if err := json.NewDecoder(r.Body).Decode(req); err != nil { http.Error(w, "invalid request", http.StatusBadRequest) return } - if req.Name == "" { - req.Name = uuid.NewString() - } - cluster := NewCluster(req.Name) - if err := api.Manager.AddCluster(cluster); err != nil { - http.Error(w, "failed to add cluster", http.StatusInternalServerError) + res := createFn(req) + if err := api.Manager.AddResource(res, typ); err != nil { + http.Error(w, fmt.Sprintf("failed to add resource: %v", err), http.StatusInternalServerError) return } w.WriteHeader(http.StatusCreated) - json.NewEncoder(w).Encode(map[string]string{"cuid": req.Name}) + json.NewEncoder(w).Encode(map[string]string{"name": res.(interface{ GetName() string }).GetName()}) } -func (api *API) removeCluster(w http.ResponseWriter, r *http.Request) { - var req RemoveClusterRequest +func (api *API) removeResourceHandler(w http.ResponseWriter, r *http.Request, typ resourcev3.Type) { + if r.Method != http.MethodPost { + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + return + } + w.Header().Set("Content-Type", "application/json") + + var req struct{ Name string } if err := json.NewDecoder(r.Body).Decode(&req); err != nil || req.Name == "" { http.Error(w, "name required", http.StatusBadRequest) return } - if err := 
api.Manager.RemoveCluster(req.Name); err != nil { - http.Error(w, "failed to remove cluster", http.StatusInternalServerError) + if err := api.Manager.RemoveResource(req.Name, typ); err != nil { + http.Error(w, fmt.Sprintf("failed to remove resource: %v", err), http.StatusInternalServerError) return } w.WriteHeader(http.StatusOK) } -// ---------------- Route Handlers ---------------- +func (api *API) listResourceHandler(w http.ResponseWriter, r *http.Request, typ resourcev3.Type) { + if r.Method != http.MethodGet { + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + return + } + w.Header().Set("Content-Type", "application/json") -func (api *API) addRoute(w http.ResponseWriter, r *http.Request) { - var req AddRouteRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil || - req.Name == "" || req.Cluster == "" || req.PathPrefix == "" { - http.Error(w, "invalid request", http.StatusBadRequest) + resources, err := api.Manager.ListResources(typ) + if err != nil { + http.Error(w, fmt.Sprintf("failed to list resources: %v", err), http.StatusInternalServerError) return } - route := NewRoute(req.Name, req.Cluster, req.PathPrefix) - if err := api.Manager.AddRoute(route); err != nil { - http.Error(w, "failed to add route", http.StatusInternalServerError) - return - } - - w.WriteHeader(http.StatusCreated) - json.NewEncoder(w).Encode(map[string]string{"route": req.Name}) -} - -func (api *API) removeRoute(w http.ResponseWriter, r *http.Request) { - var req RemoveRouteRequest - // Decode request and check for required 'Name' field - if err := json.NewDecoder(r.Body).Decode(&req); err != nil || req.Name == "" { - http.Error(w, "route name required", http.StatusBadRequest) - return - } - - // Call the SnapshotManager's RemoveRoute method - if err := api.Manager.RemoveRoute(req.Name); err != nil { - // If the route doesn't exist, the manager handles the snapshot update anyway, - // so we mainly worry about cache read/write failures here. 
- http.Error(w, "failed to remove route", http.StatusInternalServerError) - return + out := []json.RawMessage{} + for _, res := range resources { + if pb, ok := res.(interface{ ProtoReflect() protoreflect.Message }); ok { + data, _ := protojson.Marshal(pb) + out = append(out, data) + } } w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(out) } -// ---------------- Snapshot File Handlers ---------------- - -func (api *API) loadSnapshot(w http.ResponseWriter, r *http.Request) { - var req SnapshotFileRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil || req.Path == "" { - http.Error(w, "path required", http.StatusBadRequest) +func (api *API) getResourceHandler(w http.ResponseWriter, r *http.Request, typ resourcev3.Type) { + if r.Method != http.MethodGet { + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) return } - if _, err := os.Stat(req.Path); os.IsNotExist(err) { - http.Error(w, "file not found", http.StatusBadRequest) + w.Header().Set("Content-Type", "application/json") + + name := r.URL.Query().Get("name") + if name == "" { + http.Error(w, "name query parameter required", http.StatusBadRequest) return } - if err := api.Manager.LoadSnapshotFromFile(req.Path); err != nil { - http.Error(w, "failed to load snapshot", http.StatusInternalServerError) + res, err := api.Manager.GetResource(name, typ) + if err != nil { + http.Error(w, fmt.Sprintf("resource not found: %v", err), http.StatusNotFound) return } - w.WriteHeader(http.StatusOK) -} - -func (api *API) saveSnapshot(w http.ResponseWriter, r *http.Request) { - var req SnapshotFileRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil || req.Path == "" { - http.Error(w, "path required", http.StatusBadRequest) + // Marshal using protojson for full nested fields + if pb, ok := res.(interface{ ProtoReflect() protoreflect.Message }); ok { + data, err := protojson.Marshal(pb) + if err != nil { + http.Error(w, fmt.Sprintf("failed to marshal protobuf: %v", err), 
http.StatusInternalServerError) + return + } + w.WriteHeader(http.StatusOK) + w.Write(data) return } - if err := api.Manager.SaveSnapshotToFile(req.Path); err != nil { - http.Error(w, "failed to save snapshot", http.StatusInternalServerError) - return - } - - w.WriteHeader(http.StatusOK) + // fallback for non-proto resources + http.Error(w, "resource is not a protobuf message", http.StatusInternalServerError) } diff --git a/internal/snapshot.go b/internal/snapshot.go index ba72f84..5082e37 100644 --- a/internal/snapshot.go +++ b/internal/snapshot.go @@ -3,15 +3,30 @@ import ( "context" "encoding/json" + "fmt" "os" "time" clusterv3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + endpointv3 "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + listenerv3 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" routev3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/jwt_authn/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/lua/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/oauth2/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/listener/tls_inspector/v3" + secretv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" + + // Blank import registering the runtime service proto types + _ "github.com/envoyproxy/go-control-plane/envoy/service/runtime/v3" "github.com/envoyproxy/go-control-plane/pkg/cache/types" cachev3 "github.com/envoyproxy/go-control-plane/pkg/cache/v3" resourcev3 "github.com/envoyproxy/go-control-plane/pkg/resource/v3" + "google.golang.org/protobuf/encoding/protojson" "google.golang.org/protobuf/types/known/durationpb" + yaml "gopkg.in/yaml.v3" ) // SnapshotManager wraps a SnapshotCache
and provides file loading/modifying @@ -28,138 +43,193 @@ } } -// LoadSnapshotFromFile loads a snapshot from a JSON file -func (sm *SnapshotManager) LoadSnapshotFromFile(filePath string) error { +// YamlResources is a helper struct to unmarshal the common Envoy YAML file structure +type YamlResources struct { + Resources []yaml.Node `yaml:"resources"` +} + +func (sm *SnapshotManager) LoadSnapshotFromFile(filePath string) (map[resourcev3.Type][]types.Resource, error) { data, err := os.ReadFile(filePath) if err != nil { - return err + return nil, fmt.Errorf("failed to read file: %w", err) } - var raw map[string][]json.RawMessage - if err := json.Unmarshal(data, &raw); err != nil { - return err + var raw interface{} + if err := yaml.Unmarshal(data, &raw); err != nil { + return nil, fmt.Errorf("failed to unmarshal YAML/JSON file %s: %w", filePath, err) } resources := make(map[resourcev3.Type][]types.Resource) - for typStr, arr := range raw { - typ := resourcev3.Type(typStr) - for _, r := range arr { - switch typ { - case resourcev3.ClusterType: - var c clusterv3.Cluster - if err := json.Unmarshal(r, &c); err != nil { + var walk func(node interface{}) error + walk = func(node interface{}) error { + switch v := node.(type) { + case map[string]interface{}: + if typStr, ok := v["@type"].(string); ok { + typ := resourcev3.Type(typStr) + + // only process known top-level xDS resources + switch typ { + case resourcev3.ClusterType, + resourcev3.RouteType, + resourcev3.ListenerType, + resourcev3.EndpointType, + resourcev3.SecretType, + resourcev3.RuntimeType: + + // Remove @type before unmarshalling + delete(v, "@type") + + jsonBytes, err := json.Marshal(v) + if err != nil { + return fmt.Errorf("failed to marshal resource node to JSON: %w", err) + } + + fmt.Printf("Detected resource type: %s\n", typ) + + switch typ { + case resourcev3.ClusterType: + var c clusterv3.Cluster + if err := protojson.Unmarshal(jsonBytes, &c); err != nil { + return fmt.Errorf("failed to unmarshal 
Cluster: %w", err) + } + resources[typ] = append(resources[typ], &c) + + case resourcev3.RouteType: + var rt routev3.RouteConfiguration + if err := protojson.Unmarshal(jsonBytes, &rt); err != nil { + return fmt.Errorf("failed to unmarshal RouteConfiguration: %w", err) + } + resources[typ] = append(resources[typ], &rt) + + case resourcev3.ListenerType: + var l listenerv3.Listener + if err := protojson.Unmarshal(jsonBytes, &l); err != nil { + return fmt.Errorf("failed to unmarshal Listener: %w", err) + } + resources[typ] = append(resources[typ], &l) + + case resourcev3.EndpointType: + var eds endpointv3.ClusterLoadAssignment + if err := protojson.Unmarshal(jsonBytes, &eds); err != nil { + return fmt.Errorf("failed to unmarshal ClusterLoadAssignment: %w", err) + } + resources[typ] = append(resources[typ], &eds) + + case resourcev3.SecretType: + var sec secretv3.Secret + if err := protojson.Unmarshal(jsonBytes, &sec); err != nil { + return fmt.Errorf("failed to unmarshal Secret: %w", err) + } + resources[typ] = append(resources[typ], &sec) + } + default: + // skip nested extension/filter types (handled inside parent) + fmt.Printf("Skipping nested type: %s\n", typStr) + } + } + + // recurse into children + for _, child := range v { + if err := walk(child); err != nil { return err } - resources[typ] = append(resources[typ], &c) - case resourcev3.RouteType: - var rt routev3.RouteConfiguration - if err := json.Unmarshal(r, &rt); err != nil { + } + + case []interface{}: + for _, item := range v { + if err := walk(item); err != nil { return err } - resources[typ] = append(resources[typ], &rt) - default: - // skip unknown types } } + return nil } - snap, _ := cachev3.NewSnapshot("snap-from-file", resources) - return sm.Cache.SetSnapshot(context.TODO(), sm.NodeID, snap) + if err := walk(raw); err != nil { + return nil, err + } + + return resources, nil } +// SetSnapshot sets a full snapshot +func (sm *SnapshotManager) SetSnapshot(ctx context.Context, version string, resources 
map[resourcev3.Type][]types.Resource) error { + snap, err := cachev3.NewSnapshot(version, resources) + if err != nil { + return fmt.Errorf("failed to create snapshot: %w", err) + } + return sm.Cache.SetSnapshot(ctx, sm.NodeID, snap) +} + +// ---------------- Add / Remove / List ---------------- + // AddCluster adds a cluster to the snapshot func (sm *SnapshotManager) AddCluster(cluster *clusterv3.Cluster) error { - snap, err := sm.Cache.GetSnapshot(sm.NodeID) - var clusters []types.Resource - var routes []types.Resource - if err != nil { - clusters = []types.Resource{} - routes = []types.Resource{} - } else { - // Convert map to slice - clusters = mapToSlice(snap.GetResources(string(resourcev3.ClusterType))) - routes = mapToSlice(snap.GetResources(string(resourcev3.RouteType))) - } + snap, _ := sm.Cache.GetSnapshot(sm.NodeID) + clusters := mapToSlice(snap.GetResources(string(resourcev3.ClusterType))) + routes := mapToSlice(snap.GetResources(string(resourcev3.RouteType))) + listeners := mapToSlice(snap.GetResources(string(resourcev3.ListenerType))) clusters = append(clusters, cluster) newSnap, _ := cachev3.NewSnapshot( "snap-"+cluster.GetName(), map[resourcev3.Type][]types.Resource{ - resourcev3.ClusterType: clusters, - resourcev3.RouteType: routes, + resourcev3.ClusterType: clusters, + resourcev3.RouteType: routes, + resourcev3.ListenerType: listeners, }, ) - return sm.Cache.SetSnapshot(context.TODO(), sm.NodeID, newSnap) } // AddRoute adds a route configuration to the snapshot func (sm *SnapshotManager) AddRoute(route *routev3.RouteConfiguration) error { - snap, err := sm.Cache.GetSnapshot(sm.NodeID) - var clusters []types.Resource - var routes []types.Resource - if err != nil { - clusters = []types.Resource{} - routes = []types.Resource{} - } else { - clusters = mapToSlice(snap.GetResources(string(resourcev3.ClusterType))) - routes = mapToSlice(snap.GetResources(string(resourcev3.RouteType))) - } + snap, _ := sm.Cache.GetSnapshot(sm.NodeID) + clusters := 
mapToSlice(snap.GetResources(string(resourcev3.ClusterType))) + routes := mapToSlice(snap.GetResources(string(resourcev3.RouteType))) + listeners := mapToSlice(snap.GetResources(string(resourcev3.ListenerType))) routes = append(routes, route) newSnap, _ := cachev3.NewSnapshot( "snap-"+route.GetName(), map[resourcev3.Type][]types.Resource{ - resourcev3.ClusterType: clusters, - resourcev3.RouteType: routes, + resourcev3.ClusterType: clusters, + resourcev3.RouteType: routes, + resourcev3.ListenerType: listeners, }, ) - return sm.Cache.SetSnapshot(context.TODO(), sm.NodeID, newSnap) } -// RemoveRoute removes a route configuration by name -func (sm *SnapshotManager) RemoveRoute(name string) error { - snap, err := sm.Cache.GetSnapshot(sm.NodeID) - if err != nil { - return err - } - - // Keep clusters unchanged +// AddListener adds a listener to the snapshot +func (sm *SnapshotManager) AddListener(listener *listenerv3.Listener) error { + snap, _ := sm.Cache.GetSnapshot(sm.NodeID) clusters := mapToSlice(snap.GetResources(string(resourcev3.ClusterType))) - routes := []types.Resource{} + routes := mapToSlice(snap.GetResources(string(resourcev3.RouteType))) + listeners := mapToSlice(snap.GetResources(string(resourcev3.ListenerType))) - // Filter routes: keep only those that do not match the given name - for _, r := range snap.GetResources(string(resourcev3.RouteType)) { - if rt, ok := r.(*routev3.RouteConfiguration); ok && rt.GetName() != name { - routes = append(routes, rt) - } - } + listeners = append(listeners, listener) - // Create a new snapshot with the filtered route list newSnap, _ := cachev3.NewSnapshot( - "snap-remove-route-"+name, + "snap-"+listener.GetName(), map[resourcev3.Type][]types.Resource{ - resourcev3.ClusterType: clusters, - resourcev3.RouteType: routes, + resourcev3.ClusterType: clusters, + resourcev3.RouteType: routes, + resourcev3.ListenerType: listeners, }, ) - return sm.Cache.SetSnapshot(context.TODO(), sm.NodeID, newSnap) } // RemoveCluster removes 
a cluster by name func (sm *SnapshotManager) RemoveCluster(name string) error { - snap, err := sm.Cache.GetSnapshot(sm.NodeID) - if err != nil { - return err - } - + snap, _ := sm.Cache.GetSnapshot(sm.NodeID) clusters := []types.Resource{} routes := mapToSlice(snap.GetResources(string(resourcev3.RouteType))) + listeners := mapToSlice(snap.GetResources(string(resourcev3.ListenerType))) for _, r := range snap.GetResources(string(resourcev3.ClusterType)) { if c, ok := r.(*clusterv3.Cluster); ok && c.GetName() != name { @@ -170,15 +240,155 @@ newSnap, _ := cachev3.NewSnapshot( "snap-remove-"+name, map[resourcev3.Type][]types.Resource{ - resourcev3.ClusterType: clusters, - resourcev3.RouteType: routes, + resourcev3.ClusterType: clusters, + resourcev3.RouteType: routes, + resourcev3.ListenerType: listeners, }, ) - return sm.Cache.SetSnapshot(context.TODO(), sm.NodeID, newSnap) } -// SaveSnapshotToFile saves snapshot to a JSON file +// RemoveRoute removes a route by name +func (sm *SnapshotManager) RemoveRoute(name string) error { + snap, _ := sm.Cache.GetSnapshot(sm.NodeID) + clusters := mapToSlice(snap.GetResources(string(resourcev3.ClusterType))) + routes := []types.Resource{} + listeners := mapToSlice(snap.GetResources(string(resourcev3.ListenerType))) + + for _, r := range snap.GetResources(string(resourcev3.RouteType)) { + if rt, ok := r.(*routev3.RouteConfiguration); ok && rt.GetName() != name { + routes = append(routes, rt) + } + } + + newSnap, _ := cachev3.NewSnapshot( + "snap-remove-route-"+name, + map[resourcev3.Type][]types.Resource{ + resourcev3.ClusterType: clusters, + resourcev3.RouteType: routes, + resourcev3.ListenerType: listeners, + }, + ) + return sm.Cache.SetSnapshot(context.TODO(), sm.NodeID, newSnap) +} + +// RemoveListener removes a listener by name +func (sm *SnapshotManager) RemoveListener(name string) error { + snap, _ := sm.Cache.GetSnapshot(sm.NodeID) + clusters := mapToSlice(snap.GetResources(string(resourcev3.ClusterType))) + routes := 
mapToSlice(snap.GetResources(string(resourcev3.RouteType))) + listeners := []types.Resource{} + + for _, r := range snap.GetResources(string(resourcev3.ListenerType)) { + if l, ok := r.(*listenerv3.Listener); ok && l.GetName() != name { + listeners = append(listeners, l) + } + } + + newSnap, _ := cachev3.NewSnapshot( + "snap-remove-listener-"+name, + map[resourcev3.Type][]types.Resource{ + resourcev3.ClusterType: clusters, + resourcev3.RouteType: routes, + resourcev3.ListenerType: listeners, + }, + ) + return sm.Cache.SetSnapshot(context.TODO(), sm.NodeID, newSnap) +} + +// ---------------- List ---------------- + +func (sm *SnapshotManager) ListClusters() ([]*clusterv3.Cluster, error) { + snap, err := sm.Cache.GetSnapshot(sm.NodeID) + if err != nil { + return nil, err + } + clusters := []*clusterv3.Cluster{} + for _, r := range snap.GetResources(string(resourcev3.ClusterType)) { + if c, ok := r.(*clusterv3.Cluster); ok { + clusters = append(clusters, c) + } + } + return clusters, nil +} + +func (sm *SnapshotManager) ListRoutes() ([]*routev3.RouteConfiguration, error) { + snap, err := sm.Cache.GetSnapshot(sm.NodeID) + if err != nil { + return nil, err + } + routes := []*routev3.RouteConfiguration{} + for _, r := range snap.GetResources(string(resourcev3.RouteType)) { + if rt, ok := r.(*routev3.RouteConfiguration); ok { + routes = append(routes, rt) + } + } + return routes, nil +} + +func (sm *SnapshotManager) ListListeners() ([]*listenerv3.Listener, error) { + snap, err := sm.Cache.GetSnapshot(sm.NodeID) + if err != nil { + return nil, err + } + listeners := []*listenerv3.Listener{} + for _, r := range snap.GetResources(string(resourcev3.ListenerType)) { + if l, ok := r.(*listenerv3.Listener); ok { + listeners = append(listeners, l) + } + } + return listeners, nil +} + +// ---------------- Get ---------------- + +func (sm *SnapshotManager) GetCluster(name string) (*clusterv3.Cluster, error) { + snap, err := sm.Cache.GetSnapshot(sm.NodeID) + if err != nil { + return 
nil, err + } + r, ok := snap.GetResources(string(resourcev3.ClusterType))[name] + if !ok { + return nil, fmt.Errorf("cluster %s not found", name) + } + if c, ok := r.(*clusterv3.Cluster); ok { + return c, nil + } + return nil, fmt.Errorf("resource %s found, but is not a Cluster", name) +} + +func (sm *SnapshotManager) GetRoute(name string) (*routev3.RouteConfiguration, error) { + snap, err := sm.Cache.GetSnapshot(sm.NodeID) + if err != nil { + return nil, err + } + r, ok := snap.GetResources(string(resourcev3.RouteType))[name] + if !ok { + return nil, fmt.Errorf("route %s not found", name) + } + if rt, ok := r.(*routev3.RouteConfiguration); ok { + return rt, nil + } + return nil, fmt.Errorf("resource %s found, but is not a RouteConfiguration", name) +} + +func (sm *SnapshotManager) GetListener(name string) (*listenerv3.Listener, error) { + snap, err := sm.Cache.GetSnapshot(sm.NodeID) + if err != nil { + return nil, err + } + r, ok := snap.GetResources(string(resourcev3.ListenerType))[name] + if !ok { + return nil, fmt.Errorf("listener %s not found", name) + } + if l, ok := r.(*listenerv3.Listener); ok { + return l, nil + } + return nil, fmt.Errorf("resource %s found, but is not a Listener", name) +} + +// ---------------- Save ---------------- + func (sm *SnapshotManager) SaveSnapshotToFile(filePath string) error { snap, err := sm.Cache.GetSnapshot(sm.NodeID) if err != nil { @@ -192,12 +402,16 @@ out[string(resourcev3.ClusterType)] = append(out[string(resourcev3.ClusterType)], c) } } - for _, r := range snap.GetResources(string(resourcev3.RouteType)) { if rt, ok := r.(*routev3.RouteConfiguration); ok { out[string(resourcev3.RouteType)] = append(out[string(resourcev3.RouteType)], rt) } } + for _, r := range snap.GetResources(string(resourcev3.ListenerType)) { + if l, ok := r.(*listenerv3.Listener); ok { + out[string(resourcev3.ListenerType)] = append(out[string(resourcev3.ListenerType)], l) + } + } data, err := json.MarshalIndent(out, "", " ") if err != nil { @@ 
-207,11 +421,10 @@ return os.WriteFile(filePath, data, 0644) } -// ----------------- Helpers ----------------- +// ---------------- Helpers ---------------- -// Convert map[name]Resource → slice of Resource func mapToSlice(m map[string]types.Resource) []types.Resource { - out := make([]types.Resource, 0, len(m)) + out := []types.Resource{} for _, r := range m { out = append(out, r) } @@ -254,3 +467,157 @@ }, } } + +// NewListener creates a simple TCP listener for a given port +func NewListener(name string, port uint32) *listenerv3.Listener { + return &listenerv3.Listener{ + Name: name, + Address: &corev3.Address{ + Address: &corev3.Address_SocketAddress{ + SocketAddress: &corev3.SocketAddress{ + Protocol: corev3.SocketAddress_TCP, + Address: "0.0.0.0", + PortSpecifier: &corev3.SocketAddress_PortValue{ + PortValue: port, + }, + }, + }, + }, + } +} + +// ---------------- Generic Helpers for all xDS types ---------------- + +// AddResource adds any resource to the snapshot dynamically +func (sm *SnapshotManager) AddResource(resource types.Resource, typ resourcev3.Type) error { + snap, _ := sm.Cache.GetSnapshot(sm.NodeID) + + // Convert existing resources to slices + clusters := mapToSlice(snap.GetResources(string(resourcev3.ClusterType))) + routes := mapToSlice(snap.GetResources(string(resourcev3.RouteType))) + listeners := mapToSlice(snap.GetResources(string(resourcev3.ListenerType))) + endpoints := mapToSlice(snap.GetResources(string(resourcev3.EndpointType))) + secrets := mapToSlice(snap.GetResources(string(resourcev3.SecretType))) + runtimes := mapToSlice(snap.GetResources(string(resourcev3.RuntimeType))) + extConfigs := mapToSlice(snap.GetResources(string(resourcev3.ExtensionConfigType))) + + // Append to the appropriate slice + switch typ { + case resourcev3.ClusterType: + clusters = append(clusters, resource) + case resourcev3.RouteType: + routes = append(routes, resource) + case resourcev3.ListenerType: + listeners = append(listeners, resource) + case 
resourcev3.EndpointType: + endpoints = append(endpoints, resource) + case resourcev3.SecretType: + secrets = append(secrets, resource) + case resourcev3.RuntimeType: + runtimes = append(runtimes, resource) + case resourcev3.ExtensionConfigType: + extConfigs = append(extConfigs, resource) + default: + return fmt.Errorf("unsupported resource type: %s", typ) + } + + newSnap, _ := cachev3.NewSnapshot( + "snap-generic-"+resource.(interface{ GetName() string }).GetName(), + map[resourcev3.Type][]types.Resource{ + resourcev3.ClusterType: clusters, + resourcev3.RouteType: routes, + resourcev3.ListenerType: listeners, + resourcev3.EndpointType: endpoints, + resourcev3.SecretType: secrets, + resourcev3.RuntimeType: runtimes, + resourcev3.ExtensionConfigType: extConfigs, + }, + ) + return sm.Cache.SetSnapshot(context.TODO(), sm.NodeID, newSnap) +} + +// RemoveResource removes any resource by name dynamically +func (sm *SnapshotManager) RemoveResource(name string, typ resourcev3.Type) error { + snap, _ := sm.Cache.GetSnapshot(sm.NodeID) + + // Convert all to slices + clusters := mapToSlice(snap.GetResources(string(resourcev3.ClusterType))) + routes := mapToSlice(snap.GetResources(string(resourcev3.RouteType))) + listeners := mapToSlice(snap.GetResources(string(resourcev3.ListenerType))) + endpoints := mapToSlice(snap.GetResources(string(resourcev3.EndpointType))) + secrets := mapToSlice(snap.GetResources(string(resourcev3.SecretType))) + runtimes := mapToSlice(snap.GetResources(string(resourcev3.RuntimeType))) + extConfigs := mapToSlice(snap.GetResources(string(resourcev3.ExtensionConfigType))) + + // Filter the target type + switch typ { + case resourcev3.ClusterType: + clusters = filterResourcesByName[*clusterv3.Cluster](clusters, name) + case resourcev3.RouteType: + routes = filterResourcesByName[*routev3.RouteConfiguration](routes, name) + case resourcev3.ListenerType: + listeners = filterResourcesByName[*listenerv3.Listener](listeners, name) + case 
resourcev3.EndpointType: + endpoints = filterResourcesByName[types.Resource](endpoints, name) // ClusterLoadAssignment + case resourcev3.SecretType: + secrets = filterResourcesByName[types.Resource](secrets, name) // Secret + case resourcev3.RuntimeType: + runtimes = filterResourcesByName[types.Resource](runtimes, name) + case resourcev3.ExtensionConfigType: + extConfigs = filterResourcesByName[types.Resource](extConfigs, name) + default: + return fmt.Errorf("unsupported resource type: %s", typ) + } + + newSnap, _ := cachev3.NewSnapshot( + "snap-remove-generic-"+name, + map[resourcev3.Type][]types.Resource{ + resourcev3.ClusterType: clusters, + resourcev3.RouteType: routes, + resourcev3.ListenerType: listeners, + resourcev3.EndpointType: endpoints, + resourcev3.SecretType: secrets, + resourcev3.RuntimeType: runtimes, + resourcev3.ExtensionConfigType: extConfigs, + }, + ) + return sm.Cache.SetSnapshot(context.TODO(), sm.NodeID, newSnap) +} + +// ListResources returns all resources of a given type +func (sm *SnapshotManager) ListResources(typ resourcev3.Type) ([]types.Resource, error) { + snap, err := sm.Cache.GetSnapshot(sm.NodeID) + if err != nil { + return nil, err + } + return mapToSlice(snap.GetResources(string(typ))), nil +} + +// GetResource retrieves a resource by name and type +func (sm *SnapshotManager) GetResource(name string, typ resourcev3.Type) (types.Resource, error) { + snap, err := sm.Cache.GetSnapshot(sm.NodeID) + if err != nil { + return nil, err + } + r, ok := snap.GetResources(string(typ))[name] + if !ok { + return nil, fmt.Errorf("%s %s not found", typ, name) + } + return r, nil +} + +// ---------------- Generic filter helper ---------------- +func filterResourcesByName[T any](resources []types.Resource, name string) []types.Resource { + filtered := []types.Resource{} + for _, r := range resources { + if getNameFunc, ok := r.(interface{ GetName() string }); ok { + if getNameFunc.GetName() != name { + filtered = append(filtered, r) + } + } else { 
+ // fallback, include unknown type + filtered = append(filtered, r) + } + } + return filtered +} diff --git a/main.go b/main.go index b60ea9f..73f8194 100644 --- a/main.go +++ b/main.go @@ -6,71 +6,153 @@ "fmt" "net/http" "os" + "path/filepath" // ADDED: for directory and file path operations + "strings" // ADDED: for string manipulation "github.com/envoyproxy/go-control-plane/pkg/cache/types" - "github.com/envoyproxy/go-control-plane/pkg/cache/v3" cachev3 "github.com/envoyproxy/go-control-plane/pkg/cache/v3" - "github.com/envoyproxy/go-control-plane/pkg/log" resourcev3 "github.com/envoyproxy/go-control-plane/pkg/resource/v3" "github.com/envoyproxy/go-control-plane/pkg/server/v3" "github.com/envoyproxy/go-control-plane/pkg/test/v3" + "k8s.io/klog/v2" "envoy-control-plane/internal" ) var ( - logger *log.DefaultLogger + logger *internal.DefaultLogger port uint nodeID string restPort uint - snapshotFile string + snapshotFile string // Single file flag (kept for backwards compatibility) + configDir string // NEW FLAG: Directory containing config files ) func init() { - logger = log.NewDefaultLogger() + logger = internal.NewDefaultLogger() + klog.InitFlags(nil) flag.UintVar(&port, "port", 18000, "xDS management server port") flag.StringVar(&nodeID, "nodeID", "test-id", "Node ID") flag.UintVar(&restPort, "rest-port", 8080, "REST API server port") - flag.StringVar(&snapshotFile, "snapshot-file", "", "Optional initial snapshot JSON file") + // Keeping snapshotFile for backwards compatibility, though configDir is preferred for multiple files + flag.StringVar(&snapshotFile, "snapshot-file", "", "Optional initial snapshot JSON/YAML file (single file)") + flag.StringVar(&configDir, "config-dir", "", "Optional directory containing multiple config files (.yaml, .json)") // NEW +} + +// loadConfigFiles iterates over the specified directory and loads all .yaml or .json files. 
+func loadConfigFiles(manager *internal.SnapshotManager, dir string) error { + logger.Infof("loading configuration files from directory: %s", dir) + + files, err := os.ReadDir(dir) + if err != nil { + return fmt.Errorf("failed to read directory %s: %w", dir, err) + } + + var resourceFiles map[string][]types.Resource = make(map[string][]types.Resource) + for _, file := range files { + if file.IsDir() { + continue + } + + fileName := file.Name() + // Only process files with .yaml, .yml, or .json extensions + if strings.HasSuffix(fileName, ".yaml") || strings.HasSuffix(fileName, ".yml") || strings.HasSuffix(fileName, ".json") { + filePath := filepath.Join(dir, fileName) + logger.Infof(" -> loading config file: %s", filePath) + + var rf map[string][]types.Resource + if rf, err = manager.LoadSnapshotFromFile(filePath); err != nil { + return fmt.Errorf("failed to load snapshot from file %s: %w", filePath, err) + } + for k, v := range rf { + resourceFiles[k] = append(resourceFiles[k], v...) + } + logger.Infof("loaded %d resources from %s", len(rf), filePath) + // Note: SnapshotManager.LoadSnapshotFromFile must be implemented to *add* + // resources to the existing snapshot, not replace it entirely, for this + // iteration to work correctly. (Assuming the implementation you showed + // previously, which replaced the resources, is now updated to merge/add them, + // or that LoadSnapshotFromFile always creates a complete snapshot.) + // Since your `snapshot.go` was updated to load a *complete* snapshot from a file, + // we will rely on the user to provide files that contain complete, non-conflicting + // configurations. If you want true file merging, the internal logic needs to change. + // However, for this change, we'll keep the current behavior of LoadSnapshotFromFile + // and proceed. 
+ } + } + if err := manager.SetSnapshot(context.TODO(), "snap-from-file", resourceFiles); err != nil { + return fmt.Errorf("failed to set combined snapshot from files: %w", err) + } + logger.Infof("successfully loaded %d configuration files from %s", len(files), dir) + return nil } func main() { flag.Parse() + // Optional: Configure klog to flush logs when the application exits + defer klog.Flush() // Create snapshot cache - cache := cache.NewSnapshotCache(false, cache.IDHash{}, logger) + cache := cachev3.NewSnapshotCache(false, cachev3.IDHash{}, logger) // Create SnapshotManager manager := internal.NewSnapshotManager(cache, nodeID) - // Load initial snapshot from file if provided - if snapshotFile != "" { + loadedConfigs := false + // OPTION 1: Load multiple config files from a directory + if configDir != "" { + if err := loadConfigFiles(manager, configDir); err != nil { + logger.Errorf("failed to load configs from directory: %v", err) + os.Exit(1) + } + + loadedConfigs = true + logger.Infof("successfully loaded all configuration files from %s", configDir) + } + + // OPTION 2: Load single initial snapshot file (backwards compatible/single file mode) + if snapshotFile != "" && !loadedConfigs { if _, err := os.Stat(snapshotFile); err == nil { - if err := manager.LoadSnapshotFromFile(snapshotFile); err != nil { + if resources, err := manager.LoadSnapshotFromFile(snapshotFile); err != nil { logger.Errorf("failed to load snapshot from file: %v", err) os.Exit(1) + } else { + if err := manager.SetSnapshot(context.TODO(), "snap-from-file", resources); err != nil { + logger.Errorf("failed to set loaded snapshot: %v", err) + os.Exit(1) + } } + loadedConfigs = true logger.Infof("loaded initial snapshot from %s", snapshotFile) } else { - logger.Warnf("snapshot file not found: %s, starting empty snapshot", snapshotFile) + logger.Warnf("snapshot file not found: %s", snapshotFile) } } - // Ensure snapshot is consistent or create empty snapshot - snap, err := 
cache.GetSnapshot(nodeID) - if err != nil { - snap, _ = cachev3.NewSnapshot("snap-init", map[resourcev3.Type][]types.Resource{ - resourcev3.ClusterType: {}, - resourcev3.RouteType: {}, - }) + // Ensure snapshot is consistent or create empty snapshot if no configs were loaded + snap, err := manager.Cache.GetSnapshot(nodeID) + if err != nil || !loadedConfigs { + // If an error occurred or no files were loaded, set an initial empty snapshot + if !loadedConfigs { + logger.Warnf("no configuration files loaded, creating empty snapshot.") + snap, _ = cachev3.NewSnapshot("snap-init", map[resourcev3.Type][]types.Resource{ + resourcev3.ClusterType: {}, + resourcev3.RouteType: {}, + resourcev3.ListenerType: {}, // Assuming ListenerType support is added in internal + }) + } else { + // This case should ideally not happen if loading was successful, but handles initial cache miss. + logger.Warnf("no snapshot found in cache, creating empty snapshot.") + } + if err := cache.SetSnapshot(context.Background(), nodeID, snap); err != nil { logger.Errorf("failed to set initial snapshot: %v", err) os.Exit(1) } } - logger.Infof("xDS snapshot ready: %+v", snap) + logger.Infof("xDS snapshot ready: version %s", snap.GetVersion(string(resourcev3.ClusterType))) // Use a specific type to show version // Start xDS gRPC server ctx := context.Background() @@ -79,7 +161,7 @@ go internal.RunServer(srv, port) // your existing RunServer implementation // Start REST API server - api := internal.NewAPI(cache, nodeID) + api := internal.NewAPI(manager) mux := http.NewServeMux() api.RegisterRoutes(mux)