diff --git a/data/cds.yaml b/data/cds.yaml
deleted file mode 100644
index f7b686d..0000000
--- a/data/cds.yaml
+++ /dev/null
@@ -1,449 +0,0 @@
-resources:
-- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
-  name: _acme_renewer
-  connect_timeout: 0.2s
-  type: STATIC
-  lb_policy: ROUND_ROBIN
-  load_assignment:
-    cluster_name: acme_renewer
-    endpoints:
-    - lb_endpoints:
-      - endpoint:
-          health_check_config:
-            port_value: 8888
-          address:
-            socket_address:
-              address: 172.17.0.1 #docker bridge
-              port_value: 8888
-- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
-  name: _git_bucket
-  connect_timeout: 0.2s
-  type: STATIC
-  lb_policy: ROUND_ROBIN
-  load_assignment:
-    cluster_name: git_bucket
-    endpoints:
-    - lb_endpoints:
-      - endpoint:
-          health_check_config:
-            port_value: 8088
-          address:
-            socket_address:
-              address: 172.17.0.1 #docker bridge
-              port_value: 8088
-- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
-  name: _code_server
-  connect_timeout: 0.2s
-  type: STATIC
-  lb_policy: ROUND_ROBIN
-  load_assignment:
-    cluster_name: code_server
-    endpoints:
-    - lb_endpoints:
-      - endpoint:
-          health_check_config:
-            port_value: 8080
-          address:
-            socket_address:
-              address: 192.168.68.113 #docker bridge
-              port_value: 8080
-  transport_socket:
-    name: envoy.transport_sockets.tls
-    typed_config:
-      "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext
-      common_tls_context:
-        validation_context:
-          trusted_ca:
-            filename: /etc/certs/upstream/vscode/root.crt
-- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
-  name: _docker_registry
-  connect_timeout: 0.2s
-  type: STATIC
-  lb_policy: ROUND_ROBIN
-  load_assignment:
-    cluster_name: docker_registry
-    endpoints:
-    - lb_endpoints:
-      - endpoint:
-          health_check_config:
-            port_value: 5555
-          address:
-            socket_address:
-              address: 172.17.0.1 #docker bridge
-              port_value: 5555
-  transport_socket:
-    name: envoy.transport_sockets.tls
-    typed_config:
-      "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext
-      common_tls_context:
-        tls_certificates:
-        - certificate_chain: { filename: "/etc/certs/downstream/docker.jerxie.com/fullchain.pem" }
-          private_key: { filename: "/etc/certs/downstream/docker.jerxie.com/privkey.pem" }
-        # validation_context:
-        #   trusted_ca:
-        #     filename: "/etc/certs/docker.jerxie.com/chain1.pem"
-- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
-  name: _nas_service
-  connect_timeout: 0.2s
-  type: STATIC
-  lb_policy: ROUND_ROBIN
-  load_assignment:
-    cluster_name: nas
-    endpoints:
-    - lb_endpoints:
-      - endpoint:
-          health_check_config:
-            port_value: 5000
-          address:
-            socket_address:
-              address: 172.17.0.1 #docker bridge
-              port_value: 5000
-- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
-  name: _plex_server
-  connect_timeout: 0.2s
-  type: STATIC
-  lb_policy: ROUND_ROBIN
-  load_assignment:
-    cluster_name: nas
-    endpoints:
-    - lb_endpoints:
-      - endpoint:
-          health_check_config:
-            port_value: 32400
-          address:
-            socket_address:
-              address: 192.168.68.113
-              port_value: 32400
-- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
-  name: _nas_video
-  connect_timeout: 0.2s
-  type: STATIC
-  lb_policy: ROUND_ROBIN
-  load_assignment:
-    cluster_name: nas
-    endpoints:
-    - lb_endpoints:
-      - endpoint:
-          health_check_config:
-            port_value: 9007
-          address:
-            socket_address:
-              address: 127.0.0.1 #localhost
-              port_value: 9007
-- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
-  name: _nas_audio
-  connect_timeout: 0.2s
-  type: STATIC
-  lb_policy: ROUND_ROBIN
-  load_assignment:
-    cluster_name: nas
-    endpoints:
-    - lb_endpoints:
-      - endpoint:
-          health_check_config:
-            port_value: 8800
-          address:
-            socket_address:
-              address: 127.0.0.1 #localhost
-              port_value: 8800
-- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
-  name: _nas_note
-  connect_timeout: 0.2s
-  type: STATIC
-  lb_policy: ROUND_ROBIN
-  load_assignment:
-    cluster_name: nas
-    endpoints:
-    - lb_endpoints:
-      - endpoint:
-          health_check_config:
-            port_value: 9350
-          address:
-            socket_address:
-              address: 127.0.0.1 #localhost
-              port_value: 9350
-- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
-  name: _nas_camera
-  connect_timeout: 0.2s
-  type: STATIC
-  lb_policy: ROUND_ROBIN
-  load_assignment:
-    cluster_name: camera
-    endpoints:
-    - lb_endpoints:
-      - endpoint:
-          health_check_config:
-            port_value: 9900
-          address:
-            socket_address:
-              address: 127.0.0.1 #localhost
-              port_value: 9900
-- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
-  name: _nas_photo
-  connect_timeout: 0.2s
-  type: STATIC
-  lb_policy: ROUND_ROBIN
-  load_assignment:
-    cluster_name: nas
-    endpoints:
-    - lb_endpoints:
-      - endpoint:
-          health_check_config:
-            port_value: 5080
-          address:
-            socket_address:
-              address: 127.0.0.1 #localhost
-              port_value: 5080
-- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
-  name: _k8s_apiserver
-  connect_timeout: 1s
-  type: STRICT_DNS
-  lb_policy: ROUND_ROBIN
-  load_assignment:
-    cluster_name: apiserver
-    endpoints:
-    - lb_endpoints:
-      - endpoint: {health_check_config: { port_value: 16443}, address: { socket_address: { address: 192.168.68.139, port_value: 16443 }}} #192.168.68.254
-  transport_socket:
-    name: envoy.transport_sockets.tls
-    typed_config:
-      "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext
-      common_tls_context:
-        validation_context:
-          trusted_ca:
-            filename: /etc/certs/upstream/kubernetes/root.crt
-- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
-  name: _k8s_router
-  connect_timeout: 1s
-  type: STRICT_DNS
-  lb_policy: ROUND_ROBIN
-  load_assignment:
-    cluster_name: nginx
-    endpoints:
-    - lb_endpoints:
-      - endpoint: {health_check_config: { port_value: 32704}, address: { socket_address: { address: 192.168.68.139, port_value: 32704 }}}
-      # - endpoint: { address: { socket_address: { address: 192.168.68.114, port_value: 32542 }}}
-  # transport_socket:
-  #   name: envoy.transport_sockets.tls
-  #   typed_config:
-  #     "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext
-  #     common_tls_context:
-  #       validation_context:
-  #         trusted_ca:
-  #           filename: /etc/certs/kubernetes/root.crt
-- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
-  name: _3d_printer_console
-  connect_timeout: 2s
-  type: STRICT_DNS
-  lb_policy: ROUND_ROBIN
-  load_assignment:
-    cluster_name: printer
-    endpoints:
-    - lb_endpoints:
-      - endpoint:
-          health_check_config:
-            port_value: 5000
-          address:
-            socket_address:
-              address: octoprint
-              port_value: 5000
-- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
-  name: _3d_printer_camera
-  connect_timeout: 2s
-  type: STRICT_DNS
-  lb_policy: ROUND_ROBIN
-  load_assignment:
-    cluster_name: camera
-    endpoints:
-    - lb_endpoints:
-      - endpoint:
-          health_check_config:
-            port_value: 8080
-          address:
-            socket_address:
-              address: octoprint
-              port_value: 8080
-- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
-  name: _bitwarden_service
-  connect_timeout: 0.2s
-  type: STATIC
-  lb_policy: ROUND_ROBIN
-  load_assignment:
-    cluster_name: pwassword_manager
-    endpoints:
-    - lb_endpoints:
-      - endpoint:
-          health_check_config:
-            port_value: 10010
-          address:
-            socket_address:
-              address: 172.17.0.1
-              port_value: 10010
-- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
-  name: _homeassistant_service
-  connect_timeout: 0.2s
-  type: STRICT_DNS
-  lb_policy: ROUND_ROBIN
-  load_assignment:
-    cluster_name: homeassistant_manager
-    endpoints:
-    - lb_endpoints:
-      - endpoint:
-          health_check_config:
-            port_value: 8123
-          address:
-            socket_address:
-              address: 192.168.68.133
-              port_value: 8123
-- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
-  name: _portainer_ui
-  connect_timeout: 0.2s
-  type: STRICT_DNS
-  lb_policy: ROUND_ROBIN
-  load_assignment:
-    cluster_name: portainer_ui
-    endpoints:
-    - lb_endpoints:
-      - endpoint:
-          health_check_config:
-            port_value: 9000
-          address:
-            socket_address:
-              address: 192.168.68.161
-              port_value: 9000
-  # transport_socket:
-  #   name: envoy.transport_sockets.tls
-  #   typed_config:
-  #     "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext
-  #     common_tls_context:
-  #       validation_context:
-  #         trusted_ca:
-  #           filename: /etc/certs/upstream/portainer/root.crt
-# - "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
-#   name: _baby_buddy
-#   connect_timeout: 0.2s
-#   type: STRICT_DNS
-#   lb_policy: ROUND_ROBIN
-#   load_assignment:
-#     cluster_name: baby_buddy
-#     endpoints:
-#     - lb_endpoints:
-#       - endpoint:
-#           health_check_config:
-#             port_value: 8555
-#           address:
-#             socket_address:
-#               address: 192.168.68.106
-#               port_value: 8555
-- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
-  name: _grafana_ui
-  connect_timeout: 0.2s
-  type: STRICT_DNS
-  lb_policy: ROUND_ROBIN
-  load_assignment:
-    cluster_name: _grafana_ui
-    endpoints:
-    - lb_endpoints:
-      - endpoint:
-          health_check_config:
-            port_value: 3000
-          address:
-            socket_address:
-              address: 192.168.68.106
-              port_value: 3000
-- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
-  name: _auth_server
-  connect_timeout: 0.2s
-  type: STRICT_DNS
-  lb_policy: ROUND_ROBIN
-  load_assignment:
-    cluster_name: _auth_server
-    endpoints:
-    - lb_endpoints:
-      - endpoint:
-          health_check_config:
-            port_value: 5556
-          address:
-            socket_address:
-              address: 192.168.68.113
-              port_value: 5557
-- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
-  name: _ai_server
-  connect_timeout: 0.2s
-  type: STRICT_DNS
-  lb_policy: ROUND_ROBIN
-  load_assignment:
-    cluster_name: _ai_server
-    endpoints:
-    - lb_endpoints:
-      - endpoint:
-          health_check_config:
-            port_value: 3000
-          address:
-            socket_address:
-              address: 192.168.68.113
-              port_value: 3000
-- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
-  name: _pcb_server
-  connect_timeout: 0.2s
-  type: STRICT_DNS
-  lb_policy: ROUND_ROBIN
-  load_assignment:
-    cluster_name: _pcb_server
-    endpoints:
-    - lb_endpoints:
-      - endpoint:
-          health_check_config:
-            port_value: 8088
-          address:
-            socket_address:
-              address: 192.168.68.113
-              port_value: 8088
-- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
-  name: _ai_api_server
-  connect_timeout: 0.2s
-  type: STRICT_DNS
-  lb_policy: ROUND_ROBIN
-  load_assignment:
-    cluster_name: _ai_api_server
-    endpoints:
-    - lb_endpoints:
-      - endpoint:
-          health_check_config:
-            port_value: 8002
-          address:
-            socket_address:
-              address: 192.168.68.113
-              port_value: 8002
-- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
-  name: _ai_ui_server
-  connect_timeout: 0.2s
-  type: STRICT_DNS
-  lb_policy: ROUND_ROBIN
-  load_assignment:
-    cluster_name: _ai_ui_server
-    endpoints:
-    - lb_endpoints:
-      - endpoint:
-          health_check_config:
-            port_value: 8003
-          address:
-            socket_address:
-              address: 192.168.68.113
-              port_value: 8003
-- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
-  name: _monitor_server
-  connect_timeout: 0.2s
-  type: STRICT_DNS
-  lb_policy: ROUND_ROBIN
-  load_assignment:
-    cluster_name: _monitor_server
-    endpoints:
-    - lb_endpoints:
-      - endpoint:
-          health_check_config:
-            port_value: 9090
-          address:
-            socket_address:
-              address: 192.168.68.113
-              port_value: 9090
\ No newline at end of file
diff --git a/data/config/cds.yaml b/data/config/cds.yaml
new file mode 100644
index 0000000..f7b686d
--- /dev/null
+++ b/data/config/cds.yaml
@@ -0,0 +1,449 @@
+resources:
+- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
+  name: _acme_renewer
+  connect_timeout: 0.2s
+  type: STATIC
+  lb_policy: ROUND_ROBIN
+  load_assignment:
+    cluster_name: acme_renewer
+    endpoints:
+    - lb_endpoints:
+      - endpoint:
+          health_check_config:
+            port_value: 8888
+          address:
+            socket_address:
+              address: 172.17.0.1 #docker bridge
+              port_value: 8888
+- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
+  name: _git_bucket
+  connect_timeout: 0.2s
+  type: STATIC
+  lb_policy: ROUND_ROBIN
+  load_assignment:
+    cluster_name: git_bucket
+    endpoints:
+    - lb_endpoints:
+      - endpoint:
+          health_check_config:
+            port_value: 8088
+          address:
+            socket_address:
+              address: 172.17.0.1 #docker bridge
+              port_value: 8088
+- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
+  name: _code_server
+  connect_timeout: 0.2s
+  type: STATIC
+  lb_policy: ROUND_ROBIN
+  load_assignment:
+    cluster_name: code_server
+    endpoints:
+    - lb_endpoints:
+      - endpoint:
+          health_check_config:
+            port_value: 8080
+          address:
+            socket_address:
+              address: 192.168.68.113 #docker bridge
+              port_value: 8080
+  transport_socket:
+    name: envoy.transport_sockets.tls
+    typed_config:
+      "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext
+      common_tls_context:
+        validation_context:
+          trusted_ca:
+            filename: /etc/certs/upstream/vscode/root.crt
+- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
+  name: _docker_registry
+  connect_timeout: 0.2s
+  type: STATIC
+  lb_policy: ROUND_ROBIN
+  load_assignment:
+    cluster_name: docker_registry
+    endpoints:
+    - lb_endpoints:
+      - endpoint:
+          health_check_config:
+            port_value: 5555
+          address:
+            socket_address:
+              address: 172.17.0.1 #docker bridge
+              port_value: 5555
+  transport_socket:
+    name: envoy.transport_sockets.tls
+    typed_config:
+      "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext
+      common_tls_context:
+        tls_certificates:
+        - certificate_chain: { filename: "/etc/certs/downstream/docker.jerxie.com/fullchain.pem" }
+          private_key: { filename: "/etc/certs/downstream/docker.jerxie.com/privkey.pem" }
+        # validation_context:
+        #   trusted_ca:
+        #     filename: "/etc/certs/docker.jerxie.com/chain1.pem"
+- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
+  name: _nas_service
+  connect_timeout: 0.2s
+  type: STATIC
+  lb_policy: ROUND_ROBIN
+  load_assignment:
+    cluster_name: nas
+    endpoints:
+    - lb_endpoints:
+      - endpoint:
+          health_check_config:
+            port_value: 5000
+          address:
+            socket_address:
+              address: 172.17.0.1 #docker bridge
+              port_value: 5000
+- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
+  name: _plex_server
+  connect_timeout: 0.2s
+  type: STATIC
+  lb_policy: ROUND_ROBIN
+  load_assignment:
+    cluster_name: nas
+    endpoints:
+    - lb_endpoints:
+      - endpoint:
+          health_check_config:
+            port_value: 32400
+          address:
+            socket_address:
+              address: 192.168.68.113
+              port_value: 32400
+- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
+  name: _nas_video
+  connect_timeout: 0.2s
+  type: STATIC
+  lb_policy: ROUND_ROBIN
+  load_assignment:
+    cluster_name: nas
+    endpoints:
+    - lb_endpoints:
+      - endpoint:
+          health_check_config:
+            port_value: 9007
+          address:
+            socket_address:
+              address: 127.0.0.1 #localhost
+              port_value: 9007
+- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
+  name: _nas_audio
+  connect_timeout: 0.2s
+  type: STATIC
+  lb_policy: ROUND_ROBIN
+  load_assignment:
+    cluster_name: nas
+    endpoints:
+    - lb_endpoints:
+      - endpoint:
+          health_check_config:
+            port_value: 8800
+          address:
+            socket_address:
+              address: 127.0.0.1 #localhost
+              port_value: 8800
+- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
+  name: _nas_note
+  connect_timeout: 0.2s
+  type: STATIC
+  lb_policy: ROUND_ROBIN
+  load_assignment:
+    cluster_name: nas
+    endpoints:
+    - lb_endpoints:
+      - endpoint:
+          health_check_config:
+            port_value: 9350
+          address:
+            socket_address:
+              address: 127.0.0.1 #localhost
+              port_value: 9350
+- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
+  name: _nas_camera
+  connect_timeout: 0.2s
+  type: STATIC
+  lb_policy: ROUND_ROBIN
+  load_assignment:
+    cluster_name: camera
+    endpoints:
+    - lb_endpoints:
+      - endpoint:
+          health_check_config:
+            port_value: 9900
+          address:
+            socket_address:
+              address: 127.0.0.1 #localhost
+              port_value: 9900
+- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
+  name: _nas_photo
+  connect_timeout: 0.2s
+  type: STATIC
+  lb_policy: ROUND_ROBIN
+  load_assignment:
+    cluster_name: nas
+    endpoints:
+    - lb_endpoints:
+      - endpoint:
+          health_check_config:
+            port_value: 5080
+          address:
+            socket_address:
+              address: 127.0.0.1 #localhost
+              port_value: 5080
+- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
+  name: _k8s_apiserver
+  connect_timeout: 1s
+  type: STRICT_DNS
+  lb_policy: ROUND_ROBIN
+  load_assignment:
+    cluster_name: apiserver
+    endpoints:
+    - lb_endpoints:
+      - endpoint: {health_check_config: { port_value: 16443}, address: { socket_address: { address: 192.168.68.139, port_value: 16443 }}} #192.168.68.254
+  transport_socket:
+    name: envoy.transport_sockets.tls
+    typed_config:
+      "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext
+      common_tls_context:
+        validation_context:
+          trusted_ca:
+            filename: /etc/certs/upstream/kubernetes/root.crt
+- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
+  name: _k8s_router
+  connect_timeout: 1s
+  type: STRICT_DNS
+  lb_policy: ROUND_ROBIN
+  load_assignment:
+    cluster_name: nginx
+    endpoints:
+    - lb_endpoints:
+      - endpoint: {health_check_config: { port_value: 32704}, address: { socket_address: { address: 192.168.68.139, port_value: 32704 }}}
+      # - endpoint: { address: { socket_address: { address: 192.168.68.114, port_value: 32542 }}}
+  # transport_socket:
+  #   name: envoy.transport_sockets.tls
+  #   typed_config:
+  #     "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext
+  #     common_tls_context:
+  #       validation_context:
+  #         trusted_ca:
+  #           filename: /etc/certs/kubernetes/root.crt
+- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
+  name: _3d_printer_console
+  connect_timeout: 2s
+  type: STRICT_DNS
+  lb_policy: ROUND_ROBIN
+  load_assignment:
+    cluster_name: printer
+    endpoints:
+    - lb_endpoints:
+      - endpoint:
+          health_check_config:
+            port_value: 5000
+          address:
+            socket_address:
+              address: octoprint
+              port_value: 5000
+- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
+  name: _3d_printer_camera
+  connect_timeout: 2s
+  type: STRICT_DNS
+  lb_policy: ROUND_ROBIN
+  load_assignment:
+    cluster_name: camera
+    endpoints:
+    - lb_endpoints:
+      - endpoint:
+          health_check_config:
+            port_value: 8080
+          address:
+            socket_address:
+              address: octoprint
+              port_value: 8080
+- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
+  name: _bitwarden_service
+  connect_timeout: 0.2s
+  type: STATIC
+  lb_policy: ROUND_ROBIN
+  load_assignment:
+    cluster_name: pwassword_manager
+    endpoints:
+    - lb_endpoints:
+      - endpoint:
+          health_check_config:
+            port_value: 10010
+          address:
+            socket_address:
+              address: 172.17.0.1
+              port_value: 10010
+- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
+  name: _homeassistant_service
+  connect_timeout: 0.2s
+  type: STRICT_DNS
+  lb_policy: ROUND_ROBIN
+  load_assignment:
+    cluster_name: homeassistant_manager
+    endpoints:
+    - lb_endpoints:
+      - endpoint:
+          health_check_config:
+            port_value: 8123
+          address:
+            socket_address:
+              address: 192.168.68.133
+              port_value: 8123
+- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
+  name: _portainer_ui
+  connect_timeout: 0.2s
+  type: STRICT_DNS
+  lb_policy: ROUND_ROBIN
+  load_assignment:
+    cluster_name: portainer_ui
+    endpoints:
+    - lb_endpoints:
+      - endpoint:
+          health_check_config:
+            port_value: 9000
+          address:
+            socket_address:
+              address: 192.168.68.161
+              port_value: 9000
+  # transport_socket:
+  #   name: envoy.transport_sockets.tls
+  #   typed_config:
+  #     "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext
+  #     common_tls_context:
+  #       validation_context:
+  #         trusted_ca:
+  #           filename: /etc/certs/upstream/portainer/root.crt
+# - "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
+#   name: _baby_buddy
+#   connect_timeout: 0.2s
+#   type: STRICT_DNS
+#   lb_policy: ROUND_ROBIN
+#   load_assignment:
+#     cluster_name: baby_buddy
+#     endpoints:
+#     - lb_endpoints:
+#       - endpoint:
+#           health_check_config:
+#             port_value: 8555
+#           address:
+#             socket_address:
+#               address: 192.168.68.106
+#               port_value: 8555
+- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
+  name: _grafana_ui
+  connect_timeout: 0.2s
+  type: STRICT_DNS
+  lb_policy: ROUND_ROBIN
+  load_assignment:
+    cluster_name: _grafana_ui
+    endpoints:
+    - lb_endpoints:
+      - endpoint:
+          health_check_config:
+            port_value: 3000
+          address:
+            socket_address:
+              address: 192.168.68.106
+              port_value: 3000
+- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
+  name: _auth_server
+  connect_timeout: 0.2s
+  type: STRICT_DNS
+  lb_policy: ROUND_ROBIN
+  load_assignment:
+    cluster_name: _auth_server
+    endpoints:
+    - lb_endpoints:
+      - endpoint:
+          health_check_config:
+            port_value: 5556
+          address:
+            socket_address:
+              address: 192.168.68.113
+              port_value: 5557
+- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
+  name: _ai_server
+  connect_timeout: 0.2s
+  type: STRICT_DNS
+  lb_policy: ROUND_ROBIN
+  load_assignment:
+    cluster_name: _ai_server
+    endpoints:
+    - lb_endpoints:
+      - endpoint:
+          health_check_config:
+            port_value: 3000
+          address:
+            socket_address:
+              address: 192.168.68.113
+              port_value: 3000
+- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
+  name: _pcb_server
+  connect_timeout: 0.2s
+  type: STRICT_DNS
+  lb_policy: ROUND_ROBIN
+  load_assignment:
+    cluster_name: _pcb_server
+    endpoints:
+    - lb_endpoints:
+      - endpoint:
+          health_check_config:
+            port_value: 8088
+          address:
+            socket_address:
+              address: 192.168.68.113
+              port_value: 8088
+- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
+  name: _ai_api_server
+  connect_timeout: 0.2s
+  type: STRICT_DNS
+  lb_policy: ROUND_ROBIN
+  load_assignment:
+    cluster_name: _ai_api_server
+    endpoints:
+    - lb_endpoints:
+      - endpoint:
+          health_check_config:
+            port_value: 8002
+          address:
+            socket_address:
+              address: 192.168.68.113
+              port_value: 8002
+- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
+  name: _ai_ui_server
+  connect_timeout: 0.2s
+  type: STRICT_DNS
+  lb_policy: ROUND_ROBIN
+  load_assignment:
+    cluster_name: _ai_ui_server
+    endpoints:
+    - lb_endpoints:
+      - endpoint:
+          health_check_config:
+            port_value: 8003
+          address:
+            socket_address:
+              address: 192.168.68.113
+              port_value: 8003
+- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
+  name: _monitor_server
+  connect_timeout: 0.2s
+  type: STRICT_DNS
+  lb_policy: ROUND_ROBIN
+  load_assignment:
+    cluster_name: _monitor_server
+    endpoints:
+    - lb_endpoints:
+      - endpoint:
+          health_check_config:
+            port_value: 9090
+          address:
+            socket_address:
+              address: 192.168.68.113
+              port_value: 9090
\ No newline at end of file
diff --git a/data/config/lds.yaml b/data/config/lds.yaml
new file mode 100644
index 0000000..81b5897
--- /dev/null
+++ b/data/config/lds.yaml
@@ -0,0 +1,980 @@
+resources:
+- "@type": type.googleapis.com/envoy.config.listener.v3.Listener
+  name: http_listener
+  address:
+    socket_address: { address: 0.0.0.0, port_value: 10000 }
+  filter_chains:
+  - filters:
+    - name: envoy.filters.network.http_connection_manager
+      typed_config:
+        "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
+        stat_prefix: ingress_http
+        codec_type: AUTO
+        route_config:
+          name: ingress_generic_insecure
+          virtual_hosts:
+          - name: http_to_https
+            domains: ["*"]
+            routes:
+            - match: { prefix : "/.well-known/acme-challenge"}
+              route: { cluster: _acme_renewer }
+            - match: { prefix: "/" }
+              redirect: { https_redirect: true }
+          - name: video_insecure
+            domains: ["video.jerxie.com" , "video.local:10000"]
+            routes:
+            - match: { prefix : "/.well-known/acme-challenge"}
+              route: { cluster: _acme_renewer }
+            - match: { prefix : "/"}
+              route: { cluster: _nas_video }
+        http_filters:
+        - name: envoy.filters.http.router
+          typed_config:
+            "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router
+- "@type": type.googleapis.com/envoy.config.listener.v3.Listener
+  name: https_listener
+  address:
+    socket_address: { address: 0.0.0.0, port_value: 10001 }
+  listener_filters:
+  - name: "envoy.filters.listener.tls_inspector"
+    typed_config:
+      "@type": type.googleapis.com/envoy.extensions.filters.listener.tls_inspector.v3.TlsInspector
+  filter_chains:
+  - filters:
+    - name: envoy.filters.network.http_connection_manager
+      typed_config:
+        "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
+        stat_prefix: ingress_http
+        normalize_path: true
+        merge_slashes: true
+        upgrade_configs:
+        - upgrade_type: websocket
+        codec_type: AUTO
+        stream_idle_timeout: 300s
+        request_timeout: 300s
+        route_config:
+          virtual_hosts:
+          - name: home_service
+            domains: ["home.jerxie.com"]
+            routes:
+            - match: { prefix: "/" }
+              route: { cluster: "_homeassistant_service"}
+            # - match: { path: "/printer"}
+            #   redirect: { path_redirect: "/printer/" }
+            # - match: { prefix: "/printer/webcam" }
+            #   route: { prefix_rewrite: "/", cluster: _3d_printer_camera, idle_timeout: 0s }
+            # - match: { prefix: "/printer/" }
+            #   route: { prefix_rewrite: "/", cluster: _3d_printer_console }
+        http_filters:
+        - name: envoy.filters.http.lua
+          typed_config:
+            "@type": type.googleapis.com/envoy.extensions.filters.http.lua.v3.Lua
+            inline_code: |
+              require "/etc/envoy/filter"
+        - name: envoy.filters.http.router
+          typed_config:
+            "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router
+    filter_chain_match:
+      server_names: ["home.jerxie.com"]
+    transport_socket:
+      name: envoy.transport_sockets.tls
+      typed_config:
+        "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext
+        common_tls_context:
+          tls_certificates:
+          # - certificate_chain: { filename: "/etc/certs/home_domain/certificate.crt" }
+          #   private_key: { filename: "/etc/certs/home_domain/private.key" }
+          - certificate_chain: { filename: "/etc/certs/downstream/home.jerxie.com/fullchain.pem" }
+            private_key: { filename: "/etc/certs/downstream/home.jerxie.com/privkey.pem" }
+          # validation_context:
+          #   trusted_ca:
+          #     filename: /etc/certs/ca_bundle.crt
+  - filters:
+    - name: envoy.filters.network.http_connection_manager
+      typed_config:
+        "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
+        stat_prefix: ingress_http
+        codec_type: AUTO
+        route_config:
+          virtual_hosts:
+          - name: docker_service
+            domains: ["docker.jerxie.com"]
+            routes:
+            - match: { prefix: "/" }
+              route: { cluster: "_docker_registry", timeout: 0s}
+        http_filters:
+        - name: envoy.filters.http.router
+          typed_config:
+            "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router
+    filter_chain_match:
+      server_names: ["docker.jerxie.com", "docker.local"]
+    transport_socket:
+      name: envoy.transport_sockets.tls
+      typed_config:
+        "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext
+        common_tls_context:
+          tls_certificates:
+          - certificate_chain: { filename: "/etc/certs/downstream/docker.jerxie.com/fullchain.pem" }
+            private_key: { filename: "/etc/certs/downstream/docker.jerxie.com/privkey.pem" }
+  - filters:
+    - name: envoy.filters.network.http_connection_manager
+      typed_config:
+        "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
+        stat_prefix: ingress_http
+        codec_type: AUTO
+        upgrade_configs:
+        - upgrade_type: websocket
+        route_config:
+          virtual_hosts:
+          - name: docker_service
+            domains: ["nas.jerxie.com", "nas:10001"]
+            routes:
+            - match: { prefix: "/" }
+              route: { cluster: "_nas_service", timeout: 0s}
+        http_filters:
+        - name: envoy.filters.http.router
+          typed_config:
+            "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router
+    filter_chain_match:
+      server_names: ["nas.jerxie.com", "nas"]
+    transport_socket:
+      name: envoy.transport_sockets.tls
+      typed_config:
+        "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext
+        common_tls_context:
+          tls_certificates:
+          - certificate_chain: { filename: "/etc/certs/downstream/nas.jerxie.com/fullchain.pem" }
+            private_key: { filename: "/etc/certs/downstream/nas.jerxie.com/privkey.pem" }
+  - filters:
+    - name: envoy.filters.network.http_connection_manager
+      typed_config:
+        "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
+        stat_prefix: ingress_http
+        codec_type: AUTO
+        route_config:
+          virtual_hosts:
+          - name: docker_service
+            domains: ["video.jerxie.com"]
+            routes:
+            - match: { prefix: "/" }
+              route: { cluster: "_nas_video", timeout: 0s}
+        http_filters:
+        - name: envoy.filters.http.router
+          typed_config:
+            "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router
+    filter_chain_match:
+      server_names: ["video.jerxie.com"]
+    transport_socket:
+      name: envoy.transport_sockets.tls
+      typed_config:
+        "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext
+        common_tls_context:
+          tls_certificates:
+          - certificate_chain: { filename: "/etc/certs/downstream/video.jerxie.com/fullchain.pem" }
+            private_key: { filename: "/etc/certs/downstream/video.jerxie.com/privkey.pem" }
+  - filters:
+    - name: envoy.filters.network.http_connection_manager
+      typed_config:
+        "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
+        stat_prefix: ingress_http
+        codec_type: AUTO
+        route_config:
+          virtual_hosts:
+          - name: plex_server
+            domains: ["plex.jerxie.com"]
+            routes:
+            - match: { prefix: "/" }
+              route: { cluster: "_plex_server", timeout: 0s}
+        http_filters:
+        - name: envoy.filters.http.router
+          typed_config:
+            "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router
+    filter_chain_match:
+      server_names: ["plex.jerxie.com"]
+    transport_socket:
+      name: envoy.transport_sockets.tls
+      typed_config:
+        "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext
+        common_tls_context:
+          tls_certificates:
+          - certificate_chain: { filename: "/etc/certs/downstream/plex.jerxie.com/fullchain.pem" }
+            private_key: { filename: "/etc/certs/downstream/plex.jerxie.com/privkey.pem" }
+  - filters:
+    - name: envoy.filters.network.http_connection_manager
+      typed_config:
+        "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
+        stat_prefix: ingress_http
+        codec_type: AUTO
+        normalize_path: true
+        merge_slashes: true
+        route_config:
+          virtual_hosts:
+          - name: kubernetes_service
+            domains: ["kubernetes.jerxie.com"]
+            routes:
+            - match: { path: "/apiserver"}
+              route: { prefix_rewrite: "/" , cluster: _k8s_apiserver }
+            - match: { prefix: "/apiserver/" }
+              route: { prefix_rewrite: "/" , cluster: _k8s_apiserver }
+            - match: { prefix: "/" }
+              route: { cluster: "_k8s_router"}
+        http_filters:
+        - name: envoy.filters.http.router
+          typed_config:
+            "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router
+    filter_chain_match:
+      server_names: ["kubernetes.jerxie.com", "kubernetes.local"]
+    transport_socket:
+      name: envoy.transport_sockets.tls
+      typed_config:
+        "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext
+        common_tls_context:
+          tls_certificates:
+          - certificate_chain: { filename: "/etc/certs/downstream/kubernetes.jerxie.com/fullchain.pem" }
+            private_key: { filename: "/etc/certs/downstream/kubernetes.jerxie.com/privkey.pem" }
+  - filters:
+    - name: envoy.filters.network.http_connection_manager
+      typed_config:
+        "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
+        stat_prefix: ingress_http
+        codec_type: AUTO
+        normalize_path: true
+        merge_slashes: true
+        route_config:
+          virtual_hosts:
+          - name: kubernetes_dashboard_service
+            domains: ["kubernetes.dashboard.jerxie.com"]
+            routes:
+            - match: { prefix: "/" }
+              route: { cluster: "_k8s_router"}
+        http_filters:
+        - name: envoy.filters.http.oauth2
+          typed_config:
+            "@type": type.googleapis.com/envoy.extensions.filters.http.oauth2.v3.OAuth2
+            config:
+              token_endpoint:
+                cluster: _auth_server
+                uri: auth.jerxie.com/token
+                timeout: 3s
+              authorization_endpoint: https://auth.jerxie.com/auth
+              redirect_uri: "%REQ(x-forwarded-proto)%://%REQ(:authority)%/callback"
+              redirect_path_matcher:
+                path:
+                  exact: /callback
+              signout_path:
+                path:
+                  exact: /signout
+              forward_bearer_token: true
+              credentials:
+                client_id: kubernetes-dashboard
+                token_secret:
+                  name: token
+                  sds_config:
+                    path: "/etc/envoy/token-secret.yaml"
+                hmac_secret:
+                  name: hmac
+                  sds_config:
+                    path: "/etc/envoy/hmac-secret.yaml"
+              # (Optional): defaults to 'user' scope if not provided
+              auth_scopes:
+              - openid
+              - email
+              # (Optional): set resource parameter for Authorization request
+        - name: envoy.filters.http.router
+          typed_config:
+            "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router
+    filter_chain_match:
+      server_names: ["kubernetes.dashboard.jerxie.com", "kubernetes.dashboard.local"]
+    transport_socket:
+      name: envoy.transport_sockets.tls
+      typed_config:
+        "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext
+        common_tls_context:
+          tls_certificates:
+          - certificate_chain: { filename: "/etc/certs/downstream/kubernetes.dashboard.jerxie.com/fullchain.pem" }
+            private_key: { filename: "/etc/certs/downstream/kubernetes.dashboard.jerxie.com/privkey.pem" }
+  - filters:
+    - name: envoy.filters.network.http_connection_manager
+      typed_config:
+        "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
+        stat_prefix: ingress_http
+        codec_type: AUTO
+        route_config:
+          virtual_hosts:
+          - name: kubernetes_blog_service
+            domains: ["blog.jerxie.com"]
+            routes:
+            - match: { prefix: "/" }
+              route: { cluster: "_k8s_router"}
+        http_filters:
+        - name: envoy.filters.http.router
+          typed_config:
+            "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router
+    filter_chain_match:
+      server_names: ["blog.jerxie.com"]
+    transport_socket:
+      name: envoy.transport_sockets.tls
+      typed_config:
+        "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext
+        common_tls_context:
+          tls_certificates:
+          - certificate_chain: { filename: "/etc/certs/downstream/blog.jerxie.com/fullchain.pem" }
+            private_key: { filename: "/etc/certs/downstream/blog.jerxie.com/privkey.pem" }
+  - filters:
+    - name: envoy.filters.network.http_connection_manager
+      typed_config:
+        "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
+        stat_prefix: ingress_http
+        codec_type: AUTO
+        route_config:
+          virtual_hosts:
+          - name: kubernetes_blog_service
+            domains: ["argocd.jerxie.com"]
+            routes:
+            - match: { prefix: "/" }
+              route: { cluster: "_k8s_router"}
+        http_filters:
+        - name: envoy.filters.http.router
+          typed_config:
+            "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router
+    filter_chain_match:
+      server_names: ["argocd.jerxie.com"]
+    transport_socket:
+      name: envoy.transport_sockets.tls
+      typed_config:
+        "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext
+        common_tls_context:
+          tls_certificates:
+          - certificate_chain: { filename: "/etc/certs/downstream/argocd.jerxie.com/fullchain.pem" }
+            private_key: { filename: "/etc/certs/downstream/argocd.jerxie.com/privkey.pem" }
+  - filters:
+    - name: envoy.filters.network.http_connection_manager
+      typed_config:
+        "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
+        stat_prefix: ingress_http
+        codec_type: AUTO
+        upgrade_configs:
+        - upgrade_type: websocket
+        stream_idle_timeout: 0s
+        normalize_path: true
+        merge_slashes: true
+        route_config:
+          virtual_hosts:
+          - name: meet_service
+            domains: ["meet.jerxie.com"]
+            routes:
+            - match: { prefix: "/" }
+              route: { cluster: "_k8s_router"}
+        http_filters:
+        - name: envoy.filters.http.router
+          typed_config:
+            "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router
+    filter_chain_match:
+      server_names: ["meet.jerxie.com"]
+    transport_socket:
+      name: envoy.transport_sockets.tls
+      typed_config:
+        "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext
+        common_tls_context:
+          tls_certificates:
+          - certificate_chain: { filename: "/etc/certs/downstream/meet.jerxie.com/fullchain.pem" }
+            private_key: { filename: "/etc/certs/downstream/meet.jerxie.com/privkey.pem" }
+  - filters:
+    - name: envoy.filters.network.http_connection_manager
+      typed_config:
+        "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
+        stat_prefix: ingress_http
+        codec_type: AUTO
+        route_config:
+          virtual_hosts:
+          - name: docker_service
+            domains: ["audio.jerxie.com", "audio.local"]
+            routes:
+            - match: { prefix: "/" }
+              route: { cluster: "_nas_audio"}
+        http_filters:
+        - name: envoy.filters.http.router
+          typed_config:
+            "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router
+    filter_chain_match:
+      server_names: ["audio.jerxie.com", "audio.local"]
+    transport_socket:
+      name: envoy.transport_sockets.tls
+      typed_config:
+        "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext
+        common_tls_context:
+          tls_certificates:
+          - certificate_chain: { filename: "/etc/certs/downstream/audio.jerxie.com/fullchain.pem" }
+            private_key: { filename: "/etc/certs/downstream/audio.jerxie.com/privkey.pem" }
+  - filters:
+    - name: envoy.filters.network.http_connection_manager
+      typed_config:
+        "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
+        stat_prefix: ingress_http
+        codec_type: AUTO
+        upgrade_configs:
+        - upgrade_type: websocket
+        route_config:
+          virtual_hosts:
+          - name: code_service
+            domains: ["code.jerxie.com"]
+            routes:
+            - match: { prefix: "/" }
+              route: { cluster: "_code_server"}
+        http_filters:
+        - name: envoy.filters.http.oauth2
+          typed_config:
+            "@type": type.googleapis.com/envoy.extensions.filters.http.oauth2.v3.OAuth2
+            config:
+              token_endpoint:
+                cluster: _auth_server
+                uri: auth.jerxie.com/token
+                timeout: 3s
+              authorization_endpoint: https://auth.jerxie.com/auth
+              redirect_uri: "%REQ(x-forwarded-proto)%://%REQ(:authority)%/callback"
+              forward_bearer_token: true
+              redirect_path_matcher:
+                path:
+                  exact: /callback
+              signout_path:
+                path:
+                  exact: /signout
+              credentials:
+                client_id: code-server
+                token_secret:
+                  name: token
+                  sds_config:
+                    path: "/etc/envoy/token-secret.yaml"
+                hmac_secret:
+                  name: hmac
+                  sds_config:
+                    path: "/etc/envoy/hmac-secret.yaml"
+              # (Optional): defaults to 'user' scope if not provided
+              auth_scopes:
+              - openid
+              - email
+              # (Optional): set resource parameter for Authorization request
+        - name: envoy.filters.http.jwt_authn
+          typed_config:
+            "@type": type.googleapis.com/envoy.extensions.filters.http.jwt_authn.v3.JwtAuthentication
+            providers:
+              provider1:
+                remote_jwks:
+                  http_uri:
+                    uri: "https://auth.jerxie.com/keys"
+                    cluster: _auth_server
+                    timeout: 5s
+                  cache_duration: 600s
+                from_headers:
+                - name: Authorization
+                  value_prefix: "Bearer "
+                from_cookies:
+                - BearerToken
+                payload_in_metadata: jwt_payload
+            rules:
+            - match:
+                prefix: /
+              requires:
+                provider_name: provider1
+        - name: envoy.filters.http.lua
+          typed_config:
+            "@type": type.googleapis.com/envoy.extensions.filters.http.lua.v3.Lua
+            inline_code: |
+              email = ""
+              function envoy_on_request(request_handle)
+                email = ""
+                local meta = request_handle:streamInfo():dynamicMetadata()
+                for key, value in pairs(meta:get("envoy.filters.http.jwt_authn")) do
+                  if key == "jwt_payload" then
+                    for k, v in pairs(value) do
+                      if k == "email" then
+                        request_handle:logInfo("login codeserver: " ..v)
+                        email = v
+                      end
+                    end
+                  end
+                end
+              end
+
+              function envoy_on_response(response_handle)
+                if email ~="" and email ~= "axieyangb@gmail.com" then
+                  response_handle:logInfo("Got unauthorized user, return 403 for user " ..email)
+                  response_handle:headers():add("set-cookie", "BearerToken=deleted; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT")
+                  response_handle:headers():add("set-cookie", "OauthHMAC=deleted; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT")
+                  response_handle:headers():add("set-cookie", "IdToken=deleted; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT")
+                  response_handle:headers():add("set-cookie", "OauthExpires=deleted; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT")
+                end
+                email = ""
+              end
+        - name: envoy.filters.http.router
+          typed_config:
+            "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router
+    filter_chain_match:
+      server_names: ["code.jerxie.com"]
+    transport_socket:
+      name: envoy.transport_sockets.tls
+      typed_config:
+        "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext
+        common_tls_context:
+          tls_certificates:
+          - certificate_chain: { filename: "/etc/certs/downstream/code.jerxie.com/fullchain.pem" }
+            private_key: { filename: "/etc/certs/downstream/code.jerxie.com/privkey.pem" }
+  - filters:
+    - name: envoy.filters.network.http_connection_manager
+      typed_config:
+        "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
+        stat_prefix: ingress_http
+        codec_type: AUTO
+        route_config:
+          virtual_hosts:
+          - name: photo_service
+            domains: ["photo.jerxie.com"]
+            routes:
+            - match: { prefix: "/" }
+              route: { cluster: "_nas_photo", timeout: 0s}
+        http_filters:
+        - name: envoy.filters.http.router
+          typed_config:
+            "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router
+    filter_chain_match:
+      server_names: ["photo.jerxie.com"]
+    transport_socket:
+      name: envoy.transport_sockets.tls
+      typed_config:
+        "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext
+        common_tls_context:
+          tls_certificates:
+          - certificate_chain: { filename: "/etc/certs/downstream/photo.jerxie.com/fullchain.pem" }
+            private_key: { filename: "/etc/certs/downstream/photo.jerxie.com/privkey.pem" }
+  - filters:
+    - name: envoy.filters.network.http_connection_manager
+      typed_config:
+        "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
+        stat_prefix: ingress_http
+        codec_type: AUTO
+        route_config:
+          virtual_hosts:
+          - name: password_service
+            domains: ["password.jerxie.com"]
+            routes:
+            - match: { prefix: "/" }
+              route: { cluster: "_bitwarden_service"}
+        http_filters:
+        - name: envoy.filters.http.router
+          typed_config:
+            "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router
+    filter_chain_match:
+      server_names: ["password.jerxie.com"]
+    transport_socket:
+      name: envoy.transport_sockets.tls
+      typed_config:
+        "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext
+        common_tls_context:
+          tls_certificates:
+          - certificate_chain: { filename: "/etc/certs/downstream/password.jerxie.com/fullchain.pem" }
+            private_key: { filename: "/etc/certs/downstream/password.jerxie.com/privkey.pem" }
+  - filters:
+    - name: envoy.filters.network.http_connection_manager
+      typed_config:
+        "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
+        stat_prefix: ingress_http
+        codec_type: AUTO
+        route_config:
+          virtual_hosts:
+          - name: gitbucket_service
+            domains: ["gitbucket.jerxie.com"]
+            routes:
+            - match: { prefix: "/" }
+              route: { cluster: "_git_bucket"}
+        http_filters:
+        - name: envoy.filters.http.router
+          typed_config:
+            "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router
+    filter_chain_match:
+      server_names: ["gitbucket.jerxie.com"]
+    transport_socket:
+      name: envoy.transport_sockets.tls
+      typed_config:
+        "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext
+        common_tls_context:
+          tls_certificates:
+          - certificate_chain: { filename: "/etc/certs/downstream/gitbucket.jerxie.com/fullchain.pem" }
+            private_key: { filename: "/etc/certs/downstream/gitbucket.jerxie.com/privkey.pem" }
+  - filters:
+    - name: envoy.filters.network.http_connection_manager
+      typed_config:
+        "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
+        stat_prefix: ingress_http
+        codec_type: AUTO
+        upgrade_configs:
+        - upgrade_type: websocket
+        stream_idle_timeout: 0s
+        normalize_path: true
+        merge_slashes: true
+        route_config:
+          virtual_hosts:
+          - name: printer_service
+            domains: ["printer.jerxie.com"]
+            routes:
+            - match: { prefix: "/webcam" }
+              route: { prefix_rewrite: "/", cluster: "_3d_printer_camera", max_stream_duration: {grpc_timeout_header_max: 0s} }
+            - match: { prefix: "/" }
+              route: { cluster: "_3d_printer_console"}
+        http_filters:
+        - name: envoy.filters.http.oauth2
+          typed_config:
+            "@type": type.googleapis.com/envoy.extensions.filters.http.oauth2.v3.OAuth2
+            config:
+              token_endpoint:
+                cluster: _auth_server
+                uri: auth.jerxie.com/token
+                timeout: 3s
+              authorization_endpoint: https://auth.jerxie.com/auth
+              redirect_uri: "%REQ(x-forwarded-proto)%://%REQ(:authority)%/callback"
+              redirect_path_matcher:
+                path:
+                  exact: /callback
+              signout_path:
+                path:
+                  exact: /signout
+              forward_bearer_token: true
+              credentials:
+                client_id: octoprint-portal
+                token_secret:
+                  name: token
+                  sds_config:
+                    path: "/etc/envoy/token-secret.yaml"
+                hmac_secret:
+                  name: hmac
+                  sds_config:
+                    path: "/etc/envoy/hmac-secret.yaml"
+              # (Optional): defaults to 'user' scope if not provided
+              auth_scopes:
+              - openid
+              - email
+              # (Optional): set resource parameter for Authorization request
+        - name: envoy.filters.http.jwt_authn
+          typed_config:
+            "@type": type.googleapis.com/envoy.extensions.filters.http.jwt_authn.v3.JwtAuthentication
+            providers:
+              provider1:
+                remote_jwks:
+                  http_uri:
+                    uri: "https://auth.jerxie.com/keys"
+                    cluster: _auth_server
+                    timeout: 5s
+                  cache_duration: 600s
+                from_headers:
+                - name: Authorization
+ value_prefix: "Bearer " + # from_cookies: + # - BearerToken + payload_in_metadata: jwt_payload + rules: + - match: + prefix: / + requires: + provider_name: provider1 + - name: envoy.filters.http.lua + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.lua.v3.Lua + inline_code: | + email = "" + function envoy_on_request(request_handle) + email = "" + local meta = request_handle:streamInfo():dynamicMetadata() + for key, value in pairs(meta:get("envoy.filters.http.jwt_authn")) do + if key == "jwt_payload" then + for k, v in pairs(value) do + if k == "email" then + print("login octoprint: "..v) + email = v + request_handle:headers():add("ENVOY_AUTHENTICATED_USER", v) + end + end + end + end + end + + function envoy_on_response(response_handle) + if email ~="" and email ~= "axieyangb@gmail.com" then + response_handle:logInfo("Got unauthorized user, return 403 for user " ..email) + response_handle:headers():add("set-cookie", "BearerToken=deleted; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT") + response_handle:headers():add("set-cookie", "OauthHMAC=deleted; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT") + response_handle:headers():add("set-cookie", "IdToken=deleted; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT") + response_handle:headers():add("set-cookie", "OauthExpires=deleted; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT") + end + email = "" + end + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + filter_chain_match: + server_names: ["printer.jerxie.com", "printer.local"] + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: { filename: "/etc/certs/downstream/printer.jerxie.com/fullchain.pem" } + private_key: { filename: "/etc/certs/downstream/printer.jerxie.com/privkey.pem" } + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + codec_type: AUTO + upgrade_configs: + - upgrade_type: websocket + route_config: + virtual_hosts: + - name: camera_service + domains: ["camera.jerxie.com"] + routes: + - match: { prefix: "/" } + route: { cluster: "_nas_camera"} + http_filters: + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + filter_chain_match: + server_names: ["camera.jerxie.com"] + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: { filename: "/etc/certs/downstream/camera.jerxie.com/fullchain.pem" } + private_key: { filename: "/etc/certs/downstream/camera.jerxie.com/privkey.pem" } + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + codec_type: AUTO + route_config: + virtual_hosts: + - name: note_service + domains: ["note.jerxie.com"] + routes: + - match: { prefix: "/" } + route: { cluster: "_nas_note"} + http_filters: + - name: envoy.filters.http.router + typed_config: + "@type": 
type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + filter_chain_match: + server_names: ["note.jerxie.com"] + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: { filename: "/etc/certs/downstream/note.jerxie.com/fullchain.pem" } + private_key: { filename: "/etc/certs/downstream/note.jerxie.com/privkey.pem" } + # - filters: + # - name: envoy.filters.network.http_connection_manager + # typed_config: + # "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + # stat_prefix: ingress_http + # codec_type: AUTO + # route_config: + # virtual_hosts: + # - name: baby_service + # domains: ["baby.jerxie.com"] + # routes: + # - match: { prefix: "/" } + # route: { cluster: "_baby_buddy"} + # http_filters: + # - name: envoy.filters.http.router + # typed_config: + # "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + # filter_chain_match: + # server_names: ["baby.jerxie.com"] + # transport_socket: + # name: envoy.transport_sockets.tls + # typed_config: + # "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + # common_tls_context: + # tls_certificates: + # - certificate_chain: { filename: "/etc/certs/downstream/baby.jerxie.com/fullchain.pem" } + # private_key: { filename: "/etc/certs/downstream/baby.jerxie.com/privkey.pem" } + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + upgrade_configs: + - upgrade_type: websocket + codec_type: AUTO + route_config: + virtual_hosts: + - name: container_service + domains: ["container.jerxie.com"] + routes: + - match: { prefix: "/" } + route: { cluster: "_portainer_ui"} + http_filters: + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + filter_chain_match: + server_names: ["container.jerxie.com"] + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: { filename: "/etc/certs/downstream/container.jerxie.com/fullchain.pem" } + private_key: { filename: "/etc/certs/downstream/container.jerxie.com/privkey.pem" } + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + upgrade_configs: + - upgrade_type: websocket + codec_type: AUTO + route_config: + virtual_hosts: + - name: grafana_service + domains: ["grafana.jerxie.com"] + routes: + - match: { prefix: "/" } + route: { cluster: "_grafana_ui"} + http_filters: + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + filter_chain_match: + server_names: ["grafana.jerxie.com"] + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: { filename: 
"/etc/certs/downstream/grafana.jerxie.com/fullchain.pem" } + private_key: { filename: "/etc/certs/downstream/grafana.jerxie.com/privkey.pem" } + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + upgrade_configs: + - upgrade_type: websocket + codec_type: AUTO + route_config: + virtual_hosts: + - name: auth_service + domains: ["auth.jerxie.com"] + routes: + - match: { prefix: "/" } + route: { cluster: "_auth_server"} + http_filters: + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + filter_chain_match: + server_names: ["auth.jerxie.com"] + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: { filename: "/etc/certs/downstream/auth.jerxie.com/fullchain.pem" } + private_key: { filename: "/etc/certs/downstream/auth.jerxie.com/privkey.pem" } + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + upgrade_configs: + - upgrade_type: websocket + codec_type: AUTO + route_config: + virtual_hosts: + - name: ai_service + domains: ["ai.jerxie.com"] + routes: + - match: { prefix: "/api" } + route: { cluster: "_ai_api_server", timeout: 0s} + - match: { prefix: "/" } + route: { cluster: "_ai_ui_server", timeout: 0s} + http_filters: + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + filter_chain_match: + server_names: ["ai.jerxie.com"] + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: { filename: "/etc/certs/downstream/ai.jerxie.com/fullchain.pem" } + private_key: { filename: "/etc/certs/downstream/ai.jerxie.com/privkey.pem" } + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + upgrade_configs: + - upgrade_type: websocket + codec_type: AUTO + route_config: + virtual_hosts: + - name: pcb_service + domains: ["pcb.jerxie.com"] + routes: + - match: { prefix: "/" } + route: { cluster: "_pcb_server", timeout: 0s} + http_filters: + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + filter_chain_match: + server_names: ["pcb.jerxie.com"] + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: { filename: "/etc/certs/downstream/pcb.jerxie.com/fullchain.pem" } + private_key: { filename: "/etc/certs/downstream/pcb.jerxie.com/privkey.pem" } + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + 
stat_prefix: ingress_http + upgrade_configs: + - upgrade_type: websocket + codec_type: AUTO + route_config: + virtual_hosts: + - name: monitor_service + domains: ["monitor.jerxie.com"] + routes: + - match: { prefix: "/" } + route: { cluster: "_monitor_server", timeout: 0s} + http_filters: + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + filter_chain_match: + server_names: ["monitor.jerxie.com"] + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: { filename: "/etc/certs/downstream/monitor.jerxie.com/fullchain.pem" } + private_key: { filename: "/etc/certs/downstream/monitor.jerxie.com/privkey.pem" } \ No newline at end of file diff --git a/data/lds.yaml b/data/lds.yaml deleted file mode 100644 index 81b5897..0000000 --- a/data/lds.yaml +++ /dev/null @@ -1,980 +0,0 @@ -resources: -- "@type": type.googleapis.com/envoy.config.listener.v3.Listener - name: http_listener - address: - socket_address: { address: 0.0.0.0, port_value: 10000 } - filter_chains: - - filters: - - name: envoy.filters.network.http_connection_manager - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager - stat_prefix: ingress_http - codec_type: AUTO - route_config: - name: ingress_generic_insecure - virtual_hosts: - - name: http_to_https - domains: ["*"] - routes: - - match: { prefix : "/.well-known/acme-challenge"} - route: { cluster: _acme_renewer } - - match: { prefix: "/" } - redirect: { https_redirect: true } - - name: video_insecure - domains: ["video.jerxie.com" , "video.local:10000"] - routes: - - match: { prefix : "/.well-known/acme-challenge"} - route: { cluster: _acme_renewer } - - match: { prefix : "/"} - route: { cluster: _nas_video } - http_filters: - - name: envoy.filters.http.router - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router -- "@type": type.googleapis.com/envoy.config.listener.v3.Listener - name: https_listener - address: - socket_address: { address: 0.0.0.0, port_value: 10001 } - listener_filters: - - name: "envoy.filters.listener.tls_inspector" - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.listener.tls_inspector.v3.TlsInspector - filter_chains: - - filters: - - name: envoy.filters.network.http_connection_manager - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager - stat_prefix: ingress_http - normalize_path: true - merge_slashes: true - upgrade_configs: - - upgrade_type: websocket - codec_type: AUTO - stream_idle_timeout: 300s - request_timeout: 300s - route_config: - virtual_hosts: - - name: home_service - domains: ["home.jerxie.com"] - routes: - - match: { prefix: "/" } - route: { cluster: "_homeassistant_service"} - # - match: { path: "/printer"} - # redirect: { path_redirect: "/printer/" } - # - match: { prefix: "/printer/webcam" } - # route: { prefix_rewrite: "/", cluster: _3d_printer_camera, idle_timeout: 0s } - # - match: { prefix: "/printer/" } - # route: { prefix_rewrite: "/", cluster: _3d_printer_console } - http_filters: - - name: envoy.filters.http.lua - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.lua.v3.Lua - inline_code: | - require "/etc/envoy/filter" - - name: 
envoy.filters.http.router - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router - filter_chain_match: - server_names: ["home.jerxie.com"] - transport_socket: - name: envoy.transport_sockets.tls - typed_config: - "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext - common_tls_context: - tls_certificates: - # - certificate_chain: { filename: "/etc/certs/home_domain/certificate.crt" } - # private_key: { filename: "/etc/certs/home_domain/private.key" } - - certificate_chain: { filename: "/etc/certs/downstream/home.jerxie.com/fullchain.pem" } - private_key: { filename: "/etc/certs/downstream/home.jerxie.com/privkey.pem" } - # validation_context: - # trusted_ca: - # filename: /etc/certs/ca_bundle.crt - - filters: - - name: envoy.filters.network.http_connection_manager - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager - stat_prefix: ingress_http - codec_type: AUTO - route_config: - virtual_hosts: - - name: docker_service - domains: ["docker.jerxie.com"] - routes: - - match: { prefix: "/" } - route: { cluster: "_docker_registry", timeout: 0s} - http_filters: - - name: envoy.filters.http.router - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router - filter_chain_match: - server_names: ["docker.jerxie.com", "docker.local"] - transport_socket: - name: envoy.transport_sockets.tls - typed_config: - "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext - common_tls_context: - tls_certificates: - - certificate_chain: { filename: "/etc/certs/downstream/docker.jerxie.com/fullchain.pem" } - private_key: { filename: "/etc/certs/downstream/docker.jerxie.com/privkey.pem" } - - filters: - - name: envoy.filters.network.http_connection_manager - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager - stat_prefix: ingress_http - codec_type: AUTO - upgrade_configs: - - upgrade_type: websocket - route_config: - virtual_hosts: - - name: docker_service - domains: ["nas.jerxie.com", "nas:10001"] - routes: - - match: { prefix: "/" } - route: { cluster: "_nas_service", timeout: 0s} - http_filters: - - name: envoy.filters.http.router - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router - filter_chain_match: - server_names: ["nas.jerxie.com", "nas"] - transport_socket: - name: envoy.transport_sockets.tls - typed_config: - "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext - common_tls_context: - tls_certificates: - - certificate_chain: { filename: "/etc/certs/downstream/nas.jerxie.com/fullchain.pem" } - private_key: { filename: "/etc/certs/downstream/nas.jerxie.com/privkey.pem" } - - filters: - - name: envoy.filters.network.http_connection_manager - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager - stat_prefix: ingress_http - codec_type: AUTO - route_config: - virtual_hosts: - - name: docker_service - domains: ["video.jerxie.com"] - routes: - - match: { prefix: "/" } - route: { cluster: "_nas_video", timeout: 0s} - http_filters: - - name: envoy.filters.http.router - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router - filter_chain_match: - server_names: ["video.jerxie.com"] - transport_socket: - name: 
envoy.transport_sockets.tls - typed_config: - "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext - common_tls_context: - tls_certificates: - - certificate_chain: { filename: "/etc/certs/downstream/video.jerxie.com/fullchain.pem" } - private_key: { filename: "/etc/certs/downstream/video.jerxie.com/privkey.pem" } - - filters: - - name: envoy.filters.network.http_connection_manager - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager - stat_prefix: ingress_http - codec_type: AUTO - route_config: - virtual_hosts: - - name: plex_server - domains: ["plex.jerxie.com"] - routes: - - match: { prefix: "/" } - route: { cluster: "_plex_server", timeout: 0s} - http_filters: - - name: envoy.filters.http.router - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router - filter_chain_match: - server_names: ["plex.jerxie.com"] - transport_socket: - name: envoy.transport_sockets.tls - typed_config: - "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext - common_tls_context: - tls_certificates: - - certificate_chain: { filename: "/etc/certs/downstream/plex.jerxie.com/fullchain.pem" } - private_key: { filename: "/etc/certs/downstream/plex.jerxie.com/privkey.pem" } - - filters: - - name: envoy.filters.network.http_connection_manager - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager - stat_prefix: ingress_http - codec_type: AUTO - normalize_path: true - merge_slashes: true - route_config: - virtual_hosts: - - name: kubernetes_service - domains: ["kubernetes.jerxie.com"] - routes: - - match: { path: "/apiserver"} - route: { prefix_rewrite: "/" , cluster: _k8s_apiserver } - - match: { prefix: "/apiserver/" } - route: { prefix_rewrite: "/" , cluster: _k8s_apiserver } - - match: { prefix: "/" } - route: { cluster: "_k8s_router"} - http_filters: - - name: envoy.filters.http.router - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router - filter_chain_match: - server_names: ["kubernetes.jerxie.com", "kubernetes.local"] - transport_socket: - name: envoy.transport_sockets.tls - typed_config: - "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext - common_tls_context: - tls_certificates: - - certificate_chain: { filename: "/etc/certs/downstream/kubernetes.jerxie.com/fullchain.pem" } - private_key: { filename: "/etc/certs/downstream/kubernetes.jerxie.com/privkey.pem" } - - filters: - - name: envoy.filters.network.http_connection_manager - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager - stat_prefix: ingress_http - codec_type: AUTO - normalize_path: true - merge_slashes: true - route_config: - virtual_hosts: - - name: kubernetes_dashboard_service - domains: ["kubernetes.dashboard.jerxie.com"] - routes: - - match: { prefix: "/" } - route: { cluster: "_k8s_router"} - http_filters: - - name: envoy.filters.http.oauth2 - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.oauth2.v3.OAuth2 - config: - token_endpoint: - cluster: _auth_server - uri: auth.jerxie.com/token - timeout: 3s - authorization_endpoint: https://auth.jerxie.com/auth - redirect_uri: "%REQ(x-forwarded-proto)%://%REQ(:authority)%/callback" - redirect_path_matcher: - path: - exact: /callback - signout_path: - path: - 
exact: /signout - forward_bearer_token: true - credentials: - client_id: kubernetes-dashboard - token_secret: - name: token - sds_config: - path: "/etc/envoy/token-secret.yaml" - hmac_secret: - name: hmac - sds_config: - path: "/etc/envoy/hmac-secret.yaml" - # (Optional): defaults to 'user' scope if not provided - auth_scopes: - - openid - - email - # (Optional): set resource parameter for Authorization request - - name: envoy.filters.http.router - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router - filter_chain_match: - server_names: ["kubernetes.dashboard.jerxie.com", "kubernetes.dashboard.local"] - transport_socket: - name: envoy.transport_sockets.tls - typed_config: - "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext - common_tls_context: - tls_certificates: - - certificate_chain: { filename: "/etc/certs/downstream/kubernetes.dashboard.jerxie.com/fullchain.pem" } - private_key: { filename: "/etc/certs/downstream/kubernetes.dashboard.jerxie.com/privkey.pem" } - - filters: - - name: envoy.filters.network.http_connection_manager - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager - stat_prefix: ingress_http - codec_type: AUTO - route_config: - virtual_hosts: - - name: kubernetes_blog_service - domains: ["blog.jerxie.com"] - routes: - - match: { prefix: "/" } - route: { cluster: "_k8s_router"} - http_filters: - - name: envoy.filters.http.router - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router - filter_chain_match: - server_names: ["blog.jerxie.com"] - transport_socket: - name: envoy.transport_sockets.tls - typed_config: - "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext - common_tls_context: - tls_certificates: - - certificate_chain: { filename: "/etc/certs/downstream/blog.jerxie.com/fullchain.pem" } - private_key: { filename: "/etc/certs/downstream/blog.jerxie.com/privkey.pem" } - - filters: - - name: envoy.filters.network.http_connection_manager - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager - stat_prefix: ingress_http - codec_type: AUTO - route_config: - virtual_hosts: - - name: kubernetes_blog_service - domains: ["argocd.jerxie.com"] - routes: - - match: { prefix: "/" } - route: { cluster: "_k8s_router"} - http_filters: - - name: envoy.filters.http.router - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router - filter_chain_match: - server_names: ["argocd.jerxie.com"] - transport_socket: - name: envoy.transport_sockets.tls - typed_config: - "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext - common_tls_context: - tls_certificates: - - certificate_chain: { filename: "/etc/certs/downstream/argocd.jerxie.com/fullchain.pem" } - private_key: { filename: "/etc/certs/downstream/argocd.jerxie.com/privkey.pem" } - - filters: - - name: envoy.filters.network.http_connection_manager - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager - stat_prefix: ingress_http - codec_type: AUTO - upgrade_configs: - - upgrade_type: websocket - stream_idle_timeout: 0s - normalize_path: true - merge_slashes: true - route_config: - virtual_hosts: - - name: meet_service - domains: ["meet.jerxie.com"] - routes: - - match: { prefix: "/" } 
- route: { cluster: "_k8s_router"} - http_filters: - - name: envoy.filters.http.router - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router - filter_chain_match: - server_names: ["meet.jerxie.com"] - transport_socket: - name: envoy.transport_sockets.tls - typed_config: - "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext - common_tls_context: - tls_certificates: - - certificate_chain: { filename: "/etc/certs/downstream/meet.jerxie.com/fullchain.pem" } - private_key: { filename: "/etc/certs/downstream/meet.jerxie.com/privkey.pem" } - - filters: - - name: envoy.filters.network.http_connection_manager - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager - stat_prefix: ingress_http - codec_type: AUTO - route_config: - virtual_hosts: - - name: docker_service - domains: ["audio.jerxie.com", "audio.local"] - routes: - - match: { prefix: "/" } - route: { cluster: "_nas_audio"} - http_filters: - - name: envoy.filters.http.router - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router - filter_chain_match: - server_names: ["audio.jerxie.com", "audio.local"] - transport_socket: - name: envoy.transport_sockets.tls - typed_config: - "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext - common_tls_context: - tls_certificates: - - certificate_chain: { filename: "/etc/certs/downstream/audio.jerxie.com/fullchain.pem" } - private_key: { filename: "/etc/certs/downstream/audio.jerxie.com/privkey.pem" } - - filters: - - name: envoy.filters.network.http_connection_manager - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager - stat_prefix: ingress_http - codec_type: AUTO - upgrade_configs: - - upgrade_type: websocket - route_config: - virtual_hosts: - - name: code_service - domains: ["code.jerxie.com"] - routes: - - match: { prefix: "/" } - route: { cluster: "_code_server"} - http_filters: - - name: envoy.filters.http.oauth2 - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.oauth2.v3.OAuth2 - config: - token_endpoint: - cluster: _auth_server - uri: auth.jerxie.com/token - timeout: 3s - authorization_endpoint: https://auth.jerxie.com/auth - redirect_uri: "%REQ(x-forwarded-proto)%://%REQ(:authority)%/callback" - forward_bearer_token: true - redirect_path_matcher: - path: - exact: /callback - signout_path: - path: - exact: /signout - credentials: - client_id: code-server - token_secret: - name: token - sds_config: - path: "/etc/envoy/token-secret.yaml" - hmac_secret: - name: hmac - sds_config: - path: "/etc/envoy/hmac-secret.yaml" - # (Optional): defaults to 'user' scope if not provided - auth_scopes: - - openid - - email - # (Optional): set resource parameter for Authorization request - - name: envoy.filters.http.jwt_authn - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.jwt_authn.v3.JwtAuthentication - providers: - provider1: - remote_jwks: - http_uri: - uri: "https://auth.jerxie.com/keys" - cluster: _auth_server - timeout: 5s - cache_duration: 600s - from_headers: - - name: Authorization - value_prefix: "Bearer " - from_cookies: - - BearerToken - payload_in_metadata: jwt_payload - rules: - - match: - prefix: / - requires: - provider_name: provider1 - - name: envoy.filters.http.lua - typed_config: - "@type": 
type.googleapis.com/envoy.extensions.filters.http.lua.v3.Lua - inline_code: | - email = "" - function envoy_on_request(request_handle) - email = "" - local meta = request_handle:streamInfo():dynamicMetadata() - for key, value in pairs(meta:get("envoy.filters.http.jwt_authn")) do - if key == "jwt_payload" then - for k, v in pairs(value) do - if k == "email" then - request_handle:logInfo("login codeserver: " ..v) - email = v - end - end - end - end - end - - function envoy_on_response(response_handle) - if email ~="" and email ~= "axieyangb@gmail.com" then - response_handle:logInfo("Got unauthorized user, return 403 for user " ..email) - response_handle:headers():add("set-cookie", "BearerToken=deleted; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT") - response_handle:headers():add("set-cookie", "OauthHMAC=deleted; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT") - response_handle:headers():add("set-cookie", "IdToken=deleted; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT") - response_handle:headers():add("set-cookie", "OauthExpires=deleted; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT") - end - email = "" - end - - name: envoy.filters.http.router - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router - filter_chain_match: - server_names: ["code.jerxie.com"] - transport_socket: - name: envoy.transport_sockets.tls - typed_config: - "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext - common_tls_context: - tls_certificates: - - certificate_chain: { filename: "/etc/certs/downstream/code.jerxie.com/fullchain.pem" } - private_key: { filename: "/etc/certs/downstream/code.jerxie.com/privkey.pem" } - - filters: - - name: envoy.filters.network.http_connection_manager - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager - stat_prefix: ingress_http - codec_type: AUTO - route_config: - virtual_hosts: - - name: photo_service - domains: ["photo.jerxie.com"] - routes: - - match: { prefix: "/" } - route: { cluster: "_nas_photo", timeout: 0s} - http_filters: - - name: envoy.filters.http.router - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router - filter_chain_match: - server_names: ["photo.jerxie.com"] - transport_socket: - name: envoy.transport_sockets.tls - typed_config: - "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext - common_tls_context: - tls_certificates: - - certificate_chain: { filename: "/etc/certs/downstream/photo.jerxie.com/fullchain.pem" } - private_key: { filename: "/etc/certs/downstream/photo.jerxie.com/privkey.pem" } - - filters: - - name: envoy.filters.network.http_connection_manager - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager - stat_prefix: ingress_http - codec_type: AUTO - route_config: - virtual_hosts: - - name: password_service - domains: ["password.jerxie.com"] - routes: - - match: { prefix: "/" } - route: { cluster: "_bitwarden_service"} - http_filters: - - name: envoy.filters.http.router - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router - filter_chain_match: - server_names: ["password.jerxie.com"] - transport_socket: - name: envoy.transport_sockets.tls - typed_config: - "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext - common_tls_context: - tls_certificates: - - 
certificate_chain: { filename: "/etc/certs/downstream/password.jerxie.com/fullchain.pem" } - private_key: { filename: "/etc/certs/downstream/password.jerxie.com/privkey.pem" } - - filters: - - name: envoy.filters.network.http_connection_manager - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager - stat_prefix: ingress_http - codec_type: AUTO - route_config: - virtual_hosts: - - name: gitbucket_service - domains: ["gitbucket.jerxie.com"] - routes: - - match: { prefix: "/" } - route: { cluster: "_git_bucket"} - http_filters: - - name: envoy.filters.http.router - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router - filter_chain_match: - server_names: ["gitbucket.jerxie.com"] - transport_socket: - name: envoy.transport_sockets.tls - typed_config: - "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext - common_tls_context: - tls_certificates: - - certificate_chain: { filename: "/etc/certs/downstream/gitbucket.jerxie.com/fullchain.pem" } - private_key: { filename: "/etc/certs/downstream/gitbucket.jerxie.com/privkey.pem" } - - filters: - - name: envoy.filters.network.http_connection_manager - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager - stat_prefix: ingress_http - codec_type: AUTO - upgrade_configs: - - upgrade_type: websocket - stream_idle_timeout: 0s - normalize_path: true - merge_slashes: true - route_config: - virtual_hosts: - - name: printer_service - domains: ["printer.jerxie.com"] - routes: - - match: { prefix: "/webcam" } - route: { prefix_rewrite: "/", cluster: "_3d_printer_camera", max_stream_duration: {grpc_timeout_header_max: 0s} } - - match: { prefix: "/" } - route: { cluster: "_3d_printer_console"} - http_filters: - - name: envoy.filters.http.oauth2 - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.oauth2.v3.OAuth2 - config: - token_endpoint: - cluster: _auth_server - uri: auth.jerxie.com/token - timeout: 3s - authorization_endpoint: https://auth.jerxie.com/auth - redirect_uri: "%REQ(x-forwarded-proto)%://%REQ(:authority)%/callback" - redirect_path_matcher: - path: - exact: /callback - signout_path: - path: - exact: /signout - forward_bearer_token: true - credentials: - client_id: octoprint-portal - token_secret: - name: token - sds_config: - path: "/etc/envoy/token-secret.yaml" - hmac_secret: - name: hmac - sds_config: - path: "/etc/envoy/hmac-secret.yaml" - # (Optional): defaults to 'user' scope if not provided - auth_scopes: - - openid - - email - # (Optional): set resource parameter for Authorization request - - name: envoy.filters.http.jwt_authn - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.jwt_authn.v3.JwtAuthentication - providers: - provider1: - remote_jwks: - http_uri: - uri: "https://auth.jerxie.com/keys" - cluster: _auth_server - timeout: 5s - cache_duration: 600s - from_headers: - - name: Authorization - value_prefix: "Bearer " - # from_cookies: - # - BearerToken - payload_in_metadata: jwt_payload - rules: - - match: - prefix: / - requires: - provider_name: provider1 - - name: envoy.filters.http.lua - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.lua.v3.Lua - inline_code: | - email = "" - function envoy_on_request(request_handle) - email = "" - local meta = request_handle:streamInfo():dynamicMetadata() - for key, value in 
pairs(meta:get("envoy.filters.http.jwt_authn")) do - if key == "jwt_payload" then - for k, v in pairs(value) do - if k == "email" then - print("login octoprint: "..v) - email = v - request_handle:headers():add("ENVOY_AUTHENTICATED_USER", v) - end - end - end - end - end - - function envoy_on_response(response_handle) - if email ~="" and email ~= "axieyangb@gmail.com" then - response_handle:logInfo("Got unauthorized user, return 403 for user " ..email) - response_handle:headers():add("set-cookie", "BearerToken=deleted; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT") - response_handle:headers():add("set-cookie", "OauthHMAC=deleted; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT") - response_handle:headers():add("set-cookie", "IdToken=deleted; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT") - response_handle:headers():add("set-cookie", "OauthExpires=deleted; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT") - end - email = "" - end - - name: envoy.filters.http.router - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router - filter_chain_match: - server_names: ["printer.jerxie.com", "printer.local"] - transport_socket: - name: envoy.transport_sockets.tls - typed_config: - "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext - common_tls_context: - tls_certificates: - - certificate_chain: { filename: "/etc/certs/downstream/printer.jerxie.com/fullchain.pem" } - private_key: { filename: "/etc/certs/downstream/printer.jerxie.com/privkey.pem" } - - filters: - - name: envoy.filters.network.http_connection_manager - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager - stat_prefix: ingress_http - codec_type: AUTO - upgrade_configs: - - upgrade_type: websocket - route_config: - virtual_hosts: - - name: camera_service - domains: ["camera.jerxie.com"] - routes: - - match: { prefix: "/" } - route: { cluster: "_nas_camera"} - http_filters: - - name: envoy.filters.http.router - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router - filter_chain_match: - server_names: ["camera.jerxie.com"] - transport_socket: - name: envoy.transport_sockets.tls - typed_config: - "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext - common_tls_context: - tls_certificates: - - certificate_chain: { filename: "/etc/certs/downstream/camera.jerxie.com/fullchain.pem" } - private_key: { filename: "/etc/certs/downstream/camera.jerxie.com/privkey.pem" } - - filters: - - name: envoy.filters.network.http_connection_manager - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager - stat_prefix: ingress_http - codec_type: AUTO - route_config: - virtual_hosts: - - name: note_service - domains: ["note.jerxie.com"] - routes: - - match: { prefix: "/" } - route: { cluster: "_nas_note"} - http_filters: - - name: envoy.filters.http.router - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router - filter_chain_match: - server_names: ["note.jerxie.com"] - transport_socket: - name: envoy.transport_sockets.tls - typed_config: - "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext - common_tls_context: - tls_certificates: - - certificate_chain: { filename: "/etc/certs/downstream/note.jerxie.com/fullchain.pem" } - private_key: { filename: 
"/etc/certs/downstream/note.jerxie.com/privkey.pem" } - # - filters: - # - name: envoy.filters.network.http_connection_manager - # typed_config: - # "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager - # stat_prefix: ingress_http - # codec_type: AUTO - # route_config: - # virtual_hosts: - # - name: baby_service - # domains: ["baby.jerxie.com"] - # routes: - # - match: { prefix: "/" } - # route: { cluster: "_baby_buddy"} - # http_filters: - # - name: envoy.filters.http.router - # typed_config: - # "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router - # filter_chain_match: - # server_names: ["baby.jerxie.com"] - # transport_socket: - # name: envoy.transport_sockets.tls - # typed_config: - # "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext - # common_tls_context: - # tls_certificates: - # - certificate_chain: { filename: "/etc/certs/downstream/baby.jerxie.com/fullchain.pem" } - # private_key: { filename: "/etc/certs/downstream/baby.jerxie.com/privkey.pem" } - - filters: - - name: envoy.filters.network.http_connection_manager - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager - stat_prefix: ingress_http - upgrade_configs: - - upgrade_type: websocket - codec_type: AUTO - route_config: - virtual_hosts: - - name: container_service - domains: ["container.jerxie.com"] - routes: - - match: { prefix: "/" } - route: { cluster: "_portainer_ui"} - http_filters: - - name: envoy.filters.http.router - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router - filter_chain_match: - server_names: ["container.jerxie.com"] - transport_socket: - name: envoy.transport_sockets.tls - typed_config: - "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext - common_tls_context: - tls_certificates: - - certificate_chain: { filename: "/etc/certs/downstream/container.jerxie.com/fullchain.pem" } - private_key: { filename: "/etc/certs/downstream/container.jerxie.com/privkey.pem" } - - filters: - - name: envoy.filters.network.http_connection_manager - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager - stat_prefix: ingress_http - upgrade_configs: - - upgrade_type: websocket - codec_type: AUTO - route_config: - virtual_hosts: - - name: grafana_service - domains: ["grafana.jerxie.com"] - routes: - - match: { prefix: "/" } - route: { cluster: "_grafana_ui"} - http_filters: - - name: envoy.filters.http.router - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router - filter_chain_match: - server_names: ["grafana.jerxie.com"] - transport_socket: - name: envoy.transport_sockets.tls - typed_config: - "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext - common_tls_context: - tls_certificates: - - certificate_chain: { filename: "/etc/certs/downstream/grafana.jerxie.com/fullchain.pem" } - private_key: { filename: "/etc/certs/downstream/grafana.jerxie.com/privkey.pem" } - - filters: - - name: envoy.filters.network.http_connection_manager - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager - stat_prefix: ingress_http - upgrade_configs: - - upgrade_type: websocket - codec_type: AUTO - route_config: - virtual_hosts: - - name: 
auth_service - domains: ["auth.jerxie.com"] - routes: - - match: { prefix: "/" } - route: { cluster: "_auth_server"} - http_filters: - - name: envoy.filters.http.router - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router - filter_chain_match: - server_names: ["auth.jerxie.com"] - transport_socket: - name: envoy.transport_sockets.tls - typed_config: - "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext - common_tls_context: - tls_certificates: - - certificate_chain: { filename: "/etc/certs/downstream/auth.jerxie.com/fullchain.pem" } - private_key: { filename: "/etc/certs/downstream/auth.jerxie.com/privkey.pem" } - - filters: - - name: envoy.filters.network.http_connection_manager - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager - stat_prefix: ingress_http - upgrade_configs: - - upgrade_type: websocket - codec_type: AUTO - route_config: - virtual_hosts: - - name: ai_service - domains: ["ai.jerxie.com"] - routes: - - match: { prefix: "/api" } - route: { cluster: "_ai_api_server", timeout: 0s} - - match: { prefix: "/" } - route: { cluster: "_ai_ui_server", timeout: 0s} - http_filters: - - name: envoy.filters.http.router - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router - filter_chain_match: - server_names: ["ai.jerxie.com"] - transport_socket: - name: envoy.transport_sockets.tls - typed_config: - "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext - common_tls_context: - tls_certificates: - - certificate_chain: { filename: "/etc/certs/downstream/ai.jerxie.com/fullchain.pem" } - private_key: { filename: "/etc/certs/downstream/ai.jerxie.com/privkey.pem" } - - filters: - - name: envoy.filters.network.http_connection_manager - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager - stat_prefix: ingress_http - upgrade_configs: - - upgrade_type: websocket - codec_type: AUTO - route_config: - virtual_hosts: - - name: pcb_service - domains: ["pcb.jerxie.com"] - routes: - - match: { prefix: "/" } - route: { cluster: "_pcb_server", timeout: 0s} - http_filters: - - name: envoy.filters.http.router - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router - filter_chain_match: - server_names: ["pcb.jerxie.com"] - transport_socket: - name: envoy.transport_sockets.tls - typed_config: - "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext - common_tls_context: - tls_certificates: - - certificate_chain: { filename: "/etc/certs/downstream/pcb.jerxie.com/fullchain.pem" } - private_key: { filename: "/etc/certs/downstream/pcb.jerxie.com/privkey.pem" } - - filters: - - name: envoy.filters.network.http_connection_manager - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager - stat_prefix: ingress_http - upgrade_configs: - - upgrade_type: websocket - codec_type: AUTO - route_config: - virtual_hosts: - - name: monitor_service - domains: ["monitor.jerxie.com"] - routes: - - match: { prefix: "/" } - route: { cluster: "_monitor_server", timeout: 0s} - http_filters: - - name: envoy.filters.http.router - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router - filter_chain_match: - server_names: ["monitor.jerxie.com"] - 
transport_socket: - name: envoy.transport_sockets.tls - typed_config: - "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext - common_tls_context: - tls_certificates: - - certificate_chain: { filename: "/etc/certs/downstream/monitor.jerxie.com/fullchain.pem" } - private_key: { filename: "/etc/certs/downstream/monitor.jerxie.com/privkey.pem" } \ No newline at end of file diff --git a/docker-compose.yml b/docker-compose.yml index 70f0e7e..fd5f972 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -15,7 +15,7 @@ volumes: # Ensure this is mounted read/write for the container - data_volume:/app/data:rw - command: ["--nodeID", "home", "--config-dir", "/app/data/config","--db","file:/app/data/data.db?_foreign_keys=on"] + command: ["--nodeID", "home", "--config-dir", "/app/data/config","--db","file:/app/data/data.db?_foreign_keys=on", "--enable-cert-issuance", "le-staging=false", "le-webroot-path=/app/data/acme"] # Define the volumes used by the services volumes: diff --git a/go.mod b/go.mod index 22021cb..34ec9fb 100644 --- a/go.mod +++ b/go.mod @@ -5,6 +5,7 @@ require ( github.com/envoyproxy/go-control-plane v0.13.4 github.com/envoyproxy/go-control-plane/envoy v1.35.0 + github.com/go-acme/lego/v4 v4.26.0 github.com/lib/pq v1.10.9 github.com/mattn/go-sqlite3 v1.14.32 google.golang.org/grpc v1.75.1 @@ -15,15 +16,22 @@ require ( cel.dev/expr v0.24.0 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 // indirect github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 // indirect github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect + github.com/go-jose/go-jose/v4 v4.1.2 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/kr/text v0.2.0 // indirect + github.com/miekg/dns v1.1.68 // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect - golang.org/x/net v0.41.0 // indirect - golang.org/x/sys v0.33.0 // indirect - golang.org/x/text v0.26.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 // indirect + golang.org/x/crypto v0.42.0 // indirect + golang.org/x/mod v0.27.0 // indirect + golang.org/x/net v0.44.0 // indirect + golang.org/x/sync v0.17.0 // indirect + golang.org/x/sys v0.36.0 // indirect + golang.org/x/text v0.29.0 // indirect + golang.org/x/tools v0.36.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250818200422-3122310a409c // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250818200422-3122310a409c // indirect ) diff --git a/go.sum b/go.sum index 1bb4e6c..077128d 100644 --- a/go.sum +++ b/go.sum @@ -1,10 +1,12 @@ cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 h1:aQ3y1lwWyqYPiWZThqv1aFbZMiM9vblcSArJRf2Irls= github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M= github.com/envoyproxy/go-control-plane v0.13.4/go.mod h1:kDfuBlDVsSj2MjrLEtRWtHlsWIFcGyB2RMO44Dc5GZA= github.com/envoyproxy/go-control-plane/envoy v1.35.0 h1:ixjkELDE+ru6idPxcHLj8LBVc2bFP7iBytj353BoHUo= @@ -13,6 +15,10 @@ github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4= github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8= github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU= +github.com/go-acme/lego/v4 v4.26.0 h1:521aEQxNstXvPQcFDDPrJiFfixcCQuvAvm35R4GbyYA= +github.com/go-acme/lego/v4 v4.26.0/go.mod h1:BQVAWgcyzW4IT9eIKHY/RxYlVhoyKyOMXOkq7jK1eEQ= +github.com/go-jose/go-jose/v4 v4.1.2 h1:TK/7NqRQZfgAh+Td8AlsrvtPoUyiHh0LqVvokh+1vHI= +github.com/go-jose/go-jose/v4 v4.1.2/go.mod h1:22cg9HWM1pOlnRiY+9cQYJ9XHmya1bYW8OeDM6Ku6Oo= github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= @@ -31,14 +37,16 @@ github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/mattn/go-sqlite3 v1.14.32 h1:JD12Ag3oLy1zQA+BNn74xRgaBbdhbNIDYvQUEuuErjs= github.com/mattn/go-sqlite3 v1.14.32/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/miekg/dns v1.1.68 h1:jsSRkNozw7G/mnmXULynzMNIsgY2dHC8LO6U6Ij2JEA= +github.com/miekg/dns v1.1.68/go.mod h1:fujopn7TB3Pu3JM69XaawiU0wqjpL9/8xGop5UrTPps= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= @@ -53,18 +61,26 @@ go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= go.uber.org/goleak v1.3.0 
h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= -golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= -golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= -golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= -golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= -golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= +golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI= +golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8= +golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ= +golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc= +golang.org/x/net v0.44.0 h1:evd8IRDyfNBMBTTY5XRF1vaZlD+EmWx6x8PkhR04H/I= +golang.org/x/net v0.44.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY= +golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= +golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= +golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk= +golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4= +golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg= +golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s= gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= -google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 h1:FiusG7LWj+4byqhbvmB+Q93B/mOxJLN2DTozDuZm4EU= -google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7/go.mod h1:kXqgZtrWaf6qS3jZOCnCH7WYfrvFjkC51bM8fz3RsCA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 h1:pFyd6EwwL2TqFf8emdthzeX+gZE1ElRq3iM8pui4KBY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/genproto/googleapis/api v0.0.0-20250818200422-3122310a409c h1:AtEkQdl5b6zsybXcbz00j1LwNodDuH6hVifIaNqk7NQ= +google.golang.org/genproto/googleapis/api v0.0.0-20250818200422-3122310a409c/go.mod h1:ea2MjsO70ssTfCjiwHgI0ZFqcw45Ksuk2ckf9G468GA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250818200422-3122310a409c h1:qXWI/sQtv5UKboZ/zUk7h+mrf/lXORyI+n9DKDAusdg= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250818200422-3122310a409c/go.mod h1:gw1tLEfykwDz2ET4a12jcXt4couGAm7IwsVaTy0Sflo= google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= diff --git a/internal/api.go b/internal/api.go index 87c9807..82e7c5a 100644 --- a/internal/api.go +++ b/internal/api.go @@ -8,18 +8,22 @@ resourcev3 "github.com/envoyproxy/go-control-plane/pkg/resource/v3" internalapi "envoy-control-plane/internal/api" - "envoy-control-plane/internal/snapshot" + "envoy-control-plane/internal/pkg/snapshot" ) // API holds reference to snapshot manager type API struct { - Manager 
*snapshot.SnapshotManager // SnapshotManager is assumed to be defined elsewhere in internal package
+	Manager            *snapshot.SnapshotManager
+	enableCertIssuance bool
+	acmeWebRootPath    string
 }

 // NewAPI returns a new REST API handler
-func NewAPI(sm *snapshot.SnapshotManager) *API {
+func NewAPI(sm *snapshot.SnapshotManager, enableCertIssuance bool, acmeWebRootPath string) *API {
 	return &API{
-		Manager: sm,
+		Manager:            sm,
+		enableCertIssuance: enableCertIssuance,
+		acmeWebRootPath:    acmeWebRootPath,
 	}
 }
@@ -106,4 +110,7 @@
 	// Consistency Handler
 	mux.HandleFunc("/is-consistent", api.isConsistentHandler)
+
+	// Certificate issuance handler
+	mux.HandleFunc("/issue-certificate", api.issueCertificateHandler)
 }
diff --git a/internal/api/types.go b/internal/api/types.go
index fb34ae6..07dda31 100644
--- a/internal/api/types.go
+++ b/internal/api/types.go
@@ -67,3 +67,9 @@
 	DBOnly       map[resourcev3.Type][]string `json:"db-only"` // Resources enabled in DB but not present in cache
 	Inconsistent bool                         `json:"inconsistent"`
 }
+
+type RequestDomainCertificate struct {
+	Domain string `json:"domain"`
+	Email  string `json:"email"`
+	Issuer string `json:"issuer"` // defaults to "letsencrypt" when empty; encoding/json ignores `default` tags
+}
diff --git a/internal/api_handlers.go b/internal/api_handlers.go
index 5f55249..4ffbdf2 100644
--- a/internal/api_handlers.go
+++ b/internal/api_handlers.go
@@ -6,14 +6,16 @@
 	"fmt"
 	"net/http"

+	internalapi "envoy-control-plane/internal/api"
+	"envoy-control-plane/internal/pkg/cert"
+	"envoy-control-plane/internal/pkg/snapshot"
+	"envoy-control-plane/internal/pkg/storage"
+
+	_ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
 	"github.com/envoyproxy/go-control-plane/pkg/cache/types"
 	resourcev3 "github.com/envoyproxy/go-control-plane/pkg/resource/v3"
 	"google.golang.org/protobuf/encoding/protojson"
 	"google.golang.org/protobuf/reflect/protoreflect"
-
-	internalapi "envoy-control-plane/internal/api"
-	"envoy-control-plane/internal/snapshot"
-	"envoy-control-plane/internal/storage"
 )

 // ---------------- Persistence Handlers ----------------
@@ -466,3 +468,35 @@
 		http.Error(w, "failed to encode response", http.StatusInternalServerError)
 	}
 }
+
+func (api *API) issueCertificateHandler(w http.ResponseWriter, r *http.Request) {
+	if r.Method != http.MethodPost {
+		http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
+		return
+	}
+	if !api.enableCertIssuance {
+		http.Error(w, "certificate issuance is not enabled", http.StatusForbidden)
+		return
+	}
+	var req internalapi.RequestDomainCertificate
+	if err := json.NewDecoder(r.Body).Decode(&req); err != nil || req.Domain == "" || req.Email == "" {
+		http.Error(w, "domain and email required", http.StatusBadRequest)
+		return
+	}
+	if req.Issuer == "" {
+		req.Issuer = "letsencrypt"
+	}
+
+	issuer, err := cert.NewCertIssuer(req.Issuer)
+	if err != nil {
+		http.Error(w, "failed to create certificate issuer", http.StatusInternalServerError)
+		return
+	}
+	issued, err := issuer.IssueCertificate(req.Domain, api.acmeWebRootPath, req.Email)
+	if err != nil {
+		http.Error(w, fmt.Sprintf("failed to issue certificate: %v", err), http.StatusInternalServerError)
+		return
+	}
+	// Encode writes the body with an implicit 200; calling WriteHeader after the
+	// body would be a superfluous no-op, so the header is set before encoding.
+	w.Header().Set("Content-Type", "application/json")
+	json.NewEncoder(w).Encode(issued)
+}
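For illustration (not part of the patch): a minimal client exercising the new endpoint. It assumes the control plane runs locally with --rest-port 8080 and --enable-cert-issuance; the domain and email are placeholders.

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	payload := bytes.NewBufferString(`{"domain":"example.jerxie.com","email":"admin@example.com","issuer":"letsencrypt"}`)
	resp, err := http.Post("http://localhost:8080/issue-certificate", "application/json", payload)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status) // 200 with the issued PEM bundle on success
}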
"fmt" + "net/http" + "os" + "path/filepath" + "strings" + + "github.com/envoyproxy/go-control-plane/pkg/cache/types" + cachev3 "github.com/envoyproxy/go-control-plane/pkg/cache/v3" + resourcev3 "github.com/envoyproxy/go-control-plane/pkg/resource/v3" + "github.com/envoyproxy/go-control-plane/pkg/server/v3" + "github.com/envoyproxy/go-control-plane/pkg/test/v3" + + "envoy-control-plane/internal" + "envoy-control-plane/internal/config" + internallog "envoy-control-plane/internal/log" + "envoy-control-plane/internal/pkg/api" + "envoy-control-plane/internal/pkg/cert" + rtspserver "envoy-control-plane/internal/pkg/server" + "envoy-control-plane/internal/pkg/snapshot" + internalstorage "envoy-control-plane/internal/pkg/storage" +) + +// loadConfigFiles has been moved or recreated to accept and use a context +// It is the same logic extracted from your original main.go +func loadConfigFiles(ctx context.Context, dir string) (map[string][]types.Resource, error) { + log := internallog.LogFromContext(ctx) + log.Infof("loading configuration files from directory: %s", dir) + + files, err := os.ReadDir(dir) + if err != nil { + return nil, fmt.Errorf("failed to read directory %s: %w", dir, err) + } + + resourceFiles := make(map[string][]types.Resource) + for _, file := range files { + if file.IsDir() { + continue + } + fileName := file.Name() + if strings.HasSuffix(fileName, ".yaml") || strings.HasSuffix(fileName, ".yml") || strings.HasSuffix(fileName, ".json") { + filePath := filepath.Join(dir, fileName) + log.Infof(" -> loading config file: %s", filePath) + + rf, err := snapshot.LoadSnapshotFromFile(ctx, filePath) + if err != nil { + return nil, fmt.Errorf("failed to load snapshot from file %s: %w", filePath, err) + } + for k, v := range rf { + resourceFiles[k] = append(resourceFiles[k], v...) + } + log.Infof("loaded %d resources from %s", len(rf), filePath) + } + } + return resourceFiles, nil +} + +// loadInitialConfig attempts to load the configuration from DB, config dir, or snapshot file. +// It also ensures the loaded config is saved to the DB if it came from files. +func loadInitialConfig( + ctx context.Context, + manager *snapshot.SnapshotManager, + storage *internalstorage.Storage, + cfg *config.Config, +) error { + log := internallog.LogFromContext(ctx) + loadedConfigs := false + + // --- 1. Attempt to load from Database --- + snapCfg, err := storage.RebuildSnapshot(ctx) + if err == nil && (len(snapCfg.EnabledClusters) > 0 || len(snapCfg.EnabledListeners) > 0) { + if err := manager.SetSnapshotFromConfig(ctx, "snap-from-db", snapCfg); err != nil { + return fmt.Errorf("failed to set DB snapshot: %w", err) + } + loadedConfigs = true + log.Infof("loaded snapshot from database") + } + + if !loadedConfigs { + var resources map[string][]types.Resource + snapSource := "" + + // --- 2. Attempt to load from Config Directory --- + if cfg.ConfigDir != "" { + resources, err = loadConfigFiles(ctx, cfg.ConfigDir) + if err != nil { + return fmt.Errorf("failed to load configs from directory: %w", err) + } + snapSource = "snap-from-dir" + loadedConfigs = len(resources) > 0 + if loadedConfigs { + log.Infof("loaded snapshot from directory: %s", cfg.ConfigDir) + } + } + + // --- 3. 
+
+// loadInitialConfig attempts to load the configuration from the database, the
+// config directory, or a snapshot file, in that order of precedence. It also
+// persists the loaded config to the DB when it came from files.
+func loadInitialConfig(
+	ctx context.Context,
+	manager *snapshot.SnapshotManager,
+	storage *internalstorage.Storage,
+	cfg *config.Config,
+) error {
+	log := internallog.LogFromContext(ctx)
+	loadedConfigs := false
+
+	// --- 1. Attempt to load from Database ---
+	snapCfg, err := storage.RebuildSnapshot(ctx)
+	if err == nil && (len(snapCfg.EnabledClusters) > 0 || len(snapCfg.EnabledListeners) > 0) {
+		if err := manager.SetSnapshotFromConfig(ctx, "snap-from-db", snapCfg); err != nil {
+			return fmt.Errorf("failed to set DB snapshot: %w", err)
+		}
+		loadedConfigs = true
+		log.Infof("loaded snapshot from database")
+	}
+
+	if !loadedConfigs {
+		var resources map[string][]types.Resource
+		snapSource := ""
+
+		// --- 2. Attempt to load from Config Directory ---
+		if cfg.ConfigDir != "" {
+			resources, err = loadConfigFiles(ctx, cfg.ConfigDir)
+			if err != nil {
+				return fmt.Errorf("failed to load configs from directory: %w", err)
+			}
+			snapSource = "snap-from-dir"
+			loadedConfigs = len(resources) > 0
+			if loadedConfigs {
+				log.Infof("loaded snapshot from directory: %s", cfg.ConfigDir)
+			}
+		}
+
+		// --- 3. Attempt to load from Snapshot File (only if dir didn't load any) ---
+		if !loadedConfigs && cfg.SnapshotFile != "" {
+			if _, err := os.Stat(cfg.SnapshotFile); err == nil {
+				resources, err = snapshot.LoadSnapshotFromFile(ctx, cfg.SnapshotFile)
+				if err != nil {
+					return fmt.Errorf("failed to load snapshot from file: %w", err)
+				}
+				snapSource = "snap-from-file"
+				loadedConfigs = len(resources) > 0
+				if loadedConfigs {
+					log.Infof("loaded snapshot from file: %s", cfg.SnapshotFile)
+				}
+			} else {
+				log.Warnf("snapshot file not found: %s", cfg.SnapshotFile)
+			}
+		}
+
+		// --- 4. Apply and Save Loaded Config (if any) ---
+		if loadedConfigs {
+			if err := manager.SetSnapshot(ctx, snapSource, resources); err != nil {
+				return fmt.Errorf("failed to set loaded snapshot: %w", err)
+			}
+
+			// Save the newly loaded snapshot to the DB for persistence
+			snapCfgToSave, err := manager.SnapshotToConfig(ctx, cfg.NodeID)
+			if err != nil {
+				return fmt.Errorf("failed to convert snapshot to DB config: %w", err)
+			}
+			if err := storage.SaveSnapshot(ctx, snapCfgToSave, internalstorage.DeleteLogical); err != nil {
+				return fmt.Errorf("failed to save initial snapshot into DB: %w", err)
+			}
+			log.Infof("initial snapshot written into database from %s", snapSource)
+		}
+	}
+
+	// --- 5. Ensure an initial snapshot exists (even an empty one) ---
+	snap, err := manager.Cache.GetSnapshot(cfg.NodeID)
+	if err != nil || !loadedConfigs {
+		log.Warnf("no valid snapshot found, creating empty snapshot")
+		snap, err = cachev3.NewSnapshot("snap-init", map[resourcev3.Type][]types.Resource{
+			resourcev3.ClusterType:  {},
+			resourcev3.RouteType:    {},
+			resourcev3.ListenerType: {},
+			// Add other resource types if needed (e.g., resourcev3.SecretType)
+		})
+		if err != nil {
+			return fmt.Errorf("failed to build initial empty snapshot: %w", err)
+		}
+		if err := manager.Cache.SetSnapshot(ctx, cfg.NodeID, snap); err != nil {
+			return fmt.Errorf("failed to set initial empty snapshot: %w", err)
+		}
+	}
+
+	log.Infof("xDS snapshot ready: version %s", snap.GetVersion(string(resourcev3.ClusterType)))
+
+	return nil
+}
+
+// Run encapsulates the entire application startup logic.
+func Run(ctx context.Context) error {
+	log := internallog.LogFromContext(ctx)
+	cfg := config.GetConfig() // Use the globally available config
+
+	// 1. Conditional certificate issuance setup (validates flags, ensures webroot)
+	cert.RunCertIssuance(ctx)
+
+	// 2. Database initialization
+	dbConnStr, dbDriver, err := internalstorage.SetupDBConnection(ctx, cfg.DBConnStr)
+	if err != nil {
+		return fmt.Errorf("database setup failed: %w", err)
+	}
+	db, err := sql.Open(dbDriver, dbConnStr)
+	if err != nil {
+		return fmt.Errorf("failed to connect to DB: %w", err)
+	}
+	defer db.Close()
+
+	// 3. Storage and snapshot manager setup
+	storage := internalstorage.NewStorage(db, dbDriver)
+	if err := storage.InitSchema(ctx); err != nil {
+		return fmt.Errorf("failed to initialize DB schema: %w", err)
+	}
+
+	cache := cachev3.NewSnapshotCache(false, cachev3.IDHash{}, log)
+	manager := snapshot.NewSnapshotManager(cache, cfg.NodeID, storage)
+
+	// 4. Load initial configuration (DB, dir, or file)
+	if err := loadInitialConfig(ctx, manager, storage, cfg); err != nil {
+		return fmt.Errorf("failed to load initial configuration: %w", err)
+	}
+
+	// 5. Start the xDS gRPC server (defined in the internal package)
+	cb := &test.Callbacks{Debug: true}
+	srv := server.NewServer(ctx, cache, cb)
+	go internal.RunServer(srv, cfg.Port)
+
+	// 6. Start the REST API server
+	mux := http.NewServeMux()
+	api.RegisterRoutes(mux, manager, cfg.EnableCertIssuance, cfg.WebrootPath)
+
+	return restserver.RunRESTServer(ctx, mux, cfg.RESTPort, cfg.WebrootPath, cfg.EnableCertIssuance)
+}
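The diff does not show the entrypoint, so the following cmd/main.go wiring is an editor's sketch of how this package would presumably be driven (the logger-in-context setup is elided; internallog.LogFromContext is assumed to fall back to a default logger):

package main

import (
	"context"
	"flag"
	"log"

	"envoy-control-plane/internal/app"
	"envoy-control-plane/internal/config"
)

func main() {
	config.InitFlags() // binds --port, --nodeID, --rest-port, --enable-cert-issuance, ...
	flag.Parse()

	if err := app.Run(context.Background()); err != nil {
		log.Fatalf("control plane exited: %v", err)
	}
}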
diff --git a/internal/config/config.go b/internal/config/config.go
new file mode 100644
index 0000000..bfa51c0
--- /dev/null
+++ b/internal/config/config.go
@@ -0,0 +1,41 @@
+package config
+
+import (
+	"flag"
+
+	"k8s.io/klog/v2"
+)
+
+// Config holds all application configuration derived from flags.
+type Config struct {
+	Port               uint
+	NodeID             string
+	RESTPort           uint
+	SnapshotFile       string
+	ConfigDir          string
+	DBConnStr          string
+	EnableCertIssuance bool
+	WebrootPath        string
+}
+
+// Global configuration variable.
+var cfg Config
+
+// InitFlags initializes and binds all command-line flags.
+func InitFlags() {
+	klog.InitFlags(nil)
+
+	flag.UintVar(&cfg.Port, "port", 18000, "xDS management server port")
+	flag.StringVar(&cfg.NodeID, "nodeID", "test-id", "Node ID")
+	flag.UintVar(&cfg.RESTPort, "rest-port", 8080, "REST API server port")
+	flag.StringVar(&cfg.SnapshotFile, "snapshot-file", "", "Optional initial snapshot JSON/YAML file")
+	flag.StringVar(&cfg.ConfigDir, "config-dir", "data/config", "Optional directory containing multiple config files")
+	flag.StringVar(&cfg.DBConnStr, "db", "", "Optional database connection string for config persistence")
+	flag.BoolVar(&cfg.EnableCertIssuance, "enable-cert-issuance", false, "Enable Let's Encrypt certificate issuance on startup")
+	flag.StringVar(&cfg.WebrootPath, "webroot-path", "data/acme", "Local path to serve the HTTP-01 challenge file (required if enabled)")
+}
+
+// GetConfig returns the application configuration.
+func GetConfig() *Config {
+	return &cfg
+}
diff --git a/internal/pkg/api/api.go b/internal/pkg/api/api.go
new file mode 100644
index 0000000..6a213d7
--- /dev/null
+++ b/internal/pkg/api/api.go
@@ -0,0 +1,30 @@
+// internal/api/api.go
+package api
+
+import (
+	"net/http"
+
+	"envoy-control-plane/internal"
+	"envoy-control-plane/internal/pkg/snapshot"
+)
+
+const ACME_CHALLENGE_WEB_PATH = "/.well-known/acme-challenge"
+const htmlStaticPath = "./static"
+
+// RegisterRoutes sets up all the HTTP routes for the REST API.
+func RegisterRoutes(mux *http.ServeMux, manager *snapshot.SnapshotManager, enableCertIssuance bool, webrootPath string) {
+	// Register the REST handlers (e.g., /config, /status) through the API object
+	api := internal.NewAPI(manager, enableCertIssuance, webrootPath)
+	api.RegisterRoutes(mux)
+
+	// Handle static files
+	mux.Handle("/", http.FileServer(http.Dir(htmlStaticPath)))
+
+	// ACME challenge handler: serve HTTP-01 tokens directly from the webroot
+	if enableCertIssuance {
+		mux.Handle(ACME_CHALLENGE_WEB_PATH+"/", http.FileServer(http.Dir(webrootPath)))
+	}
+}
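To see why serving the webroot directly satisfies HTTP-01, note that lego's webroot provider writes each challenge token under <webroot>/.well-known/acme-challenge/. A sketch of verifying the mapping against a running instance (token name and port are illustrative):

package main

import (
	"fmt"
	"net/http"
	"os"
	"path/filepath"
)

func main() {
	webroot := "data/acme" // default --webroot-path
	dir := filepath.Join(webroot, ".well-known", "acme-challenge")
	if err := os.MkdirAll(dir, 0o755); err != nil {
		panic(err)
	}
	if err := os.WriteFile(filepath.Join(dir, "test-token"), []byte("ok"), 0o644); err != nil {
		panic(err)
	}

	// The FileServer registered above appends the full URL path to the webroot
	// directory, so the token is reachable without any prefix stripping.
	resp, err := http.Get("http://localhost:8080/.well-known/acme-challenge/test-token")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}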
diff --git a/internal/pkg/api/middleware.go b/internal/pkg/api/middleware.go
new file mode 100644
index 0000000..978713f
--- /dev/null
+++ b/internal/pkg/api/middleware.go
@@ -0,0 +1,23 @@
+package api
+
+import (
+	"net/http"
+)
+
+// CORS is a middleware that sets the Access-Control-Allow-Origin header to * (all origins).
+func CORS(next http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		// Set CORS headers for all domains
+		w.Header().Set("Access-Control-Allow-Origin", "*")
+		w.Header().Set("Access-Control-Allow-Methods", "GET, POST, OPTIONS, PUT, DELETE")
+		w.Header().Set("Access-Control-Allow-Headers", "Content-Type, Access-Control-Allow-Headers, Authorization, X-Requested-With")
+
+		// Short-circuit preflight requests
+		if r.Method == http.MethodOptions {
+			w.WriteHeader(http.StatusOK)
+			return
+		}
+
+		next.ServeHTTP(w, r)
+	})
+}
diff --git a/internal/pkg/cert/api/type.go b/internal/pkg/cert/api/type.go
new file mode 100644
index 0000000..1dcb5ef
--- /dev/null
+++ b/internal/pkg/cert/api/type.go
@@ -0,0 +1,22 @@
+package api
+
+// Certificate represents the result of a successful certificate issuance.
+type Certificate struct {
+	Domain     string
+	CertPEM    []byte
+	KeyPEM     []byte
+	FullChain  []byte // Cert + issuer chain
+	AccountKey []byte // Private key for the ACME account
+}
+
+// CertIssuer defines the contract for any service that issues TLS certificates.
+// This interface allows the application to swap between different ACME providers
+// or even non-ACME CAs (if the challenge logic is implemented internally).
+type CertIssuer interface {
+	// IssueCertificate handles the entire process of domain validation and
+	// certificate issuance, using the HTTP-01 challenge method for validation.
+	IssueCertificate(domain, webrootPath, email string) (*Certificate, error)
+
+	// GetName returns the name of the issuer (e.g., "LetsEncrypt", "ZeroSSL").
+	GetName() string
+}
diff --git a/internal/pkg/cert/cert.go b/internal/pkg/cert/cert.go
new file mode 100644
index 0000000..2490b91
--- /dev/null
+++ b/internal/pkg/cert/cert.go
@@ -0,0 +1,39 @@
+package cert
+
+import (
+	"context"
+	"os"
+
+	"envoy-control-plane/internal/config"
+	internallog "envoy-control-plane/internal/log"
+)
+
+const defaultFileMode = 0755
+
+// RunCertIssuance handles the conditional logic and argument validation for cert issuance.
+func RunCertIssuance(ctx context.Context) {
+	log := internallog.LogFromContext(ctx)
+	cfg := config.GetConfig()
+
+	if !cfg.EnableCertIssuance {
+		return
+	}
+
+	log.Infof("Certificate issuance enabled. Validating arguments...")
+
+	if cfg.WebrootPath == "" {
+		log.Errorf("Webroot path is required for certificate issuance")
+		return
+	}
+
+	// Ensure the webroot path exists so HTTP-01 tokens can be written to it
+	if _, err := os.Stat(cfg.WebrootPath); os.IsNotExist(err) {
+		log.Warnf("Webroot path '%s' does not exist. Creating it.", cfg.WebrootPath)
+		if err := os.MkdirAll(cfg.WebrootPath, defaultFileMode); err != nil {
+			log.Errorf("Failed to create webroot path: %v", err)
+			return
+		}
+	}
+
+	// NOTE: a standalone HTTP-01 challenge listener on :80 could be started here;
+	// for now the challenge path is served by the REST server instead.
+}
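Since the CertIssuer interface is meant to admit non-ACME CAs as well, here is a hypothetical self-signed implementation as a sketch; the selfsigned package does not exist in this patch and would still need its own case in the factory below.

// Editor's sketch: a non-ACME CertIssuer that returns a self-signed certificate.
package selfsigned

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"math/big"
	"time"

	api "envoy-control-plane/internal/pkg/cert/api"
)

type Issuer struct{}

func (s *Issuer) GetName() string { return "SelfSigned" }

// IssueCertificate ignores webrootPath and email: no challenge is needed
// because the certificate is signed locally.
func (s *Issuer) IssueCertificate(domain, webrootPath, email string) (*api.Certificate, error) {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		return nil, err
	}
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{CommonName: domain},
		DNSNames:     []string{domain},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(90 * 24 * time.Hour),
	}
	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	if err != nil {
		return nil, err
	}
	keyDER, err := x509.MarshalECPrivateKey(key)
	if err != nil {
		return nil, err
	}
	certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: der})
	keyPEM := pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: keyDER})
	return &api.Certificate{Domain: domain, CertPEM: certPEM, KeyPEM: keyPEM, FullChain: certPEM}, nil
}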
+func NewCertIssuer(issuerType string) (api.CertIssuer, error) {
+	switch issuerType {
+	case "letsencrypt":
+		// Return the concrete *letsencrypt.LetsEncryptIssuer, which satisfies the api.CertIssuer interface.
+		return &letsencrypt.LetsEncryptIssuer{}, nil
+
+	// Add new certificate authority implementations here as new cases
+	// case "zerossl":
+	//	return &zerossl.ZeroSSLIssuer{}, nil
+
+	default:
+		return nil, fmt.Errorf("unknown certificate issuer type %q (valid types: letsencrypt)", issuerType)
+	}
+}
diff --git a/internal/pkg/cert/letsencrypt/issuer.go b/internal/pkg/cert/letsencrypt/issuer.go
new file mode 100644
index 0000000..67f2c00
--- /dev/null
+++ b/internal/pkg/cert/letsencrypt/issuer.go
@@ -0,0 +1,110 @@
+package letsencrypt
+
+import (
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/rand"
+	"crypto/x509"
+	"encoding/pem"
+	"fmt"
+
+	"github.com/go-acme/lego/v4/certcrypto"
+	"github.com/go-acme/lego/v4/certificate"
+	"github.com/go-acme/lego/v4/lego"
+	"github.com/go-acme/lego/v4/providers/http/webroot"
+	"github.com/go-acme/lego/v4/registration"
+
+	api "envoy-control-plane/internal/pkg/cert/api"
+)
+
+// LEOptions implements the registration.User interface that the lego ACME client requires.
+type LEOptions struct {
+	Email        string
+	Registration *registration.Resource
+	key          crypto.PrivateKey
+}
+
+func (u *LEOptions) GetEmail() string {
+	return u.Email
+}
+
+func (u *LEOptions) GetRegistration() *registration.Resource {
+	return u.Registration
+}
+
+func (u *LEOptions) GetPrivateKey() crypto.PrivateKey {
+	return u.key
+}
+
+// LetsEncryptIssuer implements the CertIssuer interface for Let's Encrypt.
+type LetsEncryptIssuer struct {
+}
+
+// GetName returns the name of the issuer.
+func (l *LetsEncryptIssuer) GetName() string {
+	return "LetsEncrypt (Production)"
+}
+
+// IssueCertificate implements the core certificate issuance logic using go-acme/lego.
+func (l *LetsEncryptIssuer) IssueCertificate(domain, webrootPath, email string) (*api.Certificate, error) {
+	// 1. Set up the ACME account key and user
+	privateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+	if err != nil {
+		return nil, fmt.Errorf("failed to generate private key: %w", err)
+	}
+
+	acmeUser := &LEOptions{
+		Email: email,
+		key:   privateKey,
+	}
+
+	// 2. Configure the ACME client
+	config := lego.NewConfig(acmeUser)
+	config.CADirURL = lego.LEDirectoryProduction
+
+	// Set the key type (this must match the key generated above)
+	config.Certificate.KeyType = certcrypto.EC256
+
+	client, err := lego.NewClient(config)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create ACME client: %w", err)
+	}
+
+	// 3. Set up the HTTP-01 challenge provider (webroot)
+	httpProvider, err := webroot.NewHTTPProvider(webrootPath)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create HTTP-01 provider: %w", err)
+	}
+	if err := client.Challenge.SetHTTP01Provider(httpProvider); err != nil {
+		return nil, fmt.Errorf("failed to set HTTP-01 provider: %w", err)
+	}
+
+	// 4. Register the ACME account
+	reg, err := client.Registration.Register(registration.RegisterOptions{TermsOfServiceAgreed: true})
+	if err != nil {
+		return nil, fmt.Errorf("failed to register ACME account: %w", err)
+	}
+	acmeUser.Registration = reg
+
+	// 5.
Issue the certificate
+	request := certificate.ObtainRequest{
+		Domains: []string{domain},
+		Bundle:  true, // Include full chain
+	}
+
+	certResources, err := client.Certificate.Obtain(request)
+	if err != nil {
+		return nil, fmt.Errorf("failed to obtain certificate: %w", err)
+	}
+
+	// 6. Map the results to the generic Certificate struct.
+	// PEM-encode the ACME account key so it round-trips like the other PEM fields.
+	accountKeyDER, err := x509.MarshalECPrivateKey(privateKey)
+	if err != nil {
+		return nil, fmt.Errorf("failed to marshal ACME account key: %w", err)
+	}
+	accountKeyPEM := pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: accountKeyDER})
+
+	return &api.Certificate{
+		Domain:     domain,
+		CertPEM:    certResources.Certificate,
+		KeyPEM:     certResources.PrivateKey,
+		FullChain:  certResources.Certificate, // lego's Certificate field includes the chain when Bundle is true
+		AccountKey: accountKeyPEM,
+	}, nil
+}
diff --git a/internal/pkg/server/server.go b/internal/pkg/server/server.go
new file mode 100644
index 0000000..fca8175
--- /dev/null
+++ b/internal/pkg/server/server.go
@@ -0,0 +1,32 @@
+// internal/pkg/server/server.go
+package server
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+
+	internallog "envoy-control-plane/internal/log"
+	"envoy-control-plane/internal/pkg/api"
+)
+
+// RunRESTServer starts the REST API server with appropriate middleware.
+func RunRESTServer(ctx context.Context, mux *http.ServeMux, restPort uint, webrootPath string, enableCertIssuance bool) error {
+	log := internallog.LogFromContext(ctx)
+
+	corsHandler := api.CORS(mux)
+
+	restAddr := fmt.Sprintf(":%d", restPort)
+	log.Infof("starting REST API server on %s", restAddr)
+
+	if enableCertIssuance {
+		log.Infof("ACME challenge path configured: %s/ -> %s", api.ACME_CHALLENGE_WEB_PATH, webrootPath)
+	}
+
+	if err := http.ListenAndServe(restAddr, corsHandler); err != nil {
+		return fmt.Errorf("REST server error: %w", err)
+	}
+	return nil
+}
+
+// NOTE: The function that starts the gRPC xDS server (internal.RunServer) remains in the existing 'internal' package.
diff --git a/internal/pkg/snapshot/manager.go b/internal/pkg/snapshot/manager.go
new file mode 100644
index 0000000..24d5668
--- /dev/null
+++ b/internal/pkg/snapshot/manager.go
@@ -0,0 +1,50 @@
+package snapshot
+
+import (
+	"context"
+	"fmt"
+
+	"envoy-control-plane/internal/pkg/storage"
+
+	// Ensure all standard filters are imported for proto unmarshalling
+	_ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/jwt_authn/v3"
+	_ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/lua/v3"
+	_ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/oauth2/v3"
+	_ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3"
+	_ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/listener/tls_inspector/v3"
+	_ "github.com/envoyproxy/go-control-plane/envoy/service/runtime/v3"
+
+	"github.com/envoyproxy/go-control-plane/pkg/cache/types"
+	cachev3 "github.com/envoyproxy/go-control-plane/pkg/cache/v3"
+	resourcev3 "github.com/envoyproxy/go-control-plane/pkg/resource/v3"
+)
+
+// ResourceNamer is an interface implemented by all xDS resources with a GetName() method.
+type ResourceNamer interface {
+	GetName() string
+}
+
+// SnapshotManager wraps a SnapshotCache and provides file loading/modifying
+// and DB synchronization functionality for Envoy xDS resources.
+type SnapshotManager struct {
+	Cache  cachev3.SnapshotCache
+	NodeID string
+	DB     *storage.Storage
+}
+
+// NewSnapshotManager creates a new instance of SnapshotManager.
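+//
+// Typical wiring, as a sketch (the cache construction here is illustrative,
+// not taken from this change):
+//
+//	cache := cachev3.NewSnapshotCache(false, cachev3.IDHash{}, nil)
+//	manager := NewSnapshotManager(cache, "test-id", db)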
+func NewSnapshotManager(cache cachev3.SnapshotCache, nodeID string, db *storage.Storage) *SnapshotManager {
+	return &SnapshotManager{
+		Cache:  cache,
+		NodeID: nodeID,
+		DB:     db,
+	}
+}
+
+// SetSnapshot sets a full snapshot (utility method used by others).
+func (sm *SnapshotManager) SetSnapshot(ctx context.Context, version string, resources map[resourcev3.Type][]types.Resource) error {
+	snap, err := cachev3.NewSnapshot(version, resources)
+	if err != nil {
+		return fmt.Errorf("failed to create snapshot: %w", err)
+	}
+	return sm.Cache.SetSnapshot(ctx, sm.NodeID, snap)
+}
diff --git a/internal/pkg/snapshot/resource_config.go b/internal/pkg/snapshot/resource_config.go
new file mode 100644
index 0000000..c7a720e
--- /dev/null
+++ b/internal/pkg/snapshot/resource_config.go
@@ -0,0 +1,253 @@
+package snapshot
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	clusterv3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
+	listenerv3 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
+	"github.com/envoyproxy/go-control-plane/pkg/cache/types"
+	cachev3 "github.com/envoyproxy/go-control-plane/pkg/cache/v3"
+	resourcev3 "github.com/envoyproxy/go-control-plane/pkg/resource/v3"
+
+	internalapi "envoy-control-plane/internal/api"
+	"envoy-control-plane/internal/pkg/storage"
+)
+
+// SetSnapshotFromConfig sets a snapshot from an aggregated SnapshotConfig
+func (sm *SnapshotManager) SetSnapshotFromConfig(ctx context.Context, version string, cfg *storage.SnapshotConfig) error {
+	if cfg == nil {
+		return fmt.Errorf("snapshot config is nil")
+	}
+
+	// Ensure version is not empty
+	if version == "" {
+		version = fmt.Sprintf("snap-%d", time.Now().UnixNano())
+	}
+
+	// Build the resource map expected by cachev3.NewSnapshot
+	resources := map[resourcev3.Type][]types.Resource{
+		resourcev3.ClusterType:  make([]types.Resource, len(cfg.EnabledClusters)),
+		resourcev3.ListenerType: make([]types.Resource, len(cfg.EnabledListeners)),
+		// Other resource types supported by SnapshotConfig can be added here
+	}
+
+	// Populate the slices; the proto pointers satisfy types.Resource directly
+	for i, c := range cfg.EnabledClusters {
+		resources[resourcev3.ClusterType][i] = c
+	}
+	for i, l := range cfg.EnabledListeners {
+		resources[resourcev3.ListenerType][i] = l
+	}
+
+	// Create the snapshot
+	snap, err := cachev3.NewSnapshot(version, resources)
+	if err != nil {
+		return fmt.Errorf("failed to create snapshot: %w", err)
+	}
+
+	// Apply snapshot to the cache
+	if err := sm.Cache.SetSnapshot(ctx, sm.NodeID, snap); err != nil {
+		return fmt.Errorf("failed to set snapshot: %w", err)
+	}
+
+	return nil
+}
+
+// SnapshotToConfig converts the current cache snapshot into a SnapshotConfig
+func (sm *SnapshotManager) SnapshotToConfig(ctx context.Context, nodeID string) (*storage.SnapshotConfig, error) {
+	snap, err := sm.Cache.GetSnapshot(nodeID)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get snapshot for node %s: %w", nodeID, err)
+	}
+
+	config := &storage.SnapshotConfig{
+		EnabledClusters:  []*clusterv3.Cluster{},
+		EnabledListeners: []*listenerv3.Listener{},
+		// Disabled fields are not populated from the cache, only enabled ones.
+	}
+
+	// Convert Cluster resources
+	for _, r := range snap.GetResources(string(resourcev3.ClusterType)) {
+		if c, ok := r.(*clusterv3.Cluster); ok {
+			config.EnabledClusters = append(config.EnabledClusters, c)
+		}
+	}
+
+	// Convert Listener resources
+	for _, r := range snap.GetResources(string(resourcev3.ListenerType)) {
+		if l, ok := r.(*listenerv3.Listener); ok {
+			config.EnabledListeners = append(config.EnabledListeners, l)
+		}
+	}
+
+	return config, nil
+}
+
+// LoadSnapshotFromDB syncs DB data to the cache. It rebuilds the snapshot
+// from the persistent store and updates the Envoy cache.
+func (sm *SnapshotManager) LoadSnapshotFromDB(ctx context.Context) error {
+	fmt.Println("Loading configuration from main DB...")
+
+	// 1. Try Database (Primary Source)
+	cfg, err := sm.DB.RebuildSnapshot(ctx)
+	if err != nil {
+		return fmt.Errorf("failed to rebuild snapshot from DB: %w", err)
+	}
+
+	fmt.Println("Loaded configuration from DB. Updating Envoy Cache.")
+
+	// 2. Update Envoy's in-memory cache with the rebuilt config
+	return sm.SetSnapshotFromConfig(ctx, "db-sync-"+time.Now().Format(time.RFC3339), cfg)
+}
+
+// FlushCacheToDB saves the current in-memory Envoy snapshot (the source of truth)
+// to the persistent DB. This implements the cache-to-DB write-through.
+func (sm *SnapshotManager) FlushCacheToDB(ctx context.Context, strategy storage.DeleteStrategy) error {
+	// 1. Get current Envoy snapshot as SnapshotConfig
+	cfg, err := sm.SnapshotToConfig(ctx, sm.NodeID)
+	if err != nil {
+		return fmt.Errorf("failed to convert snapshot to config: %w", err)
+	}
+
+	// 2. Save to Persistent DB
+	// Note: DB.SaveSnapshot handles insert/update logic for all resources
+	if err := sm.DB.SaveSnapshot(ctx, cfg, strategy); err != nil {
+		return fmt.Errorf("failed to save config to DB: %w", err)
+	}
+	fmt.Println("Successfully saved to Persistent DB.")
+
+	return nil
+}
+
+// EnableResourceFromDB fetches a logically disabled resource from the DB and
+// flips its status to enabled, then adds it back to the cache.
+func (sm *SnapshotManager) EnableResourceFromDB(name string, typ resourcev3.Type) error {
+	ctx := context.Background()
+	switch typ {
+	case resourcev3.ClusterType:
+		if err := sm.DB.EnableCluster(ctx, name, true); err != nil {
+			return fmt.Errorf("failed to enable cluster '%s' in DB: %w", name, err)
+		}
+	case resourcev3.ListenerType:
+		if err := sm.DB.EnableListener(ctx, name, true); err != nil {
+			return fmt.Errorf("failed to enable listener '%s' in DB: %w", name, err)
+		}
+	default:
+		return fmt.Errorf("unsupported resource type for enabling: %s", typ)
+	}
+
+	// Reload snapshot from DB to update the cache with the newly enabled resource
+	return sm.LoadSnapshotFromDB(ctx)
+}
+
+// CheckCacheDBConsistency compares the currently active Envoy cache snapshot
+// against the enabled resources in the persistent DB.
+func (sm *SnapshotManager) CheckCacheDBConsistency(ctx context.Context) (*internalapi.ConsistencyReport, error) {
+	report := &internalapi.ConsistencyReport{
+		CacheOnly: make(map[resourcev3.Type][]string),
+		DBOnly:    make(map[resourcev3.Type][]string),
+	}
+
+	// 1. Get current cache snapshot
+	cacheConfig, err := sm.SnapshotToConfig(ctx, sm.NodeID)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get snapshot from cache: %w", err)
+	}
+
+	// 2.
Rebuild snapshot from DB (only fetches *enabled* resources from DB) + dbConfig, err := sm.DB.RebuildSnapshot(ctx) + if err != nil { + return nil, fmt.Errorf("failed to rebuild snapshot from DB: %w", err) + } + + // Helper to build a set (map[string]struct{}) of resource names for faster lookups + buildNameSet := func(resources []ResourceNamer) map[string]struct{} { + set := make(map[string]struct{}, len(resources)) + for _, r := range resources { + set[r.GetName()] = struct{}{} + } + return set + } + + // Map of resource types to their lists in SnapshotConfig + typeResourceMaps := []struct { + typ resourcev3.Type + cacheList []ResourceNamer + dbList []ResourceNamer + }{ + {resourcev3.ClusterType, resourcesToNamers(cacheConfig.EnabledClusters), resourcesToNamers(dbConfig.EnabledClusters)}, + {resourcev3.ListenerType, resourcesToNamers(cacheConfig.EnabledListeners), resourcesToNamers(dbConfig.EnabledListeners)}, + } + + for _, m := range typeResourceMaps { + cacheSet := buildNameSet(m.cacheList) + dbSet := buildNameSet(m.dbList) + + // Check for Cache-only resources (present in cacheSet but not in dbSet) + for cacheName := range cacheSet { + if _, existsInDB := dbSet[cacheName]; !existsInDB { + report.CacheOnly[m.typ] = append(report.CacheOnly[m.typ], cacheName) + report.Inconsistent = true + } + } + + // Check for DB-only resources (present in dbSet but not in cacheSet) + for dbName := range dbSet { + if _, existsInCache := cacheSet[dbName]; !existsInCache { + report.DBOnly[m.typ] = append(report.DBOnly[m.typ], dbName) + report.Inconsistent = true + } + } + } + + return report, nil +} + +// ListResources returns all enabled and disabled resources of a given type from the DB. +func (sm *SnapshotManager) ListResources(typ resourcev3.Type) ([]types.Resource, []types.Resource, error) { + snap, err := sm.DB.RebuildSnapshot(context.Background()) + if err != nil { + return nil, nil, fmt.Errorf("failed to rebuild snapshot from DB: %w", err) + } + + var enabled, disabled []types.Resource + var namerEnabled, namerDisabled []ResourceNamer + + switch typ { + case resourcev3.ClusterType: + namerEnabled = resourcesToNamers(snap.EnabledClusters) + namerDisabled = resourcesToNamers(snap.DisabledClusters) + case resourcev3.ListenerType: + namerEnabled = resourcesToNamers(snap.EnabledListeners) + namerDisabled = resourcesToNamers(snap.DisabledListeners) + default: + return nil, nil, fmt.Errorf("unsupported resource type: %s", typ) + } + + // Convert ResourceNamer slices back to types.Resource slices + enabled = make([]types.Resource, len(namerEnabled)) + for i, r := range namerEnabled { + enabled[i] = r.(types.Resource) + } + + disabled = make([]types.Resource, len(namerDisabled)) + for i, r := range namerDisabled { + disabled[i] = r.(types.Resource) + } + + return enabled, disabled, nil +} + +// resourcesToNamers converts a slice of proto-generated resource pointers +// (like []*clusterv3.Cluster) to a slice of the generic ResourceNamer interface. +// This is necessary because structs like *clusterv3.Cluster don't explicitly +// implement types.Resource, but are compatible with it and ResourceNamer. 
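+//
+// For example (a sketch):
+//
+//	namers := resourcesToNamers(cfg.EnabledClusters) // []*clusterv3.Cluster -> []ResourceNamer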
+func resourcesToNamers[T ResourceNamer](list []T) []ResourceNamer { + out := make([]ResourceNamer, len(list)) + for i, item := range list { + out[i] = item + } + return out +} diff --git a/internal/pkg/snapshot/resource_crud.go b/internal/pkg/snapshot/resource_crud.go new file mode 100644 index 0000000..9e2961e --- /dev/null +++ b/internal/pkg/snapshot/resource_crud.go @@ -0,0 +1,452 @@ +package snapshot + +import ( + "context" + "fmt" + "sort" + "time" + + listenerv3 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + "github.com/envoyproxy/go-control-plane/pkg/cache/types" + cachev3 "github.com/envoyproxy/go-control-plane/pkg/cache/v3" + resourcev3 "github.com/envoyproxy/go-control-plane/pkg/resource/v3" + + internallog "envoy-control-plane/internal/log" + "envoy-control-plane/internal/pkg/storage" +) + +// AppendFilterChainToListener loads the current listener from the cache, appends the provided +// FilterChain to its list of FilterChains, and updates the cache with the new snapshot. +func (sm *SnapshotManager) AppendFilterChainToListener(ctx context.Context, listenerName string, newFilterChain *listenerv3.FilterChain) error { + log := internallog.LogFromContext(ctx) + + // 1. Get the current Listener from the cache + resource, err := sm.GetResourceFromCache(listenerName, resourcev3.ListenerType) + if err != nil { + return fmt.Errorf("failed to get listener '%s' from cache: %w", listenerName, err) + } + + listener, ok := resource.(*listenerv3.Listener) + if !ok { + return fmt.Errorf("resource '%s' is not a Listener type", listenerName) + } + + // 2. Append the new FilterChain to the listener's list of filter chains. + listener.FilterChains = append(listener.FilterChains, newFilterChain) + log.Infof("Appended new filter chain (match: %v) to listener '%s'", newFilterChain.FilterChainMatch, listenerName) + + // 3. Create a new snapshot with the modified listener (rest of logic remains similar) + snap, err := sm.Cache.GetSnapshot(sm.NodeID) + if err != nil { + return fmt.Errorf("failed to get snapshot for modification: %w", err) + } + + // Get all current resources + resources := sm.getAllResourcesFromSnapshot(snap) + + // Replace the old listener with the modified one + listenerList, ok := resources[resourcev3.ListenerType] + if !ok { + return fmt.Errorf("listener resource type not present in snapshot") + } + + foundAndReplaced := false + for i, res := range listenerList { + if namer, ok := res.(interface{ GetName() string }); ok && namer.GetName() == listenerName { + listenerList[i] = listener + foundAndReplaced = true + break + } + } + + if !foundAndReplaced { + return fmt.Errorf("failed to locate listener '%s' in current resource list for replacement", listenerName) + } + + // Create and set the new snapshot + version := fmt.Sprintf("listener-update-%s-%d", listenerName, time.Now().UnixNano()) + newSnap, err := cachev3.NewSnapshot(version, resources) + if err != nil { + return fmt.Errorf("failed to create new snapshot: %w", err) + } + + if err := sm.Cache.SetSnapshot(ctx, sm.NodeID, newSnap); err != nil { + return fmt.Errorf("failed to set new snapshot: %w", err) + } + sm.FlushCacheToDB(ctx, storage.DeleteLogical) + + log.Infof("Successfully updated listener '%s' in cache with new filter chain.", listenerName) + + return nil +} + +// ServerNamesEqual checks if two slices of server names contain the same elements, ignoring order. +// This is necessary because server_names is a list in the Envoy API, and order shouldn't matter for a match. 
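+//
+// For example:
+//
+//	ServerNamesEqual([]string{"a.com", "b.com"}, []string{"b.com", "a.com"}) // true
+//	ServerNamesEqual([]string{"a.com"}, []string{"a.com", "b.com"})         // false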
+func ServerNamesEqual(a, b []string) bool {
+	if len(a) != len(b) {
+		return false
+	}
+
+	// Sort copies of the slices so the caller's slices are never mutated.
+	ac := append([]string(nil), a...)
+	bc := append([]string(nil), b...)
+	sort.Strings(ac)
+	sort.Strings(bc)
+
+	for i := range ac {
+		if ac[i] != bc[i] {
+			return false
+		}
+	}
+	return true
+}
+
+// UpdateFilterChainOfListener iterates through a listener's filter chains and replaces
+// the one that matches the new filter chain's ServerNames.
+func (sm *SnapshotManager) UpdateFilterChainOfListener(ctx context.Context, listenerName string, newFilterChain *listenerv3.FilterChain) error {
+	log := internallog.LogFromContext(ctx)
+
+	// 1. Get the current Listener from the cache
+	resource, err := sm.GetResourceFromCache(listenerName, resourcev3.ListenerType)
+	if err != nil {
+		return fmt.Errorf("failed to get listener '%s' from cache: %w", listenerName, err)
+	}
+
+	listener, ok := resource.(*listenerv3.Listener)
+	if !ok {
+		return fmt.Errorf("resource '%s' is not a Listener type", listenerName)
+	}
+	if newFilterChain == nil {
+		return fmt.Errorf("new filter chain is nil")
+	}
+
+	// Get the server names from the new filter chain for matching
+	newServerNames := newFilterChain.GetFilterChainMatch().GetServerNames()
+	if len(newServerNames) == 0 {
+		// A filter chain with no server names is typically the default chain, but
+		// explicit domain matching is safer for replacement. This implementation
+		// therefore requires at least one ServerName to perform a targeted update.
+		return fmt.Errorf("new filter chain must specify at least one ServerName for targeted replacement")
+	}
+
+	// 2. Iterate and attempt to find the matching filter chain
+	foundMatch := false
+
+	// We create a new slice to hold the updated list of filter chains
+	var updatedChains []*listenerv3.FilterChain
+
+	for _, existingChain := range listener.FilterChains {
+		existingServerNames := existingChain.GetFilterChainMatch().GetServerNames()
+
+		// ServerNamesEqual compares sorted copies, so neither slice is mutated here.
+		if ServerNamesEqual(existingServerNames, newServerNames) {
+			// Match found! Replace the existing chain with the new one.
+			updatedChains = append(updatedChains, newFilterChain)
+			foundMatch = true
+			log.Debugf("Replaced filter chain with match: %v in listener '%s'", newServerNames, listenerName)
+			continue
+		}
+
+		// Keep the existing chain if it does not match
+		updatedChains = append(updatedChains, existingChain)
+	}
+
+	// 3. Handle the result
+	if !foundMatch {
+		return fmt.Errorf("no existing filter chain found on listener '%s' with matching server names: %v",
+			listenerName, newServerNames)
+	}
+
+	// 4. Update the listener with the new slice of filter chains
+	listener.FilterChains = updatedChains
+
+	// 5.
Get current snapshot to extract all resources for the new snapshot
+	snap, err := sm.Cache.GetSnapshot(sm.NodeID)
+	if err != nil {
+		return fmt.Errorf("failed to get snapshot for modification: %w", err)
+	}
+
+	// Get all current resources
+	resources := sm.getAllResourcesFromSnapshot(snap)
+
+	// Replace the old listener with the modified one in the resource list
+	listenerList, ok := resources[resourcev3.ListenerType]
+	if !ok {
+		return fmt.Errorf("listener resource type not present in snapshot")
+	}
+
+	foundAndReplaced := false
+	for i, res := range listenerList {
+		if namer, ok := res.(interface{ GetName() string }); ok && namer.GetName() == listenerName {
+			// The `listener` variable already holds the modified listener
+			listenerList[i] = listener
+			foundAndReplaced = true
+			break
+		}
+	}
+
+	if !foundAndReplaced {
+		// This should not happen if GetResourceFromCache succeeded, but is a good safeguard.
+		return fmt.Errorf("failed to locate listener '%s' in current resource list for replacement", listenerName)
+	}
+
+	// 6. Create and set the new snapshot
+	version := fmt.Sprintf("listener-update-chain-%s-%d", listenerName, time.Now().UnixNano())
+	newSnap, err := cachev3.NewSnapshot(version, resources)
+	if err != nil {
+		return fmt.Errorf("failed to create new snapshot: %w", err)
+	}
+
+	if err := sm.Cache.SetSnapshot(ctx, sm.NodeID, newSnap); err != nil {
+		return fmt.Errorf("failed to set new snapshot: %w", err)
+	}
+	sm.FlushCacheToDB(ctx, storage.DeleteLogical)
+	log.Infof("Successfully updated filter chain (match: %v) on listener '%s'", newServerNames, listenerName)
+
+	return nil
+}
+
+// RemoveFilterChainFromListener loads the current listener from the cache, removes the
+// FilterChain that matches the provided ServerNames from the listener's list of FilterChains,
+// and updates the cache with the new snapshot.
+func (sm *SnapshotManager) RemoveFilterChainFromListener(ctx context.Context, listenerName string, serverNames []string) error {
+	log := internallog.LogFromContext(ctx)
+
+	// 1. Validate input: matching uses ServerNames, consistent with
+	// UpdateFilterChainOfListener. The special value "(default)" targets the
+	// filter chain that has no server names.
+	if len(serverNames) == 0 {
+		return fmt.Errorf("target filter chain match must specify at least one ServerName for targeted removal")
+	}
+
+	resource, err := sm.GetResourceFromCache(listenerName, resourcev3.ListenerType)
+	if err != nil {
+		return fmt.Errorf("failed to get listener '%s' from cache: %w", listenerName, err)
+	}
+
+	listener, ok := resource.(*listenerv3.Listener)
+	if !ok {
+		return fmt.Errorf("resource '%s' is not a Listener type", listenerName)
+	}
+
+	// 2. Iterate and attempt to find and remove the matching filter chain
+	foundMatch := false
+	var updatedChains []*listenerv3.FilterChain // New slice for chains to keep
+
+	for _, existingChain := range listener.FilterChains {
+		existingServerNames := existingChain.GetFilterChainMatch().GetServerNames()
+		if len(serverNames) == 1 && serverNames[0] == "(default)" && len(existingServerNames) == 0 {
+			foundMatch = true
+			log.Debugf("Removing default filter chain from listener '%s'", listenerName)
+			continue
+		}
+		// Use ServerNamesEqual for matching
+		if ServerNamesEqual(existingServerNames, serverNames) {
+			// Match found! Do not append this chain, effectively removing it.
+			foundMatch = true
+			log.Debugf("Removing filter chain with match: %v from listener '%s'", serverNames, listenerName)
+			continue
+		}
+
+		// Keep the existing chain if it does not match
+		updatedChains = append(updatedChains, existingChain)
+	}
+
+	// 3. Handle the result
+	if !foundMatch {
+		return fmt.Errorf("no existing filter chain found on listener '%s' with matching server names: %v",
+			listenerName, serverNames)
+	}
+
+	// 4. Update the listener with the new slice of filter chains
+	listener.FilterChains = updatedChains
+
+	// 5. Get current snapshot to extract all resources for the new snapshot
+	snap, err := sm.Cache.GetSnapshot(sm.NodeID)
+	if err != nil {
+		return fmt.Errorf("failed to get snapshot for modification: %w", err)
+	}
+
+	resources := sm.getAllResourcesFromSnapshot(snap)
+
+	// Replace the old listener with the modified one in the resource list
+	listenerList, ok := resources[resourcev3.ListenerType]
+	if !ok {
+		return fmt.Errorf("listener resource type not present in snapshot")
+	}
+
+	foundAndReplaced := false
+	for i, res := range listenerList {
+		if namer, ok := res.(interface{ GetName() string }); ok && namer.GetName() == listenerName {
+			listenerList[i] = listener // Replace with the modified listener
+			foundAndReplaced = true
+			break
+		}
+	}
+
+	if !foundAndReplaced {
+		// Should not happen if GetResourceFromCache succeeded.
+		return fmt.Errorf("failed to locate listener '%s' in current resource list for replacement during removal", listenerName)
+	}
+
+	// 6. Create and set the new snapshot
+	version := fmt.Sprintf("listener-remove-chain-%s-%d", listenerName, time.Now().UnixNano())
+	newSnap, err := cachev3.NewSnapshot(version, resources)
+	if err != nil {
+		return fmt.Errorf("failed to create new snapshot: %w", err)
+	}
+
+	if err := sm.Cache.SetSnapshot(ctx, sm.NodeID, newSnap); err != nil {
+		return fmt.Errorf("failed to set new snapshot: %w", err)
+	}
+	// Persist the updated snapshot after the cache change.
+	sm.FlushCacheToDB(ctx, storage.DeleteLogical)
+	log.Infof("Successfully removed filter chain (match: %v) from listener '%s'", serverNames, listenerName)
+
+	return nil
+}
+
+// AddResourceToSnapshot adds any resource to the snapshot dynamically
+func (sm *SnapshotManager) AddResourceToSnapshot(resource types.Resource, typ resourcev3.Type) error {
+	snap, err := sm.Cache.GetSnapshot(sm.NodeID)
+	if err != nil {
+		return fmt.Errorf("failed to get snapshot from cache: %w", err)
+	}
+	resources := sm.getAllResourcesFromSnapshot(snap)
+
+	// Append to the appropriate slice
+	switch typ {
+	case resourcev3.ClusterType:
+		resources[resourcev3.ClusterType] = append(resources[resourcev3.ClusterType], resource)
+	case resourcev3.ListenerType:
+		resources[resourcev3.ListenerType] = append(resources[resourcev3.ListenerType], resource)
+	case resourcev3.EndpointType, resourcev3.SecretType, resourcev3.RuntimeType:
+		resources[typ] = append(resources[typ], resource)
+	default:
+		return fmt.Errorf("unsupported resource type: %s", typ)
+	}
+
+	resourceNamer, ok := resource.(interface{ GetName() string })
+	if !ok {
+		return fmt.Errorf("resource of type %s does not implement GetName()", typ)
+	}
+
+	newSnap, err := cachev3.NewSnapshot(
+		"snap-generic-"+resourceNamer.GetName()+"-"+time.Now().Format(time.RFC3339),
+		resources,
+	)
+	if err != nil {
+		return fmt.Errorf("failed to create snapshot: %w", err)
+	}
+	return sm.Cache.SetSnapshot(context.TODO(), sm.NodeID, newSnap)
+}
+
+// RemoveResource removes any resource by name dynamically
+func (sm *SnapshotManager) RemoveResource(name string, typ resourcev3.Type, strategy
storage.DeleteStrategy) error {
+	snap, err := sm.Cache.GetSnapshot(sm.NodeID)
+	if err != nil {
+		return fmt.Errorf("failed to get snapshot from cache: %w", err)
+	}
+	resources := sm.getAllResourcesFromSnapshot(snap)
+
+	// Flag to check if resource was found in cache
+	var resourceFound = false
+
+	// Filter the target type
+	if targetResources, ok := resources[typ]; ok {
+		resources[typ], resourceFound = filterAndCheckResourcesByName(targetResources, name)
+	}
+
+	if strategy == storage.DeleteActual {
+		if resourceFound {
+			return fmt.Errorf("actual delete requested but resource %s of type %s still exists in cache", name, typ)
+		}
+		if typ == resourcev3.ClusterType {
+			if err := sm.DB.RemoveCluster(context.TODO(), name); err != nil {
+				return fmt.Errorf("failed to delete cluster %s from DB: %w", name, err)
+			}
+			return nil
+		}
+		if typ == resourcev3.ListenerType {
+			if err := sm.DB.RemoveListener(context.TODO(), name); err != nil {
+				return fmt.Errorf("failed to delete listener %s from DB: %w", name, err)
+			}
+			return nil
+		}
+		return fmt.Errorf("actual delete not supported for resource type: %s", typ)
+	}
+
+	if !resourceFound {
+		return fmt.Errorf("resource %s of type %s not found in cache", name, typ)
+	}
+
+	newSnap, err := cachev3.NewSnapshot(
+		"snap-remove-generic-"+name,
+		resources,
+	)
+	if err != nil {
+		return fmt.Errorf("failed to create snapshot: %w", err)
+	}
+
+	if err := sm.Cache.SetSnapshot(context.TODO(), sm.NodeID, newSnap); err != nil {
+		return fmt.Errorf("failed to set snapshot: %w", err)
+	}
+
+	if err := sm.FlushCacheToDB(context.TODO(), strategy); err != nil {
+		return fmt.Errorf("failed to flush cache to DB: %w", err)
+	}
+	return nil
+}
+
+// GetResourceFromCache retrieves a resource by name and type from the cache.
+func (sm *SnapshotManager) GetResourceFromCache(name string, typ resourcev3.Type) (types.Resource, error) {
+	snap, err := sm.Cache.GetSnapshot(sm.NodeID)
+	if err != nil {
+		return nil, err
+	}
+	r, ok := snap.GetResources(string(typ))[name]
+	if !ok {
+		return nil, fmt.Errorf("%s resource %s not found in cache", typ, name)
+	}
+
+	// We rely on the given type being correct, as all xDS resources implement GetName().
+	return r, nil
+}
+
+// getAllResourcesFromSnapshot retrieves all known resource types from a snapshot as a map.
+func (sm *SnapshotManager) getAllResourcesFromSnapshot(snap cachev3.ResourceSnapshot) map[resourcev3.Type][]types.Resource {
+	// Only include types that might be manipulated by the generic functions
+	resources := map[resourcev3.Type][]types.Resource{
+		resourcev3.ClusterType:  mapToSlice(snap.GetResources(string(resourcev3.ClusterType))),
+		resourcev3.ListenerType: mapToSlice(snap.GetResources(string(resourcev3.ListenerType))),
+		// resourcev3.EndpointType: mapToSlice(snap.GetResources(string(resourcev3.EndpointType))),
+		// resourcev3.SecretType:   mapToSlice(snap.GetResources(string(resourcev3.SecretType))),
+		// resourcev3.RuntimeType:  mapToSlice(snap.GetResources(string(resourcev3.RuntimeType))),
+		// Include other types as needed
+	}
+	return resources
+}
+
+// mapToSlice converts a map of named resources to a slice of resources.
+func mapToSlice(m map[string]types.Resource) []types.Resource {
+	out := make([]types.Resource, 0, len(m))
+	for _, r := range m {
+		out = append(out, r)
+	}
+	return out
+}
+
+// filterAndCheckResourcesByName filters a slice of resources by name,
+// returning the filtered slice and a boolean indicating if the named resource was found.
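+//
+// For example, dropping cluster "foo" from the cluster list (a sketch; the name is a placeholder):
+//
+//	kept, found := filterAndCheckResourcesByName(resources[resourcev3.ClusterType], "foo")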
+func filterAndCheckResourcesByName(resources []types.Resource, name string) ([]types.Resource, bool) { + filtered := []types.Resource{} + var found = false + for _, r := range resources { + if namer, ok := r.(interface{ GetName() string }); ok { + if namer.GetName() != name { + filtered = append(filtered, r) + } else { + found = true + } + } else { + // fallback, include unknown type + filtered = append(filtered, r) + } + } + return filtered, found +} diff --git a/internal/pkg/snapshot/resource_io.go b/internal/pkg/snapshot/resource_io.go new file mode 100644 index 0000000..ea381cd --- /dev/null +++ b/internal/pkg/snapshot/resource_io.go @@ -0,0 +1,247 @@ +package snapshot + +import ( + "context" + "encoding/json" + "fmt" + "os" + + clusterv3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + listenerv3 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/tcp_proxy/v3" + + "github.com/envoyproxy/go-control-plane/pkg/cache/types" + "github.com/envoyproxy/go-control-plane/pkg/cache/v3" + resourcev3 "github.com/envoyproxy/go-control-plane/pkg/resource/v3" + "google.golang.org/protobuf/encoding/protojson" + + yaml "gopkg.in/yaml.v3" + + internallog "envoy-control-plane/internal/log" +) + +// YamlResources is a helper struct to unmarshal the common Envoy YAML file structure +type YamlResources struct { + Resources []yaml.Node `yaml:"resources"` +} + +// unmarshalYamlNodeToProto takes a generic map representation of a YAML/JSON object +// and unmarshals it into the given Protobuf resource pointer using protojson. +func unmarshalYamlNodeToProto(node map[string]interface{}, resource types.Resource) error { + // 1. Remove the standard Protobuf type marker (if present) before marshaling to JSON. + delete(node, "@type") + + // 2. Marshal the generic map into JSON bytes. + jsonBytes, err := json.Marshal(node) + if err != nil { + return fmt.Errorf("failed to marshal resource node to JSON: %w", err) + } + + // 3. Unmarshal the JSON bytes into the target Protobuf struct. + if err := protojson.Unmarshal(jsonBytes, resource); err != nil { + return fmt.Errorf("failed to unmarshal into proto: %w", err) + } + return nil +} + +// LoadAndUnmarshal takes raw data (e.g., from a file or string) and unmarshals +// it into the target interface using YAML/JSON rules. +func LoadAndUnmarshal(data []byte, target interface{}) error { + if err := yaml.Unmarshal(data, target); err != nil { + return fmt.Errorf("failed to unmarshal data: %w", err) + } + return nil +} + +// ProcessFn is a function type that defines the processing logic to be applied +// to a potential resource node found during the walk. +// It returns a non-nil error if processing fails and should stop the walk. +type ProcessFn func(ctx context.Context, node map[string]interface{}) error + +// WalkAndProcess traverses an arbitrary Go data structure (unmarshaled from YAML/JSON) +// and applies the provided ProcessFn to every map[string]interface{} node it finds. 
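+//
+// Example (a sketch): counting every map node in a parsed YAML document.
+//
+//	var raw interface{}
+//	_ = yaml.Unmarshal(data, &raw)
+//	count := 0
+//	_ = WalkAndProcess(ctx, raw, func(_ context.Context, _ map[string]interface{}) error {
+//		count++
+//		return nil
+//	})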
+func WalkAndProcess(ctx context.Context, raw interface{}, processFn ProcessFn) error {
+	var walk func(node interface{}) error
+	walk = func(node interface{}) error {
+		switch v := node.(type) {
+		case map[string]interface{}:
+			// Apply the custom processing function to the current map node
+			if err := processFn(ctx, v); err != nil {
+				return err
+			}
+
+			// Recurse into children
+			for _, child := range v {
+				if err := walk(child); err != nil {
+					return err
+				}
+			}
+
+		case []interface{}:
+			for _, item := range v {
+				if err := walk(item); err != nil {
+					return err
+				}
+			}
+		}
+		return nil
+	}
+
+	return walk(raw)
+}
+
+// makeResourceProcessor creates the specific ProcessFn needed for the xDS resource logic.
+func makeResourceProcessor(
+	log internallog.Logger,
+	resources map[resourcev3.Type][]types.Resource,
+) ProcessFn {
+	return func(_ context.Context, v map[string]interface{}) error {
+		if typStr, ok := v["@type"].(string); ok {
+			typ := resourcev3.Type(typStr)
+
+			// only process known top-level xDS resources
+			var resource types.Resource
+			var newResource bool
+
+			switch typ {
+			case resourcev3.ClusterType:
+				resource = &clusterv3.Cluster{}
+				newResource = true
+			case resourcev3.ListenerType:
+				resource = &listenerv3.Listener{}
+				newResource = true
+			// ... other types ...
+			default:
+				log.Warnf("unsupported resource type: %s", typ)
+				// Skip nested or unsupported types
+			}
+
+			if newResource {
+				if err := unmarshalYamlNodeToProto(v, resource); err != nil {
+					return fmt.Errorf("failed to unmarshal %s from file: %w", typ, err)
+				}
+				resources[typ] = append(resources[typ], resource)
+			}
+		}
+		return nil
+	}
+}
+
+// LoadSnapshotFromFile reads a YAML/JSON file, parses it, and returns a map of xDS resources.
+func LoadSnapshotFromFile(ctx context.Context, filePath string) (map[resourcev3.Type][]types.Resource, error) {
+	log := internallog.LogFromContext(ctx)
+
+	// Read the file (Step 1: Read)
+	data, err := os.ReadFile(filePath)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read file: %w", err)
+	}
+
+	// Unmarshal data (Step 2: Generic Unmarshal)
+	var raw interface{}
+	if err := LoadAndUnmarshal(data, &raw); err != nil {
+		return nil, fmt.Errorf("failed to unmarshal file %s: %w", filePath, err)
+	}
+
+	resources := make(map[resourcev3.Type][]types.Resource)
+	processor := makeResourceProcessor(log, resources)
+
+	// Walk and Process (Step 3: Generic Walk with specific processor)
+	if err := WalkAndProcess(ctx, raw, processor); err != nil {
+		return nil, err
+	}
+
+	return resources, nil
+}
+
+// SaveSnapshotToFile marshals the current cache snapshot to JSON and writes it to a file.
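+//
+// Usage sketch (the path is a placeholder):
+//
+//	snap, err := sm.Cache.GetSnapshot(sm.NodeID)
+//	if err == nil {
+//		_ = SaveSnapshotToFile(snap, "data/snapshot.json")
+//	}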
+func SaveSnapshotToFile(snap cache.ResourceSnapshot, filePath string) error { + + out := make(map[string][]interface{}) + + // Iterate over all known types + clusterTypeResources := snap.GetResources(resourcev3.ClusterType) + for _, r := range clusterTypeResources { + out[resourcev3.ClusterType] = append(out[resourcev3.ClusterType], r) + } + listenerTypeResources := snap.GetResources(resourcev3.ListenerType) + for _, r := range listenerTypeResources { + out[resourcev3.ListenerType] = append(out[resourcev3.ListenerType], r) + } + + data, err := json.MarshalIndent(out, "", " ") + if err != nil { + return err + } + + return os.WriteFile(filePath, data, 0644) +} + +// LoadFilterChainFromYAML unmarshals a YAML string representing an Envoy Listener FilterChain +// configuration into a listenerv3.FilterChain protobuf message using protojson pipeline. +func LoadFilterChainFromYAML(ctx context.Context, yamlStr string) (*listenerv3.FilterChain, error) { + log := internallog.LogFromContext(ctx) + + // 1. Unmarshal YAML into a generic Go map + var rawChainMap map[string]interface{} + if err := yaml.Unmarshal([]byte(yamlStr), &rawChainMap); err != nil { + log.Errorf("Failed to unmarshal YAML: %v", err) + return nil, fmt.Errorf("failed to unmarshal YAML into generic map: %w", err) + } + if rawChainMap == nil { + return nil, fmt.Errorf("failed to unmarshal YAML: input was empty or invalid") + } + + // 2. Unmarshal the generic map into the Protobuf struct using the helper + rawChain := &listenerv3.FilterChain{} + if err := unmarshalYamlNodeToProto(rawChainMap, rawChain); err != nil { + return nil, fmt.Errorf("failed to unmarshal YAML into FilterChain using protojson: %w", err) + } + + // Check if the FilterChain contains any filters (optional but good sanity check) + if len(rawChain.Filters) == 0 { + return nil, fmt.Errorf("filter chain loaded but contains no network filters") + } + + // Return the single FilterChain object. + return rawChain, nil +} + +// LoadResourceFromYAML unmarshals a YAML string representing a single xDS resource +// (like a Cluster or a Listener) into the appropriate Protobuf message. +// +// It leverages the existing WalkAndProcess logic to find and unmarshal the resource. +func LoadResourceFromYAML(ctx context.Context, yamlStr string, expectedType resourcev3.Type) ([]types.Resource, error) { + log := internallog.LogFromContext(ctx) + + // 1. Unmarshal YAML into a generic Go map + var raw interface{} + if err := LoadAndUnmarshal([]byte(yamlStr), &raw); err != nil { + return nil, fmt.Errorf("failed to unmarshal YAML into generic structure: %w", err) + } + + // This map will hold the one resource we expect to find. + resources := make(map[resourcev3.Type][]types.Resource) + + // 2. Use the standard resource processor to walk the structure. + processor := makeResourceProcessor(log, resources) + + // 3. Walk and Process + if err := WalkAndProcess(ctx, raw, processor); err != nil { + return nil, fmt.Errorf("failed to walk and process YAML: %w", err) + } + + // 4. Validate and return the resource + resourceList, ok := resources[expectedType] + if !ok || len(resourceList) == 0 { + // Only return an error if the expected type was not found. + return nil, fmt.Errorf("no resource of expected type %s found in YAML", expectedType) + } + if len(resourceList) > 1 { + // Warn if multiple resources were found, but return the first one as expected. 
+ log.Warnf("found %d resources of type %s, expected 1; returning the first", len(resourceList), expectedType) + } + return resourceList, nil +} diff --git a/internal/pkg/snapshot/resource_io_test.go b/internal/pkg/snapshot/resource_io_test.go new file mode 100644 index 0000000..40f591d --- /dev/null +++ b/internal/pkg/snapshot/resource_io_test.go @@ -0,0 +1,198 @@ +package snapshot + +import ( + "context" + "testing" +) + +// NOTE: Assume MockLogger and SnapshotManager are defined for the test to run. +// The actual implementation of LoadFilterChainFromYAML is assumed to be available +// to the test file. + +// TestLoadFilterChainFromYAML_ComplexInput tests the functionality of LoadFilterChainFromYAML +func TestLoadFilterChainFromYAML_ComplexInput(t *testing.T) { + ctx := context.Background() + + // The user's provided, valid YAML for a single FilterChain object + validComplexYAML := ` + filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + codec_type: AUTO + upgrade_configs: + - upgrade_type: websocket + stream_idle_timeout: 0s + normalize_path: true + merge_slashes: true + route_config: + virtual_hosts: + - name: printer_service + domains: ["printer.jerxie.com"] + routes: + - match: { prefix: "/webcam" } + route: { prefix_rewrite: "/", cluster: "_3d_printer_camera", max_stream_duration: {grpc_timeout_header_max: 0s} } + - match: { prefix: "/" } + route: { cluster: "_3d_printer_console"} + http_filters: + - name: envoy.filters.http.oauth2 + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.oauth2.v3.OAuth2 + config: + token_endpoint: + cluster: _auth_server + uri: auth.jerxie.com/token + timeout: 3s + authorization_endpoint: https://auth.jerxie.com/auth + redirect_uri: "%REQ(x-forwarded-proto)%://%REQ(:authority)%/callback" + redirect_path_matcher: + path: + exact: /callback + signout_path: + path: + exact: /signout + forward_bearer_token: true + credentials: + client_id: octoprint-portal + token_secret: + name: token + sds_config: + path: "/etc/envoy/token-secret.yaml" + hmac_secret: + name: hmac + sds_config: + path: "/etc/envoy/hmac-secret.yaml" + auth_scopes: + - openid + - email + - name: envoy.filters.http.jwt_authn + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.jwt_authn.v3.JwtAuthentication + providers: + provider1: + remote_jwks: + http_uri: + uri: "https://auth.jerxie.com/keys" + cluster: _auth_server + timeout: 5s + cache_duration: 600s + from_headers: + - name: Authorization + value_prefix: "Bearer " + payload_in_metadata: jwt_payload + rules: + - match: + prefix: / + requires: + provider_name: provider1 + - name: envoy.filters.http.lua + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.lua.v3.Lua + inline_code: | + email = "" + function envoy_on_request(request_handle) + email = "" + local meta = request_handle:streamInfo():dynamicMetadata() + for key, value in pairs(meta:get("envoy.filters.http.jwt_authn")) do + if key == "jwt_payload" then + for k, v in pairs(value) do + if k == "email" then + print("login octoprint: "..v) + email = v + request_handle:headers():add("ENVOY_AUTHENTICATED_USER", v) + end + end + end + end + end + + function envoy_on_response(response_handle) + if email ~="" and email ~= "axieyangb@gmail.com" then + response_handle:logInfo("Got unauthorized user, return 403 for user " ..email) + 
response_handle:headers():add("set-cookie", "BearerToken=deleted; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT") + response_handle:headers():add("set-cookie", "OauthHMAC=deleted; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT") + response_handle:headers():add("set-cookie", "IdToken=deleted; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT") + response_handle:headers():add("set-cookie", "OauthExpires=deleted; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT") + end + email = "" + end + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + filter_chain_match: + server_names: ["printer.jerxie.com", "printer.local"] + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: { filename: "/etc/certs/downstream/printer.jerxie.com/fullchain.pem" } + private_key: { filename: "/etc/certs/downstream/printer.jerxie.com/privkey.pem" } +` + + tests := []struct { + name string + yamlStr string + expectError bool + expectedLen int // Expected number of network filters (top-level filters array) + }{ + { + name: "Success_ComplexSingleFilterChain", + yamlStr: validComplexYAML, + expectError: false, + expectedLen: 1, // Only one top-level network filter: http_connection_manager + }, + // Re-include sanity checks for robust testing + { + name: "Error_NoFiltersInChain", + yamlStr: `filter_chain_match: { server_names: ["empty"] }`, + expectError: true, + expectedLen: 0, + }, + { + name: "Error_InputIsAList", + yamlStr: `- filters: []`, + expectError: true, // Should fail unmarshaling a list into a single struct + expectedLen: 0, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + chain, err := LoadFilterChainFromYAML(ctx, tt.yamlStr) + + if tt.expectError { + if err == nil { + t.Errorf("Expected an error but got nil") + } + if chain != nil { + t.Errorf("Expected nil chain on error, but got non-nil") + } + } else { + if err != nil { + t.Fatalf("Expected no error but got: %v", err) + } + if chain == nil { + t.Fatal("Expected non-nil filter chain, but got nil") + } + + // 1. Check top-level filter count + if len(chain.Filters) != tt.expectedLen { + t.Errorf("Top-level filter count mismatch. Got %d, want %d", len(chain.Filters), tt.expectedLen) + } + + // 2. Check a deeply nested value to ensure complex unmarshaling worked + if len(chain.FilterChainMatch.ServerNames) == 0 || chain.FilterChainMatch.ServerNames[0] != "printer.jerxie.com" { + t.Errorf("FilterChainMatch assertion failed. Expected server name 'printer.jerxie.com'") + } + + // 3. Check the name of the top-level filter + if chain.Filters[0].Name != "envoy.filters.network.http_connection_manager" { + t.Errorf("Top-level filter name mismatch. 
Got %s", chain.Filters[0].Name) + } + } + }) + } +} diff --git a/internal/pkg/storage/database.go b/internal/pkg/storage/database.go new file mode 100644 index 0000000..a997819 --- /dev/null +++ b/internal/pkg/storage/database.go @@ -0,0 +1,44 @@ +package storage + +import ( + "context" + "fmt" + "os" + "path/filepath" + "strings" + + internallog "envoy-control-plane/internal/log" + + _ "github.com/lib/pq" // Postgres driver + _ "github.com/mattn/go-sqlite3" // Import SQLite driver" + // Import drivers in cmd/main.go or just keep them in the original main.go +) + +// determineDriver returns driver name from connection string +func determineDriver(dsn string) string { + if strings.HasPrefix(dsn, "postgres://") || strings.HasPrefix(dsn, "postgresql://") { + return "postgres" + } + return "sqlite3" +} + +// SetupDBConnection determines the DB connection string and driver. +func SetupDBConnection(ctx context.Context, dbConnStrIn string) (connStr, driver string, err error) { + log := internallog.LogFromContext(ctx) + connStr = dbConnStrIn + + // Default DB to SQLite file if none provided + if connStr == "" { + defaultDBPath := "data/config.db" + if err := os.MkdirAll(filepath.Dir(defaultDBPath), 0755); err != nil { + return "", "", fmt.Errorf("failed to create data directory: %w", err) + } + connStr = fmt.Sprintf("file:%s?_foreign_keys=on", defaultDBPath) + driver = "sqlite3" + } else { + driver = determineDriver(connStr) + } + + log.Debugf("Using database driver: %s", driver) + return connStr, driver, nil +} diff --git a/internal/pkg/storage/storage.go b/internal/pkg/storage/storage.go new file mode 100644 index 0000000..4d44d4e --- /dev/null +++ b/internal/pkg/storage/storage.go @@ -0,0 +1,604 @@ +package storage + +import ( + "context" + "database/sql" + "encoding/json" + "fmt" + "strings" + + clusterv3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + listenerv3 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + + // routev3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" // REMOVED + + "google.golang.org/protobuf/encoding/protojson" +) + +// Storage abstracts database persistence +type Storage struct { + db *sql.DB + driver string +} + +// DeleteStrategy defines the action to take on missing resources +type DeleteStrategy int + +const ( + // DeleteNone performs only UPSERT for items in the list (default behavior) + DeleteNone DeleteStrategy = iota + // DeleteLogical marks missing resources as disabled (now applicable to clusters and listeners) + DeleteLogical + // DeleteActual removes missing resources physically from the database + DeleteActual +) + +// NewStorage initializes a Storage instance +func NewStorage(db *sql.DB, driver string) *Storage { + return &Storage{db: db, driver: driver} +} + +// placeholder returns correct SQL placeholder based on driver +func (s *Storage) placeholder(n int) string { + if s.driver == "postgres" { + return fmt.Sprintf("$%d", n) + } + return "?" 
+} + +// InitSchema ensures required tables exist +func (s *Storage) InitSchema(ctx context.Context) error { + var schema string + switch s.driver { + case "postgres": + schema = ` + CREATE TABLE IF NOT EXISTS clusters ( + id SERIAL PRIMARY KEY, + name TEXT UNIQUE NOT NULL, + data JSONB NOT NULL, + enabled BOOLEAN DEFAULT true, + updated_at TIMESTAMP DEFAULT now() + ); + -- REMOVED routes table + CREATE TABLE IF NOT EXISTS listeners ( + id SERIAL PRIMARY KEY, + name TEXT UNIQUE NOT NULL, + data JSONB NOT NULL, + enabled BOOLEAN DEFAULT true, + updated_at TIMESTAMP DEFAULT now() + );` + default: // SQLite + schema = ` + CREATE TABLE IF NOT EXISTS clusters ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + name TEXT UNIQUE NOT NULL, + data TEXT NOT NULL, + enabled BOOLEAN DEFAULT 1, + updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP + ); + -- REMOVED routes table + CREATE TABLE IF NOT EXISTS listeners ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + name TEXT UNIQUE NOT NULL, + data TEXT NOT NULL, + enabled BOOLEAN DEFAULT 1, + updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP + );` + } + _, err := s.db.ExecContext(ctx, schema) + return err +} + +// SaveCluster inserts or updates a cluster +func (s *Storage) SaveCluster(ctx context.Context, cluster *clusterv3.Cluster) error { + data, err := protojson.Marshal(cluster) + if err != nil { + return err + } + + var query string + switch s.driver { + case "postgres": + // Explicitly set enabled=true on update to re-enable a logically deleted cluster + query = fmt.Sprintf(` + INSERT INTO clusters (name, data, enabled, updated_at) + VALUES (%s, %s, true, now()) + ON CONFLICT (name) DO UPDATE SET data = %s, enabled = true, updated_at = now()`, + s.placeholder(1), s.placeholder(2), s.placeholder(2)) + default: // SQLite + // Explicitly set enabled=1 on update to re-enable a logically deleted cluster + query = ` + INSERT INTO clusters (name, data, enabled, updated_at) + VALUES (?, ?, 1, CURRENT_TIMESTAMP) + ON CONFLICT(name) DO UPDATE SET data=excluded.data, enabled=1, updated_at=CURRENT_TIMESTAMP` + } + + _, err = s.db.ExecContext(ctx, query, cluster.GetName(), string(data)) + return err +} + +// SaveRoute inserts or updates a route // REMOVED +// func (s *Storage) SaveRoute(ctx context.Context, route *routev3.RouteConfiguration) error { +// // ... 
(route logic removed) +// } + +// SaveListener inserts or updates a listener +func (s *Storage) SaveListener(ctx context.Context, listener *listenerv3.Listener) error { + data, err := protojson.Marshal(listener) + if err != nil { + return err + } + + var query string + switch s.driver { + case "postgres": + // Explicitly set enabled=true on update to re-enable a logically deleted listener + query = fmt.Sprintf(` + INSERT INTO listeners (name, data, enabled, updated_at) + VALUES (%s, %s, true, now()) + ON CONFLICT (name) DO UPDATE SET data = %s, enabled = true, updated_at = now()`, + s.placeholder(1), s.placeholder(2), s.placeholder(2)) + default: // SQLite + // Explicitly set enabled=1 on update to re-enable a logically deleted listener + query = ` + INSERT INTO listeners (name, data, enabled, updated_at) + VALUES (?, ?, 1, CURRENT_TIMESTAMP) + ON CONFLICT(name) DO UPDATE SET data=excluded.data, enabled=1, updated_at=CURRENT_TIMESTAMP` + } + + _, err = s.db.ExecContext(ctx, query, listener.GetName(), string(data)) + return err +} + +// LoadEnabledClusters retrieves all enabled clusters +func (s *Storage) LoadEnabledClusters(ctx context.Context) ([]*clusterv3.Cluster, error) { + query := `SELECT data FROM clusters` + if s.driver == "postgres" { + query += ` WHERE enabled = true` + } else { + query += ` WHERE enabled = 1` + } + + rows, err := s.db.QueryContext(ctx, query) + if err != nil { + return nil, err + } + defer rows.Close() + + var clusters []*clusterv3.Cluster + for rows.Next() { + var raw json.RawMessage + // FIX: Handle type difference between Postgres (JSONB) and SQLite (TEXT) + if s.driver != "postgres" { + var dataStr string + if err := rows.Scan(&dataStr); err != nil { + return nil, err + } + raw = json.RawMessage(dataStr) // Convert string to json.RawMessage + } else { + if err := rows.Scan(&raw); err != nil { + return nil, err + } + } + + var cluster clusterv3.Cluster + if err := protojson.Unmarshal(raw, &cluster); err != nil { + return nil, err + } + clusters = append(clusters, &cluster) + } + return clusters, nil +} + +// LoadAllClusters retrieves all clusters, regardless of their enabled status +func (s *Storage) LoadAllClusters(ctx context.Context) ([]*clusterv3.Cluster, error) { + rows, err := s.db.QueryContext(ctx, `SELECT data FROM clusters`) + if err != nil { + return nil, err + } + defer rows.Close() + + var clusters []*clusterv3.Cluster + for rows.Next() { + var raw json.RawMessage + // FIX: Handle type difference between Postgres (JSONB) and SQLite (TEXT) + if s.driver != "postgres" { + var dataStr string + if err := rows.Scan(&dataStr); err != nil { + return nil, err + } + raw = json.RawMessage(dataStr) // Convert string to json.RawMessage + } else { + if err := rows.Scan(&raw); err != nil { + return nil, err + } + } + + var cluster clusterv3.Cluster + if err := protojson.Unmarshal(raw, &cluster); err != nil { + return nil, err + } + clusters = append(clusters, &cluster) + } + return clusters, nil +} + +// LoadEnabledRoutes retrieves all enabled routes // REMOVED +// func (s *Storage) LoadEnabledRoutes(ctx context.Context) ([]*routev3.RouteConfiguration, error) { +// // ... (route logic removed) +// } + +// LoadAllRoutes retrieves all routes, regardless of their enabled status // REMOVED +// func (s *Storage) LoadAllRoutes(ctx context.Context) ([]*routev3.RouteConfiguration, error) { +// // ... 
+
+// LoadEnabledListeners retrieves all enabled listeners
+func (s *Storage) LoadEnabledListeners(ctx context.Context) ([]*listenerv3.Listener, error) {
+    query := `SELECT data FROM listeners`
+    if s.driver == "postgres" {
+        query += ` WHERE enabled = true`
+    } else {
+        query += ` WHERE enabled = 1`
+    }
+
+    rows, err := s.db.QueryContext(ctx, query)
+    if err != nil {
+        return nil, err
+    }
+    defer rows.Close()
+
+    var listeners []*listenerv3.Listener
+    for rows.Next() {
+        var raw json.RawMessage
+        // FIX: Handle type difference between Postgres (JSONB) and SQLite (TEXT)
+        if s.driver != "postgres" {
+            var dataStr string
+            if err := rows.Scan(&dataStr); err != nil {
+                return nil, err
+            }
+            raw = json.RawMessage(dataStr) // Convert string to json.RawMessage
+        } else {
+            if err := rows.Scan(&raw); err != nil {
+                return nil, err
+            }
+        }
+
+        var l listenerv3.Listener
+        if err := protojson.Unmarshal(raw, &l); err != nil {
+            return nil, err
+        }
+        listeners = append(listeners, &l)
+    }
+    return listeners, nil
+}
+
+// LoadAllListeners retrieves all listeners, regardless of their enabled status
+func (s *Storage) LoadAllListeners(ctx context.Context) ([]*listenerv3.Listener, error) {
+    rows, err := s.db.QueryContext(ctx, `SELECT data FROM listeners`)
+    if err != nil {
+        return nil, err
+    }
+    defer rows.Close()
+
+    var listeners []*listenerv3.Listener
+    for rows.Next() {
+        var raw json.RawMessage
+        // FIX: Handle type difference between Postgres (JSONB) and SQLite (TEXT)
+        if s.driver != "postgres" {
+            var dataStr string
+            if err := rows.Scan(&dataStr); err != nil {
+                return nil, err
+            }
+            raw = json.RawMessage(dataStr) // Convert string to json.RawMessage
+        } else {
+            if err := rows.Scan(&raw); err != nil {
+                return nil, err
+            }
+        }
+
+        var l listenerv3.Listener
+        if err := protojson.Unmarshal(raw, &l); err != nil {
+            return nil, err
+        }
+        listeners = append(listeners, &l)
+    }
+    return listeners, nil
+}
+
+// RebuildSnapshot rebuilds full snapshot from DB
+func (s *Storage) RebuildSnapshot(ctx context.Context) (*SnapshotConfig, error) {
+    // 1. Load Enabled Resources (for xDS serving)
+    enabledClusters, err := s.LoadEnabledClusters(ctx)
+    if err != nil {
+        return nil, err
+    }
+    // enabledRoutes, err := s.LoadEnabledRoutes(ctx) // REMOVED
+    // if err != nil {
+    //     return nil, err
+    // }
+    enabledListeners, err := s.LoadEnabledListeners(ctx)
+    if err != nil {
+        return nil, err
+    }
+
+    // 2. Load ALL Resources (for comparison and disabled set)
+    allClusters, err := s.LoadAllClusters(ctx)
+    if err != nil {
+        return nil, err
+    }
+    // allRoutes, err := s.LoadAllRoutes(ctx) // REMOVED
+    // if err != nil {
+    //     return nil, err
+    // }
+    allListeners, err := s.LoadAllListeners(ctx)
+    if err != nil {
+        return nil, err
+    }
+
+    // 3. Separate Disabled Resources
+
+    // Clusters
+    enabledClusterNames := make(map[string]struct{}, len(enabledClusters))
+    for _, c := range enabledClusters {
+        enabledClusterNames[c.GetName()] = struct{}{}
+    }
+    var disabledClusters []*clusterv3.Cluster
+    for _, c := range allClusters {
+        if _, found := enabledClusterNames[c.GetName()]; !found {
+            disabledClusters = append(disabledClusters, c)
+        }
+    }
+
+    // Routes // REMOVED
+    // enabledRouteNames := make(map[string]struct{}, 0)
+    // var disabledRoutes []*routev3.RouteConfiguration
+    // for _, r := range allRoutes {
+    //     if _, found := enabledRouteNames[r.GetName()]; !found {
+    //         disabledRoutes = append(disabledRoutes, r)
+    //     }
+    // }
+
+    // Listeners
+    enabledListenerNames := make(map[string]struct{}, len(enabledListeners))
+    for _, l := range enabledListeners {
+        enabledListenerNames[l.GetName()] = struct{}{}
+    }
+    var disabledListeners []*listenerv3.Listener
+    for _, l := range allListeners {
+        if _, found := enabledListenerNames[l.GetName()]; !found {
+            disabledListeners = append(disabledListeners, l)
+        }
+    }
+
+    return &SnapshotConfig{
+        EnabledClusters: enabledClusters,
+        // EnabledRoutes: nil, // REMOVED
+        EnabledListeners:  enabledListeners,
+        DisabledClusters:  disabledClusters,
+        // DisabledRoutes: nil, // REMOVED
+        DisabledListeners: disabledListeners,
+    }, nil
+}
+
+// SnapshotConfig aggregates xDS resources
+type SnapshotConfig struct {
+    // Enabled resources (for xDS serving)
+    EnabledClusters []*clusterv3.Cluster
+    // EnabledRoutes []*routev3.RouteConfiguration // REMOVED
+    EnabledListeners []*listenerv3.Listener
+
+    // Disabled resources (for UI display)
+    DisabledClusters []*clusterv3.Cluster
+    // DisabledRoutes []*routev3.RouteConfiguration // REMOVED
+    DisabledListeners []*listenerv3.Listener
+}
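The per-type "all minus enabled" split above could be expressed once with Go generics; a hypothetical sketch (the helper name is an assumption, not in this patch):

// disabledOnly returns the elements of all whose names are absent from enabled.
func disabledOnly[T interface{ GetName() string }](all, enabled []T) []T {
    seen := make(map[string]struct{}, len(enabled))
    for _, r := range enabled {
        seen[r.GetName()] = struct{}{}
    }
    var out []T
    for _, r := range all {
        if _, ok := seen[r.GetName()]; !ok {
            out = append(out, r)
        }
    }
    return out
}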
+
+// EnableCluster toggles a cluster
+func (s *Storage) EnableCluster(ctx context.Context, name string, enabled bool) error {
+    query := `UPDATE clusters SET enabled = ?, updated_at = CURRENT_TIMESTAMP WHERE name = ?`
+    if s.driver == "postgres" {
+        query = `UPDATE clusters SET enabled = $1, updated_at = now() WHERE name = $2`
+    }
+    _, err := s.db.ExecContext(ctx, query, enabled, name)
+    return err
+}
+
+// EnableRoute toggles a route // REMOVED
+// func (s *Storage) EnableRoute(ctx context.Context, name string, enabled bool) error {
+//     // ... (route logic removed)
+// }
+
+// EnableListener toggles a listener
+func (s *Storage) EnableListener(ctx context.Context, name string, enabled bool) error {
+    query := `UPDATE listeners SET enabled = ?, updated_at = CURRENT_TIMESTAMP WHERE name = ?`
+    if s.driver == "postgres" {
+        query = `UPDATE listeners SET enabled = $1, updated_at = now() WHERE name = $2`
+    }
+    _, err := s.db.ExecContext(ctx, query, enabled, name)
+    return err
+}
+
+// disableMissingResources updates the 'enabled' status for resources in 'table'
+// whose 'name' is NOT in 'inputNames'.
+func (s *Storage) disableMissingResources(ctx context.Context, table string, inputNames []string) error {
+    if table != "clusters" && table != "listeners" {
+        return fmt.Errorf("logical delete (disable) is only supported for tables with an 'enabled' column (clusters, listeners)")
+    }
+
+    // 1. Build placeholders and args
+    placeholders := make([]string, len(inputNames))
+    args := make([]interface{}, len(inputNames))
+    for i, name := range inputNames {
+        if s.driver == "postgres" {
+            placeholders[i] = fmt.Sprintf("$%d", i+1)
+        } else {
+            placeholders[i] = "?"
+        }
+        args[i] = name
+    }
+
+    disabledValue := "false"
+    if s.driver != "postgres" {
+        disabledValue = "0"
+    }
+
+    var updateTime string
+    if s.driver == "postgres" {
+        updateTime = "now()"
+    } else {
+        updateTime = "CURRENT_TIMESTAMP"
+    }
+
+    // If no names are provided, disable ALL currently enabled resources
+    whereClause := ""
+    if len(inputNames) > 0 {
+        whereClause = fmt.Sprintf("WHERE name NOT IN (%s)", strings.Join(placeholders, ", "))
+    }
+
+    // 2. Construct and execute the UPDATE query
+    query := fmt.Sprintf(`
+    UPDATE %s
+    SET enabled = %s, updated_at = %s
+    %s`,
+        table, disabledValue, updateTime, whereClause)
+
+    _, err := s.db.ExecContext(ctx, query, args...)
+    return err
+}
+
+// deleteMissingResources physically deletes resources from 'table' whose 'name' is NOT in 'inputNames'.
+func (s *Storage) deleteMissingResources(ctx context.Context, table string, inputNames []string) error {
+    if table != "clusters" && table != "listeners" {
+        return fmt.Errorf("physical delete is only supported for tables: clusters, listeners")
+    }
+
+    // 1. Build placeholders and args
+    placeholders := make([]string, len(inputNames))
+    args := make([]interface{}, len(inputNames))
+    for i, name := range inputNames {
+        if s.driver == "postgres" {
+            placeholders[i] = fmt.Sprintf("$%d", i+1)
+        } else {
+            placeholders[i] = "?"
+        }
+        args[i] = name
+    }
+
+    // If no names are provided, delete ALL resources
+    whereClause := ""
+    if len(inputNames) > 0 {
+        whereClause = fmt.Sprintf("WHERE name NOT IN (%s)", strings.Join(placeholders, ", "))
+    }
+
+    // 2. Construct and execute the DELETE query
+    query := fmt.Sprintf(`
+    DELETE FROM %s
+    %s`,
+        table, whereClause)
+
+    _, err := s.db.ExecContext(ctx, query, args...)
+    return err
+}
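To make the generated SQL concrete, a runnable sketch of the placeholder construction shared by both helpers (the cluster names are illustrative):

package main

import (
    "fmt"
    "strings"
)

func main() {
    names := []string{"_git_bucket", "_code_server"} // illustrative survivors
    ph := make([]string, len(names))
    for i := range names {
        ph[i] = fmt.Sprintf("$%d", i+1) // Postgres; SQLite would emit "?"
    }
    // Prints:
    //   UPDATE clusters SET enabled = false, updated_at = now() WHERE name NOT IN ($1, $2)
    fmt.Printf("UPDATE clusters SET enabled = false, updated_at = now() WHERE name NOT IN (%s)\n",
        strings.Join(ph, ", "))
}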
+
+func (s *Storage) SaveSnapshot(ctx context.Context, cfg *SnapshotConfig, strategy DeleteStrategy) (err error) {
+    if cfg == nil {
+        return fmt.Errorf("SnapshotConfig is nil")
+    }
+
+    // Use a transaction for atomicity. The named return lets the deferred
+    // Commit error propagate to the caller. NOTE: the Save*/disable*/delete*
+    // helpers below still execute against s.db directly, so the transaction
+    // does not yet cover their statements.
+    tx, err := s.db.BeginTx(ctx, nil)
+    if err != nil {
+        return fmt.Errorf("failed to begin transaction: %w", err)
+    }
+    defer func() {
+        if err != nil {
+            tx.Rollback()
+            return
+        }
+        err = tx.Commit()
+    }()
+
+    // Note: Only Enabled* resources are UPSERTED. Disabled* resources are
+    // left alone unless the deletion strategy removes them.
+
+    // --- 1. Save/Upsert Clusters and Collect Names ---
+    clusterNames := make([]string, 0, len(cfg.EnabledClusters))
+    for _, c := range cfg.EnabledClusters {
+        if err = s.SaveCluster(ctx, c); err != nil {
+            return fmt.Errorf("failed to save cluster %s: %w", c.GetName(), err)
+        }
+        clusterNames = append(clusterNames, c.GetName())
+    }
+
+    // --- 2. Save/Upsert Routes and Collect Names --- // REMOVED
+    // routeNames := make([]string, 0, len(cfg.EnabledRoutes))
+    // for _, r := range cfg.EnabledRoutes {
+    //     if err = s.SaveRoute(ctx, r); err != nil {
+    //         return fmt.Errorf("failed to save route %s: %w", r.GetName(), err)
+    //     }
+    //     routeNames = append(routeNames, r.GetName())
+    // }
+
+    // --- 3. Save/Upsert Listeners and Collect Names ---
+    listenerNames := make([]string, 0, len(cfg.EnabledListeners))
+    for _, l := range cfg.EnabledListeners {
+        if err = s.SaveListener(ctx, l); err != nil {
+            return fmt.Errorf("failed to save listener %s: %w", l.GetName(), err)
+        }
+        listenerNames = append(listenerNames, l.GetName())
+    }
+
+    // --- 4. Apply Deletion Strategy ---
+    switch strategy {
+    case DeleteLogical:
+        // Logical Delete (Disable): marks resources NOT in the current enabled list as disabled
+        if err = s.disableMissingResources(ctx, "clusters", clusterNames); err != nil {
+            return fmt.Errorf("failed to logically delete missing clusters: %w", err)
+        }
+        // if err = s.disableMissingResources(ctx, "routes", routeNames); err != nil { // REMOVED
+        //     return fmt.Errorf("failed to logically delete missing routes: %w", err)
+        // }
+        if err = s.disableMissingResources(ctx, "listeners", listenerNames); err != nil {
+            return fmt.Errorf("failed to logically delete missing listeners: %w", err)
+        }
+
+    case DeleteActual:
+        // Actual Delete (Physical Removal): removes resources NOT in the current enabled list
+        if err = s.deleteMissingResources(ctx, "clusters", clusterNames); err != nil {
+            return fmt.Errorf("failed to physically delete missing clusters: %w", err)
+        }
+        // if err = s.deleteMissingResources(ctx, "routes", routeNames); err != nil { // REMOVED
+        //     return fmt.Errorf("failed to physically delete missing routes: %w", err)
+        // }
+        if err = s.deleteMissingResources(ctx, "listeners", listenerNames); err != nil {
+            return fmt.Errorf("failed to physically delete missing listeners: %w", err)
+        }
+
+    case DeleteNone:
+        // Do nothing for missing resources
+        return nil
+    }
+
+    return err
+}
+
+// RemoveListener deletes a listener by name
+func (s *Storage) RemoveListener(ctx context.Context, name string) error {
+    query := `DELETE FROM listeners WHERE name = ?`
+    if s.driver == "postgres" {
+        query = `DELETE FROM listeners WHERE name = $1`
+    }
+    _, err := s.db.ExecContext(ctx, query, name)
+    return err
+}
+
+// RemoveCluster deletes a cluster by name
+func (s *Storage) RemoveCluster(ctx context.Context, name string) error {
+    query := `DELETE FROM clusters WHERE name = ?`
+    if s.driver == "postgres" {
+        query = `DELETE FROM clusters WHERE name = $1`
+    }
+    _, err := s.db.ExecContext(ctx, query, name)
+    return err
+}
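How a caller might pick a strategy when persisting, sketched under the assumption that a disabled-but-recoverable history is wanted (the function and variable names are illustrative):

// persist saves the current config; resources missing from cfg stay in the
// DB with enabled = false, so a UI can list and re-enable them later.
func persist(ctx context.Context, store *Storage, cfg *SnapshotConfig) error {
    return store.SaveSnapshot(ctx, cfg, DeleteLogical)
    // DeleteActual would drop the missing rows instead;
    // DeleteNone would upsert and leave strays untouched.
}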
diff --git a/internal/snapshot/manager.go b/internal/snapshot/manager.go
deleted file mode 100644
index e13691c..0000000
--- a/internal/snapshot/manager.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package snapshot
-
-import (
-    "context"
-    "envoy-control-plane/internal/storage"
-    "fmt"
-
-    // Ensure all standard filters are imported for proto unmarshalling
-    _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/jwt_authn/v3"
-    _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/lua/v3"
-    _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/oauth2/v3"
-    _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3"
-    _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/listener/tls_inspector/v3"
-    _ "github.com/envoyproxy/go-control-plane/envoy/service/runtime/v3"
-
-    "github.com/envoyproxy/go-control-plane/pkg/cache/types"
-    cachev3 "github.com/envoyproxy/go-control-plane/pkg/cache/v3"
-    resourcev3 "github.com/envoyproxy/go-control-plane/pkg/resource/v3"
-)
-
-// ResourceNamer is an interface implemented by all xDS resources with a GetName() method.
-type ResourceNamer interface {
-    GetName() string
-}
-
-// SnapshotManager wraps a SnapshotCache and provides file loading/modifying
-// and DB synchronization functionality for Envoy xDS resources.
-type SnapshotManager struct {
-    Cache  cachev3.SnapshotCache
-    NodeID string
-    DB     *storage.Storage
-}
-
-// NewSnapshotManager creates a new instance of SnapshotManager.
-func NewSnapshotManager(cache cachev3.SnapshotCache, nodeID string, db *storage.Storage) *SnapshotManager {
-    return &SnapshotManager{
-        Cache:  cache,
-        NodeID: nodeID,
-        DB:     db,
-    }
-}
-
-// SetSnapshot sets a full snapshot (utility method used by others).
-func (sm *SnapshotManager) SetSnapshot(ctx context.Context, version string, resources map[resourcev3.Type][]types.Resource) error {
-    snap, err := cachev3.NewSnapshot(version, resources)
-    if err != nil {
-        return fmt.Errorf("failed to create snapshot: %w", err)
-    }
-    return sm.Cache.SetSnapshot(ctx, sm.NodeID, snap)
-}
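A wiring sketch for the manager, assuming the usual go-control-plane snapshot-cache constructor; the node ID is illustrative and store is assumed to be in scope:

// Assumed imports: cachev3 (pkg/cache/v3) and the storage package.
cache := cachev3.NewSnapshotCache(false, cachev3.IDHash{}, nil) // no ADS, nil logger
sm := NewSnapshotManager(cache, "envoy-node-1", store)          // store: *storage.Storage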
diff --git a/internal/snapshot/resource_config.go b/internal/snapshot/resource_config.go
deleted file mode 100644
index 2779415..0000000
--- a/internal/snapshot/resource_config.go
+++ /dev/null
@@ -1,253 +0,0 @@
-package snapshot
-
-import (
-    "context"
-    "fmt"
-    "time"
-
-    clusterv3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
-    listenerv3 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
-    "github.com/envoyproxy/go-control-plane/pkg/cache/types"
-    cachev3 "github.com/envoyproxy/go-control-plane/pkg/cache/v3"
-    resourcev3 "github.com/envoyproxy/go-control-plane/pkg/resource/v3"
-
-    internalapi "envoy-control-plane/internal/api"
-    "envoy-control-plane/internal/storage"
-)
-
-// SetSnapshotFromConfig sets a snapshot from an aggregated SnapshotConfig
-func (sm *SnapshotManager) SetSnapshotFromConfig(ctx context.Context, version string, cfg *storage.SnapshotConfig) error {
-    if cfg == nil {
-        return fmt.Errorf("snapshot config is nil")
-    }
-
-    // Ensure version is not empty
-    if version == "" {
-        version = fmt.Sprintf("snap-%d", time.Now().UnixNano())
-    }
-
-    // Build the resource map expected by cachev3.NewSnapshot
-    resources := map[resourcev3.Type][]types.Resource{
-        resourcev3.ClusterType:  make([]types.Resource, len(cfg.EnabledClusters)),
-        resourcev3.ListenerType: make([]types.Resource, len(cfg.EnabledListeners)),
-        // Other types, if supported by SnapshotConfig, can be added here
-    }
-
-    // Populate slices by direct type assertion and conversion
-    for i, c := range cfg.EnabledClusters {
-        resources[resourcev3.ClusterType][i] = c
-    }
-    for i, l := range cfg.EnabledListeners {
-        resources[resourcev3.ListenerType][i] = l
-    }
-
-    // Create the snapshot
-    snap, err := cachev3.NewSnapshot(version, resources)
-    if err != nil {
-        return fmt.Errorf("failed to create snapshot: %w", err)
-    }
-
-    // Apply snapshot to the cache
-    if err := sm.Cache.SetSnapshot(ctx, sm.NodeID, snap); err != nil {
-        return fmt.Errorf("failed to set snapshot: %w", err)
-    }
-
-    return nil
-}
-
-// SnapshotToConfig converts the current cache snapshot into a SnapshotConfig
-func (sm *SnapshotManager) SnapshotToConfig(ctx context.Context, nodeID string) (*storage.SnapshotConfig, error) {
-    snap, err := sm.Cache.GetSnapshot(nodeID)
-    if err != nil {
-        return nil, fmt.Errorf("failed to get snapshot for node %s: %w", nodeID, err)
-    }
-
-    config := &storage.SnapshotConfig{
-        EnabledClusters:  []*clusterv3.Cluster{},
-        EnabledListeners: []*listenerv3.Listener{},
-        // Disabled fields are not populated from the cache, only enabled ones.
-    }
-
-    // Convert Cluster resources
-    for _, r := range snap.GetResources(string(resourcev3.ClusterType)) {
-        if c, ok := r.(*clusterv3.Cluster); ok {
-            config.EnabledClusters = append(config.EnabledClusters, c)
-        }
-    }
-
-    // Convert Listener resources
-    for _, r := range snap.GetResources(string(resourcev3.ListenerType)) {
-        if l, ok := r.(*listenerv3.Listener); ok {
-            config.EnabledListeners = append(config.EnabledListeners, l)
-        }
-    }
-
-    return config, nil
-}
-
-// LoadSnapshotFromDB syncs DB data to the cache. It rebuilds the snapshot
-// from the persistent store and updates the Envoy cache.
-func (sm *SnapshotManager) LoadSnapshotFromDB(ctx context.Context) error {
-    fmt.Println("Loading configuration from main DB...")
-
-    // 1. Try Database (Primary Source)
-    cfg, err := sm.DB.RebuildSnapshot(ctx)
-    if err != nil {
-        return fmt.Errorf("failed to rebuild snapshot from DB: %w", err)
-    }
-
-    fmt.Println("Loaded configuration from DB. Updating Envoy Cache.")
-
-    // 2. Update Envoy's in-memory cache (this is the 'flash cache' layer)
-    return sm.SetSnapshotFromConfig(ctx, "db-sync-"+time.Now().Format(time.RFC3339), cfg)
-}
-
-// FlushCacheToDB saves the current in-memory Envoy snapshot (the source of truth)
-// to the persistent DB. This implements the "flash cache to DB" write-through.
-func (sm *SnapshotManager) FlushCacheToDB(ctx context.Context, strategy storage.DeleteStrategy) error {
-    // 1. Get current Envoy snapshot as SnapshotConfig
-    cfg, err := sm.SnapshotToConfig(ctx, sm.NodeID)
-    if err != nil {
-        return fmt.Errorf("failed to convert snapshot to config: %w", err)
-    }
-
-    // 2. Save to Persistent DB
-    // Note: DB.SaveSnapshot handles insert/update logic for all resources
-    if err := sm.DB.SaveSnapshot(ctx, cfg, strategy); err != nil {
-        return fmt.Errorf("failed to save config to DB: %w", err)
-    }
-    fmt.Println("Successfully saved to Persistent DB.")
-
-    return nil
-}
-
-// EnableResourceFromDB fetches a logically disabled resource from the DB and
-// flips its status to enabled, then adds it back to the cache.
-func (sm *SnapshotManager) EnableResourceFromDB(name string, typ resourcev3.Type) error {
-    ctx := context.Background()
-    switch typ {
-    case resourcev3.ClusterType:
-        if err := sm.DB.EnableCluster(ctx, name, true); err != nil {
-            return fmt.Errorf("failed to enable cluster '%s' in DB: %w", name, err)
-        }
-    case resourcev3.ListenerType:
-        if err := sm.DB.EnableListener(ctx, name, true); err != nil {
-            return fmt.Errorf("failed to enable listener '%s' in DB: %w", name, err)
-        }
-    default:
-        return fmt.Errorf("unsupported resource type for enabling: %s", typ)
-    }
-
-    // Reload snapshot from DB to update the cache with the newly enabled resource
-    return sm.LoadSnapshotFromDB(ctx)
-}
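Taken together, LoadSnapshotFromDB and FlushCacheToDB form the write-through pair; a lifecycle sketch with the mutation step elided (names are illustrative):

func bootAndMutate(ctx context.Context, sm *SnapshotManager) error {
    if err := sm.LoadSnapshotFromDB(ctx); err != nil { // DB -> cache at startup
        return err
    }
    // ... mutate the cache, e.g. via AppendFilterChainToListener ...
    return sm.FlushCacheToDB(ctx, storage.DeleteLogical) // cache -> DB write-through
}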
-
-// CheckCacheDBConsistency compares the currently active Envoy cache snapshot
-// against the enabled resources in the persistent DB.
-func (sm *SnapshotManager) CheckCacheDBConsistency(ctx context.Context) (*internalapi.ConsistencyReport, error) {
-    report := &internalapi.ConsistencyReport{
-        CacheOnly: make(map[resourcev3.Type][]string),
-        DBOnly:    make(map[resourcev3.Type][]string),
-    }
-
-    // 1. Get current cache snapshot
-    cacheConfig, err := sm.SnapshotToConfig(ctx, sm.NodeID)
-    if err != nil {
-        return nil, fmt.Errorf("failed to get snapshot from cache: %w", err)
-    }
-
-    // 2. Rebuild snapshot from DB (only fetches *enabled* resources from DB)
-    dbConfig, err := sm.DB.RebuildSnapshot(ctx)
-    if err != nil {
-        return nil, fmt.Errorf("failed to rebuild snapshot from DB: %w", err)
-    }
-
-    // Helper to build a set (map[string]struct{}) of resource names for faster lookups
-    buildNameSet := func(resources []ResourceNamer) map[string]struct{} {
-        set := make(map[string]struct{}, len(resources))
-        for _, r := range resources {
-            set[r.GetName()] = struct{}{}
-        }
-        return set
-    }
-
-    // Map of resource types to their lists in SnapshotConfig
-    typeResourceMaps := []struct {
-        typ       resourcev3.Type
-        cacheList []ResourceNamer
-        dbList    []ResourceNamer
-    }{
-        {resourcev3.ClusterType, resourcesToNamers(cacheConfig.EnabledClusters), resourcesToNamers(dbConfig.EnabledClusters)},
-        {resourcev3.ListenerType, resourcesToNamers(cacheConfig.EnabledListeners), resourcesToNamers(dbConfig.EnabledListeners)},
-    }
-
-    for _, m := range typeResourceMaps {
-        cacheSet := buildNameSet(m.cacheList)
-        dbSet := buildNameSet(m.dbList)
-
-        // Check for Cache-only resources (present in cacheSet but not in dbSet)
-        for cacheName := range cacheSet {
-            if _, existsInDB := dbSet[cacheName]; !existsInDB {
-                report.CacheOnly[m.typ] = append(report.CacheOnly[m.typ], cacheName)
-                report.Inconsistent = true
-            }
-        }
-
-        // Check for DB-only resources (present in dbSet but not in cacheSet)
-        for dbName := range dbSet {
-            if _, existsInCache := cacheSet[dbName]; !existsInCache {
-                report.DBOnly[m.typ] = append(report.DBOnly[m.typ], dbName)
-                report.Inconsistent = true
-            }
-        }
-    }
-
-    return report, nil
-}
-
-// ListResources returns all enabled and disabled resources of a given type from the DB.
-func (sm *SnapshotManager) ListResources(typ resourcev3.Type) ([]types.Resource, []types.Resource, error) {
-    snap, err := sm.DB.RebuildSnapshot(context.Background())
-    if err != nil {
-        return nil, nil, fmt.Errorf("failed to rebuild snapshot from DB: %w", err)
-    }
-
-    var enabled, disabled []types.Resource
-    var namerEnabled, namerDisabled []ResourceNamer
-
-    switch typ {
-    case resourcev3.ClusterType:
-        namerEnabled = resourcesToNamers(snap.EnabledClusters)
-        namerDisabled = resourcesToNamers(snap.DisabledClusters)
-    case resourcev3.ListenerType:
-        namerEnabled = resourcesToNamers(snap.EnabledListeners)
-        namerDisabled = resourcesToNamers(snap.DisabledListeners)
-    default:
-        return nil, nil, fmt.Errorf("unsupported resource type: %s", typ)
-    }
-
-    // Convert ResourceNamer slices back to types.Resource slices
-    enabled = make([]types.Resource, len(namerEnabled))
-    for i, r := range namerEnabled {
-        enabled[i] = r.(types.Resource)
-    }
-
-    disabled = make([]types.Resource, len(namerDisabled))
-    for i, r := range namerDisabled {
-        disabled[i] = r.(types.Resource)
-    }
-
-    return enabled, disabled, nil
-}
-
-// resourcesToNamers converts a slice of proto-generated resource pointers
-// (like []*clusterv3.Cluster) to a slice of the generic ResourceNamer interface.
-// This is necessary because structs like *clusterv3.Cluster don't explicitly
-// implement types.Resource, but are compatible with it and ResourceNamer.
-func resourcesToNamers[T ResourceNamer](list []T) []ResourceNamer {
-    out := make([]ResourceNamer, len(list))
-    for i, item := range list {
-        out[i] = item
-    }
-    return out
-}
diff --git a/internal/snapshot/resource_crud.go b/internal/snapshot/resource_crud.go
deleted file mode 100644
index 714c9f9..0000000
--- a/internal/snapshot/resource_crud.go
+++ /dev/null
@@ -1,452 +0,0 @@
-package snapshot
-
-import (
-    "context"
-    "fmt"
-    "sort"
-    "time"
-
-    listenerv3 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
-    "github.com/envoyproxy/go-control-plane/pkg/cache/types"
-    cachev3 "github.com/envoyproxy/go-control-plane/pkg/cache/v3"
-    resourcev3 "github.com/envoyproxy/go-control-plane/pkg/resource/v3"
-
-    internallog "envoy-control-plane/internal/log"
-    "envoy-control-plane/internal/storage"
-)
-
-// AppendFilterChainToListener loads the current listener from the cache, appends the provided
-// FilterChain to its list of FilterChains, and updates the cache with the new snapshot.
-func (sm *SnapshotManager) AppendFilterChainToListener(ctx context.Context, listenerName string, newFilterChain *listenerv3.FilterChain) error {
-    log := internallog.LogFromContext(ctx)
-
-    // 1. Get the current Listener from the cache
-    resource, err := sm.GetResourceFromCache(listenerName, resourcev3.ListenerType)
-    if err != nil {
-        return fmt.Errorf("failed to get listener '%s' from cache: %w", listenerName, err)
-    }
-
-    listener, ok := resource.(*listenerv3.Listener)
-    if !ok {
-        return fmt.Errorf("resource '%s' is not a Listener type", listenerName)
-    }
-
-    // 2. Append the new FilterChain to the listener's list of filter chains.
-    listener.FilterChains = append(listener.FilterChains, newFilterChain)
-    log.Infof("Appended new filter chain (match: %v) to listener '%s'", newFilterChain.FilterChainMatch, listenerName)
-
-    // 3. Create a new snapshot with the modified listener
-    snap, err := sm.Cache.GetSnapshot(sm.NodeID)
-    if err != nil {
-        return fmt.Errorf("failed to get snapshot for modification: %w", err)
-    }
-
-    // Get all current resources
-    resources := sm.getAllResourcesFromSnapshot(snap)
-
-    // Replace the old listener with the modified one
-    listenerList, ok := resources[resourcev3.ListenerType]
-    if !ok {
-        return fmt.Errorf("listener resource type not present in snapshot")
-    }
-
-    foundAndReplaced := false
-    for i, res := range listenerList {
-        if namer, ok := res.(interface{ GetName() string }); ok && namer.GetName() == listenerName {
-            listenerList[i] = listener
-            foundAndReplaced = true
-            break
-        }
-    }
-
-    if !foundAndReplaced {
-        return fmt.Errorf("failed to locate listener '%s' in current resource list for replacement", listenerName)
-    }
-
-    // Create and set the new snapshot
-    version := fmt.Sprintf("listener-update-%s-%d", listenerName, time.Now().UnixNano())
-    newSnap, err := cachev3.NewSnapshot(version, resources)
-    if err != nil {
-        return fmt.Errorf("failed to create new snapshot: %w", err)
-    }
-
-    if err := sm.Cache.SetSnapshot(ctx, sm.NodeID, newSnap); err != nil {
-        return fmt.Errorf("failed to set new snapshot: %w", err)
-    }
-    if err := sm.FlushCacheToDB(ctx, storage.DeleteLogical); err != nil {
-        return fmt.Errorf("failed to flush cache to DB: %w", err)
-    }
-
-    log.Infof("Successfully updated listener '%s' in cache with new filter chain.", listenerName)
-
-    return nil
-}
-
-// ServerNamesEqual checks if two slices of server names contain the same elements, ignoring order.
-// This is necessary because server_names is a list in the Envoy API, and order shouldn't matter for a match.
-func ServerNamesEqual(a, b []string) bool {
-    if len(a) != len(b) {
-        return false
-    }
-
-    // Sort copies of the slices so the callers' slices are never mutated
-    ac := append([]string(nil), a...)
-    bc := append([]string(nil), b...)
-    sort.Strings(ac)
-    sort.Strings(bc)
-
-    for i := range ac {
-        if ac[i] != bc[i] {
-            return false
-        }
-    }
-    return true
-}
-
-// UpdateFilterChainOfListener iterates through a listener's filter chains and replaces
-// the one that matches the new filter chain's ServerNames.
-func (sm *SnapshotManager) UpdateFilterChainOfListener(ctx context.Context, listenerName string, newFilterChain *listenerv3.FilterChain) error {
-    log := internallog.LogFromContext(ctx)
-
-    // 1. Get the current Listener from the cache
-    resource, err := sm.GetResourceFromCache(listenerName, resourcev3.ListenerType)
-    if err != nil {
-        return fmt.Errorf("failed to get listener '%s' from cache: %w", listenerName, err)
-    }
-
-    listener, ok := resource.(*listenerv3.Listener)
-    if !ok {
-        return fmt.Errorf("resource '%s' is not a Listener type", listenerName)
-    }
-    if newFilterChain == nil {
-        return fmt.Errorf("new filter chain is nil")
-    }
-
-    // Get the server names from the new filter chain for matching
-    newServerNames := newFilterChain.GetFilterChainMatch().GetServerNames()
-    if len(newServerNames) == 0 {
-        // If the new filter chain has no server names, it should typically be considered the default,
-        // but explicit domain matching is safer for replacement. For this implementation,
-        // we require at least one ServerName to perform a targeted update.
-        return fmt.Errorf("new filter chain must specify at least one ServerName for targeted replacement")
-    }
-
-    // 2. Iterate and attempt to find the matching filter chain
-    foundMatch := false
-
-    // We create a new slice to hold the updated list of filter chains
-    var updatedChains []*listenerv3.FilterChain
-
-    for _, existingChain := range listener.FilterChains {
-        existingServerNames := existingChain.GetFilterChainMatch().GetServerNames()
-
-        // ServerNamesEqual compares sorted copies, so neither slice is mutated here.
-        if ServerNamesEqual(existingServerNames, newServerNames) {
-            // Match found! Replace the existing chain with the new one.
-            updatedChains = append(updatedChains, newFilterChain)
-            foundMatch = true
-            log.Debugf("Replaced filter chain with match: %v in listener '%s'", newServerNames, listenerName)
-            continue
-        }
-
-        // Keep the existing chain if it does not match
-        updatedChains = append(updatedChains, existingChain)
-    }
-
-    // 3. Handle the result
-    if !foundMatch {
-        return fmt.Errorf("no existing filter chain found on listener '%s' with matching server names: %v",
-            listenerName, newServerNames)
-    }
-
-    // 4. Update the listener with the new slice of filter chains
-    listener.FilterChains = updatedChains
-
-    // 5. Get current snapshot to extract all resources for the new snapshot
-    snap, err := sm.Cache.GetSnapshot(sm.NodeID)
-    if err != nil {
-        return fmt.Errorf("failed to get snapshot for modification: %w", err)
-    }
-
-    // Get all current resources
-    resources := sm.getAllResourcesFromSnapshot(snap)
-
-    // Replace the old listener with the modified one in the resource list
-    listenerList, ok := resources[resourcev3.ListenerType]
-    if !ok {
-        return fmt.Errorf("listener resource type not present in snapshot")
-    }
-
-    foundAndReplaced := false
-    for i, res := range listenerList {
-        if namer, ok := res.(interface{ GetName() string }); ok && namer.GetName() == listenerName {
-            // The `listener` variable already holds the modified listener
-            listenerList[i] = listener
-            foundAndReplaced = true
-            break
-        }
-    }
-
-    if !foundAndReplaced {
-        // This should not happen if GetResourceFromCache succeeded, but is a good safeguard.
-        return fmt.Errorf("failed to locate listener '%s' in current resource list for replacement", listenerName)
-    }
-
-    // 6. Create and set the new snapshot
-    version := fmt.Sprintf("listener-update-chain-%s-%d", listenerName, time.Now().UnixNano())
-    newSnap, err := cachev3.NewSnapshot(version, resources)
-    if err != nil {
-        return fmt.Errorf("failed to create new snapshot: %w", err)
-    }
-
-    if err := sm.Cache.SetSnapshot(ctx, sm.NodeID, newSnap); err != nil {
-        return fmt.Errorf("failed to set new snapshot: %w", err)
-    }
-    if err := sm.FlushCacheToDB(ctx, storage.DeleteLogical); err != nil {
-        return fmt.Errorf("failed to flush cache to DB: %w", err)
-    }
-    log.Infof("Successfully updated filter chain (match: %v) on listener '%s'", newServerNames, listenerName)
-
-    return nil
-}
-
-// RemoveFilterChainFromListener loads the current listener from the cache, removes the
-// FilterChain that matches the provided ServerNames from the listener's list of FilterChains,
-// and updates the cache with the new snapshot.
-func (sm *SnapshotManager) RemoveFilterChainFromListener(ctx context.Context, listenerName string, serverNames []string) error {
-    log := internallog.LogFromContext(ctx)
-
-    // 1. Validate input and get the current Listener from the cache.
-    // Matching is by ServerNames, consistent with UpdateFilterChainOfListener.
-    if len(serverNames) == 0 {
-        return fmt.Errorf("target filter chain match must specify at least one ServerName for targeted removal")
-    }
-
-    resource, err := sm.GetResourceFromCache(listenerName, resourcev3.ListenerType)
-    if err != nil {
-        return fmt.Errorf("failed to get listener '%s' from cache: %w", listenerName, err)
-    }
-
-    listener, ok := resource.(*listenerv3.Listener)
-    if !ok {
-        return fmt.Errorf("resource '%s' is not a Listener type", listenerName)
-    }
-
-    // 2. Iterate and attempt to find and remove the matching filter chain
-    foundMatch := false
-    var updatedChains []*listenerv3.FilterChain // New slice for chains to keep
-
-    for _, existingChain := range listener.FilterChains {
-        existingServerNames := existingChain.GetFilterChainMatch().GetServerNames()
-        if len(serverNames) == 1 && serverNames[0] == "(default)" && len(existingServerNames) == 0 {
-            foundMatch = true
-            log.Debugf("Removing default filter chain from listener '%s'", listenerName)
-            continue
-        }
-        // Use ServerNamesEqual for matching
-        if ServerNamesEqual(existingServerNames, serverNames) {
-            // Match found! DO NOT append this chain, effectively removing it.
-            foundMatch = true
-            log.Debugf("Removing filter chain with match: %v from listener '%s'", serverNames, listenerName)
-            continue
-        }
-
-        // Keep the existing chain if it does not match
-        updatedChains = append(updatedChains, existingChain)
-    }
-
-    // 3. Handle the result
-    if !foundMatch {
-        return fmt.Errorf("no existing filter chain found on listener '%s' with matching server names: %v",
-            listenerName, serverNames)
-    }
-
-    // 4. Update the listener with the new slice of filter chains
-    listener.FilterChains = updatedChains
-
-    // 5. Get current snapshot to extract all resources for the new snapshot
-    snap, err := sm.Cache.GetSnapshot(sm.NodeID)
-    if err != nil {
-        return fmt.Errorf("failed to get snapshot for modification: %w", err)
-    }
-
-    resources := sm.getAllResourcesFromSnapshot(snap)
-
-    // Replace the old listener with the modified one in the resource list
-    listenerList, ok := resources[resourcev3.ListenerType]
-    if !ok {
-        return fmt.Errorf("listener resource type not present in snapshot")
-    }
-
-    foundAndReplaced := false
-    for i, res := range listenerList {
-        if namer, ok := res.(interface{ GetName() string }); ok && namer.GetName() == listenerName {
-            listenerList[i] = listener // Replace with the modified listener
-            foundAndReplaced = true
-            break
-        }
-    }
-
-    if !foundAndReplaced {
-        // Should not happen if GetResourceFromCache succeeded.
-        return fmt.Errorf("failed to locate listener '%s' in current resource list for replacement during removal", listenerName)
-    }
-
-    // 6. Create and set the new snapshot
-    version := fmt.Sprintf("listener-remove-chain-%s-%d", listenerName, time.Now().UnixNano())
-    newSnap, err := cachev3.NewSnapshot(version, resources)
-    if err != nil {
-        return fmt.Errorf("failed to create new snapshot: %w", err)
-    }
-
-    if err := sm.Cache.SetSnapshot(ctx, sm.NodeID, newSnap); err != nil {
-        return fmt.Errorf("failed to set new snapshot: %w", err)
-    }
-    // Persist the updated snapshot after the cache change
-    if err := sm.FlushCacheToDB(ctx, storage.DeleteLogical); err != nil {
-        return fmt.Errorf("failed to flush cache to DB: %w", err)
-    }
-    log.Infof("Successfully removed filter chain (match: %v) from listener '%s'", serverNames, listenerName)
-
-    return nil
-}
-
-// AddResourceToSnapshot adds any resource to the snapshot dynamically
-func (sm *SnapshotManager) AddResourceToSnapshot(resource types.Resource, typ resourcev3.Type) error {
-    snap, err := sm.Cache.GetSnapshot(sm.NodeID)
-    if err != nil {
-        return fmt.Errorf("failed to get snapshot from cache: %w", err)
-    }
-    resources := sm.getAllResourcesFromSnapshot(snap)
-
-    // Append to the appropriate slice
-    switch typ {
-    case resourcev3.ClusterType:
-        resources[resourcev3.ClusterType] = append(resources[resourcev3.ClusterType], resource)
-    case resourcev3.ListenerType:
-        resources[resourcev3.ListenerType] = append(resources[resourcev3.ListenerType], resource)
-    case resourcev3.EndpointType, resourcev3.SecretType, resourcev3.RuntimeType:
-        resources[typ] = append(resources[typ], resource)
-    default:
-        return fmt.Errorf("unsupported resource type: %s", typ)
-    }
-
-    resourceNamer, ok := resource.(interface{ GetName() string })
-    if !ok {
-        return fmt.Errorf("resource of type %s does not implement GetName()", typ)
-    }
-
-    newSnap, err := cachev3.NewSnapshot(
-        "snap-generic-"+resourceNamer.GetName()+"-"+time.Now().Format(time.RFC3339),
-        resources,
-    )
-    if err != nil {
-        return fmt.Errorf("failed to create snapshot: %w", err)
-    }
-    return sm.Cache.SetSnapshot(context.TODO(), sm.NodeID, newSnap)
-}
-
-// RemoveResource removes any resource by name dynamically
-func (sm *SnapshotManager) RemoveResource(name string, typ resourcev3.Type, strategy storage.DeleteStrategy) error {
-    snap, err := sm.Cache.GetSnapshot(sm.NodeID)
-    if err != nil {
-        return fmt.Errorf("failed to get snapshot from cache: %w", err)
-    }
-    resources := sm.getAllResourcesFromSnapshot(snap)
-
-    // Flag to check if resource was found in cache
-    var resourceFound = false
-
-    // Filter the target type
-    if targetResources, ok := resources[typ]; ok {
-        resources[typ], resourceFound = filterAndCheckResourcesByName(targetResources, name)
-    }
-
-    if strategy == storage.DeleteActual {
-        if resourceFound {
-            return fmt.Errorf("actual delete requested but resource %s of type %s still exists in cache", name, typ)
-        }
-        if typ == resourcev3.ClusterType {
-            if err := sm.DB.RemoveCluster(context.TODO(), name); err != nil {
-                return fmt.Errorf("failed to delete cluster %s from DB: %w", name, err)
-            }
-            return nil
-        }
-        if typ == resourcev3.ListenerType {
-            if err := sm.DB.RemoveListener(context.TODO(), name); err != nil {
-                return fmt.Errorf("failed to delete listener %s from DB: %w", name, err)
-            }
-            return nil
-        }
-        return fmt.Errorf("actual delete not supported for resource type: %s", typ)
-    }
-
-    if !resourceFound {
-        return fmt.Errorf("resource %s of type %s not found in cache", name, typ)
-    }
-
-    newSnap, err := cachev3.NewSnapshot(
-        "snap-remove-generic-"+name,
-        resources,
-    )
-    if err != nil {
-        return fmt.Errorf("failed to create snapshot: %w", err)
-    }
-
-    if err := sm.Cache.SetSnapshot(context.TODO(), sm.NodeID, newSnap); err != nil {
-        return fmt.Errorf("failed to set snapshot: %w", err)
-    }
-
-    if err := sm.FlushCacheToDB(context.TODO(), strategy); err != nil {
-        return fmt.Errorf("failed to flush cache to DB: %w", err)
-    }
-    return nil
-}
-
-// GetResourceFromCache retrieves a resource by name and type from the cache.
-func (sm *SnapshotManager) GetResourceFromCache(name string, typ resourcev3.Type) (types.Resource, error) {
-    snap, err := sm.Cache.GetSnapshot(sm.NodeID)
-    if err != nil {
-        return nil, err
-    }
-    r, ok := snap.GetResources(string(typ))[name]
-    if !ok {
-        return nil, fmt.Errorf("%s resource %s not found in cache", typ, name)
-    }
-
-    // We rely on the type given to be correct, as all xDS resources implement GetName().
-    return r, nil
-}
-
-// getAllResourcesFromSnapshot retrieves all known resource types from a snapshot as a map.
-func (sm *SnapshotManager) getAllResourcesFromSnapshot(snap cachev3.ResourceSnapshot) map[resourcev3.Type][]types.Resource {
-    // Only include types that might be manipulated by the generic functions
-    resources := map[resourcev3.Type][]types.Resource{
-        resourcev3.ClusterType:  mapToSlice(snap.GetResources(string(resourcev3.ClusterType))),
-        resourcev3.ListenerType: mapToSlice(snap.GetResources(string(resourcev3.ListenerType))),
-        // resourcev3.EndpointType: mapToSlice(snap.GetResources(string(resourcev3.EndpointType))),
-        // resourcev3.SecretType:   mapToSlice(snap.GetResources(string(resourcev3.SecretType))),
-        // resourcev3.RuntimeType:  mapToSlice(snap.GetResources(string(resourcev3.RuntimeType))),
-        // Include other types as needed
-    }
-    return resources
-}
-
-// mapToSlice converts a map of named resources to a slice of resources.
-func mapToSlice(m map[string]types.Resource) []types.Resource {
-    out := make([]types.Resource, 0, len(m))
-    for _, r := range m {
-        out = append(out, r)
-    }
-    return out
-}
-
-// filterAndCheckResourcesByName filters a slice of resources by name,
-// returning the filtered slice and a boolean indicating if the named resource was found.
-func filterAndCheckResourcesByName(resources []types.Resource, name string) ([]types.Resource, bool) {
-    filtered := []types.Resource{}
-    var found = false
-    for _, r := range resources {
-        if namer, ok := r.(interface{ GetName() string }); ok {
-            if namer.GetName() != name {
-                filtered = append(filtered, r)
-            } else {
-                found = true
-            }
-        } else {
-            // fallback, include unknown type
-            filtered = append(filtered, r)
-        }
-    }
-    return filtered, found
-}
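A sketch tying these CRUD helpers to the YAML loader defined in resource_io.go below (the listener name and the YAML source are assumptions):

func addChainFromYAML(ctx context.Context, sm *SnapshotManager, yamlStr string) error {
    chain, err := LoadFilterChainFromYAML(ctx, yamlStr) // defined in resource_io.go
    if err != nil {
        return err
    }
    // Appends the chain, updates the cache snapshot, and write-through persists.
    return sm.AppendFilterChainToListener(ctx, "listener_https", chain) // name assumed
}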
diff --git a/internal/snapshot/resource_io.go b/internal/snapshot/resource_io.go
deleted file mode 100644
index ea381cd..0000000
--- a/internal/snapshot/resource_io.go
+++ /dev/null
@@ -1,247 +0,0 @@
-package snapshot
-
-import (
-    "context"
-    "encoding/json"
-    "fmt"
-    "os"
-
-    clusterv3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
-    listenerv3 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
-    _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
-    _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/tcp_proxy/v3"
-
-    "github.com/envoyproxy/go-control-plane/pkg/cache/types"
-    "github.com/envoyproxy/go-control-plane/pkg/cache/v3"
-    resourcev3 "github.com/envoyproxy/go-control-plane/pkg/resource/v3"
-    "google.golang.org/protobuf/encoding/protojson"
-
-    yaml "gopkg.in/yaml.v3"
-
-    internallog "envoy-control-plane/internal/log"
-)
-
-// YamlResources is a helper struct to unmarshal the common Envoy YAML file structure
-type YamlResources struct {
-    Resources []yaml.Node `yaml:"resources"`
-}
-
-// unmarshalYamlNodeToProto takes a generic map representation of a YAML/JSON object
-// and unmarshals it into the given Protobuf resource pointer using protojson.
-func unmarshalYamlNodeToProto(node map[string]interface{}, resource types.Resource) error {
-    // 1. Remove the standard Protobuf type marker (if present) before marshaling to JSON.
-    delete(node, "@type")
-
-    // 2. Marshal the generic map into JSON bytes.
-    jsonBytes, err := json.Marshal(node)
-    if err != nil {
-        return fmt.Errorf("failed to marshal resource node to JSON: %w", err)
-    }
-
-    // 3. Unmarshal the JSON bytes into the target Protobuf struct.
-    if err := protojson.Unmarshal(jsonBytes, resource); err != nil {
-        return fmt.Errorf("failed to unmarshal into proto: %w", err)
-    }
-    return nil
-}
-
-// LoadAndUnmarshal takes raw data (e.g., from a file or string) and unmarshals
-// it into the target interface using YAML/JSON rules.
-func LoadAndUnmarshal(data []byte, target interface{}) error {
-    if err := yaml.Unmarshal(data, target); err != nil {
-        return fmt.Errorf("failed to unmarshal data: %w", err)
-    }
-    return nil
-}
-
-// ProcessFn is a function type that defines the processing logic to be applied
-// to a potential resource node found during the walk.
-// It returns a non-nil error if processing fails and should stop the walk.
-type ProcessFn func(ctx context.Context, node map[string]interface{}) error
-
-// WalkAndProcess traverses an arbitrary Go data structure (unmarshaled from YAML/JSON)
-// and applies the provided ProcessFn to every map[string]interface{} node it finds.
-func WalkAndProcess(ctx context.Context, raw interface{}, processFn ProcessFn) error {
-    var walk func(node interface{}) error
-    walk = func(node interface{}) error {
-        switch v := node.(type) {
-        case map[string]interface{}:
-            // Apply the custom processing function to the current map node
-            if err := processFn(ctx, v); err != nil {
-                return err
-            }
-
-            // Recurse into children
-            for _, child := range v {
-                if err := walk(child); err != nil {
-                    return err
-                }
-            }
-
-        case []interface{}:
-            for _, item := range v {
-                if err := walk(item); err != nil {
-                    return err
-                }
-            }
-        }
-        return nil
-    }
-
-    return walk(raw)
-}
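WalkAndProcess is generic over the node shape; a small sketch of a ProcessFn that collects every "@type" marker instead of building resources (the function name is illustrative):

func collectTypeURLs(ctx context.Context, raw interface{}) ([]string, error) {
    var urls []string
    err := WalkAndProcess(ctx, raw, func(_ context.Context, node map[string]interface{}) error {
        if t, ok := node["@type"].(string); ok {
            urls = append(urls, t)
        }
        return nil
    })
    return urls, err
}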
-
-// makeResourceProcessor creates the specific ProcessFn needed for the xDS resource logic.
-func makeResourceProcessor(
-    log internallog.Logger,
-    resources map[resourcev3.Type][]types.Resource,
-) ProcessFn {
-    return func(_ context.Context, v map[string]interface{}) error {
-        if typStr, ok := v["@type"].(string); ok {
-            typ := resourcev3.Type(typStr)
-
-            // only process known top-level xDS resources
-            var resource types.Resource
-            var newResource bool
-
-            switch typ {
-            case resourcev3.ClusterType:
-                resource = &clusterv3.Cluster{}
-                newResource = true
-            case resourcev3.ListenerType:
-                resource = &listenerv3.Listener{}
-                newResource = true
-            // ... other types ...
-            default:
-                log.Warnf("unsupported resource type: %s", typ)
-                // Skip nested or unsupported types
-            }
-
-            if newResource {
-                if err := unmarshalYamlNodeToProto(v, resource); err != nil {
-                    return fmt.Errorf("failed to unmarshal %s from file: %w", typ, err)
-                }
-                resources[typ] = append(resources[typ], resource)
-            }
-        }
-        return nil
-    }
-}
-
-// LoadSnapshotFromFile reads a YAML/JSON file, parses it, and returns a map of xDS resources.
-func LoadSnapshotFromFile(ctx context.Context, filePath string) (map[resourcev3.Type][]types.Resource, error) {
-    log := internallog.LogFromContext(ctx)
-
-    // Read the file (Step 1: Read)
-    data, err := os.ReadFile(filePath)
-    if err != nil {
-        return nil, fmt.Errorf("failed to read file: %w", err)
-    }
-
-    // Unmarshal data (Step 2: Generic Unmarshal)
-    var raw interface{}
-    if err := LoadAndUnmarshal(data, &raw); err != nil {
-        return nil, fmt.Errorf("failed to unmarshal file %s: %w", filePath, err)
-    }
-
-    resources := make(map[resourcev3.Type][]types.Resource)
-    processor := makeResourceProcessor(log, resources)
-
-    // Walk and Process (Step 3: Generic Walk with specific processor)
-    if err := WalkAndProcess(ctx, raw, processor); err != nil {
-        return nil, err
-    }
-
-    return resources, nil
-}
-
-// SaveSnapshotToFile marshals the current cache snapshot to JSON and writes it to a file.
-func SaveSnapshotToFile(snap cache.ResourceSnapshot, filePath string) error {
-    out := make(map[string][]interface{})
-
-    // Iterate over all known types
-    clusterTypeResources := snap.GetResources(resourcev3.ClusterType)
-    for _, r := range clusterTypeResources {
-        out[resourcev3.ClusterType] = append(out[resourcev3.ClusterType], r)
-    }
-    listenerTypeResources := snap.GetResources(resourcev3.ListenerType)
-    for _, r := range listenerTypeResources {
-        out[resourcev3.ListenerType] = append(out[resourcev3.ListenerType], r)
-    }
-
-    data, err := json.MarshalIndent(out, "", "  ")
-    if err != nil {
-        return err
-    }
-
-    return os.WriteFile(filePath, data, 0644)
-}
-
-// LoadFilterChainFromYAML unmarshals a YAML string representing an Envoy Listener FilterChain
-// configuration into a listenerv3.FilterChain protobuf message using the protojson pipeline.
-func LoadFilterChainFromYAML(ctx context.Context, yamlStr string) (*listenerv3.FilterChain, error) {
-    log := internallog.LogFromContext(ctx)
-
-    // 1. Unmarshal YAML into a generic Go map
-    var rawChainMap map[string]interface{}
-    if err := yaml.Unmarshal([]byte(yamlStr), &rawChainMap); err != nil {
-        log.Errorf("Failed to unmarshal YAML: %v", err)
-        return nil, fmt.Errorf("failed to unmarshal YAML into generic map: %w", err)
-    }
-    if rawChainMap == nil {
-        return nil, fmt.Errorf("failed to unmarshal YAML: input was empty or invalid")
-    }
-
-    // 2. Unmarshal the generic map into the Protobuf struct using the helper
-    rawChain := &listenerv3.FilterChain{}
-    if err := unmarshalYamlNodeToProto(rawChainMap, rawChain); err != nil {
-        return nil, fmt.Errorf("failed to unmarshal YAML into FilterChain using protojson: %w", err)
-    }
-
-    // Check if the FilterChain contains any filters (optional but good sanity check)
-    if len(rawChain.Filters) == 0 {
-        return nil, fmt.Errorf("filter chain loaded but contains no network filters")
-    }
-
-    // Return the single FilterChain object.
-    return rawChain, nil
-}
-
-// LoadResourceFromYAML unmarshals a YAML string representing a single xDS resource
-// (like a Cluster or a Listener) into the appropriate Protobuf message.
-//
-// It leverages the existing WalkAndProcess logic to find and unmarshal the resource.
-func LoadResourceFromYAML(ctx context.Context, yamlStr string, expectedType resourcev3.Type) ([]types.Resource, error) {
-    log := internallog.LogFromContext(ctx)
-
-    // 1. Unmarshal YAML into a generic Go map
-    var raw interface{}
-    if err := LoadAndUnmarshal([]byte(yamlStr), &raw); err != nil {
-        return nil, fmt.Errorf("failed to unmarshal YAML into generic structure: %w", err)
-    }
-
-    // This map will hold the one resource we expect to find.
-    resources := make(map[resourcev3.Type][]types.Resource)
-
-    // 2. Use the standard resource processor to walk the structure.
-    processor := makeResourceProcessor(log, resources)
-
-    // 3. Walk and Process
-    if err := WalkAndProcess(ctx, raw, processor); err != nil {
-        return nil, fmt.Errorf("failed to walk and process YAML: %w", err)
-    }
-
-    // 4. Validate and return the resources
-    resourceList, ok := resources[expectedType]
-    if !ok || len(resourceList) == 0 {
-        // Only return an error if the expected type was not found.
-        return nil, fmt.Errorf("no resource of expected type %s found in YAML", expectedType)
-    }
-    if len(resourceList) > 1 {
-        // Warn if multiple resources were found; the full list is returned and
-        // callers typically use the first element.
-        log.Warnf("found %d resources of type %s, expected 1", len(resourceList), expectedType)
-    }
-    return resourceList, nil
-}
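A usage sketch for the loader above, asserting the concrete proto type after the generic walk (the wrapper name is illustrative):

func parseCluster(ctx context.Context, yamlStr string) (*clusterv3.Cluster, error) {
    resources, err := LoadResourceFromYAML(ctx, yamlStr, resourcev3.ClusterType)
    if err != nil {
        return nil, err
    }
    c, ok := resources[0].(*clusterv3.Cluster)
    if !ok {
        return nil, fmt.Errorf("unexpected resource type %T", resources[0])
    }
    return c, nil
}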
diff --git a/internal/snapshot/resource_io_test.go b/internal/snapshot/resource_io_test.go
deleted file mode 100644
index 40f591d..0000000
--- a/internal/snapshot/resource_io_test.go
+++ /dev/null
@@ -1,198 +0,0 @@
-package snapshot
-
-import (
-    "context"
-    "testing"
-)
-
-// NOTE: Assume MockLogger and SnapshotManager are defined for the test to run.
-// The actual implementation of LoadFilterChainFromYAML is assumed to be available
-// to the test file.
-
-// TestLoadFilterChainFromYAML_ComplexInput tests the functionality of LoadFilterChainFromYAML
-func TestLoadFilterChainFromYAML_ComplexInput(t *testing.T) {
-    ctx := context.Background()
-
-    // The user's provided, valid YAML for a single FilterChain object
-    validComplexYAML := `
-    filters:
-    - name: envoy.filters.network.http_connection_manager
-      typed_config:
-        "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
-        stat_prefix: ingress_http
-        codec_type: AUTO
-        upgrade_configs:
-        - upgrade_type: websocket
-        stream_idle_timeout: 0s
-        normalize_path: true
-        merge_slashes: true
-        route_config:
-          virtual_hosts:
-          - name: printer_service
-            domains: ["printer.jerxie.com"]
-            routes:
-            - match: { prefix: "/webcam" }
-              route: { prefix_rewrite: "/", cluster: "_3d_printer_camera", max_stream_duration: {grpc_timeout_header_max: 0s} }
-            - match: { prefix: "/" }
-              route: { cluster: "_3d_printer_console"}
-        http_filters:
-        - name: envoy.filters.http.oauth2
-          typed_config:
-            "@type": type.googleapis.com/envoy.extensions.filters.http.oauth2.v3.OAuth2
-            config:
-              token_endpoint:
-                cluster: _auth_server
-                uri: auth.jerxie.com/token
-                timeout: 3s
-              authorization_endpoint: https://auth.jerxie.com/auth
-              redirect_uri: "%REQ(x-forwarded-proto)%://%REQ(:authority)%/callback"
-              redirect_path_matcher:
-                path:
-                  exact: /callback
-              signout_path:
-                path:
-                  exact: /signout
-              forward_bearer_token: true
-              credentials:
-                client_id: octoprint-portal
-                token_secret:
-                  name: token
-                  sds_config:
-                    path: "/etc/envoy/token-secret.yaml"
-                hmac_secret:
-                  name: hmac
-                  sds_config:
-                    path: "/etc/envoy/hmac-secret.yaml"
-              auth_scopes:
-              - openid
-              - email
-        - name: envoy.filters.http.jwt_authn
-          typed_config:
-            "@type": type.googleapis.com/envoy.extensions.filters.http.jwt_authn.v3.JwtAuthentication
-            providers:
-              provider1:
-                remote_jwks:
-                  http_uri:
-                    uri: "https://auth.jerxie.com/keys"
-                    cluster: _auth_server
-                    timeout: 5s
-                  cache_duration: 600s
-                from_headers:
-                - name: Authorization
-                  value_prefix: "Bearer "
-                payload_in_metadata: jwt_payload
-            rules:
-            - match:
-                prefix: /
-              requires:
-                provider_name: provider1
-        - name: envoy.filters.http.lua
-          typed_config:
-            "@type": type.googleapis.com/envoy.extensions.filters.http.lua.v3.Lua
-            inline_code: |
-              email = ""
-              function envoy_on_request(request_handle)
-                email = ""
-                local meta = request_handle:streamInfo():dynamicMetadata()
-                for key, value in pairs(meta:get("envoy.filters.http.jwt_authn")) do
-                  if key == "jwt_payload" then
-                    for k, v in pairs(value) do
-                      if k == "email" then
-                        print("login octoprint: "..v)
-                        email = v
-                        request_handle:headers():add("ENVOY_AUTHENTICATED_USER", v)
-                      end
-                    end
-                  end
-                end
-              end
-
-              function envoy_on_response(response_handle)
-                if email ~= "" and email ~= "axieyangb@gmail.com" then
-                  response_handle:logInfo("Got unauthorized user, return 403 for user " ..email)
-                  response_handle:headers():add("set-cookie", "BearerToken=deleted; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT")
-                  response_handle:headers():add("set-cookie", "OauthHMAC=deleted; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT")
-                  response_handle:headers():add("set-cookie", "IdToken=deleted; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT")
-                  response_handle:headers():add("set-cookie", "OauthExpires=deleted; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT")
-                end
-                email = ""
-              end
-        - name: envoy.filters.http.router
-          typed_config:
-            "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router
-    filter_chain_match:
-      server_names: ["printer.jerxie.com", "printer.local"]
-    transport_socket:
-      name: envoy.transport_sockets.tls
-      typed_config:
-        "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext
-        common_tls_context:
-          tls_certificates:
-          - certificate_chain: { filename: "/etc/certs/downstream/printer.jerxie.com/fullchain.pem" }
-            private_key: { filename: "/etc/certs/downstream/printer.jerxie.com/privkey.pem" }
-`
-
-    tests := []struct {
-        name        string
-        yamlStr     string
-        expectError bool
-        expectedLen int // Expected number of network filters (top-level filters array)
-    }{
-        {
-            name:        "Success_ComplexSingleFilterChain",
-            yamlStr:     validComplexYAML,
-            expectError: false,
-            expectedLen: 1, // Only one top-level network filter: http_connection_manager
-        },
-        // Re-include sanity checks for robust testing
-        {
-            name:        "Error_NoFiltersInChain",
-            yamlStr:     `filter_chain_match: { server_names: ["empty"] }`,
-            expectError: true,
-            expectedLen: 0,
-        },
-        {
-            name:        "Error_InputIsAList",
-            yamlStr:     `- filters: []`,
-            expectError: true, // Should fail unmarshaling a list into a single struct
-            expectedLen: 0,
-        },
-    }
-
-    for _, tt := range tests {
-        t.Run(tt.name, func(t *testing.T) {
-            chain, err := LoadFilterChainFromYAML(ctx, tt.yamlStr)
-
-            if tt.expectError {
-                if err == nil {
-                    t.Errorf("Expected an error but got nil")
-                }
-                if chain != nil {
-                    t.Errorf("Expected nil chain on error, but got non-nil")
-                }
-            } else {
-                if err != nil {
-                    t.Fatalf("Expected no error but got: %v", err)
-                }
-                if chain == nil {
-                    t.Fatal("Expected non-nil filter chain, but got nil")
-                }
-
-                // 1. Check top-level filter count
-                if len(chain.Filters) != tt.expectedLen {
-                    t.Errorf("Top-level filter count mismatch. Got %d, want %d", len(chain.Filters), tt.expectedLen)
-                }
-
-                // 2. Check a deeply nested value to ensure complex unmarshaling worked
-                if len(chain.FilterChainMatch.ServerNames) == 0 || chain.FilterChainMatch.ServerNames[0] != "printer.jerxie.com" {
-                    t.Errorf("FilterChainMatch assertion failed. Expected server name 'printer.jerxie.com'")
-                }
-
-                // 3. Check the name of the top-level filter
-                if chain.Filters[0].Name != "envoy.filters.network.http_connection_manager" {
-                    t.Errorf("Top-level filter name mismatch. Got %s", chain.Filters[0].Name)
-                }
-            }
-        })
-    }
-}
Got %s", chain.Filters[0].Name) - } - } - }) - } -} diff --git a/internal/storage/storage.go b/internal/storage/storage.go deleted file mode 100644 index 4d44d4e..0000000 --- a/internal/storage/storage.go +++ /dev/null @@ -1,604 +0,0 @@ -package storage - -import ( - "context" - "database/sql" - "encoding/json" - "fmt" - "strings" - - clusterv3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" - listenerv3 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" - - // routev3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" // REMOVED - - "google.golang.org/protobuf/encoding/protojson" -) - -// Storage abstracts database persistence -type Storage struct { - db *sql.DB - driver string -} - -// DeleteStrategy defines the action to take on missing resources -type DeleteStrategy int - -const ( - // DeleteNone performs only UPSERT for items in the list (default behavior) - DeleteNone DeleteStrategy = iota - // DeleteLogical marks missing resources as disabled (now applicable to clusters and listeners) - DeleteLogical - // DeleteActual removes missing resources physically from the database - DeleteActual -) - -// NewStorage initializes a Storage instance -func NewStorage(db *sql.DB, driver string) *Storage { - return &Storage{db: db, driver: driver} -} - -// placeholder returns correct SQL placeholder based on driver -func (s *Storage) placeholder(n int) string { - if s.driver == "postgres" { - return fmt.Sprintf("$%d", n) - } - return "?" -} - -// InitSchema ensures required tables exist -func (s *Storage) InitSchema(ctx context.Context) error { - var schema string - switch s.driver { - case "postgres": - schema = ` - CREATE TABLE IF NOT EXISTS clusters ( - id SERIAL PRIMARY KEY, - name TEXT UNIQUE NOT NULL, - data JSONB NOT NULL, - enabled BOOLEAN DEFAULT true, - updated_at TIMESTAMP DEFAULT now() - ); - -- REMOVED routes table - CREATE TABLE IF NOT EXISTS listeners ( - id SERIAL PRIMARY KEY, - name TEXT UNIQUE NOT NULL, - data JSONB NOT NULL, - enabled BOOLEAN DEFAULT true, - updated_at TIMESTAMP DEFAULT now() - );` - default: // SQLite - schema = ` - CREATE TABLE IF NOT EXISTS clusters ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - name TEXT UNIQUE NOT NULL, - data TEXT NOT NULL, - enabled BOOLEAN DEFAULT 1, - updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP - ); - -- REMOVED routes table - CREATE TABLE IF NOT EXISTS listeners ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - name TEXT UNIQUE NOT NULL, - data TEXT NOT NULL, - enabled BOOLEAN DEFAULT 1, - updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP - );` - } - _, err := s.db.ExecContext(ctx, schema) - return err -} - -// SaveCluster inserts or updates a cluster -func (s *Storage) SaveCluster(ctx context.Context, cluster *clusterv3.Cluster) error { - data, err := protojson.Marshal(cluster) - if err != nil { - return err - } - - var query string - switch s.driver { - case "postgres": - // Explicitly set enabled=true on update to re-enable a logically deleted cluster - query = fmt.Sprintf(` - INSERT INTO clusters (name, data, enabled, updated_at) - VALUES (%s, %s, true, now()) - ON CONFLICT (name) DO UPDATE SET data = %s, enabled = true, updated_at = now()`, - s.placeholder(1), s.placeholder(2), s.placeholder(2)) - default: // SQLite - // Explicitly set enabled=1 on update to re-enable a logically deleted cluster - query = ` - INSERT INTO clusters (name, data, enabled, updated_at) - VALUES (?, ?, 1, CURRENT_TIMESTAMP) - ON CONFLICT(name) DO UPDATE SET data=excluded.data, enabled=1, updated_at=CURRENT_TIMESTAMP` - } 
- - _, err = s.db.ExecContext(ctx, query, cluster.GetName(), string(data)) - return err -} - -// SaveRoute inserts or updates a route // REMOVED -// func (s *Storage) SaveRoute(ctx context.Context, route *routev3.RouteConfiguration) error { -// // ... (route logic removed) -// } - -// SaveListener inserts or updates a listener -func (s *Storage) SaveListener(ctx context.Context, listener *listenerv3.Listener) error { - data, err := protojson.Marshal(listener) - if err != nil { - return err - } - - var query string - switch s.driver { - case "postgres": - // Explicitly set enabled=true on update to re-enable a logically deleted listener - query = fmt.Sprintf(` - INSERT INTO listeners (name, data, enabled, updated_at) - VALUES (%s, %s, true, now()) - ON CONFLICT (name) DO UPDATE SET data = %s, enabled = true, updated_at = now()`, - s.placeholder(1), s.placeholder(2), s.placeholder(2)) - default: // SQLite - // Explicitly set enabled=1 on update to re-enable a logically deleted listener - query = ` - INSERT INTO listeners (name, data, enabled, updated_at) - VALUES (?, ?, 1, CURRENT_TIMESTAMP) - ON CONFLICT(name) DO UPDATE SET data=excluded.data, enabled=1, updated_at=CURRENT_TIMESTAMP` - } - - _, err = s.db.ExecContext(ctx, query, listener.GetName(), string(data)) - return err -} - -// LoadEnabledClusters retrieves all enabled clusters -func (s *Storage) LoadEnabledClusters(ctx context.Context) ([]*clusterv3.Cluster, error) { - query := `SELECT data FROM clusters` - if s.driver == "postgres" { - query += ` WHERE enabled = true` - } else { - query += ` WHERE enabled = 1` - } - - rows, err := s.db.QueryContext(ctx, query) - if err != nil { - return nil, err - } - defer rows.Close() - - var clusters []*clusterv3.Cluster - for rows.Next() { - var raw json.RawMessage - // FIX: Handle type difference between Postgres (JSONB) and SQLite (TEXT) - if s.driver != "postgres" { - var dataStr string - if err := rows.Scan(&dataStr); err != nil { - return nil, err - } - raw = json.RawMessage(dataStr) // Convert string to json.RawMessage - } else { - if err := rows.Scan(&raw); err != nil { - return nil, err - } - } - - var cluster clusterv3.Cluster - if err := protojson.Unmarshal(raw, &cluster); err != nil { - return nil, err - } - clusters = append(clusters, &cluster) - } - return clusters, nil -} - -// LoadAllClusters retrieves all clusters, regardless of their enabled status -func (s *Storage) LoadAllClusters(ctx context.Context) ([]*clusterv3.Cluster, error) { - rows, err := s.db.QueryContext(ctx, `SELECT data FROM clusters`) - if err != nil { - return nil, err - } - defer rows.Close() - - var clusters []*clusterv3.Cluster - for rows.Next() { - var raw json.RawMessage - // FIX: Handle type difference between Postgres (JSONB) and SQLite (TEXT) - if s.driver != "postgres" { - var dataStr string - if err := rows.Scan(&dataStr); err != nil { - return nil, err - } - raw = json.RawMessage(dataStr) // Convert string to json.RawMessage - } else { - if err := rows.Scan(&raw); err != nil { - return nil, err - } - } - - var cluster clusterv3.Cluster - if err := protojson.Unmarshal(raw, &cluster); err != nil { - return nil, err - } - clusters = append(clusters, &cluster) - } - return clusters, nil -} - -// LoadEnabledRoutes retrieves all enabled routes // REMOVED -// func (s *Storage) LoadEnabledRoutes(ctx context.Context) ([]*routev3.RouteConfiguration, error) { -// // ... 
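Every Load* function above repeats the same JSONB-versus-TEXT scan dance: a Postgres JSONB column scans directly into json.RawMessage, while SQLite stores TEXT, which must be scanned into a string first. A sketch of that logic factored into a helper (scanRaw is a hypothetical name; the deleted code inlines this in each loop):

// Sketch: normalizing one data column into json.RawMessage per driver.
package storage

import (
	"database/sql"
	"encoding/json"
)

func scanRaw(rows *sql.Rows, driver string) (json.RawMessage, error) {
	if driver != "postgres" {
		var dataStr string // SQLite: TEXT column, scan as string then convert
		if err := rows.Scan(&dataStr); err != nil {
			return nil, err
		}
		return json.RawMessage(dataStr), nil
	}
	var raw json.RawMessage // Postgres: JSONB scans into a byte slice directly
	if err := rows.Scan(&raw); err != nil {
		return nil, err
	}
	return raw, nil
}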
(route logic removed) -// } - -// LoadAllRoutes retrieves all routes, regardless of their enabled status // REMOVED -// func (s *Storage) LoadAllRoutes(ctx context.Context) ([]*routev3.RouteConfiguration, error) { -// // ... (route logic removed) -// } - -// LoadEnabledListeners retrieves all enabled listeners -func (s *Storage) LoadEnabledListeners(ctx context.Context) ([]*listenerv3.Listener, error) { - query := `SELECT data FROM listeners` - if s.driver == "postgres" { - query += ` WHERE enabled = true` - } else { - query += ` WHERE enabled = 1` - } - - rows, err := s.db.QueryContext(ctx, query) - if err != nil { - return nil, err - } - defer rows.Close() - - var listeners []*listenerv3.Listener - for rows.Next() { - var raw json.RawMessage - // FIX: Handle type difference between Postgres (JSONB) and SQLite (TEXT) - if s.driver != "postgres" { - var dataStr string - if err := rows.Scan(&dataStr); err != nil { - return nil, err - } - raw = json.RawMessage(dataStr) // Convert string to json.RawMessage - } else { - if err := rows.Scan(&raw); err != nil { - return nil, err - } - } - - var l listenerv3.Listener - if err := protojson.Unmarshal(raw, &l); err != nil { - return nil, err - } - listeners = append(listeners, &l) - } - return listeners, nil -} - -// LoadAllListeners retrieves all listeners, regardless of their enabled status -func (s *Storage) LoadAllListeners(ctx context.Context) ([]*listenerv3.Listener, error) { - rows, err := s.db.QueryContext(ctx, `SELECT data FROM listeners`) - if err != nil { - return nil, err - } - defer rows.Close() - - var listeners []*listenerv3.Listener - for rows.Next() { - var raw json.RawMessage - // FIX: Handle type difference between Postgres (JSONB) and SQLite (TEXT) - if s.driver != "postgres" { - var dataStr string - if err := rows.Scan(&dataStr); err != nil { - return nil, err - } - raw = json.RawMessage(dataStr) // Convert string to json.RawMessage - } else { - if err := rows.Scan(&raw); err != nil { - return nil, err - } - } - - var l listenerv3.Listener - if err := protojson.Unmarshal(raw, &l); err != nil { - return nil, err - } - listeners = append(listeners, &l) - } - return listeners, nil -} - -// RebuildSnapshot rebuilds full snapshot from DB -func (s *Storage) RebuildSnapshot(ctx context.Context) (*SnapshotConfig, error) { - // 1. Load Enabled Resources (for xDS serving) - enabledClusters, err := s.LoadEnabledClusters(ctx) - if err != nil { - return nil, err - } - // enabledRoutes, err := s.LoadEnabledRoutes(ctx) // REMOVED - // if err != nil { - // return nil, err - // } - enabledListeners, err := s.LoadEnabledListeners(ctx) - if err != nil { - return nil, err - } - - // 2. Load ALL Resources (for comparison and disabled set) - allClusters, err := s.LoadAllClusters(ctx) - if err != nil { - return nil, err - } - // allRoutes, err := s.LoadAllRoutes(ctx) // REMOVED - // if err != nil { - // return nil, err - // } - allListeners, err := s.LoadAllListeners(ctx) - if err != nil { - return nil, err - } - - // 3. 
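The listener loaders mirror the cluster loaders line for line; the notable choice is serializing with protojson rather than encoding/json, since Envoy resources are protobuf messages and protojson is what produces and accepts their canonical proto3 JSON form (plain encoding/json would not round-trip them reliably). A minimal round-trip demonstrating the pairing used for the data column:

// Sketch: protojson round-trip of an Envoy Cluster, as the storage layer does.
package main

import (
	"fmt"

	clusterv3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
	"google.golang.org/protobuf/encoding/protojson"
)

func main() {
	in := &clusterv3.Cluster{Name: "example"}
	data, err := protojson.Marshal(in) // what SaveCluster stores
	if err != nil {
		panic(err)
	}
	out := &clusterv3.Cluster{}
	if err := protojson.Unmarshal(data, out); err != nil { // what LoadEnabledClusters does
		panic(err)
	}
	fmt.Println(out.GetName()) // example
}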
Separate Disabled Resources - - // Clusters - enabledClusterNames := make(map[string]struct{}, len(enabledClusters)) - for _, c := range enabledClusters { - enabledClusterNames[c.GetName()] = struct{}{} - } - var disabledClusters []*clusterv3.Cluster - for _, c := range allClusters { - if _, found := enabledClusterNames[c.GetName()]; !found { - disabledClusters = append(disabledClusters, c) - } - } - - // Routes // REMOVED - // enabledRouteNames := make(map[string]struct{}, 0) - // var disabledRoutes []*routev3.RouteConfiguration - // for _, r := range allRoutes { - // if _, found := enabledRouteNames[r.GetName()]; !found { - // disabledRoutes = append(disabledRoutes, r) - // } - // } - - // Listeners - enabledListenerNames := make(map[string]struct{}, len(enabledListeners)) - for _, l := range enabledListeners { - enabledListenerNames[l.GetName()] = struct{}{} - } - var disabledListeners []*listenerv3.Listener - for _, l := range allListeners { - if _, found := enabledListenerNames[l.GetName()]; !found { - disabledListeners = append(disabledListeners, l) - } - } - - return &SnapshotConfig{ - EnabledClusters: enabledClusters, - // EnabledRoutes: nil, // REMOVED - EnabledListeners: enabledListeners, - DisabledClusters: disabledClusters, - // DisabledRoutes: nil, // REMOVED - DisabledListeners: disabledListeners, - }, nil -} - -// SnapshotConfig aggregates xDS resources -type SnapshotConfig struct { - // Enabled resources (for xDS serving) - EnabledClusters []*clusterv3.Cluster - // EnabledRoutes []*routev3.RouteConfiguration // REMOVED - EnabledListeners []*listenerv3.Listener - - // Disabled resources (for UI display) - DisabledClusters []*clusterv3.Cluster - // DisabledRoutes []*routev3.RouteConfiguration // REMOVED - DisabledListeners []*listenerv3.Listener -} - -// EnableCluster toggles a cluster -func (s *Storage) EnableCluster(ctx context.Context, name string, enabled bool) error { - query := `UPDATE clusters SET enabled = ?, updated_at = CURRENT_TIMESTAMP WHERE name = ?` - if s.driver == "postgres" { - query = `UPDATE clusters SET enabled = $1, updated_at = now() WHERE name = $2` - } - _, err := s.db.ExecContext(ctx, query, enabled, name) - return err -} - -// EnableRoute toggles a route // REMOVED -// func (s *Storage) EnableRoute(ctx context.Context, name string, enabled bool) error { -// // ... (route logic removed) -// } - -// EnableListener toggles a listener -func (s *Storage) EnableListener(ctx context.Context, name string, enabled bool) error { - query := `UPDATE listeners SET enabled = ?, updated_at = CURRENT_TIMESTAMP WHERE name = ?` - if s.driver == "postgres" { - query = `UPDATE listeners SET enabled = $1, updated_at = now() WHERE name = $2` - } - _, err := s.db.ExecContext(ctx, query, enabled, name) - return err -} - -// disableMissingResources updates the 'enabled' status for resources in 'table' -// whose 'name' is NOT in 'inputNames'. -func (s *Storage) disableMissingResources(ctx context.Context, table string, inputNames []string) error { - if table != "clusters" && table != "listeners" { // CHECK UPDATED - return fmt.Errorf("logical delete (disable) is only supported for tables with an 'enabled' column (clusters, listeners)") - } - - // 1. Build placeholders and args - placeholders := make([]string, len(inputNames)) - args := make([]interface{}, len(inputNames)) - for i, name := range inputNames { - if s.driver == "postgres" { - placeholders[i] = fmt.Sprintf("$%d", i+1) - } else { - placeholders[i] = "?" 
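RebuildSnapshot's enabled/disabled split is a plain set difference, computed once for clusters and once for listeners. A generic rendering of those inlined loops (Go 1.18+; res is a stand-in type for the demo, where the real code iterates the go-control-plane types directly):

// Sketch: "everything in all that is not in enabled", keyed by GetName().
package main

import "fmt"

type named interface{ GetName() string }

func missingFrom[T named](all, enabled []T) []T {
	seen := make(map[string]struct{}, len(enabled))
	for _, e := range enabled {
		seen[e.GetName()] = struct{}{}
	}
	var out []T
	for _, item := range all {
		if _, ok := seen[item.GetName()]; !ok {
			out = append(out, item)
		}
	}
	return out
}

type res struct{ name string }

func (r res) GetName() string { return r.name }

func main() {
	all := []res{{"_nas_service"}, {"_plex_server"}, {"_git_bucket"}}
	enabled := []res{{"_nas_service"}, {"_git_bucket"}}
	fmt.Println(missingFrom(all, enabled)) // [{_plex_server}]
}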
- } - args[i] = name - } - - disabledValue := "false" - if s.driver != "postgres" { - disabledValue = "0" - } - - var updateTime string - if s.driver == "postgres" { - updateTime = "now()" - } else { - updateTime = "CURRENT_TIMESTAMP" - } - - // If no names are provided, disable ALL currently enabled resources - whereClause := "" - if len(inputNames) > 0 { - whereClause = fmt.Sprintf("WHERE name NOT IN (%s)", strings.Join(placeholders, ", ")) - } - - // 2. Construct and execute the UPDATE query - query := fmt.Sprintf(` - UPDATE %s - SET enabled = %s, updated_at = %s - %s`, - table, disabledValue, updateTime, whereClause) - - _, err := s.db.ExecContext(ctx, query, args...) - return err -} - -// deleteMissingResources physically deletes resources from 'table' whose 'name' is NOT in 'inputNames'. -func (s *Storage) deleteMissingResources(ctx context.Context, table string, inputNames []string) error { - if table != "clusters" && table != "listeners" { // CHECK UPDATED - return fmt.Errorf("physical delete is only supported for tables: clusters, listeners") - } - - // 1. Build placeholders and args - placeholders := make([]string, len(inputNames)) - args := make([]interface{}, len(inputNames)) - for i, name := range inputNames { - if s.driver == "postgres" { - placeholders[i] = fmt.Sprintf("$%d", i+1) - } else { - placeholders[i] = "?" - } - args[i] = name - } - - // If no names are provided, delete ALL resources - whereClause := "" - if len(inputNames) > 0 { - whereClause = fmt.Sprintf("WHERE name NOT IN (%s)", strings.Join(placeholders, ", ")) - } - - // 2. Construct and execute the DELETE query - query := fmt.Sprintf(` - DELETE FROM %s - %s`, - table, whereClause) - - _, err := s.db.ExecContext(ctx, query, args...) - return err -} - -func (s *Storage) SaveSnapshot(ctx context.Context, cfg *SnapshotConfig, strategy DeleteStrategy) error { - if cfg == nil { - return fmt.Errorf("SnapshotConfig is nil") - } - - // Use a transaction for atomicity - tx, err := s.db.BeginTx(ctx, nil) - if err != nil { - return fmt.Errorf("failed to begin transaction: %w", err) - } - defer func() { - if err != nil { - tx.Rollback() - return - } - err = tx.Commit() - }() - - // Note: Only Enabledxxx resources are UPSERTED. Disabledxxx resources are - // left alone unless the deletion strategy removes them. - - // --- 1. Save/Upsert Clusters and Collect Names --- - clusterNames := make([]string, 0, len(cfg.EnabledClusters)) - for _, c := range cfg.EnabledClusters { - if err = s.SaveCluster(ctx, c); err != nil { - return fmt.Errorf("failed to save cluster %s: %w", c.GetName(), err) - } - clusterNames = append(clusterNames, c.GetName()) - } - - // --- 2. Save/Upsert Routes and Collect Names --- // REMOVED - // routeNames := make([]string, 0, len(cfg.EnabledRoutes)) - // for _, r := range cfg.EnabledRoutes { - // if err = s.SaveRoute(ctx, r); err != nil { - // return fmt.Errorf("failed to save route %s: %w", r.GetName(), err) - // } - // routeNames = append(routeNames, r.GetName()) - // } - - // --- 3. Save/Upsert Listeners and Collect Names --- - listenerNames := make([]string, 0, len(cfg.EnabledListeners)) - for _, l := range cfg.EnabledListeners { - if err = s.SaveListener(ctx, l); err != nil { - return fmt.Errorf("failed to save listener %s: %w", l.GetName(), err) - } - listenerNames = append(listenerNames, l.GetName()) - } - - // --- 4. 
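disableMissingResources and deleteMissingResources share the same NOT IN construction, including the deliberate edge case that an empty name list yields no WHERE clause, so the statement touches every row. A sketch of just that piece:

// Sketch: per-driver NOT IN clause, preserving the empty-list edge case.
package main

import (
	"fmt"
	"strings"
)

func notInClause(driver string, names []string) (string, []interface{}) {
	if len(names) == 0 {
		return "", nil // no filter: the UPDATE/DELETE applies to ALL rows
	}
	placeholders := make([]string, len(names))
	args := make([]interface{}, len(names))
	for i, name := range names {
		if driver == "postgres" {
			placeholders[i] = fmt.Sprintf("$%d", i+1)
		} else {
			placeholders[i] = "?"
		}
		args[i] = name
	}
	return fmt.Sprintf("WHERE name NOT IN (%s)", strings.Join(placeholders, ", ")), args
}

func main() {
	where, args := notInClause("postgres", []string{"_nas_service", "_plex_server"})
	fmt.Println(where, args) // WHERE name NOT IN ($1, $2) [_nas_service _plex_server]
}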
Apply Deletion Strategy --- - switch strategy { - case DeleteLogical: - // Logical Delete (Disable) for all resource types: marks resources NOT in the current enabled list as disabled - if err = s.disableMissingResources(ctx, "clusters", clusterNames); err != nil { - return fmt.Errorf("failed to logically delete missing clusters: %w", err) - } - // if err = s.disableMissingResources(ctx, "routes", routeNames); err != nil { // REMOVED - // return fmt.Errorf("failed to logically delete missing routes: %w", err) - // } - if err = s.disableMissingResources(ctx, "listeners", listenerNames); err != nil { - return fmt.Errorf("failed to logically delete missing listeners: %w", err) - } - - case DeleteActual: - // Actual Delete (Physical Removal) for all resources: removes resources NOT in the current enabled list - if err = s.deleteMissingResources(ctx, "clusters", clusterNames); err != nil { - return fmt.Errorf("failed to physically delete missing clusters: %w", err) - } - // if err = s.deleteMissingResources(ctx, "routes", routeNames); err != nil { // REMOVED - // return fmt.Errorf("failed to physically delete missing routes: %w", err) - // } - if err = s.deleteMissingResources(ctx, "listeners", listenerNames); err != nil { - return fmt.Errorf("failed to physically delete missing listeners: %w", err) - } - - case DeleteNone: - // Do nothing for missing resources - return nil - } - - return err -} - -// RemoveListener deletes a listener by name -func (s *Storage) RemoveListener(ctx context.Context, name string) error { - query := `DELETE FROM listeners WHERE name = ?` - if s.driver == "postgres" { - query = `DELETE FROM listeners WHERE name = $1` - } - _, err := s.db.ExecContext(ctx, query, name) - return err -} - -// RemoveCluster deletes a cluster by name -func (s *Storage) RemoveCluster(ctx context.Context, name string) error { - query := `DELETE FROM clusters WHERE name = ?` - if s.driver == "postgres" { - query = `DELETE FROM clusters WHERE name = $1` - } - _, err := s.db.ExecContext(ctx, query, name) - return err -} diff --git a/main.go b/main.go index f36a717..b6e2ff8 100644 --- a/main.go +++ b/main.go @@ -1,265 +1,32 @@ +// cmd/main.go package main import ( "context" - "database/sql" - "flag" - "fmt" - "net/http" "os" - "path/filepath" - "strings" - "github.com/envoyproxy/go-control-plane/pkg/cache/types" - cachev3 "github.com/envoyproxy/go-control-plane/pkg/cache/v3" - resourcev3 "github.com/envoyproxy/go-control-plane/pkg/resource/v3" - "github.com/envoyproxy/go-control-plane/pkg/server/v3" - "github.com/envoyproxy/go-control-plane/pkg/test/v3" - _ "github.com/lib/pq" // Postgres driver - _ "github.com/mattn/go-sqlite3" // SQLite driver - "k8s.io/klog/v2" - - "envoy-control-plane/internal" + "envoy-control-plane/internal/app" + "envoy-control-plane/internal/config" internallog "envoy-control-plane/internal/log" - "envoy-control-plane/internal/snapshot" - internalstorage "envoy-control-plane/internal/storage" + + "k8s.io/klog/v2" ) -var ( - // The logger variable should now be of the internal.Logger interface type - // to use the custom context functions. 
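One caveat worth flagging in SaveSnapshot: it begins a transaction and commits or rolls back in the defer, but SaveCluster, SaveListener, and the *MissingResources helpers all execute against s.db rather than tx, so the writes appear to bypass the transaction entirely. A common fix (a sketch, not the deleted code) is to program against the ExecContext subset shared by *sql.DB and *sql.Tx and thread the transaction through explicitly:

// Sketch: making the helpers transaction-aware via a shared interface.
package storage

import (
	"context"
	"database/sql"
)

// execer is satisfied by both *sql.DB and *sql.Tx.
type execer interface {
	ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
}

// saveClusterTx takes the transaction (or the bare DB) explicitly, so
// SaveSnapshot could pass its tx and actually get atomicity.
func saveClusterTx(ctx context.Context, ex execer, query, name, data string) error {
	_, err := ex.ExecContext(ctx, query, name, data)
	return err
}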
- logger internallog.Logger - port uint - nodeID string - restPort uint - snapshotFile string - configDir string - dbConnStr string - dbDriver string -) - -func init() { - // Initialize the default logger (which implements the internal.Logger interface) - logger = internallog.NewDefaultLogger() - klog.InitFlags(nil) - - flag.UintVar(&port, "port", 18000, "xDS management server port") - flag.StringVar(&nodeID, "nodeID", "test-id", "Node ID") - flag.UintVar(&restPort, "rest-port", 8080, "REST API server port") - flag.StringVar(&snapshotFile, "snapshot-file", "", "Optional initial snapshot JSON/YAML file") - flag.StringVar(&configDir, "config-dir", "data/", "Optional directory containing multiple config files") - flag.StringVar(&dbConnStr, "db", "", "Optional database connection string for config persistence") -} - -// determineDriver returns driver name from connection string -func determineDriver(dsn string) string { - if strings.HasPrefix(dsn, "postgres://") || strings.HasPrefix(dsn, "postgresql://") { - return "postgres" - } - return "sqlite3" -} - -// loadConfigFiles now accepts and uses a context -func loadConfigFiles(ctx context.Context, manager *snapshot.SnapshotManager, dir string) error { - log := internallog.LogFromContext(ctx) // Use the logger from context - - log.Infof("loading configuration files from directory: %s", dir) - - files, err := os.ReadDir(dir) - if err != nil { - return fmt.Errorf("failed to read directory %s: %w", dir, err) - } - - resourceFiles := make(map[string][]types.Resource) - for _, file := range files { - if file.IsDir() { - continue - } - fileName := file.Name() - if strings.HasSuffix(fileName, ".yaml") || strings.HasSuffix(fileName, ".yml") || strings.HasSuffix(fileName, ".json") { - filePath := filepath.Join(dir, fileName) - log.Infof(" -> loading config file: %s", filePath) - - rf, err := snapshot.LoadSnapshotFromFile(ctx, filePath) - if err != nil { - return fmt.Errorf("failed to load snapshot from file %s: %w", filePath, err) - } - for k, v := range rf { - resourceFiles[k] = append(resourceFiles[k], v...) - } - log.Infof("loaded %d resources from %s", len(rf), filePath) - } - } - - if err := manager.SetSnapshot(ctx, "snap-from-file", resourceFiles); err != nil { - return fmt.Errorf("failed to set combined snapshot from files: %w", err) - } - return nil -} - -// CORS is a middleware that sets the Access-Control-Allow-Origin header to * (all origins). -func CORS(next http.Handler) http.Handler { - // ... (CORS implementation remains unchanged) ... - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // Set CORS headers for all domains - w.Header().Set("Access-Control-Allow-Origin", "*") - w.Header().Set("Access-Control-Allow-Methods", "GET, POST, OPTIONS, PUT, DELETE") - w.Header().Set("Access-Control-Allow-Headers", "Content-Type, Access-Control-Allow-Headers, Authorization, X-Requested-With") - - // Handle preflight requests - if r.Method == "OPTIONS" { - w.WriteHeader(http.StatusOK) - return - } - - next.ServeHTTP(w, r) - }) -} - func main() { + // 1. Initialize and Parse Flags + config.InitFlags() // Initialize all flags from a central location flag.Parse() defer klog.Flush() - // 1. Create the root context and inject the logger + // 2. 
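determineDriver above keys purely off the DSN scheme: postgres:// or postgresql:// selects the Postgres driver, and everything else falls through to SQLite. A table-driven demo of the mapping (the DSN values are illustrative):

// Sketch: DSN scheme to database/sql driver name, as in the removed main.go.
package main

import (
	"fmt"
	"strings"
)

func determineDriver(dsn string) string {
	if strings.HasPrefix(dsn, "postgres://") || strings.HasPrefix(dsn, "postgresql://") {
		return "postgres"
	}
	return "sqlite3"
}

func main() {
	for _, dsn := range []string{
		"postgres://user:pass@localhost:5432/envoy",
		"file:data/config.db?_foreign_keys=on",
	} {
		fmt.Printf("%-45s -> %s\n", dsn, determineDriver(dsn))
	}
}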
Setup Logger and Context + logger := internallog.NewDefaultLogger() ctx := internallog.WithLogger(context.Background(), logger) - log := internallog.LogFromContext(ctx) // Now 'log' is the context-aware logger + log := internallog.LogFromContext(ctx) - // Default DB to SQLite file if none provided - if dbConnStr == "" { - defaultDBPath := "data/config.db" - if err := os.MkdirAll(filepath.Dir(defaultDBPath), 0755); err != nil { - fmt.Fprintf(os.Stderr, "failed to create data directory: %v\n", err) - os.Exit(1) - } - dbConnStr = fmt.Sprintf("file:%s?_foreign_keys=on", defaultDBPath) - dbDriver = "sqlite3" - } else { - dbDriver = determineDriver(dbConnStr) - } - // --- Database initialization --- - db, err := sql.Open(dbDriver, dbConnStr) - if err != nil { - log.Errorf("failed to connect to DB: %v", err) - os.Exit(1) - } - defer db.Close() - - // internal.NewStorage likely needs to be updated to accept a logger as well - // if its methods don't accept context, but we will pass context to its methods below. - storage := internalstorage.NewStorage(db, dbDriver) - // Pass the context with the logger down - if err := storage.InitSchema(ctx); err != nil { - log.Errorf("failed to initialize DB schema: %v", err) - os.Exit(1) - } - - // Create snapshot cache and manager - // NOTE: The Envoy cachev3.NewSnapshotCache takes a `log.Logger` from go-control-plane, - // which is likely a different interface. For now, we continue to use the global 'logger' - // variable (which is an internal.Logger that wraps klog, matching the go-control-plane - // logger behavior you previously set up) as a bridge, since it was initialized - // to log.NewDefaultLogger(). - cache := cachev3.NewSnapshotCache(false, cachev3.IDHash{}, logger) - manager := snapshot.NewSnapshotManager(cache, nodeID, storage) - - loadedConfigs := false - - // Step 1: Try to load snapshot from DB - // Pass the context with the logger down - snapCfg, err := storage.RebuildSnapshot(ctx) - if err == nil && len(snapCfg.EnabledClusters)+len(snapCfg.EnabledListeners) > 0 { - if err := manager.SetSnapshotFromConfig(ctx, "snap-from-db", snapCfg); err != nil { - log.Errorf("failed to set DB snapshot: %v", err) - os.Exit(1) - } - loadedConfigs = true - log.Infof("loaded snapshot from database") - } - - // Step 2: If DB empty, load from files and persist into DB - if !loadedConfigs { - if configDir != "" { - // Pass the context with the logger down - if err := loadConfigFiles(ctx, manager, configDir); err != nil { - log.Errorf("failed to load configs from directory: %v", err) - os.Exit(1) - } - loadedConfigs = true - } else if snapshotFile != "" { - if _, err := os.Stat(snapshotFile); err == nil { - resources, err := snapshot.LoadSnapshotFromFile(ctx, snapshotFile) - if err != nil { - log.Errorf("failed to load snapshot from file: %v", err) - os.Exit(1) - } - if err := manager.SetSnapshot(ctx, "snap-from-file", resources); err != nil { - log.Errorf("failed to set loaded snapshot: %v", err) - os.Exit(1) - } - loadedConfigs = true - } else { - log.Warnf("snapshot file not found: %s", snapshotFile) - } - } - - // Persist loaded snapshot into DB - if loadedConfigs { - // Pass the context with the logger down - snapCfg, err := manager.SnapshotToConfig(ctx, nodeID) - if err != nil { - log.Errorf("failed to convert snapshot to DB config: %v", err) - os.Exit(1) - } - // Pass the context with the logger down - if err := storage.SaveSnapshot(ctx, snapCfg, internalstorage.DeleteLogical); err != nil { - log.Errorf("failed to save initial snapshot into DB: %v", err) - 
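The default-database fallback in the removed main is worth isolating: with no -db flag, it creates data/config.db and enables foreign keys through go-sqlite3's _foreign_keys connection parameter. A sketch of that fallback as a function (defaultDSN is a hypothetical name; the original inlines this in main):

// Sketch: default SQLite DSN with the data directory created on demand.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func defaultDSN() (dsn, driver string, err error) {
	const dbPath = "data/config.db"
	if err := os.MkdirAll(filepath.Dir(dbPath), 0o755); err != nil {
		return "", "", fmt.Errorf("create data directory: %w", err)
	}
	return fmt.Sprintf("file:%s?_foreign_keys=on", dbPath), "sqlite3", nil
}

func main() {
	dsn, driver, err := defaultDSN()
	if err != nil {
		panic(err)
	}
	fmt.Println(driver, dsn) // sqlite3 file:data/config.db?_foreign_keys=on
}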
os.Exit(1) - } - log.Infof("initial snapshot written into database") - } - } - - // Step 3: Ensure snapshot exists in cache - snap, err := manager.Cache.GetSnapshot(nodeID) - if err != nil || !loadedConfigs { - log.Warnf("no valid snapshot found, creating empty snapshot") - snap, _ = cachev3.NewSnapshot("snap-init", map[resourcev3.Type][]types.Resource{ - resourcev3.ClusterType: {}, - resourcev3.RouteType: {}, - resourcev3.ListenerType: {}, - }) - // Pass the context with the logger down - if err := cache.SetSnapshot(ctx, nodeID, snap); err != nil { - log.Errorf("failed to set initial snapshot: %v", err) - os.Exit(1) - } - } - - log.Infof("xDS snapshot ready: version %s", snap.GetVersion(string(resourcev3.ClusterType))) - - // --- Start xDS gRPC server --- - // The root context with logger is used here - cb := &test.Callbacks{Debug: true} - srv := server.NewServer(ctx, cache, cb) - go internal.RunServer(srv, port) - - // --- Start REST API server --- - api := internal.NewAPI(manager) - mux := http.NewServeMux() - // NOTE: If api.RegisterRoutes uses a context to log, it should be updated. - api.RegisterRoutes(mux) - - // Wrap the main multiplexer with the CORS handler - corsHandler := CORS(mux) - - // NEW: Serve the index.html file and any other static assets - mux.Handle("/", http.FileServer(http.Dir("./static"))) // Assuming 'web' is the folder - - restAddr := fmt.Sprintf(":%d", restPort) - log.Infof("starting REST API server on %s", restAddr) - if err := http.ListenAndServe(restAddr, corsHandler); err != nil { // Use corsHandler - log.Errorf("REST server error: %v", err) + // 3. Run the Application + if err := app.Run(ctx); err != nil { + log.Errorf("Application failed: %v", err) os.Exit(1) } }
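One compile-level nit in the refactored main: flag.Parse() is still called, but the diff removes "flag" from the import list without adding it back, so the file as shown appears not to build. A minimal corrected version, assuming config.InitFlags registers its flags on the default flag.FlagSet (internal/app and internal/config are new in this change and their contents are not shown):

// Sketch: the refactored main with the "flag" import restored.
package main

import (
	"context"
	"flag"
	"os"

	"envoy-control-plane/internal/app"
	"envoy-control-plane/internal/config"
	internallog "envoy-control-plane/internal/log"

	"k8s.io/klog/v2"
)

func main() {
	config.InitFlags() // register all flags from a central location
	flag.Parse()
	defer klog.Flush()

	logger := internallog.NewDefaultLogger()
	ctx := internallog.WithLogger(context.Background(), logger)
	log := internallog.LogFromContext(ctx)

	if err := app.Run(ctx); err != nil {
		log.Errorf("Application failed: %v", err)
		os.Exit(1)
	}
}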