diff --git a/data/config.db b/data/config.db
new file mode 100644
index 0000000..bfbb3ed
--- /dev/null
+++ b/data/config.db
Binary files differ
diff --git a/docker-compose.yml b/docker-compose.yml
index 0997c5e..48b0157 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -14,7 +14,7 @@
       - "18000:18000"
     volumes:
       - data_volume:/app/data:rw
-    command: ["--nodeID", "proxy", "--config-dir", "app/data/config"]
+    command: ["--nodeID", "home", "--config-dir", "app/data/config"]
 # Define the volumes used by the services
 volumes:
   # Define a named volume for your code
diff --git a/go.mod b/go.mod
index 19d5ad9..c302257 100644
--- a/go.mod
+++ b/go.mod
@@ -16,6 +16,8 @@
 	github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 // indirect
 	github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect
 	github.com/go-logr/logr v1.4.3 // indirect
+	github.com/lib/pq v1.10.9 // indirect
+	github.com/mattn/go-sqlite3 v1.14.32 // indirect
 	github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
 	golang.org/x/net v0.41.0 // indirect
 	golang.org/x/sys v0.33.0 // indirect
diff --git a/go.sum b/go.sum
index 50c4b3f..793cc5a 100644
--- a/go.sum
+++ b/go.sum
@@ -22,6 +22,10 @@
 github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
+github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/mattn/go-sqlite3 v1.14.32 h1:JD12Ag3oLy1zQA+BNn74xRgaBbdhbNIDYvQUEuuErjs=
+github.com/mattn/go-sqlite3 v1.14.32/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
 github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo=
 github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
diff --git a/internal/README.md b/internal/README.md
new file mode 100644
index 0000000..fee36b8
--- /dev/null
+++ b/internal/README.md
@@ -0,0 +1,85 @@
+# Envoy xDS Snapshot Management API
+
+This package provides a **REST API** built in Go for managing the configuration snapshot (Clusters and Listeners) used by an Envoy **xDS control plane**. It allows for dynamic addition, removal, enabling/disabling, querying, and persistence of Envoy configuration resources.
+
+## 🚀 Features
+
+* **Dynamic Configuration:** Add, remove, enable, and disable **Clusters** and **Listeners** on the fly.
+* **Querying:** Retrieve the current list or details of configured resources.
+* **Persistence:** Save the current configuration snapshot to a **Database** or a **File** and load it back.
+* **Envoy xDS Types:** Utilizes official Envoy Go types (`github.com/envoyproxy/go-control-plane`) for resource definitions.
+
+---
+
+## 💻 API Endpoints
+
+All endpoints expect a **JSON** request body (for POST) or use **query parameters** (for GET).
+
+### Configuration Management (POST)
+
+These endpoints allow you to add, remove, enable, or disable Envoy xDS resources in the current snapshot.
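+
+For example, adding a cluster might use a request body like the sketch below (illustrative values only; the `cluster` field follows the Envoy v3 `Cluster` proto in its JSON form, and the exact fields accepted depend on how the server decodes the payload):
+
+```json
+{
+  "name": "my-cluster",
+  "cluster": {
+    "name": "my-cluster",
+    "connect_timeout": "5s",
+    "type": "STRICT_DNS",
+    "lb_policy": "ROUND_ROBIN"
+  }
+}
+```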
+
+| Endpoint | Method | Description | Request Body Example |
+| :--- | :--- | :--- | :--- |
+| `/add-cluster` | `POST` | Adds a new Envoy Cluster. | `{"name": "my-cluster", "cluster": {...}}` |
+| `/disable-cluster` | `POST` | Logically disables a Cluster: it is removed from the active snapshot but kept in the DB. | `{"name": "my-cluster"}` |
+| `/enable-cluster` | `POST` | Re-enables a previously disabled Cluster and reapplies it to the cache. | `{"name": "my-cluster"}` |
+| `/remove-cluster` | `POST` | Permanently removes a Cluster from the DB (it must be disabled first). | `{"name": "my-cluster"}` |
+| `/add-listener` | `POST` | Adds a new Envoy Listener. | `{"name": "my-listener", "listener": {...}}` |
+| `/disable-listener` | `POST` | Logically disables a Listener: it is removed from the active snapshot but kept in the DB. | `{"name": "my-listener"}` |
+| `/enable-listener` | `POST` | Re-enables a previously disabled Listener and reapplies it to the cache. | `{"name": "my-listener"}` |
+| `/remove-listener` | `POST` | Permanently removes a Listener from the DB (it must be disabled first). | `{"name": "my-listener"}` |
+
+> **Note:** The `cluster` and `listener` fields in the requests must contain the full, JSON-marshaled protobuf structure for `clusterv3.Cluster` and `listenerv3.Listener` respectively.
+
+---
+
+### Query and Listing (GET)
+
+Use these endpoints to inspect the current state of the configuration.
+
+| Endpoint | Method | Description | Query Parameters | Response Structure |
+| :--- | :--- | :--- | :--- | :--- |
+| `/list-clusters` | `GET` | Lists all **enabled** and **disabled** Clusters. | None | `{"enabled": [...], "disabled": [...]}` |
+| `/get-cluster` | `GET` | Retrieves the details of a specific Cluster. | `?name=my-cluster` (optional `format=yaml`) | Full Cluster Protobuf JSON (or YAML) |
+| `/list-listeners` | `GET` | Lists all **enabled** and **disabled** Listeners. | None | `{"enabled": [...], "disabled": [...]}` |
+| `/get-listener` | `GET` | Retrieves the details of a specific Listener. | `?name=my-listener` (optional `format=yaml`) | Full Listener Protobuf JSON (or YAML) |
+| `/is-consistent` | `GET` | Compares the in-memory cache against the enabled resources in the DB. | None | Consistency report |
+
+---
+
+### Persistence (POST)
+
+These handlers manage loading and saving the configuration snapshot from/to persistent storage (Database or local File).
+
+#### Database Handlers
+
+| Endpoint | Method | Description | Query Parameters |
+| :--- | :--- | :--- | :--- |
+| `/load-from-db` | `POST` | Loads the complete configuration from the persistent database into the snapshot cache. | None |
+| `/flush-to-db` | `POST` | Saves the current snapshot from the cache (source of truth) to the persistent database. | `?deleteMissing=true/1` (Optional) |
+
+> **`/flush-to-db` Logic:**
+> * By **default**, it performs a *logical* flush (updates existing, adds new, but does **not** physically delete resources from the DB).
+> * If the query parameter `deleteMissing=true` or `deleteMissing=1` is provided, it performs an *actual* flush, physically **deleting** any resources from the database that are no longer present in the in-memory cache.
+
+#### File Handlers
+
+| Endpoint | Method | Description | Request Body Example |
+| :--- | :--- | :--- | :--- |
+| `/load-from-file` | `POST` | Loads a snapshot from a local file at the specified path and applies it to the cache. | `{"path": "/tmp/snapshot.json"}` |
+| `/save-to-file` | `POST` | Saves the current cache snapshot to a local file at the specified path. 
| `{"path": "/tmp/snapshot.json"}` | \ No newline at end of file diff --git a/internal/converter.go b/internal/converter.go new file mode 100644 index 0000000..e0ae349 --- /dev/null +++ b/internal/converter.go @@ -0,0 +1,37 @@ +package internal + +import ( + "bytes" + "encoding/json" + "fmt" + + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" + yaml "gopkg.in/yaml.v3" +) + +// ConvertProtoToYAML converts any protobuf message to a YAML string with 2-space indentation +func ConvertProtoToYAML(msg proto.Message) (string, error) { + // 1. Marshal protobuf to JSON + jsonBytes, err := protojson.Marshal(msg) + if err != nil { + return "", fmt.Errorf("failed to marshal proto to JSON: %w", err) + } + + // 2. Unmarshal JSON into a generic Go map + var obj interface{} + if err := json.Unmarshal(jsonBytes, &obj); err != nil { + return "", fmt.Errorf("failed to unmarshal JSON: %w", err) + } + + // 3. Marshal the map to YAML with 2-space indentation + var buf bytes.Buffer + encoder := yaml.NewEncoder(&buf) + encoder.SetIndent(2) // set indentation to 2 spaces + if err := encoder.Encode(obj); err != nil { + return "", fmt.Errorf("failed to encode YAML: %w", err) + } + encoder.Close() + + return buf.String(), nil +} diff --git a/internal/rest_api.go b/internal/rest_api.go index 8ed8d2b..933d07d 100644 --- a/internal/rest_api.go +++ b/internal/rest_api.go @@ -1,13 +1,15 @@ package internal import ( + "context" "encoding/json" "fmt" "net/http" + clusterv3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + listenerv3 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" "github.com/envoyproxy/go-control-plane/pkg/cache/types" resourcev3 "github.com/envoyproxy/go-control-plane/pkg/resource/v3" - "github.com/google/uuid" "google.golang.org/protobuf/encoding/protojson" "google.golang.org/protobuf/reflect/protoreflect" ) @@ -19,7 +21,8 @@ // AddClusterRequest defines payload to add a cluster type AddClusterRequest struct { - Name string `json:"name"` + Name string `json:"name"` + Cluster clusterv3.Cluster `json:"cluster"` } // RemoveClusterRequest defines payload to remove a cluster @@ -27,15 +30,15 @@ Name string `json:"name"` } -// AddRouteRequest defines payload to add a route -type AddRouteRequest struct { - Name string `json:"name"` - Cluster string `json:"cluster"` - PathPrefix string `json:"path_prefix"` +// EnableResourceRequest defines payload to enable a resource +// This will be used for both enable-cluster and enable-listener +type EnableResourceRequest struct { + Name string `json:"name"` } -// RemoveRouteRequest defines payload to remove a route -type RemoveRouteRequest struct { +// RemoveResourceRequest defines payload to remove a resource. +// Used for both /remove-cluster and /remove-listener. 
+type RemoveResourceRequest struct { Name string `json:"name"` } @@ -46,8 +49,8 @@ // AddListenerRequest defines payload to add a listener type AddListenerRequest struct { - Name string `json:"name"` - Port uint32 `json:"port"` + Name string `json:"name"` + Listener listenerv3.Listener `json:"listener"` } // RemoveListenerRequest defines payload to remove a listener @@ -64,16 +67,40 @@ // RegisterRoutes mounts REST handlers func (api *API) RegisterRoutes(mux *http.ServeMux) { - // Management Handlers (Add / Remove) - mux.HandleFunc("/add-cluster", api.addCluster) + // Management Handlers (Add / Remove / Enable / Disable) + + // Cluster Handlers + mux.HandleFunc("/add-cluster", func(w http.ResponseWriter, r *http.Request) { + api.addResourceHandler(w, r, resourcev3.ClusterType, func(req interface{}) types.Resource { + cr := req.(*AddClusterRequest) + return &cr.Cluster + }) + }) + mux.HandleFunc("/disable-cluster", func(w http.ResponseWriter, r *http.Request) { + api.disableResourceHandler(w, r, resourcev3.ClusterType) + }) + mux.HandleFunc("/enable-cluster", func(w http.ResponseWriter, r *http.Request) { + api.enableResourceHandler(w, r, resourcev3.ClusterType) + }) + // NEW: Remove Cluster Handler mux.HandleFunc("/remove-cluster", func(w http.ResponseWriter, r *http.Request) { api.removeResourceHandler(w, r, resourcev3.ClusterType) }) - mux.HandleFunc("/add-route", api.addRoute) - mux.HandleFunc("/remove-route", func(w http.ResponseWriter, r *http.Request) { - api.removeResourceHandler(w, r, resourcev3.RouteType) + + // Listener Handlers + mux.HandleFunc("/add-listener", func(w http.ResponseWriter, r *http.Request) { + api.addResourceHandler(w, r, resourcev3.ListenerType, func(req interface{}) types.Resource { + lr := req.(*AddListenerRequest) + return &lr.Listener + }) }) - mux.HandleFunc("/add-listener", api.addListener) + mux.HandleFunc("/disable-listener", func(w http.ResponseWriter, r *http.Request) { + api.disableResourceHandler(w, r, resourcev3.ListenerType) + }) + mux.HandleFunc("/enable-listener", func(w http.ResponseWriter, r *http.Request) { + api.enableResourceHandler(w, r, resourcev3.ListenerType) + }) + // NEW: Remove Listener Handler mux.HandleFunc("/remove-listener", func(w http.ResponseWriter, r *http.Request) { api.removeResourceHandler(w, r, resourcev3.ListenerType) }) @@ -86,46 +113,124 @@ api.getResourceHandler(w, r, resourcev3.ClusterType) }) - mux.HandleFunc("/list-routes", func(w http.ResponseWriter, r *http.Request) { - api.listResourceHandler(w, r, resourcev3.RouteType) - }) - mux.HandleFunc("/get-route", func(w http.ResponseWriter, r *http.Request) { - api.getResourceHandler(w, r, resourcev3.RouteType) - }) - mux.HandleFunc("/list-listeners", func(w http.ResponseWriter, r *http.Request) { api.listResourceHandler(w, r, resourcev3.ListenerType) }) mux.HandleFunc("/get-listener", func(w http.ResponseWriter, r *http.Request) { api.getResourceHandler(w, r, resourcev3.ListenerType) }) + + // Persistence Handlers (NEW) + mux.HandleFunc("/load-from-db", api.loadSnapshotFromDB) + mux.HandleFunc("/flush-to-db", api.flushCacheToDB) + mux.HandleFunc("/load-from-file", api.loadSnapshotFromFile) + mux.HandleFunc("/save-to-file", api.saveSnapshotToFile) + + // Consistency Handler (NEW) + mux.HandleFunc("/is-consistent", api.isConsistentHandler) } -// ---------------- Cluster / Route / Listener Handlers Using Generic ---------------- +// ---------------- Persistence Handlers ---------------- -func (api *API) addCluster(w http.ResponseWriter, r *http.Request) { - 
api.addResourceHandler(w, r, resourcev3.ClusterType, func(req interface{}) types.Resource {
-		cr := req.(*AddClusterRequest)
-		name := cr.Name
-		if name == "" {
-			name = uuid.NewString()
-		}
-		return NewCluster(name)
-	})
+// loadSnapshotFromDB loads the full configuration from the persistent database
+// into the SnapshotManager's Envoy Cache.
+func (api *API) loadSnapshotFromDB(w http.ResponseWriter, r *http.Request) {
+	if r.Method != http.MethodPost {
+		http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
+		return
+	}
+	w.Header().Set("Content-Type", "application/json")
+
+	// Use context.Background() since this is a top-level operation
+	if err := api.Manager.LoadSnapshotFromDB(context.Background()); err != nil {
+		http.Error(w, fmt.Sprintf("failed to load from DB: %v", err), http.StatusInternalServerError)
+		return
+	}
+
+	w.WriteHeader(http.StatusOK)
+	json.NewEncoder(w).Encode(map[string]string{"status": "ok", "message": "Configuration loaded from DB and applied to cache."})
 }
 
-func (api *API) addRoute(w http.ResponseWriter, r *http.Request) {
-	api.addResourceHandler(w, r, resourcev3.RouteType, func(req interface{}) types.Resource {
-		rr := req.(*AddRouteRequest)
-		return NewRoute(rr.Name, rr.Cluster, rr.PathPrefix)
-	})
+// flushCacheToDB saves the current configuration from the Envoy Cache (source of truth)
+// to the persistent database.
+func (api *API) flushCacheToDB(w http.ResponseWriter, r *http.Request) {
+	if r.Method != http.MethodPost {
+		http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
+		return
+	}
+	w.Header().Set("Content-Type", "application/json")
+
+	// Default to DeleteLogical: missing resources are marked as disabled, never physically deleted
+	deleteStrategy := DeleteLogical
+
+	// Check for 'deleteMissing' query parameter. If "true" or "1", switch to DeleteActual.
+	deleteMissingStr := r.URL.Query().Get("deleteMissing")
+	if deleteMissingStr == "true" || deleteMissingStr == "1" {
+		deleteStrategy = DeleteActual
+	}
+
+	// Use context.Background() since this is a top-level operation
+	// Pass the determined DeleteStrategy
+	if err := api.Manager.FlushCacheToDB(context.Background(), deleteStrategy); err != nil {
+		http.Error(w, fmt.Sprintf("failed to flush to DB: %v", err), http.StatusInternalServerError)
+		return
+	}
+
+	w.WriteHeader(http.StatusOK)
+	json.NewEncoder(w).Encode(map[string]string{"status": "ok", "message": "Configuration saved from cache to DB."})
 }
 
-func (api *API) addListener(w http.ResponseWriter, r *http.Request) {
-	api.addResourceHandler(w, r, resourcev3.ListenerType, func(req interface{}) types.Resource {
-		lr := req.(*AddListenerRequest)
-		return NewListener(lr.Name, lr.Port)
-	})
+// loadSnapshotFromFile loads a snapshot from a local file and applies it to the cache.
+func (api *API) loadSnapshotFromFile(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + return + } + w.Header().Set("Content-Type", "application/json") + + var req SnapshotFileRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil || req.Path == "" { + http.Error(w, "path required in request body", http.StatusBadRequest) + return + } + + resources, err := api.Manager.LoadSnapshotFromFile(req.Path) + if err != nil { + http.Error(w, fmt.Sprintf("failed to load snapshot from file: %v", err), http.StatusInternalServerError) + return + } + + // Use context.Background() + if err := api.Manager.SetSnapshot(context.Background(), req.Path, resources); err != nil { + http.Error(w, fmt.Sprintf("failed to set snapshot: %v", err), http.StatusInternalServerError) + return + } + + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(map[string]string{"status": "ok", "message": fmt.Sprintf("Snapshot loaded from %s and applied.", req.Path)}) +} + +// saveSnapshotToFile saves the current cache snapshot to a local file. +func (api *API) saveSnapshotToFile(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + return + } + w.Header().Set("Content-Type", "application/json") + + var req SnapshotFileRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil || req.Path == "" { + http.Error(w, "path required in request body", http.StatusBadRequest) + return + } + + if err := api.Manager.SaveSnapshotToFile(req.Path); err != nil { + http.Error(w, fmt.Sprintf("failed to save snapshot to file: %v", err), http.StatusInternalServerError) + return + } + + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(map[string]string{"status": "ok", "message": fmt.Sprintf("Snapshot saved to %s.", req.Path)}) } // ---------------- Generic REST Handlers ---------------- @@ -142,8 +247,8 @@ switch typ { case resourcev3.ClusterType: req = &AddClusterRequest{} - case resourcev3.RouteType: - req = &AddRouteRequest{} + // case resourcev3.RouteType: + // req = &AddRouteRequest{} case resourcev3.ListenerType: req = &AddListenerRequest{} default: @@ -157,16 +262,20 @@ } res := createFn(req) - if err := api.Manager.AddResource(res, typ); err != nil { + if err := api.Manager.AddResourceToSnapshot(res, typ); err != nil { http.Error(w, fmt.Sprintf("failed to add resource: %v", err), http.StatusInternalServerError) return } + if err := api.Manager.FlushCacheToDB(context.Background(), DeleteLogical); err != nil { + http.Error(w, fmt.Sprintf("failed to persist resource to DB: %v", err), http.StatusInternalServerError) + return + } w.WriteHeader(http.StatusCreated) json.NewEncoder(w).Encode(map[string]string{"name": res.(interface{ GetName() string }).GetName()}) } -func (api *API) removeResourceHandler(w http.ResponseWriter, r *http.Request, typ resourcev3.Type) { +func (api *API) disableResourceHandler(w http.ResponseWriter, r *http.Request, typ resourcev3.Type) { if r.Method != http.MethodPost { http.Error(w, "method not allowed", http.StatusMethodNotAllowed) return @@ -179,7 +288,7 @@ return } - if err := api.Manager.RemoveResource(req.Name, typ); err != nil { + if err := api.Manager.RemoveResource(req.Name, typ, DeleteLogical); err != nil { http.Error(w, fmt.Sprintf("failed to remove resource: %v", err), http.StatusInternalServerError) return } @@ -187,6 +296,78 @@ w.WriteHeader(http.StatusOK) } +// enableResourceHandler 
fetches a disabled resource from the DB and enables it (adds to cache). +func (api *API) enableResourceHandler(w http.ResponseWriter, r *http.Request, typ resourcev3.Type) { + if r.Method != http.MethodPost { + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + return + } + w.Header().Set("Content-Type", "application/json") + + var req EnableResourceRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil || req.Name == "" { + http.Error(w, "name required", http.StatusBadRequest) + return + } + + // Call the Manager function to pull the resource from DB, enable it, and add to the cache. + // The implementation of EnableResourceFromDB is assumed to exist in SnapshotManager. + if err := api.Manager.EnableResourceFromDB(req.Name, typ); err != nil { + // Use StatusNotFound if the error indicates the resource wasn't found in the disabled list + // NOTE: The exact error string "not found" or similar must match the error returned by Manager.EnableResourceFromDB + if err.Error() == fmt.Sprintf("disabled resource %s not found in DB for type %s", req.Name, typ) { + http.Error(w, fmt.Sprintf("disabled resource '%s' not found or already enabled: %v", req.Name, err), http.StatusNotFound) + } else { + http.Error(w, fmt.Sprintf("failed to enable resource '%s' from DB: %v", req.Name, err), http.StatusInternalServerError) + } + return + } + + // Reload the cache again from DB to ensure consistency. + if err := api.Manager.LoadSnapshotFromDB(context.Background()); err != nil { + http.Error(w, fmt.Sprintf("failed to reload snapshot from DB after enabling resource: %v", err), http.StatusInternalServerError) + return + } + + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(map[string]string{"status": "ok", "message": fmt.Sprintf("Resource '%s' enabled and applied to cache.", req.Name)}) +} + +// ---------------- NEW: Generic Remove Handler ---------------- + +// removeResourceHandler removes a resource completely from the DB (DeleteActual). +// It requires the resource to be disabled (not in the cache) before actual deletion. +func (api *API) removeResourceHandler(w http.ResponseWriter, r *http.Request, typ resourcev3.Type) { + if r.Method != http.MethodPost { + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + return + } + w.Header().Set("Content-Type", "application/json") + + var req RemoveResourceRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil || req.Name == "" { + http.Error(w, "name required", http.StatusBadRequest) + return + } + + // Use DeleteActual strategy for permanent removal from the DB. + // The Manager implementation must enforce the 'must be disabled' rule. + if err := api.Manager.RemoveResource(req.Name, typ, DeleteActual); err != nil { + // Manager is assumed to return a specific error if the resource is still enabled. 
+ if err.Error() == fmt.Sprintf("resource %s for type %s is enabled and must be disabled before removal", req.Name, typ) { + http.Error(w, fmt.Sprintf("resource '%s' must be disabled first before permanent removal: %v", req.Name, err), http.StatusBadRequest) + } else { + http.Error(w, fmt.Sprintf("failed to permanently remove resource: %v", err), http.StatusInternalServerError) + } + return + } + + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(map[string]string{"status": "ok", "message": fmt.Sprintf("Resource '%s' permanently removed.", req.Name)}) +} + +// ---------------- Query / List Handlers ---------------- + func (api *API) listResourceHandler(w http.ResponseWriter, r *http.Request, typ resourcev3.Type) { if r.Method != http.MethodGet { http.Error(w, "method not allowed", http.StatusMethodNotAllowed) @@ -194,22 +375,27 @@ } w.Header().Set("Content-Type", "application/json") - resources, err := api.Manager.ListResources(typ) + enabledResources, disabledResources, err := api.Manager.ListResources(typ) if err != nil { http.Error(w, fmt.Sprintf("failed to list resources: %v", err), http.StatusInternalServerError) return } - out := []json.RawMessage{} - for _, res := range resources { - if pb, ok := res.(interface{ ProtoReflect() protoreflect.Message }); ok { - data, _ := protojson.Marshal(pb) - out = append(out, data) - } + // Create the final response object structure + response := struct { + Enabled []types.Resource `json:"enabled"` + Disabled []types.Resource `json:"disabled"` + }{ + Enabled: enabledResources, + Disabled: disabledResources, } w.WriteHeader(http.StatusOK) - json.NewEncoder(w).Encode(out) + // Encode the new struct instead of the flat 'out' slice + if err := json.NewEncoder(w).Encode(response); err != nil { + // Handle encoding error, although unlikely if marshaling was fine + http.Error(w, "failed to encode response", http.StatusInternalServerError) + } } func (api *API) getResourceHandler(w http.ResponseWriter, r *http.Request, typ resourcev3.Type) { @@ -217,7 +403,6 @@ http.Error(w, "method not allowed", http.StatusMethodNotAllowed) return } - w.Header().Set("Content-Type", "application/json") name := r.URL.Query().Get("name") if name == "" { @@ -225,24 +410,71 @@ return } - res, err := api.Manager.GetResource(name, typ) + format := r.URL.Query().Get("format") + if format == "" { + format = "json" // default format + } + + res, err := api.Manager.GetResourceFromCache(name, typ) if err != nil { http.Error(w, fmt.Sprintf("resource not found: %v", err), http.StatusNotFound) return } - // Marshal using protojson for full nested fields - if pb, ok := res.(interface{ ProtoReflect() protoreflect.Message }); ok { - data, err := protojson.Marshal(pb) - if err != nil { - http.Error(w, fmt.Sprintf("failed to marshal protobuf: %v", err), http.StatusInternalServerError) - return - } - w.WriteHeader(http.StatusOK) - w.Write(data) + pb, ok := res.(interface{ ProtoReflect() protoreflect.Message }) + if !ok { + http.Error(w, "resource is not a protobuf message", http.StatusInternalServerError) return } - // fallback for non-proto resources - http.Error(w, "resource is not a protobuf message", http.StatusInternalServerError) + var output []byte + switch format { + case "yaml": + yamlStr, err := ConvertProtoToYAML(pb) + if err != nil { + http.Error(w, fmt.Sprintf("failed to convert resource to YAML: %v", err), http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/x-yaml") + output = []byte(yamlStr) + default: // json + data, err := 
protojson.Marshal(pb) + if err != nil { + http.Error(w, fmt.Sprintf("failed to marshal protobuf to JSON: %v", err), http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/json") + output = data + } + + w.WriteHeader(http.StatusOK) + w.Write(output) +} + +// ---------------- Consistency Handler ---------------- + +// isConsistentHandler checks whether the current in-memory cache is consistent with the database. +func (api *API) isConsistentHandler(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodGet { + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + return + } + w.Header().Set("Content-Type", "application/json") + + consistent, err := api.Manager.CheckCacheDBConsistency(context.TODO()) // Assumes IsConsistent method exists on SnapshotManager + if err != nil { + http.Error(w, fmt.Sprintf("failed to check consistency: %v", err), http.StatusInternalServerError) + return + } + + response := struct { + Consistent *ConsistencyReport `json:"consistent"` + }{ + Consistent: consistent, + } + + w.WriteHeader(http.StatusOK) + if err := json.NewEncoder(w).Encode(response); err != nil { + http.Error(w, "failed to encode response", http.StatusInternalServerError) + } } diff --git a/internal/snapshot.go b/internal/snapshot.go index 5082e37..5b64822 100644 --- a/internal/snapshot.go +++ b/internal/snapshot.go @@ -8,38 +8,42 @@ "time" clusterv3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" - corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" endpointv3 "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" listenerv3 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" - routev3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + secretv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" + + // Ensure all standard filters are imported for proto unmarshalling _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/jwt_authn/v3" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/lua/v3" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/oauth2/v3" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/listener/tls_inspector/v3" - secretv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" - - // New Import for Router Filter _ "github.com/envoyproxy/go-control-plane/envoy/service/runtime/v3" + "github.com/envoyproxy/go-control-plane/pkg/cache/types" cachev3 "github.com/envoyproxy/go-control-plane/pkg/cache/v3" resourcev3 "github.com/envoyproxy/go-control-plane/pkg/resource/v3" "google.golang.org/protobuf/encoding/protojson" - "google.golang.org/protobuf/types/known/durationpb" yaml "gopkg.in/yaml.v3" ) +// ResourceNamer is an interface implemented by all xDS resources with a GetName() method. 
+type ResourceNamer interface { + GetName() string +} + // SnapshotManager wraps a SnapshotCache and provides file loading/modifying type SnapshotManager struct { Cache cachev3.SnapshotCache NodeID string + DB *Storage // Assuming Storage is defined elsewhere and has RebuildSnapshot/LoadAll* methods } -// NewSnapshotManager creates a new manager for a given cache and node -func NewSnapshotManager(cache cachev3.SnapshotCache, nodeID string) *SnapshotManager { +func NewSnapshotManager(cache cachev3.SnapshotCache, nodeID string, db *Storage) *SnapshotManager { return &SnapshotManager{ Cache: cache, NodeID: nodeID, + DB: db, } } @@ -69,14 +73,30 @@ typ := resourcev3.Type(typStr) // only process known top-level xDS resources - switch typ { - case resourcev3.ClusterType, - resourcev3.RouteType, - resourcev3.ListenerType, - resourcev3.EndpointType, - resourcev3.SecretType, - resourcev3.RuntimeType: + var resource types.Resource + var newResource bool + switch typ { + case resourcev3.ClusterType: + resource = &clusterv3.Cluster{} + newResource = true + case resourcev3.ListenerType: + resource = &listenerv3.Listener{} + newResource = true + case resourcev3.EndpointType: + resource = &endpointv3.ClusterLoadAssignment{} + newResource = true + case resourcev3.SecretType: + resource = &secretv3.Secret{} + newResource = true + case resourcev3.RuntimeType: + // resource = &runtimev3.Runtime{} // Placeholder, assuming it's correctly imported + // newResource = true + default: + // Skip nested or unsupported types + } + + if newResource { // Remove @type before unmarshalling delete(v, "@type") @@ -85,47 +105,10 @@ return fmt.Errorf("failed to marshal resource node to JSON: %w", err) } - fmt.Printf("Detected resource type: %s\n", typ) - - switch typ { - case resourcev3.ClusterType: - var c clusterv3.Cluster - if err := protojson.Unmarshal(jsonBytes, &c); err != nil { - return fmt.Errorf("failed to unmarshal Cluster: %w", err) - } - resources[typ] = append(resources[typ], &c) - - case resourcev3.RouteType: - var rt routev3.RouteConfiguration - if err := protojson.Unmarshal(jsonBytes, &rt); err != nil { - return fmt.Errorf("failed to unmarshal RouteConfiguration: %w", err) - } - resources[typ] = append(resources[typ], &rt) - - case resourcev3.ListenerType: - var l listenerv3.Listener - if err := protojson.Unmarshal(jsonBytes, &l); err != nil { - return fmt.Errorf("failed to unmarshal Listener: %w", err) - } - resources[typ] = append(resources[typ], &l) - - case resourcev3.EndpointType: - var eds endpointv3.ClusterLoadAssignment - if err := protojson.Unmarshal(jsonBytes, &eds); err != nil { - return fmt.Errorf("failed to unmarshal ClusterLoadAssignment: %w", err) - } - resources[typ] = append(resources[typ], &eds) - - case resourcev3.SecretType: - var sec secretv3.Secret - if err := protojson.Unmarshal(jsonBytes, &sec); err != nil { - return fmt.Errorf("failed to unmarshal Secret: %w", err) - } - resources[typ] = append(resources[typ], &sec) + if err := protojson.Unmarshal(jsonBytes, resource); err != nil { + return fmt.Errorf("failed to unmarshal %s: %w", typ, err) } - default: - // skip nested extension/filter types (handled inside parent) - fmt.Printf("Skipping nested type: %s\n", typStr) + resources[typ] = append(resources[typ], resource) } } @@ -153,6 +136,76 @@ return resources, nil } +// SetSnapshotFromConfig sets a snapshot from an aggregated SnapshotConfig +func (sm *SnapshotManager) SetSnapshotFromConfig(ctx context.Context, version string, cfg *SnapshotConfig) error { + if cfg == nil { + return 
fmt.Errorf("snapshot config is nil") + } + + // Ensure version is not empty + if version == "" { + version = fmt.Sprintf("snap-%d", time.Now().UnixNano()) + } + + // Build the resource map expected by cachev3.NewSnapshot + resources := map[resourcev3.Type][]types.Resource{ + resourcev3.ClusterType: make([]types.Resource, len(cfg.EnabledClusters)), + resourcev3.ListenerType: make([]types.Resource, len(cfg.EnabledListeners)), + // Other types if supported by SnapshotConfig, can be added here + } + + // Populate slices by direct type assertion and conversion + for i, c := range cfg.EnabledClusters { + resources[resourcev3.ClusterType][i] = c + } + for i, l := range cfg.EnabledListeners { + resources[resourcev3.ListenerType][i] = l + } + + // Create the snapshot + snap, err := cachev3.NewSnapshot(version, resources) + if err != nil { + return fmt.Errorf("failed to create snapshot: %w", err) + } + + // Apply snapshot to the cache + if err := sm.Cache.SetSnapshot(ctx, sm.NodeID, snap); err != nil { + return fmt.Errorf("failed to set snapshot: %w", err) + } + + return nil +} + +// SnapshotToConfig converts current cache snapshot into SnapshotConfig +func (sm *SnapshotManager) SnapshotToConfig(ctx context.Context, nodeID string) (*SnapshotConfig, error) { + snap, err := sm.Cache.GetSnapshot(nodeID) + if err != nil { + return nil, fmt.Errorf("failed to get snapshot for node %s: %w", nodeID, err) + } + + config := &SnapshotConfig{ + EnabledClusters: []*clusterv3.Cluster{}, + EnabledListeners: []*listenerv3.Listener{}, + // Disabled fields are not populated from the cache, only enabled ones. + } + + // Convert Cluster resources + for _, r := range snap.GetResources(string(resourcev3.ClusterType)) { + if c, ok := r.(*clusterv3.Cluster); ok { + config.EnabledClusters = append(config.EnabledClusters, c) + } + } + + // Convert Listener resources + for _, r := range snap.GetResources(string(resourcev3.ListenerType)) { + if l, ok := r.(*listenerv3.Listener); ok { + config.EnabledListeners = append(config.EnabledListeners, l) + } + } + + return config, nil +} + // SetSnapshot sets a full snapshot func (sm *SnapshotManager) SetSnapshot(ctx context.Context, version string, resources map[resourcev3.Type][]types.Resource) error { snap, err := cachev3.NewSnapshot(version, resources) @@ -162,229 +215,270 @@ return sm.Cache.SetSnapshot(ctx, sm.NodeID, snap) } -// ---------------- Add / Remove / List ---------------- +// ---------------- Persistence and Sync Methods (Two-Layer Model) ---------------- -// AddCluster adds a cluster to the snapshot -func (sm *SnapshotManager) AddCluster(cluster *clusterv3.Cluster) error { - snap, _ := sm.Cache.GetSnapshot(sm.NodeID) - clusters := mapToSlice(snap.GetResources(string(resourcev3.ClusterType))) - routes := mapToSlice(snap.GetResources(string(resourcev3.RouteType))) - listeners := mapToSlice(snap.GetResources(string(resourcev3.ListenerType))) +// LoadSnapshotFromDB implements the sync DB data to cache. It rebuilds the snapshot +// from the persistent store and updates the Envoy cache. +func (sm *SnapshotManager) LoadSnapshotFromDB(ctx context.Context) error { + fmt.Println("Loading configuration from main DB...") - clusters = append(clusters, cluster) + // 1. 
Try Database (Primary Source) + cfg, err := sm.DB.RebuildSnapshot(ctx) + if err != nil { + return fmt.Errorf("failed to rebuild snapshot from DB: %w", err) + } - newSnap, _ := cachev3.NewSnapshot( - "snap-"+cluster.GetName(), - map[resourcev3.Type][]types.Resource{ - resourcev3.ClusterType: clusters, - resourcev3.RouteType: routes, - resourcev3.ListenerType: listeners, - }, - ) - return sm.Cache.SetSnapshot(context.TODO(), sm.NodeID, newSnap) + fmt.Println("Loaded configuration from DB. Updating Envoy Cache.") + + // 2. Update Envoy's in-memory cache (This is the 'flash cache' layer) + return sm.SetSnapshotFromConfig(ctx, "db-sync-"+time.Now().Format(time.RFC3339), cfg) } -// AddRoute adds a route configuration to the snapshot -func (sm *SnapshotManager) AddRoute(route *routev3.RouteConfiguration) error { - snap, _ := sm.Cache.GetSnapshot(sm.NodeID) - clusters := mapToSlice(snap.GetResources(string(resourcev3.ClusterType))) - routes := mapToSlice(snap.GetResources(string(resourcev3.RouteType))) - listeners := mapToSlice(snap.GetResources(string(resourcev3.ListenerType))) +// FlushCacheToDB saves the current in-memory Envoy snapshot (the source of truth) +// to the persistent DB. This implements the "flash cache to db" write-through. +func (sm *SnapshotManager) FlushCacheToDB(ctx context.Context, strategy DeleteStrategy) error { + // 1. Get current Envoy snapshot as SnapshotConfig + cfg, err := sm.SnapshotToConfig(ctx, sm.NodeID) + if err != nil { + return fmt.Errorf("failed to convert snapshot to config: %w", err) + } - routes = append(routes, route) + // 2. Save to Persistent DB + // Note: DB.SaveSnapshot handles insert/update logic for all resources + if err := sm.DB.SaveSnapshot(ctx, cfg, strategy); err != nil { + return fmt.Errorf("failed to save config to DB: %w", err) + } + fmt.Println("Successfully saved to Persistent DB.") - newSnap, _ := cachev3.NewSnapshot( - "snap-"+route.GetName(), - map[resourcev3.Type][]types.Resource{ - resourcev3.ClusterType: clusters, - resourcev3.RouteType: routes, - resourcev3.ListenerType: listeners, - }, - ) - return sm.Cache.SetSnapshot(context.TODO(), sm.NodeID, newSnap) + return nil } -// AddListener adds a listener to the snapshot -func (sm *SnapshotManager) AddListener(listener *listenerv3.Listener) error { - snap, _ := sm.Cache.GetSnapshot(sm.NodeID) - clusters := mapToSlice(snap.GetResources(string(resourcev3.ClusterType))) - routes := mapToSlice(snap.GetResources(string(resourcev3.RouteType))) - listeners := mapToSlice(snap.GetResources(string(resourcev3.ListenerType))) +// EnableResourceFromDB fetches a logically disabled resource from the DB and +// flips its status to enabled, then adds it back to the cache. 
+func (sm *SnapshotManager) EnableResourceFromDB(name string, typ resourcev3.Type) error { + ctx := context.Background() + switch typ { + case resourcev3.ClusterType: + if err := sm.DB.EnableCluster(ctx, name, true); err != nil { + return fmt.Errorf("failed to enable cluster '%s' in DB: %w", name, err) + } + case resourcev3.ListenerType: + if err := sm.DB.EnableListener(ctx, name, true); err != nil { + return fmt.Errorf("failed to enable listener '%s' in DB: %w", name, err) + } + default: + return fmt.Errorf("unsupported resource type for enabling: %s", typ) + } - listeners = append(listeners, listener) - - newSnap, _ := cachev3.NewSnapshot( - "snap-"+listener.GetName(), - map[resourcev3.Type][]types.Resource{ - resourcev3.ClusterType: clusters, - resourcev3.RouteType: routes, - resourcev3.ListenerType: listeners, - }, - ) - return sm.Cache.SetSnapshot(context.TODO(), sm.NodeID, newSnap) + // Reload snapshot from DB to update the cache with the newly enabled resource + return sm.LoadSnapshotFromDB(ctx) } -// RemoveCluster removes a cluster by name -func (sm *SnapshotManager) RemoveCluster(name string) error { - snap, _ := sm.Cache.GetSnapshot(sm.NodeID) - clusters := []types.Resource{} - routes := mapToSlice(snap.GetResources(string(resourcev3.RouteType))) - listeners := mapToSlice(snap.GetResources(string(resourcev3.ListenerType))) +// ---------------- Consistency Check ---------------- - for _, r := range snap.GetResources(string(resourcev3.ClusterType)) { - if c, ok := r.(*clusterv3.Cluster); ok && c.GetName() != name { - clusters = append(clusters, c) +// ConsistencyReport holds the results of the cache/DB consistency check. +type ConsistencyReport struct { + CacheOnly map[resourcev3.Type][]string `json:"cache-only"` // Resources present in cache but not enabled in DB + DBOnly map[resourcev3.Type][]string `json:"db-only"` // Resources enabled in DB but not present in cache + Inconsistent bool `json:"inconsistent"` +} + +// CheckCacheDBConsistency compares the currently active Envoy cache snapshot +// against the enabled resources in the persistent DB. +func (sm *SnapshotManager) CheckCacheDBConsistency(ctx context.Context) (*ConsistencyReport, error) { + report := &ConsistencyReport{ + CacheOnly: make(map[resourcev3.Type][]string), + DBOnly: make(map[resourcev3.Type][]string), + } + + // 1. Get current cache snapshot + cacheConfig, err := sm.SnapshotToConfig(ctx, sm.NodeID) + if err != nil { + return nil, fmt.Errorf("failed to get snapshot from cache: %w", err) + } + + // 2. 
Rebuild snapshot from DB (only fetches *enabled* resources from DB) + dbConfig, err := sm.DB.RebuildSnapshot(ctx) + if err != nil { + return nil, fmt.Errorf("failed to rebuild snapshot from DB: %w", err) + } + + // Helper to build a set (map[string]struct{}) of resource names for faster lookups + buildNameSet := func(resources []ResourceNamer) map[string]struct{} { + set := make(map[string]struct{}, len(resources)) + for _, r := range resources { + set[r.GetName()] = struct{}{} + } + return set + } + + // Map of resource types to their lists in SnapshotConfig + typeResourceMaps := []struct { + typ resourcev3.Type + cacheList []ResourceNamer + dbList []ResourceNamer + }{ + {resourcev3.ClusterType, resourcesToNamers(cacheConfig.EnabledClusters), resourcesToNamers(dbConfig.EnabledClusters)}, + {resourcev3.ListenerType, resourcesToNamers(cacheConfig.EnabledListeners), resourcesToNamers(dbConfig.EnabledListeners)}, + } + + for _, m := range typeResourceMaps { + cacheSet := buildNameSet(m.cacheList) + dbSet := buildNameSet(m.dbList) + + // Check for Cache-only resources (present in cacheSet but not in dbSet) + for cacheName := range cacheSet { + if _, existsInDB := dbSet[cacheName]; !existsInDB { + report.CacheOnly[m.typ] = append(report.CacheOnly[m.typ], cacheName) + report.Inconsistent = true + } + } + + // Check for DB-only resources (present in dbSet but not in cacheSet) + for dbName := range dbSet { + if _, existsInCache := cacheSet[dbName]; !existsInCache { + report.DBOnly[m.typ] = append(report.DBOnly[m.typ], dbName) + report.Inconsistent = true + } } } - newSnap, _ := cachev3.NewSnapshot( - "snap-remove-"+name, - map[resourcev3.Type][]types.Resource{ - resourcev3.ClusterType: clusters, - resourcev3.RouteType: routes, - resourcev3.ListenerType: listeners, - }, - ) - return sm.Cache.SetSnapshot(context.TODO(), sm.NodeID, newSnap) + return report, nil } -// RemoveRoute removes a route by name -func (sm *SnapshotManager) RemoveRoute(name string) error { - snap, _ := sm.Cache.GetSnapshot(sm.NodeID) - clusters := mapToSlice(snap.GetResources(string(resourcev3.ClusterType))) - routes := []types.Resource{} - listeners := mapToSlice(snap.GetResources(string(resourcev3.ListenerType))) +// ---------------- Generic Add / Remove / List / Get ---------------- - for _, r := range snap.GetResources(string(resourcev3.RouteType)) { - if rt, ok := r.(*routev3.RouteConfiguration); ok && rt.GetName() != name { - routes = append(routes, rt) - } - } - - newSnap, _ := cachev3.NewSnapshot( - "snap-remove-route-"+name, - map[resourcev3.Type][]types.Resource{ - resourcev3.ClusterType: clusters, - resourcev3.RouteType: routes, - resourcev3.ListenerType: listeners, - }, - ) - return sm.Cache.SetSnapshot(context.TODO(), sm.NodeID, newSnap) -} - -// RemoveListener removes a listener by name -func (sm *SnapshotManager) RemoveListener(name string) error { - snap, _ := sm.Cache.GetSnapshot(sm.NodeID) - clusters := mapToSlice(snap.GetResources(string(resourcev3.ClusterType))) - routes := mapToSlice(snap.GetResources(string(resourcev3.RouteType))) - listeners := []types.Resource{} - - for _, r := range snap.GetResources(string(resourcev3.ListenerType)) { - if l, ok := r.(*listenerv3.Listener); ok && l.GetName() != name { - listeners = append(listeners, l) - } - } - - newSnap, _ := cachev3.NewSnapshot( - "snap-remove-listener-"+name, - map[resourcev3.Type][]types.Resource{ - resourcev3.ClusterType: clusters, - resourcev3.RouteType: routes, - resourcev3.ListenerType: listeners, - }, - ) - return 
sm.Cache.SetSnapshot(context.TODO(), sm.NodeID, newSnap) -} - -// ---------------- List ---------------- - -func (sm *SnapshotManager) ListClusters() ([]*clusterv3.Cluster, error) { +// AddResourceToSnapshot adds any resource to the snapshot dynamically +func (sm *SnapshotManager) AddResourceToSnapshot(resource types.Resource, typ resourcev3.Type) error { snap, err := sm.Cache.GetSnapshot(sm.NodeID) if err != nil { - return nil, err + return fmt.Errorf("failed to get snapshot from cache: %w", err) } - clusters := []*clusterv3.Cluster{} - for _, r := range snap.GetResources(string(resourcev3.ClusterType)) { - if c, ok := r.(*clusterv3.Cluster); ok { - clusters = append(clusters, c) - } - } - return clusters, nil -} + resources := sm.getAllResourcesFromSnapshot(snap) -func (sm *SnapshotManager) ListRoutes() ([]*routev3.RouteConfiguration, error) { - snap, err := sm.Cache.GetSnapshot(sm.NodeID) - if err != nil { - return nil, err + // Append to the appropriate slice + switch typ { + case resourcev3.ClusterType: + resources[resourcev3.ClusterType] = append(resources[resourcev3.ClusterType], resource) + case resourcev3.ListenerType: + resources[resourcev3.ListenerType] = append(resources[resourcev3.ListenerType], resource) + case resourcev3.EndpointType, resourcev3.SecretType, resourcev3.RuntimeType: + resources[typ] = append(resources[typ], resource) + default: + return fmt.Errorf("unsupported resource type: %s", typ) } - routes := []*routev3.RouteConfiguration{} - for _, r := range snap.GetResources(string(resourcev3.RouteType)) { - if rt, ok := r.(*routev3.RouteConfiguration); ok { - routes = append(routes, rt) - } - } - return routes, nil -} -func (sm *SnapshotManager) ListListeners() ([]*listenerv3.Listener, error) { - snap, err := sm.Cache.GetSnapshot(sm.NodeID) - if err != nil { - return nil, err - } - listeners := []*listenerv3.Listener{} - for _, r := range snap.GetResources(string(resourcev3.ListenerType)) { - if l, ok := r.(*listenerv3.Listener); ok { - listeners = append(listeners, l) - } - } - return listeners, nil -} - -// ---------------- Get ---------------- - -func (sm *SnapshotManager) GetCluster(name string) (*clusterv3.Cluster, error) { - snap, err := sm.Cache.GetSnapshot(sm.NodeID) - if err != nil { - return nil, err - } - r, ok := snap.GetResources(string(resourcev3.ClusterType))[name] + resourceNamer, ok := resource.(interface{ GetName() string }) if !ok { - return nil, fmt.Errorf("cluster %s not found", name) + return fmt.Errorf("resource of type %s does not implement GetName()", typ) } - if c, ok := r.(*clusterv3.Cluster); ok { - return c, nil - } - return nil, fmt.Errorf("resource %s found, but is not a Cluster", name) + + newSnap, _ := cachev3.NewSnapshot( + "snap-generic-"+resourceNamer.GetName(), + resources, + ) + return sm.Cache.SetSnapshot(context.TODO(), sm.NodeID, newSnap) } -func (sm *SnapshotManager) GetRoute(name string) (*routev3.RouteConfiguration, error) { +// RemoveResource removes any resource by name dynamically +func (sm *SnapshotManager) RemoveResource(name string, typ resourcev3.Type, strategy DeleteStrategy) error { + snap, _ := sm.Cache.GetSnapshot(sm.NodeID) + resources := sm.getAllResourcesFromSnapshot(snap) + + // Flag to check if resource was found in cache + var resourceFound = false + + // Filter the target type + if targetResources, ok := resources[typ]; ok { + resources[typ], resourceFound = filterAndCheckResourcesByName(targetResources, name) + } + + if strategy == DeleteActual { + if resourceFound { + return fmt.Errorf("actual delete 
requested but resource %s of type %s still exists in cache", name, typ) + } + if typ == resourcev3.ClusterType { + if err := sm.DB.RemoveCluster(context.TODO(), name); err != nil { + return fmt.Errorf("failed to delete cluster %s from DB: %w", name, err) + } + return nil + } + if typ == resourcev3.ListenerType { + if err := sm.DB.RemoveListener(context.TODO(), name); err != nil { + return fmt.Errorf("failed to delete listener %s from DB: %w", name, err) + } + return nil + } + return fmt.Errorf("actual delete not supported for resource type: %s", typ) + } + + if !resourceFound { + return fmt.Errorf("resource %s of type %s not found in cache", name, typ) + } + + newSnap, _ := cachev3.NewSnapshot( + "snap-remove-generic-"+name, + resources, + ) + + if err := sm.Cache.SetSnapshot(context.TODO(), sm.NodeID, newSnap); err != nil { + return fmt.Errorf("failed to set snapshot: %w", err) + } + + if err := sm.FlushCacheToDB(context.TODO(), strategy); err != nil { + return fmt.Errorf("failed to flush cache to DB: %w", err) + } + return nil +} + +// ListResources returns all enabled and disabled resources of a given type from the DB. +func (sm *SnapshotManager) ListResources(typ resourcev3.Type) ([]types.Resource, []types.Resource, error) { + snap, err := sm.DB.RebuildSnapshot(context.Background()) + if err != nil { + return nil, nil, fmt.Errorf("failed to rebuild snapshot from DB: %w", err) + } + + var enabled, disabled []types.Resource + var namerEnabled, namerDisabled []ResourceNamer + + switch typ { + case resourcev3.ClusterType: + namerEnabled = resourcesToNamers(snap.EnabledClusters) + namerDisabled = resourcesToNamers(snap.DisabledClusters) + case resourcev3.ListenerType: + namerEnabled = resourcesToNamers(snap.EnabledListeners) + namerDisabled = resourcesToNamers(snap.DisabledListeners) + default: + return nil, nil, fmt.Errorf("unsupported resource type: %s", typ) + } + + // Convert ResourceNamer slices back to types.Resource slices + enabled = make([]types.Resource, len(namerEnabled)) + for i, r := range namerEnabled { + enabled[i] = r.(types.Resource) + } + + disabled = make([]types.Resource, len(namerDisabled)) + for i, r := range namerDisabled { + disabled[i] = r.(types.Resource) + } + + return enabled, disabled, nil +} + +// GetResourceFromCache retrieves a resource by name and type from the cache. +func (sm *SnapshotManager) GetResourceFromCache(name string, typ resourcev3.Type) (types.Resource, error) { snap, err := sm.Cache.GetSnapshot(sm.NodeID) if err != nil { return nil, err } - r, ok := snap.GetResources(string(resourcev3.RouteType))[name] + r, ok := snap.GetResources(string(typ))[name] if !ok { - return nil, fmt.Errorf("route %s not found", name) + return nil, fmt.Errorf("%s resource %s not found in cache", typ, name) } - if rt, ok := r.(*routev3.RouteConfiguration); ok { - return rt, nil - } - return nil, fmt.Errorf("resource %s found, but is not a RouteConfiguration", name) -} -func (sm *SnapshotManager) GetListener(name string) (*listenerv3.Listener, error) { - snap, err := sm.Cache.GetSnapshot(sm.NodeID) - if err != nil { - return nil, err - } - r, ok := snap.GetResources(string(resourcev3.ListenerType))[name] - if !ok { - return nil, fmt.Errorf("listener %s not found", name) - } - if l, ok := r.(*listenerv3.Listener); ok { - return l, nil - } - return nil, fmt.Errorf("resource %s found, but is not a Listener", name) + // We rely on the type given to be correct, as all xDS resources implement GetName(). 
+ return r, nil } // ---------------- Save ---------------- @@ -397,20 +491,14 @@ out := make(map[string][]interface{}) - for _, r := range snap.GetResources(string(resourcev3.ClusterType)) { - if c, ok := r.(*clusterv3.Cluster); ok { - out[string(resourcev3.ClusterType)] = append(out[string(resourcev3.ClusterType)], c) - } + // Iterate over all known types + clusterTypeResources := snap.GetResources(resourcev3.ClusterType) + for _, r := range clusterTypeResources { + out[resourcev3.ClusterType] = append(out[resourcev3.ClusterType], r) } - for _, r := range snap.GetResources(string(resourcev3.RouteType)) { - if rt, ok := r.(*routev3.RouteConfiguration); ok { - out[string(resourcev3.RouteType)] = append(out[string(resourcev3.RouteType)], rt) - } - } - for _, r := range snap.GetResources(string(resourcev3.ListenerType)) { - if l, ok := r.(*listenerv3.Listener); ok { - out[string(resourcev3.ListenerType)] = append(out[string(resourcev3.ListenerType)], l) - } + listenerTypeResources := snap.GetResources(resourcev3.ListenerType) + for _, r := range listenerTypeResources { + out[resourcev3.ListenerType] = append(out[resourcev3.ListenerType], r) } data, err := json.MarshalIndent(out, "", " ") @@ -423,201 +511,57 @@ // ---------------- Helpers ---------------- +// mapToSlice converts a map of named resources to a slice of resources. func mapToSlice(m map[string]types.Resource) []types.Resource { - out := []types.Resource{} + out := make([]types.Resource, 0, len(m)) for _, r := range m { out = append(out, r) } return out } -// NewCluster creates a simple cluster -func NewCluster(name string) *clusterv3.Cluster { - return &clusterv3.Cluster{ - Name: name, - ConnectTimeout: durationpb.New(5 * time.Second), - ClusterDiscoveryType: &clusterv3.Cluster_Type{ - Type: clusterv3.Cluster_EDS, - }, - LbPolicy: clusterv3.Cluster_ROUND_ROBIN, - } -} - -// NewRoute creates a simple route tied to a cluster -func NewRoute(name, clusterName, prefix string) *routev3.RouteConfiguration { - return &routev3.RouteConfiguration{ - Name: name, - VirtualHosts: []*routev3.VirtualHost{ - { - Name: "vh-" + name, - Domains: []string{"*"}, - Routes: []*routev3.Route{ - { - Match: &routev3.RouteMatch{ - PathSpecifier: &routev3.RouteMatch_Prefix{Prefix: prefix}, - }, - Action: &routev3.Route_Route{ - Route: &routev3.RouteAction{ - ClusterSpecifier: &routev3.RouteAction_Cluster{Cluster: clusterName}, - }, - }, - }, - }, - }, - }, - } -} - -// NewListener creates a simple TCP listener for a given port -func NewListener(name string, port uint32) *listenerv3.Listener { - return &listenerv3.Listener{ - Name: name, - Address: &corev3.Address{ - Address: &corev3.Address_SocketAddress{ - SocketAddress: &corev3.SocketAddress{ - Protocol: corev3.SocketAddress_TCP, - Address: "0.0.0.0", - PortSpecifier: &corev3.SocketAddress_PortValue{ - PortValue: port, - }, - }, - }, - }, - } -} - -// ---------------- Generic Helpers for all xDS types ---------------- - -// AddResource adds any resource to the snapshot dynamically -func (sm *SnapshotManager) AddResource(resource types.Resource, typ resourcev3.Type) error { - snap, _ := sm.Cache.GetSnapshot(sm.NodeID) - - // Convert existing resources to slices - clusters := mapToSlice(snap.GetResources(string(resourcev3.ClusterType))) - routes := mapToSlice(snap.GetResources(string(resourcev3.RouteType))) - listeners := mapToSlice(snap.GetResources(string(resourcev3.ListenerType))) - endpoints := mapToSlice(snap.GetResources(string(resourcev3.EndpointType))) - secrets := 
mapToSlice(snap.GetResources(string(resourcev3.SecretType))) - runtimes := mapToSlice(snap.GetResources(string(resourcev3.RuntimeType))) - extConfigs := mapToSlice(snap.GetResources(string(resourcev3.ExtensionConfigType))) - - // Append to the appropriate slice - switch typ { - case resourcev3.ClusterType: - clusters = append(clusters, resource) - case resourcev3.RouteType: - routes = append(routes, resource) - case resourcev3.ListenerType: - listeners = append(listeners, resource) - case resourcev3.EndpointType: - endpoints = append(endpoints, resource) - case resourcev3.SecretType: - secrets = append(secrets, resource) - case resourcev3.RuntimeType: - runtimes = append(runtimes, resource) - case resourcev3.ExtensionConfigType: - extConfigs = append(extConfigs, resource) - default: - return fmt.Errorf("unsupported resource type: %s", typ) - } - - newSnap, _ := cachev3.NewSnapshot( - "snap-generic-"+resource.(interface{ GetName() string }).GetName(), - map[resourcev3.Type][]types.Resource{ - resourcev3.ClusterType: clusters, - resourcev3.RouteType: routes, - resourcev3.ListenerType: listeners, - resourcev3.EndpointType: endpoints, - resourcev3.SecretType: secrets, - resourcev3.RuntimeType: runtimes, - resourcev3.ExtensionConfigType: extConfigs, - }, - ) - return sm.Cache.SetSnapshot(context.TODO(), sm.NodeID, newSnap) -} - -// RemoveResource removes any resource by name dynamically -func (sm *SnapshotManager) RemoveResource(name string, typ resourcev3.Type) error { - snap, _ := sm.Cache.GetSnapshot(sm.NodeID) - - // Convert all to slices - clusters := mapToSlice(snap.GetResources(string(resourcev3.ClusterType))) - routes := mapToSlice(snap.GetResources(string(resourcev3.RouteType))) - listeners := mapToSlice(snap.GetResources(string(resourcev3.ListenerType))) - endpoints := mapToSlice(snap.GetResources(string(resourcev3.EndpointType))) - secrets := mapToSlice(snap.GetResources(string(resourcev3.SecretType))) - runtimes := mapToSlice(snap.GetResources(string(resourcev3.RuntimeType))) - extConfigs := mapToSlice(snap.GetResources(string(resourcev3.ExtensionConfigType))) - - // Filter the target type - switch typ { - case resourcev3.ClusterType: - clusters = filterResourcesByName[*clusterv3.Cluster](clusters, name) - case resourcev3.RouteType: - routes = filterResourcesByName[*routev3.RouteConfiguration](routes, name) - case resourcev3.ListenerType: - listeners = filterResourcesByName[*listenerv3.Listener](listeners, name) - case resourcev3.EndpointType: - endpoints = filterResourcesByName[types.Resource](endpoints, name) // ClusterLoadAssignment - case resourcev3.SecretType: - secrets = filterResourcesByName[types.Resource](secrets, name) // Secret - case resourcev3.RuntimeType: - runtimes = filterResourcesByName[types.Resource](runtimes, name) - case resourcev3.ExtensionConfigType: - extConfigs = filterResourcesByName[types.Resource](extConfigs, name) - default: - return fmt.Errorf("unsupported resource type: %s", typ) - } - - newSnap, _ := cachev3.NewSnapshot( - "snap-remove-generic-"+name, - map[resourcev3.Type][]types.Resource{ - resourcev3.ClusterType: clusters, - resourcev3.RouteType: routes, - resourcev3.ListenerType: listeners, - resourcev3.EndpointType: endpoints, - resourcev3.SecretType: secrets, - resourcev3.RuntimeType: runtimes, - resourcev3.ExtensionConfigType: extConfigs, - }, - ) - return sm.Cache.SetSnapshot(context.TODO(), sm.NodeID, newSnap) -} - -// ListResources returns all resources of a given type -func (sm *SnapshotManager) ListResources(typ resourcev3.Type) 
([]types.Resource, error) { - snap, err := sm.Cache.GetSnapshot(sm.NodeID) - if err != nil { - return nil, err - } - return mapToSlice(snap.GetResources(string(typ))), nil -} - -// GetResource retrieves a resource by name and type -func (sm *SnapshotManager) GetResource(name string, typ resourcev3.Type) (types.Resource, error) { - snap, err := sm.Cache.GetSnapshot(sm.NodeID) - if err != nil { - return nil, err - } - r, ok := snap.GetResources(string(typ))[name] - if !ok { - return nil, fmt.Errorf("%s %s not found", typ, name) - } - return r, nil -} - -// ---------------- Generic filter helper ---------------- -func filterResourcesByName[T any](resources []types.Resource, name string) []types.Resource { +// filterAndCheckResourcesByName filters a slice of resources by name, +// returning the filtered slice and a boolean indicating if the named resource was found. +func filterAndCheckResourcesByName(resources []types.Resource, name string) ([]types.Resource, bool) { filtered := []types.Resource{} + var found = false for _, r := range resources { - if getNameFunc, ok := r.(interface{ GetName() string }); ok { - if getNameFunc.GetName() != name { + if namer, ok := r.(interface{ GetName() string }); ok { + if namer.GetName() != name { filtered = append(filtered, r) + } else { + found = true } } else { // fallback, include unknown type filtered = append(filtered, r) } } - return filtered + return filtered, found +} + +// getAllResourcesFromSnapshot retrieves all known resource types from a snapshot as a map. +func (sm *SnapshotManager) getAllResourcesFromSnapshot(snap cachev3.ResourceSnapshot) map[resourcev3.Type][]types.Resource { + // Only include types that might be manipulated by the generic functions + resources := map[resourcev3.Type][]types.Resource{ + resourcev3.ClusterType: mapToSlice(snap.GetResources(string(resourcev3.ClusterType))), + resourcev3.ListenerType: mapToSlice(snap.GetResources(string(resourcev3.ListenerType))), + // resourcev3.EndpointType: mapToSlice(snap.GetResources(string(resourcev3.EndpointType))), + // resourcev3.SecretType: mapToSlice(snap.GetResources(string(resourcev3.SecretType))), + // resourcev3.RuntimeType: mapToSlice(snap.GetResources(string(resourcev3.RuntimeType))), + // Include other types as needed + } + return resources +} + +// resourcesToNamers converts a slice of proto-generated resource pointers +// (like []*clusterv3.Cluster) to a slice of the generic ResourceNamer interface. +// This is necessary because structs like *clusterv3.Cluster don't explicitly +// implement types.Resource, but are compatible with it and ResourceNamer. 
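+// Illustrative usage (an assumption for clarity, not part of the change itself:
+// ResourceNamer is taken to be this package's named-resource interface exposing
+// GetName()). A call such as
+//   namers := resourcesToNamers([]*clusterv3.Cluster{c1, c2})
+// would yield a []ResourceNamer whose elements can then be handled generically by name.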
+func resourcesToNamers[T ResourceNamer](list []T) []ResourceNamer { + out := make([]ResourceNamer, len(list)) + for i, item := range list { + out[i] = item + } + return out } diff --git a/internal/storage.go b/internal/storage.go new file mode 100644 index 0000000..ff8139a --- /dev/null +++ b/internal/storage.go @@ -0,0 +1,604 @@ +package internal + +import ( + "context" + "database/sql" + "encoding/json" + "fmt" + "strings" + + clusterv3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + listenerv3 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + + // routev3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" // REMOVED + + "google.golang.org/protobuf/encoding/protojson" +) + +// Storage abstracts database persistence +type Storage struct { + db *sql.DB + driver string +} + +// DeleteStrategy defines the action to take on missing resources +type DeleteStrategy int + +const ( + // DeleteNone performs only UPSERT for items in the list (default behavior) + DeleteNone DeleteStrategy = iota + // DeleteLogical marks missing resources as disabled (now applicable to clusters and listeners) + DeleteLogical + // DeleteActual removes missing resources physically from the database + DeleteActual +) + +// NewStorage initializes a Storage instance +func NewStorage(db *sql.DB, driver string) *Storage { + return &Storage{db: db, driver: driver} +} + +// placeholder returns correct SQL placeholder based on driver +func (s *Storage) placeholder(n int) string { + if s.driver == "postgres" { + return fmt.Sprintf("$%d", n) + } + return "?" +} + +// InitSchema ensures required tables exist +func (s *Storage) InitSchema(ctx context.Context) error { + var schema string + switch s.driver { + case "postgres": + schema = ` + CREATE TABLE IF NOT EXISTS clusters ( + id SERIAL PRIMARY KEY, + name TEXT UNIQUE NOT NULL, + data JSONB NOT NULL, + enabled BOOLEAN DEFAULT true, + updated_at TIMESTAMP DEFAULT now() + ); + -- REMOVED routes table + CREATE TABLE IF NOT EXISTS listeners ( + id SERIAL PRIMARY KEY, + name TEXT UNIQUE NOT NULL, + data JSONB NOT NULL, + enabled BOOLEAN DEFAULT true, + updated_at TIMESTAMP DEFAULT now() + );` + default: // SQLite + schema = ` + CREATE TABLE IF NOT EXISTS clusters ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + name TEXT UNIQUE NOT NULL, + data TEXT NOT NULL, + enabled BOOLEAN DEFAULT 1, + updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP + ); + -- REMOVED routes table + CREATE TABLE IF NOT EXISTS listeners ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + name TEXT UNIQUE NOT NULL, + data TEXT NOT NULL, + enabled BOOLEAN DEFAULT 1, + updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP + );` + } + _, err := s.db.ExecContext(ctx, schema) + return err +} + +// SaveCluster inserts or updates a cluster +func (s *Storage) SaveCluster(ctx context.Context, cluster *clusterv3.Cluster) error { + data, err := protojson.Marshal(cluster) + if err != nil { + return err + } + + var query string + switch s.driver { + case "postgres": + // Explicitly set enabled=true on update to re-enable a logically deleted cluster + query = fmt.Sprintf(` + INSERT INTO clusters (name, data, enabled, updated_at) + VALUES (%s, %s, true, now()) + ON CONFLICT (name) DO UPDATE SET data = %s, enabled = true, updated_at = now()`, + s.placeholder(1), s.placeholder(2), s.placeholder(2)) + default: // SQLite + // Explicitly set enabled=1 on update to re-enable a logically deleted cluster + query = ` + INSERT INTO clusters (name, data, enabled, updated_at) + VALUES (?, ?, 1, CURRENT_TIMESTAMP) + 
ON CONFLICT(name) DO UPDATE SET data=excluded.data, enabled=1, updated_at=CURRENT_TIMESTAMP` + } + + _, err = s.db.ExecContext(ctx, query, cluster.GetName(), string(data)) + return err +} + +// SaveRoute inserts or updates a route // REMOVED +// func (s *Storage) SaveRoute(ctx context.Context, route *routev3.RouteConfiguration) error { +// // ... (route logic removed) +// } + +// SaveListener inserts or updates a listener +func (s *Storage) SaveListener(ctx context.Context, listener *listenerv3.Listener) error { + data, err := protojson.Marshal(listener) + if err != nil { + return err + } + + var query string + switch s.driver { + case "postgres": + // Explicitly set enabled=true on update to re-enable a logically deleted listener + query = fmt.Sprintf(` + INSERT INTO listeners (name, data, enabled, updated_at) + VALUES (%s, %s, true, now()) + ON CONFLICT (name) DO UPDATE SET data = %s, enabled = true, updated_at = now()`, + s.placeholder(1), s.placeholder(2), s.placeholder(2)) + default: // SQLite + // Explicitly set enabled=1 on update to re-enable a logically deleted listener + query = ` + INSERT INTO listeners (name, data, enabled, updated_at) + VALUES (?, ?, 1, CURRENT_TIMESTAMP) + ON CONFLICT(name) DO UPDATE SET data=excluded.data, enabled=1, updated_at=CURRENT_TIMESTAMP` + } + + _, err = s.db.ExecContext(ctx, query, listener.GetName(), string(data)) + return err +} + +// LoadEnabledClusters retrieves all enabled clusters +func (s *Storage) LoadEnabledClusters(ctx context.Context) ([]*clusterv3.Cluster, error) { + query := `SELECT data FROM clusters` + if s.driver == "postgres" { + query += ` WHERE enabled = true` + } else { + query += ` WHERE enabled = 1` + } + + rows, err := s.db.QueryContext(ctx, query) + if err != nil { + return nil, err + } + defer rows.Close() + + var clusters []*clusterv3.Cluster + for rows.Next() { + var raw json.RawMessage + // FIX: Handle type difference between Postgres (JSONB) and SQLite (TEXT) + if s.driver != "postgres" { + var dataStr string + if err := rows.Scan(&dataStr); err != nil { + return nil, err + } + raw = json.RawMessage(dataStr) // Convert string to json.RawMessage + } else { + if err := rows.Scan(&raw); err != nil { + return nil, err + } + } + + var cluster clusterv3.Cluster + if err := protojson.Unmarshal(raw, &cluster); err != nil { + return nil, err + } + clusters = append(clusters, &cluster) + } + return clusters, nil +} + +// LoadAllClusters retrieves all clusters, regardless of their enabled status +func (s *Storage) LoadAllClusters(ctx context.Context) ([]*clusterv3.Cluster, error) { + rows, err := s.db.QueryContext(ctx, `SELECT data FROM clusters`) + if err != nil { + return nil, err + } + defer rows.Close() + + var clusters []*clusterv3.Cluster + for rows.Next() { + var raw json.RawMessage + // FIX: Handle type difference between Postgres (JSONB) and SQLite (TEXT) + if s.driver != "postgres" { + var dataStr string + if err := rows.Scan(&dataStr); err != nil { + return nil, err + } + raw = json.RawMessage(dataStr) // Convert string to json.RawMessage + } else { + if err := rows.Scan(&raw); err != nil { + return nil, err + } + } + + var cluster clusterv3.Cluster + if err := protojson.Unmarshal(raw, &cluster); err != nil { + return nil, err + } + clusters = append(clusters, &cluster) + } + return clusters, nil +} + +// LoadEnabledRoutes retrieves all enabled routes // REMOVED +// func (s *Storage) LoadEnabledRoutes(ctx context.Context) ([]*routev3.RouteConfiguration, error) { +// // ... 
(route logic removed) +// } + +// LoadAllRoutes retrieves all routes, regardless of their enabled status // REMOVED +// func (s *Storage) LoadAllRoutes(ctx context.Context) ([]*routev3.RouteConfiguration, error) { +// // ... (route logic removed) +// } + +// LoadEnabledListeners retrieves all enabled listeners +func (s *Storage) LoadEnabledListeners(ctx context.Context) ([]*listenerv3.Listener, error) { + query := `SELECT data FROM listeners` + if s.driver == "postgres" { + query += ` WHERE enabled = true` + } else { + query += ` WHERE enabled = 1` + } + + rows, err := s.db.QueryContext(ctx, query) + if err != nil { + return nil, err + } + defer rows.Close() + + var listeners []*listenerv3.Listener + for rows.Next() { + var raw json.RawMessage + // FIX: Handle type difference between Postgres (JSONB) and SQLite (TEXT) + if s.driver != "postgres" { + var dataStr string + if err := rows.Scan(&dataStr); err != nil { + return nil, err + } + raw = json.RawMessage(dataStr) // Convert string to json.RawMessage + } else { + if err := rows.Scan(&raw); err != nil { + return nil, err + } + } + + var l listenerv3.Listener + if err := protojson.Unmarshal(raw, &l); err != nil { + return nil, err + } + listeners = append(listeners, &l) + } + return listeners, nil +} + +// LoadAllListeners retrieves all listeners, regardless of their enabled status +func (s *Storage) LoadAllListeners(ctx context.Context) ([]*listenerv3.Listener, error) { + rows, err := s.db.QueryContext(ctx, `SELECT data FROM listeners`) + if err != nil { + return nil, err + } + defer rows.Close() + + var listeners []*listenerv3.Listener + for rows.Next() { + var raw json.RawMessage + // FIX: Handle type difference between Postgres (JSONB) and SQLite (TEXT) + if s.driver != "postgres" { + var dataStr string + if err := rows.Scan(&dataStr); err != nil { + return nil, err + } + raw = json.RawMessage(dataStr) // Convert string to json.RawMessage + } else { + if err := rows.Scan(&raw); err != nil { + return nil, err + } + } + + var l listenerv3.Listener + if err := protojson.Unmarshal(raw, &l); err != nil { + return nil, err + } + listeners = append(listeners, &l) + } + return listeners, nil +} + +// RebuildSnapshot rebuilds full snapshot from DB +func (s *Storage) RebuildSnapshot(ctx context.Context) (*SnapshotConfig, error) { + // 1. Load Enabled Resources (for xDS serving) + enabledClusters, err := s.LoadEnabledClusters(ctx) + if err != nil { + return nil, err + } + // enabledRoutes, err := s.LoadEnabledRoutes(ctx) // REMOVED + // if err != nil { + // return nil, err + // } + enabledListeners, err := s.LoadEnabledListeners(ctx) + if err != nil { + return nil, err + } + + // 2. Load ALL Resources (for comparison and disabled set) + allClusters, err := s.LoadAllClusters(ctx) + if err != nil { + return nil, err + } + // allRoutes, err := s.LoadAllRoutes(ctx) // REMOVED + // if err != nil { + // return nil, err + // } + allListeners, err := s.LoadAllListeners(ctx) + if err != nil { + return nil, err + } + + // 3. 
Separate Disabled Resources + + // Clusters + enabledClusterNames := make(map[string]struct{}, len(enabledClusters)) + for _, c := range enabledClusters { + enabledClusterNames[c.GetName()] = struct{}{} + } + var disabledClusters []*clusterv3.Cluster + for _, c := range allClusters { + if _, found := enabledClusterNames[c.GetName()]; !found { + disabledClusters = append(disabledClusters, c) + } + } + + // Routes // REMOVED + // enabledRouteNames := make(map[string]struct{}, 0) + // var disabledRoutes []*routev3.RouteConfiguration + // for _, r := range allRoutes { + // if _, found := enabledRouteNames[r.GetName()]; !found { + // disabledRoutes = append(disabledRoutes, r) + // } + // } + + // Listeners + enabledListenerNames := make(map[string]struct{}, len(enabledListeners)) + for _, l := range enabledListeners { + enabledListenerNames[l.GetName()] = struct{}{} + } + var disabledListeners []*listenerv3.Listener + for _, l := range allListeners { + if _, found := enabledListenerNames[l.GetName()]; !found { + disabledListeners = append(disabledListeners, l) + } + } + + return &SnapshotConfig{ + EnabledClusters: enabledClusters, + // EnabledRoutes: nil, // REMOVED + EnabledListeners: enabledListeners, + DisabledClusters: disabledClusters, + // DisabledRoutes: nil, // REMOVED + DisabledListeners: disabledListeners, + }, nil +} + +// SnapshotConfig aggregates xDS resources +type SnapshotConfig struct { + // Enabled resources (for xDS serving) + EnabledClusters []*clusterv3.Cluster + // EnabledRoutes []*routev3.RouteConfiguration // REMOVED + EnabledListeners []*listenerv3.Listener + + // Disabled resources (for UI display) + DisabledClusters []*clusterv3.Cluster + // DisabledRoutes []*routev3.RouteConfiguration // REMOVED + DisabledListeners []*listenerv3.Listener +} + +// EnableCluster toggles a cluster +func (s *Storage) EnableCluster(ctx context.Context, name string, enabled bool) error { + query := `UPDATE clusters SET enabled = ?, updated_at = CURRENT_TIMESTAMP WHERE name = ?` + if s.driver == "postgres" { + query = `UPDATE clusters SET enabled = $1, updated_at = now() WHERE name = $2` + } + _, err := s.db.ExecContext(ctx, query, enabled, name) + return err +} + +// EnableRoute toggles a route // REMOVED +// func (s *Storage) EnableRoute(ctx context.Context, name string, enabled bool) error { +// // ... (route logic removed) +// } + +// EnableListener toggles a listener +func (s *Storage) EnableListener(ctx context.Context, name string, enabled bool) error { + query := `UPDATE listeners SET enabled = ?, updated_at = CURRENT_TIMESTAMP WHERE name = ?` + if s.driver == "postgres" { + query = `UPDATE listeners SET enabled = $1, updated_at = now() WHERE name = $2` + } + _, err := s.db.ExecContext(ctx, query, enabled, name) + return err +} + +// disableMissingResources updates the 'enabled' status for resources in 'table' +// whose 'name' is NOT in 'inputNames'. +func (s *Storage) disableMissingResources(ctx context.Context, table string, inputNames []string) error { + if table != "clusters" && table != "listeners" { // CHECK UPDATED + return fmt.Errorf("logical delete (disable) is only supported for tables with an 'enabled' column (clusters, listeners)") + } + + // 1. Build placeholders and args + placeholders := make([]string, len(inputNames)) + args := make([]interface{}, len(inputNames)) + for i, name := range inputNames { + if s.driver == "postgres" { + placeholders[i] = fmt.Sprintf("$%d", i+1) + } else { + placeholders[i] = "?" 
+ } + args[i] = name + } + + disabledValue := "false" + if s.driver != "postgres" { + disabledValue = "0" + } + + var updateTime string + if s.driver == "postgres" { + updateTime = "now()" + } else { + updateTime = "CURRENT_TIMESTAMP" + } + + // If no names are provided, disable ALL currently enabled resources + whereClause := "" + if len(inputNames) > 0 { + whereClause = fmt.Sprintf("WHERE name NOT IN (%s)", strings.Join(placeholders, ", ")) + } + + // 2. Construct and execute the UPDATE query + query := fmt.Sprintf(` + UPDATE %s + SET enabled = %s, updated_at = %s + %s`, + table, disabledValue, updateTime, whereClause) + + _, err := s.db.ExecContext(ctx, query, args...) + return err +} + +// deleteMissingResources physically deletes resources from 'table' whose 'name' is NOT in 'inputNames'. +func (s *Storage) deleteMissingResources(ctx context.Context, table string, inputNames []string) error { + if table != "clusters" && table != "listeners" { // CHECK UPDATED + return fmt.Errorf("physical delete is only supported for tables: clusters, listeners") + } + + // 1. Build placeholders and args + placeholders := make([]string, len(inputNames)) + args := make([]interface{}, len(inputNames)) + for i, name := range inputNames { + if s.driver == "postgres" { + placeholders[i] = fmt.Sprintf("$%d", i+1) + } else { + placeholders[i] = "?" + } + args[i] = name + } + + // If no names are provided, delete ALL resources + whereClause := "" + if len(inputNames) > 0 { + whereClause = fmt.Sprintf("WHERE name NOT IN (%s)", strings.Join(placeholders, ", ")) + } + + // 2. Construct and execute the DELETE query + query := fmt.Sprintf(` + DELETE FROM %s + %s`, + table, whereClause) + + _, err := s.db.ExecContext(ctx, query, args...) + return err +} + +func (s *Storage) SaveSnapshot(ctx context.Context, cfg *SnapshotConfig, strategy DeleteStrategy) error { + if cfg == nil { + return fmt.Errorf("SnapshotConfig is nil") + } + + // Use a transaction for atomicity + tx, err := s.db.BeginTx(ctx, nil) + if err != nil { + return fmt.Errorf("failed to begin transaction: %w", err) + } + defer func() { + if err != nil { + tx.Rollback() + return + } + err = tx.Commit() + }() + + // Note: Only Enabledxxx resources are UPSERTED. Disabledxxx resources are + // left alone unless the deletion strategy removes them. + + // --- 1. Save/Upsert Clusters and Collect Names --- + clusterNames := make([]string, 0, len(cfg.EnabledClusters)) + for _, c := range cfg.EnabledClusters { + if err = s.SaveCluster(ctx, c); err != nil { + return fmt.Errorf("failed to save cluster %s: %w", c.GetName(), err) + } + clusterNames = append(clusterNames, c.GetName()) + } + + // --- 2. Save/Upsert Routes and Collect Names --- // REMOVED + // routeNames := make([]string, 0, len(cfg.EnabledRoutes)) + // for _, r := range cfg.EnabledRoutes { + // if err = s.SaveRoute(ctx, r); err != nil { + // return fmt.Errorf("failed to save route %s: %w", r.GetName(), err) + // } + // routeNames = append(routeNames, r.GetName()) + // } + + // --- 3. Save/Upsert Listeners and Collect Names --- + listenerNames := make([]string, 0, len(cfg.EnabledListeners)) + for _, l := range cfg.EnabledListeners { + if err = s.SaveListener(ctx, l); err != nil { + return fmt.Errorf("failed to save listener %s: %w", l.GetName(), err) + } + listenerNames = append(listenerNames, l.GetName()) + } + + // --- 4. 
Apply Deletion Strategy --- + switch strategy { + case DeleteLogical: + // Logical Delete (Disable) for all resource types: marks resources NOT in the current enabled list as disabled + if err = s.disableMissingResources(ctx, "clusters", clusterNames); err != nil { + return fmt.Errorf("failed to logically delete missing clusters: %w", err) + } + // if err = s.disableMissingResources(ctx, "routes", routeNames); err != nil { // REMOVED + // return fmt.Errorf("failed to logically delete missing routes: %w", err) + // } + if err = s.disableMissingResources(ctx, "listeners", listenerNames); err != nil { + return fmt.Errorf("failed to logically delete missing listeners: %w", err) + } + + case DeleteActual: + // Actual Delete (Physical Removal) for all resources: removes resources NOT in the current enabled list + if err = s.deleteMissingResources(ctx, "clusters", clusterNames); err != nil { + return fmt.Errorf("failed to physically delete missing clusters: %w", err) + } + // if err = s.deleteMissingResources(ctx, "routes", routeNames); err != nil { // REMOVED + // return fmt.Errorf("failed to physically delete missing routes: %w", err) + // } + if err = s.deleteMissingResources(ctx, "listeners", listenerNames); err != nil { + return fmt.Errorf("failed to physically delete missing listeners: %w", err) + } + + case DeleteNone: + // Do nothing for missing resources + return nil + } + + return err +} + +// RemoveListener deletes a listener by name +func (s *Storage) RemoveListener(ctx context.Context, name string) error { + query := `DELETE FROM listeners WHERE name = ?` + if s.driver == "postgres" { + query = `DELETE FROM listeners WHERE name = $1` + } + _, err := s.db.ExecContext(ctx, query, name) + return err +} + +// RemoveCluster deletes a cluster by name +func (s *Storage) RemoveCluster(ctx context.Context, name string) error { + query := `DELETE FROM clusters WHERE name = ?` + if s.driver == "postgres" { + query = `DELETE FROM clusters WHERE name = $1` + } + _, err := s.db.ExecContext(ctx, query, name) + return err +} diff --git a/main.go b/main.go index 73f8194..d5b014e 100644 --- a/main.go +++ b/main.go @@ -2,12 +2,16 @@ import ( "context" + "database/sql" "flag" "fmt" "net/http" "os" - "path/filepath" // ADDED: for directory and file path operations - "strings" // ADDED: for string manipulation + "path/filepath" + "strings" + + _ "github.com/lib/pq" // Postgres driver + _ "github.com/mattn/go-sqlite3" // SQLite driver "github.com/envoyproxy/go-control-plane/pkg/cache/types" cachev3 "github.com/envoyproxy/go-control-plane/pkg/cache/v3" @@ -24,8 +28,10 @@ port uint nodeID string restPort uint - snapshotFile string // Single file flag (kept for backwards compatibility) - configDir string // NEW FLAG: Directory containing config files + snapshotFile string + configDir string + dbConnStr string + dbDriver string ) func init() { @@ -35,12 +41,20 @@ flag.UintVar(&port, "port", 18000, "xDS management server port") flag.StringVar(&nodeID, "nodeID", "test-id", "Node ID") flag.UintVar(&restPort, "rest-port", 8080, "REST API server port") - // Keeping snapshotFile for backwards compatibility, though configDir is preferred for multiple files - flag.StringVar(&snapshotFile, "snapshot-file", "", "Optional initial snapshot JSON/YAML file (single file)") - flag.StringVar(&configDir, "config-dir", "", "Optional directory containing multiple config files (.yaml, .json)") // NEW + flag.StringVar(&snapshotFile, "snapshot-file", "", "Optional initial snapshot JSON/YAML file") + flag.StringVar(&configDir, 
"config-dir", "data/", "Optional directory containing multiple config files") + flag.StringVar(&dbConnStr, "db", "", "Optional database connection string for config persistence") } -// loadConfigFiles iterates over the specified directory and loads all .yaml or .json files. +// determineDriver returns driver name from connection string +func determineDriver(dsn string) string { + if strings.HasPrefix(dsn, "postgres://") || strings.HasPrefix(dsn, "postgresql://") { + return "postgres" + } + return "sqlite3" +} + +// loadConfigFiles iterates over a directory and loads all .yaml/.json files func loadConfigFiles(manager *internal.SnapshotManager, dir string) error { logger.Infof("loading configuration files from directory: %s", dir) @@ -49,123 +63,152 @@ return fmt.Errorf("failed to read directory %s: %w", dir, err) } - var resourceFiles map[string][]types.Resource = make(map[string][]types.Resource) + resourceFiles := make(map[string][]types.Resource) for _, file := range files { if file.IsDir() { continue } - fileName := file.Name() - // Only process files with .yaml, .yml, or .json extensions if strings.HasSuffix(fileName, ".yaml") || strings.HasSuffix(fileName, ".yml") || strings.HasSuffix(fileName, ".json") { filePath := filepath.Join(dir, fileName) logger.Infof(" -> loading config file: %s", filePath) - var rf map[string][]types.Resource - if rf, err = manager.LoadSnapshotFromFile(filePath); err != nil { + rf, err := manager.LoadSnapshotFromFile(filePath) + if err != nil { return fmt.Errorf("failed to load snapshot from file %s: %w", filePath, err) } for k, v := range rf { resourceFiles[k] = append(resourceFiles[k], v...) } logger.Infof("loaded %d resources from %s", len(rf), filePath) - // Note: SnapshotManager.LoadSnapshotFromFile must be implemented to *add* - // resources to the existing snapshot, not replace it entirely, for this - // iteration to work correctly. (Assuming the implementation you showed - // previously, which replaced the resources, is now updated to merge/add them, - // or that LoadSnapshotFromFile always creates a complete snapshot.) - // Since your `snapshot.go` was updated to load a *complete* snapshot from a file, - // we will rely on the user to provide files that contain complete, non-conflicting - // configurations. If you want true file merging, the internal logic needs to change. - // However, for this change, we'll keep the current behavior of LoadSnapshotFromFile - // and proceed. 
} } + if err := manager.SetSnapshot(context.TODO(), "snap-from-file", resourceFiles); err != nil { return fmt.Errorf("failed to set combined snapshot from files: %w", err) } - logger.Infof("successfully loaded %d configuration files from %s", len(files), dir) return nil } func main() { flag.Parse() - - // Optional: Configure klog to flush logs when the application exits defer klog.Flush() - // Create snapshot cache - cache := cachev3.NewSnapshotCache(false, cachev3.IDHash{}, logger) - // Create SnapshotManager - manager := internal.NewSnapshotManager(cache, nodeID) - - loadedConfigs := false - // OPTION 1: Load multiple config files from a directory - if configDir != "" { - if err := loadConfigFiles(manager, configDir); err != nil { - logger.Errorf("failed to load configs from directory: %v", err) + // Default DB to SQLite file if none provided + if dbConnStr == "" { + defaultDBPath := "data/config.db" + if err := os.MkdirAll(filepath.Dir(defaultDBPath), 0755); err != nil { + fmt.Fprintf(os.Stderr, "failed to create data directory: %v\n", err) os.Exit(1) } + dbConnStr = fmt.Sprintf("file:%s?_foreign_keys=on", defaultDBPath) + dbDriver = "sqlite3" + } else { + dbDriver = determineDriver(dbConnStr) + } + // --- Database initialization --- + db, err := sql.Open(dbDriver, dbConnStr) + if err != nil { + logger.Errorf("failed to connect to DB: %v", err) + os.Exit(1) + } + defer db.Close() - loadedConfigs = true - logger.Infof("successfully loaded all configuration files from %s", configDir) + storage := internal.NewStorage(db, dbDriver) + if err := storage.InitSchema(context.Background()); err != nil { + logger.Errorf("failed to initialize DB schema: %v", err) + os.Exit(1) } - // OPTION 2: Load single initial snapshot file (backwards compatible/single file mode) - if snapshotFile != "" && !loadedConfigs { - if _, err := os.Stat(snapshotFile); err == nil { - if resources, err := manager.LoadSnapshotFromFile(snapshotFile); err != nil { - logger.Errorf("failed to load snapshot from file: %v", err) + // Create snapshot cache and manager + cache := cachev3.NewSnapshotCache(false, cachev3.IDHash{}, logger) + manager := internal.NewSnapshotManager(cache, nodeID, storage) + + loadedConfigs := false + + // Step 1: Try to load snapshot from DB + snapCfg, err := storage.RebuildSnapshot(context.Background()) + if err == nil && len(snapCfg.EnabledClusters)+len(snapCfg.EnabledListeners) > 0 { + if err := manager.SetSnapshotFromConfig(context.Background(), "snap-from-db", snapCfg); err != nil { + logger.Errorf("failed to set DB snapshot: %v", err) + os.Exit(1) + } + loadedConfigs = true + logger.Infof("loaded snapshot from database") + } + + // Step 2: If DB empty, load from files and persist into DB + if !loadedConfigs { + if configDir != "" { + if err := loadConfigFiles(manager, configDir); err != nil { + logger.Errorf("failed to load configs from directory: %v", err) os.Exit(1) - } else { + } + loadedConfigs = true + } else if snapshotFile != "" { + if _, err := os.Stat(snapshotFile); err == nil { + resources, err := manager.LoadSnapshotFromFile(snapshotFile) + if err != nil { + logger.Errorf("failed to load snapshot from file: %v", err) + os.Exit(1) + } if err := manager.SetSnapshot(context.TODO(), "snap-from-file", resources); err != nil { logger.Errorf("failed to set loaded snapshot: %v", err) os.Exit(1) } + loadedConfigs = true + } else { + logger.Warnf("snapshot file not found: %s", snapshotFile) } - loadedConfigs = true - logger.Infof("loaded initial snapshot from %s", snapshotFile) - } else { - 
logger.Warnf("snapshot file not found: %s", snapshotFile) + } + + // Persist loaded snapshot into DB + if loadedConfigs { + snapCfg, err := manager.SnapshotToConfig(context.Background(), nodeID) + if err != nil { + logger.Errorf("failed to convert snapshot to DB config: %v", err) + os.Exit(1) + } + if err := storage.SaveSnapshot(context.Background(), snapCfg, internal.DeleteLogical); err != nil { + logger.Errorf("failed to save initial snapshot into DB: %v", err) + os.Exit(1) + } + logger.Infof("initial snapshot written into database") } } - // Ensure snapshot is consistent or create empty snapshot if no configs were loaded + // Step 3: Ensure snapshot exists in cache snap, err := manager.Cache.GetSnapshot(nodeID) if err != nil || !loadedConfigs { - // If an error occurred or no files were loaded, set an initial empty snapshot - if !loadedConfigs { - logger.Warnf("no configuration files loaded, creating empty snapshot.") - snap, _ = cachev3.NewSnapshot("snap-init", map[resourcev3.Type][]types.Resource{ - resourcev3.ClusterType: {}, - resourcev3.RouteType: {}, - resourcev3.ListenerType: {}, // Assuming ListenerType support is added in internal - }) - } else { - // This case should ideally not happen if loading was successful, but handles initial cache miss. - logger.Warnf("no snapshot found in cache, creating empty snapshot.") - } - + logger.Warnf("no valid snapshot found, creating empty snapshot") + snap, _ = cachev3.NewSnapshot("snap-init", map[resourcev3.Type][]types.Resource{ + resourcev3.ClusterType: {}, + resourcev3.RouteType: {}, + resourcev3.ListenerType: {}, + }) if err := cache.SetSnapshot(context.Background(), nodeID, snap); err != nil { logger.Errorf("failed to set initial snapshot: %v", err) os.Exit(1) } } - logger.Infof("xDS snapshot ready: version %s", snap.GetVersion(string(resourcev3.ClusterType))) // Use a specific type to show version + logger.Infof("xDS snapshot ready: version %s", snap.GetVersion(string(resourcev3.ClusterType))) - // Start xDS gRPC server + // --- Start xDS gRPC server --- ctx := context.Background() cb := &test.Callbacks{Debug: true} srv := server.NewServer(ctx, cache, cb) - go internal.RunServer(srv, port) // your existing RunServer implementation + go internal.RunServer(srv, port) - // Start REST API server + // --- Start REST API server --- api := internal.NewAPI(manager) mux := http.NewServeMux() api.RegisterRoutes(mux) - restAddr := ":" + fmt.Sprint(restPort) + // NEW: Serve the index.html file and any other static assets + mux.Handle("/", http.FileServer(http.Dir("./static"))) // Assuming 'web' is the folder + + restAddr := fmt.Sprintf(":%d", restPort) logger.Infof("starting REST API server on %s", restAddr) if err := http.ListenAndServe(restAddr, mux); err != nil { logger.Errorf("REST server error: %v", err) diff --git a/save.json b/save.json new file mode 100644 index 0000000..6498e0d --- /dev/null +++ b/save.json @@ -0,0 +1,1678 @@ +{ + "type.googleapis.com/envoy.config.cluster.v3.Cluster": [ + { + "name": "_nas_note", + "ClusterDiscoveryType": { + "Type": 0 + }, + "connect_timeout": { + "nanos": 200000000 + }, + "load_assignment": { + "cluster_name": "nas", + "endpoints": [ + { + "lb_endpoints": [ + { + "HostIdentifier": { + "Endpoint": { + "address": { + "Address": { + "SocketAddress": { + "address": "127.0.0.1", + "PortSpecifier": { + "PortValue": 9350 + } + } + } + }, + "health_check_config": { + "port_value": 9350 + } + } + } + } + ], + "LbConfig": null + } + ] + }, + "LbConfig": null + }, + { + "name": "_portainer_ui", + 
"ClusterDiscoveryType": { + "Type": 1 + }, + "connect_timeout": { + "nanos": 200000000 + }, + "load_assignment": { + "cluster_name": "portainer_ui", + "endpoints": [ + { + "lb_endpoints": [ + { + "HostIdentifier": { + "Endpoint": { + "address": { + "Address": { + "SocketAddress": { + "address": "192.168.68.161", + "PortSpecifier": { + "PortValue": 9000 + } + } + } + }, + "health_check_config": { + "port_value": 9000 + } + } + } + } + ], + "LbConfig": null + } + ] + }, + "LbConfig": null + }, + { + "name": "_git_bucket", + "ClusterDiscoveryType": { + "Type": 0 + }, + "connect_timeout": { + "nanos": 200000000 + }, + "load_assignment": { + "cluster_name": "git_bucket", + "endpoints": [ + { + "lb_endpoints": [ + { + "HostIdentifier": { + "Endpoint": { + "address": { + "Address": { + "SocketAddress": { + "address": "172.17.0.1", + "PortSpecifier": { + "PortValue": 8088 + } + } + } + }, + "health_check_config": { + "port_value": 8088 + } + } + } + } + ], + "LbConfig": null + } + ] + }, + "LbConfig": null + }, + { + "name": "_plex_server", + "ClusterDiscoveryType": { + "Type": 0 + }, + "connect_timeout": { + "nanos": 200000000 + }, + "load_assignment": { + "cluster_name": "nas", + "endpoints": [ + { + "lb_endpoints": [ + { + "HostIdentifier": { + "Endpoint": { + "address": { + "Address": { + "SocketAddress": { + "address": "192.168.68.113", + "PortSpecifier": { + "PortValue": 32400 + } + } + } + }, + "health_check_config": { + "port_value": 32400 + } + } + } + } + ], + "LbConfig": null + } + ] + }, + "LbConfig": null + }, + { + "name": "_nas_service", + "ClusterDiscoveryType": { + "Type": 0 + }, + "connect_timeout": { + "nanos": 200000000 + }, + "load_assignment": { + "cluster_name": "nas", + "endpoints": [ + { + "lb_endpoints": [ + { + "HostIdentifier": { + "Endpoint": { + "address": { + "Address": { + "SocketAddress": { + "address": "172.17.0.1", + "PortSpecifier": { + "PortValue": 5000 + } + } + } + }, + "health_check_config": { + "port_value": 5000 + } + } + } + } + ], + "LbConfig": null + } + ] + }, + "LbConfig": null + }, + { + "name": "_bitwarden_service", + "ClusterDiscoveryType": { + "Type": 0 + }, + "connect_timeout": { + "nanos": 200000000 + }, + "load_assignment": { + "cluster_name": "pwassword_manager", + "endpoints": [ + { + "lb_endpoints": [ + { + "HostIdentifier": { + "Endpoint": { + "address": { + "Address": { + "SocketAddress": { + "address": "172.17.0.1", + "PortSpecifier": { + "PortValue": 10010 + } + } + } + }, + "health_check_config": { + "port_value": 10010 + } + } + } + } + ], + "LbConfig": null + } + ] + }, + "LbConfig": null + }, + { + "name": "_nas_photo", + "ClusterDiscoveryType": { + "Type": 0 + }, + "connect_timeout": { + "nanos": 200000000 + }, + "load_assignment": { + "cluster_name": "nas", + "endpoints": [ + { + "lb_endpoints": [ + { + "HostIdentifier": { + "Endpoint": { + "address": { + "Address": { + "SocketAddress": { + "address": "127.0.0.1", + "PortSpecifier": { + "PortValue": 5080 + } + } + } + }, + "health_check_config": { + "port_value": 5080 + } + } + } + } + ], + "LbConfig": null + } + ] + }, + "LbConfig": null + }, + { + "name": "_3d_printer_console", + "ClusterDiscoveryType": { + "Type": 1 + }, + "connect_timeout": { + "seconds": 2 + }, + "load_assignment": { + "cluster_name": "printer", + "endpoints": [ + { + "lb_endpoints": [ + { + "HostIdentifier": { + "Endpoint": { + "address": { + "Address": { + "SocketAddress": { + "address": "octoprint", + "PortSpecifier": { + "PortValue": 5000 + } + } + } + }, + "health_check_config": { + "port_value": 5000 + } 
+ } + } + } + ], + "LbConfig": null + } + ] + }, + "LbConfig": null + }, + { + "name": "_docker_registry", + "ClusterDiscoveryType": { + "Type": 0 + }, + "connect_timeout": { + "nanos": 200000000 + }, + "load_assignment": { + "cluster_name": "docker_registry", + "endpoints": [ + { + "lb_endpoints": [ + { + "HostIdentifier": { + "Endpoint": { + "address": { + "Address": { + "SocketAddress": { + "address": "172.17.0.1", + "PortSpecifier": { + "PortValue": 5555 + } + } + } + }, + "health_check_config": { + "port_value": 5555 + } + } + } + } + ], + "LbConfig": null + } + ] + }, + "LbConfig": null, + "transport_socket": { + "name": "envoy.transport_sockets.tls", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext", + "value": "CnIScAo3CjUvZXRjL2NlcnRzL2Rvd25zdHJlYW0vZG9ja2VyLmplcnhpZS5jb20vZnVsbGNoYWluLnBlbRI1CjMvZXRjL2NlcnRzL2Rvd25zdHJlYW0vZG9ja2VyLmplcnhpZS5jb20vcHJpdmtleS5wZW0=" + } + } + } + }, + { + "name": "_ai_api_server", + "ClusterDiscoveryType": { + "Type": 1 + }, + "connect_timeout": { + "nanos": 200000000 + }, + "load_assignment": { + "cluster_name": "_ai_api_server", + "endpoints": [ + { + "lb_endpoints": [ + { + "HostIdentifier": { + "Endpoint": { + "address": { + "Address": { + "SocketAddress": { + "address": "192.168.68.113", + "PortSpecifier": { + "PortValue": 8002 + } + } + } + }, + "health_check_config": { + "port_value": 8002 + } + } + } + } + ], + "LbConfig": null + } + ] + }, + "LbConfig": null + }, + { + "name": "_nas_camera", + "ClusterDiscoveryType": { + "Type": 0 + }, + "connect_timeout": { + "nanos": 200000000 + }, + "load_assignment": { + "cluster_name": "camera", + "endpoints": [ + { + "lb_endpoints": [ + { + "HostIdentifier": { + "Endpoint": { + "address": { + "Address": { + "SocketAddress": { + "address": "127.0.0.1", + "PortSpecifier": { + "PortValue": 9900 + } + } + } + }, + "health_check_config": { + "port_value": 9900 + } + } + } + } + ], + "LbConfig": null + } + ] + }, + "LbConfig": null + }, + { + "name": "_nas_audio", + "ClusterDiscoveryType": { + "Type": 0 + }, + "connect_timeout": { + "nanos": 200000000 + }, + "load_assignment": { + "cluster_name": "nas", + "endpoints": [ + { + "lb_endpoints": [ + { + "HostIdentifier": { + "Endpoint": { + "address": { + "Address": { + "SocketAddress": { + "address": "127.0.0.1", + "PortSpecifier": { + "PortValue": 8800 + } + } + } + }, + "health_check_config": { + "port_value": 8800 + } + } + } + } + ], + "LbConfig": null + } + ] + }, + "LbConfig": null + }, + { + "name": "_ai_ui_server", + "ClusterDiscoveryType": { + "Type": 1 + }, + "connect_timeout": { + "nanos": 200000000 + }, + "load_assignment": { + "cluster_name": "_ai_ui_server", + "endpoints": [ + { + "lb_endpoints": [ + { + "HostIdentifier": { + "Endpoint": { + "address": { + "Address": { + "SocketAddress": { + "address": "192.168.68.113", + "PortSpecifier": { + "PortValue": 8003 + } + } + } + }, + "health_check_config": { + "port_value": 8003 + } + } + } + } + ], + "LbConfig": null + } + ] + }, + "LbConfig": null + }, + { + "name": "_nas_video", + "ClusterDiscoveryType": { + "Type": 0 + }, + "connect_timeout": { + "nanos": 200000000 + }, + "load_assignment": { + "cluster_name": "nas", + "endpoints": [ + { + "lb_endpoints": [ + { + "HostIdentifier": { + "Endpoint": { + "address": { + "Address": { + "SocketAddress": { + "address": "127.0.0.1", + "PortSpecifier": { + "PortValue": 9007 + } + } + } + }, + "health_check_config": { + "port_value": 9007 + } + } + } + } + ], + "LbConfig": 
null + } + ] + }, + "LbConfig": null + }, + { + "name": "_ai_server", + "ClusterDiscoveryType": { + "Type": 1 + }, + "connect_timeout": { + "nanos": 200000000 + }, + "load_assignment": { + "cluster_name": "_ai_server", + "endpoints": [ + { + "lb_endpoints": [ + { + "HostIdentifier": { + "Endpoint": { + "address": { + "Address": { + "SocketAddress": { + "address": "192.168.68.113", + "PortSpecifier": { + "PortValue": 3000 + } + } + } + }, + "health_check_config": { + "port_value": 3000 + } + } + } + } + ], + "LbConfig": null + } + ] + }, + "LbConfig": null + }, + { + "name": "_auth_server", + "ClusterDiscoveryType": { + "Type": 1 + }, + "connect_timeout": { + "nanos": 200000000 + }, + "load_assignment": { + "cluster_name": "_auth_server", + "endpoints": [ + { + "lb_endpoints": [ + { + "HostIdentifier": { + "Endpoint": { + "address": { + "Address": { + "SocketAddress": { + "address": "192.168.68.113", + "PortSpecifier": { + "PortValue": 5557 + } + } + } + }, + "health_check_config": { + "port_value": 5556 + } + } + } + } + ], + "LbConfig": null + } + ] + }, + "LbConfig": null + }, + { + "name": "_k8s_router", + "ClusterDiscoveryType": { + "Type": 1 + }, + "connect_timeout": { + "seconds": 1 + }, + "load_assignment": { + "cluster_name": "nginx", + "endpoints": [ + { + "lb_endpoints": [ + { + "HostIdentifier": { + "Endpoint": { + "address": { + "Address": { + "SocketAddress": { + "address": "192.168.68.139", + "PortSpecifier": { + "PortValue": 32704 + } + } + } + }, + "health_check_config": { + "port_value": 32704 + } + } + } + } + ], + "LbConfig": null + } + ] + }, + "LbConfig": null + }, + { + "name": "_3d_printer_camera", + "ClusterDiscoveryType": { + "Type": 1 + }, + "connect_timeout": { + "seconds": 2 + }, + "load_assignment": { + "cluster_name": "camera", + "endpoints": [ + { + "lb_endpoints": [ + { + "HostIdentifier": { + "Endpoint": { + "address": { + "Address": { + "SocketAddress": { + "address": "octoprint", + "PortSpecifier": { + "PortValue": 8080 + } + } + } + }, + "health_check_config": { + "port_value": 8080 + } + } + } + } + ], + "LbConfig": null + } + ] + }, + "LbConfig": null + }, + { + "name": "_homeassistant_service", + "ClusterDiscoveryType": { + "Type": 1 + }, + "connect_timeout": { + "nanos": 200000000 + }, + "load_assignment": { + "cluster_name": "homeassistant_manager", + "endpoints": [ + { + "lb_endpoints": [ + { + "HostIdentifier": { + "Endpoint": { + "address": { + "Address": { + "SocketAddress": { + "address": "192.168.68.133", + "PortSpecifier": { + "PortValue": 8123 + } + } + } + }, + "health_check_config": { + "port_value": 8123 + } + } + } + } + ], + "LbConfig": null + } + ] + }, + "LbConfig": null + }, + { + "name": "_code_server", + "ClusterDiscoveryType": { + "Type": 0 + }, + "connect_timeout": { + "nanos": 200000000 + }, + "load_assignment": { + "cluster_name": "code_server", + "endpoints": [ + { + "lb_endpoints": [ + { + "HostIdentifier": { + "Endpoint": { + "address": { + "Address": { + "SocketAddress": { + "address": "192.168.68.113", + "PortSpecifier": { + "PortValue": 8080 + } + } + } + }, + "health_check_config": { + "port_value": 8080 + } + } + } + } + ], + "LbConfig": null + } + ] + }, + "LbConfig": null, + "transport_socket": { + "name": "envoy.transport_sockets.tls", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext", + "value": "CikaJwolCiMvZXRjL2NlcnRzL3Vwc3RyZWFtL3ZzY29kZS9yb290LmNydA==" + } + } + } + }, + { + "name": "_k8s_apiserver", + "ClusterDiscoveryType": { 
+ "Type": 1 + }, + "connect_timeout": { + "seconds": 1 + }, + "load_assignment": { + "cluster_name": "apiserver", + "endpoints": [ + { + "lb_endpoints": [ + { + "HostIdentifier": { + "Endpoint": { + "address": { + "Address": { + "SocketAddress": { + "address": "192.168.68.139", + "PortSpecifier": { + "PortValue": 16443 + } + } + } + }, + "health_check_config": { + "port_value": 16443 + } + } + } + } + ], + "LbConfig": null + } + ] + }, + "LbConfig": null, + "transport_socket": { + "name": "envoy.transport_sockets.tls", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext", + "value": "Ci0aKwopCicvZXRjL2NlcnRzL3Vwc3RyZWFtL2t1YmVybmV0ZXMvcm9vdC5jcnQ=" + } + } + } + }, + { + "name": "_acme_renewer", + "ClusterDiscoveryType": { + "Type": 0 + }, + "connect_timeout": { + "nanos": 200000000 + }, + "load_assignment": { + "cluster_name": "acme_renewer", + "endpoints": [ + { + "lb_endpoints": [ + { + "HostIdentifier": { + "Endpoint": { + "address": { + "Address": { + "SocketAddress": { + "address": "172.17.0.1", + "PortSpecifier": { + "PortValue": 8888 + } + } + } + }, + "health_check_config": { + "port_value": 8888 + } + } + } + } + ], + "LbConfig": null + } + ] + }, + "LbConfig": null + }, + { + "name": "_grafana_ui", + "ClusterDiscoveryType": { + "Type": 1 + }, + "connect_timeout": { + "nanos": 200000000 + }, + "load_assignment": { + "cluster_name": "_grafana_ui", + "endpoints": [ + { + "lb_endpoints": [ + { + "HostIdentifier": { + "Endpoint": { + "address": { + "Address": { + "SocketAddress": { + "address": "192.168.68.106", + "PortSpecifier": { + "PortValue": 3000 + } + } + } + }, + "health_check_config": { + "port_value": 3000 + } + } + } + } + ], + "LbConfig": null + } + ] + }, + "LbConfig": null + }, + { + "name": "_pcb_server", + "ClusterDiscoveryType": { + "Type": 1 + }, + "connect_timeout": { + "nanos": 200000000 + }, + "load_assignment": { + "cluster_name": "_pcb_server", + "endpoints": [ + { + "lb_endpoints": [ + { + "HostIdentifier": { + "Endpoint": { + "address": { + "Address": { + "SocketAddress": { + "address": "192.168.68.113", + "PortSpecifier": { + "PortValue": 8088 + } + } + } + }, + "health_check_config": { + "port_value": 8088 + } + } + } + } + ], + "LbConfig": null + } + ] + }, + "LbConfig": null + } + ], + "type.googleapis.com/envoy.config.listener.v3.Listener": [ + { + "name": "http_listener", + "address": { + "Address": { + "SocketAddress": { + "address": "0.0.0.0", + "PortSpecifier": { + "PortValue": 10000 + } + } + } + }, + "filter_chains": [ + { + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", + "value": "EgxpbmdyZXNzX2h0dHAqYQoZZW52b3kuZmlsdGVycy5odHRwLnJvdXRlciJECkJ0eXBlLmdvb2dsZWFwaXMuY29tL2Vudm95LmV4dGVuc2lvbnMuZmlsdGVycy5odHRwLnJvdXRlci52My5Sb3V0ZXIi6QEKGGluZ3Jlc3NfZ2VuZXJpY19pbnNlY3VyZRJPCg1odHRwX3RvX2h0dHBzEgEqGjAKHQobLy53ZWxsLWtub3duL2FjbWUtY2hhbGxlbmdlEg8KDV9hY21lX3JlbmV3ZXIaCQoDCgEvGgIgARJ8Cg52aWRlb19pbnNlY3VyZRIQdmlkZW8uamVyeGllLmNvbRIRdmlkZW8ubG9jYWw6MTAwMDAaMAodChsvLndlbGwta25vd24vYWNtZS1jaGFsbGVuZ2USDwoNX2FjbWVfcmVuZXdlchoTCgMKAS8SDAoKX25hc192aWRlbw==" + } + } + } + ] + } + ], + "ListenerSpecifier": null + }, + { + "name": "https_listener", + "address": { + "Address": { + "SocketAddress": { + "address": "0.0.0.0", + "PortSpecifier": { + "PortValue": 10001 + } + } + } + }, + "filter_chains": [ + { 
+ "filter_chain_match": { + "server_names": [ + "home.jerxie.com" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", + "value": "EgxpbmdyZXNzX2h0dHAqeAoWZW52b3kuZmlsdGVycy5odHRwLmx1YSJeCjx0eXBlLmdvb2dsZWFwaXMuY29tL2Vudm95LmV4dGVuc2lvbnMuZmlsdGVycy5odHRwLmx1YS52My5MdWESHgoccmVxdWlyZSAiL2V0Yy9lbnZveS9maWx0ZXIiCiphChllbnZveS5maWx0ZXJzLmh0dHAucm91dGVyIkQKQnR5cGUuZ29vZ2xlYXBpcy5jb20vZW52b3kuZXh0ZW5zaW9ucy5maWx0ZXJzLmh0dHAucm91dGVyLnYzLlJvdXRlcroBCwoJd2Vic29ja2V0wgEDCKwC4gEDCKwC8gECCAGIAgEiQhJACgxob21lX3NlcnZpY2USD2hvbWUuamVyeGllLmNvbRofCgMKAS8SGAoWX2hvbWVhc3Npc3RhbnRfc2VydmljZQ==" + } + } + } + ], + "transport_socket": { + "name": "envoy.transport_sockets.tls", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "value": "Cm4SbAo1CjMvZXRjL2NlcnRzL2Rvd25zdHJlYW0vaG9tZS5qZXJ4aWUuY29tL2Z1bGxjaGFpbi5wZW0SMwoxL2V0Yy9jZXJ0cy9kb3duc3RyZWFtL2hvbWUuamVyeGllLmNvbS9wcml2a2V5LnBlbQ==" + } + } + } + }, + { + "filter_chain_match": { + "server_names": [ + "docker.jerxie.com", + "docker.local" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", + "value": "EgxpbmdyZXNzX2h0dHAqYQoZZW52b3kuZmlsdGVycy5odHRwLnJvdXRlciJECkJ0eXBlLmdvb2dsZWFwaXMuY29tL2Vudm95LmV4dGVuc2lvbnMuZmlsdGVycy5odHRwLnJvdXRlci52My5Sb3V0ZXIiQhJACg5kb2NrZXJfc2VydmljZRIRZG9ja2VyLmplcnhpZS5jb20aGwoDCgEvEhRCAAoQX2RvY2tlcl9yZWdpc3RyeQ==" + } + } + } + ], + "transport_socket": { + "name": "envoy.transport_sockets.tls", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "value": "CnIScAo3CjUvZXRjL2NlcnRzL2Rvd25zdHJlYW0vZG9ja2VyLmplcnhpZS5jb20vZnVsbGNoYWluLnBlbRI1CjMvZXRjL2NlcnRzL2Rvd25zdHJlYW0vZG9ja2VyLmplcnhpZS5jb20vcHJpdmtleS5wZW0=" + } + } + } + }, + { + "filter_chain_match": { + "server_names": [ + "nas.jerxie.com", + "nas" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", + "value": "EgxpbmdyZXNzX2h0dHAqYQoZZW52b3kuZmlsdGVycy5odHRwLnJvdXRlciJECkJ0eXBlLmdvb2dsZWFwaXMuY29tL2Vudm95LmV4dGVuc2lvbnMuZmlsdGVycy5odHRwLnJvdXRlci52My5Sb3V0ZXK6AQsKCXdlYnNvY2tldCJGEkQKDmRvY2tlcl9zZXJ2aWNlEg5uYXMuamVyeGllLmNvbRIJbmFzOjEwMDAxGhcKAwoBLxIQQgAKDF9uYXNfc2VydmljZQ==" + } + } + } + ], + "transport_socket": { + "name": "envoy.transport_sockets.tls", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "value": "CmwSago0CjIvZXRjL2NlcnRzL2Rvd25zdHJlYW0vbmFzLmplcnhpZS5jb20vZnVsbGNoYWluLnBlbRIyCjAvZXRjL2NlcnRzL2Rvd25zdHJlYW0vbmFzLmplcnhpZS5jb20vcHJpdmtleS5wZW0=" + } + } + } + }, + { + "filter_chain_match": { + "server_names": [ + "video.jerxie.com" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", + "value": 
"EgxpbmdyZXNzX2h0dHAqYQoZZW52b3kuZmlsdGVycy5odHRwLnJvdXRlciJECkJ0eXBlLmdvb2dsZWFwaXMuY29tL2Vudm95LmV4dGVuc2lvbnMuZmlsdGVycy5odHRwLnJvdXRlci52My5Sb3V0ZXIiOxI5Cg5kb2NrZXJfc2VydmljZRIQdmlkZW8uamVyeGllLmNvbRoVCgMKAS8SDkIACgpfbmFzX3ZpZGVv" + } + } + } + ], + "transport_socket": { + "name": "envoy.transport_sockets.tls", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "value": "CnASbgo2CjQvZXRjL2NlcnRzL2Rvd25zdHJlYW0vdmlkZW8uamVyeGllLmNvbS9mdWxsY2hhaW4ucGVtEjQKMi9ldGMvY2VydHMvZG93bnN0cmVhbS92aWRlby5qZXJ4aWUuY29tL3ByaXZrZXkucGVt" + } + } + } + }, + { + "filter_chain_match": { + "server_names": [ + "plex.jerxie.com" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", + "value": "EgxpbmdyZXNzX2h0dHAqYQoZZW52b3kuZmlsdGVycy5odHRwLnJvdXRlciJECkJ0eXBlLmdvb2dsZWFwaXMuY29tL2Vudm95LmV4dGVuc2lvbnMuZmlsdGVycy5odHRwLnJvdXRlci52My5Sb3V0ZXIiORI3CgtwbGV4X3NlcnZlchIPcGxleC5qZXJ4aWUuY29tGhcKAwoBLxIQQgAKDF9wbGV4X3NlcnZlcg==" + } + } + } + ], + "transport_socket": { + "name": "envoy.transport_sockets.tls", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "value": "Cm4SbAo1CjMvZXRjL2NlcnRzL2Rvd25zdHJlYW0vcGxleC5qZXJ4aWUuY29tL2Z1bGxjaGFpbi5wZW0SMwoxL2V0Yy9jZXJ0cy9kb3duc3RyZWFtL3BsZXguamVyeGllLmNvbS9wcml2a2V5LnBlbQ==" + } + } + } + }, + { + "filter_chain_match": { + "server_names": [ + "kubernetes.jerxie.com", + "kubernetes.local" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", + "value": "EgxpbmdyZXNzX2h0dHAqYQoZZW52b3kuZmlsdGVycy5odHRwLnJvdXRlciJECkJ0eXBlLmdvb2dsZWFwaXMuY29tL2Vudm95LmV4dGVuc2lvbnMuZmlsdGVycy5odHRwLnJvdXRlci52My5Sb3V0ZXLyAQIIAYgCASKPARKMAQoSa3ViZXJuZXRlc19zZXJ2aWNlEhVrdWJlcm5ldGVzLmplcnhpZS5jb20aIwoMEgovYXBpc2VydmVyEhMqAS8KDl9rOHNfYXBpc2VydmVyGiQKDQoLL2FwaXNlcnZlci8SEyoBLwoOX2s4c19hcGlzZXJ2ZXIaFAoDCgEvEg0KC19rOHNfcm91dGVy" + } + } + } + ], + "transport_socket": { + "name": "envoy.transport_sockets.tls", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "value": "CnoSeAo7CjkvZXRjL2NlcnRzL2Rvd25zdHJlYW0va3ViZXJuZXRlcy5qZXJ4aWUuY29tL2Z1bGxjaGFpbi5wZW0SOQo3L2V0Yy9jZXJ0cy9kb3duc3RyZWFtL2t1YmVybmV0ZXMuamVyeGllLmNvbS9wcml2a2V5LnBlbQ==" + } + } + } + }, + { + "filter_chain_match": { + "server_names": [ + "kubernetes.dashboard.jerxie.com", + "kubernetes.dashboard.local" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", + "value": 
"EgxpbmdyZXNzX2h0dHAq/gIKGWVudm95LmZpbHRlcnMuaHR0cC5vYXV0aDIi4AIKQnR5cGUuZ29vZ2xlYXBpcy5jb20vZW52b3kuZXh0ZW5zaW9ucy5maWx0ZXJzLmh0dHAub2F1dGgyLnYzLk9BdXRoMhKZAgqWAgopChVhdXRoLmplcnhpZS5jb20vdG9rZW4aAggDEgxfYXV0aF9zZXJ2ZXISHGh0dHBzOi8vYXV0aC5qZXJ4aWUuY29tL2F1dGgaZgoUa3ViZXJuZXRlcy1kYXNoYm9hcmQSJwoFdG9rZW4SHgocL2V0Yy9lbnZveS90b2tlbi1zZWNyZXQueWFtbBolCgRobWFjEh0KGy9ldGMvZW52b3kvaG1hYy1zZWNyZXQueWFtbCI1JVJFUSh4LWZvcndhcmRlZC1wcm90byklOi8vJVJFUSg6YXV0aG9yaXR5KSUvY2FsbGJhY2sqDQoLCgkvY2FsbGJhY2syDAoKCggvc2lnbm91dDgBSgZvcGVuaWRKBWVtYWlsKmEKGWVudm95LmZpbHRlcnMuaHR0cC5yb3V0ZXIiRApCdHlwZS5nb29nbGVhcGlzLmNvbS9lbnZveS5leHRlbnNpb25zLmZpbHRlcnMuaHR0cC5yb3V0ZXIudjMuUm91dGVy8gECCAGIAgEiVxJVChxrdWJlcm5ldGVzX2Rhc2hib2FyZF9zZXJ2aWNlEh9rdWJlcm5ldGVzLmRhc2hib2FyZC5qZXJ4aWUuY29tGhQKAwoBLxINCgtfazhzX3JvdXRlcg==" + } + } + } + ], + "transport_socket": { + "name": "envoy.transport_sockets.tls", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "value": "Co8BEowBCkUKQy9ldGMvY2VydHMvZG93bnN0cmVhbS9rdWJlcm5ldGVzLmRhc2hib2FyZC5qZXJ4aWUuY29tL2Z1bGxjaGFpbi5wZW0SQwpBL2V0Yy9jZXJ0cy9kb3duc3RyZWFtL2t1YmVybmV0ZXMuZGFzaGJvYXJkLmplcnhpZS5jb20vcHJpdmtleS5wZW0=" + } + } + } + }, + { + "filter_chain_match": { + "server_names": [ + "blog.jerxie.com" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", + "value": "EgxpbmdyZXNzX2h0dHAqYQoZZW52b3kuZmlsdGVycy5odHRwLnJvdXRlciJECkJ0eXBlLmdvb2dsZWFwaXMuY29tL2Vudm95LmV4dGVuc2lvbnMuZmlsdGVycy5odHRwLnJvdXRlci52My5Sb3V0ZXIiQhJAChdrdWJlcm5ldGVzX2Jsb2dfc2VydmljZRIPYmxvZy5qZXJ4aWUuY29tGhQKAwoBLxINCgtfazhzX3JvdXRlcg==" + } + } + } + ], + "transport_socket": { + "name": "envoy.transport_sockets.tls", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "value": "Cm4SbAo1CjMvZXRjL2NlcnRzL2Rvd25zdHJlYW0vYmxvZy5qZXJ4aWUuY29tL2Z1bGxjaGFpbi5wZW0SMwoxL2V0Yy9jZXJ0cy9kb3duc3RyZWFtL2Jsb2cuamVyeGllLmNvbS9wcml2a2V5LnBlbQ==" + } + } + } + }, + { + "filter_chain_match": { + "server_names": [ + "argocd.jerxie.com" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", + "value": "EgxpbmdyZXNzX2h0dHAqYQoZZW52b3kuZmlsdGVycy5odHRwLnJvdXRlciJECkJ0eXBlLmdvb2dsZWFwaXMuY29tL2Vudm95LmV4dGVuc2lvbnMuZmlsdGVycy5odHRwLnJvdXRlci52My5Sb3V0ZXIiRBJCChdrdWJlcm5ldGVzX2Jsb2dfc2VydmljZRIRYXJnb2NkLmplcnhpZS5jb20aFAoDCgEvEg0KC19rOHNfcm91dGVy" + } + } + } + ], + "transport_socket": { + "name": "envoy.transport_sockets.tls", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "value": "CnIScAo3CjUvZXRjL2NlcnRzL2Rvd25zdHJlYW0vYXJnb2NkLmplcnhpZS5jb20vZnVsbGNoYWluLnBlbRI1CjMvZXRjL2NlcnRzL2Rvd25zdHJlYW0vYXJnb2NkLmplcnhpZS5jb20vcHJpdmtleS5wZW0=" + } + } + } + }, + { + "filter_chain_match": { + "server_names": [ + "meet.jerxie.com" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", + "value": 
"EgxpbmdyZXNzX2h0dHAqYQoZZW52b3kuZmlsdGVycy5odHRwLnJvdXRlciJECkJ0eXBlLmdvb2dsZWFwaXMuY29tL2Vudm95LmV4dGVuc2lvbnMuZmlsdGVycy5odHRwLnJvdXRlci52My5Sb3V0ZXK6AQsKCXdlYnNvY2tldMIBAPIBAggBiAIBIjcSNQoMbWVldF9zZXJ2aWNlEg9tZWV0LmplcnhpZS5jb20aFAoDCgEvEg0KC19rOHNfcm91dGVy" + } + } + } + ], + "transport_socket": { + "name": "envoy.transport_sockets.tls", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "value": "Cm4SbAo1CjMvZXRjL2NlcnRzL2Rvd25zdHJlYW0vbWVldC5qZXJ4aWUuY29tL2Z1bGxjaGFpbi5wZW0SMwoxL2V0Yy9jZXJ0cy9kb3duc3RyZWFtL21lZXQuamVyeGllLmNvbS9wcml2a2V5LnBlbQ==" + } + } + } + }, + { + "filter_chain_match": { + "server_names": [ + "audio.jerxie.com", + "audio.local" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", + "value": "EgxpbmdyZXNzX2h0dHAqYQoZZW52b3kuZmlsdGVycy5odHRwLnJvdXRlciJECkJ0eXBlLmdvb2dsZWFwaXMuY29tL2Vudm95LmV4dGVuc2lvbnMuZmlsdGVycy5odHRwLnJvdXRlci52My5Sb3V0ZXIiRhJECg5kb2NrZXJfc2VydmljZRIQYXVkaW8uamVyeGllLmNvbRILYXVkaW8ubG9jYWwaEwoDCgEvEgwKCl9uYXNfYXVkaW8=" + } + } + } + ], + "transport_socket": { + "name": "envoy.transport_sockets.tls", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "value": "CnASbgo2CjQvZXRjL2NlcnRzL2Rvd25zdHJlYW0vYXVkaW8uamVyeGllLmNvbS9mdWxsY2hhaW4ucGVtEjQKMi9ldGMvY2VydHMvZG93bnN0cmVhbS9hdWRpby5qZXJ4aWUuY29tL3ByaXZrZXkucGVt" + } + } + } + }, + { + "filter_chain_match": { + "server_names": [ + "code.jerxie.com" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", + "value": 
"EgxpbmdyZXNzX2h0dHAq9QIKGWVudm95LmZpbHRlcnMuaHR0cC5vYXV0aDIi1wIKQnR5cGUuZ29vZ2xlYXBpcy5jb20vZW52b3kuZXh0ZW5zaW9ucy5maWx0ZXJzLmh0dHAub2F1dGgyLnYzLk9BdXRoMhKQAgqNAgopChVhdXRoLmplcnhpZS5jb20vdG9rZW4aAggDEgxfYXV0aF9zZXJ2ZXISHGh0dHBzOi8vYXV0aC5qZXJ4aWUuY29tL2F1dGgaXQoLY29kZS1zZXJ2ZXISJwoFdG9rZW4SHgocL2V0Yy9lbnZveS90b2tlbi1zZWNyZXQueWFtbBolCgRobWFjEh0KGy9ldGMvZW52b3kvaG1hYy1zZWNyZXQueWFtbCI1JVJFUSh4LWZvcndhcmRlZC1wcm90byklOi8vJVJFUSg6YXV0aG9yaXR5KSUvY2FsbGJhY2sqDQoLCgkvY2FsbGJhY2syDAoKCggvc2lnbm91dDgBSgZvcGVuaWRKBWVtYWlsKoYCChxlbnZveS5maWx0ZXJzLmh0dHAuand0X2F1dGhuIuUBClB0eXBlLmdvb2dsZWFwaXMuY29tL2Vudm95LmV4dGVuc2lvbnMuZmlsdGVycy5odHRwLmp3dF9hdXRobi52My5Kd3RBdXRoZW50aWNhdGlvbhKQAQp6Cglwcm92aWRlcjESbTIYCg1BdXRob3JpemF0aW9uEgdCZWFyZXIgSgtqd3RfcGF5bG9hZGoLQmVhcmVyVG9rZW4aNwowChxodHRwczovL2F1dGguamVyeGllLmNvbS9rZXlzGgIIBRIMX2F1dGhfc2VydmVyEgMI2AQSEgoDCgEvEgsKCXByb3ZpZGVyMSqeCQoWZW52b3kuZmlsdGVycy5odHRwLmx1YSKDCQo8dHlwZS5nb29nbGVhcGlzLmNvbS9lbnZveS5leHRlbnNpb25zLmZpbHRlcnMuaHR0cC5sdWEudjMuTHVhEsIICr8IZW1haWwgPSAiIgpmdW5jdGlvbiBlbnZveV9vbl9yZXF1ZXN0KHJlcXVlc3RfaGFuZGxlKQogIGVtYWlsID0gIiIKICBsb2NhbCBtZXRhID0gcmVxdWVzdF9oYW5kbGU6c3RyZWFtSW5mbygpOmR5bmFtaWNNZXRhZGF0YSgpCiAgZm9yIGtleSwgdmFsdWUgaW4gcGFpcnMobWV0YTpnZXQoImVudm95LmZpbHRlcnMuaHR0cC5qd3RfYXV0aG4iKSkgZG8KICAgIGlmIGtleSA9PSAiand0X3BheWxvYWQiIHRoZW4KICAgICAgZm9yIGssIHYgaW4gcGFpcnModmFsdWUpIGRvCiAgICAgICAgaWYgayA9PSAiZW1haWwiIHRoZW4KICAgICAgICAgIHJlcXVlc3RfaGFuZGxlOmxvZ0luZm8oImxvZ2luIGNvZGVzZXJ2ZXI6ICIgLi52KQogICAgICAgICAgZW1haWwgPSB2CiAgICAgICAgZW5kCiAgICAgIGVuZAogICAgZW5kCiAgZW5kCmVuZAoKZnVuY3Rpb24gZW52b3lfb25fcmVzcG9uc2UocmVzcG9uc2VfaGFuZGxlKQogIGlmIGVtYWlsIH49IiIgYW5kIGVtYWlsIH49ICJheGlleWFuZ2JAZ21haWwuY29tIiB0aGVuCiAgICByZXNwb25zZV9oYW5kbGU6bG9nSW5mbygiR290IHVuYXV0aG9yaXplZCB1c2VyLCByZXR1cm4gNDAzIGZvciB1c2VyICIgLi5lbWFpbCkKICAgIHJlc3BvbnNlX2hhbmRsZTpoZWFkZXJzKCk6YWRkKCJzZXQtY29va2llIiwgIkJlYXJlclRva2VuPWRlbGV0ZWQ7IHBhdGg9LzsgZXhwaXJlcz1UaHUsIDAxIEphbiAxOTcwIDAwOjAwOjAwIEdNVCIpCiAgICByZXNwb25zZV9oYW5kbGU6aGVhZGVycygpOmFkZCgic2V0LWNvb2tpZSIsICJPYXV0aEhNQUM9ZGVsZXRlZDsgcGF0aD0vOyBleHBpcmVzPVRodSwgMDEgSmFuIDE5NzAgMDA6MDA6MDAgR01UIikKICAgIHJlc3BvbnNlX2hhbmRsZTpoZWFkZXJzKCk6YWRkKCJzZXQtY29va2llIiwgIklkVG9rZW49ZGVsZXRlZDsgcGF0aD0vOyBleHBpcmVzPVRodSwgMDEgSmFuIDE5NzAgMDA6MDA6MDAgR01UIikKICAgIHJlc3BvbnNlX2hhbmRsZTpoZWFkZXJzKCk6YWRkKCJzZXQtY29va2llIiwgIk9hdXRoRXhwaXJlcz1kZWxldGVkOyBwYXRoPS87IGV4cGlyZXM9VGh1LCAwMSBKYW4gMTk3MCAwMDowMDowMCBHTVQiKQogIGVuZAogIGVtYWlsID0gIiIKZW5kCiphChllbnZveS5maWx0ZXJzLmh0dHAucm91dGVyIkQKQnR5cGUuZ29vZ2xlYXBpcy5jb20vZW52b3kuZXh0ZW5zaW9ucy5maWx0ZXJzLmh0dHAucm91dGVyLnYzLlJvdXRlcroBCwoJd2Vic29ja2V0IjgSNgoMY29kZV9zZXJ2aWNlEg9jb2RlLmplcnhpZS5jb20aFQoDCgEvEg4KDF9jb2RlX3NlcnZlcg==" + } + } + } + ], + "transport_socket": { + "name": "envoy.transport_sockets.tls", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "value": "Cm4SbAo1CjMvZXRjL2NlcnRzL2Rvd25zdHJlYW0vY29kZS5qZXJ4aWUuY29tL2Z1bGxjaGFpbi5wZW0SMwoxL2V0Yy9jZXJ0cy9kb3duc3RyZWFtL2NvZGUuamVyeGllLmNvbS9wcml2a2V5LnBlbQ==" + } + } + } + }, + { + "filter_chain_match": { + "server_names": [ + "photo.jerxie.com" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", + "value": 
"EgxpbmdyZXNzX2h0dHAqYQoZZW52b3kuZmlsdGVycy5odHRwLnJvdXRlciJECkJ0eXBlLmdvb2dsZWFwaXMuY29tL2Vudm95LmV4dGVuc2lvbnMuZmlsdGVycy5odHRwLnJvdXRlci52My5Sb3V0ZXIiOhI4Cg1waG90b19zZXJ2aWNlEhBwaG90by5qZXJ4aWUuY29tGhUKAwoBLxIOQgAKCl9uYXNfcGhvdG8=" + } + } + } + ], + "transport_socket": { + "name": "envoy.transport_sockets.tls", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "value": "CnASbgo2CjQvZXRjL2NlcnRzL2Rvd25zdHJlYW0vcGhvdG8uamVyeGllLmNvbS9mdWxsY2hhaW4ucGVtEjQKMi9ldGMvY2VydHMvZG93bnN0cmVhbS9waG90by5qZXJ4aWUuY29tL3ByaXZrZXkucGVt" + } + } + } + }, + { + "filter_chain_match": { + "server_names": [ + "password.jerxie.com" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", + "value": "EgxpbmdyZXNzX2h0dHAqYQoZZW52b3kuZmlsdGVycy5odHRwLnJvdXRlciJECkJ0eXBlLmdvb2dsZWFwaXMuY29tL2Vudm95LmV4dGVuc2lvbnMuZmlsdGVycy5odHRwLnJvdXRlci52My5Sb3V0ZXIiRhJEChBwYXNzd29yZF9zZXJ2aWNlEhNwYXNzd29yZC5qZXJ4aWUuY29tGhsKAwoBLxIUChJfYml0d2FyZGVuX3NlcnZpY2U=" + } + } + } + ], + "transport_socket": { + "name": "envoy.transport_sockets.tls", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "value": "CnYSdAo5CjcvZXRjL2NlcnRzL2Rvd25zdHJlYW0vcGFzc3dvcmQuamVyeGllLmNvbS9mdWxsY2hhaW4ucGVtEjcKNS9ldGMvY2VydHMvZG93bnN0cmVhbS9wYXNzd29yZC5qZXJ4aWUuY29tL3ByaXZrZXkucGVt" + } + } + } + }, + { + "filter_chain_match": { + "server_names": [ + "gitbucket.jerxie.com" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", + "value": "EgxpbmdyZXNzX2h0dHAqYQoZZW52b3kuZmlsdGVycy5odHRwLnJvdXRlciJECkJ0eXBlLmdvb2dsZWFwaXMuY29tL2Vudm95LmV4dGVuc2lvbnMuZmlsdGVycy5odHRwLnJvdXRlci52My5Sb3V0ZXIiQRI/ChFnaXRidWNrZXRfc2VydmljZRIUZ2l0YnVja2V0LmplcnhpZS5jb20aFAoDCgEvEg0KC19naXRfYnVja2V0" + } + } + } + ], + "transport_socket": { + "name": "envoy.transport_sockets.tls", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "value": "CngSdgo6CjgvZXRjL2NlcnRzL2Rvd25zdHJlYW0vZ2l0YnVja2V0LmplcnhpZS5jb20vZnVsbGNoYWluLnBlbRI4CjYvZXRjL2NlcnRzL2Rvd25zdHJlYW0vZ2l0YnVja2V0LmplcnhpZS5jb20vcHJpdmtleS5wZW0=" + } + } + } + }, + { + "filter_chain_match": { + "server_names": [ + "printer.jerxie.com", + "printer.local" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", + "value": 
"EgxpbmdyZXNzX2h0dHAq+gIKGWVudm95LmZpbHRlcnMuaHR0cC5vYXV0aDIi3AIKQnR5cGUuZ29vZ2xlYXBpcy5jb20vZW52b3kuZXh0ZW5zaW9ucy5maWx0ZXJzLmh0dHAub2F1dGgyLnYzLk9BdXRoMhKVAgqSAgopChVhdXRoLmplcnhpZS5jb20vdG9rZW4aAggDEgxfYXV0aF9zZXJ2ZXISHGh0dHBzOi8vYXV0aC5qZXJ4aWUuY29tL2F1dGgaYgoQb2N0b3ByaW50LXBvcnRhbBInCgV0b2tlbhIeChwvZXRjL2Vudm95L3Rva2VuLXNlY3JldC55YW1sGiUKBGhtYWMSHQobL2V0Yy9lbnZveS9obWFjLXNlY3JldC55YW1sIjUlUkVRKHgtZm9yd2FyZGVkLXByb3RvKSU6Ly8lUkVRKDphdXRob3JpdHkpJS9jYWxsYmFjayoNCgsKCS9jYWxsYmFjazIMCgoKCC9zaWdub3V0OAFKBm9wZW5pZEoFZW1haWwq+QEKHGVudm95LmZpbHRlcnMuaHR0cC5qd3RfYXV0aG4i2AEKUHR5cGUuZ29vZ2xlYXBpcy5jb20vZW52b3kuZXh0ZW5zaW9ucy5maWx0ZXJzLmh0dHAuand0X2F1dGhuLnYzLkp3dEF1dGhlbnRpY2F0aW9uEoMBCm0KCXByb3ZpZGVyMRJgMhgKDUF1dGhvcml6YXRpb24SB0JlYXJlciBKC2p3dF9wYXlsb2FkGjcKMAocaHR0cHM6Ly9hdXRoLmplcnhpZS5jb20va2V5cxoCCAUSDF9hdXRoX3NlcnZlchIDCNgEEhIKAwoBLxILCglwcm92aWRlcjEq0QkKFmVudm95LmZpbHRlcnMuaHR0cC5sdWEitgkKPHR5cGUuZ29vZ2xlYXBpcy5jb20vZW52b3kuZXh0ZW5zaW9ucy5maWx0ZXJzLmh0dHAubHVhLnYzLkx1YRL1CAryCGVtYWlsID0gIiIKZnVuY3Rpb24gZW52b3lfb25fcmVxdWVzdChyZXF1ZXN0X2hhbmRsZSkKICBlbWFpbCA9ICIiCiAgbG9jYWwgbWV0YSA9IHJlcXVlc3RfaGFuZGxlOnN0cmVhbUluZm8oKTpkeW5hbWljTWV0YWRhdGEoKQogIGZvciBrZXksIHZhbHVlIGluIHBhaXJzKG1ldGE6Z2V0KCJlbnZveS5maWx0ZXJzLmh0dHAuand0X2F1dGhuIikpIGRvCiAgICBpZiBrZXkgPT0gImp3dF9wYXlsb2FkIiB0aGVuCiAgICAgIGZvciBrLCB2IGluIHBhaXJzKHZhbHVlKSBkbwogICAgICAgIGlmIGsgPT0gImVtYWlsIiB0aGVuCiAgICAgICAgICBwcmludCgibG9naW4gb2N0b3ByaW50OiAiLi52KQogICAgICAgICAgZW1haWwgPSB2CiAgICAgICAgICByZXF1ZXN0X2hhbmRsZTpoZWFkZXJzKCk6YWRkKCJFTlZPWV9BVVRIRU5USUNBVEVEX1VTRVIiLCB2KQogICAgICAgIGVuZAogICAgICBlbmQKICAgIGVuZAogIGVuZAplbmQKCmZ1bmN0aW9uIGVudm95X29uX3Jlc3BvbnNlKHJlc3BvbnNlX2hhbmRsZSkKICBpZiBlbWFpbCB+PSIiIGFuZCBlbWFpbCB+PSAiYXhpZXlhbmdiQGdtYWlsLmNvbSIgdGhlbgogICAgcmVzcG9uc2VfaGFuZGxlOmxvZ0luZm8oIkdvdCB1bmF1dGhvcml6ZWQgdXNlciwgcmV0dXJuIDQwMyBmb3IgdXNlciAiIC4uZW1haWwpCiAgICByZXNwb25zZV9oYW5kbGU6aGVhZGVycygpOmFkZCgic2V0LWNvb2tpZSIsICJCZWFyZXJUb2tlbj1kZWxldGVkOyBwYXRoPS87IGV4cGlyZXM9VGh1LCAwMSBKYW4gMTk3MCAwMDowMDowMCBHTVQiKQogICAgcmVzcG9uc2VfaGFuZGxlOmhlYWRlcnMoKTphZGQoInNldC1jb29raWUiLCAiT2F1dGhITUFDPWRlbGV0ZWQ7IHBhdGg9LzsgZXhwaXJlcz1UaHUsIDAxIEphbiAxOTcwIDAwOjAwOjAwIEdNVCIpCiAgICByZXNwb25zZV9oYW5kbGU6aGVhZGVycygpOmFkZCgic2V0LWNvb2tpZSIsICJJZFRva2VuPWRlbGV0ZWQ7IHBhdGg9LzsgZXhwaXJlcz1UaHUsIDAxIEphbiAxOTcwIDAwOjAwOjAwIEdNVCIpCiAgICByZXNwb25zZV9oYW5kbGU6aGVhZGVycygpOmFkZCgic2V0LWNvb2tpZSIsICJPYXV0aEV4cGlyZXM9ZGVsZXRlZDsgcGF0aD0vOyBleHBpcmVzPVRodSwgMDEgSmFuIDE5NzAgMDA6MDA6MDAgR01UIikKICBlbmQKICBlbWFpbCA9ICIiCmVuZAoqYQoZZW52b3kuZmlsdGVycy5odHRwLnJvdXRlciJECkJ0eXBlLmdvb2dsZWFwaXMuY29tL2Vudm95LmV4dGVuc2lvbnMuZmlsdGVycy5odHRwLnJvdXRlci52My5Sb3V0ZXK6AQsKCXdlYnNvY2tldMIBAPIBAggBiAIBInASbgoPcHJpbnRlcl9zZXJ2aWNlEhJwcmludGVyLmplcnhpZS5jb20aKQoJCgcvd2ViY2FtEhwqAS+iAgISAAoSXzNkX3ByaW50ZXJfY2FtZXJhGhwKAwoBLxIVChNfM2RfcHJpbnRlcl9jb25zb2xl" + } + } + } + ], + "transport_socket": { + "name": "envoy.transport_sockets.tls", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "value": "CnQScgo4CjYvZXRjL2NlcnRzL2Rvd25zdHJlYW0vcHJpbnRlci5qZXJ4aWUuY29tL2Z1bGxjaGFpbi5wZW0SNgo0L2V0Yy9jZXJ0cy9kb3duc3RyZWFtL3ByaW50ZXIuamVyeGllLmNvbS9wcml2a2V5LnBlbQ==" + } + } + } + }, + { + "filter_chain_match": { + "server_names": [ + "camera.jerxie.com" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "ConfigType": { + "TypedConfig": { + "type_url": 
"type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", + "value": "EgxpbmdyZXNzX2h0dHAqYQoZZW52b3kuZmlsdGVycy5odHRwLnJvdXRlciJECkJ0eXBlLmdvb2dsZWFwaXMuY29tL2Vudm95LmV4dGVuc2lvbnMuZmlsdGVycy5odHRwLnJvdXRlci52My5Sb3V0ZXK6AQsKCXdlYnNvY2tldCI7EjkKDmNhbWVyYV9zZXJ2aWNlEhFjYW1lcmEuamVyeGllLmNvbRoUCgMKAS8SDQoLX25hc19jYW1lcmE=" + } + } + } + ], + "transport_socket": { + "name": "envoy.transport_sockets.tls", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "value": "CnIScAo3CjUvZXRjL2NlcnRzL2Rvd25zdHJlYW0vY2FtZXJhLmplcnhpZS5jb20vZnVsbGNoYWluLnBlbRI1CjMvZXRjL2NlcnRzL2Rvd25zdHJlYW0vY2FtZXJhLmplcnhpZS5jb20vcHJpdmtleS5wZW0=" + } + } + } + }, + { + "filter_chain_match": { + "server_names": [ + "note.jerxie.com" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", + "value": "EgxpbmdyZXNzX2h0dHAqYQoZZW52b3kuZmlsdGVycy5odHRwLnJvdXRlciJECkJ0eXBlLmdvb2dsZWFwaXMuY29tL2Vudm95LmV4dGVuc2lvbnMuZmlsdGVycy5odHRwLnJvdXRlci52My5Sb3V0ZXIiNRIzCgxub3RlX3NlcnZpY2USD25vdGUuamVyeGllLmNvbRoSCgMKAS8SCwoJX25hc19ub3Rl" + } + } + } + ], + "transport_socket": { + "name": "envoy.transport_sockets.tls", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "value": "Cm4SbAo1CjMvZXRjL2NlcnRzL2Rvd25zdHJlYW0vbm90ZS5qZXJ4aWUuY29tL2Z1bGxjaGFpbi5wZW0SMwoxL2V0Yy9jZXJ0cy9kb3duc3RyZWFtL25vdGUuamVyeGllLmNvbS9wcml2a2V5LnBlbQ==" + } + } + } + }, + { + "filter_chain_match": { + "server_names": [ + "container.jerxie.com" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", + "value": "EgxpbmdyZXNzX2h0dHAqYQoZZW52b3kuZmlsdGVycy5odHRwLnJvdXRlciJECkJ0eXBlLmdvb2dsZWFwaXMuY29tL2Vudm95LmV4dGVuc2lvbnMuZmlsdGVycy5odHRwLnJvdXRlci52My5Sb3V0ZXK6AQsKCXdlYnNvY2tldCJDEkEKEWNvbnRhaW5lcl9zZXJ2aWNlEhRjb250YWluZXIuamVyeGllLmNvbRoWCgMKAS8SDwoNX3BvcnRhaW5lcl91aQ==" + } + } + } + ], + "transport_socket": { + "name": "envoy.transport_sockets.tls", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "value": "CngSdgo6CjgvZXRjL2NlcnRzL2Rvd25zdHJlYW0vY29udGFpbmVyLmplcnhpZS5jb20vZnVsbGNoYWluLnBlbRI4CjYvZXRjL2NlcnRzL2Rvd25zdHJlYW0vY29udGFpbmVyLmplcnhpZS5jb20vcHJpdmtleS5wZW0=" + } + } + } + }, + { + "filter_chain_match": { + "server_names": [ + "grafana.jerxie.com" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", + "value": "EgxpbmdyZXNzX2h0dHAqYQoZZW52b3kuZmlsdGVycy5odHRwLnJvdXRlciJECkJ0eXBlLmdvb2dsZWFwaXMuY29tL2Vudm95LmV4dGVuc2lvbnMuZmlsdGVycy5odHRwLnJvdXRlci52My5Sb3V0ZXK6AQsKCXdlYnNvY2tldCI9EjsKD2dyYWZhbmFfc2VydmljZRISZ3JhZmFuYS5qZXJ4aWUuY29tGhQKAwoBLxINCgtfZ3JhZmFuYV91aQ==" + } + } + } + ], + "transport_socket": { + "name": "envoy.transport_sockets.tls", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + 
"value": "CnQScgo4CjYvZXRjL2NlcnRzL2Rvd25zdHJlYW0vZ3JhZmFuYS5qZXJ4aWUuY29tL2Z1bGxjaGFpbi5wZW0SNgo0L2V0Yy9jZXJ0cy9kb3duc3RyZWFtL2dyYWZhbmEuamVyeGllLmNvbS9wcml2a2V5LnBlbQ==" + } + } + } + }, + { + "filter_chain_match": { + "server_names": [ + "auth.jerxie.com" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", + "value": "EgxpbmdyZXNzX2h0dHAqYQoZZW52b3kuZmlsdGVycy5odHRwLnJvdXRlciJECkJ0eXBlLmdvb2dsZWFwaXMuY29tL2Vudm95LmV4dGVuc2lvbnMuZmlsdGVycy5odHRwLnJvdXRlci52My5Sb3V0ZXK6AQsKCXdlYnNvY2tldCI4EjYKDGF1dGhfc2VydmljZRIPYXV0aC5qZXJ4aWUuY29tGhUKAwoBLxIOCgxfYXV0aF9zZXJ2ZXI=" + } + } + } + ], + "transport_socket": { + "name": "envoy.transport_sockets.tls", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "value": "Cm4SbAo1CjMvZXRjL2NlcnRzL2Rvd25zdHJlYW0vYXV0aC5qZXJ4aWUuY29tL2Z1bGxjaGFpbi5wZW0SMwoxL2V0Yy9jZXJ0cy9kb3duc3RyZWFtL2F1dGguamVyeGllLmNvbS9wcml2a2V5LnBlbQ==" + } + } + } + }, + { + "filter_chain_match": { + "server_names": [ + "ai.jerxie.com" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", + "value": "EgxpbmdyZXNzX2h0dHAqYQoZZW52b3kuZmlsdGVycy5odHRwLnJvdXRlciJECkJ0eXBlLmdvb2dsZWFwaXMuY29tL2Vudm95LmV4dGVuc2lvbnMuZmlsdGVycy5odHRwLnJvdXRlci52My5Sb3V0ZXK6AQsKCXdlYnNvY2tldCJVElMKCmFpX3NlcnZpY2USDWFpLmplcnhpZS5jb20aHAoGCgQvYXBpEhJCAAoOX2FpX2FwaV9zZXJ2ZXIaGAoDCgEvEhFCAAoNX2FpX3VpX3NlcnZlcg==" + } + } + } + ], + "transport_socket": { + "name": "envoy.transport_sockets.tls", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "value": "CmoSaAozCjEvZXRjL2NlcnRzL2Rvd25zdHJlYW0vYWkuamVyeGllLmNvbS9mdWxsY2hhaW4ucGVtEjEKLy9ldGMvY2VydHMvZG93bnN0cmVhbS9haS5qZXJ4aWUuY29tL3ByaXZrZXkucGVt" + } + } + } + }, + { + "filter_chain_match": { + "server_names": [ + "pcb.jerxie.com" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", + "value": "EgxpbmdyZXNzX2h0dHAqYQoZZW52b3kuZmlsdGVycy5odHRwLnJvdXRlciJECkJ0eXBlLmdvb2dsZWFwaXMuY29tL2Vudm95LmV4dGVuc2lvbnMuZmlsdGVycy5odHRwLnJvdXRlci52My5Sb3V0ZXK6AQsKCXdlYnNvY2tldCI3EjUKC3BjYl9zZXJ2aWNlEg5wY2IuamVyeGllLmNvbRoWCgMKAS8SD0IACgtfcGNiX3NlcnZlcg==" + } + } + } + ], + "transport_socket": { + "name": "envoy.transport_sockets.tls", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "value": "CmwSago0CjIvZXRjL2NlcnRzL2Rvd25zdHJlYW0vcGNiLmplcnhpZS5jb20vZnVsbGNoYWluLnBlbRIyCjAvZXRjL2NlcnRzL2Rvd25zdHJlYW0vcGNiLmplcnhpZS5jb20vcHJpdmtleS5wZW0=" + } + } + } + }, + { + "filter_chain_match": { + "server_names": [ + "monitor.jerxie.com" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", + "value": 
"EgxpbmdyZXNzX2h0dHAqYQoZZW52b3kuZmlsdGVycy5odHRwLnJvdXRlciJECkJ0eXBlLmdvb2dsZWFwaXMuY29tL2Vudm95LmV4dGVuc2lvbnMuZmlsdGVycy5odHRwLnJvdXRlci52My5Sb3V0ZXK6AQsKCXdlYnNvY2tldCJDEkEKD21vbml0b3Jfc2VydmljZRISbW9uaXRvci5qZXJ4aWUuY29tGhoKAwoBLxITQgAKD19tb25pdG9yX3NlcnZlcg==" + } + } + } + ], + "transport_socket": { + "name": "envoy.transport_sockets.tls", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "value": "CnQScgo4CjYvZXRjL2NlcnRzL2Rvd25zdHJlYW0vbW9uaXRvci5qZXJ4aWUuY29tL2Z1bGxjaGFpbi5wZW0SNgo0L2V0Yy9jZXJ0cy9kb3duc3RyZWFtL21vbml0b3IuamVyeGllLmNvbS9wcml2a2V5LnBlbQ==" + } + } + } + } + ], + "listener_filters": [ + { + "name": "envoy.filters.listener.tls_inspector", + "ConfigType": { + "TypedConfig": { + "type_url": "type.googleapis.com/envoy.extensions.filters.listener.tls_inspector.v3.TlsInspector" + } + } + } + ], + "ListenerSpecifier": null + } + ] +} \ No newline at end of file diff --git a/static/filter_chain.html b/static/filter_chain.html new file mode 100644 index 0000000..46b7c9e --- /dev/null +++ b/static/filter_chain.html @@ -0,0 +1,341 @@ + + +
+Configure your Envoy listener filter chain using simple inputs or switch to advanced editing mode.
+
+| Cluster Name | Status | Primary Endpoint | Connect Timeout | Action |
+| :--- | :--- | :--- | :--- | :--- |
+| Loading cluster data... | | | | |
+
+| Listener Name | Status | Address:Port | Domains / Filters | Action |
+| :--- | :--- | :--- | :--- | :--- |
+| Loading listener data... | | | | |
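
The typed filter and `transport_socket` entries in the listener snapshot above store their protobuf payloads as base64 in the `value` field, with the message type named by `type_url`. Below is a minimal sketch of decoding one of those payloads offline, assuming the snapshot was serialized with the envoyproxy/go-control-plane protobuf types (the base64 string is copied from the ai.jerxie.com `DownstreamTlsContext` entry above):

```go
package main

import (
	"encoding/base64"
	"fmt"
	"log"

	tlsv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3"
	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/proto"
)

func main() {
	// base64 "value" taken from the ai.jerxie.com transport_socket entry in the snapshot;
	// its type_url identifies the payload as a DownstreamTlsContext (assumption: the bytes
	// are the serialized protobuf message, which is how encoding/json renders []byte).
	const b64 = "CmoSaAozCjEvZXRjL2NlcnRzL2Rvd25zdHJlYW0vYWkuamVyeGllLmNvbS9mdWxsY2hhaW4ucGVtEjEKLy9ldGMvY2VydHMvZG93bnN0cmVhbS9haS5qZXJ4aWUuY29tL3ByaXZrZXkucGVt"

	raw, err := base64.StdEncoding.DecodeString(b64)
	if err != nil {
		log.Fatal(err)
	}

	var tlsCtx tlsv3.DownstreamTlsContext
	if err := proto.Unmarshal(raw, &tlsCtx); err != nil {
		log.Fatal(err)
	}

	// Print the decoded message as canonical proto JSON for inspection.
	fmt.Println(protojson.Format(&tlsCtx))
}
```

If that assumption holds, this prints the `/etc/certs/downstream/ai.jerxie.com/fullchain.pem` and `privkey.pem` paths visible in the snapshot; the same pattern (base64-decode, then `proto.Unmarshal` into the type named by `type_url`) applies to the `HttpConnectionManager`, OAuth2, JWT, and Lua filter payloads.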