From 1eafe1456386a64fa17762ebec133554307a656f Mon Sep 17 00:00:00 2001 From: lyt99 Date: Thu, 18 Jun 2020 19:37:03 +0800 Subject: [PATCH 1/7] feature: terway-metric-proxy.yml & terway_grafana_dashboard.json changes: buckets for prometheus & histogram for terway rpc latency --- monitoring/terway-metric-proxy.yml | 70 ++ monitoring/terway_grafana_dashboard.json | 985 +++++++++++++++++++++++ pkg/metric/aliyun.go | 2 +- pkg/metric/rpc.go | 10 +- 4 files changed, 1061 insertions(+), 6 deletions(-) create mode 100644 monitoring/terway-metric-proxy.yml create mode 100644 monitoring/terway_grafana_dashboard.json diff --git a/monitoring/terway-metric-proxy.yml b/monitoring/terway-metric-proxy.yml new file mode 100644 index 00000000..df73d704 --- /dev/null +++ b/monitoring/terway-metric-proxy.yml @@ -0,0 +1,70 @@ +# use this config to expose terway metrics api on node +# and register it to prometheus +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: terway-metric-svcmonitor + # use the namespace your prometheus is deployed in + # arms-prom for Aliyun ARMS Prometheus + namespace: arms-prom +spec: + jobLabel: terway-metric + selector: + matchLabels: + app: terway-metric-proxy + namespaceSelector: + matchNames: + - kube-system + endpoints: + - port: metric + path: /metrics + # pull interval + interval: 15s + +--- + +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + name: terway-metric-proxy + namespace: kube-system +spec: + template: + metadata: + labels: + app: terway-metric-proxy + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + nodeSelector: + beta.kubernetes.io/arch: amd64 + tolerations: + - operator: "Exists" + hostNetwork: true + containers: + - name: proxy + image: alpine/socat + command: [ "socat", "-d", "-d", "TCP4-LISTEN:15432,fork", "UNIX-CONNECT:/var/run/eni/eni_debug.socket" ] + volumeMounts: + - name: terway-metric + mountPath: /var/run/eni/ + volumes: + - name: terway-metric + hostPath: + path: /var/run/eni/ 
+--- + +apiVersion: v1 +kind: Service +metadata: + name: terway-metric + namespace: kube-system +spec: + selector: + app: terway-metric-proxy + clusterIP: None + ports: + - name: metric + protocol: TCP + port: 15432 + targetPort: 15432 \ No newline at end of file diff --git a/monitoring/terway_grafana_dashboard.json b/monitoring/terway_grafana_dashboard.json new file mode 100644 index 00000000..efdce44b --- /dev/null +++ b/monitoring/terway_grafana_dashboard.json @@ -0,0 +1,985 @@ +{ + "__inputs": [ + { + "name": "TERWAY-PROM-CLUSTER", + "label": "Cluster with Terway & Prometheus", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + } + ], + "__requires": [ + { + "type": "panel", + "id": "bargauge", + "name": "Bar Gauge", + "version": "" + }, + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "6.4.0-pre" + }, + { + "type": "panel", + "id": "graph", + "name": "Graph", + "version": "" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": null, + "links": [], + "panels": [ + { + "datasource": "${TERWAY-PROM-CLUSTER}", + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 0 + }, + "id": 2, + "options": { + "displayMode": "gradient", + "fieldOptions": { + "calcs": [ + "lastNotNull" + ], + "defaults": { + "mappings": [], + "max": 100, + "min": 0, + "thresholds": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ], + "title": "", + "unit": "none" + }, + "override": {}, + "values": false + }, + "orientation": "vertical" + }, + "pluginVersion": "6.4.0-pre", + "targets": [ + { + "expr": 
"terway_resource_pool_total_count", + "format": "time_series", + "instant": false, + "legendFormat": "{{instance}}({{capacity}})", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Terway Resource Pool Total Count", + "type": "bargauge" + }, + { + "datasource": "${DS_LYT-TEST-CLUTSER-PROM_1018023522129950}", + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 0 + }, + "id": 4, + "options": { + "displayMode": "gradient", + "fieldOptions": { + "calcs": [ + "lastNotNull" + ], + "defaults": { + "mappings": [], + "max": 100, + "min": 0, + "thresholds": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ], + "title": "", + "unit": "none" + }, + "override": {}, + "values": false + }, + "orientation": "vertical" + }, + "pluginVersion": "6.4.0-pre", + "targets": [ + { + "expr": "terway_resource_pool_idle_count", + "format": "time_series", + "instant": false, + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Terway Resource Pool Idle Count", + "type": "bargauge" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_LYT-TEST-CLUTSER-PROM_1018023522129950}", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 9 + }, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "terway_resource_pool_total_count", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Terway Resource Pool 
Total Count Time Series", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_LYT-TEST-CLUTSER-PROM_1018023522129950}", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 9 + }, + "id": 8, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "terway_resource_pool_idle_count", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Terway Resource Pool Idle Count Time Series", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 
10, + "dashes": false, + "datasource": "${DS_LYT-TEST-CLUTSER-PROM_1018023522129950}", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 17 + }, + "id": 9, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.95, sum(rate(aliyun_metadata_latency_bucket[5m])) by (le, url))", + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Aliyun Metadata API Latency 95%", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_LYT-TEST-CLUTSER-PROM_1018023522129950}", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 17 + }, + "id": 10, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + 
"spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.95, sum(rate(aliyun_openapi_latency_bucket[5m])) by (le, api))", + "instant": false, + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Aliyun OpenAPI Latency 95%", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_LYT-TEST-CLUTSER-PROM_1018023522129950}", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 25 + }, + "id": 12, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(aliyun_metadata_latency_count) by (error)", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Aliyun Metadata Request Count", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 
1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_LYT-TEST-CLUTSER-PROM_1018023522129950}", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 25 + }, + "id": 13, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(aliyun_openapi_latency_count) by (error)", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Aliyun OpenAPI Reqeust Count", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_LYT-TEST-CLUTSER-PROM_1018023522129950}", + "description": "", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 33 + }, + "id": 15, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, 
+ "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "terway_resource_pool_disposed_count", + "legendFormat": "{{instance}}({{min_idle}}-{{max_idle}})", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Terway Resource Pool Disposed Count", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "datasource": "${DS_LYT-TEST-CLUTSER-PROM_1018023522129950}", + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 33 + }, + "id": 16, + "options": { + "displayMode": "gradient", + "fieldOptions": { + "calcs": [ + "lastNotNull" + ], + "defaults": { + "mappings": [], + "max": 100, + "min": 0, + "thresholds": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ], + "title": "", + "unit": "none" + }, + "override": {}, + "values": false + }, + "orientation": "vertical" + }, + "pluginVersion": "6.4.0-pre", + "targets": [ + { + "expr": "terway_eniip_factory_eni_count", + "format": "time_series", + "instant": false, + "legendFormat": "{{instance}}({{max_eni}})", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Terway ENIIP Factory ENI Count", + "type": "bargauge" + }, + { + "datasource": "${DS_LYT-TEST-CLUTSER-PROM_1018023522129950}", + "gridPos": 
{ + "h": 8, + "w": 12, + "x": 0, + "y": 41 + }, + "id": 17, + "options": { + "displayMode": "gradient", + "fieldOptions": { + "calcs": [ + "lastNotNull" + ], + "defaults": { + "mappings": [], + "max": 100, + "min": 0, + "thresholds": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ], + "title": "", + "unit": "none" + }, + "override": {}, + "values": false + }, + "orientation": "horizontal" + }, + "pluginVersion": "6.4.0-pre", + "targets": [ + { + "expr": "terway_eniip_factory_ip_count", + "format": "time_series", + "instant": false, + "legendFormat": "{{eni}}({{instance}})", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Terway ENIIP Factory IP Count", + "type": "bargauge" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_LYT-TEST-CLUTSER-PROM_1018023522129950}", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 41 + }, + "id": 18, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pluginVersion": "6.4.0-pre", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(terway_eniip_factory_ip_alloc_count) by (status)", + "format": "time_series", + "instant": false, + "legendFormat": "{{status}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Terway ENIIP Factory IP Alloc Count", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + 
"format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": "10s", + "schemaVersion": 19, + "style": "light", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-15m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "", + "title": "Terway Dashboard", + "uid": "VLtNEKmGz", + "version": 9 +} \ No newline at end of file diff --git a/pkg/metric/aliyun.go b/pkg/metric/aliyun.go index f6efcff6..4997031a 100644 --- a/pkg/metric/aliyun.go +++ b/pkg/metric/aliyun.go @@ -8,7 +8,7 @@ var ( prometheus.HistogramOpts{ Name: "aliyun_openapi_latency", Help: "aliyun openapi latency in ms", - Buckets: prometheus.ExponentialBuckets(500, 2, 10), + Buckets: prometheus.ExponentialBuckets(50, 2, 10), }, []string{"api", "error"}, ) diff --git a/pkg/metric/rpc.go b/pkg/metric/rpc.go index 44dd6af7..6346a0ca 100644 --- a/pkg/metric/rpc.go +++ b/pkg/metric/rpc.go @@ -4,11 +4,11 @@ import "github.com/prometheus/client_golang/prometheus" var ( // RPCLatency terway grpc latency for grpc by cni binary - RPCLatency = prometheus.NewSummaryVec( - prometheus.SummaryOpts{ - Name: "terway_rpc_latency_ms", - Help: "terway rpc latency in ms", - Objectives: map[float64]float64{0.5: 0.05, 0.8: 0.01, 0.95: 0.001}, + RPCLatency = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "terway_rpc_latency", + Help: "terway rpc latency in ms", + Buckets: prometheus.ExponentialBuckets(1, 2, 10), }, []string{"rpc_api", "error"}, ) From 4696c6d286933bdb15bd3bf377ef037785d04bc5 Mon Sep 17 00:00:00 2001 From: lyt99 Date: Thu, 18 Jun 2020 20:05:10 +0800 Subject: [PATCH 2/7] changes: buckets for rpc latency & dashboard --- cli/main.go | 2 
+- daemon/daemon.go | 2 - monitoring/terway_grafana_dashboard.json | 144 ++++++++++++++++++----- pkg/metric/aliyun.go | 2 +- pkg/metric/rpc.go | 2 +- 5 files changed, 118 insertions(+), 34 deletions(-) diff --git a/cli/main.go b/cli/main.go index 672d0029..67df137a 100644 --- a/cli/main.go +++ b/cli/main.go @@ -52,7 +52,7 @@ func main() { } // initialize gRPC - ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) grpcConn, err := grpc.DialContext(ctx, defaultSocketPath, grpc.WithInsecure(), grpc.WithContextDialer( func(ctx context.Context, s string) (net.Conn, error) { unixAddr, err := net.ResolveUnixAddr("unix", defaultSocketPath) diff --git a/daemon/daemon.go b/daemon/daemon.go index 58eb2485..b32d4a76 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -618,7 +618,6 @@ func (networkService *networkService) Execute(cmd string, _ []string, message ch } func (networkService *networkService) GetResourceMapping() ([]tracing.PodResourceMapping, error) { - log.Println("get network_service resource mapping") var resourceMapping []tracing.ResourceMapping var err error @@ -676,7 +675,6 @@ func (networkService *networkService) GetResourceMapping() ([]tracing.PodResourc mapping[i].Resource = res } - log.Printf("get network_service resource mapping done: %v\n", mapping) return mapping, nil } diff --git a/monitoring/terway_grafana_dashboard.json b/monitoring/terway_grafana_dashboard.json index efdce44b..d9c44369 100644 --- a/monitoring/terway_grafana_dashboard.json +++ b/monitoring/terway_grafana_dashboard.json @@ -107,7 +107,7 @@ "type": "bargauge" }, { - "datasource": "${DS_LYT-TEST-CLUTSER-PROM_1018023522129950}", + "datasource": "${TERWAY-PROM-CLUSTER}", "gridPos": { "h": 9, "w": 12, @@ -163,7 +163,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_LYT-TEST-CLUTSER-PROM_1018023522129950}", + "datasource": "${TERWAY-PROM-CLUSTER}", "fill": 1, 
"fillGradient": 0, "gridPos": { @@ -249,7 +249,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_LYT-TEST-CLUTSER-PROM_1018023522129950}", + "datasource": "${TERWAY-PROM-CLUSTER}", "fill": 1, "fillGradient": 0, "gridPos": { @@ -335,7 +335,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_LYT-TEST-CLUTSER-PROM_1018023522129950}", + "datasource": "${TERWAY-PROM-CLUSTER}", "fill": 1, "fillGradient": 0, "gridPos": { @@ -421,7 +421,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_LYT-TEST-CLUTSER-PROM_1018023522129950}", + "datasource": "${TERWAY-PROM-CLUSTER}", "fill": 1, "fillGradient": 0, "gridPos": { @@ -507,7 +507,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_LYT-TEST-CLUTSER-PROM_1018023522129950}", + "datasource": "${TERWAY-PROM-CLUSTER}", "fill": 1, "fillGradient": 0, "gridPos": { @@ -592,7 +592,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_LYT-TEST-CLUTSER-PROM_1018023522129950}", + "datasource": "${TERWAY-PROM-CLUSTER}", "fill": 1, "fillGradient": 0, "gridPos": { @@ -601,7 +601,7 @@ "x": 12, "y": 25 }, - "id": 13, + "id": 20, "legend": { "avg": false, "current": false, @@ -618,6 +618,7 @@ "dataLinks": [] }, "percentage": false, + "pluginVersion": "6.4.0-pre", "pointradius": 2, "points": false, "renderer": "flot", @@ -627,7 +628,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(aliyun_openapi_latency_count) by (error)", + "expr": "histogram_quantile(0.95, sum(rate(terway_rpc_latency_bucket[5m])) by (le, rpc_api, error))", "refId": "A" } ], @@ -635,7 +636,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Aliyun OpenAPI Reqeust Count", + "title": "Terway CNI RPC Latency", "tooltip": { "shared": true, "sort": 0, @@ -677,7 +678,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_LYT-TEST-CLUTSER-PROM_1018023522129950}", + "datasource": "${TERWAY-PROM-CLUSTER}", 
"description": "", "fill": 1, "fillGradient": 0, @@ -760,14 +761,99 @@ } }, { - "datasource": "${DS_LYT-TEST-CLUTSER-PROM_1018023522129950}", + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${TERWAY-PROM-CLUSTER}", + "fill": 1, + "fillGradient": 0, "gridPos": { "h": 8, "w": 12, "x": 12, "y": 33 }, - "id": 16, + "id": 13, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(aliyun_openapi_latency_count) by (error)", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Aliyun OpenAPI Reqeust Count", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "datasource": "${TERWAY-PROM-CLUSTER}", + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 41 + }, + "id": 17, "options": { "displayMode": "gradient", "fieldOptions": { @@ -794,32 +880,32 @@ "override": {}, "values": false }, - "orientation": "vertical" + "orientation": "horizontal" }, "pluginVersion": "6.4.0-pre", "targets": [ { - "expr": "terway_eniip_factory_eni_count", + "expr": "terway_eniip_factory_ip_count", "format": "time_series", "instant": false, - "legendFormat": 
"{{instance}}({{max_eni}})", + "legendFormat": "{{eni}}({{instance}})", "refId": "A" } ], "timeFrom": null, "timeShift": null, - "title": "Terway ENIIP Factory ENI Count", + "title": "Terway ENIIP Factory IP Count", "type": "bargauge" }, { - "datasource": "${DS_LYT-TEST-CLUTSER-PROM_1018023522129950}", + "datasource": "${TERWAY-PROM-CLUSTER}", "gridPos": { "h": 8, "w": 12, - "x": 0, + "x": 12, "y": 41 }, - "id": 17, + "id": 16, "options": { "displayMode": "gradient", "fieldOptions": { @@ -846,21 +932,21 @@ "override": {}, "values": false }, - "orientation": "horizontal" + "orientation": "vertical" }, "pluginVersion": "6.4.0-pre", "targets": [ { - "expr": "terway_eniip_factory_ip_count", + "expr": "terway_eniip_factory_eni_count", "format": "time_series", "instant": false, - "legendFormat": "{{eni}}({{instance}})", + "legendFormat": "{{instance}}({{max_eni}})", "refId": "A" } ], "timeFrom": null, "timeShift": null, - "title": "Terway ENIIP Factory IP Count", + "title": "Terway ENIIP Factory ENI Count", "type": "bargauge" }, { @@ -868,14 +954,14 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_LYT-TEST-CLUTSER-PROM_1018023522129950}", + "datasource": "${TERWAY-PROM-CLUSTER}", "fill": 1, "fillGradient": 0, "gridPos": { "h": 8, "w": 12, - "x": 12, - "y": 41 + "x": 0, + "y": 49 }, "id": 18, "legend": { @@ -961,7 +1047,7 @@ "list": [] }, "time": { - "from": "now-15m", + "from": "now-5m", "to": "now" }, "timepicker": { @@ -981,5 +1067,5 @@ "timezone": "", "title": "Terway Dashboard", "uid": "VLtNEKmGz", - "version": 9 + "version": 12 } \ No newline at end of file diff --git a/pkg/metric/aliyun.go b/pkg/metric/aliyun.go index 4997031a..b2fec491 100644 --- a/pkg/metric/aliyun.go +++ b/pkg/metric/aliyun.go @@ -8,7 +8,7 @@ var ( prometheus.HistogramOpts{ Name: "aliyun_openapi_latency", Help: "aliyun openapi latency in ms", - Buckets: prometheus.ExponentialBuckets(50, 2, 10), + Buckets: []float64{50, 100, 200, 400, 800, 1600, 3200, 6400, 12800, 13800, 
14800, 16800, 20800, 28800, 44800}, }, []string{"api", "error"}, ) diff --git a/pkg/metric/rpc.go b/pkg/metric/rpc.go index 6346a0ca..29415b5a 100644 --- a/pkg/metric/rpc.go +++ b/pkg/metric/rpc.go @@ -8,7 +8,7 @@ var ( prometheus.HistogramOpts{ Name: "terway_rpc_latency", Help: "terway rpc latency in ms", - Buckets: prometheus.ExponentialBuckets(1, 2, 10), + Buckets: []float64{50, 100, 200, 400, 800, 1600, 3200, 6400, 12800, 25600, 26600, 27600, 29600, 33600, 41600, 57600, 89600, 110000, 120000}, }, []string{"rpc_api", "error"}, ) From 26432ba2b3ebe5ec9731a65ca168f1c3194fbfca Mon Sep 17 00:00:00 2001 From: lyt99 Date: Mon, 22 Jun 2020 15:13:48 +0800 Subject: [PATCH 3/7] feature: add event recorder --- Gopkg.lock | 25 +- daemon/daemon.go | 28 + daemon/k8s.go | 64 +- plugin/terway/cni.go | 20 + rpc/rpc.pb.go | 1792 ++++++-------- rpc/rpc.proto | 26 + vendor/github.com/golang/groupcache/LICENSE | 191 ++ .../github.com/golang/groupcache/lru/lru.go | 133 + .../apimachinery/pkg/util/mergepatch/BUILD | 39 + .../apimachinery/pkg/util/mergepatch/OWNERS | 5 + .../pkg/util/mergepatch/errors.go | 102 + .../apimachinery/pkg/util/mergepatch/util.go | 133 + .../pkg/util/strategicpatch/BUILD | 60 + .../pkg/util/strategicpatch/OWNERS | 5 + .../pkg/util/strategicpatch/errors.go | 49 + .../pkg/util/strategicpatch/meta.go | 194 ++ .../pkg/util/strategicpatch/patch.go | 2151 +++++++++++++++++ .../pkg/util/strategicpatch/types.go | 193 ++ .../third_party/forked/golang/json/BUILD | 32 + .../third_party/forked/golang/json/OWNERS | 5 + .../third_party/forked/golang/json/fields.go | 513 ++++ vendor/k8s.io/client-go/tools/record/OWNERS | 27 + vendor/k8s.io/client-go/tools/record/doc.go | 18 + vendor/k8s.io/client-go/tools/record/event.go | 318 +++ .../client-go/tools/record/events_cache.go | 467 ++++ vendor/k8s.io/client-go/tools/record/fake.go | 54 + vendor/k8s.io/kube-openapi/LICENSE | 202 ++ .../k8s.io/kube-openapi/pkg/util/proto/doc.go | 19 + .../kube-openapi/pkg/util/proto/document.go 
| 285 +++ .../kube-openapi/pkg/util/proto/openapi.go | 276 +++ 30 files changed, 6429 insertions(+), 997 deletions(-) create mode 100644 vendor/github.com/golang/groupcache/LICENSE create mode 100644 vendor/github.com/golang/groupcache/lru/lru.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/mergepatch/BUILD create mode 100644 vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS create mode 100644 vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/strategicpatch/BUILD create mode 100644 vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS create mode 100644 vendor/k8s.io/apimachinery/pkg/util/strategicpatch/errors.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/strategicpatch/types.go create mode 100644 vendor/k8s.io/apimachinery/third_party/forked/golang/json/BUILD create mode 100644 vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS create mode 100644 vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go create mode 100755 vendor/k8s.io/client-go/tools/record/OWNERS create mode 100644 vendor/k8s.io/client-go/tools/record/doc.go create mode 100644 vendor/k8s.io/client-go/tools/record/event.go create mode 100644 vendor/k8s.io/client-go/tools/record/events_cache.go create mode 100644 vendor/k8s.io/client-go/tools/record/fake.go create mode 100644 vendor/k8s.io/kube-openapi/LICENSE create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/proto/doc.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/proto/document.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go diff --git a/Gopkg.lock b/Gopkg.lock index 460657f0..2b639f01 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -183,6 +183,14 @@ pruneopts = "UT" 
revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998" +[[projects]] + branch = "master" + digest = "1:b7cb6054d3dff43b38ad2e92492f220f57ae6087ee797dca298139776749ace8" + name = "github.com/golang/groupcache" + packages = ["lru"] + pruneopts = "UT" + revision = "8c9f03a8e57eb486e42badaed3fb287da51807ba" + [[projects]] digest = "1:4a32eb57407190eced21a21abee9ce4d4ab6f0bf113ca61cb1cb2d549a65c985" name = "github.com/golang/protobuf" @@ -615,7 +623,7 @@ revision = "c89978d5f86d7427bef2fc7752732c8c60b1d188" [[projects]] - digest = "1:5306fa4e911383bc8540148123e6a81aea1786dbf592ac051fd572fdd8a173b1" + digest = "1:50d6b10125570b7983978820008baca8069ec867d9808a2318243a787ab77b30" name = "k8s.io/apimachinery" packages = [ "pkg/api/errors", @@ -643,15 +651,18 @@ "pkg/util/framer", "pkg/util/intstr", "pkg/util/json", + "pkg/util/mergepatch", "pkg/util/net", "pkg/util/runtime", "pkg/util/sets", + "pkg/util/strategicpatch", "pkg/util/validation", "pkg/util/validation/field", "pkg/util/wait", "pkg/util/yaml", "pkg/version", "pkg/watch", + "third_party/forked/golang/json", "third_party/forked/golang/reflect", ] pruneopts = "UT" @@ -659,7 +670,7 @@ version = "kubernetes-1.10.2" [[projects]] - digest = "1:6001983c1d8ee92c42439d07ec7e17c4bbc6136e4fd7061b7e31c964e9dcc8c0" + digest = "1:646b63ad0c02e84e452417e8ad8b6369f94afe58079d3afeef06d2d78d9c9ed8" name = "k8s.io/client-go" packages = [ "discovery", @@ -705,6 +716,7 @@ "tools/clientcmd/api/latest", "tools/clientcmd/api/v1", "tools/metrics", + "tools/record", "tools/reference", "transport", "util/cert", @@ -716,6 +728,14 @@ revision = "33f2870a2b83179c823ddc90e5513f9e5fe43b38" version = "kubernetes-1.10.2" +[[projects]] + branch = "feature-serverside-apply" + digest = "1:e0d6dcb28c42a53c7243bb6380badd17f92fbd8488a075a07e984f91a07c0d23" + name = "k8s.io/kube-openapi" + packages = ["pkg/util/proto"] + pruneopts = "UT" + revision = "f442ecb314a3679150c272e2b9713d8deed5955d" + [[projects]] digest = 
"1:6a15e7ea08b2ddf329857d6611bbc7b17801d34175b0d93088ae1aede3f47872" name = "k8s.io/kubernetes" @@ -768,6 +788,7 @@ "k8s.io/apimachinery/pkg/util/wait", "k8s.io/client-go/kubernetes", "k8s.io/client-go/tools/clientcmd", + "k8s.io/client-go/tools/record", "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1", ] solver-name = "gps-cdcl" diff --git a/daemon/daemon.go b/daemon/daemon.go index b32d4a76..de97162a 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -475,6 +475,34 @@ func (networkService *networkService) GetIPInfo(ctx context.Context, r *rpc.GetI } } +func (networkService *networkService) RecordEvent(_ context.Context, r *rpc.EventRequest) (*rpc.EventReply, error) { + eventType := eventTypeNormal + if r.EventType == rpc.EventType_EventTypeWarning { + eventType = eventTypeWarning + } + + reply := &rpc.EventReply{ + Succeed: true, + Error: "", + } + + if r.EventTarget == rpc.EventTarget_EventTargetNode { // Node + networkService.k8s.RecordNodeEvent(eventType, r.Reason, r.Message) + return reply, nil + } + + // Pod + err := networkService.k8s.RecordPodEvent(r.K8SPodName, r.K8SPodNamespace, eventType, r.Reason, r.Message) + if err != nil { + reply.Succeed = false + reply.Error = err.Error() + + return reply, err + } + + return reply, nil +} + func (networkService *networkService) verifyPodNetworkType(podNetworkMode string) bool { return (networkService.daemonMode == daemonModeVPC && //vpc (podNetworkMode == podNetworkTypeVPCENI || podNetworkMode == podNetworkTypeVPCIP)) || diff --git a/daemon/k8s.go b/daemon/k8s.go index b247a821..733fa30a 100644 --- a/daemon/k8s.go +++ b/daemon/k8s.go @@ -12,6 +12,8 @@ import ( "time" "unicode" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/tools/clientcmd" "github.com/AliyunContainerService/terway/deviceplugin" @@ -24,6 +26,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/client-go/kubernetes" + typedv1 "k8s.io/client-go/kubernetes/typed/core/v1" + 
"k8s.io/client-go/tools/record" ) const ( @@ -35,6 +39,9 @@ const ( apiServerTimeout = 70 * time.Second apiServerReconnectThrottle = 2 * time.Minute + + eventTypeNormal = corev1.EventTypeNormal + eventTypeWarning = corev1.EventTypeWarning ) type podInfo struct { @@ -56,14 +63,19 @@ type Kubernetes interface { GetServiceCidr() *net.IPNet GetNodeCidr() *net.IPNet SetNodeAllocatablePod(count int) error + RecordNodeEvent(eventType, reason, message string) + RecordPodEvent(podName, podNamespace, eventType, reason, message string) error } type k8s struct { client kubernetes.Interface storage storage.Storage + broadcaster record.EventBroadcaster + recorder record.EventRecorder mode string nodeName string nodeCidr *net.IPNet + node *corev1.Node svcCidr *net.IPNet apiConn *connTracker apiConnTime time.Time @@ -94,6 +106,13 @@ func newK8S(master, kubeconfig string, svcCidr *net.IPNet, daemonMode string) (K return nil, errors.Wrap(err, "failed getting node name") } + node, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{ + ResourceVersion: "0", + }) + if err != nil { + return nil, errors.Wrap(err, "failed getting node") + } + if svcCidr == nil { svcCidr, err = serviceCidrFromAPIServer(client) if err != nil { @@ -114,18 +133,29 @@ func newK8S(master, kubeconfig string, svcCidr *net.IPNet, daemonMode string) (K return nil, errors.Wrapf(err, "failed init db storage with path %s and bucket %s", dbPath, dbName) } + broadcaster := record.NewBroadcaster() + source := corev1.EventSource{Component: "terway-daemon"} + recorder := broadcaster.NewRecorder(scheme.Scheme, source) + + sink := &typedv1.EventSinkImpl{ + Interface: typedv1.New(client.CoreV1().RESTClient()).Events(""), + } + broadcaster.StartRecordingToSink(sink) + k8sObj := &k8s{ client: client, mode: daemonMode, + node: node, nodeName: nodeName, nodeCidr: nodeCidr, svcCidr: svcCidr, storage: storage, apiConn: t, + broadcaster: broadcaster, + recorder: recorder, apiConnTime: time.Now(), Locker: &sync.RWMutex{}, 
} - go func() { for range time.Tick(storageCleanPeriod) { err := k8sObj.clean() @@ -478,6 +508,38 @@ func (k *k8s) reconnectOnTimeoutError(err error) { } } +func (k *k8s) RecordNodeEvent(eventType, reason, message string) { + ref := &corev1.ObjectReference{ + Kind: "Node", + Name: k.node.Name, + UID: k.node.UID, + Namespace: "", + } + + k.recorder.Event(ref, eventType, reason, message) +} + +func (k *k8s) RecordPodEvent(podName, podNamespace, eventType, reason, message string) error { + pod, err := k.client.CoreV1().Pods(podNamespace).Get(podName, metav1.GetOptions{ + ResourceVersion: "0", + }) + + if err != nil { + k.reconnectOnTimeoutError(err) + return err + } + + ref := &corev1.ObjectReference{ + Kind: "Pod", + Name: pod.Name, + UID: pod.UID, + Namespace: pod.Namespace, + } + + k.recorder.Event(ref, eventType, reason, message) + return nil +} + // connTracker is a dialer that tracks all open connections it creates. type connTracker struct { dialer *net.Dialer diff --git a/plugin/terway/cni.go b/plugin/terway/cni.go index d777f3f0..89786e23 100644 --- a/plugin/terway/cni.go +++ b/plugin/terway/cni.go @@ -155,6 +155,16 @@ func cmdAdd(args *skel.CmdArgs) (err error) { IPType: allocResult.IPType, Reason: fmt.Sprintf("roll back ip for error: %v", err), }) + + _, err = terwayBackendClient.RecordEvent(context.Background(), + &rpc.EventRequest{ + EventTarget: rpc.EventTarget_EventTargetPod, + K8SPodName: string(k8sConfig.K8S_POD_NAME), + K8SPodNamespace: string(k8sConfig.K8S_POD_NAMESPACE), + EventType: rpc.EventType_EventTypeWarning, + Reason: "AllocIPFailed", + Message: err.Error(), + }) } }() @@ -349,6 +359,16 @@ func cmdAdd(args *skel.CmdArgs) (err error) { }}, } + _, _ = terwayBackendClient.RecordEvent(context.Background(), + &rpc.EventRequest{ + EventTarget: rpc.EventTarget_EventTargetPod, + K8SPodName: string(k8sConfig.K8S_POD_NAME), + K8SPodNamespace: string(k8sConfig.K8S_POD_NAMESPACE), + EventType: rpc.EventType_EventTypeNormal, + Reason: "AllocIPSucceed", + 
Message: fmt.Sprintf("Alloc IP %s for Pod", allocatedIPAddr.String()), + }) + return types.PrintResult(result, confVersion) } diff --git a/rpc/rpc.pb.go b/rpc/rpc.pb.go index 025ac25b..ca0291bc 100644 --- a/rpc/rpc.pb.go +++ b/rpc/rpc.pb.go @@ -1,34 +1,29 @@ // Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.23.0 -// protoc v3.12.3 // source: rpc.proto package rpc import ( context "context" - reflect "reflect" - sync "sync" + fmt "fmt" + math "math" proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type IPType int32 @@ -39,638 +34,568 @@ const ( IPType_TypeENIMultiIP IPType = 3 ) -// Enum value maps for IPType. 
-var ( - IPType_name = map[int32]string{ - 0: "TypeVPCIP", - 1: "TypeVPCENI", - 2: "TypeManagedK8S", - 3: "TypeENIMultiIP", - } - IPType_value = map[string]int32{ - "TypeVPCIP": 0, - "TypeVPCENI": 1, - "TypeManagedK8S": 2, - "TypeENIMultiIP": 3, - } -) +var IPType_name = map[int32]string{ + 0: "TypeVPCIP", + 1: "TypeVPCENI", + 2: "TypeManagedK8S", + 3: "TypeENIMultiIP", +} -func (x IPType) Enum() *IPType { - p := new(IPType) - *p = x - return p +var IPType_value = map[string]int32{ + "TypeVPCIP": 0, + "TypeVPCENI": 1, + "TypeManagedK8S": 2, + "TypeENIMultiIP": 3, } func (x IPType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) + return proto.EnumName(IPType_name, int32(x)) } -func (IPType) Descriptor() protoreflect.EnumDescriptor { - return file_rpc_proto_enumTypes[0].Descriptor() +func (IPType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{0} } -func (IPType) Type() protoreflect.EnumType { - return &file_rpc_proto_enumTypes[0] -} +type EventTarget int32 -func (x IPType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) +const ( + EventTarget_EventTargetNode EventTarget = 0 + EventTarget_EventTargetPod EventTarget = 1 +) + +var EventTarget_name = map[int32]string{ + 0: "EventTargetNode", + 1: "EventTargetPod", } -// Deprecated: Use IPType.Descriptor instead. 
-func (IPType) EnumDescriptor() ([]byte, []int) { - return file_rpc_proto_rawDescGZIP(), []int{0} +var EventTarget_value = map[string]int32{ + "EventTargetNode": 0, + "EventTargetPod": 1, } -type AllocIPRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x EventTarget) String() string { + return proto.EnumName(EventTarget_name, int32(x)) +} - K8SPodName string `protobuf:"bytes,1,opt,name=K8sPodName,proto3" json:"K8sPodName,omitempty"` - K8SPodNamespace string `protobuf:"bytes,2,opt,name=K8sPodNamespace,proto3" json:"K8sPodNamespace,omitempty"` - K8SPodInfraContainerId string `protobuf:"bytes,3,opt,name=K8sPodInfraContainerId,proto3" json:"K8sPodInfraContainerId,omitempty"` - Netns string `protobuf:"bytes,4,opt,name=Netns,proto3" json:"Netns,omitempty"` - IfName string `protobuf:"bytes,5,opt,name=IfName,proto3" json:"IfName,omitempty"` +func (EventTarget) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{1} } -func (x *AllocIPRequest) Reset() { - *x = AllocIPRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_rpc_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +type EventType int32 + +const ( + EventType_EventTypeNormal EventType = 0 + EventType_EventTypeWarning EventType = 1 +) + +var EventType_name = map[int32]string{ + 0: "EventTypeNormal", + 1: "EventTypeWarning", } -func (x *AllocIPRequest) String() string { - return protoimpl.X.MessageStringOf(x) +var EventType_value = map[string]int32{ + "EventTypeNormal": 0, + "EventTypeWarning": 1, } -func (*AllocIPRequest) ProtoMessage() {} +func (x EventType) String() string { + return proto.EnumName(EventType_name, int32(x)) +} -func (x *AllocIPRequest) ProtoReflect() protoreflect.Message { - mi := &file_rpc_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil 
{ - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) +func (EventType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{2} } -// Deprecated: Use AllocIPRequest.ProtoReflect.Descriptor instead. +type AllocIPRequest struct { + K8SPodName string `protobuf:"bytes,1,opt,name=K8sPodName,proto3" json:"K8sPodName,omitempty"` + K8SPodNamespace string `protobuf:"bytes,2,opt,name=K8sPodNamespace,proto3" json:"K8sPodNamespace,omitempty"` + K8SPodInfraContainerId string `protobuf:"bytes,3,opt,name=K8sPodInfraContainerId,proto3" json:"K8sPodInfraContainerId,omitempty"` + Netns string `protobuf:"bytes,4,opt,name=Netns,proto3" json:"Netns,omitempty"` + IfName string `protobuf:"bytes,5,opt,name=IfName,proto3" json:"IfName,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AllocIPRequest) Reset() { *m = AllocIPRequest{} } +func (m *AllocIPRequest) String() string { return proto.CompactTextString(m) } +func (*AllocIPRequest) ProtoMessage() {} func (*AllocIPRequest) Descriptor() ([]byte, []int) { - return file_rpc_proto_rawDescGZIP(), []int{0} + return fileDescriptor_77a6da22d6a3feb1, []int{0} +} + +func (m *AllocIPRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AllocIPRequest.Unmarshal(m, b) +} +func (m *AllocIPRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AllocIPRequest.Marshal(b, m, deterministic) +} +func (m *AllocIPRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllocIPRequest.Merge(m, src) +} +func (m *AllocIPRequest) XXX_Size() int { + return xxx_messageInfo_AllocIPRequest.Size(m) +} +func (m *AllocIPRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AllocIPRequest.DiscardUnknown(m) } -func (x *AllocIPRequest) GetK8SPodName() string { - if x != nil { - return x.K8SPodName +var xxx_messageInfo_AllocIPRequest proto.InternalMessageInfo + +func (m *AllocIPRequest) 
GetK8SPodName() string { + if m != nil { + return m.K8SPodName } return "" } -func (x *AllocIPRequest) GetK8SPodNamespace() string { - if x != nil { - return x.K8SPodNamespace +func (m *AllocIPRequest) GetK8SPodNamespace() string { + if m != nil { + return m.K8SPodNamespace } return "" } -func (x *AllocIPRequest) GetK8SPodInfraContainerId() string { - if x != nil { - return x.K8SPodInfraContainerId +func (m *AllocIPRequest) GetK8SPodInfraContainerId() string { + if m != nil { + return m.K8SPodInfraContainerId } return "" } -func (x *AllocIPRequest) GetNetns() string { - if x != nil { - return x.Netns +func (m *AllocIPRequest) GetNetns() string { + if m != nil { + return m.Netns } return "" } -func (x *AllocIPRequest) GetIfName() string { - if x != nil { - return x.IfName +func (m *AllocIPRequest) GetIfName() string { + if m != nil { + return m.IfName } return "" } // VETH Basic type Pod struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Ingress uint64 `protobuf:"varint,1,opt,name=Ingress,proto3" json:"Ingress,omitempty"` - Egress uint64 `protobuf:"varint,2,opt,name=Egress,proto3" json:"Egress,omitempty"` + Ingress uint64 `protobuf:"varint,1,opt,name=Ingress,proto3" json:"Ingress,omitempty"` + Egress uint64 `protobuf:"varint,2,opt,name=Egress,proto3" json:"Egress,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (x *Pod) Reset() { - *x = Pod{} - if protoimpl.UnsafeEnabled { - mi := &file_rpc_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (m *Pod) Reset() { *m = Pod{} } +func (m *Pod) String() string { return proto.CompactTextString(m) } +func (*Pod) ProtoMessage() {} +func (*Pod) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{1} } -func (x *Pod) String() string { - return protoimpl.X.MessageStringOf(x) +func (m *Pod) 
XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Pod.Unmarshal(m, b) } - -func (*Pod) ProtoMessage() {} - -func (x *Pod) ProtoReflect() protoreflect.Message { - mi := &file_rpc_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) +func (m *Pod) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Pod.Marshal(b, m, deterministic) } - -// Deprecated: Use Pod.ProtoReflect.Descriptor instead. -func (*Pod) Descriptor() ([]byte, []int) { - return file_rpc_proto_rawDescGZIP(), []int{1} +func (m *Pod) XXX_Merge(src proto.Message) { + xxx_messageInfo_Pod.Merge(m, src) +} +func (m *Pod) XXX_Size() int { + return xxx_messageInfo_Pod.Size(m) } +func (m *Pod) XXX_DiscardUnknown() { + xxx_messageInfo_Pod.DiscardUnknown(m) +} + +var xxx_messageInfo_Pod proto.InternalMessageInfo -func (x *Pod) GetIngress() uint64 { - if x != nil { - return x.Ingress +func (m *Pod) GetIngress() uint64 { + if m != nil { + return m.Ingress } return 0 } -func (x *Pod) GetEgress() uint64 { - if x != nil { - return x.Egress +func (m *Pod) GetEgress() uint64 { + if m != nil { + return m.Egress } return 0 } // VPC route veth type VPCIP struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - PodConfig *Pod `protobuf:"bytes,1,opt,name=PodConfig,proto3" json:"PodConfig,omitempty"` - NodeCidr string `protobuf:"bytes,2,opt,name=NodeCidr,proto3" json:"NodeCidr,omitempty"` + PodConfig *Pod `protobuf:"bytes,1,opt,name=PodConfig,proto3" json:"PodConfig,omitempty"` + NodeCidr string `protobuf:"bytes,2,opt,name=NodeCidr,proto3" json:"NodeCidr,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (x *VPCIP) Reset() { - *x = VPCIP{} - if protoimpl.UnsafeEnabled { - mi := 
&file_rpc_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (m *VPCIP) Reset() { *m = VPCIP{} } +func (m *VPCIP) String() string { return proto.CompactTextString(m) } +func (*VPCIP) ProtoMessage() {} +func (*VPCIP) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{2} } -func (x *VPCIP) String() string { - return protoimpl.X.MessageStringOf(x) +func (m *VPCIP) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VPCIP.Unmarshal(m, b) } - -func (*VPCIP) ProtoMessage() {} - -func (x *VPCIP) ProtoReflect() protoreflect.Message { - mi := &file_rpc_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) +func (m *VPCIP) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VPCIP.Marshal(b, m, deterministic) } - -// Deprecated: Use VPCIP.ProtoReflect.Descriptor instead. 
-func (*VPCIP) Descriptor() ([]byte, []int) { - return file_rpc_proto_rawDescGZIP(), []int{2} +func (m *VPCIP) XXX_Merge(src proto.Message) { + xxx_messageInfo_VPCIP.Merge(m, src) +} +func (m *VPCIP) XXX_Size() int { + return xxx_messageInfo_VPCIP.Size(m) } +func (m *VPCIP) XXX_DiscardUnknown() { + xxx_messageInfo_VPCIP.DiscardUnknown(m) +} + +var xxx_messageInfo_VPCIP proto.InternalMessageInfo -func (x *VPCIP) GetPodConfig() *Pod { - if x != nil { - return x.PodConfig +func (m *VPCIP) GetPodConfig() *Pod { + if m != nil { + return m.PodConfig } return nil } -func (x *VPCIP) GetNodeCidr() string { - if x != nil { - return x.NodeCidr +func (m *VPCIP) GetNodeCidr() string { + if m != nil { + return m.NodeCidr } return "" } // ENI Basic type ENI struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - IPv4Addr string `protobuf:"bytes,1,opt,name=IPv4Addr,proto3" json:"IPv4Addr,omitempty"` - IPv4Subnet string `protobuf:"bytes,2,opt,name=IPv4Subnet,proto3" json:"IPv4Subnet,omitempty"` - MacAddr string `protobuf:"bytes,3,opt,name=MacAddr,proto3" json:"MacAddr,omitempty"` - Gateway string `protobuf:"bytes,4,opt,name=Gateway,proto3" json:"Gateway,omitempty"` - DeviceNumber int32 `protobuf:"varint,5,opt,name=DeviceNumber,proto3" json:"DeviceNumber,omitempty"` - PrimaryIPv4Addr string `protobuf:"bytes,6,opt,name=PrimaryIPv4Addr,proto3" json:"PrimaryIPv4Addr,omitempty"` + IPv4Addr string `protobuf:"bytes,1,opt,name=IPv4Addr,proto3" json:"IPv4Addr,omitempty"` + IPv4Subnet string `protobuf:"bytes,2,opt,name=IPv4Subnet,proto3" json:"IPv4Subnet,omitempty"` + MacAddr string `protobuf:"bytes,3,opt,name=MacAddr,proto3" json:"MacAddr,omitempty"` + Gateway string `protobuf:"bytes,4,opt,name=Gateway,proto3" json:"Gateway,omitempty"` + DeviceNumber int32 `protobuf:"varint,5,opt,name=DeviceNumber,proto3" json:"DeviceNumber,omitempty"` + PrimaryIPv4Addr string `protobuf:"bytes,6,opt,name=PrimaryIPv4Addr,proto3" 
json:"PrimaryIPv4Addr,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ENI) Reset() { *m = ENI{} } +func (m *ENI) String() string { return proto.CompactTextString(m) } +func (*ENI) ProtoMessage() {} +func (*ENI) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{3} } -func (x *ENI) Reset() { - *x = ENI{} - if protoimpl.UnsafeEnabled { - mi := &file_rpc_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (m *ENI) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ENI.Unmarshal(m, b) } - -func (x *ENI) String() string { - return protoimpl.X.MessageStringOf(x) +func (m *ENI) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ENI.Marshal(b, m, deterministic) } - -func (*ENI) ProtoMessage() {} - -func (x *ENI) ProtoReflect() protoreflect.Message { - mi := &file_rpc_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) +func (m *ENI) XXX_Merge(src proto.Message) { + xxx_messageInfo_ENI.Merge(m, src) } - -// Deprecated: Use ENI.ProtoReflect.Descriptor instead. 
-func (*ENI) Descriptor() ([]byte, []int) { - return file_rpc_proto_rawDescGZIP(), []int{3} +func (m *ENI) XXX_Size() int { + return xxx_messageInfo_ENI.Size(m) +} +func (m *ENI) XXX_DiscardUnknown() { + xxx_messageInfo_ENI.DiscardUnknown(m) } -func (x *ENI) GetIPv4Addr() string { - if x != nil { - return x.IPv4Addr +var xxx_messageInfo_ENI proto.InternalMessageInfo + +func (m *ENI) GetIPv4Addr() string { + if m != nil { + return m.IPv4Addr } return "" } -func (x *ENI) GetIPv4Subnet() string { - if x != nil { - return x.IPv4Subnet +func (m *ENI) GetIPv4Subnet() string { + if m != nil { + return m.IPv4Subnet } return "" } -func (x *ENI) GetMacAddr() string { - if x != nil { - return x.MacAddr +func (m *ENI) GetMacAddr() string { + if m != nil { + return m.MacAddr } return "" } -func (x *ENI) GetGateway() string { - if x != nil { - return x.Gateway +func (m *ENI) GetGateway() string { + if m != nil { + return m.Gateway } return "" } -func (x *ENI) GetDeviceNumber() int32 { - if x != nil { - return x.DeviceNumber +func (m *ENI) GetDeviceNumber() int32 { + if m != nil { + return m.DeviceNumber } return 0 } -func (x *ENI) GetPrimaryIPv4Addr() string { - if x != nil { - return x.PrimaryIPv4Addr +func (m *ENI) GetPrimaryIPv4Addr() string { + if m != nil { + return m.PrimaryIPv4Addr } return "" } // Dedicated ENI type VPCENI struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - EniConfig *ENI `protobuf:"bytes,1,opt,name=EniConfig,proto3" json:"EniConfig,omitempty"` - PodConfig *Pod `protobuf:"bytes,2,opt,name=PodConfig,proto3" json:"PodConfig,omitempty"` - ServiceCidr string `protobuf:"bytes,3,opt,name=ServiceCidr,proto3" json:"ServiceCidr,omitempty"` + EniConfig *ENI `protobuf:"bytes,1,opt,name=EniConfig,proto3" json:"EniConfig,omitempty"` + PodConfig *Pod `protobuf:"bytes,2,opt,name=PodConfig,proto3" json:"PodConfig,omitempty"` + ServiceCidr string `protobuf:"bytes,3,opt,name=ServiceCidr,proto3" 
json:"ServiceCidr,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (x *VPCENI) Reset() { - *x = VPCENI{} - if protoimpl.UnsafeEnabled { - mi := &file_rpc_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (m *VPCENI) Reset() { *m = VPCENI{} } +func (m *VPCENI) String() string { return proto.CompactTextString(m) } +func (*VPCENI) ProtoMessage() {} +func (*VPCENI) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{4} } -func (x *VPCENI) String() string { - return protoimpl.X.MessageStringOf(x) +func (m *VPCENI) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VPCENI.Unmarshal(m, b) } - -func (*VPCENI) ProtoMessage() {} - -func (x *VPCENI) ProtoReflect() protoreflect.Message { - mi := &file_rpc_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) +func (m *VPCENI) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VPCENI.Marshal(b, m, deterministic) } - -// Deprecated: Use VPCENI.ProtoReflect.Descriptor instead. 
-func (*VPCENI) Descriptor() ([]byte, []int) { - return file_rpc_proto_rawDescGZIP(), []int{4} +func (m *VPCENI) XXX_Merge(src proto.Message) { + xxx_messageInfo_VPCENI.Merge(m, src) +} +func (m *VPCENI) XXX_Size() int { + return xxx_messageInfo_VPCENI.Size(m) } +func (m *VPCENI) XXX_DiscardUnknown() { + xxx_messageInfo_VPCENI.DiscardUnknown(m) +} + +var xxx_messageInfo_VPCENI proto.InternalMessageInfo -func (x *VPCENI) GetEniConfig() *ENI { - if x != nil { - return x.EniConfig +func (m *VPCENI) GetEniConfig() *ENI { + if m != nil { + return m.EniConfig } return nil } -func (x *VPCENI) GetPodConfig() *Pod { - if x != nil { - return x.PodConfig +func (m *VPCENI) GetPodConfig() *Pod { + if m != nil { + return m.PodConfig } return nil } -func (x *VPCENI) GetServiceCidr() string { - if x != nil { - return x.ServiceCidr +func (m *VPCENI) GetServiceCidr() string { + if m != nil { + return m.ServiceCidr } return "" } // Managed k8s ENI type ManagedK8SENI struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - EniConfig *ENI `protobuf:"bytes,1,opt,name=EniConfig,proto3" json:"EniConfig,omitempty"` - PodConfig *Pod `protobuf:"bytes,2,opt,name=PodConfig,proto3" json:"PodConfig,omitempty"` - PodCidr string `protobuf:"bytes,3,opt,name=PodCidr,proto3" json:"PodCidr,omitempty"` - VpcCidr string `protobuf:"bytes,4,opt,name=VpcCidr,proto3" json:"VpcCidr,omitempty"` - NodeCidr string `protobuf:"bytes,5,opt,name=NodeCidr,proto3" json:"NodeCidr,omitempty"` - ServiceCidr string `protobuf:"bytes,6,opt,name=ServiceCidr,proto3" json:"ServiceCidr,omitempty"` + EniConfig *ENI `protobuf:"bytes,1,opt,name=EniConfig,proto3" json:"EniConfig,omitempty"` + PodConfig *Pod `protobuf:"bytes,2,opt,name=PodConfig,proto3" json:"PodConfig,omitempty"` + PodCidr string `protobuf:"bytes,3,opt,name=PodCidr,proto3" json:"PodCidr,omitempty"` + VpcCidr string `protobuf:"bytes,4,opt,name=VpcCidr,proto3" json:"VpcCidr,omitempty"` + NodeCidr string 
`protobuf:"bytes,5,opt,name=NodeCidr,proto3" json:"NodeCidr,omitempty"` + ServiceCidr string `protobuf:"bytes,6,opt,name=ServiceCidr,proto3" json:"ServiceCidr,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ManagedK8SENI) Reset() { *m = ManagedK8SENI{} } +func (m *ManagedK8SENI) String() string { return proto.CompactTextString(m) } +func (*ManagedK8SENI) ProtoMessage() {} +func (*ManagedK8SENI) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{5} } -func (x *ManagedK8SENI) Reset() { - *x = ManagedK8SENI{} - if protoimpl.UnsafeEnabled { - mi := &file_rpc_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (m *ManagedK8SENI) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ManagedK8SENI.Unmarshal(m, b) } - -func (x *ManagedK8SENI) String() string { - return protoimpl.X.MessageStringOf(x) +func (m *ManagedK8SENI) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ManagedK8SENI.Marshal(b, m, deterministic) } - -func (*ManagedK8SENI) ProtoMessage() {} - -func (x *ManagedK8SENI) ProtoReflect() protoreflect.Message { - mi := &file_rpc_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) +func (m *ManagedK8SENI) XXX_Merge(src proto.Message) { + xxx_messageInfo_ManagedK8SENI.Merge(m, src) } - -// Deprecated: Use ManagedK8SENI.ProtoReflect.Descriptor instead. 
-func (*ManagedK8SENI) Descriptor() ([]byte, []int) { - return file_rpc_proto_rawDescGZIP(), []int{5} +func (m *ManagedK8SENI) XXX_Size() int { + return xxx_messageInfo_ManagedK8SENI.Size(m) +} +func (m *ManagedK8SENI) XXX_DiscardUnknown() { + xxx_messageInfo_ManagedK8SENI.DiscardUnknown(m) } -func (x *ManagedK8SENI) GetEniConfig() *ENI { - if x != nil { - return x.EniConfig +var xxx_messageInfo_ManagedK8SENI proto.InternalMessageInfo + +func (m *ManagedK8SENI) GetEniConfig() *ENI { + if m != nil { + return m.EniConfig } return nil } -func (x *ManagedK8SENI) GetPodConfig() *Pod { - if x != nil { - return x.PodConfig +func (m *ManagedK8SENI) GetPodConfig() *Pod { + if m != nil { + return m.PodConfig } return nil } -func (x *ManagedK8SENI) GetPodCidr() string { - if x != nil { - return x.PodCidr +func (m *ManagedK8SENI) GetPodCidr() string { + if m != nil { + return m.PodCidr } return "" } -func (x *ManagedK8SENI) GetVpcCidr() string { - if x != nil { - return x.VpcCidr +func (m *ManagedK8SENI) GetVpcCidr() string { + if m != nil { + return m.VpcCidr } return "" } -func (x *ManagedK8SENI) GetNodeCidr() string { - if x != nil { - return x.NodeCidr +func (m *ManagedK8SENI) GetNodeCidr() string { + if m != nil { + return m.NodeCidr } return "" } -func (x *ManagedK8SENI) GetServiceCidr() string { - if x != nil { - return x.ServiceCidr +func (m *ManagedK8SENI) GetServiceCidr() string { + if m != nil { + return m.ServiceCidr } return "" } // ENI Multiple IP type ENIMultiIP struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - EniConfig *ENI `protobuf:"bytes,1,opt,name=EniConfig,proto3" json:"EniConfig,omitempty"` - PodConfig *Pod `protobuf:"bytes,2,opt,name=PodConfig,proto3" json:"PodConfig,omitempty"` - ServiceCidr string `protobuf:"bytes,3,opt,name=ServiceCidr,proto3" json:"ServiceCidr,omitempty"` + EniConfig *ENI `protobuf:"bytes,1,opt,name=EniConfig,proto3" json:"EniConfig,omitempty"` + PodConfig *Pod 
`protobuf:"bytes,2,opt,name=PodConfig,proto3" json:"PodConfig,omitempty"` + ServiceCidr string `protobuf:"bytes,3,opt,name=ServiceCidr,proto3" json:"ServiceCidr,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (x *ENIMultiIP) Reset() { - *x = ENIMultiIP{} - if protoimpl.UnsafeEnabled { - mi := &file_rpc_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (m *ENIMultiIP) Reset() { *m = ENIMultiIP{} } +func (m *ENIMultiIP) String() string { return proto.CompactTextString(m) } +func (*ENIMultiIP) ProtoMessage() {} +func (*ENIMultiIP) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{6} } -func (x *ENIMultiIP) String() string { - return protoimpl.X.MessageStringOf(x) +func (m *ENIMultiIP) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ENIMultiIP.Unmarshal(m, b) } - -func (*ENIMultiIP) ProtoMessage() {} - -func (x *ENIMultiIP) ProtoReflect() protoreflect.Message { - mi := &file_rpc_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) +func (m *ENIMultiIP) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ENIMultiIP.Marshal(b, m, deterministic) } - -// Deprecated: Use ENIMultiIP.ProtoReflect.Descriptor instead. 
-func (*ENIMultiIP) Descriptor() ([]byte, []int) { - return file_rpc_proto_rawDescGZIP(), []int{6} +func (m *ENIMultiIP) XXX_Merge(src proto.Message) { + xxx_messageInfo_ENIMultiIP.Merge(m, src) +} +func (m *ENIMultiIP) XXX_Size() int { + return xxx_messageInfo_ENIMultiIP.Size(m) +} +func (m *ENIMultiIP) XXX_DiscardUnknown() { + xxx_messageInfo_ENIMultiIP.DiscardUnknown(m) } -func (x *ENIMultiIP) GetEniConfig() *ENI { - if x != nil { - return x.EniConfig +var xxx_messageInfo_ENIMultiIP proto.InternalMessageInfo + +func (m *ENIMultiIP) GetEniConfig() *ENI { + if m != nil { + return m.EniConfig } return nil } -func (x *ENIMultiIP) GetPodConfig() *Pod { - if x != nil { - return x.PodConfig +func (m *ENIMultiIP) GetPodConfig() *Pod { + if m != nil { + return m.PodConfig } return nil } -func (x *ENIMultiIP) GetServiceCidr() string { - if x != nil { - return x.ServiceCidr +func (m *ENIMultiIP) GetServiceCidr() string { + if m != nil { + return m.ServiceCidr } return "" } type AllocIPReply struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - Success bool `protobuf:"varint,1,opt,name=Success,proto3" json:"Success,omitempty"` IPType IPType `protobuf:"varint,2,opt,name=IPType,proto3,enum=rpc.IPType" json:"IPType,omitempty"` - // Types that are assignable to NetworkInfo: + // Types that are valid to be assigned to NetworkInfo: // *AllocIPReply_VpcIp // *AllocIPReply_VpcEni // *AllocIPReply_ManagedK8S // *AllocIPReply_ENIMultiIP - NetworkInfo isAllocIPReply_NetworkInfo `protobuf_oneof:"NetworkInfo"` -} - -func (x *AllocIPReply) Reset() { - *x = AllocIPReply{} - if protoimpl.UnsafeEnabled { - mi := &file_rpc_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AllocIPReply) String() string { - return protoimpl.X.MessageStringOf(x) + NetworkInfo isAllocIPReply_NetworkInfo `protobuf_oneof:"NetworkInfo"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + 
XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (*AllocIPReply) ProtoMessage() {} - -func (x *AllocIPReply) ProtoReflect() protoreflect.Message { - mi := &file_rpc_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AllocIPReply.ProtoReflect.Descriptor instead. +func (m *AllocIPReply) Reset() { *m = AllocIPReply{} } +func (m *AllocIPReply) String() string { return proto.CompactTextString(m) } +func (*AllocIPReply) ProtoMessage() {} func (*AllocIPReply) Descriptor() ([]byte, []int) { - return file_rpc_proto_rawDescGZIP(), []int{7} + return fileDescriptor_77a6da22d6a3feb1, []int{7} } -func (x *AllocIPReply) GetSuccess() bool { - if x != nil { - return x.Success - } - return false +func (m *AllocIPReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AllocIPReply.Unmarshal(m, b) } - -func (x *AllocIPReply) GetIPType() IPType { - if x != nil { - return x.IPType - } - return IPType_TypeVPCIP +func (m *AllocIPReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AllocIPReply.Marshal(b, m, deterministic) } - -func (m *AllocIPReply) GetNetworkInfo() isAllocIPReply_NetworkInfo { - if m != nil { - return m.NetworkInfo - } - return nil +func (m *AllocIPReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllocIPReply.Merge(m, src) } - -func (x *AllocIPReply) GetVpcIp() *VPCIP { - if x, ok := x.GetNetworkInfo().(*AllocIPReply_VpcIp); ok { - return x.VpcIp - } - return nil +func (m *AllocIPReply) XXX_Size() int { + return xxx_messageInfo_AllocIPReply.Size(m) } - -func (x *AllocIPReply) GetVpcEni() *VPCENI { - if x, ok := x.GetNetworkInfo().(*AllocIPReply_VpcEni); ok { - return x.VpcEni - } - return nil +func (m *AllocIPReply) XXX_DiscardUnknown() { + xxx_messageInfo_AllocIPReply.DiscardUnknown(m) } -func (x 
*AllocIPReply) GetManagedK8S() *ManagedK8SENI { - if x, ok := x.GetNetworkInfo().(*AllocIPReply_ManagedK8S); ok { - return x.ManagedK8S +var xxx_messageInfo_AllocIPReply proto.InternalMessageInfo + +func (m *AllocIPReply) GetSuccess() bool { + if m != nil { + return m.Success } - return nil + return false } -func (x *AllocIPReply) GetENIMultiIP() *ENIMultiIP { - if x, ok := x.GetNetworkInfo().(*AllocIPReply_ENIMultiIP); ok { - return x.ENIMultiIP +func (m *AllocIPReply) GetIPType() IPType { + if m != nil { + return m.IPType } - return nil + return IPType_TypeVPCIP } type isAllocIPReply_NetworkInfo interface { @@ -701,685 +626,528 @@ func (*AllocIPReply_ManagedK8S) isAllocIPReply_NetworkInfo() {} func (*AllocIPReply_ENIMultiIP) isAllocIPReply_NetworkInfo() {} -type ReleaseIPRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (m *AllocIPReply) GetNetworkInfo() isAllocIPReply_NetworkInfo { + if m != nil { + return m.NetworkInfo + } + return nil +} - K8SPodName string `protobuf:"bytes,1,opt,name=K8sPodName,proto3" json:"K8sPodName,omitempty"` - K8SPodNamespace string `protobuf:"bytes,2,opt,name=K8sPodNamespace,proto3" json:"K8sPodNamespace,omitempty"` - K8SPodInfraContainerId string `protobuf:"bytes,3,opt,name=K8sPodInfraContainerId,proto3" json:"K8sPodInfraContainerId,omitempty"` - IPType IPType `protobuf:"varint,4,opt,name=IPType,proto3,enum=rpc.IPType" json:"IPType,omitempty"` - IPv4Addr string `protobuf:"bytes,5,opt,name=IPv4Addr,proto3" json:"IPv4Addr,omitempty"` - MacAddr string `protobuf:"bytes,6,opt,name=MacAddr,proto3" json:"MacAddr,omitempty"` - Reason string `protobuf:"bytes,7,opt,name=Reason,proto3" json:"Reason,omitempty"` +func (m *AllocIPReply) GetVpcIp() *VPCIP { + if x, ok := m.GetNetworkInfo().(*AllocIPReply_VpcIp); ok { + return x.VpcIp + } + return nil } -func (x *ReleaseIPRequest) Reset() { - *x = ReleaseIPRequest{} - if protoimpl.UnsafeEnabled { - mi := 
&file_rpc_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (m *AllocIPReply) GetVpcEni() *VPCENI { + if x, ok := m.GetNetworkInfo().(*AllocIPReply_VpcEni); ok { + return x.VpcEni } + return nil } -func (x *ReleaseIPRequest) String() string { - return protoimpl.X.MessageStringOf(x) +func (m *AllocIPReply) GetManagedK8S() *ManagedK8SENI { + if x, ok := m.GetNetworkInfo().(*AllocIPReply_ManagedK8S); ok { + return x.ManagedK8S + } + return nil } -func (*ReleaseIPRequest) ProtoMessage() {} +func (m *AllocIPReply) GetENIMultiIP() *ENIMultiIP { + if x, ok := m.GetNetworkInfo().(*AllocIPReply_ENIMultiIP); ok { + return x.ENIMultiIP + } + return nil +} -func (x *ReleaseIPRequest) ProtoReflect() protoreflect.Message { - mi := &file_rpc_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +// XXX_OneofWrappers is for the internal use of the proto package. +func (*AllocIPReply) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*AllocIPReply_VpcIp)(nil), + (*AllocIPReply_VpcEni)(nil), + (*AllocIPReply_ManagedK8S)(nil), + (*AllocIPReply_ENIMultiIP)(nil), } - return mi.MessageOf(x) } -// Deprecated: Use ReleaseIPRequest.ProtoReflect.Descriptor instead. 
+type ReleaseIPRequest struct { + K8SPodName string `protobuf:"bytes,1,opt,name=K8sPodName,proto3" json:"K8sPodName,omitempty"` + K8SPodNamespace string `protobuf:"bytes,2,opt,name=K8sPodNamespace,proto3" json:"K8sPodNamespace,omitempty"` + K8SPodInfraContainerId string `protobuf:"bytes,3,opt,name=K8sPodInfraContainerId,proto3" json:"K8sPodInfraContainerId,omitempty"` + IPType IPType `protobuf:"varint,4,opt,name=IPType,proto3,enum=rpc.IPType" json:"IPType,omitempty"` + IPv4Addr string `protobuf:"bytes,5,opt,name=IPv4Addr,proto3" json:"IPv4Addr,omitempty"` + MacAddr string `protobuf:"bytes,6,opt,name=MacAddr,proto3" json:"MacAddr,omitempty"` + Reason string `protobuf:"bytes,7,opt,name=Reason,proto3" json:"Reason,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReleaseIPRequest) Reset() { *m = ReleaseIPRequest{} } +func (m *ReleaseIPRequest) String() string { return proto.CompactTextString(m) } +func (*ReleaseIPRequest) ProtoMessage() {} func (*ReleaseIPRequest) Descriptor() ([]byte, []int) { - return file_rpc_proto_rawDescGZIP(), []int{8} + return fileDescriptor_77a6da22d6a3feb1, []int{8} +} + +func (m *ReleaseIPRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReleaseIPRequest.Unmarshal(m, b) +} +func (m *ReleaseIPRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReleaseIPRequest.Marshal(b, m, deterministic) +} +func (m *ReleaseIPRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReleaseIPRequest.Merge(m, src) +} +func (m *ReleaseIPRequest) XXX_Size() int { + return xxx_messageInfo_ReleaseIPRequest.Size(m) +} +func (m *ReleaseIPRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ReleaseIPRequest.DiscardUnknown(m) } -func (x *ReleaseIPRequest) GetK8SPodName() string { - if x != nil { - return x.K8SPodName +var xxx_messageInfo_ReleaseIPRequest proto.InternalMessageInfo + +func (m *ReleaseIPRequest) GetK8SPodName() 
string { + if m != nil { + return m.K8SPodName } return "" } -func (x *ReleaseIPRequest) GetK8SPodNamespace() string { - if x != nil { - return x.K8SPodNamespace +func (m *ReleaseIPRequest) GetK8SPodNamespace() string { + if m != nil { + return m.K8SPodNamespace } return "" } -func (x *ReleaseIPRequest) GetK8SPodInfraContainerId() string { - if x != nil { - return x.K8SPodInfraContainerId +func (m *ReleaseIPRequest) GetK8SPodInfraContainerId() string { + if m != nil { + return m.K8SPodInfraContainerId } return "" } -func (x *ReleaseIPRequest) GetIPType() IPType { - if x != nil { - return x.IPType +func (m *ReleaseIPRequest) GetIPType() IPType { + if m != nil { + return m.IPType } return IPType_TypeVPCIP } -func (x *ReleaseIPRequest) GetIPv4Addr() string { - if x != nil { - return x.IPv4Addr +func (m *ReleaseIPRequest) GetIPv4Addr() string { + if m != nil { + return m.IPv4Addr } return "" } -func (x *ReleaseIPRequest) GetMacAddr() string { - if x != nil { - return x.MacAddr +func (m *ReleaseIPRequest) GetMacAddr() string { + if m != nil { + return m.MacAddr } return "" } -func (x *ReleaseIPRequest) GetReason() string { - if x != nil { - return x.Reason +func (m *ReleaseIPRequest) GetReason() string { + if m != nil { + return m.Reason } return "" } type ReleaseIPReply struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Success bool `protobuf:"varint,1,opt,name=Success,proto3" json:"Success,omitempty"` - IPv4Addr string `protobuf:"bytes,2,opt,name=IPv4Addr,proto3" json:"IPv4Addr,omitempty"` - DeviceNumber int32 `protobuf:"varint,3,opt,name=DeviceNumber,proto3" json:"DeviceNumber,omitempty"` + Success bool `protobuf:"varint,1,opt,name=Success,proto3" json:"Success,omitempty"` + IPv4Addr string `protobuf:"bytes,2,opt,name=IPv4Addr,proto3" json:"IPv4Addr,omitempty"` + DeviceNumber int32 `protobuf:"varint,3,opt,name=DeviceNumber,proto3" json:"DeviceNumber,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` 
+ XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (x *ReleaseIPReply) Reset() { - *x = ReleaseIPReply{} - if protoimpl.UnsafeEnabled { - mi := &file_rpc_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (m *ReleaseIPReply) Reset() { *m = ReleaseIPReply{} } +func (m *ReleaseIPReply) String() string { return proto.CompactTextString(m) } +func (*ReleaseIPReply) ProtoMessage() {} +func (*ReleaseIPReply) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{9} } -func (x *ReleaseIPReply) String() string { - return protoimpl.X.MessageStringOf(x) +func (m *ReleaseIPReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReleaseIPReply.Unmarshal(m, b) } - -func (*ReleaseIPReply) ProtoMessage() {} - -func (x *ReleaseIPReply) ProtoReflect() protoreflect.Message { - mi := &file_rpc_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) +func (m *ReleaseIPReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReleaseIPReply.Marshal(b, m, deterministic) } - -// Deprecated: Use ReleaseIPReply.ProtoReflect.Descriptor instead. 
-func (*ReleaseIPReply) Descriptor() ([]byte, []int) { - return file_rpc_proto_rawDescGZIP(), []int{9} +func (m *ReleaseIPReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReleaseIPReply.Merge(m, src) +} +func (m *ReleaseIPReply) XXX_Size() int { + return xxx_messageInfo_ReleaseIPReply.Size(m) } +func (m *ReleaseIPReply) XXX_DiscardUnknown() { + xxx_messageInfo_ReleaseIPReply.DiscardUnknown(m) +} + +var xxx_messageInfo_ReleaseIPReply proto.InternalMessageInfo -func (x *ReleaseIPReply) GetSuccess() bool { - if x != nil { - return x.Success +func (m *ReleaseIPReply) GetSuccess() bool { + if m != nil { + return m.Success } return false } -func (x *ReleaseIPReply) GetIPv4Addr() string { - if x != nil { - return x.IPv4Addr +func (m *ReleaseIPReply) GetIPv4Addr() string { + if m != nil { + return m.IPv4Addr } return "" } -func (x *ReleaseIPReply) GetDeviceNumber() int32 { - if x != nil { - return x.DeviceNumber +func (m *ReleaseIPReply) GetDeviceNumber() int32 { + if m != nil { + return m.DeviceNumber } return 0 } type GetInfoRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - K8SPodName string `protobuf:"bytes,1,opt,name=K8sPodName,proto3" json:"K8sPodName,omitempty"` - K8SPodNamespace string `protobuf:"bytes,2,opt,name=K8sPodNamespace,proto3" json:"K8sPodNamespace,omitempty"` - K8SPodInfraContainerId string `protobuf:"bytes,3,opt,name=K8sPodInfraContainerId,proto3" json:"K8sPodInfraContainerId,omitempty"` + K8SPodName string `protobuf:"bytes,1,opt,name=K8sPodName,proto3" json:"K8sPodName,omitempty"` + K8SPodNamespace string `protobuf:"bytes,2,opt,name=K8sPodNamespace,proto3" json:"K8sPodNamespace,omitempty"` + K8SPodInfraContainerId string `protobuf:"bytes,3,opt,name=K8sPodInfraContainerId,proto3" json:"K8sPodInfraContainerId,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (x *GetInfoRequest) Reset() { - *x = 
GetInfoRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_rpc_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (m *GetInfoRequest) Reset() { *m = GetInfoRequest{} } +func (m *GetInfoRequest) String() string { return proto.CompactTextString(m) } +func (*GetInfoRequest) ProtoMessage() {} +func (*GetInfoRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{10} } -func (x *GetInfoRequest) String() string { - return protoimpl.X.MessageStringOf(x) +func (m *GetInfoRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetInfoRequest.Unmarshal(m, b) +} +func (m *GetInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetInfoRequest.Marshal(b, m, deterministic) +} +func (m *GetInfoRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetInfoRequest.Merge(m, src) +} +func (m *GetInfoRequest) XXX_Size() int { + return xxx_messageInfo_GetInfoRequest.Size(m) +} +func (m *GetInfoRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetInfoRequest.DiscardUnknown(m) } -func (*GetInfoRequest) ProtoMessage() {} +var xxx_messageInfo_GetInfoRequest proto.InternalMessageInfo -func (x *GetInfoRequest) ProtoReflect() protoreflect.Message { - mi := &file_rpc_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (m *GetInfoRequest) GetK8SPodName() string { + if m != nil { + return m.K8SPodName } - return mi.MessageOf(x) + return "" } -// Deprecated: Use GetInfoRequest.ProtoReflect.Descriptor instead. 
-func (*GetInfoRequest) Descriptor() ([]byte, []int) { - return file_rpc_proto_rawDescGZIP(), []int{10} +func (m *GetInfoRequest) GetK8SPodNamespace() string { + if m != nil { + return m.K8SPodNamespace + } + return "" } -func (x *GetInfoRequest) GetK8SPodName() string { - if x != nil { - return x.K8SPodName +func (m *GetInfoRequest) GetK8SPodInfraContainerId() string { + if m != nil { + return m.K8SPodInfraContainerId } return "" } -func (x *GetInfoRequest) GetK8SPodNamespace() string { - if x != nil { - return x.K8SPodNamespace +type GetInfoReply struct { + IPType IPType `protobuf:"varint,1,opt,name=IPType,proto3,enum=rpc.IPType" json:"IPType,omitempty"` + PodConfig *Pod `protobuf:"bytes,2,opt,name=PodConfig,proto3" json:"PodConfig,omitempty"` + NodeCidr string `protobuf:"bytes,3,opt,name=NodeCidr,proto3" json:"NodeCidr,omitempty"` + PodIP string `protobuf:"bytes,4,opt,name=PodIP,proto3" json:"PodIP,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetInfoReply) Reset() { *m = GetInfoReply{} } +func (m *GetInfoReply) String() string { return proto.CompactTextString(m) } +func (*GetInfoReply) ProtoMessage() {} +func (*GetInfoReply) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{11} +} + +func (m *GetInfoReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetInfoReply.Unmarshal(m, b) +} +func (m *GetInfoReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetInfoReply.Marshal(b, m, deterministic) +} +func (m *GetInfoReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetInfoReply.Merge(m, src) +} +func (m *GetInfoReply) XXX_Size() int { + return xxx_messageInfo_GetInfoReply.Size(m) +} +func (m *GetInfoReply) XXX_DiscardUnknown() { + xxx_messageInfo_GetInfoReply.DiscardUnknown(m) +} + +var xxx_messageInfo_GetInfoReply proto.InternalMessageInfo + +func (m *GetInfoReply) GetIPType() IPType { + 
if m != nil { + return m.IPType } - return "" + return IPType_TypeVPCIP +} + +func (m *GetInfoReply) GetPodConfig() *Pod { + if m != nil { + return m.PodConfig + } + return nil } -func (x *GetInfoRequest) GetK8SPodInfraContainerId() string { - if x != nil { - return x.K8SPodInfraContainerId +func (m *GetInfoReply) GetNodeCidr() string { + if m != nil { + return m.NodeCidr } return "" } -type GetInfoReply struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (m *GetInfoReply) GetPodIP() string { + if m != nil { + return m.PodIP + } + return "" +} - IPType IPType `protobuf:"varint,1,opt,name=IPType,proto3,enum=rpc.IPType" json:"IPType,omitempty"` - PodConfig *Pod `protobuf:"bytes,2,opt,name=PodConfig,proto3" json:"PodConfig,omitempty"` - NodeCidr string `protobuf:"bytes,3,opt,name=NodeCidr,proto3" json:"NodeCidr,omitempty"` - PodIP string `protobuf:"bytes,4,opt,name=PodIP,proto3" json:"PodIP,omitempty"` +type EventRequest struct { + EventTarget EventTarget `protobuf:"varint,1,opt,name=EventTarget,proto3,enum=rpc.EventTarget" json:"EventTarget,omitempty"` + K8SPodName string `protobuf:"bytes,2,opt,name=K8sPodName,proto3" json:"K8sPodName,omitempty"` + K8SPodNamespace string `protobuf:"bytes,3,opt,name=K8sPodNamespace,proto3" json:"K8sPodNamespace,omitempty"` + EventType EventType `protobuf:"varint,4,opt,name=EventType,proto3,enum=rpc.EventType" json:"EventType,omitempty"` + Reason string `protobuf:"bytes,5,opt,name=Reason,proto3" json:"Reason,omitempty"` + Message string `protobuf:"bytes,6,opt,name=Message,proto3" json:"Message,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (x *GetInfoReply) Reset() { - *x = GetInfoReply{} - if protoimpl.UnsafeEnabled { - mi := &file_rpc_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (m *EventRequest) Reset() { *m = 
EventRequest{} } +func (m *EventRequest) String() string { return proto.CompactTextString(m) } +func (*EventRequest) ProtoMessage() {} +func (*EventRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{12} } -func (x *GetInfoReply) String() string { - return protoimpl.X.MessageStringOf(x) +func (m *EventRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EventRequest.Unmarshal(m, b) +} +func (m *EventRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EventRequest.Marshal(b, m, deterministic) +} +func (m *EventRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventRequest.Merge(m, src) +} +func (m *EventRequest) XXX_Size() int { + return xxx_messageInfo_EventRequest.Size(m) +} +func (m *EventRequest) XXX_DiscardUnknown() { + xxx_messageInfo_EventRequest.DiscardUnknown(m) } -func (*GetInfoReply) ProtoMessage() {} +var xxx_messageInfo_EventRequest proto.InternalMessageInfo -func (x *GetInfoReply) ProtoReflect() protoreflect.Message { - mi := &file_rpc_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (m *EventRequest) GetEventTarget() EventTarget { + if m != nil { + return m.EventTarget } - return mi.MessageOf(x) + return EventTarget_EventTargetNode } -// Deprecated: Use GetInfoReply.ProtoReflect.Descriptor instead. 
-func (*GetInfoReply) Descriptor() ([]byte, []int) { - return file_rpc_proto_rawDescGZIP(), []int{11} +func (m *EventRequest) GetK8SPodName() string { + if m != nil { + return m.K8SPodName + } + return "" } -func (x *GetInfoReply) GetIPType() IPType { - if x != nil { - return x.IPType +func (m *EventRequest) GetK8SPodNamespace() string { + if m != nil { + return m.K8SPodNamespace } - return IPType_TypeVPCIP + return "" } -func (x *GetInfoReply) GetPodConfig() *Pod { - if x != nil { - return x.PodConfig +func (m *EventRequest) GetEventType() EventType { + if m != nil { + return m.EventType } - return nil + return EventType_EventTypeNormal } -func (x *GetInfoReply) GetNodeCidr() string { - if x != nil { - return x.NodeCidr +func (m *EventRequest) GetReason() string { + if m != nil { + return m.Reason } return "" } -func (x *GetInfoReply) GetPodIP() string { - if x != nil { - return x.PodIP +func (m *EventRequest) GetMessage() string { + if m != nil { + return m.Message } return "" } -var File_rpc_proto protoreflect.FileDescriptor - -var file_rpc_proto_rawDesc = []byte{ - 0x0a, 0x09, 0x72, 0x70, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x03, 0x72, 0x70, 0x63, - 0x22, 0xc0, 0x01, 0x0a, 0x0e, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x49, 0x50, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x4b, 0x38, 0x73, 0x50, 0x6f, 0x64, 0x4e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x4b, 0x38, 0x73, 0x50, 0x6f, 0x64, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x0f, 0x4b, 0x38, 0x73, 0x50, 0x6f, 0x64, 0x4e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x4b, 0x38, - 0x73, 0x50, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x36, 0x0a, - 0x16, 0x4b, 0x38, 0x73, 0x50, 0x6f, 0x64, 0x49, 0x6e, 0x66, 0x72, 0x61, 0x43, 0x6f, 0x6e, 0x74, - 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x4b, - 0x38, 0x73, 0x50, 0x6f, 0x64, 
0x49, 0x6e, 0x66, 0x72, 0x61, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, - 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x4e, 0x65, 0x74, 0x6e, 0x73, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x4e, 0x65, 0x74, 0x6e, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x49, - 0x66, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x49, 0x66, 0x4e, - 0x61, 0x6d, 0x65, 0x22, 0x37, 0x0a, 0x03, 0x50, 0x6f, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x49, 0x6e, - 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x49, 0x6e, 0x67, - 0x72, 0x65, 0x73, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x45, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x45, 0x67, 0x72, 0x65, 0x73, 0x73, 0x22, 0x4b, 0x0a, 0x05, - 0x56, 0x50, 0x43, 0x49, 0x50, 0x12, 0x26, 0x0a, 0x09, 0x50, 0x6f, 0x64, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x50, - 0x6f, 0x64, 0x52, 0x09, 0x50, 0x6f, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1a, 0x0a, - 0x08, 0x4e, 0x6f, 0x64, 0x65, 0x43, 0x69, 0x64, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x4e, 0x6f, 0x64, 0x65, 0x43, 0x69, 0x64, 0x72, 0x22, 0xc3, 0x01, 0x0a, 0x03, 0x45, 0x4e, - 0x49, 0x12, 0x1a, 0x0a, 0x08, 0x49, 0x50, 0x76, 0x34, 0x41, 0x64, 0x64, 0x72, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x49, 0x50, 0x76, 0x34, 0x41, 0x64, 0x64, 0x72, 0x12, 0x1e, 0x0a, - 0x0a, 0x49, 0x50, 0x76, 0x34, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0a, 0x49, 0x50, 0x76, 0x34, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x12, 0x18, 0x0a, - 0x07, 0x4d, 0x61, 0x63, 0x41, 0x64, 0x64, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x4d, 0x61, 0x63, 0x41, 0x64, 0x64, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x47, 0x61, 0x74, 0x65, 0x77, - 0x61, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, - 0x79, 0x12, 0x22, 0x0a, 0x0c, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 
0x4e, 0x75, 0x6d, 0x62, 0x65, - 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x4e, - 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x28, 0x0a, 0x0f, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, - 0x49, 0x50, 0x76, 0x34, 0x41, 0x64, 0x64, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, - 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x49, 0x50, 0x76, 0x34, 0x41, 0x64, 0x64, 0x72, 0x22, - 0x7a, 0x0a, 0x06, 0x56, 0x50, 0x43, 0x45, 0x4e, 0x49, 0x12, 0x26, 0x0a, 0x09, 0x45, 0x6e, 0x69, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x72, - 0x70, 0x63, 0x2e, 0x45, 0x4e, 0x49, 0x52, 0x09, 0x45, 0x6e, 0x69, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x12, 0x26, 0x0a, 0x09, 0x50, 0x6f, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x50, 0x6f, 0x64, 0x52, 0x09, - 0x50, 0x6f, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x20, 0x0a, 0x0b, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x43, 0x69, 0x64, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x69, 0x64, 0x72, 0x22, 0xd1, 0x01, 0x0a, 0x0d, - 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x4b, 0x38, 0x53, 0x45, 0x4e, 0x49, 0x12, 0x26, 0x0a, - 0x09, 0x45, 0x6e, 0x69, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x08, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x4e, 0x49, 0x52, 0x09, 0x45, 0x6e, 0x69, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x26, 0x0a, 0x09, 0x50, 0x6f, 0x64, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x50, - 0x6f, 0x64, 0x52, 0x09, 0x50, 0x6f, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, - 0x07, 0x50, 0x6f, 0x64, 0x43, 0x69, 0x64, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x50, 0x6f, 0x64, 0x43, 0x69, 0x64, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x56, 0x70, 0x63, 0x43, 0x69, - 0x64, 
0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x56, 0x70, 0x63, 0x43, 0x69, 0x64, - 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x4e, 0x6f, 0x64, 0x65, 0x43, 0x69, 0x64, 0x72, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x4e, 0x6f, 0x64, 0x65, 0x43, 0x69, 0x64, 0x72, 0x12, 0x20, 0x0a, - 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x69, 0x64, 0x72, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x69, 0x64, 0x72, 0x22, - 0x7e, 0x0a, 0x0a, 0x45, 0x4e, 0x49, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x49, 0x50, 0x12, 0x26, 0x0a, - 0x09, 0x45, 0x6e, 0x69, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x08, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x4e, 0x49, 0x52, 0x09, 0x45, 0x6e, 0x69, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x26, 0x0a, 0x09, 0x50, 0x6f, 0x64, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x50, - 0x6f, 0x64, 0x52, 0x09, 0x50, 0x6f, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x20, 0x0a, - 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x69, 0x64, 0x72, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x69, 0x64, 0x72, 0x22, - 0x90, 0x02, 0x0a, 0x0c, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x49, 0x50, 0x52, 0x65, 0x70, 0x6c, 0x79, - 0x12, 0x18, 0x0a, 0x07, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x07, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x23, 0x0a, 0x06, 0x49, 0x50, - 0x54, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0b, 0x2e, 0x72, 0x70, 0x63, - 0x2e, 0x49, 0x50, 0x54, 0x79, 0x70, 0x65, 0x52, 0x06, 0x49, 0x50, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x22, 0x0a, 0x05, 0x56, 0x70, 0x63, 0x49, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, - 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x56, 0x50, 0x43, 0x49, 0x50, 0x48, 0x00, 0x52, 0x05, 0x56, 0x70, - 0x63, 0x49, 0x70, 0x12, 0x25, 0x0a, 0x06, 
0x56, 0x70, 0x63, 0x45, 0x6e, 0x69, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x56, 0x50, 0x43, 0x45, 0x4e, 0x49, - 0x48, 0x00, 0x52, 0x06, 0x56, 0x70, 0x63, 0x45, 0x6e, 0x69, 0x12, 0x34, 0x0a, 0x0a, 0x4d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x64, 0x4b, 0x38, 0x53, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, - 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x4b, 0x38, 0x53, 0x45, - 0x4e, 0x49, 0x48, 0x00, 0x52, 0x0a, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x4b, 0x38, 0x53, - 0x12, 0x31, 0x0a, 0x0a, 0x45, 0x4e, 0x49, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x49, 0x50, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x4e, 0x49, 0x4d, 0x75, - 0x6c, 0x74, 0x69, 0x49, 0x50, 0x48, 0x00, 0x52, 0x0a, 0x45, 0x4e, 0x49, 0x4d, 0x75, 0x6c, 0x74, - 0x69, 0x49, 0x50, 0x42, 0x0d, 0x0a, 0x0b, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x6e, - 0x66, 0x6f, 0x22, 0x87, 0x02, 0x0a, 0x10, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x49, 0x50, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x4b, 0x38, 0x73, 0x50, 0x6f, - 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x4b, 0x38, 0x73, - 0x50, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x0f, 0x4b, 0x38, 0x73, 0x50, 0x6f, - 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0f, 0x4b, 0x38, 0x73, 0x50, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x12, 0x36, 0x0a, 0x16, 0x4b, 0x38, 0x73, 0x50, 0x6f, 0x64, 0x49, 0x6e, 0x66, 0x72, 0x61, - 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x16, 0x4b, 0x38, 0x73, 0x50, 0x6f, 0x64, 0x49, 0x6e, 0x66, 0x72, 0x61, 0x43, 0x6f, - 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x23, 0x0a, 0x06, 0x49, 0x50, 0x54, - 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0b, 0x2e, 0x72, 
0x70, 0x63, 0x2e, - 0x49, 0x50, 0x54, 0x79, 0x70, 0x65, 0x52, 0x06, 0x49, 0x50, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1a, - 0x0a, 0x08, 0x49, 0x50, 0x76, 0x34, 0x41, 0x64, 0x64, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x49, 0x50, 0x76, 0x34, 0x41, 0x64, 0x64, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x4d, 0x61, - 0x63, 0x41, 0x64, 0x64, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x4d, 0x61, 0x63, - 0x41, 0x64, 0x64, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x22, 0x6a, 0x0a, 0x0e, - 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x49, 0x50, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x18, - 0x0a, 0x07, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x07, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x49, 0x50, 0x76, 0x34, - 0x41, 0x64, 0x64, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x49, 0x50, 0x76, 0x34, - 0x41, 0x64, 0x64, 0x72, 0x12, 0x22, 0x0a, 0x0c, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x75, - 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x44, 0x65, 0x76, 0x69, - 0x63, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x92, 0x01, 0x0a, 0x0e, 0x47, 0x65, 0x74, - 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x4b, - 0x38, 0x73, 0x50, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0a, 0x4b, 0x38, 0x73, 0x50, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x0f, 0x4b, - 0x38, 0x73, 0x50, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x4b, 0x38, 0x73, 0x50, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x36, 0x0a, 0x16, 0x4b, 0x38, 0x73, 0x50, 0x6f, 0x64, 0x49, - 0x6e, 0x66, 0x72, 0x61, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x18, - 0x03, 0x20, 0x01, 
0x28, 0x09, 0x52, 0x16, 0x4b, 0x38, 0x73, 0x50, 0x6f, 0x64, 0x49, 0x6e, 0x66, - 0x72, 0x61, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x22, 0x8d, 0x01, - 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x23, - 0x0a, 0x06, 0x49, 0x50, 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0b, - 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x49, 0x50, 0x54, 0x79, 0x70, 0x65, 0x52, 0x06, 0x49, 0x50, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x26, 0x0a, 0x09, 0x50, 0x6f, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x50, 0x6f, 0x64, - 0x52, 0x09, 0x50, 0x6f, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1a, 0x0a, 0x08, 0x4e, - 0x6f, 0x64, 0x65, 0x43, 0x69, 0x64, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x4e, - 0x6f, 0x64, 0x65, 0x43, 0x69, 0x64, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x50, 0x6f, 0x64, 0x49, 0x50, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x50, 0x6f, 0x64, 0x49, 0x50, 0x2a, 0x4f, 0x0a, - 0x06, 0x49, 0x50, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x79, 0x70, 0x65, 0x56, - 0x50, 0x43, 0x49, 0x50, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x79, 0x70, 0x65, 0x56, 0x50, - 0x43, 0x45, 0x4e, 0x49, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x54, 0x79, 0x70, 0x65, 0x4d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x64, 0x4b, 0x38, 0x53, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x54, 0x79, - 0x70, 0x65, 0x45, 0x4e, 0x49, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x49, 0x50, 0x10, 0x03, 0x32, 0xb6, - 0x01, 0x0a, 0x0d, 0x54, 0x65, 0x72, 0x77, 0x61, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, - 0x12, 0x33, 0x0a, 0x07, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x49, 0x50, 0x12, 0x13, 0x2e, 0x72, 0x70, - 0x63, 0x2e, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x49, 0x50, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x11, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x49, 0x50, 0x52, 0x65, - 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x39, 0x0a, 0x09, 
0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, - 0x49, 0x50, 0x12, 0x15, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, - 0x49, 0x50, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x13, 0x2e, 0x72, 0x70, 0x63, 0x2e, - 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x49, 0x50, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, - 0x12, 0x35, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x49, 0x50, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x13, 0x2e, - 0x72, 0x70, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, - 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_rpc_proto_rawDescOnce sync.Once - file_rpc_proto_rawDescData = file_rpc_proto_rawDesc -) +type EventReply struct { + Succeed bool `protobuf:"varint,1,opt,name=Succeed,proto3" json:"Succeed,omitempty"` + Error string `protobuf:"bytes,2,opt,name=Error,proto3" json:"Error,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} -func file_rpc_proto_rawDescGZIP() []byte { - file_rpc_proto_rawDescOnce.Do(func() { - file_rpc_proto_rawDescData = protoimpl.X.CompressGZIP(file_rpc_proto_rawDescData) - }) - return file_rpc_proto_rawDescData -} - -var file_rpc_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_rpc_proto_msgTypes = make([]protoimpl.MessageInfo, 12) -var file_rpc_proto_goTypes = []interface{}{ - (IPType)(0), // 0: rpc.IPType - (*AllocIPRequest)(nil), // 1: rpc.AllocIPRequest - (*Pod)(nil), // 2: rpc.Pod - (*VPCIP)(nil), // 3: rpc.VPCIP - (*ENI)(nil), // 4: rpc.ENI - (*VPCENI)(nil), // 5: rpc.VPCENI - (*ManagedK8SENI)(nil), // 6: rpc.ManagedK8SENI - (*ENIMultiIP)(nil), // 7: rpc.ENIMultiIP - (*AllocIPReply)(nil), // 8: rpc.AllocIPReply - (*ReleaseIPRequest)(nil), // 9: rpc.ReleaseIPRequest - (*ReleaseIPReply)(nil), // 10: rpc.ReleaseIPReply - 
(*GetInfoRequest)(nil), // 11: rpc.GetInfoRequest - (*GetInfoReply)(nil), // 12: rpc.GetInfoReply -} -var file_rpc_proto_depIdxs = []int32{ - 2, // 0: rpc.VPCIP.PodConfig:type_name -> rpc.Pod - 4, // 1: rpc.VPCENI.EniConfig:type_name -> rpc.ENI - 2, // 2: rpc.VPCENI.PodConfig:type_name -> rpc.Pod - 4, // 3: rpc.ManagedK8SENI.EniConfig:type_name -> rpc.ENI - 2, // 4: rpc.ManagedK8SENI.PodConfig:type_name -> rpc.Pod - 4, // 5: rpc.ENIMultiIP.EniConfig:type_name -> rpc.ENI - 2, // 6: rpc.ENIMultiIP.PodConfig:type_name -> rpc.Pod - 0, // 7: rpc.AllocIPReply.IPType:type_name -> rpc.IPType - 3, // 8: rpc.AllocIPReply.VpcIp:type_name -> rpc.VPCIP - 5, // 9: rpc.AllocIPReply.VpcEni:type_name -> rpc.VPCENI - 6, // 10: rpc.AllocIPReply.ManagedK8S:type_name -> rpc.ManagedK8SENI - 7, // 11: rpc.AllocIPReply.ENIMultiIP:type_name -> rpc.ENIMultiIP - 0, // 12: rpc.ReleaseIPRequest.IPType:type_name -> rpc.IPType - 0, // 13: rpc.GetInfoReply.IPType:type_name -> rpc.IPType - 2, // 14: rpc.GetInfoReply.PodConfig:type_name -> rpc.Pod - 1, // 15: rpc.TerwayBackend.AllocIP:input_type -> rpc.AllocIPRequest - 9, // 16: rpc.TerwayBackend.ReleaseIP:input_type -> rpc.ReleaseIPRequest - 11, // 17: rpc.TerwayBackend.GetIPInfo:input_type -> rpc.GetInfoRequest - 8, // 18: rpc.TerwayBackend.AllocIP:output_type -> rpc.AllocIPReply - 10, // 19: rpc.TerwayBackend.ReleaseIP:output_type -> rpc.ReleaseIPReply - 12, // 20: rpc.TerwayBackend.GetIPInfo:output_type -> rpc.GetInfoReply - 18, // [18:21] is the sub-list for method output_type - 15, // [15:18] is the sub-list for method input_type - 15, // [15:15] is the sub-list for extension type_name - 15, // [15:15] is the sub-list for extension extendee - 0, // [0:15] is the sub-list for field type_name -} - -func init() { file_rpc_proto_init() } -func file_rpc_proto_init() { - if File_rpc_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_rpc_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := 
v.(*AllocIPRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_rpc_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Pod); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_rpc_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VPCIP); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_rpc_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ENI); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_rpc_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VPCENI); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_rpc_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ManagedK8SENI); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_rpc_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ENIMultiIP); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_rpc_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AllocIPReply); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_rpc_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReleaseIPRequest); i { - case 0: - return &v.state - case 
1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_rpc_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReleaseIPReply); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_rpc_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetInfoRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_rpc_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetInfoReply); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_rpc_proto_msgTypes[7].OneofWrappers = []interface{}{ - (*AllocIPReply_VpcIp)(nil), - (*AllocIPReply_VpcEni)(nil), - (*AllocIPReply_ManagedK8S)(nil), - (*AllocIPReply_ENIMultiIP)(nil), +func (m *EventReply) Reset() { *m = EventReply{} } +func (m *EventReply) String() string { return proto.CompactTextString(m) } +func (*EventReply) ProtoMessage() {} +func (*EventReply) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{13} +} + +func (m *EventReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EventReply.Unmarshal(m, b) +} +func (m *EventReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EventReply.Marshal(b, m, deterministic) +} +func (m *EventReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventReply.Merge(m, src) +} +func (m *EventReply) XXX_Size() int { + return xxx_messageInfo_EventReply.Size(m) +} +func (m *EventReply) XXX_DiscardUnknown() { + xxx_messageInfo_EventReply.DiscardUnknown(m) +} + +var xxx_messageInfo_EventReply proto.InternalMessageInfo + +func (m *EventReply) GetSucceed() bool { + if m != nil { + return m.Succeed } - type x 
struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_rpc_proto_rawDesc, - NumEnums: 1, - NumMessages: 12, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_rpc_proto_goTypes, - DependencyIndexes: file_rpc_proto_depIdxs, - EnumInfos: file_rpc_proto_enumTypes, - MessageInfos: file_rpc_proto_msgTypes, - }.Build() - File_rpc_proto = out.File - file_rpc_proto_rawDesc = nil - file_rpc_proto_goTypes = nil - file_rpc_proto_depIdxs = nil + return false +} + +func (m *EventReply) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +func init() { + proto.RegisterEnum("rpc.IPType", IPType_name, IPType_value) + proto.RegisterEnum("rpc.EventTarget", EventTarget_name, EventTarget_value) + proto.RegisterEnum("rpc.EventType", EventType_name, EventType_value) + proto.RegisterType((*AllocIPRequest)(nil), "rpc.AllocIPRequest") + proto.RegisterType((*Pod)(nil), "rpc.Pod") + proto.RegisterType((*VPCIP)(nil), "rpc.VPCIP") + proto.RegisterType((*ENI)(nil), "rpc.ENI") + proto.RegisterType((*VPCENI)(nil), "rpc.VPCENI") + proto.RegisterType((*ManagedK8SENI)(nil), "rpc.ManagedK8SENI") + proto.RegisterType((*ENIMultiIP)(nil), "rpc.ENIMultiIP") + proto.RegisterType((*AllocIPReply)(nil), "rpc.AllocIPReply") + proto.RegisterType((*ReleaseIPRequest)(nil), "rpc.ReleaseIPRequest") + proto.RegisterType((*ReleaseIPReply)(nil), "rpc.ReleaseIPReply") + proto.RegisterType((*GetInfoRequest)(nil), "rpc.GetInfoRequest") + proto.RegisterType((*GetInfoReply)(nil), "rpc.GetInfoReply") + proto.RegisterType((*EventRequest)(nil), "rpc.EventRequest") + proto.RegisterType((*EventReply)(nil), "rpc.EventReply") +} + +func init() { proto.RegisterFile("rpc.proto", fileDescriptor_77a6da22d6a3feb1) } + +var fileDescriptor_77a6da22d6a3feb1 = []byte{ + // 927 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x56, 0xdd, 0x6e, 0xe3, 0x44, + 0x14, 
0x8e, 0x93, 0xda, 0x6d, 0x8e, 0xdb, 0xd4, 0x3b, 0x2d, 0x55, 0xd4, 0x0b, 0xb4, 0x32, 0x62, + 0xb5, 0xaa, 0xd0, 0x4a, 0xa4, 0x4b, 0x59, 0x24, 0x6e, 0x76, 0x83, 0xb5, 0xb5, 0xaa, 0x1a, 0xcb, + 0xad, 0xc2, 0xf5, 0xd4, 0x9e, 0x46, 0x66, 0xd3, 0x19, 0x33, 0x76, 0x5a, 0x85, 0x0b, 0x6e, 0xb9, + 0x42, 0x42, 0x3c, 0x11, 0x12, 0x4f, 0xc0, 0x73, 0xc0, 0x43, 0xa0, 0xf9, 0x71, 0x3c, 0xf6, 0x6e, + 0xd0, 0xde, 0x80, 0xf6, 0xaa, 0xfd, 0xbe, 0x73, 0x8e, 0x67, 0xce, 0x77, 0x7e, 0x32, 0x30, 0xe4, + 0x45, 0xfa, 0xac, 0xe0, 0xac, 0x62, 0x68, 0xc0, 0x8b, 0xd4, 0xff, 0xdd, 0x82, 0xd1, 0xcb, 0xc5, + 0x82, 0xa5, 0x61, 0x9c, 0x90, 0x1f, 0x96, 0xa4, 0xac, 0xd0, 0xc7, 0x00, 0x17, 0x2f, 0xca, 0x98, + 0x65, 0x11, 0xbe, 0x23, 0x63, 0xeb, 0xb1, 0xf5, 0x74, 0x98, 0x18, 0x0c, 0x7a, 0x0a, 0xfb, 0x0d, + 0x2a, 0x0b, 0x9c, 0x92, 0x71, 0x5f, 0x3a, 0x75, 0x69, 0x74, 0x06, 0x47, 0x8a, 0x0a, 0xe9, 0x2d, + 0xc7, 0x53, 0x46, 0x2b, 0x9c, 0x53, 0xc2, 0xc3, 0x6c, 0x3c, 0x90, 0x01, 0x1b, 0xac, 0xe8, 0x10, + 0xec, 0x88, 0x54, 0xb4, 0x1c, 0x6f, 0x49, 0x37, 0x05, 0xd0, 0x11, 0x38, 0xe1, 0xad, 0xbc, 0x93, + 0x2d, 0x69, 0x8d, 0xfc, 0x2f, 0x61, 0x10, 0xb3, 0x0c, 0x8d, 0x61, 0x3b, 0xa4, 0x73, 0x4e, 0xca, + 0x52, 0xde, 0x79, 0x2b, 0xa9, 0xa1, 0x08, 0x0c, 0x94, 0xa1, 0x2f, 0x0d, 0x1a, 0xf9, 0x17, 0x60, + 0xcf, 0xe2, 0x69, 0x18, 0xa3, 0x27, 0x30, 0x8c, 0x59, 0x36, 0x65, 0xf4, 0x36, 0x9f, 0xcb, 0x60, + 0x77, 0xb2, 0xf3, 0x4c, 0x08, 0x15, 0xb3, 0x2c, 0x69, 0x4c, 0xe8, 0x18, 0x76, 0x22, 0x96, 0x91, + 0x69, 0x9e, 0x71, 0x9d, 0xf2, 0x1a, 0xfb, 0x7f, 0x58, 0x30, 0x08, 0xa2, 0x50, 0xf8, 0x84, 0xf1, + 0xfd, 0xf3, 0x97, 0x59, 0xc6, 0xb5, 0x76, 0x6b, 0x2c, 0x94, 0x15, 0xff, 0x5f, 0x2d, 0x6f, 0x28, + 0xa9, 0xf4, 0x17, 0x0c, 0x46, 0xa4, 0x70, 0x89, 0x53, 0x19, 0xaa, 0x04, 0xaa, 0xa1, 0xb0, 0xbc, + 0xc6, 0x15, 0x79, 0xc0, 0x2b, 0xad, 0x49, 0x0d, 0x91, 0x0f, 0xbb, 0xdf, 0x90, 0xfb, 0x3c, 0x25, + 0xd1, 0xf2, 0xee, 0x86, 0x70, 0xa9, 0x8d, 0x9d, 0xb4, 0x38, 0x51, 0xb1, 0x98, 0xe7, 0x77, 0x98, + 0xaf, 0xd6, 0x57, 0x73, 0x54, 0xc5, 0x3a, 
0xb4, 0xff, 0x23, 0x38, 0xb3, 0x78, 0x2a, 0xf2, 0x78, + 0x02, 0xc3, 0x80, 0xe6, 0xef, 0xd0, 0x24, 0x88, 0xc2, 0xa4, 0x31, 0xb5, 0xb5, 0xeb, 0x6f, 0xd6, + 0xee, 0x31, 0xb8, 0x57, 0x84, 0x8b, 0x4b, 0x49, 0xf9, 0x54, 0x7e, 0x26, 0xe5, 0xff, 0x69, 0xc1, + 0xde, 0x25, 0xa6, 0x78, 0x4e, 0xb2, 0x8b, 0x17, 0x57, 0xff, 0xc5, 0x1d, 0xc6, 0xb0, 0x2d, 0x40, + 0x73, 0x7e, 0x0d, 0x85, 0x65, 0x56, 0xa4, 0xd2, 0xa2, 0xf5, 0xd5, 0xb0, 0x55, 0x73, 0xbb, 0x5d, + 0xf3, 0x6e, 0x4e, 0xce, 0xdb, 0x39, 0xfd, 0x04, 0x10, 0x44, 0xe1, 0xe5, 0x72, 0x51, 0xe5, 0xaa, + 0xcf, 0xfe, 0x67, 0x4d, 0x7f, 0xed, 0xc3, 0xee, 0x7a, 0xbc, 0x8b, 0xc5, 0x4a, 0x24, 0x7a, 0xb5, + 0x4c, 0xd3, 0x7a, 0x4a, 0x76, 0x92, 0x1a, 0xa2, 0x4f, 0xc0, 0x09, 0xe3, 0xeb, 0x55, 0xa1, 0xa6, + 0x79, 0x34, 0x71, 0xe5, 0x89, 0x8a, 0x4a, 0xb4, 0x09, 0xf9, 0x60, 0xcf, 0x8a, 0x34, 0x2c, 0xe4, + 0x59, 0xee, 0x04, 0xa4, 0x8f, 0x1c, 0xa2, 0xf3, 0x5e, 0xa2, 0x4c, 0xe8, 0x53, 0x70, 0x66, 0x45, + 0x1a, 0xd0, 0x5c, 0x4a, 0xe9, 0xea, 0x0f, 0xa9, 0xb6, 0x3a, 0xef, 0x25, 0xda, 0x88, 0x9e, 0x03, + 0x34, 0xd5, 0x96, 0xd2, 0xba, 0x13, 0x24, 0x5d, 0x5b, 0x4d, 0x70, 0xde, 0x4b, 0x0c, 0x3f, 0xf4, + 0xb9, 0x29, 0xa8, 0x54, 0xdc, 0x9d, 0xec, 0xd7, 0x1a, 0x6a, 0x5a, 0x84, 0x34, 0xe8, 0xd5, 0x1e, + 0xb8, 0x11, 0xa9, 0x1e, 0x18, 0x7f, 0x13, 0xd2, 0x5b, 0xe6, 0xff, 0xdc, 0x07, 0x2f, 0x21, 0x0b, + 0x82, 0x4b, 0xf2, 0x21, 0xed, 0xbc, 0x46, 0xfe, 0xad, 0xcd, 0xf2, 0x9b, 0xcb, 0xc5, 0xee, 0x2c, + 0x17, 0x63, 0x79, 0x38, 0xed, 0xe5, 0x71, 0x04, 0x4e, 0x42, 0x70, 0xc9, 0xe8, 0x78, 0x5b, 0x2d, + 0x4e, 0x85, 0xfc, 0xef, 0x61, 0x64, 0x08, 0xf1, 0xef, 0xdd, 0x61, 0x9e, 0xdc, 0xef, 0x9c, 0xdc, + 0x5d, 0x41, 0x83, 0xb7, 0x57, 0x90, 0xff, 0x9b, 0x05, 0xa3, 0xd7, 0xa4, 0x12, 0x15, 0xf8, 0x60, + 0x34, 0xf7, 0x7f, 0xb1, 0x60, 0x77, 0x7d, 0x29, 0x91, 0x7f, 0x53, 0x04, 0x6b, 0x73, 0x11, 0xde, + 0x77, 0x3a, 0xcd, 0xcd, 0x31, 0xe8, 0x6c, 0x8e, 0x43, 0xb0, 0xc5, 0x85, 0xe2, 0xfa, 0x17, 0x4e, + 0x02, 0xff, 0x6f, 0x0b, 0x76, 0x83, 0x7b, 0x42, 0xab, 0x5a, 0xa2, 0x09, 0xb8, 
0x12, 0x5f, 0x63, + 0x3e, 0x27, 0x95, 0xbe, 0x94, 0xa7, 0xda, 0xbd, 0xe1, 0x13, 0xd3, 0xa9, 0x23, 0x6b, 0xff, 0x7d, + 0x64, 0x1d, 0xbc, 0x5b, 0xd6, 0xcf, 0x60, 0xa8, 0x3e, 0xdc, 0x74, 0xe5, 0xc8, 0x38, 0x5b, 0x68, + 0xd2, 0x38, 0x18, 0x5d, 0x66, 0x9b, 0x5d, 0x26, 0xfb, 0x92, 0x94, 0x25, 0x9e, 0x93, 0x75, 0x5f, + 0x2a, 0xe8, 0x7f, 0x0d, 0xa0, 0xb3, 0x35, 0x7b, 0x8f, 0x64, 0xad, 0xde, 0x23, 0xf2, 0x39, 0x10, + 0x70, 0xce, 0xea, 0xc6, 0x53, 0xe0, 0xe4, 0xdb, 0xba, 0x56, 0x68, 0x0f, 0x86, 0xe2, 0xaf, 0x5c, + 0x43, 0x5e, 0x0f, 0x8d, 0x00, 0x34, 0x0c, 0xa2, 0xd0, 0xb3, 0x10, 0x82, 0x91, 0xc0, 0xcd, 0x12, + 0xf1, 0xfa, 0x35, 0xd7, 0x6c, 0x09, 0x6f, 0x70, 0x72, 0xd6, 0x12, 0x1b, 0x1d, 0xc0, 0xbe, 0x01, + 0x45, 0xe5, 0xbc, 0x9e, 0x88, 0x33, 0xc8, 0x98, 0x65, 0x9e, 0x75, 0x72, 0x66, 0xc8, 0xd4, 0x44, + 0xad, 0x0a, 0x12, 0x31, 0x7e, 0x87, 0x17, 0x5e, 0x0f, 0x1d, 0x82, 0xb7, 0x26, 0xbf, 0xc3, 0x9c, + 0xe6, 0x74, 0xee, 0x59, 0x93, 0xbf, 0x2c, 0xd8, 0xbb, 0x26, 0xfc, 0x01, 0xaf, 0x5e, 0xe1, 0xf4, + 0x0d, 0xa1, 0x19, 0x3a, 0x85, 0x6d, 0xbd, 0xac, 0xd1, 0x81, 0x14, 0xba, 0xfd, 0x32, 0x3b, 0x7e, + 0xd4, 0x26, 0x8b, 0xc5, 0xca, 0xef, 0xa1, 0xaf, 0x60, 0xb8, 0x9e, 0x62, 0xf4, 0x91, 0xf4, 0xe8, + 0xae, 0xb7, 0xe3, 0x83, 0x2e, 0xad, 0x42, 0xbf, 0x80, 0xa1, 0x68, 0xff, 0x58, 0x0c, 0x80, 0x3e, + 0xb1, 0x3d, 0xa3, 0xfa, 0x44, 0x73, 0x46, 0xfc, 0x1e, 0x3a, 0x05, 0x37, 0x21, 0x29, 0xe3, 0x99, + 0x4c, 0x0a, 0x3d, 0x6a, 0x7a, 0xa2, 0x0e, 0xdb, 0x37, 0x29, 0x19, 0x74, 0xe3, 0xc8, 0x47, 0xe7, + 0xe9, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xd7, 0x00, 0x01, 0xdf, 0x81, 0x0a, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context -var _ grpc.ClientConnInterface +var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. 
-const _ = grpc.SupportPackageIsVersion6 +const _ = grpc.SupportPackageIsVersion4 // TerwayBackendClient is the client API for TerwayBackend service. // @@ -1388,13 +1156,14 @@ type TerwayBackendClient interface { AllocIP(ctx context.Context, in *AllocIPRequest, opts ...grpc.CallOption) (*AllocIPReply, error) ReleaseIP(ctx context.Context, in *ReleaseIPRequest, opts ...grpc.CallOption) (*ReleaseIPReply, error) GetIPInfo(ctx context.Context, in *GetInfoRequest, opts ...grpc.CallOption) (*GetInfoReply, error) + RecordEvent(ctx context.Context, in *EventRequest, opts ...grpc.CallOption) (*EventReply, error) } type terwayBackendClient struct { - cc grpc.ClientConnInterface + cc *grpc.ClientConn } -func NewTerwayBackendClient(cc grpc.ClientConnInterface) TerwayBackendClient { +func NewTerwayBackendClient(cc *grpc.ClientConn) TerwayBackendClient { return &terwayBackendClient{cc} } @@ -1425,26 +1194,39 @@ func (c *terwayBackendClient) GetIPInfo(ctx context.Context, in *GetInfoRequest, return out, nil } +func (c *terwayBackendClient) RecordEvent(ctx context.Context, in *EventRequest, opts ...grpc.CallOption) (*EventReply, error) { + out := new(EventReply) + err := c.cc.Invoke(ctx, "/rpc.TerwayBackend/RecordEvent", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // TerwayBackendServer is the server API for TerwayBackend service. type TerwayBackendServer interface { AllocIP(context.Context, *AllocIPRequest) (*AllocIPReply, error) ReleaseIP(context.Context, *ReleaseIPRequest) (*ReleaseIPReply, error) GetIPInfo(context.Context, *GetInfoRequest) (*GetInfoReply, error) + RecordEvent(context.Context, *EventRequest) (*EventReply, error) } // UnimplementedTerwayBackendServer can be embedded to have forward compatible implementations. 
type UnimplementedTerwayBackendServer struct { } -func (*UnimplementedTerwayBackendServer) AllocIP(context.Context, *AllocIPRequest) (*AllocIPReply, error) { +func (*UnimplementedTerwayBackendServer) AllocIP(ctx context.Context, req *AllocIPRequest) (*AllocIPReply, error) { return nil, status.Errorf(codes.Unimplemented, "method AllocIP not implemented") } -func (*UnimplementedTerwayBackendServer) ReleaseIP(context.Context, *ReleaseIPRequest) (*ReleaseIPReply, error) { +func (*UnimplementedTerwayBackendServer) ReleaseIP(ctx context.Context, req *ReleaseIPRequest) (*ReleaseIPReply, error) { return nil, status.Errorf(codes.Unimplemented, "method ReleaseIP not implemented") } -func (*UnimplementedTerwayBackendServer) GetIPInfo(context.Context, *GetInfoRequest) (*GetInfoReply, error) { +func (*UnimplementedTerwayBackendServer) GetIPInfo(ctx context.Context, req *GetInfoRequest) (*GetInfoReply, error) { return nil, status.Errorf(codes.Unimplemented, "method GetIPInfo not implemented") } +func (*UnimplementedTerwayBackendServer) RecordEvent(ctx context.Context, req *EventRequest) (*EventReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method RecordEvent not implemented") +} func RegisterTerwayBackendServer(s *grpc.Server, srv TerwayBackendServer) { s.RegisterService(&_TerwayBackend_serviceDesc, srv) @@ -1504,6 +1286,24 @@ func _TerwayBackend_GetIPInfo_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } +func _TerwayBackend_RecordEvent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(EventRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TerwayBackendServer).RecordEvent(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/rpc.TerwayBackend/RecordEvent", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + 
return srv.(TerwayBackendServer).RecordEvent(ctx, req.(*EventRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _TerwayBackend_serviceDesc = grpc.ServiceDesc{ ServiceName: "rpc.TerwayBackend", HandlerType: (*TerwayBackendServer)(nil), @@ -1520,6 +1320,10 @@ var _TerwayBackend_serviceDesc = grpc.ServiceDesc{ MethodName: "GetIPInfo", Handler: _TerwayBackend_GetIPInfo_Handler, }, + { + MethodName: "RecordEvent", + Handler: _TerwayBackend_RecordEvent_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "rpc.proto", diff --git a/rpc/rpc.proto b/rpc/rpc.proto index 528ba15c..6aa8492f 100644 --- a/rpc/rpc.proto +++ b/rpc/rpc.proto @@ -8,6 +8,8 @@ service TerwayBackend { } rpc GetIPInfo(GetInfoRequest) returns (GetInfoReply) { } + rpc RecordEvent(EventRequest) returns (EventReply) { + } } message AllocIPRequest { @@ -111,3 +113,27 @@ message GetInfoReply { string NodeCidr = 3; string PodIP = 4; } + +enum EventTarget { + EventTargetNode = 0; + EventTargetPod = 1; +} + +enum EventType { + EventTypeNormal = 0; + EventTypeWarning = 1; +} + +message EventRequest { + EventTarget EventTarget = 1; + string K8sPodName = 2; + string K8sPodNamespace = 3; + EventType EventType = 4; + string Reason = 5; + string Message = 6; +} + +message EventReply { + bool Succeed = 1; + string Error = 2; +} \ No newline at end of file diff --git a/vendor/github.com/golang/groupcache/LICENSE b/vendor/github.com/golang/groupcache/LICENSE new file mode 100644 index 00000000..37ec93a1 --- /dev/null +++ b/vendor/github.com/golang/groupcache/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. 
+ +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. 
+ +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file 
distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
+ +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/golang/groupcache/lru/lru.go b/vendor/github.com/golang/groupcache/lru/lru.go new file mode 100644 index 00000000..eac1c766 --- /dev/null +++ b/vendor/github.com/golang/groupcache/lru/lru.go @@ -0,0 +1,133 @@ +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package lru implements an LRU cache. +package lru + +import "container/list" + +// Cache is an LRU cache. It is not safe for concurrent access. +type Cache struct { + // MaxEntries is the maximum number of cache entries before + // an item is evicted. Zero means no limit. + MaxEntries int + + // OnEvicted optionally specifies a callback function to be + // executed when an entry is purged from the cache. + OnEvicted func(key Key, value interface{}) + + ll *list.List + cache map[interface{}]*list.Element +} + +// A Key may be any value that is comparable. See http://golang.org/ref/spec#Comparison_operators +type Key interface{} + +type entry struct { + key Key + value interface{} +} + +// New creates a new Cache. +// If maxEntries is zero, the cache has no limit and it's assumed +// that eviction is done by the caller. +func New(maxEntries int) *Cache { + return &Cache{ + MaxEntries: maxEntries, + ll: list.New(), + cache: make(map[interface{}]*list.Element), + } +} + +// Add adds a value to the cache. +func (c *Cache) Add(key Key, value interface{}) { + if c.cache == nil { + c.cache = make(map[interface{}]*list.Element) + c.ll = list.New() + } + if ee, ok := c.cache[key]; ok { + c.ll.MoveToFront(ee) + ee.Value.(*entry).value = value + return + } + ele := c.ll.PushFront(&entry{key, value}) + c.cache[key] = ele + if c.MaxEntries != 0 && c.ll.Len() > c.MaxEntries { + c.RemoveOldest() + } +} + +// Get looks up a key's value from the cache. +func (c *Cache) Get(key Key) (value interface{}, ok bool) { + if c.cache == nil { + return + } + if ele, hit := c.cache[key]; hit { + c.ll.MoveToFront(ele) + return ele.Value.(*entry).value, true + } + return +} + +// Remove removes the provided key from the cache. +func (c *Cache) Remove(key Key) { + if c.cache == nil { + return + } + if ele, hit := c.cache[key]; hit { + c.removeElement(ele) + } +} + +// RemoveOldest removes the oldest item from the cache. 
+func (c *Cache) RemoveOldest() { + if c.cache == nil { + return + } + ele := c.ll.Back() + if ele != nil { + c.removeElement(ele) + } +} + +func (c *Cache) removeElement(e *list.Element) { + c.ll.Remove(e) + kv := e.Value.(*entry) + delete(c.cache, kv.key) + if c.OnEvicted != nil { + c.OnEvicted(kv.key, kv.value) + } +} + +// Len returns the number of items in the cache. +func (c *Cache) Len() int { + if c.cache == nil { + return 0 + } + return c.ll.Len() +} + +// Clear purges all stored items from the cache. +func (c *Cache) Clear() { + if c.OnEvicted != nil { + for _, e := range c.cache { + kv := e.Value.(*entry) + c.OnEvicted(kv.key, kv.value) + } + } + c.ll = nil + c.cache = nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/BUILD b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/BUILD new file mode 100644 index 00000000..0ddf97d1 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/BUILD @@ -0,0 +1,39 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_test( + name = "go_default_test", + srcs = ["util_test.go"], + embed = [":go_default_library"], +) + +go_library( + name = "go_default_library", + srcs = [ + "errors.go", + "util.go", + ], + importpath = "k8s.io/apimachinery/pkg/util/mergepatch", + deps = [ + "//vendor/github.com/davecgh/go-spew/spew:go_default_library", + "//vendor/github.com/ghodss/yaml:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS new file mode 100644 index 00000000..8e8d9fce --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS @@ -0,0 +1,5 @@ +approvers: +- pwittrock +reviewers: 
+- mengqiy +- apelisse diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go new file mode 100644 index 00000000..16501d5a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go @@ -0,0 +1,102 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mergepatch + +import ( + "errors" + "fmt" + "reflect" +) + +var ( + ErrBadJSONDoc = errors.New("invalid JSON document") + ErrNoListOfLists = errors.New("lists of lists are not supported") + ErrBadPatchFormatForPrimitiveList = errors.New("invalid patch format of primitive list") + ErrBadPatchFormatForRetainKeys = errors.New("invalid patch format of retainKeys") + ErrBadPatchFormatForSetElementOrderList = errors.New("invalid patch format of setElementOrder list") + ErrPatchContentNotMatchRetainKeys = errors.New("patch content doesn't match retainKeys list") + ErrUnsupportedStrategicMergePatchFormat = errors.New("strategic merge patch format is not supported") +) + +func ErrNoMergeKey(m map[string]interface{}, k string) error { + return fmt.Errorf("map: %v does not contain declared merge key: %s", m, k) +} + +func ErrBadArgType(expected, actual interface{}) error { + return fmt.Errorf("expected a %s, but received a %s", + reflect.TypeOf(expected), + reflect.TypeOf(actual)) +} + +func ErrBadArgKind(expected, actual interface{}) error { + var expectedKindString, actualKindString string + if expected 
== nil { + expectedKindString = "nil" + } else { + expectedKindString = reflect.TypeOf(expected).Kind().String() + } + if actual == nil { + actualKindString = "nil" + } else { + actualKindString = reflect.TypeOf(actual).Kind().String() + } + return fmt.Errorf("expected a %s, but received a %s", expectedKindString, actualKindString) +} + +func ErrBadPatchType(t interface{}, m map[string]interface{}) error { + return fmt.Errorf("unknown patch type: %s in map: %v", t, m) +} + +// IsPreconditionFailed returns true if the provided error indicates +// a precondition failed. +func IsPreconditionFailed(err error) bool { + _, ok := err.(ErrPreconditionFailed) + return ok +} + +type ErrPreconditionFailed struct { + message string +} + +func NewErrPreconditionFailed(target map[string]interface{}) ErrPreconditionFailed { + s := fmt.Sprintf("precondition failed for: %v", target) + return ErrPreconditionFailed{s} +} + +func (err ErrPreconditionFailed) Error() string { + return err.message +} + +type ErrConflict struct { + message string +} + +func NewErrConflict(patch, current string) ErrConflict { + s := fmt.Sprintf("patch:\n%s\nconflicts with changes made from original to current:\n%s\n", patch, current) + return ErrConflict{s} +} + +func (err ErrConflict) Error() string { + return err.message +} + +// IsConflict returns true if the provided error indicates +// a conflict between the patch and the current configuration. +func IsConflict(err error) bool { + _, ok := err.(ErrConflict) + return ok +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go new file mode 100644 index 00000000..9261290a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go @@ -0,0 +1,133 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mergepatch + +import ( + "fmt" + "reflect" + + "github.com/davecgh/go-spew/spew" + "github.com/ghodss/yaml" +) + +// PreconditionFunc asserts that an incompatible change is not present within a patch. +type PreconditionFunc func(interface{}) bool + +// RequireKeyUnchanged returns a precondition function that fails if the provided key +// is present in the patch (indicating that its value has changed). +func RequireKeyUnchanged(key string) PreconditionFunc { + return func(patch interface{}) bool { + patchMap, ok := patch.(map[string]interface{}) + if !ok { + return true + } + + // The presence of key means that its value has been changed, so the test fails. + _, ok = patchMap[key] + return !ok + } +} + +// RequireMetadataKeyUnchanged creates a precondition function that fails +// if the metadata.key is present in the patch (indicating its value +// has changed). 
+func RequireMetadataKeyUnchanged(key string) PreconditionFunc { + return func(patch interface{}) bool { + patchMap, ok := patch.(map[string]interface{}) + if !ok { + return true + } + patchMap1, ok := patchMap["metadata"] + if !ok { + return true + } + patchMap2, ok := patchMap1.(map[string]interface{}) + if !ok { + return true + } + _, ok = patchMap2[key] + return !ok + } +} + +func ToYAMLOrError(v interface{}) string { + y, err := toYAML(v) + if err != nil { + return err.Error() + } + + return y +} + +func toYAML(v interface{}) (string, error) { + y, err := yaml.Marshal(v) + if err != nil { + return "", fmt.Errorf("yaml marshal failed:%v\n%v\n", err, spew.Sdump(v)) + } + + return string(y), nil +} + +// HasConflicts returns true if the left and right JSON interface objects overlap with +// different values in any key. All keys are required to be strings. Since patches of the +// same Type have congruent keys, this is valid for multiple patch types. This method +// supports JSON merge patch semantics. +// +// NOTE: Numbers with different types (e.g. int(0) vs int64(0)) will be detected as conflicts. +// Make sure the unmarshaling of left and right are consistent (e.g. use the same library). 
+func HasConflicts(left, right interface{}) (bool, error) { + switch typedLeft := left.(type) { + case map[string]interface{}: + switch typedRight := right.(type) { + case map[string]interface{}: + for key, leftValue := range typedLeft { + rightValue, ok := typedRight[key] + if !ok { + continue + } + if conflict, err := HasConflicts(leftValue, rightValue); err != nil || conflict { + return conflict, err + } + } + + return false, nil + default: + return true, nil + } + case []interface{}: + switch typedRight := right.(type) { + case []interface{}: + if len(typedLeft) != len(typedRight) { + return true, nil + } + + for i := range typedLeft { + if conflict, err := HasConflicts(typedLeft[i], typedRight[i]); err != nil || conflict { + return conflict, err + } + } + + return false, nil + default: + return true, nil + } + case string, float64, bool, int, int64, nil: + return !reflect.DeepEqual(left, right), nil + default: + return true, fmt.Errorf("unknown type: %v", reflect.TypeOf(left)) + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/BUILD b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/BUILD new file mode 100644 index 00000000..2d608892 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/BUILD @@ -0,0 +1,60 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_test( + name = "go_default_test", + srcs = ["patch_test.go"], + data = [ + "testdata/swagger-merge-item.json", + "testdata/swagger-precision-item.json", + ], + embed = [":go_default_library"], + deps = [ + "//vendor/github.com/davecgh/go-spew/spew:go_default_library", + "//vendor/github.com/ghodss/yaml:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/mergepatch:go_default_library", + 
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/strategicpatch/testing:go_default_library", + ], +) + +go_library( + name = "go_default_library", + srcs = [ + "errors.go", + "meta.go", + "patch.go", + "types.go", + ], + importpath = "k8s.io/apimachinery/pkg/util/strategicpatch", + deps = [ + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/mergepatch:go_default_library", + "//vendor/k8s.io/apimachinery/third_party/forked/golang/json:go_default_library", + "//vendor/k8s.io/kube-openapi/pkg/util/proto:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/testing:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS new file mode 100644 index 00000000..8e8d9fce --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS @@ -0,0 +1,5 @@ +approvers: +- pwittrock +reviewers: +- mengqiy +- apelisse diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/errors.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/errors.go new file mode 100644 index 00000000..ab66d045 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/errors.go @@ -0,0 +1,49 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package strategicpatch + +import ( + "fmt" +) + +type LookupPatchMetaError struct { + Path string + Err error +} + +func (e LookupPatchMetaError) Error() string { + return fmt.Sprintf("LookupPatchMetaError(%s): %v", e.Path, e.Err) +} + +type FieldNotFoundError struct { + Path string + Field string +} + +func (e FieldNotFoundError) Error() string { + return fmt.Sprintf("unable to find api field %q in %s", e.Field, e.Path) +} + +type InvalidTypeError struct { + Path string + Expected string + Actual string +} + +func (e InvalidTypeError) Error() string { + return fmt.Sprintf("invalid type for %s: got %q, expected %q", e.Path, e.Actual, e.Expected) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go new file mode 100644 index 00000000..c31de15e --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go @@ -0,0 +1,194 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package strategicpatch + +import ( + "errors" + "fmt" + "reflect" + + "k8s.io/apimachinery/pkg/util/mergepatch" + forkedjson "k8s.io/apimachinery/third_party/forked/golang/json" + openapi "k8s.io/kube-openapi/pkg/util/proto" +) + +type PatchMeta struct { + patchStrategies []string + patchMergeKey string +} + +func (pm PatchMeta) GetPatchStrategies() []string { + if pm.patchStrategies == nil { + return []string{} + } + return pm.patchStrategies +} + +func (pm PatchMeta) SetPatchStrategies(ps []string) { + pm.patchStrategies = ps +} + +func (pm PatchMeta) GetPatchMergeKey() string { + return pm.patchMergeKey +} + +func (pm PatchMeta) SetPatchMergeKey(pmk string) { + pm.patchMergeKey = pmk +} + +type LookupPatchMeta interface { + // LookupPatchMetadataForStruct gets subschema and the patch metadata (e.g. patch strategy and merge key) for map. + LookupPatchMetadataForStruct(key string) (LookupPatchMeta, PatchMeta, error) + // LookupPatchMetadataForSlice get subschema and the patch metadata for slice. 
+ LookupPatchMetadataForSlice(key string) (LookupPatchMeta, PatchMeta, error) + // Get the type name of the field + Name() string +} + +type PatchMetaFromStruct struct { + T reflect.Type +} + +func NewPatchMetaFromStruct(dataStruct interface{}) (PatchMetaFromStruct, error) { + t, err := getTagStructType(dataStruct) + return PatchMetaFromStruct{T: t}, err +} + +var _ LookupPatchMeta = PatchMetaFromStruct{} + +func (s PatchMetaFromStruct) LookupPatchMetadataForStruct(key string) (LookupPatchMeta, PatchMeta, error) { + fieldType, fieldPatchStrategies, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadataForStruct(s.T, key) + if err != nil { + return nil, PatchMeta{}, err + } + + return PatchMetaFromStruct{T: fieldType}, + PatchMeta{ + patchStrategies: fieldPatchStrategies, + patchMergeKey: fieldPatchMergeKey, + }, nil +} + +func (s PatchMetaFromStruct) LookupPatchMetadataForSlice(key string) (LookupPatchMeta, PatchMeta, error) { + subschema, patchMeta, err := s.LookupPatchMetadataForStruct(key) + if err != nil { + return nil, PatchMeta{}, err + } + elemPatchMetaFromStruct := subschema.(PatchMetaFromStruct) + t := elemPatchMetaFromStruct.T + + var elemType reflect.Type + switch t.Kind() { + // If t is an array or a slice, get the element type. + // If element is still an array or a slice, return an error. + // Otherwise, return element type. + case reflect.Array, reflect.Slice: + elemType = t.Elem() + if elemType.Kind() == reflect.Array || elemType.Kind() == reflect.Slice { + return nil, PatchMeta{}, errors.New("unexpected slice of slice") + } + // If t is an pointer, get the underlying element. + // If the underlying element is neither an array nor a slice, the pointer is pointing to a slice, + // e.g. https://github.com/kubernetes/kubernetes/blob/bc22e206c79282487ea0bf5696d5ccec7e839a76/staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/patch_test.go#L2782-L2822 + // If the underlying element is either an array or a slice, return its element type. 
+ case reflect.Ptr: + t = t.Elem() + if t.Kind() == reflect.Array || t.Kind() == reflect.Slice { + t = t.Elem() + } + elemType = t + default: + return nil, PatchMeta{}, fmt.Errorf("expected slice or array type, but got: %s", s.T.Kind().String()) + } + + return PatchMetaFromStruct{T: elemType}, patchMeta, nil +} + +func (s PatchMetaFromStruct) Name() string { + return s.T.Kind().String() +} + +func getTagStructType(dataStruct interface{}) (reflect.Type, error) { + if dataStruct == nil { + return nil, mergepatch.ErrBadArgKind(struct{}{}, nil) + } + + t := reflect.TypeOf(dataStruct) + // Get the underlying type for pointers + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + + if t.Kind() != reflect.Struct { + return nil, mergepatch.ErrBadArgKind(struct{}{}, dataStruct) + } + + return t, nil +} + +func GetTagStructTypeOrDie(dataStruct interface{}) reflect.Type { + t, err := getTagStructType(dataStruct) + if err != nil { + panic(err) + } + return t +} + +type PatchMetaFromOpenAPI struct { + Schema openapi.Schema +} + +func NewPatchMetaFromOpenAPI(s openapi.Schema) PatchMetaFromOpenAPI { + return PatchMetaFromOpenAPI{Schema: s} +} + +var _ LookupPatchMeta = PatchMetaFromOpenAPI{} + +func (s PatchMetaFromOpenAPI) LookupPatchMetadataForStruct(key string) (LookupPatchMeta, PatchMeta, error) { + if s.Schema == nil { + return nil, PatchMeta{}, nil + } + kindItem := NewKindItem(key, s.Schema.GetPath()) + s.Schema.Accept(kindItem) + + err := kindItem.Error() + if err != nil { + return nil, PatchMeta{}, err + } + return PatchMetaFromOpenAPI{Schema: kindItem.subschema}, + kindItem.patchmeta, nil +} + +func (s PatchMetaFromOpenAPI) LookupPatchMetadataForSlice(key string) (LookupPatchMeta, PatchMeta, error) { + if s.Schema == nil { + return nil, PatchMeta{}, nil + } + sliceItem := NewSliceItem(key, s.Schema.GetPath()) + s.Schema.Accept(sliceItem) + + err := sliceItem.Error() + if err != nil { + return nil, PatchMeta{}, err + } + return PatchMetaFromOpenAPI{Schema: 
sliceItem.subschema}, + sliceItem.patchmeta, nil +} + +func (s PatchMetaFromOpenAPI) Name() string { + schema := s.Schema + return schema.GetName() +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go new file mode 100644 index 00000000..2f6ade2b --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go @@ -0,0 +1,2151 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package strategicpatch + +import ( + "fmt" + "reflect" + "sort" + "strings" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/apimachinery/pkg/util/mergepatch" +) + +// An alternate implementation of JSON Merge Patch +// (https://tools.ietf.org/html/rfc7386) which supports the ability to annotate +// certain fields with metadata that indicates whether the elements of JSON +// lists should be merged or replaced. +// +// For more information, see the PATCH section of docs/devel/api-conventions.md. +// +// Some of the content of this package was borrowed with minor adaptations from +// evanphx/json-patch and openshift/origin. 
+ +const ( + directiveMarker = "$patch" + deleteDirective = "delete" + replaceDirective = "replace" + mergeDirective = "merge" + + retainKeysStrategy = "retainKeys" + + deleteFromPrimitiveListDirectivePrefix = "$deleteFromPrimitiveList" + retainKeysDirective = "$" + retainKeysStrategy + setElementOrderDirectivePrefix = "$setElementOrder" +) + +// JSONMap is a representations of JSON object encoded as map[string]interface{} +// where the children can be either map[string]interface{}, []interface{} or +// primitive type). +// Operating on JSONMap representation is much faster as it doesn't require any +// json marshaling and/or unmarshaling operations. +type JSONMap map[string]interface{} + +type DiffOptions struct { + // SetElementOrder determines whether we generate the $setElementOrder parallel list. + SetElementOrder bool + // IgnoreChangesAndAdditions indicates if we keep the changes and additions in the patch. + IgnoreChangesAndAdditions bool + // IgnoreDeletions indicates if we keep the deletions in the patch. + IgnoreDeletions bool + // We introduce a new value retainKeys for patchStrategy. + // It indicates that all fields needing to be preserved must be + // present in the `retainKeys` list. + // And the fields that are present will be merged with live object. + // All the missing fields will be cleared when patching. + BuildRetainKeysDirective bool +} + +type MergeOptions struct { + // MergeParallelList indicates if we are merging the parallel list. + // We don't merge parallel list when calling mergeMap() in CreateThreeWayMergePatch() + // which is called client-side. + // We merge parallel list iff when calling mergeMap() in StrategicMergeMapPatch() + // which is called server-side + MergeParallelList bool + // IgnoreUnmatchedNulls indicates if we should process the unmatched nulls. + IgnoreUnmatchedNulls bool +} + +// The following code is adapted from github.com/openshift/origin/pkg/util/jsonmerge. 
+// Instead of defining a Delta that holds an original, a patch and a set of preconditions, +// the reconcile method accepts a set of preconditions as an argument. + +// CreateTwoWayMergePatch creates a patch that can be passed to StrategicMergePatch from an original +// document and a modified document, which are passed to the method as json encoded content. It will +// return a patch that yields the modified document when applied to the original document, or an error +// if either of the two documents is invalid. +func CreateTwoWayMergePatch(original, modified []byte, dataStruct interface{}, fns ...mergepatch.PreconditionFunc) ([]byte, error) { + schema, err := NewPatchMetaFromStruct(dataStruct) + if err != nil { + return nil, err + } + + return CreateTwoWayMergePatchUsingLookupPatchMeta(original, modified, schema, fns...) +} + +func CreateTwoWayMergePatchUsingLookupPatchMeta( + original, modified []byte, schema LookupPatchMeta, fns ...mergepatch.PreconditionFunc) ([]byte, error) { + originalMap := map[string]interface{}{} + if len(original) > 0 { + if err := json.Unmarshal(original, &originalMap); err != nil { + return nil, mergepatch.ErrBadJSONDoc + } + } + + modifiedMap := map[string]interface{}{} + if len(modified) > 0 { + if err := json.Unmarshal(modified, &modifiedMap); err != nil { + return nil, mergepatch.ErrBadJSONDoc + } + } + + patchMap, err := CreateTwoWayMergeMapPatchUsingLookupPatchMeta(originalMap, modifiedMap, schema, fns...) + if err != nil { + return nil, err + } + + return json.Marshal(patchMap) +} + +// CreateTwoWayMergeMapPatch creates a patch from an original and modified JSON objects, +// encoded JSONMap. +// The serialized version of the map can then be passed to StrategicMergeMapPatch. 
+func CreateTwoWayMergeMapPatch(original, modified JSONMap, dataStruct interface{}, fns ...mergepatch.PreconditionFunc) (JSONMap, error) { + schema, err := NewPatchMetaFromStruct(dataStruct) + if err != nil { + return nil, err + } + + return CreateTwoWayMergeMapPatchUsingLookupPatchMeta(original, modified, schema, fns...) +} + +func CreateTwoWayMergeMapPatchUsingLookupPatchMeta(original, modified JSONMap, schema LookupPatchMeta, fns ...mergepatch.PreconditionFunc) (JSONMap, error) { + diffOptions := DiffOptions{ + SetElementOrder: true, + } + patchMap, err := diffMaps(original, modified, schema, diffOptions) + if err != nil { + return nil, err + } + + // Apply the preconditions to the patch, and return an error if any of them fail. + for _, fn := range fns { + if !fn(patchMap) { + return nil, mergepatch.NewErrPreconditionFailed(patchMap) + } + } + + return patchMap, nil +} + +// Returns a (recursive) strategic merge patch that yields modified when applied to original. +// Including: +// - Adding fields to the patch present in modified, missing from original +// - Setting fields to the patch present in modified and original with different values +// - Delete fields present in original, missing from modified through +// - IFF map field - set to nil in patch +// - IFF list of maps && merge strategy - use deleteDirective for the elements +// - IFF list of primitives && merge strategy - use parallel deletion list +// - IFF list of maps or primitives with replace strategy (default) - set patch value to the value in modified +// - Build $retainKeys directive for fields with retainKeys patch strategy +func diffMaps(original, modified map[string]interface{}, schema LookupPatchMeta, diffOptions DiffOptions) (map[string]interface{}, error) { + patch := map[string]interface{}{} + + // This will be used to build the $retainKeys directive sent in the patch + retainKeysList := make([]interface{}, 0, len(modified)) + + // Compare each value in the modified map against the value in 
the original map + for key, modifiedValue := range modified { + // Get the underlying type for pointers + if diffOptions.BuildRetainKeysDirective && modifiedValue != nil { + retainKeysList = append(retainKeysList, key) + } + + originalValue, ok := original[key] + if !ok { + // Key was added, so add to patch + if !diffOptions.IgnoreChangesAndAdditions { + patch[key] = modifiedValue + } + continue + } + + // The patch may have a patch directive + // TODO: figure out if we need this. This shouldn't be needed by apply. When would the original map have patch directives in it? + foundDirectiveMarker, err := handleDirectiveMarker(key, originalValue, modifiedValue, patch) + if err != nil { + return nil, err + } + if foundDirectiveMarker { + continue + } + + if reflect.TypeOf(originalValue) != reflect.TypeOf(modifiedValue) { + // Types have changed, so add to patch + if !diffOptions.IgnoreChangesAndAdditions { + patch[key] = modifiedValue + } + continue + } + + // Types are the same, so compare values + switch originalValueTyped := originalValue.(type) { + case map[string]interface{}: + modifiedValueTyped := modifiedValue.(map[string]interface{}) + err = handleMapDiff(key, originalValueTyped, modifiedValueTyped, patch, schema, diffOptions) + case []interface{}: + modifiedValueTyped := modifiedValue.([]interface{}) + err = handleSliceDiff(key, originalValueTyped, modifiedValueTyped, patch, schema, diffOptions) + default: + replacePatchFieldIfNotEqual(key, originalValue, modifiedValue, patch, diffOptions) + } + if err != nil { + return nil, err + } + } + + updatePatchIfMissing(original, modified, patch, diffOptions) + // Insert the retainKeysList iff there are values present in the retainKeysList and + // either of the following is true: + // - the patch is not empty + // - there are additional field in original that need to be cleared + if len(retainKeysList) > 0 && + (len(patch) > 0 || hasAdditionalNewField(original, modified)) { + patch[retainKeysDirective] = 
sortScalars(retainKeysList) + } + return patch, nil +} + +// handleDirectiveMarker handles how to diff directive marker between 2 objects +func handleDirectiveMarker(key string, originalValue, modifiedValue interface{}, patch map[string]interface{}) (bool, error) { + if key == directiveMarker { + originalString, ok := originalValue.(string) + if !ok { + return false, fmt.Errorf("invalid value for special key: %s", directiveMarker) + } + modifiedString, ok := modifiedValue.(string) + if !ok { + return false, fmt.Errorf("invalid value for special key: %s", directiveMarker) + } + if modifiedString != originalString { + patch[directiveMarker] = modifiedValue + } + return true, nil + } + return false, nil +} + +// handleMapDiff diff between 2 maps `originalValueTyped` and `modifiedValue`, +// puts the diff in the `patch` associated with `key` +// key is the key associated with originalValue and modifiedValue. +// originalValue, modifiedValue are the old and new value respectively.They are both maps +// patch is the patch map that contains key and the updated value, and it is the parent of originalValue, modifiedValue +// diffOptions contains multiple options to control how we do the diff. 
+func handleMapDiff(key string, originalValue, modifiedValue, patch map[string]interface{}, + schema LookupPatchMeta, diffOptions DiffOptions) error { + subschema, patchMeta, err := schema.LookupPatchMetadataForStruct(key) + + if err != nil { + // We couldn't look up metadata for the field + // If the values are identical, this doesn't matter, no patch is needed + if reflect.DeepEqual(originalValue, modifiedValue) { + return nil + } + // Otherwise, return the error + return err + } + retainKeys, patchStrategy, err := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) + if err != nil { + return err + } + diffOptions.BuildRetainKeysDirective = retainKeys + switch patchStrategy { + // The patch strategic from metadata tells us to replace the entire object instead of diffing it + case replaceDirective: + if !diffOptions.IgnoreChangesAndAdditions { + patch[key] = modifiedValue + } + default: + patchValue, err := diffMaps(originalValue, modifiedValue, subschema, diffOptions) + if err != nil { + return err + } + // Maps were not identical, use provided patch value + if len(patchValue) > 0 { + patch[key] = patchValue + } + } + return nil +} + +// handleSliceDiff diff between 2 slices `originalValueTyped` and `modifiedValue`, +// puts the diff in the `patch` associated with `key` +// key is the key associated with originalValue and modifiedValue. +// originalValue, modifiedValue are the old and new value respectively.They are both slices +// patch is the patch map that contains key and the updated value, and it is the parent of originalValue, modifiedValue +// diffOptions contains multiple options to control how we do the diff. 
+func handleSliceDiff(key string, originalValue, modifiedValue []interface{}, patch map[string]interface{}, + schema LookupPatchMeta, diffOptions DiffOptions) error { + subschema, patchMeta, err := schema.LookupPatchMetadataForSlice(key) + if err != nil { + // We couldn't look up metadata for the field + // If the values are identical, this doesn't matter, no patch is needed + if reflect.DeepEqual(originalValue, modifiedValue) { + return nil + } + // Otherwise, return the error + return err + } + retainKeys, patchStrategy, err := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) + if err != nil { + return err + } + switch patchStrategy { + // Merge the 2 slices using mergePatchKey + case mergeDirective: + diffOptions.BuildRetainKeysDirective = retainKeys + addList, deletionList, setOrderList, err := diffLists(originalValue, modifiedValue, subschema, patchMeta.GetPatchMergeKey(), diffOptions) + if err != nil { + return err + } + if len(addList) > 0 { + patch[key] = addList + } + // generate a parallel list for deletion + if len(deletionList) > 0 { + parallelDeletionListKey := fmt.Sprintf("%s/%s", deleteFromPrimitiveListDirectivePrefix, key) + patch[parallelDeletionListKey] = deletionList + } + if len(setOrderList) > 0 { + parallelSetOrderListKey := fmt.Sprintf("%s/%s", setElementOrderDirectivePrefix, key) + patch[parallelSetOrderListKey] = setOrderList + } + default: + replacePatchFieldIfNotEqual(key, originalValue, modifiedValue, patch, diffOptions) + } + return nil +} + +// replacePatchFieldIfNotEqual updates the patch if original and modified are not deep equal +// if diffOptions.IgnoreChangesAndAdditions is false. 
+// original is the old value, maybe either the live cluster object or the last applied configuration +// modified is the new value, is always the users new config +func replacePatchFieldIfNotEqual(key string, original, modified interface{}, + patch map[string]interface{}, diffOptions DiffOptions) { + if diffOptions.IgnoreChangesAndAdditions { + // Ignoring changes - do nothing + return + } + if reflect.DeepEqual(original, modified) { + // Contents are identical - do nothing + return + } + // Create a patch to replace the old value with the new one + patch[key] = modified +} + +// updatePatchIfMissing iterates over `original` when ignoreDeletions is false. +// Clear the field whose key is not present in `modified`. +// original is the old value, maybe either the live cluster object or the last applied configuration +// modified is the new value, is always the users new config +func updatePatchIfMissing(original, modified, patch map[string]interface{}, diffOptions DiffOptions) { + if diffOptions.IgnoreDeletions { + // Ignoring deletion - do nothing + return + } + // Add nils for deleted values + for key := range original { + if _, found := modified[key]; !found { + patch[key] = nil + } + } +} + +// validateMergeKeyInLists checks if each map in the list has the mentryerge key. +func validateMergeKeyInLists(mergeKey string, lists ...[]interface{}) error { + for _, list := range lists { + for _, item := range list { + m, ok := item.(map[string]interface{}) + if !ok { + return mergepatch.ErrBadArgType(m, item) + } + if _, ok = m[mergeKey]; !ok { + return mergepatch.ErrNoMergeKey(m, mergeKey) + } + } + } + return nil +} + +// normalizeElementOrder sort `patch` list by `patchOrder` and sort `serverOnly` list by `serverOrder`. +// Then it merges the 2 sorted lists. +// It guarantee the relative order in the patch list and in the serverOnly list is kept. +// `patch` is a list of items in the patch, and `serverOnly` is a list of items in the live object. 
+// `patchOrder` is the order we want `patch` list to have and +// `serverOrder` is the order we want `serverOnly` list to have. +// kind is the kind of each item in the lists `patch` and `serverOnly`. +func normalizeElementOrder(patch, serverOnly, patchOrder, serverOrder []interface{}, mergeKey string, kind reflect.Kind) ([]interface{}, error) { + patch, err := normalizeSliceOrder(patch, patchOrder, mergeKey, kind) + if err != nil { + return nil, err + } + serverOnly, err = normalizeSliceOrder(serverOnly, serverOrder, mergeKey, kind) + if err != nil { + return nil, err + } + all := mergeSortedSlice(serverOnly, patch, serverOrder, mergeKey, kind) + + return all, nil +} + +// mergeSortedSlice merges the 2 sorted lists by serverOrder with best effort. +// It will insert each item in `left` list to `right` list. In most cases, the 2 lists will be interleaved. +// The relative order of left and right are guaranteed to be kept. +// They have higher precedence than the order in the live list. +// The place for a item in `left` is found by: +// scan from the place of last insertion in `right` to the end of `right`, +// the place is before the first item that is greater than the item we want to insert. +// example usage: using server-only items as left and patch items as right. We insert server-only items +// to patch list. We use the order of live object as record for comparison. +func mergeSortedSlice(left, right, serverOrder []interface{}, mergeKey string, kind reflect.Kind) []interface{} { + // Returns if l is less than r, and if both have been found. + // If l and r both present and l is in front of r, l is less than r. + less := func(l, r interface{}) (bool, bool) { + li := index(serverOrder, l, mergeKey, kind) + ri := index(serverOrder, r, mergeKey, kind) + if li >= 0 && ri >= 0 { + return li < ri, true + } else { + return false, false + } + } + + // left and right should be non-overlapping. 
+ size := len(left) + len(right) + i, j := 0, 0 + s := make([]interface{}, size, size) + + for k := 0; k < size; k++ { + if i >= len(left) && j < len(right) { + // have items left in `right` list + s[k] = right[j] + j++ + } else if j >= len(right) && i < len(left) { + // have items left in `left` list + s[k] = left[i] + i++ + } else { + // compare them if i and j are both in bound + less, foundBoth := less(left[i], right[j]) + if foundBoth && less { + s[k] = left[i] + i++ + } else { + s[k] = right[j] + j++ + } + } + } + return s +} + +// index returns the index of the item in the given items, or -1 if it doesn't exist +// l must NOT be a slice of slices, this should be checked before calling. +func index(l []interface{}, valToLookUp interface{}, mergeKey string, kind reflect.Kind) int { + var getValFn func(interface{}) interface{} + // Get the correct `getValFn` based on item `kind`. + // It should return the value of merge key for maps and + // return the item for other kinds. + switch kind { + case reflect.Map: + getValFn = func(item interface{}) interface{} { + typedItem, ok := item.(map[string]interface{}) + if !ok { + return nil + } + val := typedItem[mergeKey] + return val + } + default: + getValFn = func(item interface{}) interface{} { + return item + } + } + + for i, v := range l { + if getValFn(valToLookUp) == getValFn(v) { + return i + } + } + return -1 +} + +// extractToDeleteItems takes a list and +// returns 2 lists: one contains items that should be kept and the other contains items to be deleted. 
+func extractToDeleteItems(l []interface{}) ([]interface{}, []interface{}, error) { + var nonDelete, toDelete []interface{} + for _, v := range l { + m, ok := v.(map[string]interface{}) + if !ok { + return nil, nil, mergepatch.ErrBadArgType(m, v) + } + + directive, foundDirective := m[directiveMarker] + if foundDirective && directive == deleteDirective { + toDelete = append(toDelete, v) + } else { + nonDelete = append(nonDelete, v) + } + } + return nonDelete, toDelete, nil +} + +// normalizeSliceOrder sort `toSort` list by `order` +func normalizeSliceOrder(toSort, order []interface{}, mergeKey string, kind reflect.Kind) ([]interface{}, error) { + var toDelete []interface{} + if kind == reflect.Map { + // make sure each item in toSort, order has merge key + err := validateMergeKeyInLists(mergeKey, toSort, order) + if err != nil { + return nil, err + } + toSort, toDelete, err = extractToDeleteItems(toSort) + if err != nil { + return nil, err + } + } + + sort.SliceStable(toSort, func(i, j int) bool { + if ii := index(order, toSort[i], mergeKey, kind); ii >= 0 { + if ij := index(order, toSort[j], mergeKey, kind); ij >= 0 { + return ii < ij + } + } + return true + }) + toSort = append(toSort, toDelete...) + return toSort, nil +} + +// Returns a (recursive) strategic merge patch, a parallel deletion list if necessary and +// another list to set the order of the list +// Only list of primitives with merge strategy will generate a parallel deletion list. +// These two lists should yield modified when applied to original, for lists with merge semantics. 
+func diffLists(original, modified []interface{}, schema LookupPatchMeta, mergeKey string, diffOptions DiffOptions) ([]interface{}, []interface{}, []interface{}, error) { + if len(original) == 0 { + // Both slices are empty - do nothing + if len(modified) == 0 || diffOptions.IgnoreChangesAndAdditions { + return nil, nil, nil, nil + } + + // Old slice was empty - add all elements from the new slice + return modified, nil, nil, nil + } + + elementType, err := sliceElementType(original, modified) + if err != nil { + return nil, nil, nil, err + } + + var patchList, deleteList, setOrderList []interface{} + kind := elementType.Kind() + switch kind { + case reflect.Map: + patchList, deleteList, err = diffListsOfMaps(original, modified, schema, mergeKey, diffOptions) + if err != nil { + return nil, nil, nil, err + } + patchList, err = normalizeSliceOrder(patchList, modified, mergeKey, kind) + if err != nil { + return nil, nil, nil, err + } + orderSame, err := isOrderSame(original, modified, mergeKey) + if err != nil { + return nil, nil, nil, err + } + // append the deletions to the end of the patch list. + patchList = append(patchList, deleteList...) + deleteList = nil + // generate the setElementOrder list when there are content changes or order changes + if diffOptions.SetElementOrder && + ((!diffOptions.IgnoreChangesAndAdditions && (len(patchList) > 0 || !orderSame)) || + (!diffOptions.IgnoreDeletions && len(patchList) > 0)) { + // Generate a list of maps that each item contains only the merge key. 
+ setOrderList = make([]interface{}, len(modified)) + for i, v := range modified { + typedV := v.(map[string]interface{}) + setOrderList[i] = map[string]interface{}{ + mergeKey: typedV[mergeKey], + } + } + } + case reflect.Slice: + // Lists of Lists are not permitted by the api + return nil, nil, nil, mergepatch.ErrNoListOfLists + default: + patchList, deleteList, err = diffListsOfScalars(original, modified, diffOptions) + if err != nil { + return nil, nil, nil, err + } + patchList, err = normalizeSliceOrder(patchList, modified, mergeKey, kind) + // generate the setElementOrder list when there are content changes or order changes + if diffOptions.SetElementOrder && ((!diffOptions.IgnoreDeletions && len(deleteList) > 0) || + (!diffOptions.IgnoreChangesAndAdditions && !reflect.DeepEqual(original, modified))) { + setOrderList = modified + } + } + return patchList, deleteList, setOrderList, err +} + +// isOrderSame checks if the order in a list has changed +func isOrderSame(original, modified []interface{}, mergeKey string) (bool, error) { + if len(original) != len(modified) { + return false, nil + } + for i, modifiedItem := range modified { + equal, err := mergeKeyValueEqual(original[i], modifiedItem, mergeKey) + if err != nil || !equal { + return equal, err + } + } + return true, nil +} + +// diffListsOfScalars returns 2 lists, the first one is addList and the second one is deletionList. +// Argument diffOptions.IgnoreChangesAndAdditions controls if calculate addList. true means not calculate. +// Argument diffOptions.IgnoreDeletions controls if calculate deletionList. true means not calculate. 
// original may be changed, but modified is guaranteed to not be changed
func diffListsOfScalars(original, modified []interface{}, diffOptions DiffOptions) ([]interface{}, []interface{}, error) {
	// Work on a copy so `modified` is never reordered for the caller.
	modifiedCopy := make([]interface{}, len(modified))
	copy(modifiedCopy, modified)
	// Sort the scalars for easier calculating the diff
	originalScalars := sortScalars(original)
	modifiedScalars := sortScalars(modifiedCopy)

	originalIndex, modifiedIndex := 0, 0
	addList := []interface{}{}
	deletionList := []interface{}{}

	// Classic two-pointer walk over the two sorted lists.
	for {
		originalInBounds := originalIndex < len(originalScalars)
		modifiedInBounds := modifiedIndex < len(modifiedScalars)
		if !originalInBounds && !modifiedInBounds {
			break
		}
		// we need to compare the string representation of the scalar,
		// because the scalar is an interface which doesn't support either < or >
		// And that's how func sortScalars compare scalars.
		var originalString, modifiedString string
		var originalValue, modifiedValue interface{}
		if originalInBounds {
			originalValue = originalScalars[originalIndex]
			originalString = fmt.Sprintf("%v", originalValue)
		}
		if modifiedInBounds {
			modifiedValue = modifiedScalars[modifiedIndex]
			modifiedString = fmt.Sprintf("%v", modifiedValue)
		}

		originalV, modifiedV := compareListValuesAtIndex(originalInBounds, modifiedInBounds, originalString, modifiedString)
		switch {
		case originalV == nil && modifiedV == nil:
			// Values are equal: present in both lists, advance both cursors.
			originalIndex++
			modifiedIndex++
		case originalV != nil && modifiedV == nil:
			// Present only in original: the scalar was deleted.
			if !diffOptions.IgnoreDeletions {
				deletionList = append(deletionList, originalValue)
			}
			originalIndex++
		case originalV == nil && modifiedV != nil:
			// Present only in modified: the scalar was added.
			if !diffOptions.IgnoreChangesAndAdditions {
				addList = append(addList, modifiedValue)
			}
			modifiedIndex++
		default:
			return nil, nil, fmt.Errorf("Unexpected returned value from compareListValuesAtIndex: %v and %v", originalV, modifiedV)
		}
	}

	return addList, deduplicateScalars(deletionList), nil
}

// If first return value is non-nil, list1 contains an element not present in list2
// If second return value is non-nil, list2 contains an element not present in list1
// The comparison relies on the callers having sorted both lists: a string
// inequality then tells which side holds an extra element.
func compareListValuesAtIndex(list1Inbounds, list2Inbounds bool, list1Value, list2Value string) (interface{}, interface{}) {
	bothInBounds := list1Inbounds && list2Inbounds
	switch {
	// scalars are identical
	case bothInBounds && list1Value == list2Value:
		return nil, nil
	// only list2 is in bound
	case !list1Inbounds:
		fallthrough
	// list2 has additional scalar
	case bothInBounds && list1Value > list2Value:
		return nil, list2Value
	// only original is in bound
	case !list2Inbounds:
		fallthrough
	// original has additional scalar
	case bothInBounds && list1Value < list2Value:
		return list1Value, nil
	default:
		// Unreachable given the cases above; kept for exhaustiveness.
		return nil, nil
	}
}

// diffListsOfMaps takes a pair of lists and
// returns a (recursive) strategic merge patch list contains additions and changes and
// a deletion list contains deletions
func diffListsOfMaps(original, modified []interface{}, schema LookupPatchMeta, mergeKey string, diffOptions DiffOptions) ([]interface{}, []interface{}, error) {
	patch := make([]interface{}, 0, len(modified))
	deletionList := make([]interface{}, 0, len(original))

	// Sort both lists by merge-key value so they can be walked in lockstep.
	originalSorted, err := sortMergeListsByNameArray(original, schema, mergeKey, false)
	if err != nil {
		return nil, nil, err
	}
	modifiedSorted, err := sortMergeListsByNameArray(modified, schema, mergeKey, false)
	if err != nil {
		return nil, nil, err
	}

	originalIndex, modifiedIndex := 0, 0
	for {
		originalInBounds := originalIndex < len(originalSorted)
		modifiedInBounds := modifiedIndex < len(modifiedSorted)
		bothInBounds := originalInBounds && modifiedInBounds
		if !originalInBounds && !modifiedInBounds {
			break
		}

		var originalElementMergeKeyValueString, modifiedElementMergeKeyValueString string
		var originalElementMergeKeyValue, modifiedElementMergeKeyValue interface{}
		var originalElement, modifiedElement map[string]interface{}
		if originalInBounds {
			originalElement, originalElementMergeKeyValue, err = getMapAndMergeKeyValueByIndex(originalIndex, mergeKey, originalSorted)
			if err != nil {
				return nil, nil, err
			}
			originalElementMergeKeyValueString = fmt.Sprintf("%v", originalElementMergeKeyValue)
		}
		if modifiedInBounds {
			modifiedElement, modifiedElementMergeKeyValue, err = getMapAndMergeKeyValueByIndex(modifiedIndex, mergeKey, modifiedSorted)
			if err != nil {
				return nil, nil, err
			}
			modifiedElementMergeKeyValueString = fmt.Sprintf("%v", modifiedElementMergeKeyValue)
		}

		switch {
		case bothInBounds && ItemMatchesOriginalAndModifiedSlice(originalElementMergeKeyValueString, modifiedElementMergeKeyValueString):
			// Merge key values are equal, so recurse
			patchValue, err := diffMaps(originalElement, modifiedElement, schema, diffOptions)
			if err != nil {
				return nil, nil, err
			}
			if len(patchValue) > 0 {
				// Re-attach the merge key so the patch entry can be matched on apply.
				patchValue[mergeKey] = modifiedElementMergeKeyValue
				patch = append(patch, patchValue)
			}
			originalIndex++
			modifiedIndex++
		// only modified is in bound
		case !originalInBounds:
			fallthrough
		// modified has additional map
		case bothInBounds && ItemAddedToModifiedSlice(originalElementMergeKeyValueString, modifiedElementMergeKeyValueString):
			if !diffOptions.IgnoreChangesAndAdditions {
				patch = append(patch, modifiedElement)
			}
			modifiedIndex++
		// only original is in bound
		case !modifiedInBounds:
			fallthrough
		// original has additional map
		case bothInBounds && ItemRemovedFromModifiedSlice(originalElementMergeKeyValueString, modifiedElementMergeKeyValueString):
			if !diffOptions.IgnoreDeletions {
				// Item was deleted, so add delete directive
				deletionList = append(deletionList, CreateDeleteDirective(mergeKey, originalElementMergeKeyValue))
			}
			originalIndex++
		}
	}

	return patch, deletionList, nil
}

// getMapAndMergeKeyValueByIndex return a map in the list and its merge key value given the index of the map.
// It errors when the element is not a map or does not carry the merge key.
func getMapAndMergeKeyValueByIndex(index int, mergeKey string, listOfMaps []interface{}) (map[string]interface{}, interface{}, error) {
	m, ok := listOfMaps[index].(map[string]interface{})
	if !ok {
		return nil, nil, mergepatch.ErrBadArgType(m, listOfMaps[index])
	}

	val, ok := m[mergeKey]
	if !ok {
		return nil, nil, mergepatch.ErrNoMergeKey(m, mergeKey)
	}
	return m, val, nil
}

// StrategicMergePatch applies a strategic merge patch. The patch and the original document
// must be json encoded content. A patch can be created from an original and a modified document
// by calling CreateStrategicMergePatch.
func StrategicMergePatch(original, patch []byte, dataStruct interface{}) ([]byte, error) {
	schema, err := NewPatchMetaFromStruct(dataStruct)
	if err != nil {
		return nil, err
	}

	return StrategicMergePatchUsingLookupPatchMeta(original, patch, schema)
}

// StrategicMergePatchUsingLookupPatchMeta is like StrategicMergePatch but takes
// an explicit LookupPatchMeta instead of deriving one from a Go struct.
func StrategicMergePatchUsingLookupPatchMeta(original, patch []byte, schema LookupPatchMeta) ([]byte, error) {
	originalMap, err := handleUnmarshal(original)
	if err != nil {
		return nil, err
	}
	patchMap, err := handleUnmarshal(patch)
	if err != nil {
		return nil, err
	}

	result, err := StrategicMergeMapPatchUsingLookupPatchMeta(originalMap, patchMap, schema)
	if err != nil {
		return nil, err
	}

	return json.Marshal(result)
}

// handleUnmarshal decodes a JSON document into a map, treating nil input as
// the empty object "{}". Any decode failure is reported as ErrBadJSONDoc.
func handleUnmarshal(j []byte) (map[string]interface{}, error) {
	if j == nil {
		j = []byte("{}")
	}

	m := map[string]interface{}{}
	err := json.Unmarshal(j, &m)
	if err != nil {
		return nil, mergepatch.ErrBadJSONDoc
	}
	return m, nil
}

// StrategicMergeMapPatch applies a strategic merge patch. The original and patch documents
// must be JSONMap. A patch can be created from an original and modified document by
// calling CreateTwoWayMergeMapPatch.
// Warning: the original and patch JSONMap objects are mutated by this function and should not be reused.
+func StrategicMergeMapPatch(original, patch JSONMap, dataStruct interface{}) (JSONMap, error) { + schema, err := NewPatchMetaFromStruct(dataStruct) + if err != nil { + return nil, err + } + + // We need the go struct tags `patchMergeKey` and `patchStrategy` for fields that support a strategic merge patch. + // For native resources, we can easily figure out these tags since we know the fields. + + // Because custom resources are decoded as Unstructured and because we're missing the metadata about how to handle + // each field in a strategic merge patch, we can't find the go struct tags. Hence, we can't easily do a strategic merge + // for custom resources. So we should fail fast and return an error. + if _, ok := dataStruct.(*unstructured.Unstructured); ok { + return nil, mergepatch.ErrUnsupportedStrategicMergePatchFormat + } + + return StrategicMergeMapPatchUsingLookupPatchMeta(original, patch, schema) +} + +func StrategicMergeMapPatchUsingLookupPatchMeta(original, patch JSONMap, schema LookupPatchMeta) (JSONMap, error) { + mergeOptions := MergeOptions{ + MergeParallelList: true, + IgnoreUnmatchedNulls: true, + } + return mergeMap(original, patch, schema, mergeOptions) +} + +// handleDirectiveInMergeMap handles the patch directive when merging 2 maps. +func handleDirectiveInMergeMap(directive interface{}, patch map[string]interface{}) (map[string]interface{}, error) { + if directive == replaceDirective { + // If the patch contains "$patch: replace", don't merge it, just use the + // patch directly. Later on, we can add a single level replace that only + // affects the map that the $patch is in. + delete(patch, directiveMarker) + return patch, nil + } + + if directive == deleteDirective { + // If the patch contains "$patch: delete", don't merge it, just return + // an empty map. 
+ return map[string]interface{}{}, nil + } + + return nil, mergepatch.ErrBadPatchType(directive, patch) +} + +func containsDirectiveMarker(item interface{}) bool { + m, ok := item.(map[string]interface{}) + if ok { + if _, foundDirectiveMarker := m[directiveMarker]; foundDirectiveMarker { + return true + } + } + return false +} + +func mergeKeyValueEqual(left, right interface{}, mergeKey string) (bool, error) { + if len(mergeKey) == 0 { + return left == right, nil + } + typedLeft, ok := left.(map[string]interface{}) + if !ok { + return false, mergepatch.ErrBadArgType(typedLeft, left) + } + typedRight, ok := right.(map[string]interface{}) + if !ok { + return false, mergepatch.ErrBadArgType(typedRight, right) + } + mergeKeyLeft, ok := typedLeft[mergeKey] + if !ok { + return false, mergepatch.ErrNoMergeKey(typedLeft, mergeKey) + } + mergeKeyRight, ok := typedRight[mergeKey] + if !ok { + return false, mergepatch.ErrNoMergeKey(typedRight, mergeKey) + } + return mergeKeyLeft == mergeKeyRight, nil +} + +// extractKey trims the prefix and return the original key +func extractKey(s, prefix string) (string, error) { + substrings := strings.SplitN(s, "/", 2) + if len(substrings) <= 1 || substrings[0] != prefix { + switch prefix { + case deleteFromPrimitiveListDirectivePrefix: + return "", mergepatch.ErrBadPatchFormatForPrimitiveList + case setElementOrderDirectivePrefix: + return "", mergepatch.ErrBadPatchFormatForSetElementOrderList + default: + return "", fmt.Errorf("fail to find unknown prefix %q in %s\n", prefix, s) + } + } + return substrings[1], nil +} + +// validatePatchUsingSetOrderList verifies: +// the relative order of any two items in the setOrderList list matches that in the patch list. +// the items in the patch list must be a subset or the same as the $setElementOrder list (deletions are ignored). 
+func validatePatchWithSetOrderList(patchList, setOrderList interface{}, mergeKey string) error { + typedSetOrderList, ok := setOrderList.([]interface{}) + if !ok { + return mergepatch.ErrBadPatchFormatForSetElementOrderList + } + typedPatchList, ok := patchList.([]interface{}) + if !ok { + return mergepatch.ErrBadPatchFormatForSetElementOrderList + } + if len(typedSetOrderList) == 0 || len(typedPatchList) == 0 { + return nil + } + + var nonDeleteList, toDeleteList []interface{} + var err error + if len(mergeKey) > 0 { + nonDeleteList, toDeleteList, err = extractToDeleteItems(typedPatchList) + if err != nil { + return err + } + } else { + nonDeleteList = typedPatchList + } + + patchIndex, setOrderIndex := 0, 0 + for patchIndex < len(nonDeleteList) && setOrderIndex < len(typedSetOrderList) { + if containsDirectiveMarker(nonDeleteList[patchIndex]) { + patchIndex++ + continue + } + mergeKeyEqual, err := mergeKeyValueEqual(nonDeleteList[patchIndex], typedSetOrderList[setOrderIndex], mergeKey) + if err != nil { + return err + } + if mergeKeyEqual { + patchIndex++ + } + setOrderIndex++ + } + // If patchIndex is inbound but setOrderIndex if out of bound mean there are items mismatching between the patch list and setElementOrder list. + // the second check is is a sanity check, and should always be true if the first is true. + if patchIndex < len(nonDeleteList) && setOrderIndex >= len(typedSetOrderList) { + return fmt.Errorf("The order in patch list:\n%v\n doesn't match %s list:\n%v\n", typedPatchList, setElementOrderDirectivePrefix, setOrderList) + } + typedPatchList = append(nonDeleteList, toDeleteList...) + return nil +} + +// preprocessDeletionListForMerging preprocesses the deletion list. 
+// it returns shouldContinue, isDeletionList, noPrefixKey +func preprocessDeletionListForMerging(key string, original map[string]interface{}, + patchVal interface{}, mergeDeletionList bool) (bool, bool, string, error) { + // If found a parallel list for deletion and we are going to merge the list, + // overwrite the key to the original key and set flag isDeleteList + foundParallelListPrefix := strings.HasPrefix(key, deleteFromPrimitiveListDirectivePrefix) + if foundParallelListPrefix { + if !mergeDeletionList { + original[key] = patchVal + return true, false, "", nil + } + originalKey, err := extractKey(key, deleteFromPrimitiveListDirectivePrefix) + return false, true, originalKey, err + } + return false, false, "", nil +} + +// applyRetainKeysDirective looks for a retainKeys directive and applies to original +// - if no directive exists do nothing +// - if directive is found, clear keys in original missing from the directive list +// - validate that all keys present in the patch are present in the retainKeys directive +// note: original may be another patch request, e.g. applying the add+modified patch to the deletions patch. In this case it may have directives +func applyRetainKeysDirective(original, patch map[string]interface{}, options MergeOptions) error { + retainKeysInPatch, foundInPatch := patch[retainKeysDirective] + if !foundInPatch { + return nil + } + // cleanup the directive + delete(patch, retainKeysDirective) + + if !options.MergeParallelList { + // If original is actually a patch, make sure the retainKeys directives are the same in both patches if present in both. + // If not present in the original patch, copy from the modified patch. + retainKeysInOriginal, foundInOriginal := original[retainKeysDirective] + if foundInOriginal { + if !reflect.DeepEqual(retainKeysInOriginal, retainKeysInPatch) { + // This error actually should never happen. 
+ return fmt.Errorf("%v and %v are not deep equal: this may happen when calculating the 3-way diff patch", retainKeysInOriginal, retainKeysInPatch) + } + } else { + original[retainKeysDirective] = retainKeysInPatch + } + return nil + } + + retainKeysList, ok := retainKeysInPatch.([]interface{}) + if !ok { + return mergepatch.ErrBadPatchFormatForRetainKeys + } + + // validate patch to make sure all fields in the patch are present in the retainKeysList. + // The map is used only as a set, the value is never referenced + m := map[interface{}]struct{}{} + for _, v := range retainKeysList { + m[v] = struct{}{} + } + for k, v := range patch { + if v == nil || strings.HasPrefix(k, deleteFromPrimitiveListDirectivePrefix) || + strings.HasPrefix(k, setElementOrderDirectivePrefix) { + continue + } + // If there is an item present in the patch but not in the retainKeys list, + // the patch is invalid. + if _, found := m[k]; !found { + return mergepatch.ErrBadPatchFormatForRetainKeys + } + } + + // clear not present fields + for k := range original { + if _, found := m[k]; !found { + delete(original, k) + } + } + return nil +} + +// mergePatchIntoOriginal processes $setElementOrder list. +// When not merging the directive, it will make sure $setElementOrder list exist only in original. +// When merging the directive, it will try to find the $setElementOrder list and +// its corresponding patch list, validate it and merge it. +// Then, sort them by the relative order in setElementOrder, patch list and live list. +// The precedence is $setElementOrder > order in patch list > order in live list. +// This function will delete the item after merging it to prevent process it again in the future. 
// Ref: https://git.k8s.io/community/contributors/design-proposals/cli/preserve-order-in-strategic-merge-patch.md
func mergePatchIntoOriginal(original, patch map[string]interface{}, schema LookupPatchMeta, mergeOptions MergeOptions) error {
	for key, patchV := range patch {
		// Do nothing if there is no ordering directive
		if !strings.HasPrefix(key, setElementOrderDirectivePrefix) {
			continue
		}

		setElementOrderInPatch := patchV
		// Copies directive from the second patch (`patch`) to the first patch (`original`)
		// and checks they are equal and delete the directive in the second patch
		if !mergeOptions.MergeParallelList {
			setElementOrderListInOriginal, ok := original[key]
			if ok {
				// check if the setElementOrder list in original and the one in patch matches
				if !reflect.DeepEqual(setElementOrderListInOriginal, setElementOrderInPatch) {
					return mergepatch.ErrBadPatchFormatForSetElementOrderList
				}
			} else {
				// move the setElementOrder list from patch to original
				original[key] = setElementOrderInPatch
			}
		}
		// Consume the directive so it is not processed again.
		delete(patch, key)

		var (
			ok                                          bool
			originalFieldValue, patchFieldValue, merged []interface{}
			patchStrategy                               string
			patchMeta                                   PatchMeta
			subschema                                   LookupPatchMeta
		)
		typedSetElementOrderList, ok := setElementOrderInPatch.([]interface{})
		if !ok {
			return mergepatch.ErrBadArgType(typedSetElementOrderList, setElementOrderInPatch)
		}
		// Trim the setElementOrderDirectivePrefix to get the key of the list field in original.
		originalKey, err := extractKey(key, setElementOrderDirectivePrefix)
		if err != nil {
			return err
		}
		// try to find the list with `originalKey` in `original` and `modified` and merge them.
		originalList, foundOriginal := original[originalKey]
		patchList, foundPatch := patch[originalKey]
		if foundOriginal {
			originalFieldValue, ok = originalList.([]interface{})
			if !ok {
				return mergepatch.ErrBadArgType(originalFieldValue, originalList)
			}
		}
		if foundPatch {
			patchFieldValue, ok = patchList.([]interface{})
			if !ok {
				return mergepatch.ErrBadArgType(patchFieldValue, patchList)
			}
		}
		subschema, patchMeta, err = schema.LookupPatchMetadataForSlice(originalKey)
		if err != nil {
			return err
		}
		_, patchStrategy, err = extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies())
		if err != nil {
			return err
		}
		// Check for consistency between the element order list and the field it applies to
		err = validatePatchWithSetOrderList(patchFieldValue, typedSetElementOrderList, patchMeta.GetPatchMergeKey())
		if err != nil {
			return err
		}

		// Determine the merged list depending on which side(s) carry the field.
		switch {
		case foundOriginal && !foundPatch:
			// no change to list contents
			merged = originalFieldValue
		case !foundOriginal && foundPatch:
			// list was added
			merged = patchFieldValue
		case foundOriginal && foundPatch:
			merged, err = mergeSliceHandler(originalList, patchList, subschema,
				patchStrategy, patchMeta.GetPatchMergeKey(), false, mergeOptions)
			if err != nil {
				return err
			}
		case !foundOriginal && !foundPatch:
			// Neither side has the list: only the directive existed; move on.
			continue
		}

		// Split all items into patch items and server-only items and then enforce the order.
		var patchItems, serverOnlyItems []interface{}
		if len(patchMeta.GetPatchMergeKey()) == 0 {
			// Primitives doesn't need merge key to do partitioning.
			patchItems, serverOnlyItems = partitionPrimitivesByPresentInList(merged, typedSetElementOrderList)

		} else {
			// Maps need merge key to do partitioning.
			patchItems, serverOnlyItems, err = partitionMapsByPresentInList(merged, typedSetElementOrderList, patchMeta.GetPatchMergeKey())
			if err != nil {
				return err
			}
		}

		elementType, err := sliceElementType(originalFieldValue, patchFieldValue)
		if err != nil {
			return err
		}
		kind := elementType.Kind()
		// normalize merged list
		// typedSetElementOrderList contains all the relative order in typedPatchList,
		// so don't need to use typedPatchList
		both, err := normalizeElementOrder(patchItems, serverOnlyItems, typedSetElementOrderList, originalFieldValue, patchMeta.GetPatchMergeKey(), kind)
		if err != nil {
			return err
		}
		original[originalKey] = both
		// delete patch list from patch to prevent process again in the future
		delete(patch, originalKey)
	}
	return nil
}

// partitionPrimitivesByPresentInList partitions elements into 2 slices, the first containing items present in partitionBy, the other not.
func partitionPrimitivesByPresentInList(original, partitionBy []interface{}) ([]interface{}, []interface{}) {
	patch := make([]interface{}, 0, len(original))
	serverOnly := make([]interface{}, 0, len(original))
	// Build a membership set of the partitioning values for O(1) lookups.
	inPatch := map[interface{}]bool{}
	for _, v := range partitionBy {
		inPatch[v] = true
	}
	for _, v := range original {
		if !inPatch[v] {
			serverOnly = append(serverOnly, v)
		} else {
			patch = append(patch, v)
		}
	}
	return patch, serverOnly
}

// partitionMapsByPresentInList partitions elements into 2 slices, the first containing items present in partitionBy, the other not.
+func partitionMapsByPresentInList(original, partitionBy []interface{}, mergeKey string) ([]interface{}, []interface{}, error) { + patch := make([]interface{}, 0, len(original)) + serverOnly := make([]interface{}, 0, len(original)) + for _, v := range original { + typedV, ok := v.(map[string]interface{}) + if !ok { + return nil, nil, mergepatch.ErrBadArgType(typedV, v) + } + mergeKeyValue, foundMergeKey := typedV[mergeKey] + if !foundMergeKey { + return nil, nil, mergepatch.ErrNoMergeKey(typedV, mergeKey) + } + _, _, found, err := findMapInSliceBasedOnKeyValue(partitionBy, mergeKey, mergeKeyValue) + if err != nil { + return nil, nil, err + } + if !found { + serverOnly = append(serverOnly, v) + } else { + patch = append(patch, v) + } + } + return patch, serverOnly, nil +} + +// Merge fields from a patch map into the original map. Note: This may modify +// both the original map and the patch because getting a deep copy of a map in +// golang is highly non-trivial. +// flag mergeOptions.MergeParallelList controls if using the parallel list to delete or keeping the list. +// If patch contains any null field (e.g. field_1: null) that is not +// present in original, then to propagate it to the end result use +// mergeOptions.IgnoreUnmatchedNulls == false. +func mergeMap(original, patch map[string]interface{}, schema LookupPatchMeta, mergeOptions MergeOptions) (map[string]interface{}, error) { + if v, ok := patch[directiveMarker]; ok { + return handleDirectiveInMergeMap(v, patch) + } + + // nil is an accepted value for original to simplify logic in other places. + // If original is nil, replace it with an empty map and then apply the patch. + if original == nil { + original = map[string]interface{}{} + } + + err := applyRetainKeysDirective(original, patch, mergeOptions) + if err != nil { + return nil, err + } + + // Process $setElementOrder list and other lists sharing the same key. 
+ // When not merging the directive, it will make sure $setElementOrder list exist only in original. + // When merging the directive, it will process $setElementOrder and its patch list together. + // This function will delete the merged elements from patch so they will not be reprocessed + err = mergePatchIntoOriginal(original, patch, schema, mergeOptions) + if err != nil { + return nil, err + } + + // Start merging the patch into the original. + for k, patchV := range patch { + skipProcessing, isDeleteList, noPrefixKey, err := preprocessDeletionListForMerging(k, original, patchV, mergeOptions.MergeParallelList) + if err != nil { + return nil, err + } + if skipProcessing { + continue + } + if len(noPrefixKey) > 0 { + k = noPrefixKey + } + + // If the value of this key is null, delete the key if it exists in the + // original. Otherwise, check if we want to preserve it or skip it. + // Preserving the null value is useful when we want to send an explicit + // delete to the API server. + if patchV == nil { + if _, ok := original[k]; ok { + delete(original, k) + } + if mergeOptions.IgnoreUnmatchedNulls { + continue + } + } + + _, ok := original[k] + if !ok { + // If it's not in the original document, just take the patch value. + original[k] = patchV + continue + } + + originalType := reflect.TypeOf(original[k]) + patchType := reflect.TypeOf(patchV) + if originalType != patchType { + original[k] = patchV + continue + } + // If they're both maps or lists, recurse into the value. 
+ switch originalType.Kind() { + case reflect.Map: + subschema, patchMeta, err2 := schema.LookupPatchMetadataForStruct(k) + if err2 != nil { + return nil, err2 + } + _, patchStrategy, err2 := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) + if err2 != nil { + return nil, err2 + } + original[k], err = mergeMapHandler(original[k], patchV, subschema, patchStrategy, mergeOptions) + case reflect.Slice: + subschema, patchMeta, err2 := schema.LookupPatchMetadataForSlice(k) + if err2 != nil { + return nil, err2 + } + _, patchStrategy, err2 := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) + if err2 != nil { + return nil, err2 + } + original[k], err = mergeSliceHandler(original[k], patchV, subschema, patchStrategy, patchMeta.GetPatchMergeKey(), isDeleteList, mergeOptions) + default: + original[k] = patchV + } + if err != nil { + return nil, err + } + } + return original, nil +} + +// mergeMapHandler handles how to merge `patchV` whose key is `key` with `original` respecting +// fieldPatchStrategy and mergeOptions. +func mergeMapHandler(original, patch interface{}, schema LookupPatchMeta, + fieldPatchStrategy string, mergeOptions MergeOptions) (map[string]interface{}, error) { + typedOriginal, typedPatch, err := mapTypeAssertion(original, patch) + if err != nil { + return nil, err + } + + if fieldPatchStrategy != replaceDirective { + return mergeMap(typedOriginal, typedPatch, schema, mergeOptions) + } else { + return typedPatch, nil + } +} + +// mergeSliceHandler handles how to merge `patchV` whose key is `key` with `original` respecting +// fieldPatchStrategy, fieldPatchMergeKey, isDeleteList and mergeOptions. 
+func mergeSliceHandler(original, patch interface{}, schema LookupPatchMeta, + fieldPatchStrategy, fieldPatchMergeKey string, isDeleteList bool, mergeOptions MergeOptions) ([]interface{}, error) { + typedOriginal, typedPatch, err := sliceTypeAssertion(original, patch) + if err != nil { + return nil, err + } + + if fieldPatchStrategy == mergeDirective { + return mergeSlice(typedOriginal, typedPatch, schema, fieldPatchMergeKey, mergeOptions, isDeleteList) + } else { + return typedPatch, nil + } +} + +// Merge two slices together. Note: This may modify both the original slice and +// the patch because getting a deep copy of a slice in golang is highly +// non-trivial. +func mergeSlice(original, patch []interface{}, schema LookupPatchMeta, mergeKey string, mergeOptions MergeOptions, isDeleteList bool) ([]interface{}, error) { + if len(original) == 0 && len(patch) == 0 { + return original, nil + } + + // All the values must be of the same type, but not a list. + t, err := sliceElementType(original, patch) + if err != nil { + return nil, err + } + + var merged []interface{} + kind := t.Kind() + // If the elements are not maps, merge the slices of scalars. + if kind != reflect.Map { + if mergeOptions.MergeParallelList && isDeleteList { + return deleteFromSlice(original, patch), nil + } + // Maybe in the future add a "concat" mode that doesn't + // deduplicate. + both := append(original, patch...) 
+ merged = deduplicateScalars(both) + + } else { + if mergeKey == "" { + return nil, fmt.Errorf("cannot merge lists without merge key for %s", schema.Name()) + } + + original, patch, err = mergeSliceWithSpecialElements(original, patch, mergeKey) + if err != nil { + return nil, err + } + + merged, err = mergeSliceWithoutSpecialElements(original, patch, mergeKey, schema, mergeOptions) + if err != nil { + return nil, err + } + } + + // enforce the order + var patchItems, serverOnlyItems []interface{} + if len(mergeKey) == 0 { + patchItems, serverOnlyItems = partitionPrimitivesByPresentInList(merged, patch) + } else { + patchItems, serverOnlyItems, err = partitionMapsByPresentInList(merged, patch, mergeKey) + if err != nil { + return nil, err + } + } + return normalizeElementOrder(patchItems, serverOnlyItems, patch, original, mergeKey, kind) +} + +// mergeSliceWithSpecialElements handles special elements with directiveMarker +// before merging the slices. It returns a updated `original` and a patch without special elements. +// original and patch must be slices of maps, they should be checked before calling this function. +func mergeSliceWithSpecialElements(original, patch []interface{}, mergeKey string) ([]interface{}, []interface{}, error) { + patchWithoutSpecialElements := []interface{}{} + replace := false + for _, v := range patch { + typedV := v.(map[string]interface{}) + patchType, ok := typedV[directiveMarker] + if !ok { + patchWithoutSpecialElements = append(patchWithoutSpecialElements, v) + } else { + switch patchType { + case deleteDirective: + mergeValue, ok := typedV[mergeKey] + if ok { + var err error + original, err = deleteMatchingEntries(original, mergeKey, mergeValue) + if err != nil { + return nil, nil, err + } + } else { + return nil, nil, mergepatch.ErrNoMergeKey(typedV, mergeKey) + } + case replaceDirective: + replace = true + // Continue iterating through the array to prune any other $patch elements. 
+ case mergeDirective: + return nil, nil, fmt.Errorf("merging lists cannot yet be specified in the patch") + default: + return nil, nil, mergepatch.ErrBadPatchType(patchType, typedV) + } + } + } + if replace { + return patchWithoutSpecialElements, nil, nil + } + return original, patchWithoutSpecialElements, nil +} + +// delete all matching entries (based on merge key) from a merging list +func deleteMatchingEntries(original []interface{}, mergeKey string, mergeValue interface{}) ([]interface{}, error) { + for { + _, originalKey, found, err := findMapInSliceBasedOnKeyValue(original, mergeKey, mergeValue) + if err != nil { + return nil, err + } + + if !found { + break + } + // Delete the element at originalKey. + original = append(original[:originalKey], original[originalKey+1:]...) + } + return original, nil +} + +// mergeSliceWithoutSpecialElements merges slices with non-special elements. +// original and patch must be slices of maps, they should be checked before calling this function. +func mergeSliceWithoutSpecialElements(original, patch []interface{}, mergeKey string, schema LookupPatchMeta, mergeOptions MergeOptions) ([]interface{}, error) { + for _, v := range patch { + typedV := v.(map[string]interface{}) + mergeValue, ok := typedV[mergeKey] + if !ok { + return nil, mergepatch.ErrNoMergeKey(typedV, mergeKey) + } + + // If we find a value with this merge key value in original, merge the + // maps. Otherwise append onto original. + originalMap, originalKey, found, err := findMapInSliceBasedOnKeyValue(original, mergeKey, mergeValue) + if err != nil { + return nil, err + } + + if found { + var mergedMaps interface{} + var err error + // Merge into original. 
+ mergedMaps, err = mergeMap(originalMap, typedV, schema, mergeOptions) + if err != nil { + return nil, err + } + + original[originalKey] = mergedMaps + } else { + original = append(original, v) + } + } + return original, nil +} + +// deleteFromSlice uses the parallel list to delete the items in a list of scalars +func deleteFromSlice(current, toDelete []interface{}) []interface{} { + toDeleteMap := map[interface{}]interface{}{} + processed := make([]interface{}, 0, len(current)) + for _, v := range toDelete { + toDeleteMap[v] = true + } + for _, v := range current { + if _, found := toDeleteMap[v]; !found { + processed = append(processed, v) + } + } + return processed +} + +// This method no longer panics if any element of the slice is not a map. +func findMapInSliceBasedOnKeyValue(m []interface{}, key string, value interface{}) (map[string]interface{}, int, bool, error) { + for k, v := range m { + typedV, ok := v.(map[string]interface{}) + if !ok { + return nil, 0, false, fmt.Errorf("value for key %v is not a map", k) + } + + valueToMatch, ok := typedV[key] + if ok && valueToMatch == value { + return typedV, k, true, nil + } + } + + return nil, 0, false, nil +} + +// This function takes a JSON map and sorts all the lists that should be merged +// by key. This is needed by tests because in JSON, list order is significant, +// but in Strategic Merge Patch, merge lists do not have significant order. +// Sorting the lists allows for order-insensitive comparison of patched maps. +func sortMergeListsByName(mapJSON []byte, schema LookupPatchMeta) ([]byte, error) { + var m map[string]interface{} + err := json.Unmarshal(mapJSON, &m) + if err != nil { + return nil, mergepatch.ErrBadJSONDoc + } + + newM, err := sortMergeListsByNameMap(m, schema) + if err != nil { + return nil, err + } + + return json.Marshal(newM) +} + +// Function sortMergeListsByNameMap recursively sorts the merge lists by its mergeKey in a map. 
+func sortMergeListsByNameMap(s map[string]interface{}, schema LookupPatchMeta) (map[string]interface{}, error) { + newS := map[string]interface{}{} + for k, v := range s { + if k == retainKeysDirective { + typedV, ok := v.([]interface{}) + if !ok { + return nil, mergepatch.ErrBadPatchFormatForRetainKeys + } + v = sortScalars(typedV) + } else if strings.HasPrefix(k, deleteFromPrimitiveListDirectivePrefix) { + typedV, ok := v.([]interface{}) + if !ok { + return nil, mergepatch.ErrBadPatchFormatForPrimitiveList + } + v = sortScalars(typedV) + } else if strings.HasPrefix(k, setElementOrderDirectivePrefix) { + _, ok := v.([]interface{}) + if !ok { + return nil, mergepatch.ErrBadPatchFormatForSetElementOrderList + } + } else if k != directiveMarker { + // recurse for map and slice. + switch typedV := v.(type) { + case map[string]interface{}: + subschema, _, err := schema.LookupPatchMetadataForStruct(k) + if err != nil { + return nil, err + } + v, err = sortMergeListsByNameMap(typedV, subschema) + if err != nil { + return nil, err + } + case []interface{}: + subschema, patchMeta, err := schema.LookupPatchMetadataForSlice(k) + if err != nil { + return nil, err + } + _, patchStrategy, err := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) + if err != nil { + return nil, err + } + if patchStrategy == mergeDirective { + var err error + v, err = sortMergeListsByNameArray(typedV, subschema, patchMeta.GetPatchMergeKey(), true) + if err != nil { + return nil, err + } + } + } + } + + newS[k] = v + } + + return newS, nil +} + +// Function sortMergeListsByNameMap recursively sorts the merge lists by its mergeKey in an array. +func sortMergeListsByNameArray(s []interface{}, schema LookupPatchMeta, mergeKey string, recurse bool) ([]interface{}, error) { + if len(s) == 0 { + return s, nil + } + + // We don't support lists of lists yet. + t, err := sliceElementType(s) + if err != nil { + return nil, err + } + + // If the elements are not maps... 
+ if t.Kind() != reflect.Map { + // Sort the elements, because they may have been merged out of order. + return deduplicateAndSortScalars(s), nil + } + + // Elements are maps - if one of the keys of the map is a map or a + // list, we may need to recurse into it. + newS := []interface{}{} + for _, elem := range s { + if recurse { + typedElem := elem.(map[string]interface{}) + newElem, err := sortMergeListsByNameMap(typedElem, schema) + if err != nil { + return nil, err + } + + newS = append(newS, newElem) + } else { + newS = append(newS, elem) + } + } + + // Sort the maps. + newS = sortMapsBasedOnField(newS, mergeKey) + return newS, nil +} + +func sortMapsBasedOnField(m []interface{}, fieldName string) []interface{} { + mapM := mapSliceFromSlice(m) + ss := SortableSliceOfMaps{mapM, fieldName} + sort.Sort(ss) + newS := sliceFromMapSlice(ss.s) + return newS +} + +func mapSliceFromSlice(m []interface{}) []map[string]interface{} { + newM := []map[string]interface{}{} + for _, v := range m { + vt := v.(map[string]interface{}) + newM = append(newM, vt) + } + + return newM +} + +func sliceFromMapSlice(s []map[string]interface{}) []interface{} { + newS := []interface{}{} + for _, v := range s { + newS = append(newS, v) + } + + return newS +} + +type SortableSliceOfMaps struct { + s []map[string]interface{} + k string // key to sort on +} + +func (ss SortableSliceOfMaps) Len() int { + return len(ss.s) +} + +func (ss SortableSliceOfMaps) Less(i, j int) bool { + iStr := fmt.Sprintf("%v", ss.s[i][ss.k]) + jStr := fmt.Sprintf("%v", ss.s[j][ss.k]) + return sort.StringsAreSorted([]string{iStr, jStr}) +} + +func (ss SortableSliceOfMaps) Swap(i, j int) { + tmp := ss.s[i] + ss.s[i] = ss.s[j] + ss.s[j] = tmp +} + +func deduplicateAndSortScalars(s []interface{}) []interface{} { + s = deduplicateScalars(s) + return sortScalars(s) +} + +func sortScalars(s []interface{}) []interface{} { + ss := SortableSliceOfScalars{s} + sort.Sort(ss) + return ss.s +} + +func deduplicateScalars(s 
[]interface{}) []interface{} { + // Clever algorithm to deduplicate. + length := len(s) - 1 + for i := 0; i < length; i++ { + for j := i + 1; j <= length; j++ { + if s[i] == s[j] { + s[j] = s[length] + s = s[0:length] + length-- + j-- + } + } + } + + return s +} + +type SortableSliceOfScalars struct { + s []interface{} +} + +func (ss SortableSliceOfScalars) Len() int { + return len(ss.s) +} + +func (ss SortableSliceOfScalars) Less(i, j int) bool { + iStr := fmt.Sprintf("%v", ss.s[i]) + jStr := fmt.Sprintf("%v", ss.s[j]) + return sort.StringsAreSorted([]string{iStr, jStr}) +} + +func (ss SortableSliceOfScalars) Swap(i, j int) { + tmp := ss.s[i] + ss.s[i] = ss.s[j] + ss.s[j] = tmp +} + +// Returns the type of the elements of N slice(s). If the type is different, +// another slice or undefined, returns an error. +func sliceElementType(slices ...[]interface{}) (reflect.Type, error) { + var prevType reflect.Type + for _, s := range slices { + // Go through elements of all given slices and make sure they are all the same type. + for _, v := range s { + currentType := reflect.TypeOf(v) + if prevType == nil { + prevType = currentType + // We don't support lists of lists yet. + if prevType.Kind() == reflect.Slice { + return nil, mergepatch.ErrNoListOfLists + } + } else { + if prevType != currentType { + return nil, fmt.Errorf("list element types are not identical: %v", fmt.Sprint(slices)) + } + prevType = currentType + } + } + } + + if prevType == nil { + return nil, fmt.Errorf("no elements in any of the given slices") + } + + return prevType, nil +} + +// MergingMapsHaveConflicts returns true if the left and right JSON interface +// objects overlap with different values in any key. All keys are required to be +// strings. Since patches of the same Type have congruent keys, this is valid +// for multiple patch types. This method supports strategic merge patch semantics. 
+func MergingMapsHaveConflicts(left, right map[string]interface{}, schema LookupPatchMeta) (bool, error) { + return mergingMapFieldsHaveConflicts(left, right, schema, "", "") +} + +func mergingMapFieldsHaveConflicts( + left, right interface{}, + schema LookupPatchMeta, + fieldPatchStrategy, fieldPatchMergeKey string, +) (bool, error) { + switch leftType := left.(type) { + case map[string]interface{}: + rightType, ok := right.(map[string]interface{}) + if !ok { + return true, nil + } + leftMarker, okLeft := leftType[directiveMarker] + rightMarker, okRight := rightType[directiveMarker] + // if one or the other has a directive marker, + // then we need to consider that before looking at the individual keys, + // since a directive operates on the whole map. + if okLeft || okRight { + // if one has a directive marker and the other doesn't, + // then we have a conflict, since one is deleting or replacing the whole map, + // and the other is doing things to individual keys. + if okLeft != okRight { + return true, nil + } + // if they both have markers, but they are not the same directive, + // then we have a conflict because they're doing different things to the map. + if leftMarker != rightMarker { + return true, nil + } + } + if fieldPatchStrategy == replaceDirective { + return false, nil + } + // Check the individual keys. 
+ return mapsHaveConflicts(leftType, rightType, schema) + + case []interface{}: + rightType, ok := right.([]interface{}) + if !ok { + return true, nil + } + return slicesHaveConflicts(leftType, rightType, schema, fieldPatchStrategy, fieldPatchMergeKey) + case string, float64, bool, int, int64, nil: + return !reflect.DeepEqual(left, right), nil + default: + return true, fmt.Errorf("unknown type: %v", reflect.TypeOf(left)) + } +} + +func mapsHaveConflicts(typedLeft, typedRight map[string]interface{}, schema LookupPatchMeta) (bool, error) { + for key, leftValue := range typedLeft { + if key != directiveMarker && key != retainKeysDirective { + if rightValue, ok := typedRight[key]; ok { + var subschema LookupPatchMeta + var patchMeta PatchMeta + var patchStrategy string + var err error + switch leftValue.(type) { + case []interface{}: + subschema, patchMeta, err = schema.LookupPatchMetadataForSlice(key) + if err != nil { + return true, err + } + _, patchStrategy, err = extractRetainKeysPatchStrategy(patchMeta.patchStrategies) + if err != nil { + return true, err + } + case map[string]interface{}: + subschema, patchMeta, err = schema.LookupPatchMetadataForStruct(key) + if err != nil { + return true, err + } + _, patchStrategy, err = extractRetainKeysPatchStrategy(patchMeta.patchStrategies) + if err != nil { + return true, err + } + } + + if hasConflicts, err := mergingMapFieldsHaveConflicts(leftValue, rightValue, + subschema, patchStrategy, patchMeta.GetPatchMergeKey()); hasConflicts { + return true, err + } + } + } + } + + return false, nil +} + +func slicesHaveConflicts( + typedLeft, typedRight []interface{}, + schema LookupPatchMeta, + fieldPatchStrategy, fieldPatchMergeKey string, +) (bool, error) { + elementType, err := sliceElementType(typedLeft, typedRight) + if err != nil { + return true, err + } + + if fieldPatchStrategy == mergeDirective { + // Merging lists of scalars have no conflicts by definition + // So we only need to check further if the elements are 
maps + if elementType.Kind() != reflect.Map { + return false, nil + } + + // Build a map for each slice and then compare the two maps + leftMap, err := sliceOfMapsToMapOfMaps(typedLeft, fieldPatchMergeKey) + if err != nil { + return true, err + } + + rightMap, err := sliceOfMapsToMapOfMaps(typedRight, fieldPatchMergeKey) + if err != nil { + return true, err + } + + return mapsOfMapsHaveConflicts(leftMap, rightMap, schema) + } + + // Either we don't have type information, or these are non-merging lists + if len(typedLeft) != len(typedRight) { + return true, nil + } + + // Sort scalar slices to prevent ordering issues + // We have no way to sort non-merging lists of maps + if elementType.Kind() != reflect.Map { + typedLeft = deduplicateAndSortScalars(typedLeft) + typedRight = deduplicateAndSortScalars(typedRight) + } + + // Compare the slices element by element in order + // This test will fail if the slices are not sorted + for i := range typedLeft { + if hasConflicts, err := mergingMapFieldsHaveConflicts(typedLeft[i], typedRight[i], schema, "", ""); hasConflicts { + return true, err + } + } + + return false, nil +} + +func sliceOfMapsToMapOfMaps(slice []interface{}, mergeKey string) (map[string]interface{}, error) { + result := make(map[string]interface{}, len(slice)) + for _, value := range slice { + typedValue, ok := value.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("invalid element type in merging list:%v", slice) + } + + mergeValue, ok := typedValue[mergeKey] + if !ok { + return nil, fmt.Errorf("cannot find merge key `%s` in merging list element:%v", mergeKey, typedValue) + } + + result[fmt.Sprintf("%s", mergeValue)] = typedValue + } + + return result, nil +} + +func mapsOfMapsHaveConflicts(typedLeft, typedRight map[string]interface{}, schema LookupPatchMeta) (bool, error) { + for key, leftValue := range typedLeft { + if rightValue, ok := typedRight[key]; ok { + if hasConflicts, err := mergingMapFieldsHaveConflicts(leftValue, rightValue, 
schema, "", ""); hasConflicts { + return true, err + } + } + } + + return false, nil +} + +// CreateThreeWayMergePatch reconciles a modified configuration with an original configuration, +// while preserving any changes or deletions made to the original configuration in the interim, +// and not overridden by the current configuration. All three documents must be passed to the +// method as json encoded content. It will return a strategic merge patch, or an error if any +// of the documents is invalid, or if there are any preconditions that fail against the modified +// configuration, or, if overwrite is false and there are conflicts between the modified and current +// configurations. Conflicts are defined as keys changed differently from original to modified +// than from original to current. In other words, a conflict occurs if modified changes any key +// in a way that is different from how it is changed in current (e.g., deleting it, changing its +// value). We also propagate values fields that do not exist in original but are explicitly +// defined in modified. +func CreateThreeWayMergePatch(original, modified, current []byte, schema LookupPatchMeta, overwrite bool, fns ...mergepatch.PreconditionFunc) ([]byte, error) { + originalMap := map[string]interface{}{} + if len(original) > 0 { + if err := json.Unmarshal(original, &originalMap); err != nil { + return nil, mergepatch.ErrBadJSONDoc + } + } + + modifiedMap := map[string]interface{}{} + if len(modified) > 0 { + if err := json.Unmarshal(modified, &modifiedMap); err != nil { + return nil, mergepatch.ErrBadJSONDoc + } + } + + currentMap := map[string]interface{}{} + if len(current) > 0 { + if err := json.Unmarshal(current, ¤tMap); err != nil { + return nil, mergepatch.ErrBadJSONDoc + } + } + + // The patch is the difference from current to modified without deletions, plus deletions + // from original to modified. 
To find it, we compute deletions, which are the deletions from + // original to modified, and delta, which is the difference from current to modified without + // deletions, and then apply delta to deletions as a patch, which should be strictly additive. + deltaMapDiffOptions := DiffOptions{ + IgnoreDeletions: true, + SetElementOrder: true, + } + deltaMap, err := diffMaps(currentMap, modifiedMap, schema, deltaMapDiffOptions) + if err != nil { + return nil, err + } + deletionsMapDiffOptions := DiffOptions{ + SetElementOrder: true, + IgnoreChangesAndAdditions: true, + } + deletionsMap, err := diffMaps(originalMap, modifiedMap, schema, deletionsMapDiffOptions) + if err != nil { + return nil, err + } + + mergeOptions := MergeOptions{} + patchMap, err := mergeMap(deletionsMap, deltaMap, schema, mergeOptions) + if err != nil { + return nil, err + } + + // Apply the preconditions to the patch, and return an error if any of them fail. + for _, fn := range fns { + if !fn(patchMap) { + return nil, mergepatch.NewErrPreconditionFailed(patchMap) + } + } + + // If overwrite is false, and the patch contains any keys that were changed differently, + // then return a conflict error. 
+ if !overwrite { + changeMapDiffOptions := DiffOptions{} + changedMap, err := diffMaps(originalMap, currentMap, schema, changeMapDiffOptions) + if err != nil { + return nil, err + } + + hasConflicts, err := MergingMapsHaveConflicts(patchMap, changedMap, schema) + if err != nil { + return nil, err + } + + if hasConflicts { + return nil, mergepatch.NewErrConflict(mergepatch.ToYAMLOrError(patchMap), mergepatch.ToYAMLOrError(changedMap)) + } + } + + return json.Marshal(patchMap) +} + +func ItemAddedToModifiedSlice(original, modified string) bool { return original > modified } + +func ItemRemovedFromModifiedSlice(original, modified string) bool { return original < modified } + +func ItemMatchesOriginalAndModifiedSlice(original, modified string) bool { return original == modified } + +func CreateDeleteDirective(mergeKey string, mergeKeyValue interface{}) map[string]interface{} { + return map[string]interface{}{mergeKey: mergeKeyValue, directiveMarker: deleteDirective} +} + +func mapTypeAssertion(original, patch interface{}) (map[string]interface{}, map[string]interface{}, error) { + typedOriginal, ok := original.(map[string]interface{}) + if !ok { + return nil, nil, mergepatch.ErrBadArgType(typedOriginal, original) + } + typedPatch, ok := patch.(map[string]interface{}) + if !ok { + return nil, nil, mergepatch.ErrBadArgType(typedPatch, patch) + } + return typedOriginal, typedPatch, nil +} + +func sliceTypeAssertion(original, patch interface{}) ([]interface{}, []interface{}, error) { + typedOriginal, ok := original.([]interface{}) + if !ok { + return nil, nil, mergepatch.ErrBadArgType(typedOriginal, original) + } + typedPatch, ok := patch.([]interface{}) + if !ok { + return nil, nil, mergepatch.ErrBadArgType(typedPatch, patch) + } + return typedOriginal, typedPatch, nil +} + +// extractRetainKeysPatchStrategy process patch strategy, which is a string may contains multiple +// patch strategies separated by ",". 
It returns a boolean var indicating if it has +// retainKeys strategies and a string for the other strategy. +func extractRetainKeysPatchStrategy(strategies []string) (bool, string, error) { + switch len(strategies) { + case 0: + return false, "", nil + case 1: + singleStrategy := strategies[0] + switch singleStrategy { + case retainKeysStrategy: + return true, "", nil + default: + return false, singleStrategy, nil + } + case 2: + switch { + case strategies[0] == retainKeysStrategy: + return true, strategies[1], nil + case strategies[1] == retainKeysStrategy: + return true, strategies[0], nil + default: + return false, "", fmt.Errorf("unexpected patch strategy: %v", strategies) + } + default: + return false, "", fmt.Errorf("unexpected patch strategy: %v", strategies) + } +} + +// hasAdditionalNewField returns if original map has additional key with non-nil value than modified. +func hasAdditionalNewField(original, modified map[string]interface{}) bool { + for k, v := range original { + if v == nil { + continue + } + if _, found := modified[k]; !found { + return true + } + } + return false +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/types.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/types.go new file mode 100644 index 00000000..f84d65aa --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/types.go @@ -0,0 +1,193 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package strategicpatch + +import ( + "errors" + "strings" + + "k8s.io/apimachinery/pkg/util/mergepatch" + openapi "k8s.io/kube-openapi/pkg/util/proto" +) + +const ( + patchStrategyOpenapiextensionKey = "x-kubernetes-patch-strategy" + patchMergeKeyOpenapiextensionKey = "x-kubernetes-patch-merge-key" +) + +type LookupPatchItem interface { + openapi.SchemaVisitor + + Error() error + Path() *openapi.Path +} + +type kindItem struct { + key string + path *openapi.Path + err error + patchmeta PatchMeta + subschema openapi.Schema + hasVisitKind bool +} + +func NewKindItem(key string, path *openapi.Path) *kindItem { + return &kindItem{ + key: key, + path: path, + } +} + +var _ LookupPatchItem = &kindItem{} + +func (item *kindItem) Error() error { + return item.err +} + +func (item *kindItem) Path() *openapi.Path { + return item.path +} + +func (item *kindItem) VisitPrimitive(schema *openapi.Primitive) { + item.err = errors.New("expected kind, but got primitive") +} + +func (item *kindItem) VisitArray(schema *openapi.Array) { + item.err = errors.New("expected kind, but got slice") +} + +func (item *kindItem) VisitMap(schema *openapi.Map) { + item.err = errors.New("expected kind, but got map") +} + +func (item *kindItem) VisitReference(schema openapi.Reference) { + if !item.hasVisitKind { + schema.SubSchema().Accept(item) + } +} + +func (item *kindItem) VisitKind(schema *openapi.Kind) { + subschema, ok := schema.Fields[item.key] + if !ok { + item.err = FieldNotFoundError{Path: schema.GetPath().String(), Field: item.key} + return + } + + mergeKey, patchStrategies, err := parsePatchMetadata(subschema.GetExtensions()) + if err != nil { + item.err = err + return + } + item.patchmeta = PatchMeta{ + patchStrategies: patchStrategies, + patchMergeKey: mergeKey, + } + item.subschema = subschema +} + +type sliceItem struct { + key string + path *openapi.Path + err error + patchmeta PatchMeta + subschema openapi.Schema + hasVisitKind bool +} + +func NewSliceItem(key string, path 
*openapi.Path) *sliceItem { + return &sliceItem{ + key: key, + path: path, + } +} + +var _ LookupPatchItem = &sliceItem{} + +func (item *sliceItem) Error() error { + return item.err +} + +func (item *sliceItem) Path() *openapi.Path { + return item.path +} + +func (item *sliceItem) VisitPrimitive(schema *openapi.Primitive) { + item.err = errors.New("expected slice, but got primitive") +} + +func (item *sliceItem) VisitArray(schema *openapi.Array) { + if !item.hasVisitKind { + item.err = errors.New("expected visit kind first, then visit array") + } + subschema := schema.SubType + item.subschema = subschema +} + +func (item *sliceItem) VisitMap(schema *openapi.Map) { + item.err = errors.New("expected slice, but got map") +} + +func (item *sliceItem) VisitReference(schema openapi.Reference) { + if !item.hasVisitKind { + schema.SubSchema().Accept(item) + } else { + item.subschema = schema.SubSchema() + } +} + +func (item *sliceItem) VisitKind(schema *openapi.Kind) { + subschema, ok := schema.Fields[item.key] + if !ok { + item.err = FieldNotFoundError{Path: schema.GetPath().String(), Field: item.key} + return + } + + mergeKey, patchStrategies, err := parsePatchMetadata(subschema.GetExtensions()) + if err != nil { + item.err = err + return + } + item.patchmeta = PatchMeta{ + patchStrategies: patchStrategies, + patchMergeKey: mergeKey, + } + item.hasVisitKind = true + subschema.Accept(item) +} + +func parsePatchMetadata(extensions map[string]interface{}) (string, []string, error) { + ps, foundPS := extensions[patchStrategyOpenapiextensionKey] + var patchStrategies []string + var mergeKey, patchStrategy string + var ok bool + if foundPS { + patchStrategy, ok = ps.(string) + if ok { + patchStrategies = strings.Split(patchStrategy, ",") + } else { + return "", nil, mergepatch.ErrBadArgType(patchStrategy, ps) + } + } + mk, foundMK := extensions[patchMergeKeyOpenapiextensionKey] + if foundMK { + mergeKey, ok = mk.(string) + if !ok { + return "", nil, 
mergepatch.ErrBadArgType(mergeKey, mk) + } + } + return mergeKey, patchStrategies, nil +} diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/BUILD b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/BUILD new file mode 100644 index 00000000..7ece664d --- /dev/null +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/BUILD @@ -0,0 +1,32 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_library( + name = "go_default_library", + srcs = ["fields.go"], + importpath = "k8s.io/apimachinery/third_party/forked/golang/json", +) + +go_test( + name = "go_default_test", + srcs = ["fields_test.go"], + embed = [":go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS new file mode 100644 index 00000000..8e8d9fce --- /dev/null +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS @@ -0,0 +1,5 @@ +approvers: +- pwittrock +reviewers: +- mengqiy +- apelisse diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go new file mode 100644 index 00000000..8205a4dd --- /dev/null +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go @@ -0,0 +1,513 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package json is forked from the Go standard library to enable us to find the +// field of a struct that a given JSON key maps to. 
+package json + +import ( + "bytes" + "fmt" + "reflect" + "sort" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +const ( + patchStrategyTagKey = "patchStrategy" + patchMergeKeyTagKey = "patchMergeKey" +) + +// Finds the patchStrategy and patchMergeKey struct tag fields on a given +// struct field given the struct type and the JSON name of the field. +// It returns field type, a slice of patch strategies, merge key and error. +// TODO: fix the returned errors to be introspectable. +func LookupPatchMetadataForStruct(t reflect.Type, jsonField string) ( + elemType reflect.Type, patchStrategies []string, patchMergeKey string, e error) { + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + + if t.Kind() != reflect.Struct { + e = fmt.Errorf("merging an object in json but data type is not struct, instead is: %s", + t.Kind().String()) + return + } + jf := []byte(jsonField) + // Find the field that the JSON library would use. + var f *field + fields := cachedTypeFields(t) + for i := range fields { + ff := &fields[i] + if bytes.Equal(ff.nameBytes, jf) { + f = ff + break + } + // Do case-insensitive comparison. + if f == nil && ff.equalFold(ff.nameBytes, jf) { + f = ff + } + } + if f != nil { + // Find the reflect.Value of the most preferential struct field. + tjf := t.Field(f.index[0]) + // we must navigate down all the anonymously included structs in the chain + for i := 1; i < len(f.index); i++ { + tjf = tjf.Type.Field(f.index[i]) + } + patchStrategy := tjf.Tag.Get(patchStrategyTagKey) + patchMergeKey = tjf.Tag.Get(patchMergeKeyTagKey) + patchStrategies = strings.Split(patchStrategy, ",") + elemType = tjf.Type + return + } + e = fmt.Errorf("unable to find api field in struct %s for the json field %q", t.Name(), jsonField) + return +} + +// A field represents a single field found in a struct. 
+type field struct { + name string + nameBytes []byte // []byte(name) + equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent + + tag bool + // index is the sequence of indexes from the containing type fields to this field. + // it is a slice because anonymous structs will need multiple navigation steps to correctly + // resolve the proper fields + index []int + typ reflect.Type + omitEmpty bool + quoted bool +} + +func (f field) String() string { + return fmt.Sprintf("{name: %s, type: %v, tag: %v, index: %v, omitEmpty: %v, quoted: %v}", f.name, f.typ, f.tag, f.index, f.omitEmpty, f.quoted) +} + +func fillField(f field) field { + f.nameBytes = []byte(f.name) + f.equalFold = foldFunc(f.nameBytes) + return f +} + +// byName sorts field by name, breaking ties with depth, +// then breaking ties with "name came from json tag", then +// breaking ties with index sequence. +type byName []field + +func (x byName) Len() int { return len(x) } + +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byName) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. +type byIndex []field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that JSON should recognize for the given type. +// The algorithm is breadth-first search over the set of structs to include - the top struct +// and then any reachable anonymous structs. 
+func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + count := map[reflect.Type]int{} + nextCount := map[reflect.Type]int{} + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. + for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" { // unexported + continue + } + tag := sf.Tag.Get("json") + if tag == "-" { + continue + } + name, opts := parseTag(tag) + if !isValidTag(name) { + name = "" + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Record found field and index sequence. + if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := name != "" + if name == "" { + name = sf.Name + } + fields = append(fields, fillField(field{ + name: name, + tag: tagged, + index: index, + typ: ft, + omitEmpty: opts.Contains("omitempty"), + quoted: opts.Contains("string"), + })) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. 
+ nextCount[ft]++ + if nextCount[ft] == 1 { + next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft})) + } + } + } + } + + sort.Sort(byName(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with JSON tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. + fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// JSON tags. If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. + length := len(fields[0].index) + tagged := -1 // Index of first tagged field. + for i, f := range fields { + if len(f.index) > length { + fields = fields[:i] + break + } + if f.tag { + if tagged >= 0 { + // Multiple tagged fields at the same level: conflict. + // Return no field. 
+ return field{}, false + } + tagged = i + } + } + if tagged >= 0 { + return fields[tagged], true + } + // All remaining fields have the same length. If there's more than one, + // we have a conflict (two fields named "X" at the same level) and we + // return no field. + if len(fields) > 1 { + return field{}, false + } + return fields[0], true +} + +var fieldCache struct { + sync.RWMutex + m map[reflect.Type][]field +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. +func cachedTypeFields(t reflect.Type) []field { + fieldCache.RLock() + f := fieldCache.m[t] + fieldCache.RUnlock() + if f != nil { + return f + } + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. + f = typeFields(t) + if f == nil { + f = []field{} + } + + fieldCache.Lock() + if fieldCache.m == nil { + fieldCache.m = map[reflect.Type][]field{} + } + fieldCache.m[t] = f + fieldCache.Unlock() + return f +} + +func isValidTag(s string) bool { + if s == "" { + return false + } + for _, c := range s { + switch { + case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): + // Backslash and quote chars are reserved, but + // otherwise any punctuation chars are allowed + // in a tag name. + default: + if !unicode.IsLetter(c) && !unicode.IsDigit(c) { + return false + } + } + } + return true +} + +const ( + caseMask = ^byte(0x20) // Mask to ignore case in ASCII. + kelvin = '\u212a' + smallLongEss = '\u017f' +) + +// foldFunc returns one of four different case folding equivalence +// functions, from most general (and slow) to fastest: +// +// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8 +// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S') +// 3) asciiEqualFold, no special, but includes non-letters (including _) +// 4) simpleLetterEqualFold, no specials, no non-letters. 
+// +// The letters S and K are special because they map to 3 runes, not just 2: +// * S maps to s and to U+017F 'Å¿' Latin small letter long s +// * k maps to K and to U+212A 'K' Kelvin sign +// See http://play.golang.org/p/tTxjOc0OGo +// +// The returned function is specialized for matching against s and +// should only be given s. It's not curried for performance reasons. +func foldFunc(s []byte) func(s, t []byte) bool { + nonLetter := false + special := false // special letter + for _, b := range s { + if b >= utf8.RuneSelf { + return bytes.EqualFold + } + upper := b & caseMask + if upper < 'A' || upper > 'Z' { + nonLetter = true + } else if upper == 'K' || upper == 'S' { + // See above for why these letters are special. + special = true + } + } + if special { + return equalFoldRight + } + if nonLetter { + return asciiEqualFold + } + return simpleLetterEqualFold +} + +// equalFoldRight is a specialization of bytes.EqualFold when s is +// known to be all ASCII (including punctuation), but contains an 's', +// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. +// See comments on foldFunc. +func equalFoldRight(s, t []byte) bool { + for _, sb := range s { + if len(t) == 0 { + return false + } + tb := t[0] + if tb < utf8.RuneSelf { + if sb != tb { + sbUpper := sb & caseMask + if 'A' <= sbUpper && sbUpper <= 'Z' { + if sbUpper != tb&caseMask { + return false + } + } else { + return false + } + } + t = t[1:] + continue + } + // sb is ASCII and t is not. t must be either kelvin + // sign or long s; sb must be s, S, k, or K. + tr, size := utf8.DecodeRune(t) + switch sb { + case 's', 'S': + if tr != smallLongEss { + return false + } + case 'k', 'K': + if tr != kelvin { + return false + } + default: + return false + } + t = t[size:] + + } + if len(t) > 0 { + return false + } + return true +} + +// asciiEqualFold is a specialization of bytes.EqualFold for use when +// s is all ASCII (but may contain non-letters) and contains no +// special-folding letters. 
+// See comments on foldFunc. +func asciiEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, sb := range s { + tb := t[i] + if sb == tb { + continue + } + if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { + if sb&caseMask != tb&caseMask { + return false + } + } else { + return false + } + } + return true +} + +// simpleLetterEqualFold is a specialization of bytes.EqualFold for +// use when s is all ASCII letters (no underscores, etc) and also +// doesn't contain 'k', 'K', 's', or 'S'. +// See comments on foldFunc. +func simpleLetterEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, b := range s { + if b&caseMask != t[i]&caseMask { + return false + } + } + return true +} + +// tagOptions is the string following a comma in a struct field's "json" +// tag, or the empty string. It does not include the leading comma. +type tagOptions string + +// parseTag splits a struct field's json tag into its name and +// comma-separated options. +func parseTag(tag string) (string, tagOptions) { + if idx := strings.Index(tag, ","); idx != -1 { + return tag[:idx], tagOptions(tag[idx+1:]) + } + return tag, tagOptions("") +} + +// Contains reports whether a comma-separated list of options +// contains a particular substr flag. substr must be surrounded by a +// string boundary or commas. 
+func (o tagOptions) Contains(optionName string) bool { + if len(o) == 0 { + return false + } + s := string(o) + for s != "" { + var next string + i := strings.Index(s, ",") + if i >= 0 { + s, next = s[:i], s[i+1:] + } + if s == optionName { + return true + } + s = next + } + return false +} diff --git a/vendor/k8s.io/client-go/tools/record/OWNERS b/vendor/k8s.io/client-go/tools/record/OWNERS new file mode 100755 index 00000000..4dd54bbc --- /dev/null +++ b/vendor/k8s.io/client-go/tools/record/OWNERS @@ -0,0 +1,27 @@ +reviewers: +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- derekwaynecarr +- caesarxuchao +- vishh +- mikedanese +- liggitt +- nikhiljindal +- erictune +- pmorie +- dchen1107 +- saad-ali +- luxas +- yifan-gu +- eparis +- mwielgus +- timothysc +- jsafrane +- dims +- krousey +- a-robinson +- aveshagarwal +- resouer +- cjcullen diff --git a/vendor/k8s.io/client-go/tools/record/doc.go b/vendor/k8s.io/client-go/tools/record/doc.go new file mode 100644 index 00000000..657ddecb --- /dev/null +++ b/vendor/k8s.io/client-go/tools/record/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package record has all client logic for recording and reporting events. 
+package record // import "k8s.io/client-go/tools/record" diff --git a/vendor/k8s.io/client-go/tools/record/event.go b/vendor/k8s.io/client-go/tools/record/event.go new file mode 100644 index 00000000..b5ec4465 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/record/event.go @@ -0,0 +1,318 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package record + +import ( + "fmt" + "math/rand" + "time" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/clock" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/watch" + restclient "k8s.io/client-go/rest" + ref "k8s.io/client-go/tools/reference" + + "net/http" + + "github.com/golang/glog" +) + +const maxTriesPerEvent = 12 + +var defaultSleepDuration = 10 * time.Second + +const maxQueuedEvents = 1000 + +// EventSink knows how to store events (client.Client implements it.) +// EventSink must respect the namespace that will be embedded in 'event'. +// It is assumed that EventSink will return the same sorts of errors as +// pkg/client's REST client. +type EventSink interface { + Create(event *v1.Event) (*v1.Event, error) + Update(event *v1.Event) (*v1.Event, error) + Patch(oldEvent *v1.Event, data []byte) (*v1.Event, error) +} + +// EventRecorder knows how to record events on behalf of an EventSource. 
+type EventRecorder interface { + // Event constructs an event from the given information and puts it in the queue for sending. + // 'object' is the object this event is about. Event will make a reference-- or you may also + // pass a reference to the object directly. + // 'type' of this event, and can be one of Normal, Warning. New types could be added in future + // 'reason' is the reason this event is generated. 'reason' should be short and unique; it + // should be in UpperCamelCase format (starting with a capital letter). "reason" will be used + // to automate handling of events, so imagine people writing switch statements to handle them. + // You want to make that easy. + // 'message' is intended to be human readable. + // + // The resulting event will be created in the same namespace as the reference object. + Event(object runtime.Object, eventtype, reason, message string) + + // Eventf is just like Event, but with Sprintf for the message field. + Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) + + // PastEventf is just like Eventf, but with an option to specify the event's 'timestamp' field. + PastEventf(object runtime.Object, timestamp metav1.Time, eventtype, reason, messageFmt string, args ...interface{}) +} + +// EventBroadcaster knows how to receive events and send them to any EventSink, watcher, or log. +type EventBroadcaster interface { + // StartEventWatcher starts sending events received from this EventBroadcaster to the given + // event handler function. The return value can be ignored or used to stop recording, if + // desired. + StartEventWatcher(eventHandler func(*v1.Event)) watch.Interface + + // StartRecordingToSink starts sending events received from this EventBroadcaster to the given + // sink. The return value can be ignored or used to stop recording, if desired. 
+ StartRecordingToSink(sink EventSink) watch.Interface + + // StartLogging starts sending events received from this EventBroadcaster to the given logging + // function. The return value can be ignored or used to stop recording, if desired. + StartLogging(logf func(format string, args ...interface{})) watch.Interface + + // NewRecorder returns an EventRecorder that can be used to send events to this EventBroadcaster + // with the event source set to the given event source. + NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorder +} + +// Creates a new event broadcaster. +func NewBroadcaster() EventBroadcaster { + return &eventBroadcasterImpl{watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), defaultSleepDuration} +} + +func NewBroadcasterForTests(sleepDuration time.Duration) EventBroadcaster { + return &eventBroadcasterImpl{watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), sleepDuration} +} + +type eventBroadcasterImpl struct { + *watch.Broadcaster + sleepDuration time.Duration +} + +// StartRecordingToSink starts sending events received from the specified eventBroadcaster to the given sink. +// The return value can be ignored or used to stop recording, if desired. +// TODO: make me an object with parameterizable queue length and retry interval +func (eventBroadcaster *eventBroadcasterImpl) StartRecordingToSink(sink EventSink) watch.Interface { + // The default math/rand package functions aren't thread safe, so create a + // new Rand object for each StartRecording call. 
+ randGen := rand.New(rand.NewSource(time.Now().UnixNano())) + eventCorrelator := NewEventCorrelator(clock.RealClock{}) + return eventBroadcaster.StartEventWatcher( + func(event *v1.Event) { + recordToSink(sink, event, eventCorrelator, randGen, eventBroadcaster.sleepDuration) + }) +} + +func recordToSink(sink EventSink, event *v1.Event, eventCorrelator *EventCorrelator, randGen *rand.Rand, sleepDuration time.Duration) { + // Make a copy before modification, because there could be multiple listeners. + // Events are safe to copy like this. + eventCopy := *event + event = &eventCopy + result, err := eventCorrelator.EventCorrelate(event) + if err != nil { + utilruntime.HandleError(err) + } + if result.Skip { + return + } + tries := 0 + for { + if recordEvent(sink, result.Event, result.Patch, result.Event.Count > 1, eventCorrelator) { + break + } + tries++ + if tries >= maxTriesPerEvent { + glog.Errorf("Unable to write event '%#v' (retry limit exceeded!)", event) + break + } + // Randomize the first sleep so that various clients won't all be + // synced up if the master goes down. + if tries == 1 { + time.Sleep(time.Duration(float64(sleepDuration) * randGen.Float64())) + } else { + time.Sleep(sleepDuration) + } + } +} + +func isKeyNotFoundError(err error) bool { + statusErr, _ := err.(*errors.StatusError) + + if statusErr != nil && statusErr.Status().Code == http.StatusNotFound { + return true + } + + return false +} + +// recordEvent attempts to write event to a sink. It returns true if the event +// was successfully recorded or discarded, false if it should be retried. +// If updateExistingEvent is false, it creates a new event, otherwise it updates +// existing event. 
+func recordEvent(sink EventSink, event *v1.Event, patch []byte, updateExistingEvent bool, eventCorrelator *EventCorrelator) bool { + var newEvent *v1.Event + var err error + if updateExistingEvent { + newEvent, err = sink.Patch(event, patch) + } + // Update can fail because the event may have been removed and it no longer exists. + if !updateExistingEvent || (updateExistingEvent && isKeyNotFoundError(err)) { + // Making sure that ResourceVersion is empty on creation + event.ResourceVersion = "" + newEvent, err = sink.Create(event) + } + if err == nil { + // we need to update our event correlator with the server returned state to handle name/resourceversion + eventCorrelator.UpdateState(newEvent) + return true + } + + // If we can't contact the server, then hold everything while we keep trying. + // Otherwise, something about the event is malformed and we should abandon it. + switch err.(type) { + case *restclient.RequestConstructionError: + // We will construct the request the same next time, so don't keep trying. + glog.Errorf("Unable to construct event '%#v': '%v' (will not retry!)", event, err) + return true + case *errors.StatusError: + if errors.IsAlreadyExists(err) { + glog.V(5).Infof("Server rejected event '%#v': '%v' (will not retry!)", event, err) + } else { + glog.Errorf("Server rejected event '%#v': '%v' (will not retry!)", event, err) + } + return true + case *errors.UnexpectedObjectError: + // We don't expect this; it implies the server's response didn't match a + // known pattern. Go ahead and retry. + default: + // This case includes actual http transport errors. Go ahead and retry. + } + glog.Errorf("Unable to write event: '%v' (may retry after sleeping)", err) + return false +} + +// StartLogging starts sending events received from this EventBroadcaster to the given logging function. +// The return value can be ignored or used to stop recording, if desired. 
+func (eventBroadcaster *eventBroadcasterImpl) StartLogging(logf func(format string, args ...interface{})) watch.Interface { + return eventBroadcaster.StartEventWatcher( + func(e *v1.Event) { + logf("Event(%#v): type: '%v' reason: '%v' %v", e.InvolvedObject, e.Type, e.Reason, e.Message) + }) +} + +// StartEventWatcher starts sending events received from this EventBroadcaster to the given event handler function. +// The return value can be ignored or used to stop recording, if desired. +func (eventBroadcaster *eventBroadcasterImpl) StartEventWatcher(eventHandler func(*v1.Event)) watch.Interface { + watcher := eventBroadcaster.Watch() + go func() { + defer utilruntime.HandleCrash() + for { + watchEvent, open := <-watcher.ResultChan() + if !open { + return + } + event, ok := watchEvent.Object.(*v1.Event) + if !ok { + // This is all local, so there's no reason this should + // ever happen. + continue + } + eventHandler(event) + } + }() + return watcher +} + +// NewRecorder returns an EventRecorder that records events with the given event source. +func (eventBroadcaster *eventBroadcasterImpl) NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorder { + return &recorderImpl{scheme, source, eventBroadcaster.Broadcaster, clock.RealClock{}} +} + +type recorderImpl struct { + scheme *runtime.Scheme + source v1.EventSource + *watch.Broadcaster + clock clock.Clock +} + +func (recorder *recorderImpl) generateEvent(object runtime.Object, timestamp metav1.Time, eventtype, reason, message string) { + ref, err := ref.GetReference(recorder.scheme, object) + if err != nil { + glog.Errorf("Could not construct reference to: '%#v' due to: '%v'. 
Will not report event: '%v' '%v' '%v'", object, err, eventtype, reason, message) + return + } + + if !validateEventType(eventtype) { + glog.Errorf("Unsupported event type: '%v'", eventtype) + return + } + + event := recorder.makeEvent(ref, eventtype, reason, message) + event.Source = recorder.source + + go func() { + // NOTE: events should be a non-blocking operation + defer utilruntime.HandleCrash() + recorder.Action(watch.Added, event) + }() +} + +func validateEventType(eventtype string) bool { + switch eventtype { + case v1.EventTypeNormal, v1.EventTypeWarning: + return true + } + return false +} + +func (recorder *recorderImpl) Event(object runtime.Object, eventtype, reason, message string) { + recorder.generateEvent(object, metav1.Now(), eventtype, reason, message) +} + +func (recorder *recorderImpl) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { + recorder.Event(object, eventtype, reason, fmt.Sprintf(messageFmt, args...)) +} + +func (recorder *recorderImpl) PastEventf(object runtime.Object, timestamp metav1.Time, eventtype, reason, messageFmt string, args ...interface{}) { + recorder.generateEvent(object, timestamp, eventtype, reason, fmt.Sprintf(messageFmt, args...)) +} + +func (recorder *recorderImpl) makeEvent(ref *v1.ObjectReference, eventtype, reason, message string) *v1.Event { + t := metav1.Time{Time: recorder.clock.Now()} + namespace := ref.Namespace + if namespace == "" { + namespace = metav1.NamespaceDefault + } + return &v1.Event{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%v.%x", ref.Name, t.UnixNano()), + Namespace: namespace, + }, + InvolvedObject: *ref, + Reason: reason, + Message: message, + FirstTimestamp: t, + LastTimestamp: t, + Count: 1, + Type: eventtype, + } +} diff --git a/vendor/k8s.io/client-go/tools/record/events_cache.go b/vendor/k8s.io/client-go/tools/record/events_cache.go new file mode 100644 index 00000000..6ac767c9 --- /dev/null +++ 
b/vendor/k8s.io/client-go/tools/record/events_cache.go @@ -0,0 +1,467 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package record + +import ( + "encoding/json" + "fmt" + "strings" + "sync" + "time" + + "github.com/golang/groupcache/lru" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/client-go/util/flowcontrol" +) + +const ( + maxLruCacheEntries = 4096 + + // if we see the same event that varies only by message + // more than 10 times in a 10 minute period, aggregate the event + defaultAggregateMaxEvents = 10 + defaultAggregateIntervalInSeconds = 600 + + // by default, allow a source to send 25 events about an object + // but control the refill rate to 1 new event every 5 minutes + // this helps control the long-tail of events for things that are always + // unhealthy + defaultSpamBurst = 25 + defaultSpamQPS = 1. / 300. 
+) + +// getEventKey builds unique event key based on source, involvedObject, reason, message +func getEventKey(event *v1.Event) string { + return strings.Join([]string{ + event.Source.Component, + event.Source.Host, + event.InvolvedObject.Kind, + event.InvolvedObject.Namespace, + event.InvolvedObject.Name, + event.InvolvedObject.FieldPath, + string(event.InvolvedObject.UID), + event.InvolvedObject.APIVersion, + event.Type, + event.Reason, + event.Message, + }, + "") +} + +// getSpamKey builds unique event key based on source, involvedObject +func getSpamKey(event *v1.Event) string { + return strings.Join([]string{ + event.Source.Component, + event.Source.Host, + event.InvolvedObject.Kind, + event.InvolvedObject.Namespace, + event.InvolvedObject.Name, + string(event.InvolvedObject.UID), + event.InvolvedObject.APIVersion, + }, + "") +} + +// EventFilterFunc is a function that returns true if the event should be skipped +type EventFilterFunc func(event *v1.Event) bool + +// DefaultEventFilterFunc returns false for all incoming events +func DefaultEventFilterFunc(event *v1.Event) bool { + return false +} + +// EventSourceObjectSpamFilter is responsible for throttling +// the amount of events a source and object can produce. +type EventSourceObjectSpamFilter struct { + sync.RWMutex + + // the cache that manages last synced state + cache *lru.Cache + + // burst is the amount of events we allow per source + object + burst int + + // qps is the refill rate of the token bucket in queries per second + qps float32 + + // clock is used to allow for testing over a time interval + clock clock.Clock +} + +// NewEventSourceObjectSpamFilter allows burst events from a source about an object with the specified qps refill. 
+func NewEventSourceObjectSpamFilter(lruCacheSize, burst int, qps float32, clock clock.Clock) *EventSourceObjectSpamFilter { + return &EventSourceObjectSpamFilter{ + cache: lru.New(lruCacheSize), + burst: burst, + qps: qps, + clock: clock, + } +} + +// spamRecord holds data used to perform spam filtering decisions. +type spamRecord struct { + // rateLimiter controls the rate of events about this object + rateLimiter flowcontrol.RateLimiter +} + +// Filter controls that a given source+object are not exceeding the allowed rate. +func (f *EventSourceObjectSpamFilter) Filter(event *v1.Event) bool { + var record spamRecord + + // controls our cached information about this event (source+object) + eventKey := getSpamKey(event) + + // do we have a record of similar events in our cache? + f.Lock() + defer f.Unlock() + value, found := f.cache.Get(eventKey) + if found { + record = value.(spamRecord) + } + + // verify we have a rate limiter for this record + if record.rateLimiter == nil { + record.rateLimiter = flowcontrol.NewTokenBucketRateLimiterWithClock(f.qps, f.burst, f.clock) + } + + // ensure we have available rate + filter := !record.rateLimiter.TryAccept() + + // update the cache + f.cache.Add(eventKey, record) + + return filter +} + +// EventAggregatorKeyFunc is responsible for grouping events for aggregation +// It returns a tuple of the following: +// aggregateKey - key the identifies the aggregate group to bucket this event +// localKey - key that makes this event in the local group +type EventAggregatorKeyFunc func(event *v1.Event) (aggregateKey string, localKey string) + +// EventAggregatorByReasonFunc aggregates events by exact match on event.Source, event.InvolvedObject, event.Type and event.Reason +func EventAggregatorByReasonFunc(event *v1.Event) (string, string) { + return strings.Join([]string{ + event.Source.Component, + event.Source.Host, + event.InvolvedObject.Kind, + event.InvolvedObject.Namespace, + event.InvolvedObject.Name, + 
string(event.InvolvedObject.UID), + event.InvolvedObject.APIVersion, + event.Type, + event.Reason, + }, + ""), event.Message +} + +// EventAggregatorMessageFunc is responsible for producing an aggregation message +type EventAggregatorMessageFunc func(event *v1.Event) string + +// EventAggregratorByReasonMessageFunc returns an aggregate message by prefixing the incoming message +func EventAggregatorByReasonMessageFunc(event *v1.Event) string { + return "(combined from similar events): " + event.Message +} + +// EventAggregator identifies similar events and aggregates them into a single event +type EventAggregator struct { + sync.RWMutex + + // The cache that manages aggregation state + cache *lru.Cache + + // The function that groups events for aggregation + keyFunc EventAggregatorKeyFunc + + // The function that generates a message for an aggregate event + messageFunc EventAggregatorMessageFunc + + // The maximum number of events in the specified interval before aggregation occurs + maxEvents uint + + // The amount of time in seconds that must transpire since the last occurrence of a similar event before it's considered new + maxIntervalInSeconds uint + + // clock is used to allow for testing over a time interval + clock clock.Clock +} + +// NewEventAggregator returns a new instance of an EventAggregator +func NewEventAggregator(lruCacheSize int, keyFunc EventAggregatorKeyFunc, messageFunc EventAggregatorMessageFunc, + maxEvents int, maxIntervalInSeconds int, clock clock.Clock) *EventAggregator { + return &EventAggregator{ + cache: lru.New(lruCacheSize), + keyFunc: keyFunc, + messageFunc: messageFunc, + maxEvents: uint(maxEvents), + maxIntervalInSeconds: uint(maxIntervalInSeconds), + clock: clock, + } +} + +// aggregateRecord holds data used to perform aggregation decisions +type aggregateRecord struct { + // we track the number of unique local keys we have seen in the aggregate set to know when to actually aggregate + // if the size of this set exceeds the max, we 
know we need to aggregate + localKeys sets.String + // The last time at which the aggregate was recorded + lastTimestamp metav1.Time +} + +// EventAggregate checks if a similar event has been seen according to the +// aggregation configuration (max events, max interval, etc) and returns: +// +// - The (potentially modified) event that should be created +// - The cache key for the event, for correlation purposes. This will be set to +// the full key for normal events, and to the result of +// EventAggregatorMessageFunc for aggregate events. +func (e *EventAggregator) EventAggregate(newEvent *v1.Event) (*v1.Event, string) { + now := metav1.NewTime(e.clock.Now()) + var record aggregateRecord + // eventKey is the full cache key for this event + eventKey := getEventKey(newEvent) + // aggregateKey is for the aggregate event, if one is needed. + aggregateKey, localKey := e.keyFunc(newEvent) + + // Do we have a record of similar events in our cache? + e.Lock() + defer e.Unlock() + value, found := e.cache.Get(aggregateKey) + if found { + record = value.(aggregateRecord) + } + + // Is the previous record too old? If so, make a fresh one. Note: if we didn't + // find a similar record, its lastTimestamp will be the zero value, so we + // create a new one in that case. 
+ maxInterval := time.Duration(e.maxIntervalInSeconds) * time.Second + interval := now.Time.Sub(record.lastTimestamp.Time) + if interval > maxInterval { + record = aggregateRecord{localKeys: sets.NewString()} + } + + // Write the new event into the aggregation record and put it on the cache + record.localKeys.Insert(localKey) + record.lastTimestamp = now + e.cache.Add(aggregateKey, record) + + // If we are not yet over the threshold for unique events, don't correlate them + if uint(record.localKeys.Len()) < e.maxEvents { + return newEvent, eventKey + } + + // do not grow our local key set any larger than max + record.localKeys.PopAny() + + // create a new aggregate event, and return the aggregateKey as the cache key + // (so that it can be overwritten.) + eventCopy := &v1.Event{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%v.%x", newEvent.InvolvedObject.Name, now.UnixNano()), + Namespace: newEvent.Namespace, + }, + Count: 1, + FirstTimestamp: now, + InvolvedObject: newEvent.InvolvedObject, + LastTimestamp: now, + Message: e.messageFunc(newEvent), + Type: newEvent.Type, + Reason: newEvent.Reason, + Source: newEvent.Source, + } + return eventCopy, aggregateKey +} + +// eventLog records data about when an event was observed +type eventLog struct { + // The number of times the event has occurred since first occurrence. + count uint + + // The time at which the event was first recorded. 
+ firstTimestamp metav1.Time + + // The unique name of the first occurrence of this event + name string + + // Resource version returned from previous interaction with server + resourceVersion string +} + +// eventLogger logs occurrences of an event +type eventLogger struct { + sync.RWMutex + cache *lru.Cache + clock clock.Clock +} + +// newEventLogger observes events and counts their frequencies +func newEventLogger(lruCacheEntries int, clock clock.Clock) *eventLogger { + return &eventLogger{cache: lru.New(lruCacheEntries), clock: clock} +} + +// eventObserve records an event, or updates an existing one if key is a cache hit +func (e *eventLogger) eventObserve(newEvent *v1.Event, key string) (*v1.Event, []byte, error) { + var ( + patch []byte + err error + ) + eventCopy := *newEvent + event := &eventCopy + + e.Lock() + defer e.Unlock() + + // Check if there is an existing event we should update + lastObservation := e.lastEventObservationFromCache(key) + + // If we found a result, prepare a patch + if lastObservation.count > 0 { + // update the event based on the last observation so patch will work as desired + event.Name = lastObservation.name + event.ResourceVersion = lastObservation.resourceVersion + event.FirstTimestamp = lastObservation.firstTimestamp + event.Count = int32(lastObservation.count) + 1 + + eventCopy2 := *event + eventCopy2.Count = 0 + eventCopy2.LastTimestamp = metav1.NewTime(time.Unix(0, 0)) + eventCopy2.Message = "" + + newData, _ := json.Marshal(event) + oldData, _ := json.Marshal(eventCopy2) + patch, err = strategicpatch.CreateTwoWayMergePatch(oldData, newData, event) + } + + // record our new observation + e.cache.Add( + key, + eventLog{ + count: uint(event.Count), + firstTimestamp: event.FirstTimestamp, + name: event.Name, + resourceVersion: event.ResourceVersion, + }, + ) + return event, patch, err +} + +// updateState updates its internal tracking information based on latest server state +func (e *eventLogger) updateState(event *v1.Event) 
{ + key := getEventKey(event) + e.Lock() + defer e.Unlock() + // record our new observation + e.cache.Add( + key, + eventLog{ + count: uint(event.Count), + firstTimestamp: event.FirstTimestamp, + name: event.Name, + resourceVersion: event.ResourceVersion, + }, + ) +} + +// lastEventObservationFromCache returns the event from the cache, reads must be protected via external lock +func (e *eventLogger) lastEventObservationFromCache(key string) eventLog { + value, ok := e.cache.Get(key) + if ok { + observationValue, ok := value.(eventLog) + if ok { + return observationValue + } + } + return eventLog{} +} + +// EventCorrelator processes all incoming events and performs analysis to avoid overwhelming the system. It can filter all +// incoming events to see if the event should be filtered from further processing. It can aggregate similar events that occur +// frequently to protect the system from spamming events that are difficult for users to distinguish. It performs de-duplication +// to ensure events that are observed multiple times are compacted into a single event with increasing counts. +type EventCorrelator struct { + // the function to filter the event + filterFunc EventFilterFunc + // the object that performs event aggregation + aggregator *EventAggregator + // the object that observes events as they come through + logger *eventLogger +} + +// EventCorrelateResult is the result of a Correlate +type EventCorrelateResult struct { + // the event after correlation + Event *v1.Event + // if provided, perform a strategic patch when updating the record on the server + Patch []byte + // if true, do no further processing of the event + Skip bool +} + +// NewEventCorrelator returns an EventCorrelator configured with default values. +// +// The EventCorrelator is responsible for event filtering, aggregating, and counting +// prior to interacting with the API server to record the event. 
+// +// The default behavior is as follows: +// * Aggregation is performed if a similar event is recorded 10 times in a +// in a 10 minute rolling interval. A similar event is an event that varies only by +// the Event.Message field. Rather than recording the precise event, aggregation +// will create a new event whose message reports that it has combined events with +// the same reason. +// * Events are incrementally counted if the exact same event is encountered multiple +// times. +// * A source may burst 25 events about an object, but has a refill rate budget +// per object of 1 event every 5 minutes to control long-tail of spam. +func NewEventCorrelator(clock clock.Clock) *EventCorrelator { + cacheSize := maxLruCacheEntries + spamFilter := NewEventSourceObjectSpamFilter(cacheSize, defaultSpamBurst, defaultSpamQPS, clock) + return &EventCorrelator{ + filterFunc: spamFilter.Filter, + aggregator: NewEventAggregator( + cacheSize, + EventAggregatorByReasonFunc, + EventAggregatorByReasonMessageFunc, + defaultAggregateMaxEvents, + defaultAggregateIntervalInSeconds, + clock), + + logger: newEventLogger(cacheSize, clock), + } +} + +// EventCorrelate filters, aggregates, counts, and de-duplicates all incoming events +func (c *EventCorrelator) EventCorrelate(newEvent *v1.Event) (*EventCorrelateResult, error) { + if newEvent == nil { + return nil, fmt.Errorf("event is nil") + } + aggregateEvent, ckey := c.aggregator.EventAggregate(newEvent) + observedEvent, patch, err := c.logger.eventObserve(aggregateEvent, ckey) + if c.filterFunc(observedEvent) { + return &EventCorrelateResult{Skip: true}, nil + } + return &EventCorrelateResult{Event: observedEvent, Patch: patch}, err +} + +// UpdateState based on the latest observed state from server +func (c *EventCorrelator) UpdateState(event *v1.Event) { + c.logger.updateState(event) +} diff --git a/vendor/k8s.io/client-go/tools/record/fake.go b/vendor/k8s.io/client-go/tools/record/fake.go new file mode 100644 index 
00000000..c0e8eedb --- /dev/null +++ b/vendor/k8s.io/client-go/tools/record/fake.go @@ -0,0 +1,54 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package record + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// FakeRecorder is used as a fake during tests. It is thread safe. It is usable +// when created manually and not by NewFakeRecorder, however all events may be +// thrown away in this case. +type FakeRecorder struct { + Events chan string +} + +func (f *FakeRecorder) Event(object runtime.Object, eventtype, reason, message string) { + if f.Events != nil { + f.Events <- fmt.Sprintf("%s %s %s", eventtype, reason, message) + } +} + +func (f *FakeRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { + if f.Events != nil { + f.Events <- fmt.Sprintf(eventtype+" "+reason+" "+messageFmt, args...) + } +} + +func (f *FakeRecorder) PastEventf(object runtime.Object, timestamp metav1.Time, eventtype, reason, messageFmt string, args ...interface{}) { +} + +// NewFakeRecorder creates new fake event recorder with event channel with +// buffer of given size. 
+func NewFakeRecorder(bufferSize int) *FakeRecorder { + return &FakeRecorder{ + Events: make(chan string, bufferSize), + } +} diff --git a/vendor/k8s.io/kube-openapi/LICENSE b/vendor/k8s.io/kube-openapi/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/doc.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/doc.go new file mode 100644 index 00000000..11ed8a6b --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package proto is a collection of libraries for parsing and indexing the type definitions. +// The openapi spec contains the object model definitions and extensions metadata. +package proto diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go new file mode 100644 index 00000000..61dbf4fc --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go @@ -0,0 +1,285 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package proto + +import ( + "fmt" + "sort" + "strings" + + openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2" + yaml "gopkg.in/yaml.v2" +) + +func newSchemaError(path *Path, format string, a ...interface{}) error { + err := fmt.Sprintf(format, a...) + if path.Len() == 0 { + return fmt.Errorf("SchemaError: %v", err) + } + return fmt.Errorf("SchemaError(%v): %v", path, err) +} + +// VendorExtensionToMap converts openapi VendorExtension to a map. +func VendorExtensionToMap(e []*openapi_v2.NamedAny) map[string]interface{} { + values := map[string]interface{}{} + + for _, na := range e { + if na.GetName() == "" || na.GetValue() == nil { + continue + } + if na.GetValue().GetYaml() == "" { + continue + } + var value interface{} + err := yaml.Unmarshal([]byte(na.GetValue().GetYaml()), &value) + if err != nil { + continue + } + + values[na.GetName()] = value + } + + return values +} + +// Definitions is an implementation of `Models`. It looks for +// models in an openapi Schema. +type Definitions struct { + models map[string]Schema +} + +var _ Models = &Definitions{} + +// NewOpenAPIData creates a new `Models` out of the openapi document. +func NewOpenAPIData(doc *openapi_v2.Document) (Models, error) { + definitions := Definitions{ + models: map[string]Schema{}, + } + + // Save the list of all models first. This will allow us to + // validate that we don't have any dangling reference. 
+ for _, namedSchema := range doc.GetDefinitions().GetAdditionalProperties() { + definitions.models[namedSchema.GetName()] = nil + } + + // Now, parse each model. We can validate that references exists. + for _, namedSchema := range doc.GetDefinitions().GetAdditionalProperties() { + path := NewPath(namedSchema.GetName()) + schema, err := definitions.ParseSchema(namedSchema.GetValue(), &path) + if err != nil { + return nil, err + } + definitions.models[namedSchema.GetName()] = schema + } + + return &definitions, nil +} + +// We believe the schema is a reference, verify that and returns a new +// Schema +func (d *Definitions) parseReference(s *openapi_v2.Schema, path *Path) (Schema, error) { + if len(s.GetProperties().GetAdditionalProperties()) > 0 { + return nil, newSchemaError(path, "unallowed embedded type definition") + } + if len(s.GetType().GetValue()) > 0 { + return nil, newSchemaError(path, "definition reference can't have a type") + } + + if !strings.HasPrefix(s.GetXRef(), "#/definitions/") { + return nil, newSchemaError(path, "unallowed reference to non-definition %q", s.GetXRef()) + } + reference := strings.TrimPrefix(s.GetXRef(), "#/definitions/") + if _, ok := d.models[reference]; !ok { + return nil, newSchemaError(path, "unknown model in reference: %q", reference) + } + return &Ref{ + BaseSchema: d.parseBaseSchema(s, path), + reference: reference, + definitions: d, + }, nil +} + +func (d *Definitions) parseBaseSchema(s *openapi_v2.Schema, path *Path) BaseSchema { + return BaseSchema{ + Description: s.GetDescription(), + Extensions: VendorExtensionToMap(s.GetVendorExtension()), + Path: *path, + } +} + +// We believe the schema is a map, verify and return a new schema +func (d *Definitions) parseMap(s *openapi_v2.Schema, path *Path) (Schema, error) { + if len(s.GetType().GetValue()) != 0 && s.GetType().GetValue()[0] != object { + return nil, newSchemaError(path, "invalid object type") + } + if s.GetAdditionalProperties().GetSchema() == nil { + return nil, 
newSchemaError(path, "invalid object doesn't have additional properties") + } + sub, err := d.ParseSchema(s.GetAdditionalProperties().GetSchema(), path) + if err != nil { + return nil, err + } + return &Map{ + BaseSchema: d.parseBaseSchema(s, path), + SubType: sub, + }, nil +} + +func (d *Definitions) parsePrimitive(s *openapi_v2.Schema, path *Path) (Schema, error) { + var t string + if len(s.GetType().GetValue()) > 1 { + return nil, newSchemaError(path, "primitive can't have more than 1 type") + } + if len(s.GetType().GetValue()) == 1 { + t = s.GetType().GetValue()[0] + } + switch t { + case String: + case Number: + case Integer: + case Boolean: + case "": // Some models are completely empty, and can be safely ignored. + // Do nothing + default: + return nil, newSchemaError(path, "Unknown primitive type: %q", t) + } + return &Primitive{ + BaseSchema: d.parseBaseSchema(s, path), + Type: t, + Format: s.GetFormat(), + }, nil +} + +func (d *Definitions) parseArray(s *openapi_v2.Schema, path *Path) (Schema, error) { + if len(s.GetType().GetValue()) != 1 { + return nil, newSchemaError(path, "array should have exactly one type") + } + if s.GetType().GetValue()[0] != array { + return nil, newSchemaError(path, `array should have type "array"`) + } + if len(s.GetItems().GetSchema()) != 1 { + return nil, newSchemaError(path, "array should have exactly one sub-item") + } + sub, err := d.ParseSchema(s.GetItems().GetSchema()[0], path) + if err != nil { + return nil, err + } + return &Array{ + BaseSchema: d.parseBaseSchema(s, path), + SubType: sub, + }, nil +} + +func (d *Definitions) parseKind(s *openapi_v2.Schema, path *Path) (Schema, error) { + if len(s.GetType().GetValue()) != 0 && s.GetType().GetValue()[0] != object { + return nil, newSchemaError(path, "invalid object type") + } + if s.GetProperties() == nil { + return nil, newSchemaError(path, "object doesn't have properties") + } + + fields := map[string]Schema{} + + for _, namedSchema := range 
s.GetProperties().GetAdditionalProperties() { + var err error + path := path.FieldPath(namedSchema.GetName()) + fields[namedSchema.GetName()], err = d.ParseSchema(namedSchema.GetValue(), &path) + if err != nil { + return nil, err + } + } + + return &Kind{ + BaseSchema: d.parseBaseSchema(s, path), + RequiredFields: s.GetRequired(), + Fields: fields, + }, nil +} + +func (d *Definitions) parseArbitrary(s *openapi_v2.Schema, path *Path) (Schema, error) { + return &Arbitrary{ + BaseSchema: d.parseBaseSchema(s, path), + }, nil +} + +// ParseSchema creates a walkable Schema from an openapi schema. While +// this function is public, it doesn't leak through the interface. +func (d *Definitions) ParseSchema(s *openapi_v2.Schema, path *Path) (Schema, error) { + objectTypes := s.GetType().GetValue() + if len(objectTypes) == 1 { + t := objectTypes[0] + switch t { + case object: + return d.parseMap(s, path) + case array: + return d.parseArray(s, path) + } + + } + if s.GetXRef() != "" { + return d.parseReference(s, path) + } + if s.GetProperties() != nil { + return d.parseKind(s, path) + } + if len(objectTypes) == 0 || (len(objectTypes) == 1 && objectTypes[0] == "") { + return d.parseArbitrary(s, path) + } + return d.parsePrimitive(s, path) +} + +// LookupModel is public through the interface of Models. It +// returns a visitable schema from the given model name. 
+func (d *Definitions) LookupModel(model string) Schema { + return d.models[model] +} + +func (d *Definitions) ListModels() []string { + models := []string{} + + for model := range d.models { + models = append(models, model) + } + + sort.Strings(models) + return models +} + +type Ref struct { + BaseSchema + + reference string + definitions *Definitions +} + +var _ Reference = &Ref{} + +func (r *Ref) Reference() string { + return r.reference +} + +func (r *Ref) SubSchema() Schema { + return r.definitions.models[r.reference] +} + +func (r *Ref) Accept(v SchemaVisitor) { + v.VisitReference(r) +} + +func (r *Ref) GetName() string { + return fmt.Sprintf("Reference to %q", r.reference) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go new file mode 100644 index 00000000..b48e62c3 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go @@ -0,0 +1,276 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package proto + +import ( + "fmt" + "sort" + "strings" +) + +// Defines openapi types. +const ( + Integer = "integer" + Number = "number" + String = "string" + Boolean = "boolean" + + // These types are private as they should never leak, and are + // represented by actual structs. + array = "array" + object = "object" +) + +// Models interface describe a model provider. They can give you the +// schema for a specific model. 
+type Models interface { + LookupModel(string) Schema + ListModels() []string +} + +// SchemaVisitor is an interface that you need to implement if you want +// to "visit" an openapi schema. A dispatch on the Schema type will call +// the appropriate function based on its actual type: +// - Array is a list of one and only one given subtype +// - Map is a map of string to one and only one given subtype +// - Primitive can be string, integer, number and boolean. +// - Kind is an object with specific fields mapping to specific types. +// - Reference is a link to another definition. +type SchemaVisitor interface { + VisitArray(*Array) + VisitMap(*Map) + VisitPrimitive(*Primitive) + VisitKind(*Kind) + VisitReference(Reference) +} + +// SchemaVisitorArbitrary is an additional visitor interface which handles +// arbitrary types. For backwards compatability, it's a separate interface +// which is checked for at runtime. +type SchemaVisitorArbitrary interface { + SchemaVisitor + VisitArbitrary(*Arbitrary) +} + +// Schema is the base definition of an openapi type. +type Schema interface { + // Giving a visitor here will let you visit the actual type. + Accept(SchemaVisitor) + + // Pretty print the name of the type. + GetName() string + // Describes how to access this field. + GetPath() *Path + // Describes the field. + GetDescription() string + // Returns type extensions. 
+ GetExtensions() map[string]interface{} +} + +// Path helps us keep track of type paths +type Path struct { + parent *Path + key string +} + +func NewPath(key string) Path { + return Path{key: key} +} + +func (p *Path) Get() []string { + if p == nil { + return []string{} + } + if p.key == "" { + return p.parent.Get() + } + return append(p.parent.Get(), p.key) +} + +func (p *Path) Len() int { + return len(p.Get()) +} + +func (p *Path) String() string { + return strings.Join(p.Get(), "") +} + +// ArrayPath appends an array index and creates a new path +func (p *Path) ArrayPath(i int) Path { + return Path{ + parent: p, + key: fmt.Sprintf("[%d]", i), + } +} + +// FieldPath appends a field name and creates a new path +func (p *Path) FieldPath(field string) Path { + return Path{ + parent: p, + key: fmt.Sprintf(".%s", field), + } +} + +// BaseSchema holds data used by each types of schema. +type BaseSchema struct { + Description string + Extensions map[string]interface{} + + Path Path +} + +func (b *BaseSchema) GetDescription() string { + return b.Description +} + +func (b *BaseSchema) GetExtensions() map[string]interface{} { + return b.Extensions +} + +func (b *BaseSchema) GetPath() *Path { + return &b.Path +} + +// Array must have all its element of the same `SubType`. +type Array struct { + BaseSchema + + SubType Schema +} + +var _ Schema = &Array{} + +func (a *Array) Accept(v SchemaVisitor) { + v.VisitArray(a) +} + +func (a *Array) GetName() string { + return fmt.Sprintf("Array of %s", a.SubType.GetName()) +} + +// Kind is a complex object. It can have multiple different +// subtypes for each field, as defined in the `Fields` field. Mandatory +// fields are listed in `RequiredFields`. The key of the object is +// always of type `string`. +type Kind struct { + BaseSchema + + // Lists names of required fields. + RequiredFields []string + // Maps field names to types. 
+ Fields map[string]Schema +} + +var _ Schema = &Kind{} + +func (k *Kind) Accept(v SchemaVisitor) { + v.VisitKind(k) +} + +func (k *Kind) GetName() string { + properties := []string{} + for key := range k.Fields { + properties = append(properties, key) + } + return fmt.Sprintf("Kind(%v)", properties) +} + +// IsRequired returns true if `field` is a required field for this type. +func (k *Kind) IsRequired(field string) bool { + for _, f := range k.RequiredFields { + if f == field { + return true + } + } + return false +} + +// Keys returns a alphabetically sorted list of keys. +func (k *Kind) Keys() []string { + keys := make([]string, 0) + for key := range k.Fields { + keys = append(keys, key) + } + sort.Strings(keys) + return keys +} + +// Map is an object who values must all be of the same `SubType`. +// The key of the object is always of type `string`. +type Map struct { + BaseSchema + + SubType Schema +} + +var _ Schema = &Map{} + +func (m *Map) Accept(v SchemaVisitor) { + v.VisitMap(m) +} + +func (m *Map) GetName() string { + return fmt.Sprintf("Map of %s", m.SubType.GetName()) +} + +// Primitive is a literal. There can be multiple types of primitives, +// and this subtype can be visited through the `subType` field. +type Primitive struct { + BaseSchema + + // Type of a primitive must be one of: integer, number, string, boolean. 
+ Type string + Format string +} + +var _ Schema = &Primitive{} + +func (p *Primitive) Accept(v SchemaVisitor) { + v.VisitPrimitive(p) +} + +func (p *Primitive) GetName() string { + if p.Format == "" { + return p.Type + } + return fmt.Sprintf("%s (%s)", p.Type, p.Format) +} + +// Arbitrary is a value of any type (primitive, object or array) +type Arbitrary struct { + BaseSchema +} + +var _ Schema = &Arbitrary{} + +func (a *Arbitrary) Accept(v SchemaVisitor) { + if visitor, ok := v.(SchemaVisitorArbitrary); ok { + visitor.VisitArbitrary(a) + } +} + +func (a *Arbitrary) GetName() string { + return "Arbitrary value (primitive, object or array)" +} + +// Reference implementation depends on the type of document. +type Reference interface { + Schema + + Reference() string + SubSchema() Schema +} From 9561e7ce5fadd6f5bf1f4d0d8b21e9fa6cd8db6c Mon Sep 17 00:00:00 2001 From: lyt99 Date: Mon, 22 Jun 2020 20:40:41 +0800 Subject: [PATCH 4/7] rule for create event & event recorder in tracing --- daemon/daemon.go | 1 + pkg/tracing/tracing.go | 50 +++++++++++++++++++++++++++++++++++++++++- terway-multiip.yml | 5 +++++ terway.yml | 5 +++++ 4 files changed, 60 insertions(+), 1 deletion(-) diff --git a/daemon/daemon.go b/daemon/daemon.go index de97162a..f11303c2 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -857,6 +857,7 @@ func newNetworkService(configFilePath, kubeconfig, master, daemonMode string) (r // register for tracing _ = tracing.Register(tracing.ResourceTypeNetworkService, "default", netSrv) tracing.RegisterResourceMapping(netSrv) + tracing.RegisterEventRecorder(netSrv.k8s.RecordNodeEvent, netSrv.k8s.RecordPodEvent) return netSrv, nil } diff --git a/pkg/tracing/tracing.go b/pkg/tracing/tracing.go index 44030c11..e3423bad 100644 --- a/pkg/tracing/tracing.go +++ b/pkg/tracing/tracing.go @@ -69,6 +69,12 @@ type ResourceMappingHandler interface { GetResourceMapping() ([]PodResourceMapping, error) } +// PodEventRecorder records event on pod +type PodEventRecorder 
func(podName, podNamespace, eventType, reason, message string) error + +// NodeEventRecorder records event on node +type NodeEventRecorder func(eventType, reason, message string) + type resourceMap map[string]TraceHandler // Tracer manages tracing handlers registered from the system @@ -78,6 +84,8 @@ type Tracer struct { // store TraceHandler by resource name traceMap map[string]resourceMap resourceMapping ResourceMappingHandler + podEvent PodEventRecorder + nodeEvent NodeEventRecorder } func init() { @@ -121,13 +129,19 @@ func (t *Tracer) RegisterResourceMapping(mapping ResourceMappingHandler) { t.resourceMapping = mapping } +// RegisterEventRecorder registers pod & node event recorder to a tracer +func (t *Tracer) RegisterEventRecorder(node NodeEventRecorder, pod PodEventRecorder) { + t.nodeEvent = node + t.podEvent = pod +} + // GetTypes gets all types registered to the tracer func (t *Tracer) GetTypes() []string { t.mtx.Lock() defer t.mtx.Unlock() var names []string - // may be unordered, do we need a sort? 
+ for k := range t.traceMap { + names = append(names, k) + } @@ -204,6 +218,25 @@ func (t *Tracer) Execute(typ, resourceName, cmd string, args []string) (<-chan s return ch, nil } +// RecordPodEvent records pod event via PodEventRecorder +func (t *Tracer) RecordPodEvent(podName, podNamespace, eventType, reason, message string) error { + if t.podEvent == nil { + return errors.New("no pod event recorder registered") + } + + return t.podEvent(podName, podNamespace, eventType, reason, message) +} + +// RecordNodeEvent records node event via NodeEventRecorder +func (t *Tracer) RecordNodeEvent(eventType, reason, message string) error { + if t.nodeEvent == nil { + return errors.New("no node event recorder registered") + } + + t.nodeEvent(eventType, reason, message) + return nil +} + // GetResourceMapping gives the resource mapping from the handler // if the handler has not been registered, there will be error func (t *Tracer) GetResourceMapping() ([]PodResourceMapping, error) { @@ -229,6 +262,21 @@ func Unregister(typ, resourceName string) { defaultTracer.Unregister(typ, resourceName) } +// RegisterEventRecorder registers pod & node event recorder to a tracer +func RegisterEventRecorder(node NodeEventRecorder, pod PodEventRecorder) { + defaultTracer.RegisterEventRecorder(node, pod) +} + +// RecordPodEvent records pod event via PodEventRecorder +func RecordPodEvent(podName, podNamespace, eventType, reason, message string) error { + return defaultTracer.RecordPodEvent(podName, podNamespace, eventType, reason, message) +} + +// RecordNodeEvent records node event via NodeEventRecorder +func RecordNodeEvent(eventType, reason, message string) error { + return defaultTracer.RecordNodeEvent(eventType, reason, message) +} + // NewTracer creates a new tracer func NewTracer() *Tracer { return &Tracer{ diff --git a/terway-multiip.yml b/terway-multiip.yml index 950b3f6d..63e56d0b 100644 --- a/terway-multiip.yml +++ b/terway-multiip.yml @@ -15,6 +15,11 @@ rules: - apiGroups: [""] resources: 
["pods", "nodes", "namespaces", "configmaps", "serviceaccounts"] verbs: ["get", "watch", "list", "update"] +- apiGroups: [""] + resources: + - events + verbs: + - create - apiGroups: ["networking.k8s.io"] resources: - networkpolicies diff --git a/terway.yml b/terway.yml index fd93a633..33128646 100644 --- a/terway.yml +++ b/terway.yml @@ -15,6 +15,11 @@ rules: - apiGroups: [""] resources: ["pods", "nodes", "namespaces", "configmaps", "serviceaccounts"] verbs: ["get", "watch", "list", "update"] +- apiGroups: [""] + resources: + - events + verbs: + - create - apiGroups: ["networking.k8s.io"] resources: - networkpolicies From 10570d75831188a6f632c0687115ccc7169545a2 Mon Sep 17 00:00:00 2001 From: lyt99 Date: Mon, 22 Jun 2020 20:40:41 +0800 Subject: [PATCH 5/7] rule for create event & event recorder in tracing --- daemon/k8s.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/daemon/k8s.go b/daemon/k8s.go index 733fa30a..a966f4d0 100644 --- a/daemon/k8s.go +++ b/daemon/k8s.go @@ -12,6 +12,8 @@ import ( "time" "unicode" + "github.com/AliyunContainerService/terway/pkg/tracing" + "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/clientcmd" @@ -301,7 +303,9 @@ func convertPod(daemonMode string, pod *corev1.Pod) *podInfo { if ingress, err := parseBandwidth(ingressBandwidth); err == nil { pi.TcIngress = ingress } - //TODO write event on pod if parse bandwidth fail + + _ = tracing.RecordPodEvent(pod.Name, pod.Namespace, eventTypeWarning, + "ParseFailed", fmt.Sprintf("Parse bandwidth %s failed.", ingressBandwidth)) } if egressBandwidth, ok := podAnnotation[podEgressBandwidth]; ok { if egress, err := parseBandwidth(egressBandwidth); err == nil { From 501c5d2b37322762767462072ff182de5b5f55e8 Mon Sep 17 00:00:00 2001 From: lyt99 Date: Sun, 28 Jun 2020 10:18:36 +0800 Subject: [PATCH 6/7] change: deferred func for RecordEvent --- plugin/terway/cni.go | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git 
a/plugin/terway/cni.go b/plugin/terway/cni.go index 89786e23..0dde078f 100644 --- a/plugin/terway/cni.go +++ b/plugin/terway/cni.go @@ -90,6 +90,7 @@ func cmdAdd(args *skel.CmdArgs) (err error) { confVersion string cniNetns ns.NetNS ) + confVersion, err = versionDecoder.Decode(args.StdinData) if err != nil { return err @@ -125,6 +126,20 @@ func cmdAdd(args *skel.CmdArgs) (err error) { timeoutContext, cancel := context.WithTimeout(context.Background(), defaultCniTimeout*time.Second) defer cancel() + defer func() { + if err != nil { + _, err = terwayBackendClient.RecordEvent(context.Background(), + &rpc.EventRequest{ + EventTarget: rpc.EventTarget_EventTargetPod, + K8SPodName: string(k8sConfig.K8S_POD_NAME), + K8SPodNamespace: string(k8sConfig.K8S_POD_NAMESPACE), + EventType: rpc.EventType_EventTypeWarning, + Reason: "AllocIPFailed", + Message: err.Error(), + }) + } + }() + allocResult, err := terwayBackendClient.AllocIP( timeoutContext, &rpc.AllocIPRequest{ @@ -155,16 +170,6 @@ func cmdAdd(args *skel.CmdArgs) (err error) { IPType: allocResult.IPType, Reason: fmt.Sprintf("roll back ip for error: %v", err), }) - - _, err = terwayBackendClient.RecordEvent(context.Background(), - &rpc.EventRequest{ - EventTarget: rpc.EventTarget_EventTargetPod, - K8SPodName: string(k8sConfig.K8S_POD_NAME), - K8SPodNamespace: string(k8sConfig.K8S_POD_NAMESPACE), - EventType: rpc.EventType_EventTypeWarning, - Reason: "AllocIPFailed", - Message: err.Error(), - }) } }() From 9edadb75b57ea1029350b5216ce2a668c5e68ea3 Mon Sep 17 00:00:00 2001 From: Lyt99 Date: Sun, 28 Jun 2020 15:52:26 +0800 Subject: [PATCH 7/7] fix: use timeout context instead of background context --- plugin/terway/cni.go | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/plugin/terway/cni.go b/plugin/terway/cni.go index 0dde078f..35d82d5e 100644 --- a/plugin/terway/cni.go +++ b/plugin/terway/cni.go @@ -128,7 +128,9 @@ func cmdAdd(args *skel.CmdArgs) (err error) { defer func() { if err != nil { - 
_, err = terwayBackendClient.RecordEvent(context.Background(), + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + defer cancel() + _, err = terwayBackendClient.RecordEvent(ctx, &rpc.EventRequest{ EventTarget: rpc.EventTarget_EventTargetPod, K8SPodName: string(k8sConfig.K8S_POD_NAME), @@ -162,7 +164,9 @@ func cmdAdd(args *skel.CmdArgs) (err error) { defer func() { if err != nil { - _, err = terwayBackendClient.ReleaseIP(context.Background(), + ctx, cancel := context.WithTimeout(context.Background(), defaultCniTimeout*time.Second) + defer cancel() + _, err = terwayBackendClient.ReleaseIP(ctx, &rpc.ReleaseIPRequest{ K8SPodName: string(k8sConfig.K8S_POD_NAME), K8SPodNamespace: string(k8sConfig.K8S_POD_NAMESPACE), @@ -364,7 +368,9 @@ func cmdAdd(args *skel.CmdArgs) (err error) { }}, } - _, _ = terwayBackendClient.RecordEvent(context.Background(), + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + defer cancel() + _, _ = terwayBackendClient.RecordEvent(ctx, &rpc.EventRequest{ EventTarget: rpc.EventTarget_EventTargetPod, K8SPodName: string(k8sConfig.K8S_POD_NAME),