From e29ffc55625468f7572ccb2919697dc0dfd77b28 Mon Sep 17 00:00:00 2001
From: Minhan Xia
Date: Wed, 26 Jun 2019 14:45:49 -0700
Subject: [PATCH] address KEP comments

---
 .../sig-network/20190603-EndpointSlice-API.md | 264 +++++++++++++-----
 1 file changed, 194 insertions(+), 70 deletions(-)

diff --git a/keps/sig-network/20190603-EndpointSlice-API.md b/keps/sig-network/20190603-EndpointSlice-API.md
index f635e4daa9f7..453046639824 100644
--- a/keps/sig-network/20190603-EndpointSlice-API.md
+++ b/keps/sig-network/20190603-EndpointSlice-API.md
@@ -39,6 +39,7 @@ The new EndpointSlice API aims to address existing problems as well as leaving r
### Goal
- Support tens of thousands of backend endpoints in a single service on cluster with thousands of nodes.
+- Move the API towards a general-purpose backend discovery API.
- Leave room for foreseeable extension:
  - Support multiple IPs per pod
  - More endpoint states than Ready/NotReady
@@ -51,95 +52,138 @@ The new EndpointSlice API aims to address existing problems as well as leaving r
## Proposal

### EndpointSlice API
-The following new EndpointSlice API will be added to the networking API group.
+The following new EndpointSlice API will be added to the `Discovery` API group.

```
type EndpointSlice struct {
-  metav1.TypeMeta
-  metav1.ObjectMeta
-  Spec EndpointSliceSpec
+  metav1.TypeMeta `json:",inline"`
+  // OwnerReferences should be set when the object is derived from a k8s service.
+  // The object labels may contain the following key:
+  // kubernetes.io/service: the label value indicates the name of the service from which the EndpointSlice is derived.
+  // +optional
+  metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+  // Metadata of the Endpoints in the EndpointSlice.
+  EndpointMeta `json:",inline"`
+  // Endpoints of this EndpointSlice.
+  // +optional
+  // +listType=set
+  Endpoints []Endpoint `json:"endpoints,omitempty" protobuf:"bytes,2,opt,name=endpoints"`
}

-type EndpointSliceSpec struct {
-  Endpoints []Endpoint
-  Ports []EndpointPort
+type Endpoint struct {
+  // Required: Backends of the endpoint. Must contain at least one backend.
+  // The type of backend must conform to the BackendType specified in EndpointMeta.
+  // Different consumers (e.g. kube-proxy) handle different types of backends in the context of their own capabilities.
+  // +listType=set
+  Backends []string `json:"backends,omitempty" protobuf:"bytes,1,opt,name=backends"`
+  // Required: the condition of the endpoint.
+  Condition EndpointCondition `json:"condition,omitempty" protobuf:"bytes,2,opt,name=condition"`
+  // Reference to the object providing the endpoint.
+  // +optional
+  TargetRef *v1.ObjectReference `json:"targetRef,omitempty" protobuf:"bytes,3,opt,name=targetRef"`
+  // Labels can contain arbitrary metadata associated with the endpoint.
+  // The key must conform to the label format.
+  // The labels may contain the following well known keys:
+  // kubernetes.io/nodename: the value indicates the corresponding node where the endpoint is located.
+  // kubernetes.io/hostname: the value indicates the hostname of the endpoint. If the endpoint is derived from a k8s pod, this must match the hostname in the pod spec.
+  // failure-domain.beta.kubernetes.io/zone: the value indicates the zone where the endpoint is located.
+  // failure-domain.beta.kubernetes.io/region: the value indicates the region where the endpoint is located.
+  // +optional
+  Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,4,opt,name=labels"`
}

-type EndpointPort struct {
-  // The name of this port (corresponds to ServicePort.Name).
-  // Must be a DNS_LABEL.
-  // Optional only if one port is defined.
-  Name string
-  // Required: The IP protocol for this port.
-  // Must be UDP, TCP, or SCTP.
-  // Default is TCP.
-  Protocol v1.Protocol
-  // Optional: The port number of the endpoint.
-  // If unspecified, port remapping is not implemented.
-  Port *int32
+type EndpointCondition struct {
+  // Ready indicates if the endpoint is ready to serve traffic.
+  Ready bool `json:"ready,omitempty" protobuf:"bytes,1,opt,name=ready"`
}

-type Endpoint struct {
-  // Required: must contain at least one IP.
-  IPs []string
-  // Optional: The Hostname of this endpoint
-  Hostname string
-  // Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.
-  NodeName *string
-  // Optional: the conditions of the endpoint
-  Condition EndpointConditions
-  // Optional: Reference to object providing the endpoint.
-  TargetRef *v1.ObjectReference
+type EndpointMeta struct {
+  // This field specifies the list of ports associated with each endpoint in the EndpointSlice.
+  // Each EndpointPort must have a unique port name.
+  // +optional
+  // +listType=set
+  Ports []EndpointPort `json:"ports,omitempty" protobuf:"bytes,1,opt,name=ports"`
+  // Required: BackendType specifies the type of endpoint backends contained in the EndpointSlice.
+  BackendType BackendType `json:"backendType,omitempty" protobuf:"bytes,2,opt,name=backendType"`
}

-type EndpointConditions struct {
-  // Matches the Ready condition on pod
-  Ready bool
-  // Matches ContainersReady condition on pod
-  ContainersReady bool
+type EndpointPort struct {
+  // Required: The name of this port.
+  // If the EndpointSlice is derived from a K8s service, this corresponds to ServicePort.Name.
+  // Must be a DNS_LABEL or an empty string.
+  Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
+  // Required: The IP protocol for this port.
+  // Must be UDP, TCP, or SCTP.
+  // Default is TCP.
+  Protocol v1.Protocol `json:"protocol,omitempty" protobuf:"bytes,2,opt,name=protocol"`
+  // The port number of the endpoint.
+  // If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.
+  // +optional
+  Port *int32 `json:"port,omitempty" protobuf:"bytes,3,opt,name=port"`
}

+type BackendType string
+
+const (
+  // SingleIPBackend indicates each endpoint has one IP.
+  // The IP can be a v4 or a v6 IP.
+  SingleIPBackend = BackendType("SingleIP")
+  // DualStackBackend indicates each endpoint has 2 IPs.
+  // One is a v4 IP and the other is a v6 IP.
+  DualStackBackend = BackendType("DualStack")
+  // UrlBackend indicates each endpoint has one URL backend.
+  UrlBackend = BackendType("Url")
+)
+
```

### Mapping
- 1 Service maps to N EndpointSlice objects.
- Each EndpointSlice contains at most 100 endpoints by default (MaxEndpointThreshold: configurable via controller flag).
+- If an EndpointSlice is derived from a K8s service:
+  - The following label is added to identify the corresponding service:
+    - Key: kubernetes.io/service
+    - Value: ${service name}
+  - For EndpointSlice instances that are not derived from kubernetes Services, the above label must not be applied.
+  - The OwnerReferences of the EndpointSlice instances will be set to the corresponding service.
- For backend pods with non-uniform named ports (e.g. a service port targets a named port, and backend pods have different port numbers for the same port name), this would amplify the number of EndpointSlice objects depending on the number of backend groups with the same ports.
- EndpointSlice will be covered by resource quota. This is to limit the max number of EndpointSlice objects in one namespace. This would provide protection for the k8s apiserver. For instance, a malicious user would not be able to DOS the k8s API by creating services selecting all pods.

-### Multiple IPs
-Each endpoint can contain more than one IP corresponding to the pod. The API itself does not enforce the consumer's behavior (e.g. kube-proxy).
-
-### Optional Endpoint Port
-The endpoint port number becomes optional in the EndpointSlice API while the port number field in core/v1 Endpoints API is required. This allows the API to support services with no port remapping or all port services.
+### EndpointMeta (Per EndpointSlice Metadata)
+- **Endpoint Port**: The endpoint port number becomes optional in the EndpointSlice API while the port number field in the core/v1 Endpoints API is required. This allows the API to support services with no port remapping or all-port services.
+- **BackendType**: This is a required field for every EndpointSlice instance. It specifies the backend type of the endpoints in the EndpointSlice resource. The K8s API server will validate that all endpoints in the EndpointSlice match the type. The consumer (e.g. kube-proxy) of an EndpointSlice determines whether it can consume the type of backend. Currently there are 3 valid types:
+  - SingleIP: Each endpoint has only one v4 IP or one v6 IP.
+  - DualStack: Each endpoint has one v4 IP and one v6 IP.
+  - Url: Each endpoint contains one URL backend.
+
+### Endpoint Labels (Per Endpoint Metadata)
+A new labels field (string-to-string map) is added to each endpoint. It can contain arbitrary metadata associated with the endpoint. If the EndpointSlice instance is derived from a K8s service, the labels may contain the following well-known keys:
+- **kubernetes.io/hostname**: the value indicates the hostname of the endpoint. This must match the hostname in the pod spec.
+- **kubernetes.io/nodename**: the value indicates the name of the node where the endpoint is located.
+- By default, the following labels will be copied from the labels of the node hosting the pod:
+  - **failure-domain.beta.kubernetes.io/zone**: the value indicates the zone where the endpoint is located. This must match the corresponding label on the node where the endpoint is located.
+  - **failure-domain.beta.kubernetes.io/region**: the value indicates the region where the endpoint is located. This must match the corresponding label on the node where the endpoint is located.
+- If the k8s service has topology keys specified, the corresponding node labels will be copied to the endpoint labels.

### EndpointSlice Naming
-Use generateName with service name as prefix:
+Use `generateName` with the service name as prefix:
```
-${service name}-${random}
+${service name}.${random}
```
-
-### Label
-For all EndpointSlice objects managed by EndpointSlice controller. The following label is added to identify corresponding service:
-
-- Key: k8s.io/service
-- Value: ${service name}
-
-For self managed EndpointSlice objects, this label is not required.
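+### Example
+
+To make the mapping, naming, and label conventions above concrete, here is a minimal sketch of a single EndpointSlice that the controller might derive from a hypothetical Service named `web` with one ready pod. It uses the Go types proposed in this KEP and assumes the usual `metav1` (k8s.io/apimachinery/pkg/apis/meta/v1) and `v1` (k8s.io/api/core/v1) imports; the service, pod, node, zone, and region names are illustrative only, not part of the proposal.
+
+```
+port := int32(8080)
+
+slice := EndpointSlice{
+  ObjectMeta: metav1.ObjectMeta{
+    // generateName uses the service name as prefix, following ${service name}.${random};
+    // the API server appends the random suffix.
+    GenerateName: "web.",
+    Namespace:    "default",
+    // Label identifying the service this slice is derived from.
+    Labels: map[string]string{"kubernetes.io/service": "web"},
+    // OwnerReferences would point back at the "web" Service object.
+  },
+  EndpointMeta: EndpointMeta{
+    BackendType: SingleIPBackend,
+    Ports:       []EndpointPort{{Name: "http", Protocol: v1.ProtocolTCP, Port: &port}},
+  },
+  Endpoints: []Endpoint{
+    {
+      Backends:  []string{"10.1.2.3"},
+      Condition: EndpointCondition{Ready: true},
+      Labels: map[string]string{
+        "kubernetes.io/hostname":                   "web-0",
+        "kubernetes.io/nodename":                   "node-1",
+        "failure-domain.beta.kubernetes.io/zone":   "zone-a",
+        "failure-domain.beta.kubernetes.io/region": "region-1",
+      },
+    },
+  },
+}
+```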
## Estimation
This section provides comparisons between Endpoints API and EndpointSlice API under 3 scenarios:
- Service Creation/Deletion
- Single Endpoint Update
- Rolling Update
-
```
Number of Backend Pod: P
Number of Node: N
Number of Endpoint Per EndpointSlice: B
-Sample Case: 20,000 endpoints, 5,000 nodes
```
+
+## Sample Case 1: 20,000 endpoints, 5,000 nodes

### Service Creation/Deletion

@@ -185,15 +229,80 @@ Sample Case: 20,000 endpoints, 5,000 nodes
| | 5000 | 5000 | 5000 |
| # of total watch event | O(NP) | O(NP) | O(NP) |
| | 5000 * 20k | 5000 * 20k | 5000 * 20k |
-| Total Bytes Transmitted | O(P^2N) | O(NPB) | O(NP) |
+| Total Bytes Transmitted | O(P^2N) | O(NPB) | O(NP) |
| | 2.0MB * 5000 * 20k = 200 TB | 10KB * 5000 * 20k = 1 TB | ~1KB * 5000 * 20k = ~100 GB |

+## Sample Case 2: 20 endpoints, 10 nodes
+
+### Service Creation/Deletion
+
+| | Endpoints | 100 Endpoints per EndpointSlice | 1 Endpoint per EndpointSlice |
+|--------------------------|-----------------------|---------------------------------|------------------------------|
+| # of writes | O(1) | O(P/B) | O(P) |
+| | 1 | 1 | 20 |
+| Size of API object | O(P) | O(B) | O(1) |
+| | ~1KB | ~1KB | ~1KB |
+| # of watchers per object | O(N) | O(N) | O(N) |
+| | 10 | 10 | 10 |
+| # of total watch event | O(N) | O(NP/B) | O(NP) |
+| | 1 * 10 = 10 | 1 * 10 = 10 | 10 * 20 = 200 |
+| Total Bytes Transmitted | O(PN) | O(PN) | O(PN) |
+| | ~1KB * 10 = 10KB | ~1KB * 10 = 10KB | ~1KB * 200 = 200KB |
+
+### Single Endpoint Update
+
+| | Endpoints | 100 Endpoints per EndpointSlice | 1 Endpoint per EndpointSlice |
+|--------------------------|-----------------------|---------------------------------|------------------------------|
+| # of writes | O(1) | O(1) | O(1) |
+| | 1 | 1 | 1 |
+| Size of API object | O(P) | O(B) | O(1) |
+| | ~1KB | ~1KB | ~1KB |
+| # of watchers per object | O(N) | O(N) | O(N) |
+| | 10 | 10 | 10 |
+| # of total watch event | O(N) | O(N) | O(N) |
+| | 10 | 10 | 10 |
+| Total Bytes Transmitted | O(PN) | O(BN) | O(N) |
+| | ~1KB * 10 = 10KB | ~1KB * 10 = 10KB | ~1KB * 10 = 10KB |
+
+
+### Rolling Update
+
+| | Endpoints | 100 Endpoints per EndpointSlice | 1 Endpoint per EndpointSlice |
+|--------------------------|-----------------------------|---------------------------------|------------------------------|
+| # of writes | O(P) | O(P) | O(P) |
+| | 20 | 20 | 20 |
+| Size of API object | O(P) | O(B) | O(1) |
+| | ~1KB | ~1KB | ~1KB |
+| # of watchers per object | O(N) | O(N) | O(N) |
+| | 10 | 10 | 10 |
+| # of total watch event | O(NP) | O(NP) | O(NP) |
+| | 10 * 20 | 10 * 20 | 10 * 20 |
+| Total Bytes Transmitted | O(P^2N) | O(NPB) | O(NP) |
+| | ~1KB * 10 * 20 = 200KB | ~1KB * 10 * 20 = 200KB | ~1KB * 10 * 20 = 200KB |
+
+
## Implementation
+
+### Requirements
+
+- Persistence (Minimal Churn of Endpoints)
+
+Upon service endpoint changes, the # of object writes and the disruption to ongoing connections should be minimal.
+
+- Handling Restarts & Failures
+
+The producer/consumer of EndpointSlice must be able to handle restarts and recreate state from scratch with minimal change to existing state.
+
+
### EndpointSlice Controller
+A new EndpointSlice Controller will be added to `kube-controller-manager`. It will manage the lifecycle of EndpointSlice instances derived from services.
+```
Watch: Service, Pod ==> Manage: EndpointSlice
+```
+
+#### Workflows
On Service Create/Update/Delete:
- `syncService(svc)`

@@ -201,7 +310,6 @@ On Pod Create/Update/Delete:
- Reverse lookup relevant services
- For each relevant service,
  - `syncService(svc)`
-
`syncService(svc)`:
- Look up selected backend pods
@@ -220,16 +328,19 @@

### Kube-Proxy

-Watch: Service, EndpointSlice ==> Manage: iptables, ipvs, etc
+Kube-proxy will be modified to consume EndpointSlice instances in addition to the Endpoints resource. A flag will be added to kube-proxy to toggle the mode. (An illustrative sketch of the merge step described below is included at the end of this document.)
+```
+Watch: Service, EndpointSlice ==> Manage: iptables, ipvs, etc
+```

- Merge multiple EndpointSlice into an aggregated list.
- Reuse the existing processing logic

### Endpoint Controller (classic)

In order to ensure backward compatibility for external consumer of the core/v1 Endpoints API, the existing K8s endpoint controller will keep running until the API is EOL. The following limitations will apply:

-- Starting from EndpointSlice beta: If # of endpoints in one Endpoints object exceed 100, generate a warning event to the object.
-- Starting from EndpointSlice GA: Only include up to 500 endpoints in one Endpoints Object.
+- Starting from EndpointSlice beta: If the # of endpoints in one Endpoints object exceeds 500, generate a warning event on the object.
+- Starting from EndpointSlice GA: Only include up to 1000 endpoints in one Endpoints object.

## Roll Out Plan

@@ -241,30 +352,43 @@ In order to ensure backward compatibility for external consumer of the core/v1 E
+
+## Graduation Criteria
+
+In order to graduate to beta, we need:
+
+- Kube-proxy to switch to consuming the EndpointSlice API.
+- Performance/scalability to be verified via testing.
+
+## Alternatives
+
+1. increase the etcd size limits
+2. endpoints controller batches / rate limits changes
+3. apiserver batches / rate-limits watch notifications
+4. apimachinery to support object level pagination
+
+
## FAQ

-- Why only include up to 100 endpoints in one EndpointSlice object? Why not 1 endpoint? Why not 1000 endpoints?
+- #### Why not pursue the alternatives?

-Based on the data collected from user clusters, vast majority (> 99%) of the k8s services have less than 100 endpoints. For small services, EndpointSlice API will make no difference. If the MaxEndpointThreshold is too small (e.g. 1 endpoint per EndpointSlice), controller loses capability to batch updates, hence causing worse write amplification on service creation/deletion and scale up/down. Etcd write RPS is significant limiting factor.
+In order to fulfill the goal of this proposal without redesigning the Core/V1 Endpoints API, all of the items listed in the Alternatives section would be required. Item #1 increases the maximum number of endpoints by raising the object size limit, which may bring other performance/scalability implications. Items #2 and #3 can reduce transmission overhead but sacrifice endpoint update latency. Item #4 can further reduce transmission overhead; however, it is a big change to the existing API machinery.

-- Why do we have a status struct for each endpoint? Why not boolean state for readiness?
+In summary, each of the items can only achieve an incremental gain. Compared to this proposal, the combined effort would be equal or greater while achieving smaller performance improvements.

-The current Endpoints API only includes a boolean state (Ready vs. NotReady) on individual endpoint. However, according to pod life cycle, there are more states (e.g. Graceful Termination, ContainerReary). In order to represent additional states other than Ready/NotReady, a status structure is included for each endpoint. More condition types can be added in the future without compatibility disruptions. As more conditions are added, different consumer (e.g. different kube-proxy implementations) will have the option to evaluate the additional conditions.
+In addition, the EndpointSlice API is capable of expressing endpoint subsetting, which is the natural next step for improving k8s service endpoint scalability.

+- #### Why only include up to 100 endpoints in one EndpointSlice object? Why not 1 endpoint? Why not 1000 endpoints?

-## Graduation Criteria
+Based on the data collected from user clusters, the vast majority (> 99%) of k8s services have less than 100 endpoints. For small services, the EndpointSlice API will make no difference. If the MaxEndpointThreshold is too small (e.g. 1 endpoint per EndpointSlice), the controller loses the capability to batch updates, hence causing worse write amplification on service creation/deletion and scale up/down. Etcd write RPS is a significant limiting factor.

-In order to graduate to beta, we need:
+- #### Why do we have a condition struct for each endpoint?

-- Kube-proxy switch to consume EndpointSlice API.
-- Verify performance/scalability via testing.
+The current Endpoints API only includes a boolean state (Ready vs. NotReady) on an individual endpoint. However, according to the pod life cycle, there are more states (e.g. Graceful Termination, ContainersReady). In order to represent additional states other than Ready/NotReady, a condition structure is included for each endpoint. More condition types can be added in the future without compatibility disruptions. As more conditions are added, different consumers (e.g. different kube-proxy implementations) will have the option to evaluate the additional conditions.

-## Alternatives
-- increase the etcd size limits
-- endpoints controller batches / rate limits changes
-- apiserver batches / rate-limits watch notifications
-- apimachinery to support object level pagination
+
+
[original-doc]: https://docs.google.com/document/d/1sLJfolOeEVzK5oOviRmtHOHmke8qtteljQPaDUEukxY/edit#
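+## Appendix: Kube-Proxy Merge Sketch (Illustrative)
+
+The Kube-Proxy section above states that the proxy merges multiple EndpointSlice objects of a service into an aggregated list and then reuses the existing processing logic. The snippet below is a minimal sketch of that merge step only, written against the Go types proposed in this KEP; the function name, the SingleIP-only handling, and the "IP:port" output format are illustrative assumptions, not part of the proposal. It assumes the standard `net` and `strconv` packages are imported.
+
+```
+// mergeReadyBackends flattens the EndpointSlice objects of one service into a
+// map from port name to the ready "IP:port" backends for that port.
+func mergeReadyBackends(slices []EndpointSlice) map[string][]string {
+  merged := map[string][]string{}
+  for _, slice := range slices {
+    // This sketch only understands single-IP backends; other backend types
+    // would be handled according to the consumer's capabilities.
+    if slice.BackendType != SingleIPBackend {
+      continue
+    }
+    for _, ep := range slice.Endpoints {
+      // Skip endpoints that are not ready to serve traffic.
+      if !ep.Condition.Ready {
+        continue
+      }
+      for _, port := range slice.Ports {
+        // A nil port means ports are not restricted; interpretation is left
+        // to the specific consumer, so such ports are skipped in this sketch.
+        if port.Port == nil {
+          continue
+        }
+        for _, ip := range ep.Backends {
+          addr := net.JoinHostPort(ip, strconv.Itoa(int(*port.Port)))
+          merged[port.Name] = append(merged[port.Name], addr)
+        }
+      }
+    }
+  }
+  return merged
+}
+```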