diff --git a/Makefile b/Makefile index 3fe66f4ffd..049cfea48d 100644 --- a/Makefile +++ b/Makefile @@ -45,6 +45,8 @@ E2E_DATA_DIR ?= $(REPO_ROOT)/test/e2e_new/data E2E_CONF_PATH ?= $(E2E_DATA_DIR)/e2e_conf.yaml KUBETEST_CONF_PATH ?= $(abspath $(E2E_DATA_DIR)/kubetest/conformance.yaml) KUBETEST_FAST_CONF_PATH ?= $(abspath $(REPO_ROOT)/test/e2e_new/data/kubetest/conformance-fast.yaml) +CONFORMANCE_CI_TEMPLATE := $(ARTIFACTS)/templates/cluster-template-conformance-ci-artifacts.yaml +EXP_DIR := exp # Binaries. CLUSTERCTL := $(BIN_DIR)/clusterctl @@ -165,7 +167,7 @@ test-conformance-fast: ## Run clusterctl based conformance test on workload clus ## Binaries ## -------------------------------------- .PHONY: binaries -binaries: manager clusterawsadm ## Builds and installs all binaries +binaries: manager clusterawsadm ## Builds and installs all binaries .PHONY: manager manager: ## Build manager binary. @@ -273,6 +275,7 @@ generate: ## Generate code generate-go: $(CONTROLLER_GEN) $(CONVERSION_GEN) $(MOCKGEN) $(DEFAULTER_GEN) ## Runs Go related generate targets $(CONTROLLER_GEN) \ paths=./api/... \ + paths=./$(EXP_DIR)/api/... \ object:headerFile=./hack/boilerplate/boilerplate.generatego.txt $(CONTROLLER_GEN) \ @@ -291,15 +294,17 @@ generate-go: $(CONTROLLER_GEN) $(CONVERSION_GEN) $(MOCKGEN) $(DEFAULTER_GEN) ## go generate ./... .PHONY: generate-manifests -generate-manifests: $(CONTROLLER_GEN) ## Generate manifests e.g. CRD, RBAC etc. +generate-manifests: $(CONTROLLER_GEN) ## Generate manifests for the core provider e.g. CRD, RBAC etc. $(CONTROLLER_GEN) \ paths=./api/... \ + paths=./$(EXP_DIR)/api/... \ crd:crdVersions=v1 \ output:crd:dir=$(CRD_ROOT) \ output:webhook:dir=$(WEBHOOK_ROOT) \ webhook $(CONTROLLER_GEN) \ paths=./controllers/... \ + paths=./$(EXP_DIR)/controllers/... 
\ output:rbac:dir=$(RBAC_ROOT) \ rbac:roleName=manager-role diff --git a/api/v1alpha3/awscluster_types.go b/api/v1alpha3/awscluster_types.go index eda1b82cf0..722565a922 100644 --- a/api/v1alpha3/awscluster_types.go +++ b/api/v1alpha3/awscluster_types.go @@ -48,7 +48,7 @@ type AWSClusterSpec struct { // +optional AdditionalTags Tags `json:"additionalTags,omitempty"` - // ControlPlaneLoadBalancer is optional configuration for customizing control plane behavior + // ControlPlaneLoadBalancer is optional configuration for customizing control plane behavior. // +optional ControlPlaneLoadBalancer *AWSLoadBalancerSpec `json:"controlPlaneLoadBalancer,omitempty"` diff --git a/api/v1alpha3/types.go b/api/v1alpha3/types.go index fe0861c321..77d4ed220e 100644 --- a/api/v1alpha3/types.go +++ b/api/v1alpha3/types.go @@ -343,6 +343,19 @@ func (s Subnets) FilterByZone(zone string) (res Subnets) { return } +// GetUniqueZones returns a slice containing the unique zones of the subnets +func (s Subnets) GetUniqueZones() []string { + keys := make(map[string]bool) + zones := []string{} + for _, x := range s { + if _, value := keys[x.AvailabilityZone]; !value { + keys[x.AvailabilityZone] = true + zones = append(zones, x.AvailabilityZone) + } + } + return zones +} + // CNISpec defines configuration for CNI type CNISpec struct { // CNIIngressRules specify rules to apply to control plane and worker node security groups. 
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml index fa6951a49e..11ce3df714 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml @@ -433,7 +433,7 @@ spec: - port type: object controlPlaneLoadBalancer: - description: ControlPlaneLoadBalancer is optional configuration for customizing control plane behavior + description: ControlPlaneLoadBalancer is optional configuration for customizing control plane behavior. properties: crossZoneLoadBalancing: description: "CrossZoneLoadBalancing enables the classic ELB cross availability zone balancing. \n With cross-zone load balancing, each load balancer node for your Classic Load Balancer distributes requests evenly across the registered instances in all enabled Availability Zones. If cross-zone load balancing is disabled, each load balancer node distributes requests evenly across the registered instances in its Availability Zone only. \n Defaults to false." 
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedclusters.yaml new file mode 100644 index 0000000000..25cf5f4ff3 --- /dev/null +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedclusters.yaml @@ -0,0 +1,104 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.2.9 + creationTimestamp: null + name: awsmanagedclusters.infrastructure.cluster.x-k8s.io +spec: + group: infrastructure.cluster.x-k8s.io + names: + categories: + - cluster-api + kind: AWSManagedCluster + listKind: AWSManagedClusterList + plural: awsmanagedclusters + shortNames: + - awsmc + singular: awsmanagedcluster + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Cluster to which this AWSManagedControl belongs + jsonPath: .metadata.labels.cluster\.x-k8s\.io/cluster-name + name: Cluster + type: string + - description: Control plane infrastructure is ready for worker nodes + jsonPath: .status.ready + name: Ready + type: string + - description: AWS VPC the control plane is using + jsonPath: .spec.networkSpec.vpc.id + name: VPC + type: string + - description: API Endpoint + jsonPath: .spec.controlPlaneEndpoint.host + name: Endpoint + priority: 1 + type: string + name: v1alpha3 + schema: + openAPIV3Schema: + description: AWSManagedCluster is the Schema for the awsmanagedclusters API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. 
Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: AWSManagedClusterSpec defines the desired state of AWSManagedCluster + properties: + controlPlaneEndpoint: + description: ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. + properties: + host: + description: The hostname on which the API server is serving. + type: string + port: + description: The port on which the API server is serving. + format: int32 + type: integer + required: + - host + - port + type: object + type: object + status: + description: AWSManagedClusterStatus defines the observed state of AWSManagedCluster + properties: + failureDomains: + additionalProperties: + description: FailureDomainSpec is the Schema for Cluster API failure domains. It allows controllers to understand how many failure domains a cluster can optionally span across. + properties: + attributes: + additionalProperties: + type: string + description: Attributes is a free form map of attributes an infrastructure provider might use or require. + type: object + controlPlane: + description: ControlPlane determines if this failure domain is suitable for use by control plane machines. + type: boolean + type: object + description: FailureDomains specifies a list fo available availability zones that can be used + type: object + ready: + description: Ready is when the AWSManagedControlPlane has a API server URL. 
+ type: boolean + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml new file mode 100644 index 0000000000..d8734e1fa8 --- /dev/null +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml @@ -0,0 +1,647 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.2.9 + creationTimestamp: null + name: awsmanagedcontrolplanes.infrastructure.cluster.x-k8s.io +spec: + group: infrastructure.cluster.x-k8s.io + names: + categories: + - cluster-api + kind: AWSManagedControlPlane + listKind: AWSManagedControlPlaneList + plural: awsmanagedcontrolplanes + shortNames: + - awsmcp + singular: awsmanagedcontrolplane + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Cluster to which this AWSManagedControl belongs + jsonPath: .metadata.labels.cluster\.x-k8s\.io/cluster-name + name: Cluster + type: string + - description: Control plane infrastructure is ready for worker nodes + jsonPath: .status.ready + name: Ready + type: string + - description: AWS VPC the control plane is using + jsonPath: .spec.networkSpec.vpc.id + name: VPC + type: string + - description: API Endpoint + jsonPath: .spec.controlPlaneEndpoint.host + name: Endpoint + priority: 1 + type: string + - description: Bastion IP address for breakglass access + jsonPath: .status.bastion.publicIp + name: Bastion IP + type: string + name: v1alpha3 + schema: + openAPIV3Schema: + description: AWSManagedControlPlane is the Schema for the awsmanagedcontrolplanes API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: AWSManagedControlPlaneSpec defines the desired state of AWSManagedControlPlane + properties: + additionalTags: + additionalProperties: + type: string + description: AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the ones added by default. + type: object + bastion: + description: Bastion contains options to configure the bastion host. + properties: + allowedCIDRBlocks: + description: AllowedCIDRBlocks is a list of CIDR blocks allowed to access the bastion host. They are set as ingress rules for the Bastion host's Security Group (defaults to 0.0.0.0/0). + items: + type: string + type: array + disableIngressRules: + description: DisableIngressRules will ensure there are no Ingress rules in the bastion host's security group. Requires AllowedCIDRBlocks to be empty. + type: boolean + enabled: + description: Enabled allows this provider to create a bastion host instance with a public ip to access the VPC private network. + type: boolean + instanceType: + description: InstanceType will use the specified instance type for the bastion. If not specified, Cluster API Provider AWS will use t3.micro for all regions except us-east-1, where t2.micro will be the default. 
+ type: string + type: object + controlPlaneEndpoint: + description: ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. + properties: + host: + description: The hostname on which the API server is serving. + type: string + port: + description: The port on which the API server is serving. + format: int32 + type: integer + required: + - host + - port + type: object + eksClusterName: + description: EKSClusterName allows you to specify the name of the EKS cluster in AWS. If you don't specify a name then a default name will be created based on the namespace and name of the managed control plane. + type: string + encryptionConfig: + description: EncryptionConfig specifies the encryption configuration for the cluster + properties: + provider: + description: Provider specifies the ARN or alias of the CMK (in AWS KMS) + type: string + resources: + description: Resources specifies the resources to be encrypted + items: + type: string + type: array + type: object + endpointAccess: + description: Endpoints specifies access to this cluster's control plane endpoints + properties: + private: + description: Private points VPC-internal control plane access to the private endpoint + type: boolean + public: + description: Public controls whether control plane endpoints are publicly accessible + type: boolean + publicCIDRs: + description: PublicCIDRs specifies which blocks can access the public endpoint + items: + type: string + type: array + type: object + imageLookupBaseOS: + description: ImageLookupBaseOS is the name of the base operating system used to look up machine images when a machine does not specify an AMI. When set, this will be used for all cluster machines unless a machine specifies a different ImageLookupBaseOS. + type: string + imageLookupFormat: + description: 'ImageLookupFormat is the AMI naming format to look up machine images when a machine does not specify an AMI. 
When set, this will be used for all cluster machines unless a machine specifies a different ImageLookupOrg. Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base OS and kubernetes version, respectively. The BaseOS will be the value in ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as defined by the packages produced by kubernetes/release without v as a prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See also: https://golang.org/pkg/text/template/' + type: string + imageLookupOrg: + description: ImageLookupOrg is the AWS Organization ID to look up machine images when a machine does not specify an AMI. When set, this will be used for all cluster machines unless a machine specifies a different ImageLookupOrg. + type: string + logging: + description: Logging specifies which EKS Cluster logs should be enabled. 
Entries for each of the enabled logs will be sent to CloudWatch + properties: + apiServer: + default: false + description: APIServer indicates if the Kubernetes API Server log (kube-apiserver) shoulkd be enabled + type: boolean + audit: + default: false + description: Audit indicates if the Kubernetes API audit log should be enabled + type: boolean + authenticator: + default: false + description: Authenticator indicates if the iam authenticator log should be enabled + type: boolean + controllerManager: + default: false + description: ControllerManager indicates if the controller manager (kube-controller-manager) log should be enabled + type: boolean + scheduler: + default: false + description: Scheduler indicates if the Kubernetes scheduler (kube-scheduler) log should be enabled + type: boolean + required: + - apiServer + - audit + - authenticator + - controllerManager + - scheduler + type: object + networkSpec: + description: NetworkSpec encapsulates all things related to AWS network. + properties: + cni: + description: CNI configuration + properties: + cniIngressRules: + description: CNIIngressRules specify rules to apply to control plane and worker node security groups. The source for the rule will be set to control plane and worker security group IDs. + items: + description: CNIIngressRule defines an AWS ingress rule for CNI requirements. + properties: + description: + type: string + fromPort: + format: int64 + type: integer + protocol: + description: SecurityGroupProtocol defines the protocol type for a security group rule. + type: string + toPort: + format: int64 + type: integer + required: + - description + - fromPort + - protocol + - toPort + type: object + type: array + type: object + subnets: + description: Subnets configuration. + items: + description: SubnetSpec configures an AWS Subnet. + properties: + availabilityZone: + description: AvailabilityZone defines the availability zone to use for this subnet in the cluster's region. 
+ type: string + cidrBlock: + description: CidrBlock is the CIDR block to be used when the provider creates a managed VPC. + type: string + id: + description: ID defines a unique identifier to reference this resource. + type: string + isPublic: + description: IsPublic defines the subnet as a public subnet. A subnet is public when it is associated with a route table that has a route to an internet gateway. + type: boolean + natGatewayId: + description: NatGatewayID is the NAT gateway id associated with the subnet. Ignored unless the subnet is managed by the provider, in which case this is set on the public subnet where the NAT gateway resides. It is then used to determine routes for private subnets in the same AZ as the public subnet. + type: string + routeTableId: + description: RouteTableID is the routing table id associated with the subnet. + type: string + tags: + additionalProperties: + type: string + description: Tags is a collection of tags describing the resource. + type: object + type: object + type: array + vpc: + description: VPC configuration. + properties: + availabilityZoneSelection: + default: Ordered + description: 'AvailabilityZoneSelection specifies how AZs should be selected if there are more AZs in a region than specified by AvailabilityZoneUsageLimit. There are 2 selection schemes: Ordered - selects based on alphabetical order Random - selects AZs randomly in a region Defaults to Ordered' + enum: + - Ordered + - Random + type: string + availabilityZoneUsageLimit: + default: 3 + description: AvailabilityZoneUsageLimit specifies the maximum number of availability zones (AZ) that should be used in a region when automatically creating subnets. If a region has more than this number of AZs then this number of AZs will be picked randomly when creating default subnets. Defaults to 3 + minimum: 1 + type: integer + cidrBlock: + description: CidrBlock is the CIDR block to be used when the provider creates a managed VPC. Defaults to 10.0.0.0/16. 
+ type: string + id: + description: ID is the vpc-id of the VPC this provider should use to create resources. + type: string + internetGatewayId: + description: InternetGatewayID is the id of the internet gateway associated with the VPC. + type: string + tags: + additionalProperties: + type: string + description: Tags is a collection of tags describing the resource. + type: object + type: object + type: object + region: + description: The AWS Region the cluster lives in. + type: string + roleAdditionalPolicies: + description: RoleAdditionalPolicies allows you to attach additional polices to the control plane role. You must enable the EKSAllowAddRoles feature flag to incorporate these into the created role. + items: + type: string + type: array + roleName: + description: RoleName specifies the name of IAM role that gives EKS permission to make API calls. If the role is pre-existing we will treat it as unmanaged and not delete it on deletion. If the EKSEnableIAM feature flag is true and no name is supplied then a role is created. + minLength: 2 + type: string + sshKeyName: + description: SSHKeyName is the name of the ssh key to attach to the bastion host. Valid values are empty string (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name) + type: string + tokenMethod: + default: iam-authenticator + description: TokenMethod is used to specify the method for obtaining a client token for communicating with EKS iam-authenticator - obtains a client token using iam-authentictor aws-cli - obtains a client token using the AWS CLI Defaults to iam-authenticator + enum: + - iam-authenticator + - aws-cli + type: string + version: + description: Version defines the desired Kubernetes version. If no version number is supplied then the latest version of Kubernetes that EKS supports will be used. 
+ minLength: 2 + pattern: ^v(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.?$ + type: string + type: object + status: + description: AWSManagedControlPlaneStatus defines the observed state of AWSManagedControlPlane + properties: + bastion: + description: Bastion holds details of the instance that is used as a bastion jump box + properties: + addresses: + description: Addresses contains the AWS instance associated addresses. + items: + description: MachineAddress contains information for the node's address. + properties: + address: + description: The machine address. + type: string + type: + description: Machine address type, one of Hostname, ExternalIP or InternalIP. + type: string + required: + - address + - type + type: object + type: array + availabilityZone: + description: Availability zone of instance + type: string + ebsOptimized: + description: Indicates whether the instance is optimized for Amazon EBS I/O. + type: boolean + enaSupport: + description: Specifies whether enhanced networking with ENA is enabled. + type: boolean + iamProfile: + description: The name of the IAM instance profile associated with the instance, if applicable. + type: string + id: + type: string + imageId: + description: The ID of the AMI used to launch the instance. + type: string + instanceState: + description: The current state of the instance. + type: string + networkInterfaces: + description: Specifies ENIs attached to instance + items: + type: string + type: array + nonRootVolumes: + description: Configuration options for the non root storage volumes. + items: + description: Volume encapsulates the configuration options for the root volume + properties: + deviceName: + description: Device name + type: string + encrypted: + description: Encrypted is whether the volume should be encrypted or not. + type: boolean + encryptionKey: + description: EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN. 
If Encrypted is set and this is omitted, the default AWS key will be used. The key must already exist and be accessible by the controller. + type: string + iops: + description: IOPS is the number of IOPS requested for the disk. Not applicable to all types. + format: int64 + type: integer + size: + description: Size specifies size (in Gi) of the root storage device. Must be greater than the image root snapshot size or 8 (whichever is greater). + format: int64 + minimum: 8 + type: integer + type: + description: Type is the type of the root volume (e.g. gp2, io1, etc...). + type: string + required: + - size + type: object + type: array + privateIp: + description: The private IPv4 address assigned to the instance. + type: string + publicIp: + description: The public IPv4 address assigned to the instance, if applicable. + type: string + rootVolume: + description: Configuration options for the root storage volume. + properties: + deviceName: + description: Device name + type: string + encrypted: + description: Encrypted is whether the volume should be encrypted or not. + type: boolean + encryptionKey: + description: EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN. If Encrypted is set and this is omitted, the default AWS key will be used. The key must already exist and be accessible by the controller. + type: string + iops: + description: IOPS is the number of IOPS requested for the disk. Not applicable to all types. + format: int64 + type: integer + size: + description: Size specifies size (in Gi) of the root storage device. Must be greater than the image root snapshot size or 8 (whichever is greater). + format: int64 + minimum: 8 + type: integer + type: + description: Type is the type of the root volume (e.g. gp2, io1, etc...). + type: string + required: + - size + type: object + securityGroupIds: + description: SecurityGroupIDs are one or more security group IDs this instance belongs to. 
+ items: + type: string + type: array + spotMarketOptions: + description: SpotMarketOptions option for configuring instances to be run using AWS Spot instances. + properties: + maxPrice: + description: MaxPrice defines the maximum price the user is willing to pay for Spot VM instances + type: string + type: object + sshKeyName: + description: The name of the SSH key pair. + type: string + subnetId: + description: The ID of the subnet of the instance. + type: string + tags: + additionalProperties: + type: string + description: The tags associated with the instance. + type: object + type: + description: The instance type. + type: string + userData: + description: UserData is the raw data script passed to the instance which is run upon bootstrap. This field must not be base64 encoded and should only be used when running a new instance. + type: string + required: + - id + type: object + conditions: + description: Conditions specifies the cpnditions for the managed control plane + items: + description: Condition defines an observation of a Cluster API resource operational state. + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: A human readable message indicating details about the transition. This field may be empty. + type: string + reason: + description: The reason for the condition's last transition in CamelCase. The specific API may choose whether or not this field is considered a guaranteed API. This field may not be empty. + type: string + severity: + description: Severity provides an explicit classification of Reason code, so the users or machines can immediately understand the current situation and act accordingly. The Severity field MUST be set only when Status=False. 
+ type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition in CamelCase or in foo.example.com/CamelCase. Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. + type: string + required: + - status + - type + type: object + type: array + failureDomains: + additionalProperties: + description: FailureDomainSpec is the Schema for Cluster API failure domains. It allows controllers to understand how many failure domains a cluster can optionally span across. + properties: + attributes: + additionalProperties: + type: string + description: Attributes is a free form map of attributes an infrastructure provider might use or require. + type: object + controlPlane: + description: ControlPlane determines if this failure domain is suitable for use by control plane machines. + type: boolean + type: object + description: FailureDomains specifies a list fo available availability zones that can be used + type: object + failureMessage: + description: ErrorMessage indicates that there is a terminal problem reconciling the state, and will be set to a descriptive error message. + type: string + initialized: + description: Initialized denotes whether or not the control plane has the uploaded kubernetes config-map. + type: boolean + network: + description: Networks holds details about the AWS networking resources used by the control plane + properties: + apiServerElb: + description: APIServerELB is the Kubernetes api server classic load balancer. + properties: + attributes: + description: Attributes defines extra attributes associated with the load balancer. + properties: + crossZoneLoadBalancing: + description: CrossZoneLoadBalancing enables the classic load balancer load balancing. 
+ type: boolean + idleTimeout: + description: IdleTimeout is time that the connection is allowed to be idle (no data has been sent over the connection) before it is closed by the load balancer. + format: int64 + type: integer + type: object + availabilityZones: + description: AvailabilityZones is an array of availability zones in the VPC attached to the load balancer. + items: + type: string + type: array + dnsName: + description: DNSName is the dns name of the load balancer. + type: string + healthChecks: + description: HealthCheck is the classic elb health check associated with the load balancer. + properties: + healthyThreshold: + format: int64 + type: integer + interval: + description: A Duration represents the elapsed time between two instants as an int64 nanosecond count. The representation limits the largest representable duration to approximately 290 years. + format: int64 + type: integer + target: + type: string + timeout: + description: A Duration represents the elapsed time between two instants as an int64 nanosecond count. The representation limits the largest representable duration to approximately 290 years. + format: int64 + type: integer + unhealthyThreshold: + format: int64 + type: integer + required: + - healthyThreshold + - interval + - target + - timeout + - unhealthyThreshold + type: object + listeners: + description: Listeners is an array of classic elb listeners associated with the load balancer. There must be at least one. + items: + description: ClassicELBListener defines an AWS classic load balancer listener. + properties: + instancePort: + format: int64 + type: integer + instanceProtocol: + description: ClassicELBProtocol defines listener protocols for a classic load balancer. + type: string + port: + format: int64 + type: integer + protocol: + description: ClassicELBProtocol defines listener protocols for a classic load balancer. 
+ type: string + required: + - instancePort + - instanceProtocol + - port + - protocol + type: object + type: array + name: + description: The name of the load balancer. It must be unique within the set of load balancers defined in the region. It also serves as identifier. + type: string + scheme: + description: Scheme is the load balancer scheme, either internet-facing or private. + type: string + securityGroupIds: + description: SecurityGroupIDs is an array of security groups assigned to the load balancer. + items: + type: string + type: array + subnetIds: + description: SubnetIDs is an array of subnets in the VPC attached to the load balancer. + items: + type: string + type: array + tags: + additionalProperties: + type: string + description: Tags is a map of tags associated with the load balancer. + type: object + type: object + securityGroups: + additionalProperties: + description: SecurityGroup defines an AWS security group. + properties: + id: + description: ID is a unique identifier. + type: string + ingressRule: + description: IngressRules is the inbound rules associated with the security group. + items: + description: IngressRule defines an AWS ingress rule for security groups. + properties: + cidrBlocks: + description: List of CIDR blocks to allow access from. Cannot be specified with SourceSecurityGroupID. + items: + type: string + type: array + description: + type: string + fromPort: + format: int64 + type: integer + protocol: + description: SecurityGroupProtocol defines the protocol type for a security group rule. + type: string + sourceSecurityGroupIds: + description: The security group id to allow access from. Cannot be specified with CidrBlocks. + items: + type: string + type: array + toPort: + format: int64 + type: integer + required: + - description + - fromPort + - protocol + - toPort + type: object + type: array + name: + description: Name is the security group name. 
+ type: string + tags: + additionalProperties: + type: string + description: Tags is a map of tags associated with the security group. + type: object + required: + - id + - name + type: object + description: SecurityGroups is a map from the role/kind of the security group to its unique name, if any. + type: object + type: object + ready: + default: false + description: Ready denotes that the AWSManagedControlPlane API Server is ready to receive requests and that the VPC infra is ready. + type: boolean + required: + - ready + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index c48c8dfa67..5a53d84dec 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -8,6 +8,8 @@ resources: - bases/infrastructure.cluster.x-k8s.io_awsmachines.yaml - bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml - bases/infrastructure.cluster.x-k8s.io_awsmachinetemplates.yaml +- bases/infrastructure.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml +- bases/infrastructure.cluster.x-k8s.io_awsmanagedclusters.yaml # +kubebuilder:scaffold:crdkustomizeresource patchesStrategicMerge: @@ -16,6 +18,7 @@ patchesStrategicMerge: - patches/webhook_in_awsmachines.yaml - patches/webhook_in_awsclusters.yaml - patches/webhook_in_awsmachinetemplates.yaml +# - patches/webhook_in_ekscontrolplanes.yaml # +kubebuilder:scaffold:crdkustomizewebhookpatch # [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix. 
@@ -23,6 +26,7 @@ patchesStrategicMerge: - patches/cainjection_in_awsmachines.yaml - patches/cainjection_in_awsclusters.yaml - patches/cainjection_in_awsmachinetemplates.yaml +# - patches/cainjection_in_ekscontrolplanes.yaml # +kubebuilder:scaffold:crdkustomizecainjectionpatch # the following config is for teaching kustomize how to do kustomization for CRDs. diff --git a/config/crd/patches/cainjection_in_ekscontrolplanes.yaml b/config/crd/patches/cainjection_in_ekscontrolplanes.yaml new file mode 100644 index 0000000000..c142baaa98 --- /dev/null +++ b/config/crd/patches/cainjection_in_ekscontrolplanes.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: awsmanagedcontrolplanes.infrastructure.cluster.x-k8s.io diff --git a/config/crd/patches/webhook_in_ekscontrolplanes.yaml b/config/crd/patches/webhook_in_ekscontrolplanes.yaml new file mode 100644 index 0000000000..14bb881475 --- /dev/null +++ b/config/crd/patches/webhook_in_ekscontrolplanes.yaml @@ -0,0 +1,19 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. 
+apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: awsmanagedcontrolplanes.infrastructure.cluster.x-k8s.io +spec: + conversion: + strategy: Webhook + webhook: + conversionReviewVersions: ["v1", "v1beta1"] + clientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 974df9388e..b3a49f3e71 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -22,8 +22,12 @@ rules: resources: - secrets verbs: + - create + - delete - get - list + - patch + - update - watch - apiGroups: - cluster.x-k8s.io @@ -43,6 +47,16 @@ rules: - get - list - watch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - get + - list + - patch + - watch - apiGroups: - infrastructure.cluster.x-k8s.io resources: @@ -83,3 +97,43 @@ rules: - get - patch - update +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - awsmanagedclusters + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - awsmanagedclusters/status + verbs: + - get + - patch + - update +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - awsmanagedcontrolplanes + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - awsmanagedcontrolplanes/status + verbs: + - get + - patch + - update diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml index 06376cb7d7..a1828ec226 100644 --- a/config/webhook/manifests.yaml +++ b/config/webhook/manifests.yaml @@ -26,6 +26,26 @@ webhooks: resources: - awsclusters sideEffects: None +- clientConfig: + 
caBundle: Cg== + service: + name: webhook-service + namespace: system + path: /mutate-infrastructure-cluster-x-k8s-io-v1alpha3-awsmanagedcontrolplane + failurePolicy: Fail + matchPolicy: Equivalent + name: default.awsmanagedcontrolplanes.infrastructure.cluster.x-k8s.io + rules: + - apiGroups: + - infrastructure.cluster.x-k8s.io + apiVersions: + - v1alpha3 + operations: + - CREATE + - UPDATE + resources: + - awsmanagedcontrolplanes + sideEffects: None --- apiVersion: admissionregistration.k8s.io/v1beta1 @@ -94,3 +114,23 @@ webhooks: resources: - awsmachinetemplates sideEffects: None +- clientConfig: + caBundle: Cg== + service: + name: webhook-service + namespace: system + path: /validate-infrastructure-cluster-x-k8s-io-v1alpha3-awsmanagedcontrolplane + failurePolicy: Fail + matchPolicy: Equivalent + name: validation.awsmanagedcontrolplanes.infrastructure.cluster.x-k8s.io + rules: + - apiGroups: + - infrastructure.cluster.x-k8s.io + apiVersions: + - v1alpha3 + operations: + - CREATE + - UPDATE + resources: + - awsmanagedcontrolplanes + sideEffects: None diff --git a/controllers/awscluster_controller.go b/controllers/awscluster_controller.go index 1f147ff0ac..6a49243d9d 100644 --- a/controllers/awscluster_controller.go +++ b/controllers/awscluster_controller.go @@ -27,7 +27,6 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/client-go/tools/record" - clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/conditions" diff --git a/exp/README.md b/exp/README.md new file mode 100644 index 0000000000..5e9cbae041 --- /dev/null +++ b/exp/README.md @@ -0,0 +1,10 @@ +# exp + +This subrepository holds experimental code and API types. + +**Warning**: Packages here are experimental and unreliable. Some may one day be promoted to the main repository, or they may be modified arbitrarily or even disappear altogether. 
+ +In short, code in this subrepository is not subject to any compatibility or deprecation promise. + + +For policy around graduation timeline, see [Cluster API Exp](https://github.com/kubernetes-sigs/cluster-api/tree/master/exp). \ No newline at end of file diff --git a/exp/api/v1alpha3/awsmanagedcluster_types.go b/exp/api/v1alpha3/awsmanagedcluster_types.go new file mode 100644 index 0000000000..09fc229657 --- /dev/null +++ b/exp/api/v1alpha3/awsmanagedcluster_types.go @@ -0,0 +1,71 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha3 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" +) + +// AWSManagedClusterSpec defines the desired state of AWSManagedCluster +type AWSManagedClusterSpec struct { + // ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. + // +optional + ControlPlaneEndpoint clusterv1.APIEndpoint `json:"controlPlaneEndpoint"` +} + +// AWSManagedClusterStatus defines the observed state of AWSManagedCluster +type AWSManagedClusterStatus struct { + // Ready is when the AWSManagedControlPlane has a API server URL. 
+ // +optional
+ Ready bool `json:"ready,omitempty"`
+
+ // FailureDomains specifies a list of available availability zones that can be used
+ // +optional
+ FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=awsmanagedclusters,scope=Namespaced,categories=cluster-api,shortName=awsmc
+// +kubebuilder:storageversion
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".metadata.labels.cluster\\.x-k8s\\.io/cluster-name",description="Cluster to which this AWSManagedControl belongs"
+// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="Control plane infrastructure is ready for worker nodes"
+// +kubebuilder:printcolumn:name="VPC",type="string",JSONPath=".spec.networkSpec.vpc.id",description="AWS VPC the control plane is using"
+// +kubebuilder:printcolumn:name="Endpoint",type="string",JSONPath=".spec.controlPlaneEndpoint.host",description="API Endpoint",priority=1

+// AWSManagedCluster is the Schema for the awsmanagedclusters API
+type AWSManagedCluster struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec AWSManagedClusterSpec `json:"spec,omitempty"`
+ Status AWSManagedClusterStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// AWSManagedClusterList contains a list of AWSManagedCluster
+type AWSManagedClusterList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []AWSManagedCluster `json:"items"`
+}
+
+func init() {
+ SchemeBuilder.Register(&AWSManagedCluster{}, &AWSManagedClusterList{})
+} diff --git a/exp/api/v1alpha3/awsmanagedcontrolplane_types.go b/exp/api/v1alpha3/awsmanagedcontrolplane_types.go new file mode 100644 index 0000000000..a6908b6f61 --- /dev/null +++ b/exp/api/v1alpha3/awsmanagedcontrolplane_types.go @@ -0,0 +1,222 @@ +/* +Copyright 2020 The
Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha3 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha3" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" +) + +const ( + // ManagedControlPlaneFinalizer allows the controller to clean up resources on delete + ManagedControlPlaneFinalizer = "awsmanagedcontrolplane.infrastructure.cluster.x-k8s.io" +) + +// AWSManagedControlPlaneSpec defines the desired state of AWSManagedControlPlane +type AWSManagedControlPlaneSpec struct { + // EKSClusterName allows you to specify the name of the EKS cluster in + // AWS. If you don't specify a name then a default name will be created + // based on the namespace and name of the managed control plane. + // +optional + EKSClusterName string `json:"eksClusterName,omitempty"` + + // NetworkSpec encapsulates all things related to AWS network. + NetworkSpec infrav1.NetworkSpec `json:"networkSpec,omitempty"` + + // The AWS Region the cluster lives in. + Region string `json:"region,omitempty"` + + // SSHKeyName is the name of the ssh key to attach to the bastion host. Valid values are empty string (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name) + // +optional + SSHKeyName *string `json:"sshKeyName,omitempty"` + + // Version defines the desired Kubernetes version. 
If no version number + // is supplied then the latest version of Kubernetes that EKS supports + // will be used. + // +kubebuilder:validation:MinLength:=2 + // +kubebuilder:validation:Pattern:=^v(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.?$ + // +optional + Version *string `json:"version,omitempty"` + + // RoleName specifies the name of IAM role that gives EKS + // permission to make API calls. If the role is pre-existing + // we will treat it as unmanaged and not delete it on + // deletion. If the EKSEnableIAM feature flag is true + // and no name is supplied then a role is created. + // +kubebuilder:validation:MinLength:=2 + // +optional + RoleName *string `json:"roleName,omitempty"` + + // RoleAdditionalPolicies allows you to attach additional polices to + // the control plane role. You must enable the EKSAllowAddRoles + // feature flag to incorporate these into the created role. + // +optional + RoleAdditionalPolicies *[]string `json:"roleAdditionalPolicies,omitempty"` + + // Logging specifies which EKS Cluster logs should be enabled. Entries for + // each of the enabled logs will be sent to CloudWatch + // +optional + Logging *ControlPlaneLoggingSpec `json:"logging,omitempty"` + + // EncryptionConfig specifies the encryption configuration for the cluster + // +optional + EncryptionConfig *EncryptionConfig `json:"encryptionConfig,omitempty"` + + // AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the + // ones added by default. + // +optional + AdditionalTags infrav1.Tags `json:"additionalTags,omitempty"` + + // Endpoints specifies access to this cluster's control plane endpoints + // +optional + EndpointAccess EndpointAccess `json:"endpointAccess,omitempty"` + + // ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. 
+ // +optional + ControlPlaneEndpoint clusterv1.APIEndpoint `json:"controlPlaneEndpoint"` + + // ImageLookupFormat is the AMI naming format to look up machine images when + // a machine does not specify an AMI. When set, this will be used for all + // cluster machines unless a machine specifies a different ImageLookupOrg. + // Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base + // OS and kubernetes version, respectively. The BaseOS will be the value in + // ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as + // defined by the packages produced by kubernetes/release without v as a + // prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default + // image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up + // searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a + // Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See + // also: https://golang.org/pkg/text/template/ + // +optional + ImageLookupFormat string `json:"imageLookupFormat,omitempty"` + + // ImageLookupOrg is the AWS Organization ID to look up machine images when a + // machine does not specify an AMI. When set, this will be used for all + // cluster machines unless a machine specifies a different ImageLookupOrg. + // +optional + ImageLookupOrg string `json:"imageLookupOrg,omitempty"` + + // ImageLookupBaseOS is the name of the base operating system used to look + // up machine images when a machine does not specify an AMI. When set, this + // will be used for all cluster machines unless a machine specifies a + // different ImageLookupBaseOS. + ImageLookupBaseOS string `json:"imageLookupBaseOS,omitempty"` + + // Bastion contains options to configure the bastion host. 
+ // +optional
+ Bastion infrav1.Bastion `json:"bastion"`
+
+ // TokenMethod is used to specify the method for obtaining a client token for communicating with EKS
+ // iam-authenticator - obtains a client token using iam-authenticator
+ // aws-cli - obtains a client token using the AWS CLI
+ // Defaults to iam-authenticator
+ // +kubebuilder:default=iam-authenticator
+ // +kubebuilder:validation:Enum=iam-authenticator;aws-cli
+ TokenMethod *EKSTokenMethod `json:"tokenMethod,omitempty"`
+}
+
+// EndpointAccess specifies how control plane endpoints are accessible
+type EndpointAccess struct {
+ // Public controls whether control plane endpoints are publicly accessible
+ // +optional
+ Public *bool `json:"public,omitempty"`
+ // PublicCIDRs specifies which blocks can access the public endpoint
+ // +optional
+ PublicCIDRs []*string `json:"publicCIDRs,omitempty"`
+ // Private points VPC-internal control plane access to the private endpoint
+ // +optional
+ Private *bool `json:"private,omitempty"`
+}
+
+// EncryptionConfig specifies the encryption configuration for the EKS cluster
+type EncryptionConfig struct {
+ // Provider specifies the ARN or alias of the CMK (in AWS KMS)
+ Provider *string `json:"provider,omitempty"`
+ // Resources specifies the resources to be encrypted
+ Resources []*string `json:"resources,omitempty"`
+}
+
+// AWSManagedControlPlaneStatus defines the observed state of AWSManagedControlPlane
+type AWSManagedControlPlaneStatus struct {
+ // Networks holds details about the AWS networking resources used by the control plane
+ // +optional
+ Network infrav1.Network `json:"network,omitempty"`
+ // FailureDomains specifies a list of available availability zones that can be used
+ // +optional
+ FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"`
+ // Bastion holds details of the instance that is used as a bastion jump box
+ // +optional
+ Bastion *infrav1.Instance `json:"bastion,omitempty"`
+ // Initialized denotes whether or not
the control plane has the
+ // uploaded kubernetes config-map.
+ // +optional
+ Initialized bool `json:"initialized"`
+ // Ready denotes that the AWSManagedControlPlane API Server is ready to
+ // receive requests and that the VPC infra is ready.
+ // +kubebuilder:default=false
+ Ready bool `json:"ready"`
+ // FailureMessage indicates that there is a terminal problem reconciling the
+ // state, and will be set to a descriptive error message.
+ // +optional
+ FailureMessage *string `json:"failureMessage,omitempty"`
+ // Conditions specifies the conditions for the managed control plane
+ Conditions clusterv1.Conditions `json:"conditions,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=awsmanagedcontrolplanes,shortName=awsmcp,scope=Namespaced,categories=cluster-api
+// +kubebuilder:storageversion
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".metadata.labels.cluster\\.x-k8s\\.io/cluster-name",description="Cluster to which this AWSManagedControl belongs"
+// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="Control plane infrastructure is ready for worker nodes"
+// +kubebuilder:printcolumn:name="VPC",type="string",JSONPath=".spec.networkSpec.vpc.id",description="AWS VPC the control plane is using"
+// +kubebuilder:printcolumn:name="Endpoint",type="string",JSONPath=".spec.controlPlaneEndpoint.host",description="API Endpoint",priority=1
+// +kubebuilder:printcolumn:name="Bastion IP",type="string",JSONPath=".status.bastion.publicIp",description="Bastion IP address for breakglass access"
+
+// AWSManagedControlPlane is the Schema for the awsmanagedcontrolplanes API
+type AWSManagedControlPlane struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec AWSManagedControlPlaneSpec `json:"spec,omitempty"`
+ Status AWSManagedControlPlaneStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true + +// AWSManagedControlPlaneList contains a list of AWSManagedControlPlane +type AWSManagedControlPlaneList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AWSManagedControlPlane `json:"items"` +} + +// GetConditions returns the control planes conditions +func (r *AWSManagedControlPlane) GetConditions() clusterv1.Conditions { + return r.Status.Conditions +} + +// SetConditions sets the status conditions for the AWSManagedControlPlane +func (r *AWSManagedControlPlane) SetConditions(conditions clusterv1.Conditions) { + r.Status.Conditions = conditions +} + +func init() { + SchemeBuilder.Register(&AWSManagedControlPlane{}, &AWSManagedControlPlaneList{}) +} diff --git a/exp/api/v1alpha3/awsmanagedcontrolplane_webhook.go b/exp/api/v1alpha3/awsmanagedcontrolplane_webhook.go new file mode 100644 index 0000000000..88535f4720 --- /dev/null +++ b/exp/api/v1alpha3/awsmanagedcontrolplane_webhook.go @@ -0,0 +1,219 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha3 + +import ( + "fmt" + "strings" + + "github.com/pkg/errors" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apimachinery/pkg/util/version" + + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + + "sigs.k8s.io/cluster-api-provider-aws/pkg/hash" +) + +const ( + // maxCharsName maximum number of characters for the name + maxCharsName = 100 + + clusterPrefix = "capa_" +) + +// log is for logging in this package. +var mcpLog = logf.Log.WithName("awsmanagedcontrolplane-resource") + +// SetupWebhookWithManager will setup the webhooks for the AWSManagedControlPlane +func (r *AWSManagedControlPlane) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +// +kubebuilder:webhook:verbs=create;update,path=/validate-infrastructure-cluster-x-k8s-io-v1alpha3-awsmanagedcontrolplane,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsmanagedcontrolplanes,versions=v1alpha3,name=validation.awsmanagedcontrolplanes.infrastructure.cluster.x-k8s.io,sideEffects=None +// +kubebuilder:webhook:verbs=create;update,path=/mutate-infrastructure-cluster-x-k8s-io-v1alpha3-awsmanagedcontrolplane,mutating=true,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsmanagedcontrolplanes,versions=v1alpha3,name=default.awsmanagedcontrolplanes.infrastructure.cluster.x-k8s.io,sideEffects=None + +var _ webhook.Defaulter = &AWSManagedControlPlane{} +var _ webhook.Validator = &AWSManagedControlPlane{} + +func parseEKSVersion(raw string) (*version.Version, error) { + v, err := version.ParseGeneric(raw) + if err != nil { + return nil, err + } + return version.MustParseGeneric(fmt.Sprintf("%d.%d", v.Major(), v.Minor())), nil +} + +func 
normalizeVersion(raw string) (string, error) { + // Normalize version (i.e. remove patch, add "v" prefix) if necessary + eksV, err := parseEKSVersion(raw) + if err != nil { + return "", err + } + return fmt.Sprintf("v%d.%d", eksV.Major(), eksV.Minor()), nil +} + +// ValidateCreate will do any extra validation when creating a AWSManagedControlPlane +func (r *AWSManagedControlPlane) ValidateCreate() error { + mcpLog.Info("AWSManagedControlPlane validate create", "name", r.Name) + + var allErrs field.ErrorList + + if r.Spec.EKSClusterName == "" { + allErrs = append(allErrs, field.Required(field.NewPath("spec.eksClusterName"), "eksClusterName is required")) + } + + allErrs = append(allErrs, r.validateEKSVersion(nil)...) + + if len(allErrs) == 0 { + return nil + } + + return apierrors.NewInvalid( + r.GroupVersionKind().GroupKind(), + r.Name, + allErrs, + ) +} + +// ValidateUpdate will do any extra validation when updating a AWSManagedControlPlane +func (r *AWSManagedControlPlane) ValidateUpdate(old runtime.Object) error { + mcpLog.Info("AWSManagedControlPlane validate update", "name", r.Name) + oldAWSManagedControlplane, ok := old.(*AWSManagedControlPlane) + if !ok { + return apierrors.NewInvalid(GroupVersion.WithKind("AWSManagedControlPlane").GroupKind(), r.Name, field.ErrorList{ + field.InternalError(nil, errors.New("failed to convert old AWSManagedControlPlane to object")), + }) + } + + var allErrs field.ErrorList + allErrs = append(allErrs, r.validateEKSClusterName()...) + allErrs = append(allErrs, r.validateEKSClusterNameSame(oldAWSManagedControlplane)...) + allErrs = append(allErrs, r.validateEKSVersion(oldAWSManagedControlplane)...) 
+ + if len(allErrs) == 0 { + return nil + } + + return apierrors.NewInvalid( + r.GroupVersionKind().GroupKind(), + r.Name, + allErrs, + ) +} + +// ValidateDelete allows you to add any extra validation when deleting +func (r *AWSManagedControlPlane) ValidateDelete() error { + mcpLog.Info("AWSManagedControlPlane validate delete", "name", r.Name) + + return nil +} + +func (r *AWSManagedControlPlane) validateEKSClusterName() field.ErrorList { + var allErrs field.ErrorList + + if r.Spec.EKSClusterName == "" { + allErrs = append(allErrs, field.Required(field.NewPath("spec.eksClusterName"), "eksClusterName is required")) + } + + return allErrs +} + +func (r *AWSManagedControlPlane) validateEKSClusterNameSame(old *AWSManagedControlPlane) field.ErrorList { + var allErrs field.ErrorList + + if r.Spec.EKSClusterName != old.Spec.EKSClusterName { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec.eksClusterName"), r.Spec.EKSClusterName, "eksClusterName is different to current cluster name")) + } + + return allErrs +} + +func (r *AWSManagedControlPlane) validateEKSVersion(old *AWSManagedControlPlane) field.ErrorList { + path := field.NewPath("spec.version") + var allErrs field.ErrorList + + if r.Spec.Version == nil { + return allErrs + } + + v, err := parseEKSVersion(*r.Spec.Version) + if err != nil { + allErrs = append(allErrs, field.Invalid(path, *r.Spec.Version, err.Error())) + } + + if old != nil { + oldV, err := parseEKSVersion(*old.Spec.Version) + if err == nil && (v.Major() < oldV.Major() || v.Minor() < oldV.Minor()) { + allErrs = append(allErrs, field.Invalid(path, *r.Spec.Version, "new version less than old version")) + } + } + + return allErrs +} + +// Default will set default values for the AWSManagedControlPlane +func (r *AWSManagedControlPlane) Default() { + mcpLog.Info("AWSManagedControlPlane setting defaults", "name", r.Name) + + if r.Spec.EKSClusterName == "" { + mcpLog.Info("EKSClusterName is empty, generating name") + name, err := 
generateEKSName(r.Name, r.Namespace) + if err != nil { + mcpLog.Error(err, "failed to create EKS cluster name") + return + } + + mcpLog.Info("defaulting EKS cluster name", "cluster-name", name) + r.Spec.EKSClusterName = name + } + + // Normalize version (i.e. remove patch, add "v" prefix) if necessary + if r.Spec.Version != nil { + normalizedV, err := normalizeVersion(*r.Spec.Version) + if err != nil { + mcpLog.Error(err, "couldn't parse version") + return + } + r.Spec.Version = &normalizedV + } +} + +// generateEKSName generates a name of the EKS cluster +func generateEKSName(clusterName, namespace string) (string, error) { + escapedName := strings.Replace(clusterName, ".", "_", -1) + eksName := fmt.Sprintf("%s_%s", namespace, escapedName) + + if len(eksName) < maxCharsName { + return eksName, nil + } + + hashLength := 32 - len(clusterPrefix) + hashedName, err := hash.Base36TruncatedHash(eksName, hashLength) + if err != nil { + return "", fmt.Errorf("creating hash from cluster name: %w", err) + } + + return fmt.Sprintf("%s%s", clusterPrefix, hashedName), nil +} diff --git a/exp/api/v1alpha3/awsmanagedcontrolplane_webhook_test.go b/exp/api/v1alpha3/awsmanagedcontrolplane_webhook_test.go new file mode 100644 index 0000000000..3b03af4953 --- /dev/null +++ b/exp/api/v1alpha3/awsmanagedcontrolplane_webhook_test.go @@ -0,0 +1,245 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha3 + +import ( + "strings" + "testing" + + . 
"github.com/onsi/gomega" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var ( + vV1_17_1 = "v1.17.1" + vV1_17 = "v1.17" + vV1_16 = "v1.16" +) + +func TestDefaultingWebhook(t *testing.T) { + tests := []struct { + name string + resourceName string + resourceNS string + expectHash bool + expect string + spec AWSManagedControlPlaneSpec + expectSpec AWSManagedControlPlaneSpec + }{ + { + name: "less than 100 chars", + resourceName: "cluster1", + resourceNS: "default", + expectHash: false, + expectSpec: AWSManagedControlPlaneSpec{EKSClusterName: "default_cluster1"}, + }, + { + name: "less than 100 chars, dot in name", + resourceName: "team1.cluster1", + resourceNS: "default", + expectHash: false, + expectSpec: AWSManagedControlPlaneSpec{EKSClusterName: "default_team1_cluster1"}, + }, + { + name: "more than 100 chars", + resourceName: "ABCDEABCDEABCDEABCDEABCDEABCDEABCDEABCDEABCDEABCDEABCDEABCDEABCDEABCDEABCDEABCDEABCDEABCDEABCDEABCDE", + resourceNS: "default", + expectHash: true, + expectSpec: AWSManagedControlPlaneSpec{EKSClusterName: "capi_"}, + }, + { + name: "with patch", + resourceName: "cluster1", + resourceNS: "default", + expectHash: false, + spec: AWSManagedControlPlaneSpec{Version: &vV1_17_1}, + expectSpec: AWSManagedControlPlaneSpec{EKSClusterName: "default_cluster1", Version: &vV1_17}, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + + mcp := &AWSManagedControlPlane{ + ObjectMeta: v1.ObjectMeta{ + Name: tc.resourceName, + Namespace: tc.resourceNS, + }, + } + mcp.Spec = tc.spec + mcp.Default() + + g.Expect(mcp.Spec.EKSClusterName).ToNot(BeEmpty()) + + if tc.expectHash { + g.Expect(strings.HasPrefix(mcp.Spec.EKSClusterName, "capa_")).To(BeTrue()) + // We don't care about the exact name + tc.expectSpec.EKSClusterName = mcp.Spec.EKSClusterName + } + g.Expect(mcp.Spec).To(Equal(tc.expectSpec)) + }) + } +} + +func TestValidatingWebhookCreate(t *testing.T) { + tests := []struct { + name string + eksClusterName 
string + expectError bool + eksVersion string + }{ + { + name: "ekscluster specified", + eksClusterName: "default_cluster1", + expectError: false, + }, + { + name: "ekscluster NOT specified", + eksClusterName: "", + expectError: true, + }, + { + name: "invalid version", + eksClusterName: "default_cluster1", + eksVersion: "v1.x17", + expectError: true, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + + mcp := &AWSManagedControlPlane{ + Spec: AWSManagedControlPlaneSpec{ + EKSClusterName: tc.eksClusterName, + }, + } + if tc.eksVersion != "" { + mcp.Spec.Version = &tc.eksVersion + } + err := mcp.ValidateCreate() + + if tc.expectError { + g.Expect(err).ToNot(BeNil()) + } else { + g.Expect(err).To(BeNil()) + } + }) + } +} + +func TestValidatingWebhookUpdate(t *testing.T) { + tests := []struct { + name string + oldClusterSpec AWSManagedControlPlaneSpec + newClusterSpec AWSManagedControlPlaneSpec + oldClusterName string + newClusterName string + oldEksVersion string + newEksVersion string + expectError bool + }{ + { + name: "ekscluster specified, same cluster names", + oldClusterSpec: AWSManagedControlPlaneSpec{ + EKSClusterName: "default_cluster1", + }, + newClusterSpec: AWSManagedControlPlaneSpec{ + EKSClusterName: "default_cluster1", + }, + expectError: false, + }, + { + name: "ekscluster specified, different cluster names", + oldClusterSpec: AWSManagedControlPlaneSpec{ + EKSClusterName: "default_cluster1", + }, + newClusterSpec: AWSManagedControlPlaneSpec{ + EKSClusterName: "default_cluster2", + }, + expectError: true, + }, + { + name: "old ekscluster specified, no new cluster name", + oldClusterSpec: AWSManagedControlPlaneSpec{ + EKSClusterName: "default_cluster1", + }, + newClusterSpec: AWSManagedControlPlaneSpec{ + EKSClusterName: "", + }, + expectError: true, + }, + { + name: "older version", + oldClusterSpec: AWSManagedControlPlaneSpec{ + EKSClusterName: "default_cluster1", + Version: &vV1_17, + }, + 
newClusterSpec: AWSManagedControlPlaneSpec{ + EKSClusterName: "default_cluster1", + Version: &vV1_16, + }, + expectError: true, + }, + { + name: "same version", + oldClusterSpec: AWSManagedControlPlaneSpec{ + EKSClusterName: "default_cluster1", + Version: &vV1_17, + }, + newClusterSpec: AWSManagedControlPlaneSpec{ + EKSClusterName: "default_cluster1", + Version: &vV1_17, + }, + expectError: false, + }, + { + name: "newer version", + oldClusterSpec: AWSManagedControlPlaneSpec{ + EKSClusterName: "default_cluster1", + Version: &vV1_16, + }, + newClusterSpec: AWSManagedControlPlaneSpec{ + EKSClusterName: "default_cluster1", + Version: &vV1_17, + }, + expectError: false, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + + newMCP := &AWSManagedControlPlane{ + Spec: tc.newClusterSpec, + } + oldMCP := &AWSManagedControlPlane{ + Spec: tc.oldClusterSpec, + } + err := newMCP.ValidateUpdate(oldMCP) + + if tc.expectError { + g.Expect(err).ToNot(BeNil()) + } else { + g.Expect(err).To(BeNil()) + } + }) + } +} diff --git a/exp/api/v1alpha3/conditions_consts.go b/exp/api/v1alpha3/conditions_consts.go new file mode 100644 index 0000000000..ae6b7123a4 --- /dev/null +++ b/exp/api/v1alpha3/conditions_consts.go @@ -0,0 +1,33 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha3 + +import clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" + +const ( + // EKSControlPlaneReadyCondition condition reports on the successful reconciliation of eks control plane. + EKSControlPlaneReadyCondition clusterv1.ConditionType = "EKSControlPlaneReady" + // EKSControlPlaneReconciliationFailedReason used to report failures while reconciling EKS control plane + EKSControlPlaneReconciliationFailedReason = "EKSControlPlaneReconciliationFailed" +) + +const ( + // IAMControlPlaneRolesReadyCondition condition reports on the successful reconciliation of eks control plane iam roles. + IAMControlPlaneRolesReadyCondition clusterv1.ConditionType = "IAMControlPlaneRolesReady" + // IAMControlPlaneRolesReconciliationFailedReason used to report failures while reconciling EKS control plane iam roles + IAMControlPlaneRolesReconciliationFailedReason = "IAMControlPlaneRolesReconciliationFailed" +) diff --git a/exp/api/v1alpha3/doc.go b/exp/api/v1alpha3/doc.go new file mode 100644 index 0000000000..e072c3ecbe --- /dev/null +++ b/exp/api/v1alpha3/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha3 diff --git a/exp/api/v1alpha3/groupversion_info.go b/exp/api/v1alpha3/groupversion_info.go new file mode 100644 index 0000000000..a1124f6a0e --- /dev/null +++ b/exp/api/v1alpha3/groupversion_info.go @@ -0,0 +1,36 @@ +/* +Copyright 2020 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1alpha3 contains API Schema definitions for the eks controlplane v1alpha3 API group +// +kubebuilder:object:generate=true +// +groupName=infrastructure.cluster.x-k8s.io +package v1alpha3 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "infrastructure.cluster.x-k8s.io", Version: "v1alpha3"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/exp/api/v1alpha3/types.go b/exp/api/v1alpha3/types.go new file mode 100644 index 0000000000..69a92bc0e8 --- /dev/null +++ b/exp/api/v1alpha3/types.go @@ -0,0 +1,74 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha3 + +import ( + "github.com/aws/aws-sdk-go/service/eks" +) + +// ControlPlaneLoggingSpec defines which EKS control plane logs should be enabled +type ControlPlaneLoggingSpec struct { + // APIServer indicates if the Kubernetes API Server log (kube-apiserver) should be enabled + // +kubebuilder:default=false + APIServer bool `json:"apiServer"` + // Audit indicates if the Kubernetes API audit log should be enabled + // +kubebuilder:default=false + Audit bool `json:"audit"` + // Authenticator indicates if the iam authenticator log should be enabled + // +kubebuilder:default=false + Authenticator bool `json:"authenticator"` + // ControllerManager indicates if the controller manager (kube-controller-manager) log should be enabled + // +kubebuilder:default=false + ControllerManager bool `json:"controllerManager"` + // Scheduler indicates if the Kubernetes scheduler (kube-scheduler) log should be enabled + // +kubebuilder:default=false + Scheduler bool `json:"scheduler"` +} + +// IsLogEnabled returns true if the log is enabled +func (s *ControlPlaneLoggingSpec) IsLogEnabled(logName string) bool { + if s == nil { + return false + } + + switch logName { + case eks.LogTypeApi: + return s.APIServer + case eks.LogTypeAudit: + return s.Audit + case eks.LogTypeAuthenticator: + return s.Authenticator + case eks.LogTypeControllerManager: + return s.ControllerManager + case eks.LogTypeScheduler: + return s.Scheduler + default: + return false + } +} + +// EKSTokenMethod defines the method for obtaining a client token to use when connecting to EKS.
+type EKSTokenMethod string + +var ( + // EKSTokenMethodIAMAuthenticator indicates that IAM authenticator will be used to get a token + EKSTokenMethodIAMAuthenticator = EKSTokenMethod("iam-authenticator") + + // EKSTokenMethodAWSCli indicates that the AWS CLI will be used to get a token + // Version 1.16.156 or greater is required of the AWS CLI + EKSTokenMethodAWSCli = EKSTokenMethod("aws-cli") +) diff --git a/exp/api/v1alpha3/zz_generated.deepcopy.go b/exp/api/v1alpha3/zz_generated.deepcopy.go new file mode 100644 index 0000000000..26da145db7 --- /dev/null +++ b/exp/api/v1alpha3/zz_generated.deepcopy.go @@ -0,0 +1,370 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + "k8s.io/apimachinery/pkg/runtime" + cluster_api_provider_awsapiv1alpha3 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha3" + apiv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSManagedCluster) DeepCopyInto(out *AWSManagedCluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSManagedCluster. 
+func (in *AWSManagedCluster) DeepCopy() *AWSManagedCluster { + if in == nil { + return nil + } + out := new(AWSManagedCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AWSManagedCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSManagedClusterList) DeepCopyInto(out *AWSManagedClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AWSManagedCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSManagedClusterList. +func (in *AWSManagedClusterList) DeepCopy() *AWSManagedClusterList { + if in == nil { + return nil + } + out := new(AWSManagedClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AWSManagedClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSManagedClusterSpec) DeepCopyInto(out *AWSManagedClusterSpec) { + *out = *in + out.ControlPlaneEndpoint = in.ControlPlaneEndpoint +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSManagedClusterSpec. 
+func (in *AWSManagedClusterSpec) DeepCopy() *AWSManagedClusterSpec { + if in == nil { + return nil + } + out := new(AWSManagedClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSManagedClusterStatus) DeepCopyInto(out *AWSManagedClusterStatus) { + *out = *in + if in.FailureDomains != nil { + in, out := &in.FailureDomains, &out.FailureDomains + *out = make(apiv1alpha3.FailureDomains, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSManagedClusterStatus. +func (in *AWSManagedClusterStatus) DeepCopy() *AWSManagedClusterStatus { + if in == nil { + return nil + } + out := new(AWSManagedClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSManagedControlPlane) DeepCopyInto(out *AWSManagedControlPlane) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSManagedControlPlane. +func (in *AWSManagedControlPlane) DeepCopy() *AWSManagedControlPlane { + if in == nil { + return nil + } + out := new(AWSManagedControlPlane) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AWSManagedControlPlane) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AWSManagedControlPlaneList) DeepCopyInto(out *AWSManagedControlPlaneList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AWSManagedControlPlane, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSManagedControlPlaneList. +func (in *AWSManagedControlPlaneList) DeepCopy() *AWSManagedControlPlaneList { + if in == nil { + return nil + } + out := new(AWSManagedControlPlaneList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AWSManagedControlPlaneList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AWSManagedControlPlaneSpec) DeepCopyInto(out *AWSManagedControlPlaneSpec) { + *out = *in + in.NetworkSpec.DeepCopyInto(&out.NetworkSpec) + if in.SSHKeyName != nil { + in, out := &in.SSHKeyName, &out.SSHKeyName + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.RoleName != nil { + in, out := &in.RoleName, &out.RoleName + *out = new(string) + **out = **in + } + if in.RoleAdditionalPolicies != nil { + in, out := &in.RoleAdditionalPolicies, &out.RoleAdditionalPolicies + *out = new([]string) + if **in != nil { + in, out := *in, *out + *out = make([]string, len(*in)) + copy(*out, *in) + } + } + if in.Logging != nil { + in, out := &in.Logging, &out.Logging + *out = new(ControlPlaneLoggingSpec) + **out = **in + } + if in.EncryptionConfig != nil { + in, out := &in.EncryptionConfig, &out.EncryptionConfig + *out = new(EncryptionConfig) + (*in).DeepCopyInto(*out) + } + if in.AdditionalTags != nil { + in, out := &in.AdditionalTags, &out.AdditionalTags + *out = make(cluster_api_provider_awsapiv1alpha3.Tags, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.EndpointAccess.DeepCopyInto(&out.EndpointAccess) + out.ControlPlaneEndpoint = in.ControlPlaneEndpoint + in.Bastion.DeepCopyInto(&out.Bastion) + if in.TokenMethod != nil { + in, out := &in.TokenMethod, &out.TokenMethod + *out = new(EKSTokenMethod) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSManagedControlPlaneSpec. +func (in *AWSManagedControlPlaneSpec) DeepCopy() *AWSManagedControlPlaneSpec { + if in == nil { + return nil + } + out := new(AWSManagedControlPlaneSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AWSManagedControlPlaneStatus) DeepCopyInto(out *AWSManagedControlPlaneStatus) { + *out = *in + in.Network.DeepCopyInto(&out.Network) + if in.FailureDomains != nil { + in, out := &in.FailureDomains, &out.FailureDomains + *out = make(apiv1alpha3.FailureDomains, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + if in.Bastion != nil { + in, out := &in.Bastion, &out.Bastion + *out = new(cluster_api_provider_awsapiv1alpha3.Instance) + (*in).DeepCopyInto(*out) + } + if in.FailureMessage != nil { + in, out := &in.FailureMessage, &out.FailureMessage + *out = new(string) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(apiv1alpha3.Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSManagedControlPlaneStatus. +func (in *AWSManagedControlPlaneStatus) DeepCopy() *AWSManagedControlPlaneStatus { + if in == nil { + return nil + } + out := new(AWSManagedControlPlaneStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControlPlaneLoggingSpec) DeepCopyInto(out *ControlPlaneLoggingSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneLoggingSpec. +func (in *ControlPlaneLoggingSpec) DeepCopy() *ControlPlaneLoggingSpec { + if in == nil { + return nil + } + out := new(ControlPlaneLoggingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EncryptionConfig) DeepCopyInto(out *EncryptionConfig) { + *out = *in + if in.Provider != nil { + in, out := &in.Provider, &out.Provider + *out = new(string) + **out = **in + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfig. +func (in *EncryptionConfig) DeepCopy() *EncryptionConfig { + if in == nil { + return nil + } + out := new(EncryptionConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointAccess) DeepCopyInto(out *EndpointAccess) { + *out = *in + if in.Public != nil { + in, out := &in.Public, &out.Public + *out = new(bool) + **out = **in + } + if in.PublicCIDRs != nil { + in, out := &in.PublicCIDRs, &out.PublicCIDRs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Private != nil { + in, out := &in.Private, &out.Private + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointAccess. +func (in *EndpointAccess) DeepCopy() *EndpointAccess { + if in == nil { + return nil + } + out := new(EndpointAccess) + in.DeepCopyInto(out) + return out +} diff --git a/exp/controllers/awsmanagedcluster_controller.go b/exp/controllers/awsmanagedcluster_controller.go new file mode 100644 index 0000000000..e69937cd9b --- /dev/null +++ b/exp/controllers/awsmanagedcluster_controller.go @@ -0,0 +1,195 @@ +/* +Copyright 2020 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + "fmt" + "time" + + "github.com/go-logr/logr" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" + "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/patch" + "sigs.k8s.io/cluster-api/util/predicates" + + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" + + infrav1exp "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1alpha3" +) + +// AWSManagedClusterReconciler reconciles AWSManagedCluster +type AWSManagedClusterReconciler struct { + client.Client + Log logr.Logger + Recorder record.EventRecorder +} + +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmanagedclusters,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmanagedclusters/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch +// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;patch;delete + +func (r *AWSManagedClusterReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, reterr 
error) { + ctx := context.Background() + log := r.Log.WithValues("namespace", req.Namespace, "awsManagedCluster", req.Name) + + // Fetch the AWSManagedCluster instance + awsManagedCluster := &infrav1exp.AWSManagedCluster{} + err := r.Get(ctx, req.NamespacedName, awsManagedCluster) + if err != nil { + if apierrors.IsNotFound(err) { + return reconcile.Result{}, nil + } + return reconcile.Result{}, err + } + + // Fetch the Cluster. + cluster, err := util.GetOwnerCluster(ctx, r.Client, awsManagedCluster.ObjectMeta) + if err != nil { + return reconcile.Result{}, err + } + if cluster == nil { + log.Info("Cluster Controller has not yet set OwnerRef") + return reconcile.Result{}, nil + } + + if util.IsPaused(cluster, awsManagedCluster) { + log.Info("AWSManagedCluster or linked Cluster is marked as paused. Won't reconcile") + return reconcile.Result{}, nil + } + + log = log.WithValues("cluster", cluster.Name) + + controlPlane := &infrav1exp.AWSManagedControlPlane{} + controlPlaneRef := types.NamespacedName{ + Name: cluster.Spec.ControlPlaneRef.Name, + Namespace: cluster.Spec.ControlPlaneRef.Namespace, + } + + if err := r.Get(ctx, controlPlaneRef, controlPlane); err != nil { + return reconcile.Result{}, fmt.Errorf("failed to get control plane ref: %w", err) + } + + log = log.WithValues("controlPlane", controlPlaneRef.Name) + + patchHelper, err := patch.NewHelper(awsManagedCluster, r.Client) + if err != nil { + return reconcile.Result{}, fmt.Errorf("failed to init patch helper: %w", err) + } + + // Set the values from the managed control plane + awsManagedCluster.Status.Ready = controlPlane.Status.Ready + awsManagedCluster.Spec.ControlPlaneEndpoint = controlPlane.Spec.ControlPlaneEndpoint + awsManagedCluster.Status.FailureDomains = controlPlane.Status.FailureDomains + + if err := patchHelper.Patch(ctx, awsManagedCluster); err != nil { + return reconcile.Result{}, fmt.Errorf("failed to patch AWSManagedCluster: %w", err) + } + + log.Info("Successfully reconciled 
AWSManagedCluster") + + return reconcile.Result{}, nil + +} + +func (r *AWSManagedClusterReconciler) SetupWithManager(mgr ctrl.Manager, options controller.Options) error { + awsManagedCluster := &infrav1exp.AWSManagedCluster{} + + controller, err := ctrl.NewControllerManagedBy(mgr). + WithOptions(options). + For(awsManagedCluster). + WithEventFilter(predicates.ResourceNotPaused(r.Log)). + Build(r) + + if err != nil { + return fmt.Errorf("error creating controller: %w", err) + } + + // Add a watch for clusterv1.Cluster unpause + if err = controller.Watch( + &source.Kind{Type: &clusterv1.Cluster{}}, + &handler.EnqueueRequestsFromMapFunc{ + ToRequests: util.ClusterToInfrastructureMapFunc(awsManagedCluster.GroupVersionKind()), + }, + predicates.ClusterUnpaused(r.Log), + ); err != nil { + return fmt.Errorf("failed adding a watch for ready clusters: %w", err) + } + + // Add a watch for AWSManagedControlPlane + if err = controller.Watch( + &source.Kind{Type: &infrav1exp.AWSManagedControlPlane{}}, + &handler.EnqueueRequestsFromMapFunc{ + ToRequests: handler.ToRequestsFunc(r.managedControlPlaneToManagedCluster), + }, + ); err != nil { + return fmt.Errorf("failed adding watch on AWSManagedControlPlane: %w", err) + } + + return nil +} + +func (r *AWSManagedClusterReconciler) managedControlPlaneToManagedCluster(o handler.MapObject) []ctrl.Request { + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + + awsManagedControlPlane, ok := o.Object.(*infrav1exp.AWSManagedControlPlane) + if !ok { + r.Log.Error(nil, fmt.Sprintf("Expected a AWSManagedControlPlane but got a %T", o.Object)) + return nil + } + + if !awsManagedControlPlane.ObjectMeta.DeletionTimestamp.IsZero() { + r.Log.V(4).Info("AWSManagedControlPlane has a deletion timestamp, skipping mapping") + return nil + } + + cluster, err := util.GetOwnerCluster(ctx, r.Client, awsManagedControlPlane.ObjectMeta) + if err != nil { + r.Log.Error(err, "failed to get owning cluster") + return nil 
+ } + if cluster == nil { + r.Log.Info("no owning cluster, skipping mapping") + return nil + } + + managedClusterRef := cluster.Spec.InfrastructureRef + if managedClusterRef == nil || managedClusterRef.Kind != "AWSManagedCluster" { + r.Log.V(4).Info("InfrastructureRef is nil or not AWSManagedCluster, skipping mapping") + return nil + } + + return []ctrl.Request{ + { + NamespacedName: types.NamespacedName{ + Name: managedClusterRef.Name, + Namespace: managedClusterRef.Namespace, + }, + }, + } +} diff --git a/exp/controllers/awsmanagedcontrolplane_controller.go b/exp/controllers/awsmanagedcontrolplane_controller.go new file mode 100644 index 0000000000..98ed2fdfc1 --- /dev/null +++ b/exp/controllers/awsmanagedcontrolplane_controller.go @@ -0,0 +1,313 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllers + +import ( + "context" + "fmt" + "time" + + "github.com/go-logr/logr" + "github.com/pkg/errors" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" + "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/conditions" + "sigs.k8s.io/cluster-api/util/predicates" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" + + infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha3" + infrav1exp "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1alpha3" + "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope" + "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/ec2" + "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/eks" + "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/network" + "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/securitygroup" +) + +// AWSManagedControlPlaneReconciler reconciles a AWSManagedControlPlane object +type AWSManagedControlPlaneReconciler struct { + client.Client + Log logr.Logger + Recorder record.EventRecorder + + EnableIAM bool + AllowAdditionalRoles bool +} + +// SetupWithManager is used to setup the controller +func (r *AWSManagedControlPlaneReconciler) SetupWithManager(mgr ctrl.Manager, options controller.Options) error { + awsManagedControlPlane := &infrav1exp.AWSManagedControlPlane{} + c, err := ctrl.NewControllerManagedBy(mgr). + For(awsManagedControlPlane). + WithOptions(options). + WithEventFilter(predicates.ResourceNotPaused(r.Log)). 
+ Watches( + &source.Kind{Type: &infrav1exp.AWSManagedCluster{}}, + &handler.EnqueueRequestsFromMapFunc{ + ToRequests: handler.ToRequestsFunc(r.managedClusterToManagedControlPlane), + }, + ). + Build(r) + + if err != nil { + return fmt.Errorf("failed setting up the AWSManagedControlPlane controller manager: %w", err) + } + + if err = c.Watch( + &source.Kind{Type: &clusterv1.Cluster{}}, + &handler.EnqueueRequestsFromMapFunc{ + ToRequests: util.ClusterToInfrastructureMapFunc(awsManagedControlPlane.GroupVersionKind()), + }, + predicates.ClusterUnpausedAndInfrastructureReady(r.Log), + ); err != nil { + return fmt.Errorf("failed adding a watch for ready clusters: %w", err) + } + + return nil +} + +// +kubebuilder:rbac:groups=core,resources=events,verbs=get;list;watch;create;patch +// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmanagedcontrolplanes,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmanagedcontrolplanes/status,verbs=get;update;patch + +// Reconcile will reconcile AWSManagedControlPlane Resources +func (r *AWSManagedControlPlaneReconciler) Reconcile(req ctrl.Request) (res ctrl.Result, reterr error) { + logger := r.Log.WithValues("namespace", req.Namespace, "eksControlPlane", req.Name) + ctx := context.Background() + + // Get the control plane instance + awsControlPlane := &infrav1exp.AWSManagedControlPlane{} + if err := r.Client.Get(ctx, req.NamespacedName, awsControlPlane); err != nil { + if apierrors.IsNotFound(err) { + return ctrl.Result{}, nil + } + return ctrl.Result{Requeue: true}, nil + } + + // Get the cluster + cluster, err := util.GetOwnerCluster(ctx, r.Client, awsControlPlane.ObjectMeta) + if err != nil { + logger.Error(err, "Failed to retrieve owner Cluster from the API Server") + return ctrl.Result{}, err + } + if cluster == nil { + 
logger.Info("Cluster Controller has not yet set OwnerRef") + return ctrl.Result{}, nil + } + + if util.IsPaused(cluster, awsControlPlane) { + logger.Info("Reconciliation is paused for this object") + return ctrl.Result{}, nil + } + + logger = logger.WithValues("cluster", cluster.Name) + + managedScope, err := scope.NewManagedControlPlaneScope(scope.ManagedControlPlaneScopeParams{ + Client: r.Client, + Logger: logger, + Cluster: cluster, + ControlPlane: awsControlPlane, + ControllerName: "awsmanagedcontrolplane", + EnableIAM: r.EnableIAM, + AllowAdditionalRoles: r.AllowAdditionalRoles, + }) + if err != nil { + return reconcile.Result{}, fmt.Errorf("failed to create scope: %w", err) + } + + // Always close the scope + defer func() { + applicableConditions := []clusterv1.ConditionType{ + infrav1exp.EKSControlPlaneReadyCondition, + infrav1exp.IAMControlPlaneRolesReadyCondition, + infrav1.VpcReadyCondition, + infrav1.SubnetsReadyCondition, + infrav1.ClusterSecurityGroupsReadyCondition, + } + + if managedScope.VPC().IsManaged(managedScope.Name()) { + applicableConditions = append(applicableConditions, + infrav1.InternetGatewayReadyCondition, + infrav1.NatGatewaysReadyCondition, + infrav1.RouteTablesReadyCondition, + ) + if managedScope.Bastion().Enabled { + applicableConditions = append(applicableConditions, infrav1.BastionHostReadyCondition) + } + } + + conditions.SetSummary(managedScope.ControlPlane, conditions.WithConditions(applicableConditions...), conditions.WithStepCounter()) + + if err := managedScope.Close(); err != nil && reterr == nil { + reterr = err + } + }() + + if !awsControlPlane.ObjectMeta.DeletionTimestamp.IsZero() { + // Handle deletion reconciliation loop. + return r.reconcileDelete(ctx, managedScope) + } + + // Handle normal reconciliation loop. 
+ return r.reconcileNormal(ctx, managedScope) +} + +func (r *AWSManagedControlPlaneReconciler) reconcileNormal(ctx context.Context, managedScope *scope.ManagedControlPlaneScope) (res ctrl.Result, reterr error) { + managedScope.Info("Reconciling AWSManagedControlPlane") + + awsManagedControlPlane := managedScope.ControlPlane + + controllerutil.AddFinalizer(managedScope.ControlPlane, infrav1exp.ManagedControlPlaneFinalizer) + if err := managedScope.PatchObject(); err != nil { + return ctrl.Result{}, err + } + + sgRoles := []infrav1.SecurityGroupRole{ + infrav1.SecurityGroupBastion, + } + + ec2Service := ec2.NewService(managedScope) + networkSvc := network.NewService(managedScope) + ekssvc := eks.NewService(managedScope) + sgService := securitygroup.NewServiceWithRoles(managedScope, sgRoles) + + if err := networkSvc.ReconcileNetwork(); err != nil { + return reconcile.Result{}, fmt.Errorf("failed to reconcile network for AWSManagedControlPlane %s/%s: %w", awsManagedControlPlane.Namespace, awsManagedControlPlane.Name, err) + } + + if err := ec2Service.ReconcileBastion(); err != nil { + conditions.MarkFalse(awsManagedControlPlane, infrav1.BastionHostReadyCondition, infrav1.BastionHostFailedReason, clusterv1.ConditionSeverityError, err.Error()) + return reconcile.Result{}, fmt.Errorf("failed to reconcile bastion host for AWSManagedControlPlane %s/%s: %w", awsManagedControlPlane.Namespace, awsManagedControlPlane.Name, err) + } + + if err := sgService.ReconcileSecurityGroups(); err != nil { + conditions.MarkFalse(awsManagedControlPlane, infrav1.ClusterSecurityGroupsReadyCondition, infrav1.ClusterSecurityGroupReconciliationFailedReason, clusterv1.ConditionSeverityError, err.Error()) + return reconcile.Result{}, errors.Wrapf(err, "failed to reconcile general security groups for AWSManagedControlPlane %s/%s", awsManagedControlPlane.Namespace, awsManagedControlPlane.Name) + } + + if err := ekssvc.ReconcileControlPlane(ctx); err != nil { + return reconcile.Result{}, 
fmt.Errorf("failed to reconcile control plane for AWSManagedControlPlane %s/%s: %w", awsManagedControlPlane.Namespace, awsManagedControlPlane.Name, err) + } + + for _, subnet := range managedScope.Subnets().FilterPrivate() { + managedScope.SetFailureDomain(subnet.AvailabilityZone, clusterv1.FailureDomainSpec{ + ControlPlane: true, + }) + } + + return reconcile.Result{}, nil +} + +func (r *AWSManagedControlPlaneReconciler) reconcileDelete(_ context.Context, managedScope *scope.ManagedControlPlaneScope) (_ ctrl.Result, reterr error) { + managedScope.Info("Reconciling AWSManagedClusterPlane delete") + + controlPlane := managedScope.ControlPlane + + ekssvc := eks.NewService(managedScope) + ec2svc := ec2.NewService(managedScope) + networkSvc := network.NewService(managedScope) + sgService := securitygroup.NewService(managedScope) + + if err := ekssvc.DeleteControlPlane(); err != nil { + return reconcile.Result{}, fmt.Errorf("error deleting EKS cluster for EKS control plane %s/%s: %w", controlPlane.Namespace, controlPlane.Name, err) + } + + if err := sgService.DeleteSecurityGroups(); err != nil { + return reconcile.Result{}, fmt.Errorf("error deleting general security groups for AWSManagedControlPlane %s/%s: %w", controlPlane.Namespace, controlPlane.Name, err) //nolint:goerr113 + } + + if err := ec2svc.DeleteBastion(); err != nil { + return reconcile.Result{}, fmt.Errorf("error deleting bastion for AWSManagedControlPlane %s/%s: %w", controlPlane.Namespace, controlPlane.Name, err) + } + + if err := networkSvc.DeleteNetwork(); err != nil { + return reconcile.Result{}, fmt.Errorf("error deleting network for AWSManagedControlPlane %s/%s: %w", controlPlane.Namespace, controlPlane.Name, err) + } + + controllerutil.RemoveFinalizer(controlPlane, infrav1exp.ManagedControlPlaneFinalizer) + + return reconcile.Result{}, nil +} + +// ClusterToAWSManagedControlPlane is a handler.ToRequestsFunc to be used to enqueue requests for reconciliation +// for AWSManagedControlPlane based on 
updates to a Cluster. +func (r *AWSManagedControlPlaneReconciler) ClusterToAWSManagedControlPlane(o handler.MapObject) []ctrl.Request { + c, ok := o.Object.(*clusterv1.Cluster) + if !ok { + r.Log.Error(nil, fmt.Sprintf("Expected a Cluster but got a %T", o.Object)) + return nil + } + + controlPlaneRef := c.Spec.ControlPlaneRef + if controlPlaneRef != nil && controlPlaneRef.Kind == "AWSManagedControlPlane" { + return []ctrl.Request{{NamespacedName: client.ObjectKey{Namespace: controlPlaneRef.Namespace, Name: controlPlaneRef.Name}}} + } + + return nil +} + +func (r *AWSManagedControlPlaneReconciler) managedClusterToManagedControlPlane(o handler.MapObject) []ctrl.Request { + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + + awsManagedCluster, ok := o.Object.(*infrav1exp.AWSManagedCluster) + if !ok { + r.Log.Error(nil, fmt.Sprintf("Expected a AWSManagedCluster but got a %T", o.Object)) + return nil + } + + if !awsManagedCluster.ObjectMeta.DeletionTimestamp.IsZero() { + r.Log.V(4).Info("AWSManagedCluster has a deletion timestamp, skipping mapping") + return nil + } + + cluster, err := util.GetOwnerCluster(ctx, r.Client, awsManagedCluster.ObjectMeta) + if err != nil { + r.Log.Error(err, "failed to get owning cluster") + return nil + } + if cluster == nil { + r.Log.V(4).Info("Owning cluster not set on AWSManagedCluster, skipping mapping") + return nil + } + + controlPlaneRef := cluster.Spec.ControlPlaneRef + if controlPlaneRef == nil || controlPlaneRef.Kind != "AWSManagedControlPlane" { + r.Log.V(4).Info("ControlPlaneRef is nil or not AWSManagedControlPlane, skipping mapping") + return nil + } + + return []ctrl.Request{ + { + NamespacedName: types.NamespacedName{ + Name: controlPlaneRef.Name, + Namespace: controlPlaneRef.Namespace, + }, + }, + } +} diff --git a/exp/controllers/suite_test.go b/exp/controllers/suite_test.go new file mode 100644 index 0000000000..375a86fc3b --- /dev/null +++ b/exp/controllers/suite_test.go @@ -0,0 
+1,83 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "path/filepath" + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + infrav1exp "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1alpha3" + + //infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha3" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + // +kubebuilder:scaffold:imports +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
+ +var cfg *rest.Config +var k8sClient client.Client +var testEnv *envtest.Environment + +func TestAPIs(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecsWithDefaultAndCustomReporters(t, + "Controller Suite", + []Reporter{printer.NewlineReporter{}}) +} + +var _ = BeforeSuite(func(done Done) { + logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) + + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")}, + } + + var err error + cfg, err = testEnv.Start() + Expect(err).ToNot(HaveOccurred()) + Expect(cfg).ToNot(BeNil()) + + err = infrav1exp.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + // +kubebuilder:scaffold:scheme + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).ToNot(HaveOccurred()) + Expect(k8sClient).ToNot(BeNil()) + + close(done) +}, 60) + +var _ = AfterSuite(func() { + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).ToNot(HaveOccurred()) +}) diff --git a/exp/doc.go b/exp/doc.go new file mode 100644 index 0000000000..8a32e6835a --- /dev/null +++ b/exp/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package exp diff --git a/feature/feature.go b/feature/feature.go new file mode 100644 index 0000000000..d93d188527 --- /dev/null +++ b/feature/feature.go @@ -0,0 +1,58 @@ +/* +Copyright 2020 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package feature + +import ( + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/component-base/featuregate" +) + +const ( + // Every capa-specific feature gate should add method here following this template: + // + // // owner: @username + // // alpha: v1.X + // MyFeature featuregate.Feature = "MyFeature" + + // EKS is used to enable EKS support + // owner: @richardcase + // alpha: v0.4 + EKS featuregate.Feature = "EKS" + + // EKSEnableIAM will enable the IAM resource creation/modification + // owner: @richardcase + // alpha: v0.4 + EKSEnableIAM featuregate.Feature = "EKSEnableIAM" + + // EKSAllowAddRoles is used to enable the usage of additional IAM roles + // owner: @richardcase + // alpha: v0.4 + EKSAllowAddRoles featuregate.Feature = "EKSAllowAddRoles" +) + +func init() { + runtime.Must(MutableGates.Add(defaultCAPAFeatureGates)) +} + +// defaultCAPAFeatureGates consists of all known capa-specific feature keys. +// To add a new feature, define a key for it above and add it here. 
+var defaultCAPAFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{ + // Every feature should be initiated here: + EKS: {Default: false, PreRelease: featuregate.Alpha}, + EKSEnableIAM: {Default: false, PreRelease: featuregate.Alpha}, + EKSAllowAddRoles: {Default: false, PreRelease: featuregate.Alpha}, +} diff --git a/feature/gates.go b/feature/gates.go new file mode 100644 index 0000000000..bf73fb3ff0 --- /dev/null +++ b/feature/gates.go @@ -0,0 +1,34 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package feature + +import ( + "k8s.io/component-base/featuregate" + "sigs.k8s.io/cluster-api/feature" +) + +var ( + // MutableGates is a mutable version of DefaultFeatureGate. + // Only top-level commands/options setup and the k8s.io/component-base/featuregate/testing package should make use of this. + // Tests that need to modify featuregate gates for the duration of their test should use: + // defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features., )() + MutableGates featuregate.MutableFeatureGate = feature.MutableGates + + // Gates is a shared global FeatureGate. + // Top-level commands/options setup that needs to modify this featuregate gate should use DefaultMutableFeatureGate. 
+ Gates featuregate.FeatureGate = MutableGates +) diff --git a/go.mod b/go.mod index 068f758d9e..3f00bca018 100644 --- a/go.mod +++ b/go.mod @@ -21,6 +21,7 @@ require ( k8s.io/api v0.17.8 k8s.io/apimachinery v0.17.8 k8s.io/client-go v0.17.8 + k8s.io/component-base v0.17.8 k8s.io/klog v1.0.0 k8s.io/utils v0.0.0-20200619165400-6e3d28b6ed19 sigs.k8s.io/cluster-api v0.3.7 diff --git a/go.sum b/go.sum index 268aac808a..d9159cf1a4 100644 --- a/go.sum +++ b/go.sum @@ -526,6 +526,7 @@ golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299 h1:DYfZAGf2WMFjMxbgTjaC+2HC7NkNAQs+6Q8b9WEB/F4= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -636,6 +637,7 @@ k8s.io/client-go v0.17.8/go.mod h1:SJsDS64AAtt9VZyeaQMb4Ck5etCitZ/FwajWdzua5eY= k8s.io/cluster-bootstrap v0.17.8 h1:qee9dmkOVwngBf98zbwrij1s898EZ2aHg+ymXw1UBLU= k8s.io/cluster-bootstrap v0.17.8/go.mod h1:SC9J2Lt/MBOkxcCB04+5mYULLfDQL5kdM0BjtKaVCVU= k8s.io/code-generator v0.17.8/go.mod h1:iiHz51+oTx+Z9D0vB3CH3O4HDDPWrvZyUgUYaIE9h9M= +k8s.io/component-base v0.17.8 h1:3YilgRh9TcifVsKWReiZL1JfoUzqLesDc0wYIpimJN8= k8s.io/component-base v0.17.8/go.mod h1:xfNNdTAMsYzdiAa8vXnqDhRVSEgkfza0iMt0FrZDY7s= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod 
h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= diff --git a/main.go b/main.go index a60c03ac26..12c805468c 100644 --- a/main.go +++ b/main.go @@ -17,23 +17,31 @@ limitations under the License. package main import ( + "errors" "flag" + "fmt" "math/rand" "net/http" _ "net/http/pprof" "os" "time" + "github.com/spf13/pflag" "k8s.io/apimachinery/pkg/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" cgrecord "k8s.io/client-go/tools/record" "k8s.io/klog" "k8s.io/klog/klogr" + infrav1alpha2 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha2" infrav1alpha3 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha3" "sigs.k8s.io/cluster-api-provider-aws/controllers" + infrav1alpha3exp "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1alpha3" + controllersexp "sigs.k8s.io/cluster-api-provider-aws/exp/controllers" + "sigs.k8s.io/cluster-api-provider-aws/feature" "sigs.k8s.io/cluster-api-provider-aws/pkg/record" "sigs.k8s.io/cluster-api-provider-aws/version" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -42,102 +50,42 @@ import ( ) var ( - scheme = runtime.NewScheme() - setupLog = ctrl.Log.WithName("setup") + scheme = runtime.NewScheme() + setupLog = ctrl.Log.WithName("setup") + maxEKSSyncPeriod = time.Minute * 10 + errMaxSyncPeriodExceeded = errors.New("sync period greater than maximum allowed") + errEKSInvalidFlags = errors.New("invalid EKS flag combination") ) func init() { _ = clientgoscheme.AddToScheme(scheme) _ = infrav1alpha2.AddToScheme(scheme) _ = infrav1alpha3.AddToScheme(scheme) + _ = infrav1alpha3exp.AddToScheme(scheme) _ = clusterv1.AddToScheme(scheme) // +kubebuilder:scaffold:scheme } -func main() { - rand.Seed(time.Now().UnixNano()) +var ( + metricsAddr string + enableLeaderElection bool + leaderElectionNamespace string + watchNamespace string + profilerAddress string + awsClusterConcurrency int + awsMachineConcurrency int + syncPeriod time.Duration + webhookPort int + 
healthAddr string +) +func main() { klog.InitFlags(nil) - var ( - metricsAddr string - enableLeaderElection bool - leaderElectionNamespace string - watchNamespace string - profilerAddress string - awsClusterConcurrency int - awsMachineConcurrency int - syncPeriod time.Duration - webhookPort int - healthAddr string - ) - - flag.StringVar( - &metricsAddr, - "metrics-addr", - ":8080", - "The address the metric endpoint binds to.", - ) - - flag.BoolVar( - &enableLeaderElection, - "enable-leader-election", - false, - "Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.", - ) - - flag.StringVar( - &watchNamespace, - "namespace", - "", - "Namespace that the controller watches to reconcile cluster-api objects. If unspecified, the controller watches for cluster-api objects across all namespaces.", - ) - - flag.StringVar( - &leaderElectionNamespace, - "leader-election-namespace", - "", - "Namespace that the controller performs leader election in. If unspecified, the controller will discover which namespace it is running in.", - ) - - flag.StringVar( - &profilerAddress, - "profiler-address", - "", - "Bind address to expose the pprof profiler (e.g. localhost:6060)", - ) - - flag.IntVar(&awsClusterConcurrency, - "awscluster-concurrency", - 5, - "Number of AWSClusters to process simultaneously", - ) - - flag.IntVar(&awsMachineConcurrency, - "awsmachine-concurrency", - 10, - "Number of AWSMachines to process simultaneously", - ) - - flag.DurationVar(&syncPeriod, - "sync-period", - 10*time.Minute, - "The minimum interval at which watched resources are reconciled (e.g. 15m)", - ) - - flag.IntVar(&webhookPort, - "webhook-port", - 0, - "Webhook Server port, disabled by default. 
When enabled, the manager will only work as webhook server, no reconcilers are installed.", - ) - - flag.StringVar(&healthAddr, - "health-addr", - ":9440", - "The address the health endpoint binds to.", - ) - - flag.Parse() + rand.Seed(time.Now().UnixNano()) + initFlags(pflag.CommandLine) + pflag.CommandLine.AddGoFlagSet(flag.CommandLine) + pflag.Parse() ctrl.SetLogger(klogr.New()) @@ -178,6 +126,8 @@ func main() { // Initialize event recorder. record.InitFromRecorder(mgr.GetEventRecorderFor("aws-controller")) + setupLog.V(1).Info(fmt.Sprintf("%+v\n", feature.Gates)) + if webhookPort == 0 { if err = (&controllers.AWSMachineReconciler{ Client: mgr.GetClient(), @@ -195,6 +145,42 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "AWSCluster") os.Exit(1) } + + if feature.Gates.Enabled(feature.EKS) { + setupLog.Info("enabling EKS controllers") + if syncPeriod > maxEKSSyncPeriod { + setupLog.Error(errMaxSyncPeriodExceeded, "sync period exceeded maximum allowed when using EKS", "max-sync-period", maxEKSSyncPeriod) + os.Exit(1) + } + + enableIAM := feature.Gates.Enabled(feature.EKSEnableIAM) + allowAddRoles := feature.Gates.Enabled(feature.EKSAllowAddRoles) + + if allowAddRoles && !enableIAM { + setupLog.Error(errEKSInvalidFlags, "cannot use EKSAllowAddRoles flag without EKSEnableIAM") + os.Exit(1) + } + + if err = (&controllersexp.AWSManagedControlPlaneReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("AWSManagedControlPlane"), + Recorder: mgr.GetEventRecorderFor("awsmanagedcontrolplane-reconciler"), + AllowAdditionalRoles: allowAddRoles, + EnableIAM: enableIAM, + }).SetupWithManager(mgr, controller.Options{MaxConcurrentReconciles: awsClusterConcurrency}); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "AWSManagedControlPlane") + os.Exit(1) + } + if err = (&controllersexp.AWSManagedClusterReconciler{ + Client: mgr.GetClient(), + Log: 
ctrl.Log.WithName("controllers").WithName("AWSManagedCluster"), + Recorder: mgr.GetEventRecorderFor("awsmanagedcluster-reconciler"), + }).SetupWithManager(mgr, controller.Options{MaxConcurrentReconciles: awsClusterConcurrency}); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "AWSManagedCluster") + os.Exit(1) + } + } + } else { if err = (&infrav1alpha3.AWSMachineTemplate{}).SetupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "AWSMachineTemplate") @@ -220,6 +206,13 @@ func main() { setupLog.Error(err, "unable to create webhook", "webhook", "AWSClusterList") os.Exit(1) } + if feature.Gates.Enabled(feature.EKS) { + setupLog.Info("enabling EKS webhooks") + if err = (&infrav1alpha3exp.AWSManagedControlPlane{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "AWSManagedControlPlane") + os.Exit(1) + } + } } // +kubebuilder:scaffold:builder @@ -239,3 +232,72 @@ func main() { os.Exit(1) } } + +func initFlags(fs *pflag.FlagSet) { + fs.StringVar( + &metricsAddr, + "metrics-addr", + ":8080", + "The address the metric endpoint binds to.", + ) + + fs.BoolVar( + &enableLeaderElection, + "enable-leader-election", + false, + "Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.", + ) + + fs.StringVar( + &watchNamespace, + "namespace", + "", + "Namespace that the controller watches to reconcile cluster-api objects. If unspecified, the controller watches for cluster-api objects across all namespaces.", + ) + + fs.StringVar( + &leaderElectionNamespace, + "leader-election-namespace", + "", + "Namespace that the controller performs leader election in. If unspecified, the controller will discover which namespace it is running in.", + ) + + fs.StringVar( + &profilerAddress, + "profiler-address", + "", + "Bind address to expose the pprof profiler (e.g. 
localhost:6060)", + ) + + fs.IntVar(&awsClusterConcurrency, + "awscluster-concurrency", + 5, + "Number of AWSClusters to process simultaneously", + ) + + fs.IntVar(&awsMachineConcurrency, + "awsmachine-concurrency", + 10, + "Number of AWSMachines to process simultaneously", + ) + + fs.DurationVar(&syncPeriod, + "sync-period", + 10*time.Minute, + "The minimum interval at which watched resources are reconciled (e.g. 15m)", + ) + + fs.IntVar(&webhookPort, + "webhook-port", + 0, + "Webhook Server port, disabled by default. When enabled, the manager will only work as webhook server, no reconcilers are installed.", + ) + + fs.StringVar(&healthAddr, + "health-addr", + ":9440", + "The address the health endpoint binds to.", + ) + + feature.MutableGates.AddFlag(fs) +} diff --git a/pkg/cloud/converters/tags.go b/pkg/cloud/converters/tags.go index 5a91c930b2..8301ae857f 100644 --- a/pkg/cloud/converters/tags.go +++ b/pkg/cloud/converters/tags.go @@ -35,6 +35,17 @@ func TagsToMap(src []*ec2.Tag) infrav1.Tags { return tags } +// MapPtrToMap converts a [string]*string into a infrav1.Tags. +func MapPtrToMap(src map[string]*string) infrav1.Tags { + tags := make(infrav1.Tags, len(src)) + + for k, v := range src { + tags[k] = *v + } + + return tags +} + // MapToTags converts a infrav1.Tags to a []*ec2.Tag func MapToTags(src infrav1.Tags) []*ec2.Tag { tags := make([]*ec2.Tag, 0, len(src)) diff --git a/pkg/cloud/interfaces.go b/pkg/cloud/interfaces.go index b5a2815eaf..ac12c13137 100644 --- a/pkg/cloud/interfaces.go +++ b/pkg/cloud/interfaces.go @@ -50,12 +50,15 @@ type ClusterScoper interface { Session ScopeUsage - // Name returns the cluster name. + // Name returns the CAPI cluster name. Name() string // Namespace returns the cluster namespace. Namespace() string // Region returns the cluster region. Region() string + // KubernetesClusterName is the name of the Kubernetes cluster. 
For EKS this + // will differ to the CAPI cluster name + KubernetesClusterName() string // InfraCluster returns the AWS infrastructure cluster object. InfraCluster() ClusterObject diff --git a/pkg/cloud/scope/clients.go b/pkg/cloud/scope/clients.go index c565f1bc9e..15175ff853 100644 --- a/pkg/cloud/scope/clients.go +++ b/pkg/cloud/scope/clients.go @@ -21,12 +21,18 @@ import ( "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/ec2/ec2iface" + "github.com/aws/aws-sdk-go/service/eks" + "github.com/aws/aws-sdk-go/service/eks/eksiface" "github.com/aws/aws-sdk-go/service/elb" "github.com/aws/aws-sdk-go/service/elb/elbiface" + "github.com/aws/aws-sdk-go/service/iam" + "github.com/aws/aws-sdk-go/service/iam/iamiface" "github.com/aws/aws-sdk-go/service/resourcegroupstaggingapi" "github.com/aws/aws-sdk-go/service/resourcegroupstaggingapi/resourcegroupstaggingapiiface" "github.com/aws/aws-sdk-go/service/secretsmanager" "github.com/aws/aws-sdk-go/service/secretsmanager/secretsmanageriface" + "github.com/aws/aws-sdk-go/service/sts" + "github.com/aws/aws-sdk-go/service/sts/stsiface" "k8s.io/apimachinery/pkg/runtime" @@ -76,6 +82,36 @@ func NewSecretsManagerClient(scopeUser cloud.ScopeUsage, session cloud.Session, return secretsClient } +// NewEKSClient creates a new EKS API client for a given session +func NewEKSClient(scopeUser cloud.ScopeUsage, session cloud.Session, target runtime.Object) eksiface.EKSAPI { + eksClient := eks.New(session.Session()) + eksClient.Handlers.Build.PushFrontNamed(getUserAgentHandler()) + eksClient.Handlers.CompleteAttempt.PushFront(awsmetrics.CaptureRequestMetrics(scopeUser.ControllerName())) + eksClient.Handlers.Complete.PushBack(recordAWSPermissionsIssue(target)) + + return eksClient +} + +// NewIAMClient creates a new IAM API client for a given session +func NewIAMClient(scopeUser cloud.ScopeUsage, session cloud.Session, target runtime.Object) iamiface.IAMAPI { + iamClient := 
iam.New(session.Session()) + iamClient.Handlers.Build.PushFrontNamed(getUserAgentHandler()) + iamClient.Handlers.CompleteAttempt.PushFront(awsmetrics.CaptureRequestMetrics(scopeUser.ControllerName())) + iamClient.Handlers.Complete.PushBack(recordAWSPermissionsIssue(target)) + + return iamClient +} + +// NewSTSClient creates a new STS API client for a given session +func NewSTSClient(scopeUser cloud.ScopeUsage, session cloud.Session, target runtime.Object) stsiface.STSAPI { + stsClient := sts.New(session.Session()) + stsClient.Handlers.Build.PushFrontNamed(getUserAgentHandler()) + stsClient.Handlers.CompleteAttempt.PushFront(awsmetrics.CaptureRequestMetrics(scopeUser.ControllerName())) + stsClient.Handlers.Complete.PushBack(recordAWSPermissionsIssue(target)) + + return stsClient +} + func recordAWSPermissionsIssue(target runtime.Object) func(r *request.Request) { return func(r *request.Request) { if awsErr, ok := r.Error.(awserr.Error); ok { diff --git a/pkg/cloud/scope/cluster.go b/pkg/cloud/scope/cluster.go index 2e5fc14527..8efb59d5bf 100644 --- a/pkg/cloud/scope/cluster.go +++ b/pkg/cloud/scope/cluster.go @@ -121,7 +121,7 @@ func (s *ClusterScope) SecurityGroups() map[infrav1.SecurityGroupRole]infrav1.Se return s.AWSCluster.Status.Network.SecurityGroups } -// Name returns the cluster name. +// Name returns the CAPI cluster name. func (s *ClusterScope) Name() string { return s.Cluster.Name } @@ -136,6 +136,12 @@ func (s *ClusterScope) Region() string { return s.AWSCluster.Spec.Region } +// KubernetesClusterName is the name of the Kubernetes cluster. 
For the cluster +// scope this is the same as the CAPI cluster name +func (s *ClusterScope) KubernetesClusterName() string { + return s.Cluster.Name +} + // ControlPlaneLoadBalancer returns the AWSLoadBalancerSpec func (s *ClusterScope) ControlPlaneLoadBalancer() *infrav1.AWSLoadBalancerSpec { return s.AWSCluster.Spec.ControlPlaneLoadBalancer @@ -210,9 +216,7 @@ func (s *ClusterScope) SetFailureDomain(id string, spec clusterv1.FailureDomainS s.AWSCluster.Status.FailureDomains[id] = spec } -// InfraCluster returns the AWS infrastructure cluster object. -// Initially this will be AWSCluster but in the future it -// could also be AWSManagedCluster +// InfraCluster returns the AWS infrastructure cluster or control plane object. func (s *ClusterScope) InfraCluster() cloud.ClusterObject { return s.AWSCluster } diff --git a/pkg/cloud/scope/managedcontrolplane.go b/pkg/cloud/scope/managedcontrolplane.go new file mode 100644 index 0000000000..864a5facef --- /dev/null +++ b/pkg/cloud/scope/managedcontrolplane.go @@ -0,0 +1,269 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package scope + +import ( + "context" + + awsclient "github.com/aws/aws-sdk-go/aws/client" + "github.com/go-logr/logr" + "github.com/pkg/errors" + "k8s.io/klog/klogr" + + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" + "sigs.k8s.io/cluster-api/util/patch" + "sigs.k8s.io/controller-runtime/pkg/client" + + infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha3" + infrav1exp "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1alpha3" + "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud" +) + +// ManagedControlPlaneScopeParams defines the input parameters used to create a new Scope. +type ManagedControlPlaneScopeParams struct { + Client client.Client + Logger logr.Logger + Cluster *clusterv1.Cluster + ControlPlane *infrav1exp.AWSManagedControlPlane + ControllerName string + Session awsclient.ConfigProvider + + EnableIAM bool + AllowAdditionalRoles bool +} + +// NewManagedControlPlaneScope creates a new Scope from the supplied parameters. +// This is meant to be called for each reconcile iteration. 
+func NewManagedControlPlaneScope(params ManagedControlPlaneScopeParams) (*ManagedControlPlaneScope, error) { + if params.Cluster == nil { + return nil, errors.New("failed to generate new scope from nil Cluster") + } + if params.ControlPlane == nil { + return nil, errors.New("failed to generate new scope from nil AWSManagedControlPlane") + } + if params.Logger == nil { + params.Logger = klogr.New() + } + + session, err := sessionForRegion(params.ControlPlane.Spec.Region) + if err != nil { + return nil, errors.Errorf("failed to create aws session: %v", err) + } + + helper, err := patch.NewHelper(params.ControlPlane, params.Client) + if err != nil { + return nil, errors.Wrap(err, "failed to init patch helper") + } + + return &ManagedControlPlaneScope{ + Logger: params.Logger, + Client: params.Client, + Cluster: params.Cluster, + ControlPlane: params.ControlPlane, + patchHelper: helper, + session: session, + controllerName: params.ControllerName, + allowAdditionalRoles: params.AllowAdditionalRoles, + enableIAM: params.EnableIAM, + }, nil +} + +// ManagedControlPlaneScope defines the basic context for an actuator to operate upon. +type ManagedControlPlaneScope struct { + logr.Logger + Client client.Client + patchHelper *patch.Helper + + Cluster *clusterv1.Cluster + ControlPlane *infrav1exp.AWSManagedControlPlane + + session awsclient.ConfigProvider + controllerName string + + enableIAM bool + allowAdditionalRoles bool +} + +// Network returns the control plane network object. +func (s *ManagedControlPlaneScope) Network() *infrav1.Network { + return &s.ControlPlane.Status.Network +} + +// VPC returns the control plane VPC. +func (s *ManagedControlPlaneScope) VPC() *infrav1.VPCSpec { + return &s.ControlPlane.Spec.NetworkSpec.VPC +} + +// Subnets returns the control plane subnets. +func (s *ManagedControlPlaneScope) Subnets() infrav1.Subnets { + return s.ControlPlane.Spec.NetworkSpec.Subnets +} + +// SetSubnets updates the control planes subnets. 
+func (s *ManagedControlPlaneScope) SetSubnets(subnets infrav1.Subnets) { + s.ControlPlane.Spec.NetworkSpec.Subnets = subnets +} + +// CNIIngressRules returns the CNI spec ingress rules. +func (s *ManagedControlPlaneScope) CNIIngressRules() infrav1.CNIIngressRules { + if s.ControlPlane.Spec.NetworkSpec.CNI != nil { + return s.ControlPlane.Spec.NetworkSpec.CNI.CNIIngressRules + } + return infrav1.CNIIngressRules{} +} + +// SecurityGroups returns the control plane security groups as a map, it creates the map if empty. +func (s *ManagedControlPlaneScope) SecurityGroups() map[infrav1.SecurityGroupRole]infrav1.SecurityGroup { + return s.ControlPlane.Status.Network.SecurityGroups +} + +// Name returns the CAPI cluster name. +func (s *ManagedControlPlaneScope) Name() string { + return s.Cluster.Name +} + +// Namespace returns the cluster namespace. +func (s *ManagedControlPlaneScope) Namespace() string { + return s.Cluster.Namespace +} + +// Region returns the cluster region. +func (s *ManagedControlPlaneScope) Region() string { + return s.ControlPlane.Spec.Region +} + +// ListOptionsLabelSelector returns a ListOptions with a label selector for clusterName. +func (s *ManagedControlPlaneScope) ListOptionsLabelSelector() client.ListOption { + return client.MatchingLabels(map[string]string{ + clusterv1.ClusterLabelName: s.Cluster.Name, + }) +} + +// PatchObject persists the control plane configuration and status. 
+func (s *ManagedControlPlaneScope) PatchObject() error { + return s.patchHelper.Patch( + context.TODO(), + s.ControlPlane, + patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + infrav1.VpcReadyCondition, + infrav1.SubnetsReadyCondition, + infrav1.InternetGatewayReadyCondition, + infrav1.NatGatewaysReadyCondition, + infrav1.RouteTablesReadyCondition, + infrav1.BastionHostReadyCondition, + infrav1exp.EKSControlPlaneReadyCondition, + infrav1exp.IAMControlPlaneRolesReadyCondition, + }}) +} + +// Close closes the current scope persisting the control plane configuration and status. +func (s *ManagedControlPlaneScope) Close() error { + return s.PatchObject() +} + +// AdditionalTags returns AdditionalTags from the scope's EksControlPlane. The returned value will never be nil. +func (s *ManagedControlPlaneScope) AdditionalTags() infrav1.Tags { + if s.ControlPlane.Spec.AdditionalTags == nil { + s.ControlPlane.Spec.AdditionalTags = infrav1.Tags{} + } + + return s.ControlPlane.Spec.AdditionalTags.DeepCopy() +} + +// APIServerPort returns the port to use when communicating with the API server +func (s *ManagedControlPlaneScope) APIServerPort() int32 { + return 443 +} + +// SetFailureDomain sets the infrastructure provider failure domain key to the spec given as input. +func (s *ManagedControlPlaneScope) SetFailureDomain(id string, spec clusterv1.FailureDomainSpec) { + if s.ControlPlane.Status.FailureDomains == nil { + s.ControlPlane.Status.FailureDomains = make(clusterv1.FailureDomains) + } + s.ControlPlane.Status.FailureDomains[id] = spec +} + +// InfraCluster returns the AWS infrastructure cluster or control plane object. +func (s *ManagedControlPlaneScope) InfraCluster() cloud.ClusterObject { + return s.ControlPlane +} + +// Session returns the AWS SDK session. Used for creating clients +func (s *ManagedControlPlaneScope) Session() awsclient.ConfigProvider { + return s.session +} + +// Bastion returns the bastion details. 
+func (s *ManagedControlPlaneScope) Bastion() *infrav1.Bastion { + return &s.ControlPlane.Spec.Bastion +} + +// SetBastionInstance sets the bastion instance in the status of the cluster. +func (s *ManagedControlPlaneScope) SetBastionInstance(instance *infrav1.Instance) { + s.ControlPlane.Status.Bastion = instance +} + +// SSHKeyName returns the SSH key name to use for instances. +func (s *ManagedControlPlaneScope) SSHKeyName() *string { + return s.ControlPlane.Spec.SSHKeyName +} + +// ControllerName returns the name of the controller that +// created the ManagedControlPlane. +func (s *ManagedControlPlaneScope) ControllerName() string { + return s.controllerName +} + +// TokenMethod returns the token method to use in the kubeconfig +func (s *ManagedControlPlaneScope) TokenMethod() infrav1exp.EKSTokenMethod { + if s.ControlPlane.Spec.TokenMethod != nil { + return *s.ControlPlane.Spec.TokenMethod + } + + return infrav1exp.EKSTokenMethodIAMAuthenticator +} + +// KubernetesClusterName is the name of the Kubernetes cluster. 
For the managed +// scope this is the different to the CAPI cluster name and is the EKS cluster name +func (s *ManagedControlPlaneScope) KubernetesClusterName() string { + return s.ControlPlane.Spec.EKSClusterName +} + +// EnableIAM indicates that reconciliation should create IAM roles +func (s *ManagedControlPlaneScope) EnableIAM() bool { + return s.enableIAM +} + +// AllowAdditionalRoles indicates if additional roles can be added to the created IAM roles +func (s *ManagedControlPlaneScope) AllowAdditionalRoles() bool { + return s.allowAdditionalRoles +} + +// ImageLookupFormat returns the format string to use when looking up AMIs +func (s *ManagedControlPlaneScope) ImageLookupFormat() string { + return s.ControlPlane.Spec.ImageLookupFormat +} + +// ImageLookupOrg returns the organization name to use when looking up AMIs +func (s *ManagedControlPlaneScope) ImageLookupOrg() string { + return s.ControlPlane.Spec.ImageLookupOrg +} + +// ImageLookupBaseOS returns the base operating system name to use when looking up AMIs +func (s *ManagedControlPlaneScope) ImageLookupBaseOS() string { + return s.ControlPlane.Spec.ImageLookupBaseOS +} diff --git a/pkg/cloud/services/eks/cluster.go b/pkg/cloud/services/eks/cluster.go new file mode 100644 index 0000000000..a625621338 --- /dev/null +++ b/pkg/cloud/services/eks/cluster.go @@ -0,0 +1,529 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package eks
+
+import (
+	"context"
+	"fmt"
+	"net"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/service/eks"
+
+	"github.com/pkg/errors"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/apimachinery/pkg/util/version"
+
+	"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/awserrors"
+	"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/wait"
+	"sigs.k8s.io/cluster-api-provider-aws/pkg/internal/tristate"
+	"sigs.k8s.io/cluster-api-provider-aws/pkg/record"
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
+
+	infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha3"
+	infrav1exp "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1alpha3"
+)
+
+func (s *Service) reconcileCluster(ctx context.Context) error {
+	s.scope.V(2).Info("Reconciling EKS cluster")
+
+	eksClusterName := s.scope.KubernetesClusterName()
+
+	cluster, err := s.describeEKSCluster(eksClusterName)
+	if err != nil {
+		return errors.Wrap(err, "failed to describe eks clusters")
+	}
+
+	if cluster == nil {
+		cluster, err = s.createCluster(eksClusterName)
+		if err != nil {
+			return errors.Wrap(err, "failed to create cluster")
+		}
+		s.scope.Info("Created EKS cluster in AWS", "cluster-name", eksClusterName)
+	} else {
+		tagKey := infrav1.ClusterAWSCloudProviderTagKey(s.scope.Name())
+		ownedTag := cluster.Tags[tagKey]
+		if ownedTag == nil {
+			return errors.Errorf("EKS cluster %s is not owned by cluster %s", eksClusterName, s.scope.Name())
+		}
+
+		s.scope.V(2).Info("Found owned EKS cluster in AWS", "cluster-name", eksClusterName)
+	}
+
+	if err := s.setStatus(cluster); err != nil {
+		return errors.Wrap(err, "failed to set status")
+	}
+
+	// Wait for our cluster to be ready if necessary
+	switch *cluster.Status {
+	case eks.ClusterStatusUpdating:
+		cluster, err = s.waitForClusterUpdate()
+	case eks.ClusterStatusCreating:
+		cluster, err = s.waitForClusterActive()
+	default:
+		break
+	}
+	if err != nil {
+		return errors.Wrap(err, "failed
to wait for cluster to be active") + } + + if !s.scope.ControlPlane.Status.Ready { + return nil + } + + s.scope.V(2).Info("EKS Control Plane active", "endpoint", *cluster.Endpoint) + + s.scope.ControlPlane.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{ + Host: *cluster.Endpoint, + Port: 443, + } + + if err := s.reconcileSecurityGroups(cluster); err != nil { + return errors.Wrap(err, "failed reconciling security groups") + } + + if err := s.reconcileKubeconfig(ctx, cluster); err != nil { + return errors.Wrap(err, "failed reconciling kubeconfig") + } + + if err := s.reconcileAdditionalKubeconfigs(ctx, cluster); err != nil { + return errors.Wrap(err, "failed reconciling additional kubeconfigs") + } + + if err := s.reconcileClusterVersion(cluster); err != nil { + return errors.Wrap(err, "failed reconciling cluster version") + } + + if err := s.reconcileClusterConfig(cluster); err != nil { + return errors.Wrap(err, "failed reconciling cluster config") + } + + if err := s.reconcileTags(cluster); err != nil { + return errors.Wrap(err, "failed updating cluster tags") + } + + return nil +} + +func (s *Service) setStatus(cluster *eks.Cluster) error { + switch *cluster.Status { + case eks.ClusterStatusDeleting: + s.scope.ControlPlane.Status.Ready = false + case eks.ClusterStatusFailed: + s.scope.ControlPlane.Status.Ready = false + // TODO FailureReason + failureMsg := fmt.Sprintf("EKS cluster in unexpected %s state", *cluster.Status) + s.scope.ControlPlane.Status.FailureMessage = &failureMsg + case eks.ClusterStatusActive: + s.scope.ControlPlane.Status.Ready = true + s.scope.ControlPlane.Status.FailureMessage = nil + // TODO FailureReason + case eks.ClusterStatusCreating: + s.scope.ControlPlane.Status.Ready = false + case eks.ClusterStatusUpdating: + s.scope.ControlPlane.Status.Ready = true + default: + return errors.Errorf("unexpected EKS cluster status %s", *cluster.Status) + } + if err := s.scope.PatchObject(); err != nil { + return errors.Wrap(err, "failed to update 
control plane") + } + return nil +} + +// deleteCluster deletes an EKS cluster +func (s *Service) deleteCluster() error { + eksClusterName := s.scope.KubernetesClusterName() + + if eksClusterName == "" { + s.scope.V(2).Info("no EKS cluster name, skipping EKS cluster deletion") + return nil + } + + cluster, err := s.describeEKSCluster(eksClusterName) + if err != nil { + if awserrors.IsNotFound(err) { + s.scope.V(4).Info("eks cluster does not exist") + return nil + } + return errors.Wrap(err, "unable to describe eks cluster") + } + if cluster == nil { + return nil + } + + err = s.deleteClusterAndWait(cluster) + if err != nil { + record.Warnf(s.scope.ControlPlane, "FailedDeleteEKSCluster", "Failed to delete EKS cluster %s: %v", s.scope.KubernetesClusterName(), err) + return errors.Wrap(err, "unable to delete EKS cluster") + } + record.Eventf(s.scope.ControlPlane, "SuccessfulDeleteEKSCluster", "Deleted EKS Cluster %s", s.scope.KubernetesClusterName()) + + return nil +} + +func (s *Service) deleteClusterAndWait(cluster *eks.Cluster) error { + s.scope.Info("Deleting EKS cluster", "cluster-name", s.scope.KubernetesClusterName()) + + input := &eks.DeleteClusterInput{ + Name: cluster.Name, + } + _, err := s.EKSClient.DeleteCluster(input) + if err != nil { + return errors.Wrapf(err, "failed to request delete of eks cluster %s", *cluster.Name) + } + + waitInput := &eks.DescribeClusterInput{ + Name: cluster.Name, + } + + err = s.EKSClient.WaitUntilClusterDeleted(waitInput) + if err != nil { + return errors.Wrapf(err, "failed waiting for eks cluster %s to delete", *cluster.Name) + } + + return nil +} + +func makeEksEncryptionConfigs(encryptionConfig *infrav1exp.EncryptionConfig) []*eks.EncryptionConfig { + if encryptionConfig == nil { + return []*eks.EncryptionConfig{} + } + return []*eks.EncryptionConfig{{ + Provider: &eks.Provider{ + KeyArn: encryptionConfig.Provider, + }, + Resources: encryptionConfig.Resources, + }} +} + +func makeVpcConfig(subnets infrav1.Subnets, 
endpointAccess infrav1exp.EndpointAccess) (*eks.VpcConfigRequest, error) { + // TODO: Do we need to just add the private subnets? + if len(subnets) < 2 { + return nil, awserrors.NewFailedDependency("at least 2 subnets is required") + } + + zones := subnets.GetUniqueZones() + if len(zones) < 2 { + return nil, awserrors.NewFailedDependency("subnets in at least 2 different az's are required") + } + + subnetIds := make([]*string, 0) + for _, subnet := range subnets { + subnetIds = append(subnetIds, &subnet.ID) + } + + cidrs := make([]*string, 0) + for _, cidr := range endpointAccess.PublicCIDRs { + _, ipNet, err := net.ParseCIDR(*cidr) + if err != nil { + return nil, errors.Wrap(err, "couldn't parse PublicCIDRs") + } + parsedCIDR := ipNet.String() + cidrs = append(cidrs, &parsedCIDR) + } + + vpcConfig := &eks.VpcConfigRequest{ + EndpointPublicAccess: endpointAccess.Public, + EndpointPrivateAccess: endpointAccess.Private, + SubnetIds: subnetIds, + } + + if len(cidrs) > 0 { + vpcConfig.PublicAccessCidrs = cidrs + } + + return vpcConfig, nil +} + +func makeEksLogging(loggingSpec *infrav1exp.ControlPlaneLoggingSpec) *eks.Logging { + if loggingSpec == nil { + return nil + } + var on = true + var off = false + var enabledTypes []string + var disabledTypes []string + + appendToTypes := func(logType string, field bool) { + if field { + enabledTypes = append(enabledTypes, logType) + } else { + disabledTypes = append(disabledTypes, logType) + } + } + + appendToTypes(eks.LogTypeApi, loggingSpec.APIServer) + appendToTypes(eks.LogTypeAudit, loggingSpec.Audit) + appendToTypes(eks.LogTypeAuthenticator, loggingSpec.Authenticator) + appendToTypes(eks.LogTypeControllerManager, loggingSpec.ControllerManager) + appendToTypes(eks.LogTypeScheduler, loggingSpec.Scheduler) + + var clusterLogging []*eks.LogSetup + if len(enabledTypes) > 0 { + enabled := eks.LogSetup{ + Enabled: &on, + Types: aws.StringSlice(enabledTypes), + } + clusterLogging = append(clusterLogging, &enabled) + } + if 
len(disabledTypes) > 0 { + disabled := eks.LogSetup{ + Enabled: &off, + Types: aws.StringSlice(disabledTypes), + } + clusterLogging = append(clusterLogging, &disabled) + } + if len(clusterLogging) > 0 { + return &eks.Logging{ + ClusterLogging: clusterLogging, + } + } + return nil +} + +func (s *Service) createCluster(eksClusterName string) (*eks.Cluster, error) { + logging := makeEksLogging(s.scope.ControlPlane.Spec.Logging) + encryptionConfigs := makeEksEncryptionConfigs(s.scope.ControlPlane.Spec.EncryptionConfig) + vpcConfig, err := makeVpcConfig(s.scope.Subnets(), s.scope.ControlPlane.Spec.EndpointAccess) + if err != nil { + return nil, errors.Wrap(err, "couldn't create vpc config for cluster") + } + + // Make sure to use the MachineScope here to get the merger of AWSCluster and AWSMachine tags + additionalTags := s.scope.AdditionalTags() + + // Set the cloud provider tag + additionalTags[infrav1.ClusterAWSCloudProviderTagKey(s.scope.Name())] = string(infrav1.ResourceLifecycleOwned) + tags := make(map[string]*string) + for k, v := range additionalTags { + tagValue := v + tags[k] = &tagValue + } + + role, err := s.getIAMRole(*s.scope.ControlPlane.Spec.RoleName) + if err != nil { + return nil, errors.Wrapf(err, "error getting control plane iam role: %s", *s.scope.ControlPlane.Spec.RoleName) + } + + v := versionToEKS(parseEKSVersion(*s.scope.ControlPlane.Spec.Version)) + + input := &eks.CreateClusterInput{ + Name: aws.String(eksClusterName), + //ClientRequestToken: aws.String(uuid.New().String()), + Version: aws.String(v), + Logging: logging, + EncryptionConfig: encryptionConfigs, + ResourcesVpcConfig: vpcConfig, + RoleArn: role.Arn, + Tags: tags, + } + + var out *eks.CreateClusterOutput + if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) { + if out, err = s.EKSClient.CreateCluster(input); err != nil { + if aerr, ok := err.(awserr.Error); ok { + return false, aerr + } + return false, err + } + return true, nil + }, 
 awserrors.ResourceNotFound); err != nil { //TODO: change the error that can be retried
+		record.Warnf(s.scope.ControlPlane, "FailedCreateEKSCluster", "Failed to create a new EKS cluster: %v", err)
+		return nil, errors.Wrapf(err, "failed to create EKS cluster")
+	}
+
+	record.Eventf(s.scope.ControlPlane, "SuccessfulCreateEKSCluster", "Created a new EKS cluster %q", s.scope.Name())
+	return out.Cluster, nil
+}
+
+func (s *Service) waitForClusterActive() (*eks.Cluster, error) {
+	eksClusterName := s.scope.KubernetesClusterName()
+	req := eks.DescribeClusterInput{
+		Name: aws.String(eksClusterName),
+	}
+	if err := s.EKSClient.WaitUntilClusterActive(&req); err != nil {
+		return nil, errors.Wrapf(err, "failed to wait for eks control plane %q", *req.Name)
+	}
+
+	s.scope.Info("EKS control plane is now available", "cluster-name", eksClusterName)
+
+	cluster, err := s.describeEKSCluster(eksClusterName)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to describe eks clusters")
+	}
+	if err := s.setStatus(cluster); err != nil {
+		return nil, errors.Wrap(err, "failed to set status")
+	}
+
+	return cluster, nil
+}
+
+func (s *Service) reconcileClusterConfig(cluster *eks.Cluster) error {
+	var needsUpdate bool
+	input := eks.UpdateClusterConfigInput{Name: aws.String(s.scope.KubernetesClusterName())}
+
+	if updateLogging := s.reconcileLogging(cluster.Logging); updateLogging != nil {
+		needsUpdate = true
+		input.Logging = updateLogging
+	}
+
+	updateVpcConfig, err := s.reconcileVpcConfig(cluster.ResourcesVpcConfig)
+	if err != nil {
+		return errors.Wrap(err, "couldn't create vpc config for cluster")
+	}
+	if updateVpcConfig != nil {
+		needsUpdate = true
+		input.ResourcesVpcConfig = updateVpcConfig
+	}
+
+	if needsUpdate {
+		if err := input.Validate(); err != nil {
+			return errors.Wrap(err, "created invalid UpdateClusterConfigInput")
+		}
+		if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) {
+			if _, err := s.EKSClient.UpdateClusterConfig(&input);
err != nil { + if aerr, ok := err.(awserr.Error); ok { + return false, aerr + } + return false, err + } + return true, nil + }); err != nil { + record.Warnf(s.scope.ControlPlane, "FailedUpdateEKSControlPlane", "failed to update the EKS control plane %s: %v", s.scope.KubernetesClusterName(), err) + return errors.Wrapf(err, "failed to update EKS cluster") + } + } + return nil +} + +func (s *Service) reconcileLogging(logging *eks.Logging) *eks.Logging { + for _, logSetup := range logging.ClusterLogging { + for _, l := range logSetup.Types { + enabled := s.scope.ControlPlane.Spec.Logging.IsLogEnabled(*l) + if enabled != *logSetup.Enabled { + return makeEksLogging(s.scope.ControlPlane.Spec.Logging) + } + } + } + return nil +} + +func publicAccessCIDRsEqual(as []*string, bs []*string) bool { + all := "0.0.0.0/0" + if len(as) == 0 { + as = []*string{&all} + } + if len(bs) == 0 { + bs = []*string{&all} + } + return sets.NewString(aws.StringValueSlice(as)...).Equal( + sets.NewString(aws.StringValueSlice(bs)...), + ) +} + +func (s *Service) reconcileVpcConfig(vpcConfig *eks.VpcConfigResponse) (*eks.VpcConfigRequest, error) { + endpointAccess := s.scope.ControlPlane.Spec.EndpointAccess + updatedVpcConfig, err := makeVpcConfig(s.scope.Subnets(), endpointAccess) + if err != nil { + return nil, err + } + needsUpdate := !tristate.EqualWithDefault(false, vpcConfig.EndpointPrivateAccess, updatedVpcConfig.EndpointPrivateAccess) || + !tristate.EqualWithDefault(true, vpcConfig.EndpointPublicAccess, updatedVpcConfig.EndpointPublicAccess) || + !publicAccessCIDRsEqual(vpcConfig.PublicAccessCidrs, updatedVpcConfig.PublicAccessCidrs) + if needsUpdate { + return &eks.VpcConfigRequest{ + EndpointPublicAccess: updatedVpcConfig.EndpointPublicAccess, + EndpointPrivateAccess: updatedVpcConfig.EndpointPrivateAccess, + PublicAccessCidrs: updatedVpcConfig.PublicAccessCidrs, + }, nil + } + return nil, nil +} + +func parseEKSVersion(raw string) *version.Version { + v := version.MustParseGeneric(raw) 
+ return version.MustParseGeneric(fmt.Sprintf("%d.%d", v.Major(), v.Minor())) +} + +func versionToEKS(v *version.Version) string { + return fmt.Sprintf("%d.%d", v.Major(), v.Minor()) +} + +func (s *Service) reconcileClusterVersion(cluster *eks.Cluster) error { + specVersion := parseEKSVersion(*s.scope.ControlPlane.Spec.Version) + clusterVersion := version.MustParseGeneric(*cluster.Version) + + if clusterVersion.LessThan(specVersion) { + // NOTE: you can only upgrade increments of minor versions. If you want to upgrade 1.14 to 1.16 we + // need to go 1.14-> 1.15 and then 1.15 -> 1.16. + nextVersionString := versionToEKS(clusterVersion.WithMinor(clusterVersion.Minor() + 1)) + + input := &eks.UpdateClusterVersionInput{ + Name: aws.String(s.scope.KubernetesClusterName()), + Version: &nextVersionString, + } + + if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) { + if _, err := s.EKSClient.UpdateClusterVersion(input); err != nil { + if aerr, ok := err.(awserr.Error); ok { + return false, aerr + } + return false, err + } + record.Eventf(s.scope.ControlPlane, "SuccessfulUpdateEKSControlPlane", "Updated EKS control plane %s to version %s", s.scope.KubernetesClusterName(), nextVersionString) + return true, nil + }); err != nil { + record.Warnf(s.scope.ControlPlane, "FailedUpdateEKSControlPlane", "failed to update the EKS control plane: %v", err) + return errors.Wrapf(err, "failed to update EKS cluster") + } + } + return nil +} + +func (s *Service) waitForClusterUpdate() (*eks.Cluster, error) { + cluster, err := s.waitForClusterActive() + if err != nil { + return nil, err + } + + record.Eventf(s.scope.ControlPlane, "SuccessfulUpdateEKSControlPlane", "Updated EKS control plane %s", s.scope.KubernetesClusterName()) + return cluster, nil +} + +func (s *Service) describeEKSCluster(eksClusterName string) (*eks.Cluster, error) { + input := &eks.DescribeClusterInput{ + Name: aws.String(eksClusterName), + } + + out, err := s.EKSClient.DescribeCluster(input) 
+ if err != nil { + if aerr, ok := err.(awserr.Error); ok { + switch aerr.Code() { + case eks.ErrCodeResourceNotFoundException: + return nil, nil + default: + return nil, errors.Wrap(err, "failed to describe cluster") + } + } else { + return nil, errors.Wrap(err, "failed to describe cluster") + } + } + + return out.Cluster, nil +} diff --git a/pkg/cloud/services/eks/cluster_test.go b/pkg/cloud/services/eks/cluster_test.go new file mode 100644 index 0000000000..6e6067b95a --- /dev/null +++ b/pkg/cloud/services/eks/cluster_test.go @@ -0,0 +1,391 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package eks + +import ( + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/eks" + "github.com/golang/mock/gomock" + . 
"github.com/onsi/gomega" + "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/version" + infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha3" + infrav1exp "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1alpha3" + "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope" + "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/eks/mock_eksiface" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" +) + +func TestMakeEksEncryptionConfigs(t *testing.T) { + providerOne := "provider" + resourceOne := "resourceOne" + resourceTwo := "resourceTwo" + testCases := []struct { + name string + input *infrav1exp.EncryptionConfig + expect []*eks.EncryptionConfig + }{ + { + name: "nil input", + input: nil, + expect: []*eks.EncryptionConfig{}, + }, + { + name: "nil input", + input: &infrav1exp.EncryptionConfig{ + Provider: &providerOne, + Resources: []*string{&resourceOne, &resourceTwo}, + }, + expect: []*eks.EncryptionConfig{{ + Provider: &eks.Provider{KeyArn: &providerOne}, + Resources: []*string{&resourceOne, &resourceTwo}, + }}, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + g.Expect(makeEksEncryptionConfigs(tc.input)).To(Equal(tc.expect)) + }) + } +} + +func TestParseEKSVersion(t *testing.T) { + testCases := []struct { + name string + input string + expect version.Version + }{ + { + name: "with patch", + input: "1.17.8", + expect: *version.MustParseGeneric("1.17"), + }, + { + name: "with v", + input: "v1.17.8", + expect: *version.MustParseGeneric("1.17"), + }, + { + name: "no patch", + input: "1.17", + expect: *version.MustParseGeneric("1.17"), + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + g.Expect(*parseEKSVersion(tc.input)).To(Equal(tc.expect)) + }) + } +} +func TestVersionToEKS(t *testing.T) { + testCases := []struct { + name string + input *version.Version + expect string + }{ + { + name: "with patch", + 
input: version.MustParseGeneric("1.17.8"), + expect: "1.17", + }, + { + name: "no patch", + input: version.MustParseGeneric("1.17"), + expect: "1.17", + }, + { + name: "with extra data", + input: version.MustParseGeneric("1.17-alpha"), + expect: "1.17", + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + g.Expect(versionToEKS(tc.input)).To(Equal(tc.expect)) + }) + } +} + +func TestMakeVPCConfig(t *testing.T) { + type input struct { + subnets infrav1.Subnets + endpointAccess infrav1exp.EndpointAccess + } + + subnetIDOne := "one" + subnetIDTwo := "two" + testCases := []struct { + name string + input input + err bool + expect *eks.VpcConfigRequest + }{ + { + name: "no subnets", + input: input{ + subnets: nil, + endpointAccess: infrav1exp.EndpointAccess{}, + }, + err: true, + expect: nil, + }, + { + name: "enough subnets", + input: input{ + subnets: []*infrav1.SubnetSpec{ + { + ID: subnetIDOne, + CidrBlock: "10.0.10.0/24", + AvailabilityZone: "us-west-2a", + IsPublic: true, + }, + { + ID: subnetIDTwo, + CidrBlock: "10.0.10.0/24", + AvailabilityZone: "us-west-2b", + IsPublic: false, + }, + }, + endpointAccess: infrav1exp.EndpointAccess{}, + }, + expect: &eks.VpcConfigRequest{ + SubnetIds: []*string{&subnetIDOne, &subnetIDTwo}, + }, + }, + { + name: "non canonical public access CIDR", + input: input{ + subnets: []*infrav1.SubnetSpec{ + { + ID: subnetIDOne, + CidrBlock: "10.0.10.0/24", + AvailabilityZone: "us-west-2a", + IsPublic: true, + }, + { + ID: subnetIDTwo, + CidrBlock: "10.0.10.1/24", + AvailabilityZone: "us-west-2b", + IsPublic: false, + }, + }, + endpointAccess: infrav1exp.EndpointAccess{ + PublicCIDRs: []*string{aws.String("10.0.0.1/24")}, + }, + }, + expect: &eks.VpcConfigRequest{ + SubnetIds: []*string{&subnetIDOne, &subnetIDTwo}, + PublicAccessCidrs: []*string{aws.String("10.0.0.0/24")}, + }, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + config, err := 
makeVpcConfig(tc.input.subnets, tc.input.endpointAccess) + if tc.err { + g.Expect(err).To(HaveOccurred()) + } else { + g.Expect(config).To(Equal(tc.expect)) + } + }) + } + +} + +func TestPublicAccessCIDRsEqual(t *testing.T) { + testCases := []struct { + name string + a []*string + b []*string + expect bool + }{ + { + name: "no CIDRs", + a: nil, + b: nil, + expect: true, + }, + { + name: "every address", + a: []*string{aws.String("0.0.0.0/0")}, + b: nil, + expect: true, + }, + { + name: "every address", + a: []*string{aws.String("1.1.1.0/24")}, + b: []*string{aws.String("1.1.1.0/24")}, + expect: true, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + g.Expect(publicAccessCIDRsEqual(tc.a, tc.b)).To(Equal(tc.expect)) + }) + } +} + +func TestMakeEKSLogging(t *testing.T) { + testCases := []struct { + name string + input *infrav1exp.ControlPlaneLoggingSpec + expect *eks.Logging + }{ + { + name: "no subnets", + input: nil, + expect: nil, + }, + { + name: "some enabled, some disabled", + input: &infrav1exp.ControlPlaneLoggingSpec{ + APIServer: true, + Audit: false, + }, + expect: &eks.Logging{ + ClusterLogging: []*eks.LogSetup{ + { + Enabled: aws.Bool(true), + Types: []*string{aws.String(eks.LogTypeApi)}, + }, + { + Enabled: aws.Bool(false), + Types: []*string{ + aws.String(eks.LogTypeAudit), + aws.String(eks.LogTypeAuthenticator), + aws.String(eks.LogTypeControllerManager), + aws.String(eks.LogTypeScheduler), + }, + }, + }, + }, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + logging := makeEksLogging(tc.input) + g.Expect(logging).To(Equal(tc.expect)) + }) + } +} + +func TestReconcileClusterVersion(t *testing.T) { + clusterName := "default.cluster" + tests := []struct { + name string + expect func(m *mock_eksiface.MockEKSAPIMockRecorder) + expectError bool + }{ + { + name: "no upgrade necessary", + expect: func(m *mock_eksiface.MockEKSAPIMockRecorder) { + m. 
+ DescribeCluster(gomock.AssignableToTypeOf(&eks.DescribeClusterInput{})). + Return(&eks.DescribeClusterOutput{ + Cluster: &eks.Cluster{ + Name: aws.String("default.cluster"), + Version: aws.String("1.16"), + }, + }, nil) + }, + expectError: false, + }, + { + name: "needs upgrade", + expect: func(m *mock_eksiface.MockEKSAPIMockRecorder) { + m. + DescribeCluster(gomock.AssignableToTypeOf(&eks.DescribeClusterInput{})). + Return(&eks.DescribeClusterOutput{ + Cluster: &eks.Cluster{ + Name: aws.String("default.cluster"), + Version: aws.String("1.14"), + }, + }, nil) + m. + UpdateClusterVersion(gomock.AssignableToTypeOf(&eks.UpdateClusterVersionInput{})). + Return(&eks.UpdateClusterVersionOutput{}, nil) + }, + expectError: false, + }, + { + name: "api error", + expect: func(m *mock_eksiface.MockEKSAPIMockRecorder) { + m. + DescribeCluster(gomock.AssignableToTypeOf(&eks.DescribeClusterInput{})). + Return(&eks.DescribeClusterOutput{ + Cluster: &eks.Cluster{ + Name: aws.String("default.cluster"), + Version: aws.String("1.14"), + }, + }, nil) + m. + UpdateClusterVersion(gomock.AssignableToTypeOf(&eks.UpdateClusterVersionInput{})). 
+ Return(&eks.UpdateClusterVersionOutput{}, errors.New("")) + }, + expectError: true, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + + mockControl := gomock.NewController(t) + defer mockControl.Finish() + + eksMock := mock_eksiface.NewMockEKSAPI(mockControl) + + scope, err := scope.NewManagedControlPlaneScope(scope.ManagedControlPlaneScopeParams{ + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: clusterName, + }, + }, + ControlPlane: &infrav1exp.AWSManagedControlPlane{ + Spec: infrav1exp.AWSManagedControlPlaneSpec{ + Version: aws.String("1.16"), + }, + }, + }) + g.Expect(err).To(BeNil()) + + tc.expect(eksMock.EXPECT()) + s := NewService(scope) + s.EKSClient = eksMock + + cluster, err := s.describeEKSCluster(clusterName) + g.Expect(err).To(BeNil()) + + err = s.reconcileClusterVersion(cluster) + if tc.expectError { + g.Expect(err).To(HaveOccurred()) + return + } + g.Expect(err).To(BeNil()) + }) + } +} diff --git a/pkg/cloud/services/eks/config.go b/pkg/cloud/services/eks/config.go new file mode 100644 index 0000000000..9967ccd63e --- /dev/null +++ b/pkg/cloud/services/eks/config.go @@ -0,0 +1,282 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package eks
+
+import (
+	"context"
+	"encoding/base64"
+	"fmt"
+	"time"
+
+	"github.com/aws/aws-sdk-go/service/eks"
+	"github.com/aws/aws-sdk-go/service/sts"
+	"github.com/pkg/errors"
+
+	corev1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/client-go/tools/clientcmd"
+	"k8s.io/client-go/tools/clientcmd/api"
+
+	"sigs.k8s.io/cluster-api/util/kubeconfig"
+	"sigs.k8s.io/cluster-api/util/secret"
+
+	infrav1exp "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1alpha3"
+	"sigs.k8s.io/cluster-api-provider-aws/pkg/record"
+)
+
+const (
+	tokenPrefix       = "k8s-aws-v1." //nolint:gosec
+	clusterNameHeader = "x-k8s-aws-id"
+	tokenAgeMins      = 15
+)
+
+func (s *Service) reconcileKubeconfig(ctx context.Context, cluster *eks.Cluster) error {
+	s.scope.V(2).Info("Reconciling EKS kubeconfigs for cluster", "cluster-name", s.scope.KubernetesClusterName())
+
+	clusterRef := types.NamespacedName{
+		Name:      s.scope.Cluster.Name,
+		Namespace: s.scope.Cluster.Namespace,
+	}
+
+	// Create the kubeconfig used by CAPI
+	configSecret, err := secret.GetFromNamespacedName(ctx, s.scope.Client, clusterRef, secret.Kubeconfig)
+	if err != nil {
+		if !apierrors.IsNotFound(err) {
+			return errors.Wrap(err, "failed to get kubeconfig secret")
+		}
+
+		if createErr := s.createCAPIKubeconfigSecret(
+			ctx,
+			cluster,
+			&clusterRef,
+		); createErr != nil {
+			return fmt.Errorf("creating kubeconfig secret: %w", createErr)
+		}
+	} else if updateErr := s.updateCAPIKubeconfigSecret(ctx, configSecret, cluster); updateErr != nil {
+		return fmt.Errorf("updating kubeconfig secret: %w", updateErr)
+	}
+
+	// Set initialized to true to indicate the kubeconfig has been created
+	s.scope.ControlPlane.Status.Initialized = true
+
+	return nil
+}
+
+func (s *Service) reconcileAdditionalKubeconfigs(ctx context.Context, cluster *eks.Cluster) error {
+	s.scope.V(2).Info("Reconciling additional EKS kubeconfigs for cluster",
 "cluster-name", s.scope.KubernetesClusterName())
+
+	clusterRef := types.NamespacedName{
+		Name:      s.scope.Cluster.Name + "-user",
+		Namespace: s.scope.Cluster.Namespace,
+	}
+
+	// Create the additional kubeconfig for users. This doesn't need updating on every sync
+	_, err := secret.GetFromNamespacedName(ctx, s.scope.Client, clusterRef, secret.Kubeconfig)
+	if err != nil {
+		if !apierrors.IsNotFound(err) {
+			return errors.Wrap(err, "failed to get kubeconfig (user) secret")
+		}
+
+		createErr := s.createUserKubeconfigSecret(
+			ctx,
+			cluster,
+			&clusterRef,
+		)
+		if createErr != nil {
+			return createErr
+		}
+	}
+
+	return nil
+}
+
+func (s *Service) createCAPIKubeconfigSecret(ctx context.Context, cluster *eks.Cluster, clusterRef *types.NamespacedName) error {
+	controllerOwnerRef := *metav1.NewControllerRef(s.scope.ControlPlane, infrav1exp.GroupVersion.WithKind("AWSManagedControlPlane"))
+
+	clusterName := s.scope.KubernetesClusterName()
+	userName := s.getKubeConfigUserName(clusterName, false)
+
+	cfg, err := s.createBaseKubeConfig(cluster, userName)
+	if err != nil {
+		return fmt.Errorf("creating base kubeconfig: %w", err)
+	}
+
+	token, err := s.generateToken()
+	if err != nil {
+		return fmt.Errorf("generating presigned token: %w", err)
+	}
+
+	cfg.AuthInfos = map[string]*api.AuthInfo{
+		userName: {
+			Token: token,
+		},
+	}
+
+	out, err := clientcmd.Write(*cfg)
+	if err != nil {
+		return errors.Wrap(err, "failed to serialize config to yaml")
+	}
+
+	kubeconfigSecret := kubeconfig.GenerateSecretWithOwner(*clusterRef, out, controllerOwnerRef)
+	if err := s.scope.Client.Create(ctx, kubeconfigSecret); err != nil {
+		return errors.Wrap(err, "failed to create kubeconfig secret")
+	}
+
+	record.Eventf(s.scope.ControlPlane, "SuccessfulCreateKubeconfig", "Created kubeconfig for cluster %q", s.scope.Name())
+	return nil
+}
+
+func (s *Service) updateCAPIKubeconfigSecret(ctx context.Context, configSecret *corev1.Secret, cluster *eks.Cluster) error {
+	s.scope.V(2).Info("Updating
EKS kubeconfigs for cluster", "cluster-name", s.scope.KubernetesClusterName())
+
+	data, ok := configSecret.Data[secret.KubeconfigDataName]
+	if !ok {
+		return errors.Errorf("missing key %q in secret data", secret.KubeconfigDataName)
+	}
+
+	config, err := clientcmd.Load(data)
+	if err != nil {
+		return errors.Wrap(err, "failed to convert kubeconfig Secret into a clientcmdapi.Config")
+	}
+
+	token, err := s.generateToken()
+	if err != nil {
+		return fmt.Errorf("generating presigned token: %w", err)
+	}
+
+	userName := s.getKubeConfigUserName(*cluster.Name, false)
+	// BUGFIX: guard the map lookup — indexing AuthInfos directly panics with a
+	// nil-pointer dereference if the stored kubeconfig lacks the expected user.
+	authInfo, ok := config.AuthInfos[userName]
+	if !ok || authInfo == nil {
+		return errors.Errorf("missing auth info %q in kubeconfig secret", userName)
+	}
+	authInfo.Token = token
+
+	out, err := clientcmd.Write(*config)
+	if err != nil {
+		return errors.Wrap(err, "failed to serialize config to yaml")
+	}
+
+	configSecret.Data[secret.KubeconfigDataName] = out
+
+	err = s.scope.Client.Update(ctx, configSecret)
+	if err != nil {
+		return fmt.Errorf("updating kubeconfig secret: %w", err)
+	}
+
+	return nil
+}
+
+// createUserKubeconfigSecret builds an exec-plugin-based kubeconfig for human
+// users (aws-iam-authenticator or `aws eks get-token`, per the configured
+// token method) and stores it in a secret owned by the AWSManagedControlPlane.
+func (s *Service) createUserKubeconfigSecret(ctx context.Context, cluster *eks.Cluster, clusterRef *types.NamespacedName) error {
+	controllerOwnerRef := *metav1.NewControllerRef(s.scope.ControlPlane, infrav1exp.GroupVersion.WithKind("AWSManagedControlPlane"))
+
+	clusterName := s.scope.KubernetesClusterName()
+	userName := s.getKubeConfigUserName(clusterName, true)
+
+	cfg, err := s.createBaseKubeConfig(cluster, userName)
+	if err != nil {
+		return fmt.Errorf("creating base kubeconfig: %w", err)
+	}
+
+	execConfig := &api.ExecConfig{APIVersion: "client.authentication.k8s.io/v1alpha1"}
+	switch s.scope.TokenMethod() {
+	case infrav1exp.EKSTokenMethodIAMAuthenticator:
+		execConfig.Command = "aws-iam-authenticator"
+		execConfig.Args = []string{
+			"token",
+			"-i",
+			clusterName,
+		}
+	case infrav1exp.EKSTokenMethodAWSCli:
+		execConfig.Command = "aws"
+		execConfig.Args = []string{
+			"eks",
+			"get-token",
+			"--cluster-name",
+			clusterName,
+		}
+	default:
+		return fmt.Errorf("using token method %s: %w", s.scope.TokenMethod(),
ErrUnknownTokenMethod)
+	}
+	cfg.AuthInfos = map[string]*api.AuthInfo{
+		userName: {
+			Exec: execConfig,
+		},
+	}
+
+	out, err := clientcmd.Write(*cfg)
+	if err != nil {
+		return errors.Wrap(err, "failed to serialize config to yaml")
+	}
+
+	kubeconfigSecret := kubeconfig.GenerateSecretWithOwner(*clusterRef, out, controllerOwnerRef)
+	if err := s.scope.Client.Create(ctx, kubeconfigSecret); err != nil {
+		return errors.Wrap(err, "failed to create kubeconfig secret")
+	}
+
+	// Typo fix: "Sucessful" -> "Successful" (new event reason, no consumers yet).
+	record.Eventf(s.scope.ControlPlane, "SuccessfulCreateUserKubeconfig", "Created user kubeconfig for cluster %q", s.scope.Name())
+	return nil
+}
+
+// createBaseKubeConfig builds an api.Config with the cluster endpoint and CA
+// plus a context for userName; callers fill in AuthInfos for that user.
+func (s *Service) createBaseKubeConfig(cluster *eks.Cluster, userName string) (*api.Config, error) {
+	clusterName := s.scope.KubernetesClusterName()
+	contextName := fmt.Sprintf("%s@%s", userName, clusterName)
+
+	certData, err := base64.StdEncoding.DecodeString(*cluster.CertificateAuthority.Data)
+	if err != nil {
+		return nil, fmt.Errorf("decoding cluster CA cert: %w", err)
+	}
+
+	cfg := &api.Config{
+		APIVersion: api.SchemeGroupVersion.Version,
+		Clusters: map[string]*api.Cluster{
+			clusterName: {
+				Server:                   *cluster.Endpoint,
+				CertificateAuthorityData: certData,
+			},
+		},
+		Contexts: map[string]*api.Context{
+			contextName: {
+				Cluster:  clusterName,
+				AuthInfo: userName,
+			},
+		},
+		CurrentContext: contextName,
+	}
+
+	return cfg, nil
+}
+
+// generateToken builds an EKS bearer token: "k8s-aws-v1." followed by the
+// base64url-encoded presigned STS GetCallerIdentity URL, with the cluster
+// name attached via the x-k8s-aws-id header.
+func (s *Service) generateToken() (string, error) {
+	eksClusterName := s.scope.KubernetesClusterName()
+
+	req, _ := s.STSClient.GetCallerIdentityRequest(&sts.GetCallerIdentityInput{})
+	req.HTTPRequest.Header.Add(clusterNameHeader, eksClusterName)
+
+	presignedURL, err := req.Presign(tokenAgeMins * time.Minute)
+	if err != nil {
+		return "", fmt.Errorf("presigning AWS get caller identity: %w", err)
+	}
+
+	encodedURL := base64.RawURLEncoding.EncodeToString([]byte(presignedURL))
+	return fmt.Sprintf("%s%s", tokenPrefix, encodedURL), nil
+}
+
+// getKubeConfigUserName returns the kubeconfig user entry name: "<cluster>-user"
+// for the user-facing kubeconfig, "<cluster>-capi-admin" for the CAPI one.
+func (s *Service) getKubeConfigUserName(clusterName string, isUser
bool) string {
+	if isUser {
+		return fmt.Sprintf("%s-user", clusterName)
+	}
+
+	return fmt.Sprintf("%s-capi-admin", clusterName)
+}
diff --git a/pkg/cloud/services/eks/eks.go b/pkg/cloud/services/eks/eks.go
new file mode 100644
index 0000000000..a0d8c71c6d
--- /dev/null
+++ b/pkg/cloud/services/eks/eks.go
@@ -0,0 +1,66 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package eks
+
+import (
+	"context"
+
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
+	"sigs.k8s.io/cluster-api/util/conditions"
+
+	infrav1exp "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1alpha3"
+)
+
+// ReconcileControlPlane reconciles an EKS control plane: first the control
+// plane IAM role, then the EKS cluster itself, updating the corresponding
+// status conditions as each step succeeds or fails.
+func (s *Service) ReconcileControlPlane(ctx context.Context) error {
+	s.scope.V(2).Info("Reconciling EKS control plane", "cluster-name", s.scope.Cluster.Name, "cluster-namespace", s.scope.Cluster.Namespace)
+
+	// Control Plane IAM Role
+	if err := s.reconcileControlPlaneIAMRole(); err != nil {
+		conditions.MarkFalse(s.scope.ControlPlane, infrav1exp.IAMControlPlaneRolesReadyCondition, infrav1exp.IAMControlPlaneRolesReconciliationFailedReason, clusterv1.ConditionSeverityError, err.Error())
+		return err
+	}
+	conditions.MarkTrue(s.scope.ControlPlane, infrav1exp.IAMControlPlaneRolesReadyCondition)
+
+	// EKS Cluster
+	if err := s.reconcileCluster(ctx); err != nil {
+		conditions.MarkFalse(s.scope.ControlPlane, infrav1exp.EKSControlPlaneReadyCondition, infrav1exp.EKSControlPlaneReconciliationFailedReason, clusterv1.ConditionSeverityError, err.Error())
+		return err
+	}
+	conditions.MarkTrue(s.scope.ControlPlane, infrav1exp.EKSControlPlaneReadyCondition)
+
+	s.scope.V(2).Info("Reconcile EKS control plane completed successfully")
+	return nil
+}
+
+// DeleteControlPlane deletes the EKS control plane: the EKS cluster first,
+// then the control plane IAM role.
+// Cleanup: dropped the named return (err error) — it was always shadowed by
+// the block-scoped errs below and never used.
+func (s *Service) DeleteControlPlane() error {
+	s.scope.V(2).Info("Deleting EKS control plane")
+
+	// EKS Cluster
+	if err := s.deleteCluster(); err != nil {
+		return err
+	}
+
+	// Control Plane IAM role
+	if err := s.deleteControlPlaneIAMRole(); err != nil {
+		return err
+	}
+
+	s.scope.V(2).Info("Delete EKS control plane completed successfully")
+	return nil
+}
diff --git a/pkg/cloud/services/eks/errors.go b/pkg/cloud/services/eks/errors.go
new file mode 100644
index 0000000000..2477ba7e36
--- /dev/null
+++ b/pkg/cloud/services/eks/errors.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package eks
+
+import "github.com/pkg/errors"
+
+var (
+	// ErrClusterExists is an error if an EKS cluster already exists with
+	// the same name in the spec but that isn't owned by the CAPI cluster
+	ErrClusterExists = errors.New("an EKS cluster already exists with same name but isn't owned by cluster")
+	// ErrUnknownTokenMethod defines an error if an unsupported token generation method is supplied
+	ErrUnknownTokenMethod = errors.New("unknown token method")
+	// ErrClusterRoleNameMissing if no role name is specified
+	ErrClusterRoleNameMissing = errors.New("a cluster role name must be specified")
+	// ErrClusterRoleNotFound is an error if the specified role couldn't be found in AWS
+	ErrClusterRoleNotFound = errors.New("the specified cluster role couldn't be found")
+	// ErrCannotUseAdditionalRoles is an error if the spec contains additional roles and the
+	// EKSAllowAddRoles feature flag isn't enabled.
+	// Message fix: "rules" -> "roles" to match the variable's meaning.
+	ErrCannotUseAdditionalRoles = errors.New("additional roles cannot be added as this has been disabled")
+	// ErrNoSecurityGroup is an error when no security group is found for an EKS cluster
+	ErrNoSecurityGroup = errors.New("no security group for EKS cluster")
+)
diff --git a/pkg/cloud/services/eks/mock_eksiface/doc.go b/pkg/cloud/services/eks/mock_eksiface/doc.go
new file mode 100644
index 0000000000..f91ff1c92b
--- /dev/null
+++ b/pkg/cloud/services/eks/mock_eksiface/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Run go generate to regenerate this mock. +//go:generate ../../../../../hack/tools/bin/mockgen -destination eksapi_mock.go -package mock_eksiface github.com/aws/aws-sdk-go/service/eks/eksiface EKSAPI +//go:generate /usr/bin/env bash -c "cat ../../../../../hack/boilerplate/boilerplate.generatego.txt eksapi_mock.go > _eksapi_mock.go && mv _eksapi_mock.go eksapi_mock.go" +package mock_eksiface //nolint diff --git a/pkg/cloud/services/eks/mock_eksiface/eksapi_mock.go b/pkg/cloud/services/eks/mock_eksiface/eksapi_mock.go new file mode 100644 index 0000000000..19ace40f6a --- /dev/null +++ b/pkg/cloud/services/eks/mock_eksiface/eksapi_mock.go @@ -0,0 +1,1366 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/aws/aws-sdk-go/service/eks/eksiface (interfaces: EKSAPI) + +// Package mock_eksiface is a generated GoMock package. 
+package mock_eksiface + +import ( + context "context" + request "github.com/aws/aws-sdk-go/aws/request" + eks "github.com/aws/aws-sdk-go/service/eks" + gomock "github.com/golang/mock/gomock" + reflect "reflect" +) + +// MockEKSAPI is a mock of EKSAPI interface +type MockEKSAPI struct { + ctrl *gomock.Controller + recorder *MockEKSAPIMockRecorder +} + +// MockEKSAPIMockRecorder is the mock recorder for MockEKSAPI +type MockEKSAPIMockRecorder struct { + mock *MockEKSAPI +} + +// NewMockEKSAPI creates a new mock instance +func NewMockEKSAPI(ctrl *gomock.Controller) *MockEKSAPI { + mock := &MockEKSAPI{ctrl: ctrl} + mock.recorder = &MockEKSAPIMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockEKSAPI) EXPECT() *MockEKSAPIMockRecorder { + return m.recorder +} + +// CreateCluster mocks base method +func (m *MockEKSAPI) CreateCluster(arg0 *eks.CreateClusterInput) (*eks.CreateClusterOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateCluster", arg0) + ret0, _ := ret[0].(*eks.CreateClusterOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateCluster indicates an expected call of CreateCluster +func (mr *MockEKSAPIMockRecorder) CreateCluster(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateCluster", reflect.TypeOf((*MockEKSAPI)(nil).CreateCluster), arg0) +} + +// CreateClusterRequest mocks base method +func (m *MockEKSAPI) CreateClusterRequest(arg0 *eks.CreateClusterInput) (*request.Request, *eks.CreateClusterOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateClusterRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*eks.CreateClusterOutput) + return ret0, ret1 +} + +// CreateClusterRequest indicates an expected call of CreateClusterRequest +func (mr *MockEKSAPIMockRecorder) CreateClusterRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + 
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateClusterRequest", reflect.TypeOf((*MockEKSAPI)(nil).CreateClusterRequest), arg0) +} + +// CreateClusterWithContext mocks base method +func (m *MockEKSAPI) CreateClusterWithContext(arg0 context.Context, arg1 *eks.CreateClusterInput, arg2 ...request.Option) (*eks.CreateClusterOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CreateClusterWithContext", varargs...) + ret0, _ := ret[0].(*eks.CreateClusterOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateClusterWithContext indicates an expected call of CreateClusterWithContext +func (mr *MockEKSAPIMockRecorder) CreateClusterWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateClusterWithContext", reflect.TypeOf((*MockEKSAPI)(nil).CreateClusterWithContext), varargs...) 
+} + +// CreateFargateProfile mocks base method +func (m *MockEKSAPI) CreateFargateProfile(arg0 *eks.CreateFargateProfileInput) (*eks.CreateFargateProfileOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateFargateProfile", arg0) + ret0, _ := ret[0].(*eks.CreateFargateProfileOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateFargateProfile indicates an expected call of CreateFargateProfile +func (mr *MockEKSAPIMockRecorder) CreateFargateProfile(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateFargateProfile", reflect.TypeOf((*MockEKSAPI)(nil).CreateFargateProfile), arg0) +} + +// CreateFargateProfileRequest mocks base method +func (m *MockEKSAPI) CreateFargateProfileRequest(arg0 *eks.CreateFargateProfileInput) (*request.Request, *eks.CreateFargateProfileOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateFargateProfileRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*eks.CreateFargateProfileOutput) + return ret0, ret1 +} + +// CreateFargateProfileRequest indicates an expected call of CreateFargateProfileRequest +func (mr *MockEKSAPIMockRecorder) CreateFargateProfileRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateFargateProfileRequest", reflect.TypeOf((*MockEKSAPI)(nil).CreateFargateProfileRequest), arg0) +} + +// CreateFargateProfileWithContext mocks base method +func (m *MockEKSAPI) CreateFargateProfileWithContext(arg0 context.Context, arg1 *eks.CreateFargateProfileInput, arg2 ...request.Option) (*eks.CreateFargateProfileOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CreateFargateProfileWithContext", varargs...) 
+ ret0, _ := ret[0].(*eks.CreateFargateProfileOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateFargateProfileWithContext indicates an expected call of CreateFargateProfileWithContext +func (mr *MockEKSAPIMockRecorder) CreateFargateProfileWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateFargateProfileWithContext", reflect.TypeOf((*MockEKSAPI)(nil).CreateFargateProfileWithContext), varargs...) +} + +// CreateNodegroup mocks base method +func (m *MockEKSAPI) CreateNodegroup(arg0 *eks.CreateNodegroupInput) (*eks.CreateNodegroupOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateNodegroup", arg0) + ret0, _ := ret[0].(*eks.CreateNodegroupOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateNodegroup indicates an expected call of CreateNodegroup +func (mr *MockEKSAPIMockRecorder) CreateNodegroup(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateNodegroup", reflect.TypeOf((*MockEKSAPI)(nil).CreateNodegroup), arg0) +} + +// CreateNodegroupRequest mocks base method +func (m *MockEKSAPI) CreateNodegroupRequest(arg0 *eks.CreateNodegroupInput) (*request.Request, *eks.CreateNodegroupOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateNodegroupRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*eks.CreateNodegroupOutput) + return ret0, ret1 +} + +// CreateNodegroupRequest indicates an expected call of CreateNodegroupRequest +func (mr *MockEKSAPIMockRecorder) CreateNodegroupRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateNodegroupRequest", reflect.TypeOf((*MockEKSAPI)(nil).CreateNodegroupRequest), arg0) +} + +// CreateNodegroupWithContext mocks base method +func (m 
*MockEKSAPI) CreateNodegroupWithContext(arg0 context.Context, arg1 *eks.CreateNodegroupInput, arg2 ...request.Option) (*eks.CreateNodegroupOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CreateNodegroupWithContext", varargs...) + ret0, _ := ret[0].(*eks.CreateNodegroupOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateNodegroupWithContext indicates an expected call of CreateNodegroupWithContext +func (mr *MockEKSAPIMockRecorder) CreateNodegroupWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateNodegroupWithContext", reflect.TypeOf((*MockEKSAPI)(nil).CreateNodegroupWithContext), varargs...) +} + +// DeleteCluster mocks base method +func (m *MockEKSAPI) DeleteCluster(arg0 *eks.DeleteClusterInput) (*eks.DeleteClusterOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteCluster", arg0) + ret0, _ := ret[0].(*eks.DeleteClusterOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteCluster indicates an expected call of DeleteCluster +func (mr *MockEKSAPIMockRecorder) DeleteCluster(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCluster", reflect.TypeOf((*MockEKSAPI)(nil).DeleteCluster), arg0) +} + +// DeleteClusterRequest mocks base method +func (m *MockEKSAPI) DeleteClusterRequest(arg0 *eks.DeleteClusterInput) (*request.Request, *eks.DeleteClusterOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteClusterRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*eks.DeleteClusterOutput) + return ret0, ret1 +} + +// DeleteClusterRequest indicates an expected call of DeleteClusterRequest +func (mr *MockEKSAPIMockRecorder) DeleteClusterRequest(arg0 
interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteClusterRequest", reflect.TypeOf((*MockEKSAPI)(nil).DeleteClusterRequest), arg0) +} + +// DeleteClusterWithContext mocks base method +func (m *MockEKSAPI) DeleteClusterWithContext(arg0 context.Context, arg1 *eks.DeleteClusterInput, arg2 ...request.Option) (*eks.DeleteClusterOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteClusterWithContext", varargs...) + ret0, _ := ret[0].(*eks.DeleteClusterOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteClusterWithContext indicates an expected call of DeleteClusterWithContext +func (mr *MockEKSAPIMockRecorder) DeleteClusterWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteClusterWithContext", reflect.TypeOf((*MockEKSAPI)(nil).DeleteClusterWithContext), varargs...) 
+} + +// DeleteFargateProfile mocks base method +func (m *MockEKSAPI) DeleteFargateProfile(arg0 *eks.DeleteFargateProfileInput) (*eks.DeleteFargateProfileOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteFargateProfile", arg0) + ret0, _ := ret[0].(*eks.DeleteFargateProfileOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteFargateProfile indicates an expected call of DeleteFargateProfile +func (mr *MockEKSAPIMockRecorder) DeleteFargateProfile(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteFargateProfile", reflect.TypeOf((*MockEKSAPI)(nil).DeleteFargateProfile), arg0) +} + +// DeleteFargateProfileRequest mocks base method +func (m *MockEKSAPI) DeleteFargateProfileRequest(arg0 *eks.DeleteFargateProfileInput) (*request.Request, *eks.DeleteFargateProfileOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteFargateProfileRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*eks.DeleteFargateProfileOutput) + return ret0, ret1 +} + +// DeleteFargateProfileRequest indicates an expected call of DeleteFargateProfileRequest +func (mr *MockEKSAPIMockRecorder) DeleteFargateProfileRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteFargateProfileRequest", reflect.TypeOf((*MockEKSAPI)(nil).DeleteFargateProfileRequest), arg0) +} + +// DeleteFargateProfileWithContext mocks base method +func (m *MockEKSAPI) DeleteFargateProfileWithContext(arg0 context.Context, arg1 *eks.DeleteFargateProfileInput, arg2 ...request.Option) (*eks.DeleteFargateProfileOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteFargateProfileWithContext", varargs...) 
+ ret0, _ := ret[0].(*eks.DeleteFargateProfileOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteFargateProfileWithContext indicates an expected call of DeleteFargateProfileWithContext +func (mr *MockEKSAPIMockRecorder) DeleteFargateProfileWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteFargateProfileWithContext", reflect.TypeOf((*MockEKSAPI)(nil).DeleteFargateProfileWithContext), varargs...) +} + +// DeleteNodegroup mocks base method +func (m *MockEKSAPI) DeleteNodegroup(arg0 *eks.DeleteNodegroupInput) (*eks.DeleteNodegroupOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteNodegroup", arg0) + ret0, _ := ret[0].(*eks.DeleteNodegroupOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteNodegroup indicates an expected call of DeleteNodegroup +func (mr *MockEKSAPIMockRecorder) DeleteNodegroup(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteNodegroup", reflect.TypeOf((*MockEKSAPI)(nil).DeleteNodegroup), arg0) +} + +// DeleteNodegroupRequest mocks base method +func (m *MockEKSAPI) DeleteNodegroupRequest(arg0 *eks.DeleteNodegroupInput) (*request.Request, *eks.DeleteNodegroupOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteNodegroupRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*eks.DeleteNodegroupOutput) + return ret0, ret1 +} + +// DeleteNodegroupRequest indicates an expected call of DeleteNodegroupRequest +func (mr *MockEKSAPIMockRecorder) DeleteNodegroupRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteNodegroupRequest", reflect.TypeOf((*MockEKSAPI)(nil).DeleteNodegroupRequest), arg0) +} + +// DeleteNodegroupWithContext mocks base method +func (m 
*MockEKSAPI) DeleteNodegroupWithContext(arg0 context.Context, arg1 *eks.DeleteNodegroupInput, arg2 ...request.Option) (*eks.DeleteNodegroupOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteNodegroupWithContext", varargs...) + ret0, _ := ret[0].(*eks.DeleteNodegroupOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteNodegroupWithContext indicates an expected call of DeleteNodegroupWithContext +func (mr *MockEKSAPIMockRecorder) DeleteNodegroupWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteNodegroupWithContext", reflect.TypeOf((*MockEKSAPI)(nil).DeleteNodegroupWithContext), varargs...) +} + +// DescribeCluster mocks base method +func (m *MockEKSAPI) DescribeCluster(arg0 *eks.DescribeClusterInput) (*eks.DescribeClusterOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeCluster", arg0) + ret0, _ := ret[0].(*eks.DescribeClusterOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribeCluster indicates an expected call of DescribeCluster +func (mr *MockEKSAPIMockRecorder) DescribeCluster(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeCluster", reflect.TypeOf((*MockEKSAPI)(nil).DescribeCluster), arg0) +} + +// DescribeClusterRequest mocks base method +func (m *MockEKSAPI) DescribeClusterRequest(arg0 *eks.DescribeClusterInput) (*request.Request, *eks.DescribeClusterOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeClusterRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*eks.DescribeClusterOutput) + return ret0, ret1 +} + +// DescribeClusterRequest indicates an expected call of DescribeClusterRequest +func (mr 
*MockEKSAPIMockRecorder) DescribeClusterRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeClusterRequest", reflect.TypeOf((*MockEKSAPI)(nil).DescribeClusterRequest), arg0) +} + +// DescribeClusterWithContext mocks base method +func (m *MockEKSAPI) DescribeClusterWithContext(arg0 context.Context, arg1 *eks.DescribeClusterInput, arg2 ...request.Option) (*eks.DescribeClusterOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DescribeClusterWithContext", varargs...) + ret0, _ := ret[0].(*eks.DescribeClusterOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribeClusterWithContext indicates an expected call of DescribeClusterWithContext +func (mr *MockEKSAPIMockRecorder) DescribeClusterWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeClusterWithContext", reflect.TypeOf((*MockEKSAPI)(nil).DescribeClusterWithContext), varargs...) 
+} + +// DescribeFargateProfile mocks base method +func (m *MockEKSAPI) DescribeFargateProfile(arg0 *eks.DescribeFargateProfileInput) (*eks.DescribeFargateProfileOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeFargateProfile", arg0) + ret0, _ := ret[0].(*eks.DescribeFargateProfileOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribeFargateProfile indicates an expected call of DescribeFargateProfile +func (mr *MockEKSAPIMockRecorder) DescribeFargateProfile(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeFargateProfile", reflect.TypeOf((*MockEKSAPI)(nil).DescribeFargateProfile), arg0) +} + +// DescribeFargateProfileRequest mocks base method +func (m *MockEKSAPI) DescribeFargateProfileRequest(arg0 *eks.DescribeFargateProfileInput) (*request.Request, *eks.DescribeFargateProfileOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeFargateProfileRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*eks.DescribeFargateProfileOutput) + return ret0, ret1 +} + +// DescribeFargateProfileRequest indicates an expected call of DescribeFargateProfileRequest +func (mr *MockEKSAPIMockRecorder) DescribeFargateProfileRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeFargateProfileRequest", reflect.TypeOf((*MockEKSAPI)(nil).DescribeFargateProfileRequest), arg0) +} + +// DescribeFargateProfileWithContext mocks base method +func (m *MockEKSAPI) DescribeFargateProfileWithContext(arg0 context.Context, arg1 *eks.DescribeFargateProfileInput, arg2 ...request.Option) (*eks.DescribeFargateProfileOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DescribeFargateProfileWithContext", varargs...) 
+ ret0, _ := ret[0].(*eks.DescribeFargateProfileOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribeFargateProfileWithContext indicates an expected call of DescribeFargateProfileWithContext +func (mr *MockEKSAPIMockRecorder) DescribeFargateProfileWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeFargateProfileWithContext", reflect.TypeOf((*MockEKSAPI)(nil).DescribeFargateProfileWithContext), varargs...) +} + +// DescribeNodegroup mocks base method +func (m *MockEKSAPI) DescribeNodegroup(arg0 *eks.DescribeNodegroupInput) (*eks.DescribeNodegroupOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeNodegroup", arg0) + ret0, _ := ret[0].(*eks.DescribeNodegroupOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribeNodegroup indicates an expected call of DescribeNodegroup +func (mr *MockEKSAPIMockRecorder) DescribeNodegroup(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeNodegroup", reflect.TypeOf((*MockEKSAPI)(nil).DescribeNodegroup), arg0) +} + +// DescribeNodegroupRequest mocks base method +func (m *MockEKSAPI) DescribeNodegroupRequest(arg0 *eks.DescribeNodegroupInput) (*request.Request, *eks.DescribeNodegroupOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeNodegroupRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*eks.DescribeNodegroupOutput) + return ret0, ret1 +} + +// DescribeNodegroupRequest indicates an expected call of DescribeNodegroupRequest +func (mr *MockEKSAPIMockRecorder) DescribeNodegroupRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeNodegroupRequest", reflect.TypeOf((*MockEKSAPI)(nil).DescribeNodegroupRequest), arg0) +} + +// 
DescribeNodegroupWithContext mocks base method +func (m *MockEKSAPI) DescribeNodegroupWithContext(arg0 context.Context, arg1 *eks.DescribeNodegroupInput, arg2 ...request.Option) (*eks.DescribeNodegroupOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DescribeNodegroupWithContext", varargs...) + ret0, _ := ret[0].(*eks.DescribeNodegroupOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribeNodegroupWithContext indicates an expected call of DescribeNodegroupWithContext +func (mr *MockEKSAPIMockRecorder) DescribeNodegroupWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeNodegroupWithContext", reflect.TypeOf((*MockEKSAPI)(nil).DescribeNodegroupWithContext), varargs...) +} + +// DescribeUpdate mocks base method +func (m *MockEKSAPI) DescribeUpdate(arg0 *eks.DescribeUpdateInput) (*eks.DescribeUpdateOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeUpdate", arg0) + ret0, _ := ret[0].(*eks.DescribeUpdateOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribeUpdate indicates an expected call of DescribeUpdate +func (mr *MockEKSAPIMockRecorder) DescribeUpdate(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeUpdate", reflect.TypeOf((*MockEKSAPI)(nil).DescribeUpdate), arg0) +} + +// DescribeUpdateRequest mocks base method +func (m *MockEKSAPI) DescribeUpdateRequest(arg0 *eks.DescribeUpdateInput) (*request.Request, *eks.DescribeUpdateOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeUpdateRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*eks.DescribeUpdateOutput) + return ret0, ret1 +} + +// DescribeUpdateRequest indicates an 
expected call of DescribeUpdateRequest +func (mr *MockEKSAPIMockRecorder) DescribeUpdateRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeUpdateRequest", reflect.TypeOf((*MockEKSAPI)(nil).DescribeUpdateRequest), arg0) +} + +// DescribeUpdateWithContext mocks base method +func (m *MockEKSAPI) DescribeUpdateWithContext(arg0 context.Context, arg1 *eks.DescribeUpdateInput, arg2 ...request.Option) (*eks.DescribeUpdateOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DescribeUpdateWithContext", varargs...) + ret0, _ := ret[0].(*eks.DescribeUpdateOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribeUpdateWithContext indicates an expected call of DescribeUpdateWithContext +func (mr *MockEKSAPIMockRecorder) DescribeUpdateWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeUpdateWithContext", reflect.TypeOf((*MockEKSAPI)(nil).DescribeUpdateWithContext), varargs...) 
+} + +// ListClusters mocks base method +func (m *MockEKSAPI) ListClusters(arg0 *eks.ListClustersInput) (*eks.ListClustersOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListClusters", arg0) + ret0, _ := ret[0].(*eks.ListClustersOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListClusters indicates an expected call of ListClusters +func (mr *MockEKSAPIMockRecorder) ListClusters(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListClusters", reflect.TypeOf((*MockEKSAPI)(nil).ListClusters), arg0) +} + +// ListClustersPages mocks base method +func (m *MockEKSAPI) ListClustersPages(arg0 *eks.ListClustersInput, arg1 func(*eks.ListClustersOutput, bool) bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListClustersPages", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ListClustersPages indicates an expected call of ListClustersPages +func (mr *MockEKSAPIMockRecorder) ListClustersPages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListClustersPages", reflect.TypeOf((*MockEKSAPI)(nil).ListClustersPages), arg0, arg1) +} + +// ListClustersPagesWithContext mocks base method +func (m *MockEKSAPI) ListClustersPagesWithContext(arg0 context.Context, arg1 *eks.ListClustersInput, arg2 func(*eks.ListClustersOutput, bool) bool, arg3 ...request.Option) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListClustersPagesWithContext", varargs...) 
+ ret0, _ := ret[0].(error) + return ret0 +} + +// ListClustersPagesWithContext indicates an expected call of ListClustersPagesWithContext +func (mr *MockEKSAPIMockRecorder) ListClustersPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListClustersPagesWithContext", reflect.TypeOf((*MockEKSAPI)(nil).ListClustersPagesWithContext), varargs...) +} + +// ListClustersRequest mocks base method +func (m *MockEKSAPI) ListClustersRequest(arg0 *eks.ListClustersInput) (*request.Request, *eks.ListClustersOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListClustersRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*eks.ListClustersOutput) + return ret0, ret1 +} + +// ListClustersRequest indicates an expected call of ListClustersRequest +func (mr *MockEKSAPIMockRecorder) ListClustersRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListClustersRequest", reflect.TypeOf((*MockEKSAPI)(nil).ListClustersRequest), arg0) +} + +// ListClustersWithContext mocks base method +func (m *MockEKSAPI) ListClustersWithContext(arg0 context.Context, arg1 *eks.ListClustersInput, arg2 ...request.Option) (*eks.ListClustersOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListClustersWithContext", varargs...) + ret0, _ := ret[0].(*eks.ListClustersOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListClustersWithContext indicates an expected call of ListClustersWithContext +func (mr *MockEKSAPIMockRecorder) ListClustersWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListClustersWithContext", reflect.TypeOf((*MockEKSAPI)(nil).ListClustersWithContext), varargs...) +} + +// ListFargateProfiles mocks base method +func (m *MockEKSAPI) ListFargateProfiles(arg0 *eks.ListFargateProfilesInput) (*eks.ListFargateProfilesOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListFargateProfiles", arg0) + ret0, _ := ret[0].(*eks.ListFargateProfilesOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListFargateProfiles indicates an expected call of ListFargateProfiles +func (mr *MockEKSAPIMockRecorder) ListFargateProfiles(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListFargateProfiles", reflect.TypeOf((*MockEKSAPI)(nil).ListFargateProfiles), arg0) +} + +// ListFargateProfilesPages mocks base method +func (m *MockEKSAPI) ListFargateProfilesPages(arg0 *eks.ListFargateProfilesInput, arg1 func(*eks.ListFargateProfilesOutput, bool) bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListFargateProfilesPages", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ListFargateProfilesPages indicates an expected call of ListFargateProfilesPages +func (mr *MockEKSAPIMockRecorder) ListFargateProfilesPages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListFargateProfilesPages", reflect.TypeOf((*MockEKSAPI)(nil).ListFargateProfilesPages), arg0, arg1) +} + +// ListFargateProfilesPagesWithContext mocks base method +func (m *MockEKSAPI) ListFargateProfilesPagesWithContext(arg0 context.Context, arg1 *eks.ListFargateProfilesInput, arg2 func(*eks.ListFargateProfilesOutput, bool) bool, arg3 ...request.Option) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListFargateProfilesPagesWithContext", varargs...) 
+ ret0, _ := ret[0].(error) + return ret0 +} + +// ListFargateProfilesPagesWithContext indicates an expected call of ListFargateProfilesPagesWithContext +func (mr *MockEKSAPIMockRecorder) ListFargateProfilesPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListFargateProfilesPagesWithContext", reflect.TypeOf((*MockEKSAPI)(nil).ListFargateProfilesPagesWithContext), varargs...) +} + +// ListFargateProfilesRequest mocks base method +func (m *MockEKSAPI) ListFargateProfilesRequest(arg0 *eks.ListFargateProfilesInput) (*request.Request, *eks.ListFargateProfilesOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListFargateProfilesRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*eks.ListFargateProfilesOutput) + return ret0, ret1 +} + +// ListFargateProfilesRequest indicates an expected call of ListFargateProfilesRequest +func (mr *MockEKSAPIMockRecorder) ListFargateProfilesRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListFargateProfilesRequest", reflect.TypeOf((*MockEKSAPI)(nil).ListFargateProfilesRequest), arg0) +} + +// ListFargateProfilesWithContext mocks base method +func (m *MockEKSAPI) ListFargateProfilesWithContext(arg0 context.Context, arg1 *eks.ListFargateProfilesInput, arg2 ...request.Option) (*eks.ListFargateProfilesOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListFargateProfilesWithContext", varargs...) 
+ ret0, _ := ret[0].(*eks.ListFargateProfilesOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListFargateProfilesWithContext indicates an expected call of ListFargateProfilesWithContext +func (mr *MockEKSAPIMockRecorder) ListFargateProfilesWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListFargateProfilesWithContext", reflect.TypeOf((*MockEKSAPI)(nil).ListFargateProfilesWithContext), varargs...) +} + +// ListNodegroups mocks base method +func (m *MockEKSAPI) ListNodegroups(arg0 *eks.ListNodegroupsInput) (*eks.ListNodegroupsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListNodegroups", arg0) + ret0, _ := ret[0].(*eks.ListNodegroupsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListNodegroups indicates an expected call of ListNodegroups +func (mr *MockEKSAPIMockRecorder) ListNodegroups(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListNodegroups", reflect.TypeOf((*MockEKSAPI)(nil).ListNodegroups), arg0) +} + +// ListNodegroupsPages mocks base method +func (m *MockEKSAPI) ListNodegroupsPages(arg0 *eks.ListNodegroupsInput, arg1 func(*eks.ListNodegroupsOutput, bool) bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListNodegroupsPages", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ListNodegroupsPages indicates an expected call of ListNodegroupsPages +func (mr *MockEKSAPIMockRecorder) ListNodegroupsPages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListNodegroupsPages", reflect.TypeOf((*MockEKSAPI)(nil).ListNodegroupsPages), arg0, arg1) +} + +// ListNodegroupsPagesWithContext mocks base method +func (m *MockEKSAPI) ListNodegroupsPagesWithContext(arg0 context.Context, arg1 
*eks.ListNodegroupsInput, arg2 func(*eks.ListNodegroupsOutput, bool) bool, arg3 ...request.Option) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListNodegroupsPagesWithContext", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// ListNodegroupsPagesWithContext indicates an expected call of ListNodegroupsPagesWithContext +func (mr *MockEKSAPIMockRecorder) ListNodegroupsPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListNodegroupsPagesWithContext", reflect.TypeOf((*MockEKSAPI)(nil).ListNodegroupsPagesWithContext), varargs...) +} + +// ListNodegroupsRequest mocks base method +func (m *MockEKSAPI) ListNodegroupsRequest(arg0 *eks.ListNodegroupsInput) (*request.Request, *eks.ListNodegroupsOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListNodegroupsRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*eks.ListNodegroupsOutput) + return ret0, ret1 +} + +// ListNodegroupsRequest indicates an expected call of ListNodegroupsRequest +func (mr *MockEKSAPIMockRecorder) ListNodegroupsRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListNodegroupsRequest", reflect.TypeOf((*MockEKSAPI)(nil).ListNodegroupsRequest), arg0) +} + +// ListNodegroupsWithContext mocks base method +func (m *MockEKSAPI) ListNodegroupsWithContext(arg0 context.Context, arg1 *eks.ListNodegroupsInput, arg2 ...request.Option) (*eks.ListNodegroupsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListNodegroupsWithContext", varargs...) 
+ ret0, _ := ret[0].(*eks.ListNodegroupsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListNodegroupsWithContext indicates an expected call of ListNodegroupsWithContext +func (mr *MockEKSAPIMockRecorder) ListNodegroupsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListNodegroupsWithContext", reflect.TypeOf((*MockEKSAPI)(nil).ListNodegroupsWithContext), varargs...) +} + +// ListTagsForResource mocks base method +func (m *MockEKSAPI) ListTagsForResource(arg0 *eks.ListTagsForResourceInput) (*eks.ListTagsForResourceOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListTagsForResource", arg0) + ret0, _ := ret[0].(*eks.ListTagsForResourceOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListTagsForResource indicates an expected call of ListTagsForResource +func (mr *MockEKSAPIMockRecorder) ListTagsForResource(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListTagsForResource", reflect.TypeOf((*MockEKSAPI)(nil).ListTagsForResource), arg0) +} + +// ListTagsForResourceRequest mocks base method +func (m *MockEKSAPI) ListTagsForResourceRequest(arg0 *eks.ListTagsForResourceInput) (*request.Request, *eks.ListTagsForResourceOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListTagsForResourceRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*eks.ListTagsForResourceOutput) + return ret0, ret1 +} + +// ListTagsForResourceRequest indicates an expected call of ListTagsForResourceRequest +func (mr *MockEKSAPIMockRecorder) ListTagsForResourceRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListTagsForResourceRequest", reflect.TypeOf((*MockEKSAPI)(nil).ListTagsForResourceRequest), arg0) +} + +// 
ListTagsForResourceWithContext mocks base method +func (m *MockEKSAPI) ListTagsForResourceWithContext(arg0 context.Context, arg1 *eks.ListTagsForResourceInput, arg2 ...request.Option) (*eks.ListTagsForResourceOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListTagsForResourceWithContext", varargs...) + ret0, _ := ret[0].(*eks.ListTagsForResourceOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListTagsForResourceWithContext indicates an expected call of ListTagsForResourceWithContext +func (mr *MockEKSAPIMockRecorder) ListTagsForResourceWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListTagsForResourceWithContext", reflect.TypeOf((*MockEKSAPI)(nil).ListTagsForResourceWithContext), varargs...) +} + +// ListUpdates mocks base method +func (m *MockEKSAPI) ListUpdates(arg0 *eks.ListUpdatesInput) (*eks.ListUpdatesOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListUpdates", arg0) + ret0, _ := ret[0].(*eks.ListUpdatesOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListUpdates indicates an expected call of ListUpdates +func (mr *MockEKSAPIMockRecorder) ListUpdates(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListUpdates", reflect.TypeOf((*MockEKSAPI)(nil).ListUpdates), arg0) +} + +// ListUpdatesPages mocks base method +func (m *MockEKSAPI) ListUpdatesPages(arg0 *eks.ListUpdatesInput, arg1 func(*eks.ListUpdatesOutput, bool) bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListUpdatesPages", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ListUpdatesPages indicates an expected call of ListUpdatesPages +func (mr *MockEKSAPIMockRecorder) ListUpdatesPages(arg0, 
arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListUpdatesPages", reflect.TypeOf((*MockEKSAPI)(nil).ListUpdatesPages), arg0, arg1) +} + +// ListUpdatesPagesWithContext mocks base method +func (m *MockEKSAPI) ListUpdatesPagesWithContext(arg0 context.Context, arg1 *eks.ListUpdatesInput, arg2 func(*eks.ListUpdatesOutput, bool) bool, arg3 ...request.Option) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListUpdatesPagesWithContext", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// ListUpdatesPagesWithContext indicates an expected call of ListUpdatesPagesWithContext +func (mr *MockEKSAPIMockRecorder) ListUpdatesPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListUpdatesPagesWithContext", reflect.TypeOf((*MockEKSAPI)(nil).ListUpdatesPagesWithContext), varargs...) 
+} + +// ListUpdatesRequest mocks base method +func (m *MockEKSAPI) ListUpdatesRequest(arg0 *eks.ListUpdatesInput) (*request.Request, *eks.ListUpdatesOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListUpdatesRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*eks.ListUpdatesOutput) + return ret0, ret1 +} + +// ListUpdatesRequest indicates an expected call of ListUpdatesRequest +func (mr *MockEKSAPIMockRecorder) ListUpdatesRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListUpdatesRequest", reflect.TypeOf((*MockEKSAPI)(nil).ListUpdatesRequest), arg0) +} + +// ListUpdatesWithContext mocks base method +func (m *MockEKSAPI) ListUpdatesWithContext(arg0 context.Context, arg1 *eks.ListUpdatesInput, arg2 ...request.Option) (*eks.ListUpdatesOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListUpdatesWithContext", varargs...) + ret0, _ := ret[0].(*eks.ListUpdatesOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListUpdatesWithContext indicates an expected call of ListUpdatesWithContext +func (mr *MockEKSAPIMockRecorder) ListUpdatesWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListUpdatesWithContext", reflect.TypeOf((*MockEKSAPI)(nil).ListUpdatesWithContext), varargs...) 
+} + +// TagResource mocks base method +func (m *MockEKSAPI) TagResource(arg0 *eks.TagResourceInput) (*eks.TagResourceOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "TagResource", arg0) + ret0, _ := ret[0].(*eks.TagResourceOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// TagResource indicates an expected call of TagResource +func (mr *MockEKSAPIMockRecorder) TagResource(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TagResource", reflect.TypeOf((*MockEKSAPI)(nil).TagResource), arg0) +} + +// TagResourceRequest mocks base method +func (m *MockEKSAPI) TagResourceRequest(arg0 *eks.TagResourceInput) (*request.Request, *eks.TagResourceOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "TagResourceRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*eks.TagResourceOutput) + return ret0, ret1 +} + +// TagResourceRequest indicates an expected call of TagResourceRequest +func (mr *MockEKSAPIMockRecorder) TagResourceRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TagResourceRequest", reflect.TypeOf((*MockEKSAPI)(nil).TagResourceRequest), arg0) +} + +// TagResourceWithContext mocks base method +func (m *MockEKSAPI) TagResourceWithContext(arg0 context.Context, arg1 *eks.TagResourceInput, arg2 ...request.Option) (*eks.TagResourceOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "TagResourceWithContext", varargs...) 
+ ret0, _ := ret[0].(*eks.TagResourceOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// TagResourceWithContext indicates an expected call of TagResourceWithContext +func (mr *MockEKSAPIMockRecorder) TagResourceWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TagResourceWithContext", reflect.TypeOf((*MockEKSAPI)(nil).TagResourceWithContext), varargs...) +} + +// UntagResource mocks base method +func (m *MockEKSAPI) UntagResource(arg0 *eks.UntagResourceInput) (*eks.UntagResourceOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UntagResource", arg0) + ret0, _ := ret[0].(*eks.UntagResourceOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UntagResource indicates an expected call of UntagResource +func (mr *MockEKSAPIMockRecorder) UntagResource(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UntagResource", reflect.TypeOf((*MockEKSAPI)(nil).UntagResource), arg0) +} + +// UntagResourceRequest mocks base method +func (m *MockEKSAPI) UntagResourceRequest(arg0 *eks.UntagResourceInput) (*request.Request, *eks.UntagResourceOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UntagResourceRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*eks.UntagResourceOutput) + return ret0, ret1 +} + +// UntagResourceRequest indicates an expected call of UntagResourceRequest +func (mr *MockEKSAPIMockRecorder) UntagResourceRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UntagResourceRequest", reflect.TypeOf((*MockEKSAPI)(nil).UntagResourceRequest), arg0) +} + +// UntagResourceWithContext mocks base method +func (m *MockEKSAPI) UntagResourceWithContext(arg0 context.Context, arg1 *eks.UntagResourceInput, arg2 
...request.Option) (*eks.UntagResourceOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UntagResourceWithContext", varargs...) + ret0, _ := ret[0].(*eks.UntagResourceOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UntagResourceWithContext indicates an expected call of UntagResourceWithContext +func (mr *MockEKSAPIMockRecorder) UntagResourceWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UntagResourceWithContext", reflect.TypeOf((*MockEKSAPI)(nil).UntagResourceWithContext), varargs...) +} + +// UpdateClusterConfig mocks base method +func (m *MockEKSAPI) UpdateClusterConfig(arg0 *eks.UpdateClusterConfigInput) (*eks.UpdateClusterConfigOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateClusterConfig", arg0) + ret0, _ := ret[0].(*eks.UpdateClusterConfigOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateClusterConfig indicates an expected call of UpdateClusterConfig +func (mr *MockEKSAPIMockRecorder) UpdateClusterConfig(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateClusterConfig", reflect.TypeOf((*MockEKSAPI)(nil).UpdateClusterConfig), arg0) +} + +// UpdateClusterConfigRequest mocks base method +func (m *MockEKSAPI) UpdateClusterConfigRequest(arg0 *eks.UpdateClusterConfigInput) (*request.Request, *eks.UpdateClusterConfigOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateClusterConfigRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*eks.UpdateClusterConfigOutput) + return ret0, ret1 +} + +// UpdateClusterConfigRequest indicates an expected call of UpdateClusterConfigRequest +func (mr *MockEKSAPIMockRecorder) 
UpdateClusterConfigRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateClusterConfigRequest", reflect.TypeOf((*MockEKSAPI)(nil).UpdateClusterConfigRequest), arg0) +} + +// UpdateClusterConfigWithContext mocks base method +func (m *MockEKSAPI) UpdateClusterConfigWithContext(arg0 context.Context, arg1 *eks.UpdateClusterConfigInput, arg2 ...request.Option) (*eks.UpdateClusterConfigOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UpdateClusterConfigWithContext", varargs...) + ret0, _ := ret[0].(*eks.UpdateClusterConfigOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateClusterConfigWithContext indicates an expected call of UpdateClusterConfigWithContext +func (mr *MockEKSAPIMockRecorder) UpdateClusterConfigWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateClusterConfigWithContext", reflect.TypeOf((*MockEKSAPI)(nil).UpdateClusterConfigWithContext), varargs...) 
+} + +// UpdateClusterVersion mocks base method +func (m *MockEKSAPI) UpdateClusterVersion(arg0 *eks.UpdateClusterVersionInput) (*eks.UpdateClusterVersionOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateClusterVersion", arg0) + ret0, _ := ret[0].(*eks.UpdateClusterVersionOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateClusterVersion indicates an expected call of UpdateClusterVersion +func (mr *MockEKSAPIMockRecorder) UpdateClusterVersion(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateClusterVersion", reflect.TypeOf((*MockEKSAPI)(nil).UpdateClusterVersion), arg0) +} + +// UpdateClusterVersionRequest mocks base method +func (m *MockEKSAPI) UpdateClusterVersionRequest(arg0 *eks.UpdateClusterVersionInput) (*request.Request, *eks.UpdateClusterVersionOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateClusterVersionRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*eks.UpdateClusterVersionOutput) + return ret0, ret1 +} + +// UpdateClusterVersionRequest indicates an expected call of UpdateClusterVersionRequest +func (mr *MockEKSAPIMockRecorder) UpdateClusterVersionRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateClusterVersionRequest", reflect.TypeOf((*MockEKSAPI)(nil).UpdateClusterVersionRequest), arg0) +} + +// UpdateClusterVersionWithContext mocks base method +func (m *MockEKSAPI) UpdateClusterVersionWithContext(arg0 context.Context, arg1 *eks.UpdateClusterVersionInput, arg2 ...request.Option) (*eks.UpdateClusterVersionOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UpdateClusterVersionWithContext", varargs...) 
+ ret0, _ := ret[0].(*eks.UpdateClusterVersionOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateClusterVersionWithContext indicates an expected call of UpdateClusterVersionWithContext +func (mr *MockEKSAPIMockRecorder) UpdateClusterVersionWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateClusterVersionWithContext", reflect.TypeOf((*MockEKSAPI)(nil).UpdateClusterVersionWithContext), varargs...) +} + +// UpdateNodegroupConfig mocks base method +func (m *MockEKSAPI) UpdateNodegroupConfig(arg0 *eks.UpdateNodegroupConfigInput) (*eks.UpdateNodegroupConfigOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateNodegroupConfig", arg0) + ret0, _ := ret[0].(*eks.UpdateNodegroupConfigOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateNodegroupConfig indicates an expected call of UpdateNodegroupConfig +func (mr *MockEKSAPIMockRecorder) UpdateNodegroupConfig(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateNodegroupConfig", reflect.TypeOf((*MockEKSAPI)(nil).UpdateNodegroupConfig), arg0) +} + +// UpdateNodegroupConfigRequest mocks base method +func (m *MockEKSAPI) UpdateNodegroupConfigRequest(arg0 *eks.UpdateNodegroupConfigInput) (*request.Request, *eks.UpdateNodegroupConfigOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateNodegroupConfigRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*eks.UpdateNodegroupConfigOutput) + return ret0, ret1 +} + +// UpdateNodegroupConfigRequest indicates an expected call of UpdateNodegroupConfigRequest +func (mr *MockEKSAPIMockRecorder) UpdateNodegroupConfigRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateNodegroupConfigRequest", 
reflect.TypeOf((*MockEKSAPI)(nil).UpdateNodegroupConfigRequest), arg0) +} + +// UpdateNodegroupConfigWithContext mocks base method +func (m *MockEKSAPI) UpdateNodegroupConfigWithContext(arg0 context.Context, arg1 *eks.UpdateNodegroupConfigInput, arg2 ...request.Option) (*eks.UpdateNodegroupConfigOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UpdateNodegroupConfigWithContext", varargs...) + ret0, _ := ret[0].(*eks.UpdateNodegroupConfigOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateNodegroupConfigWithContext indicates an expected call of UpdateNodegroupConfigWithContext +func (mr *MockEKSAPIMockRecorder) UpdateNodegroupConfigWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateNodegroupConfigWithContext", reflect.TypeOf((*MockEKSAPI)(nil).UpdateNodegroupConfigWithContext), varargs...) 
+} + +// UpdateNodegroupVersion mocks base method +func (m *MockEKSAPI) UpdateNodegroupVersion(arg0 *eks.UpdateNodegroupVersionInput) (*eks.UpdateNodegroupVersionOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateNodegroupVersion", arg0) + ret0, _ := ret[0].(*eks.UpdateNodegroupVersionOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateNodegroupVersion indicates an expected call of UpdateNodegroupVersion +func (mr *MockEKSAPIMockRecorder) UpdateNodegroupVersion(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateNodegroupVersion", reflect.TypeOf((*MockEKSAPI)(nil).UpdateNodegroupVersion), arg0) +} + +// UpdateNodegroupVersionRequest mocks base method +func (m *MockEKSAPI) UpdateNodegroupVersionRequest(arg0 *eks.UpdateNodegroupVersionInput) (*request.Request, *eks.UpdateNodegroupVersionOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateNodegroupVersionRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*eks.UpdateNodegroupVersionOutput) + return ret0, ret1 +} + +// UpdateNodegroupVersionRequest indicates an expected call of UpdateNodegroupVersionRequest +func (mr *MockEKSAPIMockRecorder) UpdateNodegroupVersionRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateNodegroupVersionRequest", reflect.TypeOf((*MockEKSAPI)(nil).UpdateNodegroupVersionRequest), arg0) +} + +// UpdateNodegroupVersionWithContext mocks base method +func (m *MockEKSAPI) UpdateNodegroupVersionWithContext(arg0 context.Context, arg1 *eks.UpdateNodegroupVersionInput, arg2 ...request.Option) (*eks.UpdateNodegroupVersionOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UpdateNodegroupVersionWithContext", varargs...) 
+ ret0, _ := ret[0].(*eks.UpdateNodegroupVersionOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateNodegroupVersionWithContext indicates an expected call of UpdateNodegroupVersionWithContext +func (mr *MockEKSAPIMockRecorder) UpdateNodegroupVersionWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateNodegroupVersionWithContext", reflect.TypeOf((*MockEKSAPI)(nil).UpdateNodegroupVersionWithContext), varargs...) +} + +// WaitUntilClusterActive mocks base method +func (m *MockEKSAPI) WaitUntilClusterActive(arg0 *eks.DescribeClusterInput) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WaitUntilClusterActive", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// WaitUntilClusterActive indicates an expected call of WaitUntilClusterActive +func (mr *MockEKSAPIMockRecorder) WaitUntilClusterActive(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitUntilClusterActive", reflect.TypeOf((*MockEKSAPI)(nil).WaitUntilClusterActive), arg0) +} + +// WaitUntilClusterActiveWithContext mocks base method +func (m *MockEKSAPI) WaitUntilClusterActiveWithContext(arg0 context.Context, arg1 *eks.DescribeClusterInput, arg2 ...request.WaiterOption) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "WaitUntilClusterActiveWithContext", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// WaitUntilClusterActiveWithContext indicates an expected call of WaitUntilClusterActiveWithContext +func (mr *MockEKSAPIMockRecorder) WaitUntilClusterActiveWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitUntilClusterActiveWithContext", reflect.TypeOf((*MockEKSAPI)(nil).WaitUntilClusterActiveWithContext), varargs...) +} + +// WaitUntilClusterDeleted mocks base method +func (m *MockEKSAPI) WaitUntilClusterDeleted(arg0 *eks.DescribeClusterInput) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WaitUntilClusterDeleted", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// WaitUntilClusterDeleted indicates an expected call of WaitUntilClusterDeleted +func (mr *MockEKSAPIMockRecorder) WaitUntilClusterDeleted(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitUntilClusterDeleted", reflect.TypeOf((*MockEKSAPI)(nil).WaitUntilClusterDeleted), arg0) +} + +// WaitUntilClusterDeletedWithContext mocks base method +func (m *MockEKSAPI) WaitUntilClusterDeletedWithContext(arg0 context.Context, arg1 *eks.DescribeClusterInput, arg2 ...request.WaiterOption) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "WaitUntilClusterDeletedWithContext", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// WaitUntilClusterDeletedWithContext indicates an expected call of WaitUntilClusterDeletedWithContext +func (mr *MockEKSAPIMockRecorder) WaitUntilClusterDeletedWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitUntilClusterDeletedWithContext", reflect.TypeOf((*MockEKSAPI)(nil).WaitUntilClusterDeletedWithContext), varargs...) 
+} + +// WaitUntilNodegroupActive mocks base method +func (m *MockEKSAPI) WaitUntilNodegroupActive(arg0 *eks.DescribeNodegroupInput) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WaitUntilNodegroupActive", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// WaitUntilNodegroupActive indicates an expected call of WaitUntilNodegroupActive +func (mr *MockEKSAPIMockRecorder) WaitUntilNodegroupActive(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitUntilNodegroupActive", reflect.TypeOf((*MockEKSAPI)(nil).WaitUntilNodegroupActive), arg0) +} + +// WaitUntilNodegroupActiveWithContext mocks base method +func (m *MockEKSAPI) WaitUntilNodegroupActiveWithContext(arg0 context.Context, arg1 *eks.DescribeNodegroupInput, arg2 ...request.WaiterOption) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "WaitUntilNodegroupActiveWithContext", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// WaitUntilNodegroupActiveWithContext indicates an expected call of WaitUntilNodegroupActiveWithContext +func (mr *MockEKSAPIMockRecorder) WaitUntilNodegroupActiveWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitUntilNodegroupActiveWithContext", reflect.TypeOf((*MockEKSAPI)(nil).WaitUntilNodegroupActiveWithContext), varargs...) 
+} + +// WaitUntilNodegroupDeleted mocks base method +func (m *MockEKSAPI) WaitUntilNodegroupDeleted(arg0 *eks.DescribeNodegroupInput) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WaitUntilNodegroupDeleted", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// WaitUntilNodegroupDeleted indicates an expected call of WaitUntilNodegroupDeleted +func (mr *MockEKSAPIMockRecorder) WaitUntilNodegroupDeleted(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitUntilNodegroupDeleted", reflect.TypeOf((*MockEKSAPI)(nil).WaitUntilNodegroupDeleted), arg0) +} + +// WaitUntilNodegroupDeletedWithContext mocks base method +func (m *MockEKSAPI) WaitUntilNodegroupDeletedWithContext(arg0 context.Context, arg1 *eks.DescribeNodegroupInput, arg2 ...request.WaiterOption) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "WaitUntilNodegroupDeletedWithContext", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// WaitUntilNodegroupDeletedWithContext indicates an expected call of WaitUntilNodegroupDeletedWithContext +func (mr *MockEKSAPIMockRecorder) WaitUntilNodegroupDeletedWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitUntilNodegroupDeletedWithContext", reflect.TypeOf((*MockEKSAPI)(nil).WaitUntilNodegroupDeletedWithContext), varargs...) +} diff --git a/pkg/cloud/services/eks/roles.go b/pkg/cloud/services/eks/roles.go new file mode 100644 index 0000000000..649ecb1586 --- /dev/null +++ b/pkg/cloud/services/eks/roles.go @@ -0,0 +1,380 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package eks
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/service/iam"
+	"github.com/pkg/errors"
+
+	infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha3"
+	"sigs.k8s.io/cluster-api-provider-aws/pkg/record"
+)
+
+// TrustRelationshipPolicyDocument represents an IAM policy document
+type TrustRelationshipPolicyDocument struct {
+	Version   string
+	Statement []StatementEntry
+}
+
+// ToJSONString converts the document to a JSON string
+func (d *TrustRelationshipPolicyDocument) ToJSONString() (string, error) {
+	b, err := json.Marshal(d)
+	if err != nil {
+		return "", err
+	}
+
+	return string(b), nil
+}
+
+// StatementEntry represents a statement within an IAM policy document
+type StatementEntry struct {
+	Effect    string
+	Action    []string
+	Principal map[string][]string
+}
+
+func (s *Service) reconcileControlPlaneIAMRole() error {
+	s.scope.V(2).Info("Reconciling EKS Control Plane IAM Role")
+
+	if s.scope.ControlPlane.Spec.RoleName == nil {
+		//TODO (richardcase): in the future use a default role created by clusterawsadm
+		if !s.scope.EnableIAM() {
+			return ErrClusterRoleNameMissing
+		}
+		s.scope.ControlPlane.Spec.RoleName = aws.String(fmt.Sprintf("%s-iam-service-role", s.scope.Name()))
+	}
+
+	role, err := s.getIAMRole(*s.scope.ControlPlane.Spec.RoleName)
+	if err != nil {
+		if !isNotFound(err) {
+			return err
+		}
+
+		// If the disable IAM flag is used then the role must exist
+		if !s.scope.EnableIAM() {
+			return ErrClusterRoleNotFound
+		}
+
+		role, err =
s.createRole(*s.scope.ControlPlane.Spec.RoleName) + if err != nil { + record.Warnf(s.scope.ControlPlane, "FailedIAMRoleCreation", "Failed to create control plane IAM role %q: %v", *s.scope.ControlPlane.Spec.RoleName, err) + return err + } + record.Eventf(s.scope.ControlPlane, "SucessfulIAMRoleCreation", "Created control plane IAM role %q", *s.scope.ControlPlane.Spec.RoleName) + } + + if s.isUnmanaged(role) { + s.scope.V(2).Info("Skipping, EKS control plane role policy assignment as role is unamanged") + return nil + } + + //TODO: check tags and trust relationship to see if they need updating + + policies := []*string{ + aws.String("arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"), + } + if s.scope.ControlPlane.Spec.RoleAdditionalPolicies != nil { + if !s.scope.AllowAdditionalRoles() && len(*s.scope.ControlPlane.Spec.RoleAdditionalPolicies) > 0 { + return ErrCannotUseAdditionalRoles + } + + for _, policy := range *s.scope.ControlPlane.Spec.RoleAdditionalPolicies { + additionalPolicy := policy + policies = append(policies, &additionalPolicy) + } + } + err = s.ensurePoliciesAttached(role, policies) + if err != nil { + return errors.Wrapf(err, "error ensuring policies are attached: %v", policies) + } + + return nil +} + +func (s *Service) getIAMRole(name string) (*iam.Role, error) { + input := &iam.GetRoleInput{ + RoleName: aws.String(name), + } + + out, err := s.IAMClient.GetRole(input) + if err != nil { + return nil, err + } + + return out.Role, nil +} + +func (s *Service) getIAMPolicy(policyArn string) (*iam.Policy, error) { + input := &iam.GetPolicyInput{ + PolicyArn: &policyArn, + } + + out, err := s.IAMClient.GetPolicy(input) + if err != nil { + return nil, err + } + + return out.Policy, nil +} + +func (s *Service) getIAMRolePolicies(roleName string) ([]*string, error) { + input := &iam.ListAttachedRolePoliciesInput{ + RoleName: &roleName, + } + + out, err := s.IAMClient.ListAttachedRolePolicies(input) + if err != nil { + return nil, errors.Wrapf(err, "error 
listing role polices for %s", roleName)
+	}
+
+	policies := []*string{}
+	for _, policy := range out.AttachedPolicies {
+		policies = append(policies, policy.PolicyArn)
+	}
+
+	return policies, nil
+}
+
+func (s *Service) detachIAMRolePolicy(roleName string, policyARN string) error {
+	input := &iam.DetachRolePolicyInput{
+		RoleName:  aws.String(roleName),
+		PolicyArn: aws.String(policyARN),
+	}
+
+	_, err := s.IAMClient.DetachRolePolicy(input)
+	if err != nil {
+		return errors.Wrapf(err, "error detaching policy %s from role %s", policyARN, roleName)
+	}
+
+	return nil
+}
+
+func (s *Service) attachIAMRolePolicy(roleName string, policyARN string) error {
+	input := &iam.AttachRolePolicyInput{
+		RoleName:  aws.String(roleName),
+		PolicyArn: aws.String(policyARN),
+	}
+
+	_, err := s.IAMClient.AttachRolePolicy(input)
+	if err != nil {
+		return errors.Wrapf(err, "error attaching policy %s to role %s", policyARN, roleName)
+	}
+
+	return nil
+}
+
+func (s *Service) ensurePoliciesAttached(role *iam.Role, policies []*string) error {
+	s.scope.V(2).Info("Ensuring Polices are attached to EKS Control Plane IAM Role")
+	existingPolices, err := s.getIAMRolePolicies(*role.RoleName)
+	if err != nil {
+		return err
+	}
+
+	// Remove policies that aren't in the list
+	for _, existingPolicy := range existingPolices {
+		found := findStringInSlice(policies, *existingPolicy)
+		if !found {
+			err = s.detachIAMRolePolicy(*role.RoleName, *existingPolicy)
+			if err != nil {
+				return err
+			}
+			s.scope.V(2).Info("Detached policy from role", "role", role.RoleName, "policy", existingPolicy)
+		}
+	}
+
+	// Add any policies that aren't currently attached
+	for _, policy := range policies {
+		found := findStringInSlice(existingPolices, *policy)
+		if !found {
+			// Make sure policy exists before attaching
+			_, err := s.getIAMPolicy(*policy)
+			if err != nil {
+				return errors.Wrapf(err, "error getting policy %s", *policy)
+			}
+
+			err = s.attachIAMRolePolicy(*role.RoleName, *policy)
+			if err != nil {
+				return err
+			}
+			s.scope.V(2).Info("Attached policy to role", "role", role.RoleName, "policy", *policy)
+		}
+	}
+
+	return nil
+}
+
+func (s *Service) createRole(name string) (*iam.Role, error) {
+	//TODO: tags also needs a separate sync
+	additionalTags := s.scope.AdditionalTags()
+	additionalTags[infrav1.ClusterAWSCloudProviderTagKey(s.scope.Name())] = string(infrav1.ResourceLifecycleOwned)
+	tags := []*iam.Tag{}
+	for k, v := range additionalTags {
+		tags = append(tags, &iam.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		})
+	}
+
+	trustRelationship := s.controlPlaneTrustRelationship(false)
+	trustRelationShipJSON, err := trustRelationship.ToJSONString()
+	if err != nil {
+		return nil, errors.Wrap(err, "error converting trust relationship to json")
+	}
+
+	input := &iam.CreateRoleInput{
+		RoleName:                 aws.String(name),
+		Tags:                     tags,
+		AssumeRolePolicyDocument: aws.String(trustRelationShipJSON),
+	}
+
+	out, err := s.IAMClient.CreateRole(input)
+	if err != nil {
+		return nil, err
+	}
+
+	return out.Role, nil
+}
+
+func (s *Service) detachAllPoliciesForRole(name string) error {
+	s.scope.V(3).Info("Detaching all policies for role", "role", name)
+	input := &iam.ListAttachedRolePoliciesInput{
+		RoleName: &name,
+	}
+	policies, err := s.IAMClient.ListAttachedRolePolicies(input)
+	if err != nil {
+		return errors.Wrapf(err, "error fetching policies for role %s", name)
+	}
+	for _, p := range policies.AttachedPolicies {
+		s.scope.V(2).Info("Detaching policy", "policy", *p)
+		if err := s.detachIAMRolePolicy(name, *p.PolicyArn); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (s *Service) deleteRole(name string) error {
+	if err := s.detachAllPoliciesForRole(name); err != nil {
+		return errors.Wrapf(err, "error detaching policies for role %s", name)
+	}
+
+	input := &iam.DeleteRoleInput{
+		RoleName: aws.String(name),
+	}
+
+	_, err := s.IAMClient.DeleteRole(input)
+	if err != nil {
+		return errors.Wrapf(err, "error deleting role %s", name)
+	}
+
+	return nil
+} + +func (s *Service) deleteControlPlaneIAMRole() error { + if !s.scope.EnableIAM() { + s.scope.V(2).Info("EKS IAM disabled, skipping deleting EKS Control Plane IAM Role") + return nil + } + + s.scope.V(2).Info("Deleting EKS Control Plane IAM Role") + + role, err := s.getIAMRole(*s.scope.ControlPlane.Spec.RoleName) + if err != nil { + if isNotFound(err) { + s.scope.V(2).Info("EKS Control Plane IAM Role already deleted") + return nil + } + + return errors.Wrap(err, "getting eks control plane iam role") + } + + if s.isUnmanaged(role) { + s.scope.V(2).Info("Skipping, EKS control plane iam role deletion as role is unamanged") + return nil + } + + err = s.deleteRole(*s.scope.ControlPlane.Spec.RoleName) + if err != nil { + record.Eventf(s.scope.ControlPlane, "FailedIAMRoleDeletion", "Failed to delete control Plane IAM role %q: %v", *s.scope.ControlPlane.Spec.RoleName, err) + return err + } + + record.Eventf(s.scope.ControlPlane, "SucessfulIAMRoleDeletion", "Deleted Control Plane IAM role %q", *s.scope.ControlPlane.Spec.RoleName) + return nil +} + +func (s *Service) isUnmanaged(role *iam.Role) bool { + keyToFind := infrav1.ClusterAWSCloudProviderTagKey(s.scope.Name()) + for _, tag := range role.Tags { + if *tag.Key == keyToFind && *tag.Value == string(infrav1.ResourceLifecycleOwned) { + return false + } + } + + return true +} + +func (s *Service) controlPlaneTrustRelationship(enableFargate bool) *TrustRelationshipPolicyDocument { + principal := make(map[string][]string) + principal["Service"] = []string{"eks.amazonaws.com"} + if enableFargate { + principal["Service"] = append(principal["Service"], "eks-fargate-pods.amazonaws.com") + } + + policy := &TrustRelationshipPolicyDocument{ + Version: "2012-10-17", + Statement: []StatementEntry{ + { + Effect: "Allow", + Action: []string{ + "sts:AssumeRole", + }, + Principal: principal, + }, + }, + } + + return policy +} + +func isNotFound(err error) bool { + if aerr, ok := err.(awserr.Error); ok { + switch aerr.Code() { + case 
iam.ErrCodeNoSuchEntityException: + return true + default: + return false + } + } + + return false +} + +func findStringInSlice(slice []*string, toFind string) bool { + for _, item := range slice { + if *item == toFind { + return true + } + } + + return false +} diff --git a/pkg/cloud/services/eks/securitygroup.go b/pkg/cloud/services/eks/securitygroup.go new file mode 100644 index 0000000000..99932e0982 --- /dev/null +++ b/pkg/cloud/services/eks/securitygroup.go @@ -0,0 +1,63 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package eks + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/service/eks" + + infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha3" + "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/converters" +) + +func (s *Service) reconcileSecurityGroups(cluster *eks.Cluster) error { + s.scope.Info("Reconciling EKS security groups", "cluster-name", cluster.Name) + + if s.scope.Network().SecurityGroups == nil { + s.scope.Network().SecurityGroups = make(map[infrav1.SecurityGroupRole]infrav1.SecurityGroup) + } + + input := &ec2.DescribeSecurityGroupsInput{ + Filters: []*ec2.Filter{ + { + Name: aws.String("tag:aws:eks:cluster-name"), + Values: []*string{cluster.Name}, + }, + }, + } + + output, err := s.EC2Client.DescribeSecurityGroups(input) + if err != nil { + return fmt.Errorf("describing security groups: %w", err) + } + + if len(output.SecurityGroups) == 0 { + return ErrNoSecurityGroup + } + + sg := infrav1.SecurityGroup{ + ID: *output.SecurityGroups[0].GroupId, + Name: *output.SecurityGroups[0].GroupName, + Tags: converters.TagsToMap(output.SecurityGroups[0].Tags), + } + s.scope.ControlPlane.Status.Network.SecurityGroups[infrav1.SecurityGroupNode] = sg + + return nil +} diff --git a/pkg/cloud/services/eks/service.go b/pkg/cloud/services/eks/service.go new file mode 100644 index 0000000000..fd3c822f42 --- /dev/null +++ b/pkg/cloud/services/eks/service.go @@ -0,0 +1,48 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package eks + +import ( + "github.com/aws/aws-sdk-go/service/ec2/ec2iface" + "github.com/aws/aws-sdk-go/service/eks/eksiface" + "github.com/aws/aws-sdk-go/service/iam/iamiface" + "github.com/aws/aws-sdk-go/service/sts/stsiface" + + "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope" +) + +// Service holds a collection of interfaces. +// The interfaces are broken down like this to group functions together. +// One alternative is to have a large list of functions from the ec2 client. +type Service struct { + scope *scope.ManagedControlPlaneScope + EC2Client ec2iface.EC2API + EKSClient eksiface.EKSAPI + IAMClient iamiface.IAMAPI + STSClient stsiface.STSAPI +} + +// NewService returns a new service given the api clients. +func NewService(controlPlaneScope *scope.ManagedControlPlaneScope) *Service { + return &Service{ + scope: controlPlaneScope, + EC2Client: scope.NewEC2Client(controlPlaneScope, controlPlaneScope, controlPlaneScope.ControlPlane), + EKSClient: scope.NewEKSClient(controlPlaneScope, controlPlaneScope, controlPlaneScope.ControlPlane), + IAMClient: scope.NewIAMClient(controlPlaneScope, controlPlaneScope, controlPlaneScope.ControlPlane), + STSClient: scope.NewSTSClient(controlPlaneScope, controlPlaneScope, controlPlaneScope.ControlPlane), + } +} diff --git a/pkg/cloud/services/eks/tags.go b/pkg/cloud/services/eks/tags.go new file mode 100644 index 0000000000..d8e24406d8 --- /dev/null +++ b/pkg/cloud/services/eks/tags.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package eks + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/eks" + + infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha3" + "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/converters" + "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/tags" +) + +func (s *Service) reconcileTags(cluster *eks.Cluster) error { + clusterTags := converters.MapPtrToMap(cluster.Tags) + buildParams := s.getEKSTagParams(*cluster.Arn) + tagsBuilder := tags.New(buildParams, tags.WithEKS(s.EKSClient)) + if err := tagsBuilder.Ensure(clusterTags); err != nil { + return fmt.Errorf("failed ensuring tags on cluster: %w", err) + } + + return nil +} + +func (s *Service) getEKSTagParams(id string) *infrav1.BuildParams { + name := s.scope.KubernetesClusterName() + + return &infrav1.BuildParams{ + ClusterName: s.scope.Name(), + ResourceID: id, + Lifecycle: infrav1.ResourceLifecycleOwned, + Name: aws.String(name), + Role: aws.String(infrav1.CommonRoleTagValue), + Additional: s.scope.AdditionalTags(), + } +} diff --git a/pkg/cloud/services/elb/loadbalancer.go b/pkg/cloud/services/elb/loadbalancer.go index df579684eb..535a8908b0 100644 --- a/pkg/cloud/services/elb/loadbalancer.go +++ b/pkg/cloud/services/elb/loadbalancer.go @@ -32,7 +32,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/awserrors" "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/converters" "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/wait" - "sigs.k8s.io/cluster-api-provider-aws/pkg/internal/hash" + "sigs.k8s.io/cluster-api-provider-aws/pkg/hash" 
"sigs.k8s.io/cluster-api-provider-aws/pkg/record" ) diff --git a/pkg/cloud/services/securitygroup/securitygroups.go b/pkg/cloud/services/securitygroup/securitygroups.go index 613e866a1c..51bc4405aa 100644 --- a/pkg/cloud/services/securitygroup/securitygroups.go +++ b/pkg/cloud/services/securitygroup/securitygroups.go @@ -49,6 +49,16 @@ const ( IPProtocolICMPv6 = "58" ) +var ( + defaultRoles = []infrav1.SecurityGroupRole{ + infrav1.SecurityGroupBastion, + infrav1.SecurityGroupAPIServerLB, + infrav1.SecurityGroupLB, + infrav1.SecurityGroupControlPlane, + infrav1.SecurityGroupNode, + } +) + func (s *Service) ReconcileSecurityGroups() error { s.scope.V(2).Info("Reconciling security groups") @@ -61,18 +71,9 @@ func (s *Service) ReconcileSecurityGroups() error { return err } - // Declare all security group roles that the reconcile loop takes care of. - roles := []infrav1.SecurityGroupRole{ - infrav1.SecurityGroupBastion, - infrav1.SecurityGroupAPIServerLB, - infrav1.SecurityGroupLB, - infrav1.SecurityGroupControlPlane, - infrav1.SecurityGroupNode, - } - // First iteration makes sure that the security group are valid and fully created. - for i := range roles { - role := roles[i] + for i := range s.roles { + role := s.roles[i] sg := s.getDefaultSecurityGroup(role) existing, ok := sgs[*sg.GroupName] @@ -92,6 +93,10 @@ func (s *Service) ReconcileSecurityGroups() error { // TODO(vincepri): validate / update security group if necessary. s.scope.SecurityGroups()[role] = existing + if s.isEKSOwned(existing) { + continue + } + // Make sure tags are up to date. if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) { buildParams := s.getSecurityGroupTagParams(existing.Name, existing.ID, role) @@ -109,7 +114,7 @@ func (s *Service) ReconcileSecurityGroups() error { // the specified ingress rules. 
for i := range s.scope.SecurityGroups() { sg := s.scope.SecurityGroups()[i] - if sg.Tags.HasAWSCloudProviderOwned(s.scope.Name()) { + if sg.Tags.HasAWSCloudProviderOwned(s.scope.Name()) || s.isEKSOwned(sg) { // skip rule reconciliation, as we expect the in-cluster cloud integration to manage them continue } @@ -156,6 +161,10 @@ func (s *Service) DeleteSecurityGroups() error { for _, sg := range s.scope.SecurityGroups() { current := sg.IngressRules + if s.isEKSOwned(sg) { + continue + } + if err := s.revokeAllSecurityGroupIngressRules(sg.ID); awserrors.IsIgnorableSecurityGroupError(err) != nil { return err } @@ -165,6 +174,11 @@ func (s *Service) DeleteSecurityGroups() error { for i := range s.scope.SecurityGroups() { sg := s.scope.SecurityGroups()[i] + + if s.isEKSOwned(sg) { + continue + } + if err := s.deleteSecurityGroup(&sg, "managed"); err != nil { return err } @@ -485,6 +499,11 @@ func (s *Service) getSecurityGroupTagParams(name, id string, role infrav1.Securi } } +func (s *Service) isEKSOwned(sg infrav1.SecurityGroup) bool { + _, ok := sg.Tags["aws:eks:cluster-name"] + return ok +} + func ingressRuleToSDKType(i *infrav1.IngressRule) (res *ec2.IpPermission) { // AWS seems to ignore the From/To port when set on protocols where it doesn't apply, but // we avoid serializing it out for clarity's sake. diff --git a/pkg/cloud/services/securitygroup/service.go b/pkg/cloud/services/securitygroup/service.go index ba62c41fc2..3bdbea9bc2 100644 --- a/pkg/cloud/services/securitygroup/service.go +++ b/pkg/cloud/services/securitygroup/service.go @@ -49,6 +49,7 @@ type Scope interface { // One alternative is to have a large list of functions from the ec2 client. 
type Service struct { scope Scope + roles []infrav1.SecurityGroupRole EC2Client ec2iface.EC2API } @@ -56,6 +57,17 @@ type Service struct { func NewService(sgScope Scope) *Service { return &Service{ scope: sgScope, + roles: defaultRoles, + EC2Client: scope.NewEC2Client(sgScope, sgScope, sgScope.InfraCluster()), + } +} + +// NewServiceWithRoles returns a new service given the api clients with a defined +// set of roles +func NewServiceWithRoles(sgScope Scope, roles []infrav1.SecurityGroupRole) *Service { + return &Service{ + scope: sgScope, + roles: roles, EC2Client: scope.NewEC2Client(sgScope, sgScope, sgScope.InfraCluster()), } } diff --git a/pkg/cloud/tags/tags.go b/pkg/cloud/tags/tags.go index 8bf7976da5..d8b2a227d3 100644 --- a/pkg/cloud/tags/tags.go +++ b/pkg/cloud/tags/tags.go @@ -23,6 +23,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/ec2/ec2iface" + "github.com/aws/aws-sdk-go/service/eks" + "github.com/aws/aws-sdk-go/service/eks/eksiface" "github.com/pkg/errors" infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha3" @@ -106,6 +108,32 @@ func WithEC2(ec2client ec2iface.EC2API) BuilderOption { } } +// WithEKS is used to specify that the tags builder will be targetting EKS +func WithEKS(eksclient eksiface.EKSAPI) BuilderOption { + return func(b *Builder) { + b.applyFunc = func(params *infrav1.BuildParams) error { + tags := infrav1.Build(*params) + + eksTags := make(map[string]*string, len(tags)) + for k, v := range tags { + eksTags[k] = aws.String(v) + } + + tagResourcesInput := &eks.TagResourceInput{ + ResourceArn: aws.String(params.ResourceID), + Tags: eksTags, + } + + _, err := eksclient.TagResource(tagResourcesInput) + if err != nil { + return errors.Wrapf(err, "failed to tag eks cluster %q in cluster %q", params.ResourceID, params.ClusterName) + } + + return nil + } + } +} + func computeDiff(current infrav1.Tags, buildParams infrav1.BuildParams) infrav1.Tags { want := 
infrav1.Build(buildParams) diff --git a/pkg/internal/hash/base36.go b/pkg/hash/base36.go similarity index 100% rename from pkg/internal/hash/base36.go rename to pkg/hash/base36.go diff --git a/pkg/internal/tristate/tristate.go b/pkg/internal/tristate/tristate.go new file mode 100644 index 0000000000..c8b75a40b0 --- /dev/null +++ b/pkg/internal/tristate/tristate.go @@ -0,0 +1,30 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package tristate + +// withDefault evaluates a pointer to a bool with a default value +func withDefault(def bool, b *bool) bool { + if b == nil { + return def + } + return *b +} + +// EqualWithDefault compares two bool pointers using a default value +func EqualWithDefault(def bool, a *bool, b *bool) bool { + return withDefault(def, a) == withDefault(def, b) +} diff --git a/templates/cluster-template-managed.yaml b/templates/cluster-template-managed.yaml new file mode 100644 index 0000000000..565d83200a --- /dev/null +++ b/templates/cluster-template-managed.yaml @@ -0,0 +1,31 @@ +--- +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: Cluster +metadata: + name: "${CLUSTER_NAME}" +spec: + clusterNetwork: + pods: + cidrBlocks: ["192.168.0.0/16"] + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: AWSManagedCluster + name: "${CLUSTER_NAME}" + controlPlaneRef: + kind: AWSManagedControlPlane + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + name: "${CLUSTER_NAME}-control-plane" +--- 
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +kind: AWSManagedCluster +metadata: + name: "${CLUSTER_NAME}" +--- +kind: AWSManagedControlPlane +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + region: "${AWS_REGION}" + sshKeyName: "${AWS_SSH_KEY_NAME}" + version: "${EKS_KUBERNETES_VERSION}"