diff --git a/Makefile b/Makefile index 4047e33a25f..3a9a4091ce7 100644 --- a/Makefile +++ b/Makefile @@ -37,6 +37,7 @@ TOOLS_BIN_DIR := $(TOOLS_DIR)/bin TEMPLATES_DIR := $(ROOT_DIR)/templates BIN_DIR := bin RELEASE_NOTES_BIN := bin/release-notes +EXP_DIR := exp # Binaries. CLUSTERCTL := $(BIN_DIR)/clusterctl diff --git a/Tiltfile b/Tiltfile index c1fac0fad36..fc23004b7e7 100644 --- a/Tiltfile +++ b/Tiltfile @@ -11,7 +11,6 @@ settings = { "kind_cluster_name": "capz", "capi_version": "v0.3.5", "cert_manager_version": "v0.11.0", - "feature_gates": '--feature-gates MachinePool=true' } keys = ["AZURE_SUBSCRIPTION_ID_B64", "AZURE_TENANT_ID_B64", "AZURE_CLIENT_SECRET_B64", "AZURE_CLIENT_ID_B64"] @@ -55,17 +54,47 @@ def deploy_cert_manager(): def deploy_capi(): version = settings.get("capi_version") local("kubectl apply -f https://github.com/kubernetes-sigs/cluster-api/releases/download/{}/cluster-api-components.yaml".format(version)) - if settings.get("feature_gates"): - args_str = str(local('kubectl get deployments capi-controller-manager -n capi-system -o jsonpath={.spec.template.spec.containers[1].args}')) - if settings.get("feature_gates") not in args_str: - args = args_str[1:-1].split() # "[arg1 arg2 ...]" trim off the first and last, then split - args.append("\"{}\"".format(settings.get("feature_gates"))) - patch = [{ - "op": "replace", - "path": "/spec/template/spec/containers/1/args", - "value": args, - }] - local("kubectl patch deployment capi-controller-manager -n capi-system --type json -p='{}'".format(str(encode_json(patch)).replace("\n", ""))) + if settings.get("extra_args"): + extra_args = settings.get("extra_args") + if extra_args.get("core"): + core_extra_args = extra_args.get("core") + if core_extra_args: + for namespace in ["capi-system", "capi-webhook-system"]: + patch_args_with_extra_args(namespace, "capi-controller-manager", core_extra_args) + patch_capi_manager_role_with_exp_infra_rbac() + if extra_args.get("kubeadm-bootstrap"): + kb_extra_args = extra_args.get("kubeadm-bootstrap") + if kb_extra_args: + patch_args_with_extra_args("capi-kubeadm-bootstrap-system", "capi-kubeadm-bootstrap-controller-manager", kb_extra_args) + + +def patch_args_with_extra_args(namespace, name, extra_args): + args_str = str(local('kubectl get deployments {} -n {} -o jsonpath={{.spec.template.spec.containers[1].args}}'.format(name, namespace))) + args_to_add = [arg for arg in extra_args if arg not in args_str] + if args_to_add: + args = args_str[1:-1].split() + args.extend(args_to_add) + patch = [{ + "op": "replace", + "path": "/spec/template/spec/containers/1/args", + "value": args, + }] + local("kubectl patch deployment {} -n {} --type json -p='{}'".format(name, namespace, str(encode_json(patch)).replace("\n", ""))) + + +# patch the CAPI manager role to also provide access to experimental infrastructure +def patch_capi_manager_role_with_exp_infra_rbac(): + api_groups_str = str(local('kubectl get clusterrole capi-manager-role -o jsonpath={.rules[1].apiGroups}')) + exp_infra_group = "exp.infrastructure.cluster.x-k8s.io" + if exp_infra_group not in api_groups_str: + groups = api_groups_str[1:-1].split() # "[arg1 arg2 ...]" trim off the first and last, then split + groups.append(exp_infra_group) + patch = [{ + "op": "replace", + "path": "/rules/1/apiGroups", + "value": groups, + }] + local("kubectl patch clusterrole capi-manager-role --type json -p='{}'".format(str(encode_json(patch)).replace("\n", ""))) # Users may define their own Tilt customizations in tilt.d. 
This directory is excluded from git and these files will
@@ -76,13 +105,13 @@ def include_user_tilt_files():
         include(f)


-def append_arg_for_container_in_deployment(yaml_stream, name, namespace, contains_image_name, arg):
+def append_arg_for_container_in_deployment(yaml_stream, name, namespace, contains_image_name, args):
     for item in yaml_stream:
         if item["kind"] == "Deployment" and item.get("metadata").get("name") == name and item.get("metadata").get("namespace") == namespace:
             containers = item.get("spec").get("template").get("spec").get("containers")
             for container in containers:
                 if contains_image_name in container.get("image"):
-                    container.get("args").append("\"{}\"".format(arg))
+                    container.get("args").extend(args)


 def fixup_yaml_empty_arrays(yaml_str):
@@ -122,12 +151,14 @@ def capz():
             value = substitutions[substitution]
             yaml = yaml.replace("${" + substitution + "}", value)

-    # add feature gates if they are defined
-    if settings.get("feature_gates"):
-        yaml_dict = decode_yaml_stream(yaml)
-        append_arg_for_container_in_deployment(yaml_dict, "capz-controller-manager", "capz-system", "cluster-api-azure-controller", settings.get("feature_gates"))
-        yaml = str(encode_yaml_stream(yaml_dict))
-        yaml = fixup_yaml_empty_arrays(yaml)
+    # add extra_args if they are defined
+    if settings.get("extra_args"):
+        azure_extra_args = settings.get("extra_args").get("azure")
+        if azure_extra_args:
+            yaml_dict = decode_yaml_stream(yaml)
+            append_arg_for_container_in_deployment(yaml_dict, "capz-controller-manager", "capz-system", "cluster-api-azure-controller", azure_extra_args)
+            yaml = str(encode_yaml_stream(yaml_dict))
+            yaml = fixup_yaml_empty_arrays(yaml)

     # Set up a local_resource build of the provider's manager binary.
     local_resource(
diff --git a/api/v1alpha3/azureimage_validation.go b/api/v1alpha3/azureimage_validation.go
index b129b99caf1..82e44795776 100644
--- a/api/v1alpha3/azureimage_validation.go
+++ b/api/v1alpha3/azureimage_validation.go
@@ -69,7 +69,7 @@ func validateSingleDetailsOnly(image *Image, fldPath *field.Path) field.ErrorLis
     }

     if !imageDetailsFound {
-        allErrs = append(allErrs, field.Required(fldPath, "You must supply a ID, Marketplace and SharedGallery image details"))
+        allErrs = append(allErrs, field.Required(fldPath, "You must supply an ID, Marketplace or SharedGallery image details"))
     }

     return allErrs
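For context on the reworded message: `validateSingleDetailsOnly` accepts exactly one of three image styles on an `image` field. A minimal sketch of the three mutually exclusive forms, using placeholder values rather than real image references (field names per the `AzureMachinePool` CRD introduced below):

```yaml
# Set exactly one of id, marketplace, or sharedGallery on an image.
image:
  id: "/subscriptions/<sub-id>/resourceGroups/<rg>/providers/Microsoft.Compute/images/<image-name>"
---
image:
  marketplace:
    publisher: <publisher>   # organization that created the image
    offer: <offer>           # e.g. UbuntuServer
    sku: <sku>               # e.g. 18.04-LTS
    version: latest          # Major.Minor.Build or 'latest'
---
image:
  sharedGallery:
    subscriptionID: <sub-id>
    resourceGroup: <rg>
    gallery: <gallery-name>
    name: <image-name>
    version: 1.2.3           # Major.Minor.Build or 'latest'
```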
diff --git a/config/crd/bases/exp.infrastructure.cluster.x-k8s.io_azuremachinepools.yaml b/config/crd/bases/exp.infrastructure.cluster.x-k8s.io_azuremachinepools.yaml
new file mode 100644
index 00000000000..b273c43338c
--- /dev/null
+++ b/config/crd/bases/exp.infrastructure.cluster.x-k8s.io_azuremachinepools.yaml
@@ -0,0 +1,282 @@
+
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.2.9
+  creationTimestamp: null
+  name: azuremachinepools.exp.infrastructure.cluster.x-k8s.io
+spec:
+  group: exp.infrastructure.cluster.x-k8s.io
+  names:
+    kind: AzureMachinePool
+    listKind: AzureMachinePoolList
+    plural: azuremachinepools
+    singular: azuremachinepool
+  scope: Namespaced
+  versions:
+  - additionalPrinterColumns:
+    - description: AzureMachinePool replicas count
+      jsonPath: .status.replicas
+      name: Replicas
+      type: string
+    - description: AzureMachinePool ready status
+      jsonPath: .status.ready
+      name: Ready
+      type: string
+    - description: Azure VMSS provisioning state
+      jsonPath: .status.provisioningState
+      name: State
+      type: string
+    - description: Cluster to which this AzureMachinePool belongs
+      jsonPath: .metadata.labels.cluster\.x-k8s\.io/cluster-name
+      name: Cluster
+      priority: 1
+      type: string
+    - description: MachinePool object to which this AzureMachinePool belongs
+      jsonPath: .metadata.ownerReferences[?(@.kind=="MachinePool")].name
+      name: MachinePool
+      priority: 1
+      type: string
+    - description: Azure VMSS ID
+      jsonPath: .spec.providerID
+      name: VMSS ID
+      priority: 1
+      type: string
+    - description: Azure VM Size
+      jsonPath: .spec.template.vmSize
+      name: VM Size
+      priority: 1
+      type: string
+    name: v1alpha3
+    schema:
+      openAPIV3Schema:
+        description: AzureMachinePool is the Schema for the azuremachinepools API
+        properties:
+          apiVersion:
+            description: 'APIVersion defines the versioned schema of this representation
+              of an object. Servers should convert recognized schemas to the latest
+              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+            type: string
+          kind:
+            description: 'Kind is a string value representing the REST resource this
+              object represents. Servers may infer this from the endpoint the client
+              submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: AzureMachinePoolSpec defines the desired state of AzureMachinePool
+            properties:
+              additionalTags:
+                additionalProperties:
+                  type: string
+                description: AdditionalTags is an optional set of tags to add to an
+                  instance, in addition to the ones added by default by the Azure
+                  provider. If both the AzureCluster and the AzureMachine specify
+                  the same tag name with different values, the AzureMachine's value
+                  takes precedence.
+                type: object
+              location:
+                description: Location is the Azure region location e.g. westus2
+                type: string
+              providerID:
+                description: ProviderID is the identification ID of the Virtual Machine
+                  Scale Set
+                type: string
+              providerIDList:
+                description: ProviderIDList are the identification IDs of machine
+                  instances provided by the provider. This field must match the provider
+                  IDs as seen on the node objects corresponding to a machine pool's
+                  machine instances.
+                items:
+                  type: string
+                type: array
+              template:
+                description: Template contains the details used to build a replica
+                  virtual machine within the Machine Pool
+                properties:
+                  image:
+                    description: Image is used to provide details of an image to use
+                      during Virtual Machine creation. If image details are omitted
+                      the image will default to the Azure Marketplace "capi" offer, which
+                      is based on Ubuntu.
+                    properties:
+                      id:
+                        description: ID specifies an image to use by ID
+                        type: string
+                      marketplace:
+                        description: Marketplace specifies an image to use from the
+                          Azure Marketplace
+                        properties:
+                          offer:
+                            description: Offer specifies the name of a group of related
+                              images created by the publisher. For example, UbuntuServer,
+                              WindowsServer
+                            minLength: 1
+                            type: string
+                          publisher:
+                            description: Publisher is the name of the organization
+                              that created the image
+                            minLength: 1
+                            type: string
+                          sku:
+                            description: SKU specifies an instance of an offer, such
+                              as a major release of a distribution. For example, 18.04-LTS,
+                              2019-Datacenter
+                            minLength: 1
+                            type: string
+                          version:
+                            description: Version specifies the version of an image
+                              sku. The allowed formats are Major.Minor.Build or 'latest'.
+                              Major, Minor, and Build are decimal numbers.
Specify + 'latest' to use the latest version of an image available + at deploy time. Even if you use 'latest', the VM image + will not automatically update after deploy time even + if a new version becomes available. + minLength: 1 + type: string + required: + - offer + - publisher + - sku + - version + type: object + sharedGallery: + description: SharedGallery specifies an image to use from + an Azure Shared Image Gallery + properties: + gallery: + description: Gallery specifies the name of the shared + image gallery that contains the image + minLength: 1 + type: string + name: + description: Name is the name of the image + minLength: 1 + type: string + resourceGroup: + description: ResourceGroup specifies the resource group + containing the shared image gallery + minLength: 1 + type: string + subscriptionID: + description: SubscriptionID is the identifier of the subscription + that contains the shared image gallery + minLength: 1 + type: string + version: + description: Version specifies the version of the marketplace + image. The allowed formats are Major.Minor.Build or + 'latest'. Major, Minor, and Build are decimal numbers. + Specify 'latest' to use the latest version of an image + available at deploy time. Even if you use 'latest', + the VM image will not automatically update after deploy + time even if a new version becomes available. + minLength: 1 + type: string + required: + - gallery + - name + - resourceGroup + - subscriptionID + - version + type: object + type: object + osDisk: + description: OSDisk contains the operating system disk information + for a Virtual Machine + properties: + diskSizeGB: + format: int32 + type: integer + managedDisk: + properties: + storageAccountType: + type: string + required: + - storageAccountType + type: object + osType: + type: string + required: + - diskSizeGB + - managedDisk + - osType + type: object + sshPublicKey: + description: SSHPublicKey is the SSH public key string base64 + encoded to add to a Virtual Machine + type: string + vmSize: + description: VMSize is the size of the Virtual Machine to build. + See https://docs.microsoft.com/en-us/rest/api/compute/virtualmachines/createorupdate#virtualmachinesizetypes + type: string + required: + - osDisk + - sshPublicKey + - vmSize + type: object + required: + - location + - template + type: object + status: + description: AzureMachinePoolStatus defines the observed state of AzureMachinePool + properties: + failureMessage: + description: "ErrorMessage will be set in the event that there is + a terminal problem reconciling the MachinePool and will contain + a more verbose string suitable for logging and human consumption. + \n This field should not be set for transitive errors that a controller + faces that are expected to be fixed automatically over time (like + service outages), but instead indicate that something is fundamentally + wrong with the MachinePool's spec or the configuration of the controller, + and that manual intervention is required. Examples of terminal errors + would be invalid combinations of settings in the spec, values that + are unsupported by the controller, or the responsible controller + itself being critically misconfigured. \n Any transient errors that + occur during the reconciliation of MachinePools can be added as + events to the MachinePool object and/or logged in the controller's + output." 
+ type: string + failureReason: + description: "ErrorReason will be set in the event that there is a + terminal problem reconciling the MachinePool and will contain a + succinct value suitable for machine interpretation. \n This field + should not be set for transitive errors that a controller faces + that are expected to be fixed automatically over time (like service + outages), but instead indicate that something is fundamentally wrong + with the MachinePool's spec or the configuration of the controller, + and that manual intervention is required. Examples of terminal errors + would be invalid combinations of settings in the spec, values that + are unsupported by the controller, or the responsible controller + itself being critically misconfigured. \n Any transient errors that + occur during the reconciliation of MachinePools can be added as + events to the MachinePool object and/or logged in the controller's + output." + type: string + provisioningState: + description: VMState is the provisioning state of the Azure virtual + machine. + type: string + ready: + description: Ready is true when the provider resource is ready. + type: boolean + replicas: + description: Replicas is the most recently observed number of replicas. + format: int32 + type: integer + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 214dd10da50..f69440c90e0 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -8,6 +8,7 @@ resources: - bases/infrastructure.cluster.x-k8s.io_azuremachines.yaml - bases/infrastructure.cluster.x-k8s.io_azureclusters.yaml - bases/infrastructure.cluster.x-k8s.io_azuremachinetemplates.yaml + - bases/exp.infrastructure.cluster.x-k8s.io_azuremachinepools.yaml # +kubebuilder:scaffold:crdkustomizeresource patchesStrategicMerge: @@ -16,6 +17,7 @@ patchesStrategicMerge: - patches/webhook_in_azuremachines.yaml - patches/webhook_in_azureclusters.yaml - patches/webhook_in_azuremachinetemplates.yaml + - patches/webhook_in_azuremachinepools.yaml # +kubebuilder:scaffold:crdkustomizewebhookpatch # [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix. @@ -23,6 +25,7 @@ patchesStrategicMerge: - patches/cainjection_in_azuremachines.yaml - patches/cainjection_in_azureclusters.yaml - patches/cainjection_in_azuremachinetemplates.yaml + - patches/cainjection_in_azuremachinepools.yaml # +kubebuilder:scaffold:crdkustomizecainjectionpatch # the following config is for teaching kustomize how to do kustomization for CRDs. diff --git a/config/crd/patches/cainjection_in_azuremachinepools.yaml b/config/crd/patches/cainjection_in_azuremachinepools.yaml new file mode 100644 index 00000000000..1a7f89e993e --- /dev/null +++ b/config/crd/patches/cainjection_in_azuremachinepools.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. 
+apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: azuremachinepools.exp.infrastructure.cluster.x-k8s.io diff --git a/config/crd/patches/webhook_in_azuremachinepools.yaml b/config/crd/patches/webhook_in_azuremachinepools.yaml new file mode 100644 index 00000000000..d15c8280f0c --- /dev/null +++ b/config/crd/patches/webhook_in_azuremachinepools.yaml @@ -0,0 +1,19 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: azuremachinepools.exp.infrastructure.cluster.x-k8s.io +spec: + conversion: + strategy: Webhook + webhook: + conversionReviewVersions: ["v1", "v1beta1"] + clientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 97c8d491a93..932e5f42a52 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -43,6 +43,35 @@ rules: - get - list - watch +- apiGroups: + - exp.cluster.x-k8s.io + resources: + - machinepools + - machinepools/status + verbs: + - get + - list + - watch +- apiGroups: + - exp.infrastructure.cluster.x-k8s.io + resources: + - azuremachinepools + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - exp.infrastructure.cluster.x-k8s.io + resources: + - azuremachinepools/status + verbs: + - get + - patch + - update - apiGroups: - infrastructure.cluster.x-k8s.io resources: @@ -83,3 +112,12 @@ rules: - get - patch - update +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - azuremachinetemplates + - azuremachinetemplates/status + verbs: + - get + - list + - watch diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml index f4a324bce17..0d351932969 100644 --- a/config/webhook/manifests.yaml +++ b/config/webhook/manifests.yaml @@ -1,4 +1,31 @@ +--- +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: MutatingWebhookConfiguration +metadata: + creationTimestamp: null + name: mutating-webhook-configuration +webhooks: +- clientConfig: + caBundle: Cg== + service: + name: webhook-service + namespace: system + path: /mutate-exp-cluster-x-k8s-io-x-k8s-io-v1alpha3-azuremachinepool + failurePolicy: Fail + matchPolicy: Equivalent + name: mazuremachinepool.kb.io + rules: + - apiGroups: + - exp.cluster.x-k8s.io.x-k8s.io + apiVersions: + - v1alpha3 + operations: + - CREATE + - UPDATE + resources: + - azuremachinepools + --- apiVersion: admissionregistration.k8s.io/v1beta1 kind: ValidatingWebhookConfiguration @@ -25,3 +52,22 @@ webhooks: - UPDATE resources: - azuremachine +- clientConfig: + caBundle: Cg== + service: + name: webhook-service + namespace: system + path: /validate-exp-cluster-x-k8s-io-x-k8s-io-v1alpha3-azuremachinepool + failurePolicy: Fail + matchPolicy: Equivalent + name: vazuremachinepool.kb.io + rules: + - apiGroups: + - exp.cluster.x-k8s.io.x-k8s.io + apiVersions: + - v1alpha3 + operations: + - CREATE + - UPDATE + resources: + - azuremachinepools diff --git a/controllers/azurecluster_controller.go b/controllers/azurecluster_controller.go index 0fec77b9d90..4a48d026bd3 100644 --- 
a/controllers/azurecluster_controller.go +++ b/controllers/azurecluster_controller.go @@ -52,10 +52,11 @@ func (r *AzureClusterReconciler) SetupWithManager(mgr ctrl.Manager, options cont // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=azureclusters,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=azureclusters/status,verbs=get;update;patch // +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=azuremachinetemplates;azuremachinetemplates/status,verbs=get;list;watch func (r *AzureClusterReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, reterr error) { ctx := context.TODO() - log := r.Log.WithValues("namespace", req.Namespace, "azureCluster", req.Name) + log := r.Log.WithValues("namespace", req.Namespace, "AzureCluster", req.Name) // Fetch the AzureCluster instance azureCluster := &infrav1.AzureCluster{} diff --git a/controllers/azuremachine_controller.go b/controllers/azuremachine_controller.go index 5717e71ec1f..3c71a5e6d37 100644 --- a/controllers/azuremachine_controller.go +++ b/controllers/azuremachine_controller.go @@ -117,7 +117,7 @@ func (r *AzureMachineReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, ret return reconcile.Result{}, nil } - logger = logger.WithValues("azureCluster", azureCluster.Name) + logger = logger.WithValues("AzureCluster", azureCluster.Name) // Create the cluster scope clusterScope, err := scope.NewClusterScope(scope.ClusterScopeParams{ diff --git a/controllers/azuremachine_tags.go b/controllers/azuremachine_tags.go index d0f7de691d0..4ad6caedb1e 100644 --- a/controllers/azuremachine_tags.go +++ b/controllers/azuremachine_tags.go @@ -39,7 +39,7 @@ func (r *AzureMachineReconciler) reconcileTags(machineScope *scope.MachineScope, if err != nil { return err } - changed, created, deleted, newAnnotation := tagsChanged(annotation, additionalTags) + changed, created, deleted, newAnnotation := TagsChanged(annotation, additionalTags) if changed { machineScope.Info("Updating tags on AzureMachine") vmSpec := &virtualmachines.Spec{ @@ -80,8 +80,8 @@ func (r *AzureMachineReconciler) reconcileTags(machineScope *scope.MachineScope, return nil } -// tagsChanged determines which tags to delete and which to add. -func tagsChanged(annotation map[string]interface{}, src map[string]string) (bool, map[string]string, map[string]string, map[string]interface{}) { +// TagsChanged determines which tags to delete and which to add. +func TagsChanged(annotation map[string]interface{}, src map[string]string) (bool, map[string]string, map[string]string, map[string]interface{}) { // Bool tracking if we found any changed state. 
changed := false
diff --git a/controllers/azuremachine_tags_unit_test.go b/controllers/azuremachine_tags_unit_test.go
index a1bfd875782..ffc25674c10 100644
--- a/controllers/azuremachine_tags_unit_test.go
+++ b/controllers/azuremachine_tags_unit_test.go
@@ -110,7 +110,7 @@ func TestTagsChanged(t *testing.T) {

 	for name, test := range tests {
 		t.Run(name, func(t *testing.T) {
-			changed, created, deleted, newAnnotation := tagsChanged(test.annotation, test.src)
+			changed, created, deleted, newAnnotation := TagsChanged(test.annotation, test.src)
 			g.Expect(changed).To(Equal(test.expectedResult))
 			g.Expect(created).To(Equal(test.expectedCreated))
 			g.Expect(deleted).To(Equal(test.expectedDeleted))
diff --git a/docs/development.md b/docs/development.md
index 411e07f450b..5aaba208bdb 100644
--- a/docs/development.md
+++ b/docs/development.md
@@ -140,6 +140,8 @@ To build a kind cluster and start Tilt, just run:
 ```shell
 make tilt-up
 ```
+By default, the Cluster API components deployed by Tilt have experimental features turned off.
+If you would like to enable these features, add `extra_args` as specified in [The Cluster API Book](https://cluster-api.sigs.k8s.io/developer/tilt.html#create-a-tilt-settingsjson-file).

 Once your kind management cluster is up and running, you can [deploy a workload cluster](#deploying-a-workload-cluster).
diff --git a/docs/topics/machinepools.md b/docs/topics/machinepools.md
new file mode 100644
index 00000000000..f3faaa96eb5
--- /dev/null
+++ b/docs/topics/machinepools.md
@@ -0,0 +1,129 @@
+# MachinePools
+- **Feature status:** Experimental
+- **Feature gate:** MachinePool=true
+
+> In Cluster API (CAPI) v1alpha2, users can create MachineDeployment, MachineSet or Machine custom
+> resources. When you create a MachineDeployment or MachineSet, Cluster API components react and
+> eventually Machine resources are created. Cluster API's current architecture mandates that a
+> Machine maps to a single machine (virtual or bare metal) with the provider being responsible for
+> the management of the underlying machine's infrastructure.
+
+> Nearly all infrastructure providers have a way for their users to manage a group of machines
+> (virtual or bare metal) as a single entity. Each infrastructure provider offers their own unique
+> features, but nearly all are concerned with managing availability, health, and configuration updates.
+
+> A MachinePool is similar to a MachineDeployment in that they both define
+> configuration and policy for how a set of machines are managed. They both define a common
+> configuration, number of desired machine replicas, and policy for update. Both types also combine
+> information from Kubernetes as well as the underlying provider infrastructure to give a view of
+> the overall health of the machines in the set.
+
+> MachinePool diverges from MachineDeployment in that the MachineDeployment controller uses
+> MachineSets to achieve the aforementioned desired number of machines and to orchestrate updates
+> to the Machines in the managed set, while MachinePool delegates the responsibility of these
+> concerns to an infrastructure provider specific resource such as AWS Auto Scale Groups, GCP
+> Managed Instance Groups, and Azure Virtual Machine Scale Sets.
+
+> MachinePool is optional and doesn't replace the need for MachineSet/Machine since not every
+> infrastructure provider will have an abstraction for managing multiple machines (i.e. bare metal).
+> Users may always opt to choose MachineSet/Machine when they don't see additional value in
+> MachinePool for their use case.
+
+*Source: [MachinePool API Proposal](https://github.com/kubernetes-sigs/cluster-api/blob/bf51a2502f9007b531f6a9a2c1a4eae1586fb8ca/docs/proposals/20190919-machinepool-api.md)*
+
+## AzureMachinePool
+Cluster API Provider Azure (CAPZ) has experimental support for `MachinePool` through the infrastructure
+type `AzureMachinePool`. An `AzureMachinePool` corresponds to an [Azure Virtual Machine Scale Set](https://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/overview),
+which provides the cloud provider specific resource for orchestrating a group of Virtual Machines.
+
+### Using `clusterctl` to deploy
+To deploy a MachinePool / AzureMachinePool via `clusterctl config`, there's a [flavor](https://cluster-api.sigs.k8s.io/clusterctl/commands/config-cluster.html#flavors)
+for that.
+
+Make sure to set up your Azure environment as described [here](https://cluster-api.sigs.k8s.io/user/quick-start.html).
+
+```shell
+clusterctl config cluster my-cluster --kubernetes-version v1.17.4 --flavor machinepool > my-cluster.yaml
+```
+
+The template used for this [flavor](https://cluster-api.sigs.k8s.io/clusterctl/commands/config-cluster.html#flavors)
+is located [here](../../templates/cluster-template-machinepool.yaml).
+
+### Example MachinePool, AzureMachinePool and KubeadmConfig Resources
+Below is an example of the resources needed to create a pool of Virtual Machines orchestrated with
+a Virtual Machine Scale Set.
+```yaml
+---
+apiVersion: exp.cluster.x-k8s.io/v1alpha3
+kind: MachinePool
+metadata:
+  name: capz-mp-0
+spec:
+  clusterName: capz
+  replicas: 2
+  template:
+    spec:
+      bootstrap:
+        configRef:
+          apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
+          kind: KubeadmConfig
+          name: capz-mp-0
+      clusterName: capz
+      infrastructureRef:
+        apiVersion: exp.infrastructure.cluster.x-k8s.io/v1alpha3
+        kind: AzureMachinePool
+        name: capz-mp-0
+      version: v1.17.4
+---
+apiVersion: exp.infrastructure.cluster.x-k8s.io/v1alpha3
+kind: AzureMachinePool
+metadata:
+  name: capz-mp-0
+spec:
+  location: westus2
+  template:
+    osDisk:
+      diskSizeGB: 30
+      managedDisk:
+        storageAccountType: Premium_LRS
+      osType: Linux
+    sshPublicKey: ${YOUR_SSH_PUB_KEY}
+    vmSize: Standard_D2s_v3
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
+kind: KubeadmConfig
+metadata:
+  name: capz-mp-0
+spec:
+  files:
+  - content: |
+      {
+        "cloud": "AzurePublicCloud",
+        "tenantId": "tenantID",
+        "subscriptionId": "subscriptionID",
+        "aadClientId": "clientID",
+        "aadClientSecret": "secret",
+        "resourceGroup": "capz",
+        "securityGroupName": "capz-node-nsg",
+        "location": "westus2",
+        "vmType": "vmss",
+        "vnetName": "capz-vnet",
+        "vnetResourceGroup": "capz",
+        "subnetName": "capz-node-subnet",
+        "routeTableName": "capz-node-routetable",
+        "loadBalancerSku": "standard",
+        "maximumLoadBalancerRuleCount": 250,
+        "useManagedIdentityExtension": false,
+        "useInstanceMetadata": true
+      }
+    owner: root:root
+    path: /etc/kubernetes/azure.json
+    permissions: "0644"
+  joinConfiguration:
+    nodeRegistration:
+      kubeletExtraArgs:
+        cloud-config: /etc/kubernetes/azure.json
+        cloud-provider: azure
+      name: '{{ ds.meta_data["local_hostname"] }}'
+  useExperimentalRetryJoin: true
+```
\ No newline at end of file
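The feature gate named at the top of this document ties back to the Tiltfile change earlier in the diff: with the hard-coded `feature_gates` default removed, MachinePool support is opt-in for Tilt-based development. A minimal sketch of a `tilt-settings.json` fragment that re-enables it through the new `extra_args` map — the `core`, `kubeadm-bootstrap`, and `azure` keys match what `deploy_capi` and `capz` read above, and this assumes the usual tilt-settings.json mechanism referenced in docs/development.md (merge into your existing settings alongside the `AZURE_*` values rather than replacing them):

```json
{
  "extra_args": {
    "core": ["--feature-gates=MachinePool=true"],
    "kubeadm-bootstrap": ["--feature-gates=MachinePool=true"],
    "azure": ["--feature-gates=MachinePool=true"]
  }
}
```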
diff --git a/exp/PROJECT b/exp/PROJECT
index c5eda7e4dc7..1e99aa6fcd8 100644
--- a/exp/PROJECT
+++ b/exp/PROJECT
@@ -1,4 +1,7 @@
 domain: exp.infrastructure.x-k8s.io
 repo: sigs.k8s.io/cluster-api-provider-azure/exp
 version: "2"
-resources: []
+resources:
+- group: exp.infrastructure
+  kind: AzureMachinePool
+  version: v1alpha3
diff --git a/exp/README.md b/exp/README.md
index 9faa282b203..62912486c94 100644
--- a/exp/README.md
+++ b/exp/README.md
@@ -6,4 +6,5 @@ This subrepository holds experimental code and API types.

 In short, code in this subrepository is not subject to any compatibility or
 deprecation promise.
+
 For policy around graduation timeline, see [Cluster API Exp](https://github.com/kubernetes-sigs/cluster-api/tree/master/exp).
diff --git a/exp/api/v1alpha3/azuremachinepool_test.go b/exp/api/v1alpha3/azuremachinepool_test.go
new file mode 100644
index 00000000000..92c6afb82c0
--- /dev/null
+++ b/exp/api/v1alpha3/azuremachinepool_test.go
@@ -0,0 +1,94 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha3_test
+
+import (
+	"testing"
+
+	"github.com/onsi/gomega"
+
+	infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha3"
+	exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha3"
+)
+
+func TestAzureMachinePool_Validate(t *testing.T) {
+	cases := []struct {
+		Name    string
+		Factory func(g *gomega.GomegaWithT) *exp.AzureMachinePool
+		Expect  func(g *gomega.GomegaWithT, actual error)
+	}{
+		{
+			Name: "HasNoImage",
+			Factory: func(_ *gomega.GomegaWithT) *exp.AzureMachinePool {
+				return new(exp.AzureMachinePool)
+			},
+			Expect: func(g *gomega.GomegaWithT, actual error) {
+				g.Expect(actual).ToNot(gomega.HaveOccurred())
+			},
+		},
+		{
+			Name: "HasValidImage",
+			Factory: func(_ *gomega.GomegaWithT) *exp.AzureMachinePool {
+				return &exp.AzureMachinePool{
+					Spec: exp.AzureMachinePoolSpec{
+						Template: exp.AzureMachineTemplate{
+							Image: &infrav1.Image{
+								SharedGallery: &infrav1.AzureSharedGalleryImage{
+									SubscriptionID: "foo",
+									ResourceGroup:  "blah",
+									Name:           "bin",
+									Gallery:        "bazz",
+									Version:        "1.2.3",
+								},
+							},
+						},
+					},
+				}
+			},
+			Expect: func(g *gomega.GomegaWithT, actual error) {
+				g.Expect(actual).ToNot(gomega.HaveOccurred())
+			},
+		},
+		{
+			Name: "HasInvalidImage",
+			Factory: func(_ *gomega.GomegaWithT) *exp.AzureMachinePool {
+				return &exp.AzureMachinePool{
+					Spec: exp.AzureMachinePoolSpec{
+						Template: exp.AzureMachineTemplate{
+							Image: new(infrav1.Image),
+						},
+					},
+				}
+			},
+			Expect: func(g *gomega.GomegaWithT, actual error) {
+				g.Expect(actual).To(gomega.HaveOccurred())
+				g.Expect(actual.Error()).To(gomega.ContainSubstring("You must supply an ID, Marketplace or SharedGallery image details"))
+			},
+		},
+	}
+
+	for _, c := range cases {
+		c := c
+		t.Run(c.Name, func(t *testing.T) {
+			t.Parallel()
+			g := gomega.NewGomegaWithT(t)
+			amp := c.Factory(g)
+			actualErr := amp.Validate()
+			c.Expect(g, actualErr)
+		})
+	}
+}
diff --git a/exp/api/v1alpha3/azuremachinepool_types.go b/exp/api/v1alpha3/azuremachinepool_types.go
new file mode 100644
index 00000000000..d0188a7ad33
--- /dev/null
+++ b/exp/api/v1alpha3/azuremachinepool_types.go
@@ -0,0 +1,153 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha3
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"sigs.k8s.io/cluster-api/errors"
+
+	infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha3"
+)
+
+type (
+	AzureMachineTemplate struct {
+		// VMSize is the size of the Virtual Machine to build.
+		// See https://docs.microsoft.com/en-us/rest/api/compute/virtualmachines/createorupdate#virtualmachinesizetypes
+		VMSize string `json:"vmSize"`
+
+		// Image is used to provide details of an image to use during Virtual Machine creation.
+		// If image details are omitted the image will default to the Azure Marketplace "capi" offer,
+		// which is based on Ubuntu.
+		// +kubebuilder:validation:nullable
+		// +optional
+		Image *infrav1.Image `json:"image,omitempty"`
+
+		// OSDisk contains the operating system disk information for a Virtual Machine
+		OSDisk infrav1.OSDisk `json:"osDisk"`
+
+		// SSHPublicKey is the SSH public key string base64 encoded to add to a Virtual Machine
+		SSHPublicKey string `json:"sshPublicKey"`
+	}
+
+	// AzureMachinePoolSpec defines the desired state of AzureMachinePool
+	AzureMachinePoolSpec struct {
+		// Location is the Azure region location e.g. westus2
+		Location string `json:"location"`
+
+		// Template contains the details used to build a replica virtual machine within the Machine Pool
+		Template AzureMachineTemplate `json:"template"`
+
+		// AdditionalTags is an optional set of tags to add to an instance, in addition to the ones added by default by the
+		// Azure provider. If both the AzureCluster and the AzureMachine specify the same tag name with different values, the
+		// AzureMachine's value takes precedence.
+		// +optional
+		AdditionalTags infrav1.Tags `json:"additionalTags,omitempty"`
+
+		// ProviderID is the identification ID of the Virtual Machine Scale Set
+		ProviderID string `json:"providerID,omitempty"`
+
+		// ProviderIDList are the identification IDs of machine instances provided by the provider.
+		// This field must match the provider IDs as seen on the node objects corresponding to a machine pool's machine instances.
+		// +optional
+		ProviderIDList []string `json:"providerIDList,omitempty"`
+	}
+
+	// AzureMachinePoolStatus defines the observed state of AzureMachinePool
+	AzureMachinePoolStatus struct {
+		// Ready is true when the provider resource is ready.
+		// +optional
+		Ready bool `json:"ready"`
+
+		// Replicas is the most recently observed number of replicas.
+		// +optional
+		Replicas int32 `json:"replicas"`
+
+		// VMState is the provisioning state of the Azure virtual machine.
+		// +optional
+		ProvisioningState *infrav1.VMState `json:"provisioningState,omitempty"`
+
+		// ErrorReason will be set in the event that there is a terminal problem
+		// reconciling the MachinePool and will contain a succinct value suitable
+		// for machine interpretation.
+ // + // This field should not be set for transitive errors that a controller + // faces that are expected to be fixed automatically over + // time (like service outages), but instead indicate that something is + // fundamentally wrong with the MachinePool's spec or the configuration of + // the controller, and that manual intervention is required. Examples + // of terminal errors would be invalid combinations of settings in the + // spec, values that are unsupported by the controller, or the + // responsible controller itself being critically misconfigured. + // + // Any transient errors that occur during the reconciliation of MachinePools + // can be added as events to the MachinePool object and/or logged in the + // controller's output. + // +optional + FailureReason *errors.MachineStatusError `json:"failureReason,omitempty"` + + // ErrorMessage will be set in the event that there is a terminal problem + // reconciling the MachinePool and will contain a more verbose string suitable + // for logging and human consumption. + // + // This field should not be set for transitive errors that a controller + // faces that are expected to be fixed automatically over + // time (like service outages), but instead indicate that something is + // fundamentally wrong with the MachinePool's spec or the configuration of + // the controller, and that manual intervention is required. Examples + // of terminal errors would be invalid combinations of settings in the + // spec, values that are unsupported by the controller, or the + // responsible controller itself being critically misconfigured. + // + // Any transient errors that occur during the reconciliation of MachinePools + // can be added as events to the MachinePool object and/or logged in the + // controller's output. 
+		// +optional
+		FailureMessage *string `json:"failureMessage,omitempty"`
+	}
+
+	// +kubebuilder:object:root=true
+	// +kubebuilder:subresource:status
+	// +kubebuilder:printcolumn:name="Replicas",type="string",JSONPath=".status.replicas",description="AzureMachinePool replicas count"
+	// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="AzureMachinePool ready status"
+	// +kubebuilder:printcolumn:name="State",type="string",JSONPath=".status.provisioningState",description="Azure VMSS provisioning state"
+	// +kubebuilder:printcolumn:name="Cluster",type="string",priority=1,JSONPath=".metadata.labels.cluster\\.x-k8s\\.io/cluster-name",description="Cluster to which this AzureMachinePool belongs"
+	// +kubebuilder:printcolumn:name="MachinePool",type="string",priority=1,JSONPath=".metadata.ownerReferences[?(@.kind==\"MachinePool\")].name",description="MachinePool object to which this AzureMachinePool belongs"
+	// +kubebuilder:printcolumn:name="VMSS ID",type="string",priority=1,JSONPath=".spec.providerID",description="Azure VMSS ID"
+	// +kubebuilder:printcolumn:name="VM Size",type="string",priority=1,JSONPath=".spec.template.vmSize",description="Azure VM Size"
+
+	// AzureMachinePool is the Schema for the azuremachinepools API
+	AzureMachinePool struct {
+		metav1.TypeMeta   `json:",inline"`
+		metav1.ObjectMeta `json:"metadata,omitempty"`
+
+		Spec   AzureMachinePoolSpec   `json:"spec,omitempty"`
+		Status AzureMachinePoolStatus `json:"status,omitempty"`
+	}
+
+	// +kubebuilder:object:root=true
+
+	// AzureMachinePoolList contains a list of AzureMachinePool
+	AzureMachinePoolList struct {
+		metav1.TypeMeta `json:",inline"`
+		metav1.ListMeta `json:"metadata,omitempty"`
+		Items           []AzureMachinePool `json:"items"`
+	}
+)
+
+func init() {
+	SchemeBuilder.Register(&AzureMachinePool{}, &AzureMachinePoolList{})
+}
diff --git a/exp/api/v1alpha3/azuremachinepool_webhook.go b/exp/api/v1alpha3/azuremachinepool_webhook.go
new file mode 100644
index 00000000000..b72ccc29e72
--- /dev/null
+++ b/exp/api/v1alpha3/azuremachinepool_webhook.go
@@ -0,0 +1,101 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha3
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+	kerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/apimachinery/pkg/util/validation/field"
+	ctrl "sigs.k8s.io/controller-runtime"
+	logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
+	"sigs.k8s.io/controller-runtime/pkg/webhook"
+
+	infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha3"
+)
+
+// log is for logging in this package.
+var azuremachinepoollog = logf.Log.WithName("azuremachinepool-resource")
+
+func (amp *AzureMachinePool) SetupWebhookWithManager(mgr ctrl.Manager) error {
+	return ctrl.NewWebhookManagedBy(mgr).
+		For(amp).
+		Complete()
+}
+
+// +kubebuilder:webhook:path=/mutate-exp-cluster-x-k8s-io-x-k8s-io-v1alpha3-azuremachinepool,mutating=true,failurePolicy=fail,matchPolicy=Equivalent,groups=exp.cluster.x-k8s.io.x-k8s.io,resources=azuremachinepools,verbs=create;update,versions=v1alpha3,name=mazuremachinepool.kb.io
+
+var _ webhook.Defaulter = &AzureMachinePool{}
+
+// Default implements webhook.Defaulter so a webhook will be registered for the type
+func (amp *AzureMachinePool) Default() {
+	azuremachinepoollog.Info("default", "name", amp.Name)
+}
+
+// +kubebuilder:webhook:verbs=create;update,path=/validate-exp-cluster-x-k8s-io-x-k8s-io-v1alpha3-azuremachinepool,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=exp.cluster.x-k8s.io.x-k8s.io,resources=azuremachinepools,versions=v1alpha3,name=vazuremachinepool.kb.io
+
+var _ webhook.Validator = &AzureMachinePool{}
+
+// ValidateCreate implements webhook.Validator so a webhook will be registered for the type
+func (amp *AzureMachinePool) ValidateCreate() error {
+	azuremachinepoollog.Info("validate create", "name", amp.Name)
+	return amp.Validate()
+}
+
+// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type
+func (amp *AzureMachinePool) ValidateUpdate(old runtime.Object) error {
+	azuremachinepoollog.Info("validate update", "name", amp.Name)
+	return amp.Validate()
+}
+
+// ValidateDelete implements webhook.Validator so a webhook will be registered for the type
+func (amp *AzureMachinePool) ValidateDelete() error {
+	azuremachinepoollog.Info("validate delete", "name", amp.Name)
+	return nil
+}
+
+// Validate the Azure Machine Pool and return an aggregate error
+func (amp *AzureMachinePool) Validate() error {
+	validators := []func() error{
+		amp.ValidateImage,
+	}
+
+	var errs []error
+	for _, validator := range validators {
+		if err := validator(); err != nil {
+			errs = append(errs, err)
+		}
+	}
+
+	if len(errs) > 0 {
+		return kerrors.NewAggregate(errs)
+	}
+
+	return nil
+}
+
+// ValidateImage of an AzureMachinePool
+func (amp *AzureMachinePool) ValidateImage() error {
+	if amp.Spec.Template.Image != nil {
+		image := amp.Spec.Template.Image
+		if errs := infrav1.ValidateImage(image, field.NewPath("image")); len(errs) > 0 {
+			agg := kerrors.NewAggregate(errs.ToAggregate().Errors())
+			azuremachinepoollog.Info("Invalid image", "error", agg.Error())
+			return agg
+		}
+	}
+	return nil
+}
diff --git a/exp/api/v1alpha3/types.go b/exp/api/v1alpha3/types.go
new file mode 100644
index 00000000000..67fff704148
--- /dev/null
+++ b/exp/api/v1alpha3/types.go
@@ -0,0 +1,44 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package v1alpha3 + +import ( + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha3" +) + +type ( + VMSSVM struct { + ID string `json:"id,omitempty"` + InstanceID string `json:"instanceID,omitempty"` + Name string `json:"name,omitempty"` + AvailabilityZone string `json:"availabilityZone,omitempty"` + State infrav1.VMState `json:"vmState,omitempty"` + } + + VMSS struct { + ID string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Sku string `json:"sku,omitempty"` + Capacity int64 `json:"capacity,omitempty"` + Zones []string `json:"zones,omitempty"` + Image infrav1.Image `json:"image,omitempty"` + State infrav1.VMState `json:"vmState,omitempty"` + Identity infrav1.VMIdentity `json:"identity,omitempty"` + Tags infrav1.Tags `json:"tags,omitempty"` + Instances []VMSSVM `json:"instances,omitempty"` + } +) diff --git a/exp/api/v1alpha3/zz_generated.deepcopy.go b/exp/api/v1alpha3/zz_generated.deepcopy.go new file mode 100644 index 00000000000..29a91021640 --- /dev/null +++ b/exp/api/v1alpha3/zz_generated.deepcopy.go @@ -0,0 +1,213 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + "k8s.io/apimachinery/pkg/runtime" + apiv1alpha3 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha3" + "sigs.k8s.io/cluster-api/errors" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureMachinePool) DeepCopyInto(out *AzureMachinePool) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureMachinePool. +func (in *AzureMachinePool) DeepCopy() *AzureMachinePool { + if in == nil { + return nil + } + out := new(AzureMachinePool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AzureMachinePool) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureMachinePoolList) DeepCopyInto(out *AzureMachinePoolList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AzureMachinePool, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureMachinePoolList. 
+func (in *AzureMachinePoolList) DeepCopy() *AzureMachinePoolList { + if in == nil { + return nil + } + out := new(AzureMachinePoolList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AzureMachinePoolList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureMachinePoolSpec) DeepCopyInto(out *AzureMachinePoolSpec) { + *out = *in + in.Template.DeepCopyInto(&out.Template) + if in.AdditionalTags != nil { + in, out := &in.AdditionalTags, &out.AdditionalTags + *out = make(apiv1alpha3.Tags, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ProviderIDList != nil { + in, out := &in.ProviderIDList, &out.ProviderIDList + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureMachinePoolSpec. +func (in *AzureMachinePoolSpec) DeepCopy() *AzureMachinePoolSpec { + if in == nil { + return nil + } + out := new(AzureMachinePoolSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureMachinePoolStatus) DeepCopyInto(out *AzureMachinePoolStatus) { + *out = *in + if in.ProvisioningState != nil { + in, out := &in.ProvisioningState, &out.ProvisioningState + *out = new(apiv1alpha3.VMState) + **out = **in + } + if in.FailureReason != nil { + in, out := &in.FailureReason, &out.FailureReason + *out = new(errors.MachineStatusError) + **out = **in + } + if in.FailureMessage != nil { + in, out := &in.FailureMessage, &out.FailureMessage + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureMachinePoolStatus. +func (in *AzureMachinePoolStatus) DeepCopy() *AzureMachinePoolStatus { + if in == nil { + return nil + } + out := new(AzureMachinePoolStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureMachineTemplate) DeepCopyInto(out *AzureMachineTemplate) { + *out = *in + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(apiv1alpha3.Image) + (*in).DeepCopyInto(*out) + } + out.OSDisk = in.OSDisk +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureMachineTemplate. +func (in *AzureMachineTemplate) DeepCopy() *AzureMachineTemplate { + if in == nil { + return nil + } + out := new(AzureMachineTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VMSS) DeepCopyInto(out *VMSS) {
+	*out = *in
+	if in.Zones != nil {
+		in, out := &in.Zones, &out.Zones
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	in.Image.DeepCopyInto(&out.Image)
+	if in.Tags != nil {
+		in, out := &in.Tags, &out.Tags
+		*out = make(apiv1alpha3.Tags, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.Instances != nil {
+		in, out := &in.Instances, &out.Instances
+		*out = make([]VMSSVM, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VMSS.
+func (in *VMSS) DeepCopy() *VMSS {
+	if in == nil {
+		return nil
+	}
+	out := new(VMSS)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VMSSVM) DeepCopyInto(out *VMSSVM) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VMSSVM.
+func (in *VMSSVM) DeepCopy() *VMSSVM {
+	if in == nil {
+		return nil
+	}
+	out := new(VMSSVM)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/exp/cloud/converters/vmss.go b/exp/cloud/converters/vmss.go
new file mode 100644
index 00000000000..cfb579e114a
--- /dev/null
+++ b/exp/cloud/converters/vmss.go
@@ -0,0 +1,67 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package converters
+
+import (
+	"github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute"
+	"github.com/Azure/go-autorest/autorest/to"
+
+	infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha3"
+	infrav1convert "sigs.k8s.io/cluster-api-provider-azure/cloud/converters"
+	infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha3"
+)
+
+// SDKToVMSS converts an Azure SDK VirtualMachineScaleSet to the CAPZ VMSS type.
+func SDKToVMSS(sdkvmss compute.VirtualMachineScaleSet, sdkinstances []compute.VirtualMachineScaleSetVM) *infrav1exp.VMSS { + vmss := &infrav1exp.VMSS{ + ID: to.String(sdkvmss.ID), + Name: to.String(sdkvmss.Name), + State: infrav1.VMState(to.String(sdkvmss.ProvisioningState)), + } + + if sdkvmss.Sku != nil { + vmss.Sku = to.String(sdkvmss.Sku.Name) + vmss.Capacity = to.Int64(sdkvmss.Sku.Capacity) + } + + if sdkvmss.Zones != nil && len(*sdkvmss.Zones) > 0 { + vmss.Zones = to.StringSlice(sdkvmss.Zones) + } + + if len(sdkvmss.Tags) > 0 { + vmss.Tags = infrav1convert.MapToTags(sdkvmss.Tags) + } + + if len(sdkinstances) > 0 { + vmss.Instances = make([]infrav1exp.VMSSVM, len(sdkinstances)) + for i, vm := range sdkinstances { + instance := infrav1exp.VMSSVM{ + ID: to.String(vm.ID), + InstanceID: to.String(vm.InstanceID), + Name: to.String(vm.Name), + State: infrav1.VMState(to.String(vm.ProvisioningState)), + } + + if vm.Zones != nil && len(*vm.Zones) > 0 { + instance.AvailabilityZone = to.StringSlice(vm.Zones)[0] + } + vmss.Instances[i] = instance + } + } + + return vmss +} diff --git a/exp/cloud/converters/vmss_test.go b/exp/cloud/converters/vmss_test.go new file mode 100644 index 00000000000..20a5886e38d --- /dev/null +++ b/exp/cloud/converters/vmss_test.go @@ -0,0 +1,118 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package converters_test + +import ( + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute" + "github.com/Azure/go-autorest/autorest/to" + "github.com/onsi/gomega" + + infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha3" + "sigs.k8s.io/cluster-api-provider-azure/exp/cloud/converters" +) + +func Test_SDKToVMSS(t *testing.T) { + cases := []struct { + Name string + SubjectFactory func(*gomega.GomegaWithT) (compute.VirtualMachineScaleSet, []compute.VirtualMachineScaleSetVM) + Expect func(*gomega.GomegaWithT, *infrav1exp.VMSS) + }{ + { + Name: "ShouldPopulateWithData", + SubjectFactory: func(g *gomega.GomegaWithT) (compute.VirtualMachineScaleSet, []compute.VirtualMachineScaleSetVM) { + tags := map[string]*string{ + "foo": to.StringPtr("bazz"), + } + zones := []string{"zone0", "zone1"} + return compute.VirtualMachineScaleSet{ + Sku: &compute.Sku{ + Name: to.StringPtr("skuName"), + Tier: to.StringPtr("skuTier"), + Capacity: to.Int64Ptr(2), + }, + Zones: to.StringSlicePtr(zones), + ID: to.StringPtr("vmssID"), + Name: to.StringPtr("vmssName"), + Location: to.StringPtr("westus2"), + Tags: tags, + VirtualMachineScaleSetProperties: &compute.VirtualMachineScaleSetProperties{ + ProvisioningState: to.StringPtr(string(compute.ProvisioningState1Succeeded)), + }, + }, + []compute.VirtualMachineScaleSetVM{ + { + InstanceID: to.StringPtr("0"), + ID: to.StringPtr("vm/0"), + Name: to.StringPtr("vm0"), + Zones: to.StringSlicePtr([]string{"zone0"}), + VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{ + ProvisioningState: to.StringPtr(string(compute.ProvisioningState1Succeeded)), + }, + }, + { + InstanceID: to.StringPtr("1"), + ID: to.StringPtr("vm/1"), + Name: to.StringPtr("vm1"), + Zones: to.StringSlicePtr([]string{"zone1"}), + VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{ + ProvisioningState: to.StringPtr(string(compute.ProvisioningState1Succeeded)), + }, + }, + } + }, + Expect: func(g *gomega.GomegaWithT, actual *infrav1exp.VMSS) { + expected := infrav1exp.VMSS{ + ID: "vmssID", + Name: "vmssName", + Sku: "skuName", + Capacity: 2, + Zones: []string{"zone0", "zone1"}, + State: "Succeeded", + Tags: map[string]string{ + "foo": "bazz", + }, + Instances: make([]infrav1exp.VMSSVM, 2), + } + + for i := 0; i < 2; i++ { + expected.Instances[i] = infrav1exp.VMSSVM{ + ID: fmt.Sprintf("vm/%d", i), + InstanceID: fmt.Sprintf("%d", i), + Name: fmt.Sprintf("vm%d", i), + AvailabilityZone: fmt.Sprintf("zone%d", i), + State: "Succeeded", + } + } + g.Expect(actual).To(gomega.Equal(&expected)) + }, + }, + } + + for _, c := range cases { + c := c + t.Run(c.Name, func(t *testing.T) { + t.Parallel() + g := gomega.NewGomegaWithT(t) + vmss, instances := c.SubjectFactory(g) + subject := converters.SDKToVMSS(vmss, instances) + c.Expect(g, subject) + }) + } +} diff --git a/exp/cloud/scope/machinepool.go b/exp/cloud/scope/machinepool.go new file mode 100644 index 00000000000..7fb111f47b6 --- /dev/null +++ b/exp/cloud/scope/machinepool.go @@ -0,0 +1,191 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scope + +import ( + "context" + "encoding/base64" + + "github.com/go-logr/logr" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/klog/klogr" + "k8s.io/utils/pointer" + capiv1 "sigs.k8s.io/cluster-api/api/v1alpha3" + "sigs.k8s.io/cluster-api/controllers/noderefutil" + capierrors "sigs.k8s.io/cluster-api/errors" + capiv1exp "sigs.k8s.io/cluster-api/exp/api/v1alpha3" + "sigs.k8s.io/cluster-api/util/patch" + + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha3" + capzscope "sigs.k8s.io/cluster-api-provider-azure/cloud/scope" + infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha3" + + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type ( + // MachinePoolScopeParams defines the input parameters used to create a new MachinePoolScope. + MachinePoolScopeParams struct { + capzscope.AzureClients + Client client.Client + Logger logr.Logger + Cluster *capiv1.Cluster + MachinePool *capiv1exp.MachinePool + AzureCluster *infrav1.AzureCluster + AzureMachinePool *infrav1exp.AzureMachinePool + } + + // MachinePoolScope defines a scope defined around a machine pool and its cluster. + MachinePoolScope struct { + logr.Logger + client client.Client + patchHelper *patch.Helper + Cluster *capiv1.Cluster + MachinePool *capiv1exp.MachinePool + AzureCluster *infrav1.AzureCluster + AzureMachinePool *infrav1exp.AzureMachinePool + } +) + +// NewMachinePoolScope creates a new MachinePoolScope from the supplied parameters. +// This is meant to be called for each reconcile iteration. +func NewMachinePoolScope(params MachinePoolScopeParams) (*MachinePoolScope, error) { + if params.Client == nil { + return nil, errors.New("client is required when creating a MachinePoolScope") + } + if params.MachinePool == nil { + return nil, errors.New("machine pool is required when creating a MachinePoolScope") + } + if params.Cluster == nil { + return nil, errors.New("cluster is required when creating a MachinePoolScope") + } + if params.AzureCluster == nil { + return nil, errors.New("azure cluster is required when creating a MachinePoolScope") + } + if params.AzureMachinePool == nil { + return nil, errors.New("azure machine pool is required when creating a MachinePoolScope") + } + + if params.Logger == nil { + params.Logger = klogr.New() + } + + helper, err := patch.NewHelper(params.AzureMachinePool, params.Client) + if err != nil { + return nil, errors.Wrap(err, "failed to init patch helper") + } + return &MachinePoolScope{ + client: params.Client, + Cluster: params.Cluster, + MachinePool: params.MachinePool, + AzureCluster: params.AzureCluster, + AzureMachinePool: params.AzureMachinePool, + Logger: params.Logger, + patchHelper: helper, + }, nil +} + +func (m *MachinePoolScope) Name() string { + return m.AzureMachinePool.Name +} + +// GetID returns the AzureMachinePool ID by parsing Spec.ProviderID. 
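+// Illustrative only: a ProviderID is expected to look like the "azure://"
+// scheme followed by an ARM resource ID, e.g. (shortened)
+//
+//   azure:///subscriptions/<sub>/resourceGroups/my-rg/providers/Microsoft.Compute/virtualMachineScaleSets/capz-mp-0
+//
+// A ProviderID that fails to parse makes this method return nil rather than
+// an error.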
+func (m *MachinePoolScope) GetID() *string { + parsed, err := noderefutil.NewProviderID(m.AzureMachinePool.Spec.ProviderID) + if err != nil { + return nil + } + return pointer.StringPtr(parsed.ID()) +} + +// SetReady sets the AzureMachinePool Ready Status +func (m *MachinePoolScope) SetReady() { + m.AzureMachinePool.Status.Ready = true +} + +// SetFailureMessage sets the AzureMachinePool status failure message. +func (m *MachinePoolScope) SetFailureMessage(v error) { + m.AzureMachinePool.Status.FailureMessage = pointer.StringPtr(v.Error()) +} + +// SetFailureReason sets the AzureMachinePool status failure reason. +func (m *MachinePoolScope) SetFailureReason(v capierrors.MachineStatusError) { + m.AzureMachinePool.Status.FailureReason = &v +} + +// AdditionalTags merges AdditionalTags from the scope's AzureCluster and AzureMachinePool. If the same key is present in both, +// the value from AzureMachinePool takes precedence. +func (m *MachinePoolScope) AdditionalTags() infrav1.Tags { + tags := make(infrav1.Tags) + tags.Merge(m.AzureCluster.Spec.AdditionalTags) + tags.Merge(m.AzureMachinePool.Spec.AdditionalTags) + return tags +} + +// SetAnnotation sets a key value annotation on the AzureMachinePool. +func (m *MachinePoolScope) SetAnnotation(key, value string) { + if m.AzureMachinePool.Annotations == nil { + m.AzureMachinePool.Annotations = map[string]string{} + } + m.AzureMachinePool.Annotations[key] = value +} + +// PatchObject persists the machine spec and status. +func (m *MachinePoolScope) PatchObject() error { + // TODO[dj]: any function we are building where we are not adding context to the signature is welcoming unbound execution times. This needs to be addressed. + return m.patchHelper.Patch(context.TODO(), m.AzureMachinePool) +} + +func (m *MachinePoolScope) AzureMachineTemplate(ctx context.Context) (*infrav1.AzureMachineTemplate, error) { + ref := m.MachinePool.Spec.Template.Spec.InfrastructureRef + return getAzureMachineTemplate(ctx, m.client, ref.Name, ref.Namespace) +} + +// Close the MachineScope by updating the machine spec, machine status. +func (m *MachinePoolScope) Close() error { + return m.patchHelper.Patch(context.TODO(), m.AzureMachinePool) +} + +func getAzureMachineTemplate(ctx context.Context, c client.Client, name, namespace string) (*infrav1.AzureMachineTemplate, error) { + m := &infrav1.AzureMachineTemplate{} + key := client.ObjectKey{Name: name, Namespace: namespace} + if err := c.Get(ctx, key, m); err != nil { + return nil, err + } + return m, nil +} + +// GetBootstrapData returns the bootstrap data from the secret in the Machine's bootstrap.dataSecretName. 
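+// The MachinePool's Spec.Template.Spec.Bootstrap.DataSecretName names a
+// secret that follows the CAPI bootstrap contract: a single "value" key
+// holding the raw bootstrap payload. A sketch of the shape this method
+// consumes (names illustrative):
+//
+//   apiVersion: v1
+//   kind: Secret
+//   metadata:
+//     name: capz-mp-0-bootstrap
+//   data:
+//     value: <raw bootstrap data>
+//
+// The returned string is the base64 re-encoding of that value, ready to be
+// handed to the VMSS as CustomData.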
+func (m *MachinePoolScope) GetBootstrapData() (string, error) { + dataSecretName := m.MachinePool.Spec.Template.Spec.Bootstrap.DataSecretName + if dataSecretName == nil { + return "", errors.New("error retrieving bootstrap data: linked Machine Spec's bootstrap.dataSecretName is nil") + } + secret := &corev1.Secret{} + key := types.NamespacedName{Namespace: m.AzureMachinePool.Namespace, Name: *dataSecretName} + if err := m.client.Get(context.TODO(), key, secret); err != nil { + return "", errors.Wrapf(err, "failed to retrieve bootstrap data secret for AzureMachinePool %s/%s", m.AzureMachinePool.Namespace, m.Name()) + } + + value, ok := secret.Data["value"] + if !ok { + return "", errors.New("error retrieving bootstrap data: secret value key is missing") + } + return base64.StdEncoding.EncodeToString(value), nil +} diff --git a/exp/cloud/services/scalesets/client.go b/exp/cloud/services/scalesets/client.go new file mode 100644 index 00000000000..2e2cae063b3 --- /dev/null +++ b/exp/cloud/services/scalesets/client.go @@ -0,0 +1,134 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scalesets + +import ( + "context" + "fmt" + + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-11-01/network" + "github.com/Azure/go-autorest/autorest" + + azure "sigs.k8s.io/cluster-api-provider-azure/cloud" +) + +// Client wraps go-sdk +type Client interface { + ListInstances(context.Context, string, string) ([]compute.VirtualMachineScaleSetVM, error) + Get(context.Context, string, string) (compute.VirtualMachineScaleSet, error) + CreateOrUpdate(context.Context, string, string, compute.VirtualMachineScaleSet) error + Delete(context.Context, string, string) error + GetPublicIPAddress(context.Context, string, string) (network.PublicIPAddress, error) +} + +// AzureClient contains the Azure go-sdk Client +type AzureClient struct { + scalesetvms compute.VirtualMachineScaleSetVMsClient + scalesets compute.VirtualMachineScaleSetsClient + publicIPs network.PublicIPAddressesClient +} + +var _ Client = &AzureClient{} + +// NewClient creates a new VMSS client from subscription ID. +func NewClient(subscriptionID string, authorizer autorest.Authorizer) *AzureClient { + return &AzureClient{ + scalesetvms: newVirtualMachineScaleSetVMsClient(subscriptionID, authorizer), + scalesets: newVirtualMachineScaleSetsClient(subscriptionID, authorizer), + publicIPs: newPublicIPsClient(subscriptionID, authorizer), + } +} + +// newVirtualMachineScaleSetVMsClient creates a new vmss VM client from subscription ID. 
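+// A minimal construction sketch for the assembled client (illustrative; the
+// authorizer is an assumption here and could come from go-autorest's
+// azure/auth package, for example):
+//
+//   authorizer, _ := auth.NewAuthorizerFromEnvironment()
+//   client := NewClient("<subscription-id>", authorizer)
+//   vmss, err := client.Get(ctx, "my-rg", "my-vmss")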
+func newVirtualMachineScaleSetVMsClient(subscriptionID string, authorizer autorest.Authorizer) compute.VirtualMachineScaleSetVMsClient {
+	c := compute.NewVirtualMachineScaleSetVMsClient(subscriptionID)
+	c.Authorizer = authorizer
+	_ = c.AddToUserAgent(azure.UserAgent) // intentionally ignore error as it doesn't matter
+	return c
+}
+
+// newVirtualMachineScaleSetsClient creates a new vmss client from subscription ID.
+func newVirtualMachineScaleSetsClient(subscriptionID string, authorizer autorest.Authorizer) compute.VirtualMachineScaleSetsClient {
+	c := compute.NewVirtualMachineScaleSetsClient(subscriptionID)
+	c.Authorizer = authorizer
+	_ = c.AddToUserAgent(azure.UserAgent) // intentionally ignore error as it doesn't matter
+	return c
+}
+
+// newPublicIPsClient creates a new publicIPs client from subscription ID.
+func newPublicIPsClient(subscriptionID string, authorizer autorest.Authorizer) network.PublicIPAddressesClient {
+	c := network.NewPublicIPAddressesClient(subscriptionID)
+	c.Authorizer = authorizer
+	_ = c.AddToUserAgent(azure.UserAgent) // intentionally ignore error as it doesn't matter
+	return c
+}
+
+// ListInstances retrieves the model views of the VM instances belonging to a virtual machine scale set.
+func (ac *AzureClient) ListInstances(ctx context.Context, resourceGroupName, vmssName string) ([]compute.VirtualMachineScaleSetVM, error) {
+	itr, err := ac.scalesetvms.ListComplete(ctx, resourceGroupName, vmssName, "", "", "")
+	if err != nil {
+		return nil, err
+	}
+
+	var instances []compute.VirtualMachineScaleSetVM
+	for itr.NotDone() {
+		instances = append(instances, itr.Value())
+		if err := itr.NextWithContext(ctx); err != nil {
+			return nil, fmt.Errorf("failed to iterate vm scale set vms: %w", err)
+		}
+	}
+	return instances, nil
+}
+
+// Get retrieves information about the model view of a virtual machine scale set.
+func (ac *AzureClient) Get(ctx context.Context, resourceGroupName, vmssName string) (compute.VirtualMachineScaleSet, error) {
+	return ac.scalesets.Get(ctx, resourceGroupName, vmssName)
+}
+
+// CreateOrUpdate creates or updates a virtual machine scale set and waits for the operation to complete.
+func (ac *AzureClient) CreateOrUpdate(ctx context.Context, resourceGroupName, vmssName string, vmss compute.VirtualMachineScaleSet) error {
+	future, err := ac.scalesets.CreateOrUpdate(ctx, resourceGroupName, vmssName, vmss)
+	if err != nil {
+		return err
+	}
+	err = future.WaitForCompletionRef(ctx, ac.scalesets.Client)
+	if err != nil {
+		return err
+	}
+	_, err = future.Result(ac.scalesets)
+	return err
+}
+
+// Delete deletes a virtual machine scale set and waits for the operation to complete.
+func (ac *AzureClient) Delete(ctx context.Context, resourceGroupName, vmssName string) error {
+	future, err := ac.scalesets.Delete(ctx, resourceGroupName, vmssName)
+	if err != nil {
+		return err
+	}
+	err = future.WaitForCompletionRef(ctx, ac.scalesets.Client)
+	if err != nil {
+		return err
+	}
+	_, err = future.Result(ac.scalesets)
+	return err
+}
+
+// GetPublicIPAddress retrieves the named public IP address in the given resource group.
+func (ac *AzureClient) GetPublicIPAddress(ctx context.Context, resourceGroupName, publicIPName string) (network.PublicIPAddress, error) {
+	return ac.publicIPs.Get(ctx, resourceGroupName, publicIPName, "true")
+}
diff --git a/exp/cloud/services/scalesets/mock_scalesets/doc.go b/exp/cloud/services/scalesets/mock_scalesets/doc.go
new file mode 100644
index 00000000000..d3fbf1923c3
--- /dev/null
+++ b/exp/cloud/services/scalesets/mock_scalesets/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Run go generate to regenerate this mock. +//go:generate ../../../../../hack/tools/bin/mockgen -destination scalesets_mock.go -package mock_scalesets -source ../client.go Client +//go:generate /usr/bin/env bash -c "cat ../../../../../hack/boilerplate/boilerplate.generatego.txt scalesets_mock.go > _scalesets_mock.go && mv _scalesets_mock.go scalesets_mock.go" +package mock_scalesets //nolint diff --git a/exp/cloud/services/scalesets/mock_scalesets/scalesets_mock.go b/exp/cloud/services/scalesets/mock_scalesets/scalesets_mock.go new file mode 100644 index 00000000000..ade5de9bfb9 --- /dev/null +++ b/exp/cloud/services/scalesets/mock_scalesets/scalesets_mock.go @@ -0,0 +1,125 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by MockGen. DO NOT EDIT. +// Source: ../client.go + +// Package mock_scalesets is a generated GoMock package. 
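+//
+// A hedged usage sketch, mirroring how the unit tests wire the mock in
+// (names illustrative):
+//
+//   mockCtrl := gomock.NewController(t)
+//   vmssMock := mock_scalesets.NewMockClient(mockCtrl)
+//   vmssMock.EXPECT().
+//       Get(gomock.Any(), "my-rg", "my-vmss").
+//       Return(compute.VirtualMachineScaleSet{}, nil)
+//   svc.Client = vmssMock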
+package mock_scalesets + +import ( + context "context" + compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute" + network "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-11-01/network" + gomock "github.com/golang/mock/gomock" + reflect "reflect" +) + +// MockClient is a mock of Client interface +type MockClient struct { + ctrl *gomock.Controller + recorder *MockClientMockRecorder +} + +// MockClientMockRecorder is the mock recorder for MockClient +type MockClientMockRecorder struct { + mock *MockClient +} + +// NewMockClient creates a new mock instance +func NewMockClient(ctrl *gomock.Controller) *MockClient { + mock := &MockClient{ctrl: ctrl} + mock.recorder = &MockClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockClient) EXPECT() *MockClientMockRecorder { + return m.recorder +} + +// ListInstances mocks base method +func (m *MockClient) ListInstances(arg0 context.Context, arg1, arg2 string) ([]compute.VirtualMachineScaleSetVM, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListInstances", arg0, arg1, arg2) + ret0, _ := ret[0].([]compute.VirtualMachineScaleSetVM) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListInstances indicates an expected call of ListInstances +func (mr *MockClientMockRecorder) ListInstances(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListInstances", reflect.TypeOf((*MockClient)(nil).ListInstances), arg0, arg1, arg2) +} + +// Get mocks base method +func (m *MockClient) Get(arg0 context.Context, arg1, arg2 string) (compute.VirtualMachineScaleSet, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", arg0, arg1, arg2) + ret0, _ := ret[0].(compute.VirtualMachineScaleSet) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get +func (mr *MockClientMockRecorder) Get(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockClient)(nil).Get), arg0, arg1, arg2) +} + +// CreateOrUpdate mocks base method +func (m *MockClient) CreateOrUpdate(arg0 context.Context, arg1, arg2 string, arg3 compute.VirtualMachineScaleSet) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateOrUpdate", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// CreateOrUpdate indicates an expected call of CreateOrUpdate +func (mr *MockClientMockRecorder) CreateOrUpdate(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockClient)(nil).CreateOrUpdate), arg0, arg1, arg2, arg3) +} + +// Delete mocks base method +func (m *MockClient) Delete(arg0 context.Context, arg1, arg2 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Delete", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// Delete indicates an expected call of Delete +func (mr *MockClientMockRecorder) Delete(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockClient)(nil).Delete), arg0, arg1, arg2) +} + +// GetPublicIPAddress mocks base method +func (m *MockClient) GetPublicIPAddress(arg0 context.Context, arg1, arg2 string) (network.PublicIPAddress, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, 
"GetPublicIPAddress", arg0, arg1, arg2) + ret0, _ := ret[0].(network.PublicIPAddress) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPublicIPAddress indicates an expected call of GetPublicIPAddress +func (mr *MockClientMockRecorder) GetPublicIPAddress(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPublicIPAddress", reflect.TypeOf((*MockClient)(nil).GetPublicIPAddress), arg0, arg1, arg2) +} diff --git a/exp/cloud/services/scalesets/service.go b/exp/cloud/services/scalesets/service.go new file mode 100644 index 00000000000..88e457ed546 --- /dev/null +++ b/exp/cloud/services/scalesets/service.go @@ -0,0 +1,38 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scalesets + +import ( + "sigs.k8s.io/cluster-api-provider-azure/cloud/scope" + infrav1expscope "sigs.k8s.io/cluster-api-provider-azure/exp/cloud/scope" +) + +// Service provides operations on azure resources +type Service struct { + Client + Scope *scope.ClusterScope + MachinePoolScope *infrav1expscope.MachinePoolScope +} + +// NewService creates a new service. +func NewService(scope *scope.ClusterScope, machinePoolScope *infrav1expscope.MachinePoolScope) *Service { + return &Service{ + Scope: scope, + MachinePoolScope: machinePoolScope, + Client: NewClient(scope.SubscriptionID, scope.Authorizer), + } +} diff --git a/exp/cloud/services/scalesets/vmss.go b/exp/cloud/services/scalesets/vmss.go new file mode 100644 index 00000000000..f6f9987a23d --- /dev/null +++ b/exp/cloud/services/scalesets/vmss.go @@ -0,0 +1,199 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scalesets + +import ( + "context" + "fmt" + + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute" + "github.com/Azure/go-autorest/autorest/to" + "github.com/pkg/errors" + "k8s.io/klog" + + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha3" + azure "sigs.k8s.io/cluster-api-provider-azure/cloud" + "sigs.k8s.io/cluster-api-provider-azure/cloud/converters" + convertersexp "sigs.k8s.io/cluster-api-provider-azure/exp/cloud/converters" +) + +// Spec contains properties to create a managed cluster. 
+// Spec input specification for Get/CreateOrUpdate/Delete calls +type ( + Spec struct { + Name string + Sku string + Capacity int64 + SSHKeyData string + Image *infrav1.Image + OSDisk infrav1.OSDisk + CustomData string + } +) + +func (s *Service) Get(ctx context.Context, spec interface{}) (interface{}, error) { + vmssSpec, ok := spec.(*Spec) + if !ok { + return compute.VirtualMachineScaleSet{}, errors.New("invalid VMSS specification") + } + + vmss, err := s.Client.Get(ctx, s.Scope.ResourceGroup(), vmssSpec.Name) + if err != nil { + return vmss, err + } + + vmssInstances, err := s.Client.ListInstances(ctx, s.Scope.ResourceGroup(), vmssSpec.Name) + if err != nil { + return vmss, err + } + + return convertersexp.SDKToVMSS(vmss, vmssInstances), nil +} + +func (s *Service) Reconcile(ctx context.Context, spec interface{}) error { + vmssSpec, ok := spec.(*Spec) + if !ok { + return errors.New("invalid VMSS specification") + } + + storageProfile, err := generateStorageProfile(*vmssSpec) + if err != nil { + return err + } + + // Make sure to use the MachineScope here to get the merger of AzureCluster and AzureMachine tags + additionalTags := s.MachinePoolScope.AdditionalTags() + // Set the cloud provider tag + additionalTags[infrav1.ClusterAzureCloudProviderTagKey(s.MachinePoolScope.Name())] = string(infrav1.ResourceLifecycleOwned) + + vmss := compute.VirtualMachineScaleSet{ + Location: to.StringPtr(s.Scope.Location()), + Tags: converters.TagsToMap(infrav1.Build(infrav1.BuildParams{ + ClusterName: s.Scope.Name(), + Lifecycle: infrav1.ResourceLifecycleOwned, + Name: to.StringPtr(s.MachinePoolScope.Name()), + Role: to.StringPtr(infrav1.Node), + Additional: additionalTags, + })), + Sku: &compute.Sku{ + Name: to.StringPtr(vmssSpec.Sku), + Tier: to.StringPtr("Standard"), + Capacity: to.Int64Ptr(vmssSpec.Capacity), + }, + VirtualMachineScaleSetProperties: &compute.VirtualMachineScaleSetProperties{ + UpgradePolicy: &compute.UpgradePolicy{ + Mode: compute.Manual, + }, + VirtualMachineProfile: &compute.VirtualMachineScaleSetVMProfile{ + OsProfile: &compute.VirtualMachineScaleSetOSProfile{ + ComputerNamePrefix: to.StringPtr(vmssSpec.Name), + AdminUsername: to.StringPtr(azure.DefaultUserName), + CustomData: to.StringPtr(vmssSpec.CustomData), + LinuxConfiguration: &compute.LinuxConfiguration{ + SSH: &compute.SSHConfiguration{ + PublicKeys: &[]compute.SSHPublicKey{ + { + Path: to.StringPtr(fmt.Sprintf("/home/%s/.ssh/authorized_keys", azure.DefaultUserName)), + KeyData: to.StringPtr(vmssSpec.SSHKeyData), + }, + }, + }, + DisablePasswordAuthentication: to.BoolPtr(true), + }, + }, + StorageProfile: storageProfile, + NetworkProfile: &compute.VirtualMachineScaleSetNetworkProfile{ + NetworkInterfaceConfigurations: &[]compute.VirtualMachineScaleSetNetworkConfiguration{ + { + Name: to.StringPtr(vmssSpec.Name + "-netconfig"), + VirtualMachineScaleSetNetworkConfigurationProperties: &compute.VirtualMachineScaleSetNetworkConfigurationProperties{ + Primary: to.BoolPtr(true), + EnableIPForwarding: to.BoolPtr(true), + IPConfigurations: &[]compute.VirtualMachineScaleSetIPConfiguration{ + { + Name: to.StringPtr(vmssSpec.Name + "-ipconfig"), + VirtualMachineScaleSetIPConfigurationProperties: &compute.VirtualMachineScaleSetIPConfigurationProperties{ + Subnet: &compute.APIEntityReference{ + ID: to.StringPtr(s.Scope.AzureCluster.Spec.NetworkSpec.Subnets[0].ID), + }, + Primary: to.BoolPtr(true), + PrivateIPAddressVersion: compute.IPv4, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + err = s.Client.CreateOrUpdate( + ctx, + 
s.Scope.ResourceGroup(), + vmssSpec.Name, + vmss) + if err != nil { + return errors.Wrapf(err, "cannot create VMSS") + } + + klog.V(2).Infof("successfully created VMSS %s ", vmssSpec.Name) + return nil +} + +func (s *Service) Delete(ctx context.Context, spec interface{}) error { + vmSpec, ok := spec.(*Spec) + if !ok { + return errors.New("invalid VMSS specification") + } + klog.V(2).Infof("deleting VMSS %s ", vmSpec.Name) + err := s.Client.Delete(ctx, s.Scope.ResourceGroup(), vmSpec.Name) + if err != nil && azure.ResourceNotFound(err) { + // already deleted + return nil + } + if err != nil { + return errors.Wrapf(err, "failed to delete VMSS %s in resource group %s", vmSpec.Name, s.Scope.ResourceGroup()) + } + + klog.V(2).Infof("successfully deleted VMSS %s ", vmSpec.Name) + return nil +} + +// generateStorageProfile generates a pointer to a compute.VirtualMachineScaleSetStorageProfile which can utilized for VM creation. +func generateStorageProfile(vmssSpec Spec) (*compute.VirtualMachineScaleSetStorageProfile, error) { + storageProfile := &compute.VirtualMachineScaleSetStorageProfile{ + OsDisk: &compute.VirtualMachineScaleSetOSDisk{ + OsType: compute.OperatingSystemTypes(vmssSpec.OSDisk.OSType), + CreateOption: compute.DiskCreateOptionTypesFromImage, + DiskSizeGB: to.Int32Ptr(vmssSpec.OSDisk.DiskSizeGB), + ManagedDisk: &compute.VirtualMachineScaleSetManagedDiskParameters{ + StorageAccountType: compute.StorageAccountTypes(vmssSpec.OSDisk.ManagedDisk.StorageAccountType), + }, + }, + } + + imageRef, err := converters.ImageToSDK(vmssSpec.Image) + if err != nil { + return nil, err + } + + storageProfile.ImageReference = imageRef + + return storageProfile, nil +} diff --git a/exp/cloud/services/scalesets/vmss_test.go b/exp/cloud/services/scalesets/vmss_test.go new file mode 100644 index 00000000000..7ac75e677de --- /dev/null +++ b/exp/cloud/services/scalesets/vmss_test.go @@ -0,0 +1,466 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package scalesets + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/to" + "github.com/golang/mock/gomock" + "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/scheme" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" + clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1alpha3" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha3" + azure "sigs.k8s.io/cluster-api-provider-azure/cloud" + "sigs.k8s.io/cluster-api-provider-azure/cloud/scope" + infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha3" + scopeExp "sigs.k8s.io/cluster-api-provider-azure/exp/cloud/scope" + "sigs.k8s.io/cluster-api-provider-azure/exp/cloud/services/scalesets/mock_scalesets" + "sigs.k8s.io/cluster-api-provider-azure/internal/test/matchers" +) + +func init() { + _ = clusterv1.AddToScheme(scheme.Scheme) +} + +func TestNewService(t *testing.T) { + cluster := &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"}, + } + client := fake.NewFakeClientWithScheme(scheme.Scheme, cluster) + s, err := scope.NewClusterScope(scope.ClusterScopeParams{ + AzureClients: scope.AzureClients{ + SubscriptionID: "123", + Authorizer: autorest.NullAuthorizer{}, + }, + Client: client, + Cluster: cluster, + AzureCluster: &infrav1.AzureCluster{ + Spec: infrav1.AzureClusterSpec{ + Location: "test-location", + ResourceGroup: "my-rg", + NetworkSpec: infrav1.NetworkSpec{ + Vnet: infrav1.VnetSpec{Name: "my-vnet", ResourceGroup: "my-rg"}, + }, + }, + }, + }) + g := gomega.NewGomegaWithT(t) + g.Expect(err).ToNot(gomega.HaveOccurred()) + + mps, err := scopeExp.NewMachinePoolScope(scopeExp.MachinePoolScopeParams{ + AzureClients: s.AzureClients, + Client: client, + Logger: s.Logger, + Cluster: s.Cluster, + MachinePool: new(clusterv1exp.MachinePool), + AzureCluster: s.AzureCluster, + AzureMachinePool: new(infrav1exp.AzureMachinePool), + }) + g.Expect(err).ToNot(gomega.HaveOccurred()) + actual := NewService(s, mps) + g.Expect(actual).ToNot(gomega.BeNil()) + g.Expect(actual.MachinePoolScope).To(gomega.Equal(mps)) + g.Expect(actual.Scope).To(gomega.Equal(s)) +} + +func TestService_Get(t *testing.T) { + cases := []struct { + Name string + SpecFactory func(g *gomega.GomegaWithT, svc *Service) interface{} + Setup func(ctx context.Context, g *gomega.GomegaWithT, svc *Service) + Expect func(ctx context.Context, g *gomega.GomegaWithT, result interface{}, err error) + }{ + { + Name: "WithInvalidSepcType", + SpecFactory: func(g *gomega.GomegaWithT, _ *Service) interface{} { + return "bin" + }, + Expect: func(_ context.Context, g *gomega.GomegaWithT, result interface{}, err error) { + g.Expect(err).To(gomega.MatchError("invalid VMSS specification")) + }, + }, + { + Name: "WithValidSpecBut404FromAzureOnVMSS", + SpecFactory: func(g *gomega.GomegaWithT, svc *Service) interface{} { + return &Spec{ + Name: svc.MachinePoolScope.Name(), + } + }, + Setup: func(ctx context.Context, g *gomega.GomegaWithT, svc *Service) { + mockCtrl := gomock.NewController(t) + vmssMock := mock_scalesets.NewMockClient(mockCtrl) + svc.Client = vmssMock + vmssMock.EXPECT().Get(gomock.Any(), svc.Scope.AzureCluster.Spec.ResourceGroup, svc.MachinePoolScope.Name()).Return(compute.VirtualMachineScaleSet{}, autorest.DetailedError{ + StatusCode: 404, + }) + }, + Expect: func(ctx context.Context, g 
*gomega.GomegaWithT, result interface{}, err error) { + g.Expect(err).To(gomega.Equal(autorest.DetailedError{ + StatusCode: 404, + })) + }, + }, + { + Name: "WithValidSpecBut404FromAzureOnInstances", + SpecFactory: func(g *gomega.GomegaWithT, svc *Service) interface{} { + return &Spec{ + Name: svc.MachinePoolScope.Name(), + } + }, + Setup: func(ctx context.Context, g *gomega.GomegaWithT, svc *Service) { + mockCtrl := gomock.NewController(t) + vmssMock := mock_scalesets.NewMockClient(mockCtrl) + svc.Client = vmssMock + vmssMock.EXPECT().Get(gomock.Any(), svc.Scope.AzureCluster.Spec.ResourceGroup, svc.MachinePoolScope.Name()).Return(compute.VirtualMachineScaleSet{}, nil) + vmssMock.EXPECT().ListInstances(gomock.Any(), svc.Scope.AzureCluster.Spec.ResourceGroup, svc.MachinePoolScope.Name()).Return([]compute.VirtualMachineScaleSetVM{}, autorest.DetailedError{ + StatusCode: 404, + }) + }, + Expect: func(ctx context.Context, g *gomega.GomegaWithT, result interface{}, err error) { + g.Expect(err).To(gomega.Equal(autorest.DetailedError{ + StatusCode: 404, + })) + }, + }, + { + Name: "WithValidSpecWithVMSSAndInstancesReturned", + SpecFactory: func(g *gomega.GomegaWithT, svc *Service) interface{} { + return &Spec{ + Name: svc.MachinePoolScope.Name(), + } + }, + Setup: func(ctx context.Context, g *gomega.GomegaWithT, svc *Service) { + mockCtrl := gomock.NewController(t) + vmssMock := mock_scalesets.NewMockClient(mockCtrl) + svc.Client = vmssMock + vmssMock.EXPECT().Get(gomock.Any(), svc.Scope.AzureCluster.Spec.ResourceGroup, svc.MachinePoolScope.Name()).Return(compute.VirtualMachineScaleSet{ + Name: to.StringPtr(svc.MachinePoolScope.Name()), + Sku: &compute.Sku{ + Capacity: to.Int64Ptr(1), + Name: to.StringPtr("Standard"), + }, + VirtualMachineScaleSetProperties: &compute.VirtualMachineScaleSetProperties{ + ProvisioningState: to.StringPtr("Succeeded"), + }, + }, nil) + vmssMock.EXPECT().ListInstances(gomock.Any(), svc.Scope.AzureCluster.Spec.ResourceGroup, svc.MachinePoolScope.Name()).Return([]compute.VirtualMachineScaleSetVM{ + { + Name: to.StringPtr("vm0"), + InstanceID: to.StringPtr("0"), + VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{ + ProvisioningState: to.StringPtr("Succeeded"), + }, + }, + }, nil) + }, + Expect: func(ctx context.Context, g *gomega.GomegaWithT, result interface{}, err error) { + g.Expect(result).To(gomega.Equal(&infrav1exp.VMSS{ + Name: "capz-mp-0", + Sku: "Standard", + Capacity: 1, + Image: infrav1.Image{}, + State: "Succeeded", + Instances: []infrav1exp.VMSSVM{ + { + InstanceID: "0", + Name: "vm0", + State: "Succeeded", + }, + }, + })) + }, + }, + } + + for _, c := range cases { + c := c + t.Run(c.Name, func(t *testing.T) { + t.Parallel() + g := gomega.NewGomegaWithT(t) + svc := getNewService(g) + spec := c.SpecFactory(g, svc) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + if c.Setup != nil { + c.Setup(ctx, g, svc) + } + res, err := svc.Get(context.Background(), spec) + c.Expect(ctx, g, res, err) + }) + } +} + +func TestService_Reconcile(t *testing.T) { + cases := []struct { + Name string + SpecFactory func(g *gomega.GomegaWithT, svc *Service) interface{} + Setup func(ctx context.Context, g *gomega.GomegaWithT, svc *Service, spec *Spec) + Expect func(ctx context.Context, g *gomega.GomegaWithT, err error) + }{ + { + Name: "WithInvalidSepcType", + SpecFactory: func(g *gomega.GomegaWithT, _ *Service) interface{} { + return "bazz" + }, + Expect: func(_ context.Context, g *gomega.GomegaWithT, err error) { + 
g.Expect(err).To(gomega.MatchError("invalid VMSS specification")) + }, + }, + { + Name: "WithValidSpec", + SpecFactory: func(g *gomega.GomegaWithT, svc *Service) interface{} { + return &Spec{ + Name: svc.MachinePoolScope.Name(), + Sku: "skuName", + Capacity: 2, + SSHKeyData: "sshKeyData", + OSDisk: infrav1.OSDisk{ + OSType: "Linux", + DiskSizeGB: 120, + ManagedDisk: infrav1.ManagedDisk{ + StorageAccountType: "accountType", + }, + }, + Image: &infrav1.Image{ + ID: to.StringPtr("image"), + }, + CustomData: "customData", + } + }, + Setup: func(ctx context.Context, g *gomega.GomegaWithT, svc *Service, spec *Spec) { + mockCtrl := gomock.NewController(t) + vmssMock := mock_scalesets.NewMockClient(mockCtrl) + svc.Client = vmssMock + + storageProfile, err := generateStorageProfile(*spec) + g.Expect(err).ToNot(gomega.HaveOccurred()) + + vmss := compute.VirtualMachineScaleSet{ + Location: to.StringPtr(svc.Scope.Location()), + Tags: map[string]*string{ + "Name": to.StringPtr("capz-mp-0"), + "kubernetes.io_cluster_capz-mp-0": to.StringPtr("owned"), + "sigs.k8s.io_cluster-api-provider-azure_cluster_test-cluster": to.StringPtr("owned"), + "sigs.k8s.io_cluster-api-provider-azure_role": to.StringPtr("node"), + }, + Sku: &compute.Sku{ + Name: to.StringPtr(spec.Sku), + Tier: to.StringPtr("Standard"), + Capacity: to.Int64Ptr(spec.Capacity), + }, + VirtualMachineScaleSetProperties: &compute.VirtualMachineScaleSetProperties{ + UpgradePolicy: &compute.UpgradePolicy{ + Mode: compute.Manual, + }, + VirtualMachineProfile: &compute.VirtualMachineScaleSetVMProfile{ + OsProfile: &compute.VirtualMachineScaleSetOSProfile{ + ComputerNamePrefix: to.StringPtr(spec.Name), + AdminUsername: to.StringPtr(azure.DefaultUserName), + CustomData: to.StringPtr(spec.CustomData), + LinuxConfiguration: &compute.LinuxConfiguration{ + SSH: &compute.SSHConfiguration{ + PublicKeys: &[]compute.SSHPublicKey{ + { + Path: to.StringPtr(fmt.Sprintf("/home/%s/.ssh/authorized_keys", azure.DefaultUserName)), + KeyData: to.StringPtr(spec.SSHKeyData), + }, + }, + }, + DisablePasswordAuthentication: to.BoolPtr(true), + }, + }, + StorageProfile: storageProfile, + NetworkProfile: &compute.VirtualMachineScaleSetNetworkProfile{ + NetworkInterfaceConfigurations: &[]compute.VirtualMachineScaleSetNetworkConfiguration{ + { + Name: to.StringPtr(spec.Name + "-netconfig"), + VirtualMachineScaleSetNetworkConfigurationProperties: &compute.VirtualMachineScaleSetNetworkConfigurationProperties{ + Primary: to.BoolPtr(true), + EnableIPForwarding: to.BoolPtr(true), + IPConfigurations: &[]compute.VirtualMachineScaleSetIPConfiguration{ + { + Name: to.StringPtr(spec.Name + "-ipconfig"), + VirtualMachineScaleSetIPConfigurationProperties: &compute.VirtualMachineScaleSetIPConfigurationProperties{ + Subnet: &compute.APIEntityReference{ + ID: to.StringPtr(svc.Scope.AzureCluster.Spec.NetworkSpec.Subnets[0].ID), + }, + Primary: to.BoolPtr(true), + PrivateIPAddressVersion: compute.IPv4, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + vmssMock.EXPECT().CreateOrUpdate(gomock.Any(), svc.Scope.AzureCluster.Spec.ResourceGroup, spec.Name, matchers.DiffEq(vmss)).Return(nil) + }, + Expect: func(ctx context.Context, g *gomega.GomegaWithT, err error) { + g.Expect(err).ToNot(gomega.HaveOccurred()) + }, + }, + } + + for _, c := range cases { + c := c + t.Run(c.Name, func(t *testing.T) { + t.Parallel() + g := gomega.NewGomegaWithT(t) + svc := getNewService(g) + spec := c.SpecFactory(g, svc) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + if c.Setup != nil { + 
c.Setup(ctx, g, svc, spec.(*Spec)) + } + err := svc.Reconcile(context.Background(), spec) + c.Expect(ctx, g, err) + }) + } +} + +func TestService_Delete(t *testing.T) { + cases := []struct { + Name string + SpecFactory func(g *gomega.GomegaWithT, svc *Service) interface{} + Setup func(ctx context.Context, g *gomega.GomegaWithT, svc *Service) + Expect func(ctx context.Context, g *gomega.GomegaWithT, err error) + }{ + { + Name: "WithInvalidSepcType", + SpecFactory: func(g *gomega.GomegaWithT, _ *Service) interface{} { + return "foo" + }, + Expect: func(_ context.Context, g *gomega.GomegaWithT, err error) { + g.Expect(err).To(gomega.MatchError("invalid VMSS specification")) + }, + }, + { + Name: "WithValidSpecBut404FromAzureOnVMSSAssumeAlreadyDeleted", + SpecFactory: func(g *gomega.GomegaWithT, svc *Service) interface{} { + return &Spec{ + Name: svc.MachinePoolScope.Name(), + } + }, + Setup: func(ctx context.Context, g *gomega.GomegaWithT, svc *Service) { + mockCtrl := gomock.NewController(t) + vmssMock := mock_scalesets.NewMockClient(mockCtrl) + svc.Client = vmssMock + vmssMock.EXPECT().Delete(gomock.Any(), svc.Scope.AzureCluster.Spec.ResourceGroup, svc.MachinePoolScope.Name()).Return(autorest.DetailedError{ + StatusCode: 404, + }) + }, + Expect: func(ctx context.Context, g *gomega.GomegaWithT, err error) { + g.Expect(err).ToNot(gomega.HaveOccurred()) + }, + }, + { + Name: "WithValidSpecAndSuccessfulDelete", + SpecFactory: func(g *gomega.GomegaWithT, svc *Service) interface{} { + return &Spec{ + Name: svc.MachinePoolScope.Name(), + } + }, + Setup: func(ctx context.Context, g *gomega.GomegaWithT, svc *Service) { + mockCtrl := gomock.NewController(t) + vmssMock := mock_scalesets.NewMockClient(mockCtrl) + svc.Client = vmssMock + vmssMock.EXPECT().Delete(gomock.Any(), svc.Scope.AzureCluster.Spec.ResourceGroup, svc.MachinePoolScope.Name()).Return(nil) + }, + Expect: func(ctx context.Context, g *gomega.GomegaWithT, err error) { + g.Expect(err).ToNot(gomega.HaveOccurred()) + }, + }, + } + + for _, c := range cases { + c := c + t.Run(c.Name, func(t *testing.T) { + t.Parallel() + g := gomega.NewGomegaWithT(t) + svc := getNewService(g) + spec := c.SpecFactory(g, svc) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + if c.Setup != nil { + c.Setup(ctx, g, svc) + } + err := svc.Delete(context.Background(), spec) + c.Expect(ctx, g, err) + }) + } +} + +func getNewService(g *gomega.GomegaWithT) *Service { + cluster := &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"}, + } + client := fake.NewFakeClientWithScheme(scheme.Scheme, cluster) + s, err := scope.NewClusterScope(scope.ClusterScopeParams{ + AzureClients: scope.AzureClients{ + SubscriptionID: "123", + Authorizer: autorest.NullAuthorizer{}, + }, + Client: client, + Cluster: cluster, + AzureCluster: &infrav1.AzureCluster{ + Spec: infrav1.AzureClusterSpec{ + Location: "test-location", + ResourceGroup: "my-rg", + NetworkSpec: infrav1.NetworkSpec{ + Vnet: infrav1.VnetSpec{Name: "my-vnet", ResourceGroup: "my-rg"}, + Subnets: infrav1.Subnets{ + { + ID: "subnet0.id", + }, + }, + }, + }, + }, + }) + g.Expect(err).ToNot(gomega.HaveOccurred()) + + mps, err := scopeExp.NewMachinePoolScope(scopeExp.MachinePoolScopeParams{ + AzureClients: s.AzureClients, + Client: client, + Logger: s.Logger, + Cluster: s.Cluster, + MachinePool: new(clusterv1exp.MachinePool), + AzureCluster: s.AzureCluster, + AzureMachinePool: &infrav1exp.AzureMachinePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "capz-mp-0", + }, + }, + }) + 
g.Expect(err).ToNot(gomega.HaveOccurred()) + + return NewService(s, mps) +} diff --git a/exp/controllers/.gitkeep b/exp/controllers/.gitkeep deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/exp/controllers/azuremachinepool_controller.go b/exp/controllers/azuremachinepool_controller.go new file mode 100644 index 00000000000..3422a166a30 --- /dev/null +++ b/exp/controllers/azuremachinepool_controller.go @@ -0,0 +1,596 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + + "github.com/Azure/go-autorest/autorest/to" + "github.com/go-logr/logr" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/client-go/tools/record" + capiv1 "sigs.k8s.io/cluster-api/api/v1alpha3" + capierrors "sigs.k8s.io/cluster-api/errors" + capiv1exp "sigs.k8s.io/cluster-api/exp/api/v1alpha3" + "sigs.k8s.io/cluster-api/util" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" + + capzcntr "sigs.k8s.io/cluster-api-provider-azure/controllers" + + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha3" + azure "sigs.k8s.io/cluster-api-provider-azure/cloud" + "sigs.k8s.io/cluster-api-provider-azure/cloud/scope" + infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha3" + infrav1expscope "sigs.k8s.io/cluster-api-provider-azure/exp/cloud/scope" + "sigs.k8s.io/cluster-api-provider-azure/exp/cloud/services/scalesets" +) + +type ( + // AzureMachinePoolReconciler reconciles a AzureMachinePool object + AzureMachinePoolReconciler struct { + client.Client + Log logr.Logger + Scheme *runtime.Scheme + Recorder record.EventRecorder + } + + // azureMachinePoolService provides structure and behavior around the operations needed to reconcile Azure Machine Pools + azureMachinePoolService struct { + machinePoolScope *infrav1expscope.MachinePoolScope + clusterScope *scope.ClusterScope + virtualMachinesScaleSetSvc azure.GetterService + } + + // annotationReaderWriter provides an interface to read and write annotations + annotationReaderWriter interface { + GetAnnotations() map[string]string + SetAnnotations(annotations map[string]string) + } +) + +func (r *AzureMachinePoolReconciler) SetupWithManager(mgr ctrl.Manager, options controller.Options) error { + return ctrl.NewControllerManagedBy(mgr). + WithOptions(options). + For(&infrav1exp.AzureMachinePool{}). 
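+		// The watches below map related events back to AzureMachinePool
+		// reconcile requests: MachinePool events via the infrastructureRef,
+		// and AzureCluster events via the owning Cluster's MachinePools.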
+ Watches( + &source.Kind{Type: &capiv1exp.MachinePool{}}, + &handler.EnqueueRequestsFromMapFunc{ + ToRequests: machinePoolToInfrastructureMapFunc(infrav1exp.GroupVersion.WithKind("AzureMachinePool"), r.Log), + }, + ). + Watches( + &source.Kind{Type: &infrav1.AzureCluster{}}, + &handler.EnqueueRequestsFromMapFunc{ + ToRequests: azureClusterToAzureMachinePoolsFunc(r.Client, r.Log), + }). + Complete(r) +} + +// +kubebuilder:rbac:groups=exp.infrastructure.cluster.x-k8s.io,resources=azuremachinepools,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=exp.infrastructure.cluster.x-k8s.io,resources=azuremachinepools/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=exp.cluster.x-k8s.io,resources=machinepools;machinepools/status,verbs=get;list;watch +// +kubebuilder:rbac:groups="",resources=events,verbs=get;list;watch;create;update;patch +// +kubebuilder:rbac:groups="",resources=secrets;,verbs=get;list;watch + +func (r *AzureMachinePoolReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, reterr error) { + ctx := context.TODO() + logger := r.Log.WithValues("namespace", req.Namespace, "azureMachine", req.Name) + + azMachinePool := &infrav1exp.AzureMachinePool{} + err := r.Get(ctx, req.NamespacedName, azMachinePool) + if err != nil { + if apierrors.IsNotFound(err) { + return reconcile.Result{}, nil + } + return reconcile.Result{}, err + } + + // Fetch the CAPI MachinePool. + machinePool, err := getOwnerMachinePool(ctx, r.Client, azMachinePool.ObjectMeta) + if err != nil { + return reconcile.Result{}, err + } + if machinePool == nil { + logger.Info("MachinePool Controller has not yet set OwnerRef") + return reconcile.Result{}, nil + } + + logger = logger.WithValues("machinePool", machinePool.Name) + + // Fetch the Cluster. + cluster, err := util.GetClusterFromMetadata(ctx, r.Client, machinePool.ObjectMeta) + if err != nil { + logger.Info("MachinePool is missing cluster label or cluster does not exist") + return reconcile.Result{}, nil + } + + logger = logger.WithValues("cluster", cluster.Name) + + azureCluster := &infrav1.AzureCluster{} + + azureClusterName := client.ObjectKey{ + Namespace: azMachinePool.Namespace, + Name: cluster.Spec.InfrastructureRef.Name, + } + if err := r.Client.Get(ctx, azureClusterName, azureCluster); err != nil { + logger.Info("AzureCluster is not available yet") + return reconcile.Result{}, nil + } + + logger = logger.WithValues("AzureCluster", azureCluster.Name) + + // Create the cluster scope + clusterScope, err := scope.NewClusterScope(scope.ClusterScopeParams{ + Client: r.Client, + Logger: logger, + Cluster: cluster, + AzureCluster: azureCluster, + }) + if err != nil { + return reconcile.Result{}, err + } + + // Create the machine pool scope + machinePoolScope, err := infrav1expscope.NewMachinePoolScope(infrav1expscope.MachinePoolScopeParams{ + Logger: logger, + Client: r.Client, + Cluster: cluster, + MachinePool: machinePool, + AzureCluster: azureCluster, + AzureMachinePool: azMachinePool, + }) + if err != nil { + return reconcile.Result{}, errors.Errorf("failed to create scope: %+v", err) + } + + // Always close the scope when exiting this function so we can persist any AzureMachine changes. 
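+	// (Close patches the AzureMachinePool through the patch helper,
+	// persisting both spec and status changes.)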
+	defer func() {
+		if err := machinePoolScope.Close(); err != nil && reterr == nil {
+			reterr = err
+		}
+	}()
+
+	// Handle deleted machine pools
+	if !azMachinePool.ObjectMeta.DeletionTimestamp.IsZero() {
+		return r.reconcileDelete(machinePoolScope, clusterScope)
+	}
+
+	// Handle non-deleted machine pools
+	return r.reconcileNormal(ctx, machinePoolScope, clusterScope)
+}
+
+func (r *AzureMachinePoolReconciler) reconcileNormal(ctx context.Context, machinePoolScope *infrav1expscope.MachinePoolScope, clusterScope *scope.ClusterScope) (_ reconcile.Result, reterr error) {
+	machinePoolScope.Info("Reconciling AzureMachinePool")
+	// If the AzureMachinePool is in an error state, return early.
+	if machinePoolScope.AzureMachinePool.Status.FailureReason != nil || machinePoolScope.AzureMachinePool.Status.FailureMessage != nil {
+		machinePoolScope.Info("Error state detected, skipping reconciliation")
+		return reconcile.Result{}, nil
+	}
+
+	// If the AzureMachinePool doesn't have our finalizer, add it.
+	controllerutil.AddFinalizer(machinePoolScope.AzureMachinePool, capiv1exp.MachinePoolFinalizer)
+	// Register the finalizer immediately to avoid orphaning Azure resources on delete
+	if err := machinePoolScope.PatchObject(); err != nil {
+		return reconcile.Result{}, err
+	}
+
+	if !machinePoolScope.Cluster.Status.InfrastructureReady {
+		machinePoolScope.Info("Cluster infrastructure is not ready yet")
+		return reconcile.Result{}, nil
+	}
+
+	// Make sure bootstrap data is available and populated.
+	if machinePoolScope.MachinePool.Spec.Template.Spec.Bootstrap.DataSecretName == nil {
+		machinePoolScope.Info("Bootstrap data secret reference is not yet available")
+		return reconcile.Result{}, nil
+	}
+
+	// Check that the image is valid.
+	// NOTE: this validation logic is also in the validating webhook
+	if machinePoolScope.AzureMachinePool.Spec.Template.Image != nil {
+		image := machinePoolScope.AzureMachinePool.Spec.Template.Image
+		if errs := infrav1.ValidateImage(image, field.NewPath("image")); len(errs) > 0 {
+			agg := kerrors.NewAggregate(errs.ToAggregate().Errors())
+			// logr loggers take key/value pairs, not printf verbs.
+			machinePoolScope.Info("Invalid image", "validationErrors", agg.Error())
+			r.Recorder.Eventf(machinePoolScope.AzureMachinePool, corev1.EventTypeWarning, "InvalidImage", "Invalid image: %s", agg.Error())
+			return reconcile.Result{}, nil
+		}
+	}
+
+	ams := newAzureMachinePoolService(machinePoolScope, clusterScope)
+
+	// Get or create the virtual machine scale set.
+	vmss, err := ams.CreateOrUpdate()
+	if err != nil {
+		return reconcile.Result{}, err
+	}
+
+	// Make sure Spec.ProviderID is always set.
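+	// Spec.ProviderIDList gets one entry per VMSS instance; the CAPI
+	// MachinePool controller matches these against Node.Spec.ProviderID to
+	// track which nodes belong to the pool.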
+ machinePoolScope.AzureMachinePool.Spec.ProviderID = fmt.Sprintf("azure:////%s", vmss.ID) + providerIDList := make([]string, len(vmss.Instances)) + var readyCount int32 + for i, vm := range vmss.Instances { + providerIDList[i] = fmt.Sprintf("azure:////%s", vm.ID) + if vm.State == infrav1.VMStateSucceeded { + readyCount++ + } + } + machinePoolScope.AzureMachinePool.Spec.ProviderIDList = providerIDList + machinePoolScope.AzureMachinePool.Status.ProvisioningState = &vmss.State + machinePoolScope.AzureMachinePool.Status.Replicas = int32(len(providerIDList)) + machinePoolScope.SetAnnotation("cluster-api-provider-azure", "true") + + switch vmss.State { + case infrav1.VMStateSucceeded: + machinePoolScope.Info("Machine Pool is running", "id", *machinePoolScope.GetID()) + machinePoolScope.SetReady() + case infrav1.VMStateUpdating: + machinePoolScope.Info("Machine Pool is updating", "id", *machinePoolScope.GetID()) + default: + machinePoolScope.SetFailureReason(capierrors.UpdateMachineError) + machinePoolScope.SetFailureMessage(errors.Errorf("Azure VMSS state %q is unexpected", vmss.State)) + } + + // Ensure that the tags are correct. + err = r.reconcileTags(machinePoolScope, clusterScope, machinePoolScope.AdditionalTags()) + if err != nil { + return reconcile.Result{}, errors.Errorf("failed to ensure tags: %+v", err) + } + + return reconcile.Result{}, nil +} + +func (r *AzureMachinePoolReconciler) reconcileDelete(machinePoolScope *infrav1expscope.MachinePoolScope, clusterScope *scope.ClusterScope) (_ reconcile.Result, reterr error) { + machinePoolScope.Info("Handling deleted AzureMachinePool") + + if err := newAzureMachinePoolService(machinePoolScope, clusterScope).Delete(); err != nil { + return reconcile.Result{}, errors.Wrapf(err, "error deleting AzureCluster %s/%s", clusterScope.Namespace(), clusterScope.Name()) + } + + defer func() { + if reterr == nil { + // VM is deleted so remove the finalizer. + controllerutil.RemoveFinalizer(machinePoolScope.AzureMachinePool, capiv1exp.MachinePoolFinalizer) + } + }() + + return reconcile.Result{}, nil +} + +// machinePoolToInfrastructureMapFunc returns a handler.ToRequestsFunc that watches for +// MachinePool events and returns reconciliation requests for an infrastructure provider object. +func machinePoolToInfrastructureMapFunc(gvk schema.GroupVersionKind, log logr.Logger) handler.ToRequestsFunc { + return func(o handler.MapObject) []reconcile.Request { + m, ok := o.Object.(*capiv1exp.MachinePool) + if !ok { + log.Info("attempt to map incorrect type", "type", fmt.Sprintf("%T", o.Object)) + return nil + } + + gk := gvk.GroupKind() + // Return early if the GroupVersionKind doesn't match what we expect. + infraGK := m.Spec.Template.Spec.InfrastructureRef.GroupVersionKind().GroupKind() + if gk != infraGK { + log.Info("gk does not match", "gk", gk, "infraGK", infraGK) + return nil + } + + return []reconcile.Request{ + { + NamespacedName: client.ObjectKey{ + Namespace: m.Namespace, + Name: m.Spec.Template.Spec.InfrastructureRef.Name, + }, + }, + } + } +} + +// azureClusterToAzureMachinePoolsFunc is a handler.ToRequestsFunc to be used to enqueue +// requests for reconciliation of AzureMachinePools. 
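+// The mapping walks AzureCluster -> owning Cluster -> MachinePools carrying
+// that cluster's label, and enqueues one request per MachinePool whose
+// infrastructureRef names an AzureMachinePool.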
+func azureClusterToAzureMachinePoolsFunc(kClient client.Client, log logr.Logger) handler.ToRequestsFunc {
+	return func(o handler.MapObject) []reconcile.Request {
+		c, ok := o.Object.(*infrav1.AzureCluster)
+		if !ok {
+			log.Error(errors.Errorf("expected an AzureCluster but got a %T", o.Object), "failed to get AzureCluster")
+			return nil
+		}
+		logWithValues := log.WithValues("AzureCluster", c.Name, "Namespace", c.Namespace)
+
+		cluster, err := util.GetOwnerCluster(context.TODO(), kClient, c.ObjectMeta)
+		switch {
+		case apierrors.IsNotFound(err) || cluster == nil:
+			logWithValues.Info("owning cluster not found")
+			return nil
+		case err != nil:
+			logWithValues.Error(err, "failed to get owning cluster")
+			return nil
+		}
+
+		labels := map[string]string{capiv1.ClusterLabelName: cluster.Name}
+		mpl := &capiv1exp.MachinePoolList{}
+		if err := kClient.List(context.TODO(), mpl, client.InNamespace(c.Namespace), client.MatchingLabels(labels)); err != nil {
+			logWithValues.Error(err, "failed to list MachinePools")
+			return nil
+		}
+
+		var result []reconcile.Request
+		for _, m := range mpl.Items {
+			if m.Spec.Template.Spec.InfrastructureRef.Name == "" {
+				continue
+			}
+			result = append(result, reconcile.Request{
+				NamespacedName: client.ObjectKey{
+					Namespace: m.Namespace,
+					Name:      m.Spec.Template.Spec.InfrastructureRef.Name,
+				},
+			})
+		}
+
+		return result
+	}
+}
+
+// reconcileTags ensures that the tags on the scale set backing the machine pool are correct.
+func (r *AzureMachinePoolReconciler) reconcileTags(machinePoolScope *infrav1expscope.MachinePoolScope, clusterScope *scope.ClusterScope, additionalTags map[string]string) error {
+	machinePoolScope.Info("Updating tags on AzureMachinePool")
+	annotation, err := r.AnnotationJSON(machinePoolScope.AzureMachinePool, capzcntr.TagsLastAppliedAnnotation)
+	if err != nil {
+		return err
+	}
+	changed, created, deleted, newAnnotation := capzcntr.TagsChanged(annotation, additionalTags)
+	if changed {
+		vmssSpec := &scalesets.Spec{
+			Name: machinePoolScope.Name(),
+		}
+		svc := scalesets.NewService(clusterScope, machinePoolScope)
+		vmss, err := svc.Client.Get(clusterScope.Context, clusterScope.ResourceGroup(), machinePoolScope.Name())
+		if err != nil {
+			return errors.Wrapf(err, "failed to get VMSS for AzureMachinePool")
+		}
+		tags := vmss.Tags
+		for k, v := range created {
+			tags[k] = to.StringPtr(v)
+		}
+
+		for k := range deleted {
+			delete(tags, k)
+		}
+
+		vmss.Tags = tags
+
+		err = svc.Client.CreateOrUpdate(
+			clusterScope.Context,
+			clusterScope.ResourceGroup(),
+			vmssSpec.Name,
+			vmss)
+		if err != nil {
+			return errors.Wrapf(err, "cannot update VMSS tags")
+		}
+
+		// We also need to update the annotation if anything changed.
+		err = r.updateAnnotationJSON(machinePoolScope.AzureMachinePool, capzcntr.TagsLastAppliedAnnotation, newAnnotation)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// updateAnnotationJSON updates the `annotation` on an `annotationReaderWriter` with
+// `content`. `content` in this case should be a `map[string]interface{}`
+// suitable for turning into JSON. This `content` map will be marshalled into a
+// JSON string before being set as the given `annotation`.
+func (r *AzureMachinePoolReconciler) updateAnnotationJSON(rw annotationReaderWriter, annotation string, content map[string]interface{}) error {
+	b, err := json.Marshal(content)
+	if err != nil {
+		return err
+	}
+
+	r.updateAnnotation(rw, annotation, string(b))
+	return nil
+}
+
+// updateAnnotation updates the `annotation` on an `annotationReaderWriter` with
+// `content`.
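+// For example, a previously-applied tag set is stored on the object as a
+// JSON annotation value such as (illustrative)
+//
+//   {"sigs.k8s.io_cluster-api-provider-azure_role":"node"}
+//
+// and is read back via AnnotationJSON to diff against the desired tags.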
+func (r *AzureMachinePoolReconciler) updateAnnotation(rw annotationReaderWriter, annotation string, content string) {
+	// Get the annotations
+	annotations := rw.GetAnnotations()
+	if annotations == nil {
+		annotations = map[string]string{}
+	}
+
+	// Set our annotation to the given content.
+	annotations[annotation] = content
+
+	// Update the machine pool object with these annotations
+	rw.SetAnnotations(annotations)
+}
+
+// AnnotationJSON returns a map[string]interface{} from a JSON annotation.
+// This method gets the given `annotation` from an `annotationReaderWriter` and unmarshals it
+// from a JSON string into a `map[string]interface{}`.
+func (r *AzureMachinePoolReconciler) AnnotationJSON(rw annotationReaderWriter, annotation string) (map[string]interface{}, error) {
+	out := map[string]interface{}{}
+
+	jsonAnnotation := r.Annotation(rw, annotation)
+	if len(jsonAnnotation) == 0 {
+		return out, nil
+	}
+
+	err := json.Unmarshal([]byte(jsonAnnotation), &out)
+	if err != nil {
+		return out, err
+	}
+
+	return out, nil
+}
+
+// Annotation fetches the specified machine pool annotation.
+func (r *AzureMachinePoolReconciler) Annotation(rw annotationReaderWriter, annotation string) string {
+	return rw.GetAnnotations()[annotation]
+}
+
+// newAzureMachinePoolService populates all the services based on input scope
+func newAzureMachinePoolService(machinePoolScope *infrav1expscope.MachinePoolScope, clusterScope *scope.ClusterScope) *azureMachinePoolService {
+	return &azureMachinePoolService{
+		machinePoolScope:           machinePoolScope,
+		clusterScope:               clusterScope,
+		virtualMachinesScaleSetSvc: scalesets.NewService(clusterScope, machinePoolScope),
+	}
+}
+
+func (s *azureMachinePoolService) CreateOrUpdate() (*infrav1exp.VMSS, error) {
+	ampSpec := s.machinePoolScope.AzureMachinePool.Spec
+	var replicas int64
+	if s.machinePoolScope.MachinePool.Spec.Replicas != nil {
+		replicas = int64(to.Int32(s.machinePoolScope.MachinePool.Spec.Replicas))
+	}
+
+	decoded, err := base64.StdEncoding.DecodeString(ampSpec.Template.SSHPublicKey)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to base64 decode ssh public key")
+	}
+
+	image, err := getVMImage(s.machinePoolScope)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to get VMSS image")
+	}
+
+	bootstrapData, err := s.machinePoolScope.GetBootstrapData()
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to retrieve bootstrap data")
+	}
+
+	vmssSpec := &scalesets.Spec{
+		Name:       s.machinePoolScope.Name(),
+		Sku:        ampSpec.Template.VMSize,
+		Capacity:   replicas,
+		SSHKeyData: string(decoded),
+		Image:      image,
+		OSDisk:     ampSpec.Template.OSDisk,
+		CustomData: bootstrapData,
+	}
+
+	err = s.virtualMachinesScaleSetSvc.Reconcile(context.TODO(), vmssSpec)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to create or get machine pool")
+	}
+
+	newVMSS, err := s.virtualMachinesScaleSetSvc.Get(s.clusterScope.Context, vmssSpec)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to get VMSS")
+	}
+
+	vmss, ok := newVMSS.(*infrav1exp.VMSS)
+	if !ok {
+		return nil, errors.New("returned incorrect VMSS interface")
+	}
+	if vmss.State == "" {
+		return nil, errors.Errorf("VMSS %s has an empty provisioning state, will reconcile", s.machinePoolScope.Name())
+	}
+
+	if vmss.State == infrav1.VMStateFailed {
+		// If the VMSS failed provisioning, delete it so it can be recreated
+		err = s.virtualMachinesScaleSetSvc.Delete(s.clusterScope.Context, vmssSpec)
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to delete machine pool")
+		}
+		return nil, errors.Errorf("VMSS %s was deleted, retry creating in next reconcile", s.machinePoolScope.Name())
+	}
+
+	if vmss.State != infrav1.VMStateSucceeded {
+		return nil, errors.Errorf("VMSS %s is still in provisioning state %s, will reconcile", s.machinePoolScope.Name(), vmss.State)
+	}
+
+	return vmss, nil
+}
+
+// Delete reconciles all the services in a predetermined order
+func (s *azureMachinePoolService) Delete() error {
+	vmssSpec := &scalesets.Spec{
+		Name: s.machinePoolScope.Name(),
+	}
+
+	err := s.virtualMachinesScaleSetSvc.Delete(s.clusterScope.Context, vmssSpec)
+	if err != nil {
+		return errors.Wrapf(err, "failed to delete machine pool")
+	}
+
+	return nil
+}
+
+// Get fetches a VMSS if it exists
+func (s *azureMachinePoolService) Get() (*infrav1exp.VMSS, error) {
+	vmssSpec := &scalesets.Spec{
+		Name: s.machinePoolScope.Name(),
+	}
+
+	vmss, err := s.virtualMachinesScaleSetSvc.Get(s.clusterScope.Context, vmssSpec)
+	if err != nil && !azure.ResourceNotFound(err) {
+		return nil, errors.Wrapf(err, "failed to fetch machine pool")
+	}
+
+	if err != nil && azure.ResourceNotFound(err) {
+		return nil, nil
+	}
+
+	return vmss.(*infrav1exp.VMSS), nil
+}
+
+// getOwnerMachinePool returns the MachinePool object owning the current resource.
+func getOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*capiv1exp.MachinePool, error) {
+	for _, ref := range obj.OwnerReferences {
+		if ref.Kind == "MachinePool" && ref.APIVersion == capiv1exp.GroupVersion.String() {
+			return getMachineByName(ctx, c, obj.Namespace, ref.Name)
+		}
+	}
+	return nil, nil
+}
+
+// getMachineByName finds and returns a MachinePool object using the specified params.
+func getMachineByName(ctx context.Context, c client.Client, namespace, name string) (*capiv1exp.MachinePool, error) {
+	m := &capiv1exp.MachinePool{}
+	key := client.ObjectKey{Name: name, Namespace: namespace}
+	if err := c.Get(ctx, key, m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+// getVMImage picks the image from the machine pool configuration, or returns a default one.
+func getVMImage(scope *infrav1expscope.MachinePoolScope) (*infrav1.Image, error) {
+	// Use custom Marketplace image, Image ID or a Shared Image Gallery image if provided
+	if scope.AzureMachinePool.Spec.Template.Image != nil {
+		return scope.AzureMachinePool.Spec.Template.Image, nil
+	}
+	scope.Info("No image specified for machine pool, using default", "machinePool", scope.AzureMachinePool.GetName())
+	return azure.GetDefaultUbuntuImage(to.String(scope.MachinePool.Spec.Template.Spec.Version))
+}
diff --git a/exp/controllers/azuremachinepool_controller_test.go b/exp/controllers/azuremachinepool_controller_test.go
new file mode 100644
index 00000000000..8a8cc776971
--- /dev/null
+++ b/exp/controllers/azuremachinepool_controller_test.go
@@ -0,0 +1,53 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controllers
+
+import (
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
"github.com/onsi/gomega" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + + infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha3" +) + +var _ = Describe("AzureMachinePoolReconciler", func() { + BeforeEach(func() {}) + AfterEach(func() {}) + + Context("Reconcile an AzureMachinePool", func() { + It("should not error with minimal set up", func() { + reconciler := &AzureMachinePoolReconciler{ + Client: k8sClient, + Log: log.Log, + } + By("Calling reconcile") + instance := &infrav1exp.AzureMachinePool{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}} + result, err := reconciler.Reconcile(ctrl.Request{ + NamespacedName: client.ObjectKey{ + Namespace: instance.Namespace, + Name: instance.Name, + }, + }) + Expect(err).To(BeNil()) + Expect(result.RequeueAfter).To(BeZero()) + }) + }) +}) diff --git a/exp/controllers/azuremachinepool_controller_unit_test.go b/exp/controllers/azuremachinepool_controller_unit_test.go new file mode 100644 index 00000000000..8d0303bc7c4 --- /dev/null +++ b/exp/controllers/azuremachinepool_controller_unit_test.go @@ -0,0 +1,308 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllers + +import ( + "testing" + + "github.com/golang/mock/gomock" + "github.com/onsi/gomega" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" + clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1alpha3" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha3" + infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha3" + "sigs.k8s.io/cluster-api-provider-azure/internal/test/matchers" + "sigs.k8s.io/cluster-api-provider-azure/internal/test/mock_log" +) + +func Test_machinePoolToInfrastructureMapFunc(t *testing.T) { + cases := []struct { + Name string + Setup func(logMock *mock_log.MockLogger) + MapObjectFactory func(*gomega.GomegaWithT) handler.MapObject + Expect func(*gomega.GomegaWithT, []reconcile.Request) + }{ + { + Name: "MachinePoolToAzureMachinePool", + MapObjectFactory: func(g *gomega.GomegaWithT) handler.MapObject { + return handler.MapObject{ + Object: newMachinePoolWithInfrastructureRef("azureCluster", "machinePool"), + } + }, + Expect: func(g *gomega.GomegaWithT, reqs []reconcile.Request) { + g.Expect(reqs).To(gomega.HaveLen(1)) + g.Expect(reqs[0]).To(gomega.Equal(reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: "azuremachinePool", + Namespace: "default", + }, + })) + }, + }, + { + Name: "MachinePoolWithoutMatchingInfraRef", + MapObjectFactory: func(g *gomega.GomegaWithT) handler.MapObject { + return handler.MapObject{ + Object: newMachinePool("azureCluster", "machinePool"), + } + }, + Setup: func(logMock *mock_log.MockLogger) { + ampGK := infrav1exp.GroupVersion.WithKind("AzureMachinePool").GroupKind() + logMock.EXPECT().Info("gk does not match", "gk", ampGK, "infraGK", gomock.Any()) + }, + Expect: func(g *gomega.GomegaWithT, reqs []reconcile.Request) { + g.Expect(reqs).To(gomega.HaveLen(0)) + }, + }, + { + Name: "NotAMachinePool", + MapObjectFactory: func(g *gomega.GomegaWithT) handler.MapObject { + return handler.MapObject{ + Object: newCluster("azureCluster"), + } + }, + Setup: func(logMock *mock_log.MockLogger) { + logMock.EXPECT().Info("attempt to map incorrect type", "type", "*v1alpha3.Cluster") + }, + Expect: func(g *gomega.GomegaWithT, reqs []reconcile.Request) { + g.Expect(reqs).To(gomega.HaveLen(0)) + }, + }, + } + + for _, c := range cases { + c := c + t.Run(c.Name, func(t *testing.T) { + t.Parallel() + g := gomega.NewGomegaWithT(t) + log := mock_log.NewMockLogger(gomock.NewController(t)) + if c.Setup != nil { + c.Setup(log) + } + f := machinePoolToInfrastructureMapFunc(infrav1exp.GroupVersion.WithKind("AzureMachinePool"), log) + reqs := f(c.MapObjectFactory(g)) + c.Expect(g, reqs) + }) + } +} + +func Test_azureClusterToAzureMachinePoolsFunc(t *testing.T) { + cases := []struct { + Name string + Setup func(*gomega.GomegaWithT, *testing.T) (*mock_log.MockLogger, client.Client) + MapObjectFactory func(*gomega.GomegaWithT) handler.MapObject + Expect func(*gomega.GomegaWithT, []reconcile.Request) + }{ + { + Name: "NotAnAzureCluster", + MapObjectFactory: func(g *gomega.GomegaWithT) handler.MapObject { + return handler.MapObject{ + Object: newMachinePool("foo", "bar"), + } + }, + Setup: func(g *gomega.GomegaWithT, t *testing.T) (*mock_log.MockLogger, client.Client) { + log := 
mock_log.NewMockLogger(gomock.NewController(t))
+				kClient := fake.NewFakeClientWithScheme(newScheme(g))
+				log.EXPECT().Error(matchers.ErrStrEq("expected an AzureCluster but got a *v1alpha3.MachinePool"), "failed to get AzureCluster")
+				return log, kClient
+			},
+			Expect: func(g *gomega.GomegaWithT, reqs []reconcile.Request) {
+				g.Expect(reqs).To(gomega.HaveLen(0))
+			},
+		},
+		{
+			Name: "AzureClusterDoesNotExist",
+			MapObjectFactory: func(g *gomega.GomegaWithT) handler.MapObject {
+				return handler.MapObject{
+					Object: newAzureCluster("foo"),
+				}
+			},
+			Setup: func(g *gomega.GomegaWithT, t *testing.T) (*mock_log.MockLogger, client.Client) {
+				log := mock_log.NewMockLogger(gomock.NewController(t))
+				logWithValues := mock_log.NewMockLogger(gomock.NewController(t))
+				kClient := fake.NewFakeClientWithScheme(newScheme(g))
+				log.EXPECT().WithValues("AzureCluster", "azurefoo", "Namespace", "default").Return(logWithValues)
+				logWithValues.EXPECT().Info("owning cluster not found")
+				return log, kClient
+			},
+			Expect: func(g *gomega.GomegaWithT, reqs []reconcile.Request) {
+				g.Expect(reqs).To(gomega.HaveLen(0))
+			},
+		},
+		{
+			Name: "AzureClusterExistsButDoesNotHaveMachinePools",
+			MapObjectFactory: func(g *gomega.GomegaWithT) handler.MapObject {
+				return handler.MapObject{
+					Object: newAzureCluster("foo"),
+				}
+			},
+			Setup: func(g *gomega.GomegaWithT, t *testing.T) (*mock_log.MockLogger, client.Client) {
+				log := mock_log.NewMockLogger(gomock.NewController(t))
+				logWithValues := mock_log.NewMockLogger(gomock.NewController(t))
+				const clusterName = "foo"
+				initObj := []runtime.Object{
+					newCluster(clusterName),
+					newAzureCluster(clusterName),
+				}
+				kClient := fake.NewFakeClientWithScheme(newScheme(g), initObj...)
+				log.EXPECT().WithValues("AzureCluster", "azurefoo", "Namespace", "default").Return(logWithValues)
+				return log, kClient
+			},
+			Expect: func(g *gomega.GomegaWithT, reqs []reconcile.Request) {
+				g.Expect(reqs).To(gomega.HaveLen(0))
+			},
+		},
+		{
+			Name: "AzureClusterExistsWithMachinePoolsButNoInfraRefs",
+			MapObjectFactory: func(g *gomega.GomegaWithT) handler.MapObject {
+				return handler.MapObject{
+					Object: newAzureCluster("foo"),
+				}
+			},
+			Setup: func(g *gomega.GomegaWithT, t *testing.T) (*mock_log.MockLogger, client.Client) {
+				log := mock_log.NewMockLogger(gomock.NewController(t))
+				logWithValues := mock_log.NewMockLogger(gomock.NewController(t))
+				const clusterName = "foo"
+				initObj := []runtime.Object{
+					newCluster(clusterName),
+					newAzureCluster(clusterName),
+					newMachinePool(clusterName, "pool1"),
+					newMachinePool(clusterName, "pool2"),
+				}
+				kClient := fake.NewFakeClientWithScheme(newScheme(g), initObj...)
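+				// The mapper is expected to log with the AzureCluster's name and
+				// namespace; none of the seeded MachinePools carries an
+				// infrastructureRef, so no requests should be produced.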
+ log.EXPECT().WithValues("AzureCluster", "azurefoo", "Namespace", "default").Return(logWithValues) + return log, kClient + }, + Expect: func(g *gomega.GomegaWithT, reqs []reconcile.Request) { + g.Expect(reqs).To(gomega.HaveLen(0)) + }, + }, + { + Name: "AzureClusterExistsWithMachinePoolsWithOneInfraRefs", + MapObjectFactory: func(g *gomega.GomegaWithT) handler.MapObject { + return handler.MapObject{ + Object: newAzureCluster("foo"), + } + }, + Setup: func(g *gomega.GomegaWithT, t *testing.T) (*mock_log.MockLogger, client.Client) { + log := mock_log.NewMockLogger(gomock.NewController(t)) + logWithValues := mock_log.NewMockLogger(gomock.NewController(t)) + const clusterName = "foo" + initObj := []runtime.Object{ + newCluster(clusterName), + newAzureCluster(clusterName), + newMachinePool(clusterName, "pool1"), + newMachinePoolWithInfrastructureRef(clusterName, "pool2"), + } + kClient := fake.NewFakeClientWithScheme(newScheme(g), initObj...) + log.EXPECT().WithValues("AzureCluster", "azurefoo", "Namespace", "default").Return(logWithValues) + return log, kClient + }, + Expect: func(g *gomega.GomegaWithT, reqs []reconcile.Request) { + g.Expect(reqs).To(gomega.HaveLen(1)) + g.Expect(reqs[0]).To(gomega.Equal(reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: "azurepool2", + Namespace: "default", + }, + })) + }, + }, + } + + for _, c := range cases { + c := c + t.Run(c.Name, func(t *testing.T) { + t.Parallel() + g := gomega.NewGomegaWithT(t) + log, kClient := c.Setup(g, t) + f := azureClusterToAzureMachinePoolsFunc(kClient, log) + reqs := f(c.MapObjectFactory(g)) + c.Expect(g, reqs) + }) + } +} + +func newScheme(g *gomega.GomegaWithT) *runtime.Scheme { + scheme := runtime.NewScheme() + for _, f := range []func(*runtime.Scheme) error{ + clusterv1.AddToScheme, + clusterv1exp.AddToScheme, + infrav1.AddToScheme, + infrav1exp.AddToScheme, + } { + g.Expect(f(scheme)).ToNot(gomega.HaveOccurred()) + } + return scheme +} + +func newMachinePool(clusterName, poolName string) *clusterv1exp.MachinePool { + return &clusterv1exp.MachinePool{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + clusterv1.ClusterLabelName: clusterName, + }, + Name: poolName, + Namespace: "default", + }, + } +} + +func newMachinePoolWithInfrastructureRef(clusterName, poolName string) *clusterv1exp.MachinePool { + m := newMachinePool(clusterName, poolName) + m.Spec.Template.Spec.InfrastructureRef = v1.ObjectReference{ + Kind: "AzureMachinePool", + Namespace: m.Namespace, + Name: "azure" + poolName, + APIVersion: infrav1exp.GroupVersion.String(), + } + return m +} + +func newAzureCluster(clusterName string) *infrav1.AzureCluster { + return &infrav1.AzureCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "azure" + clusterName, + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: clusterv1.GroupVersion.String(), + Kind: "Cluster", + Name: clusterName, + }, + }, + }, + } +} + +func newCluster(name string) *clusterv1.Cluster { + return &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: "default", + }, + } +} diff --git a/exp/controllers/suite_test.go b/exp/controllers/suite_test.go new file mode 100644 index 00000000000..30641a6f7b6 --- /dev/null +++ b/exp/controllers/suite_test.go @@ -0,0 +1,92 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "path/filepath" + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "k8s.io/klog" + "k8s.io/klog/klogr" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" + + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha3" + infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha3" + // +kubebuilder:scaffold:imports +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. + +var cfg *rest.Config +var k8sClient client.Client +var testEnv *envtest.Environment + +func init() { + klog.InitFlags(nil) + klog.SetOutput(GinkgoWriter) + logf.SetLogger(klogr.New()) +} + +func TestAPIs(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecsWithDefaultAndCustomReporters(t, + "Controller Suite", + []Reporter{printer.NewlineReporter{}}) +} + +var _ = BeforeSuite(func(done Done) { + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{ + filepath.Join("..", "..", "config", "crd", "bases"), + }, + } + + var err error + cfg, err = testEnv.Start() + Expect(err).ToNot(HaveOccurred()) + Expect(cfg).ToNot(BeNil()) + + Expect(clusterv1.AddToScheme(scheme.Scheme)).To(Succeed()) + Expect(infrav1.AddToScheme(scheme.Scheme)).To(Succeed()) + Expect(infrav1exp.AddToScheme(scheme.Scheme)).To(Succeed()) + + // +kubebuilder:scaffold:scheme + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).ToNot(HaveOccurred()) + Expect(k8sClient).ToNot(BeNil()) + + close(done) +}, 60) + +var _ = AfterSuite(func() { + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).ToNot(HaveOccurred()) +}) diff --git a/exp/hack/boilerplate.go.txt b/exp/hack/boilerplate.go.txt new file mode 100644 index 00000000000..0926592d389 --- /dev/null +++ b/exp/hack/boilerplate.go.txt @@ -0,0 +1,15 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ diff --git a/go.mod b/go.mod index 357fd76ddf2..9c58f3394e6 100644 --- a/go.mod +++ b/go.mod @@ -11,6 +11,7 @@ require ( github.com/blang/semver v3.5.1+incompatible github.com/go-logr/logr v0.1.0 github.com/golang/mock v1.4.3 + github.com/google/go-cmp v0.4.0 github.com/google/gofuzz v1.1.0 github.com/onsi/ginkgo v1.12.0 github.com/onsi/gomega v1.9.0 diff --git a/internal/test/matchers/matchers.go b/internal/test/matchers/matchers.go new file mode 100644 index 00000000000..a6cd946f4dd --- /dev/null +++ b/internal/test/matchers/matchers.go @@ -0,0 +1,76 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package matchers + +import ( + "fmt" + + "github.com/golang/mock/gomock" + "github.com/google/go-cmp/cmp" +) + +type ( + cmpMatcher struct { + x interface{} + diff string + } + + errStrEq struct { + expected string + actual string + } +) + +// DiffEq will verify cmp.Diff(expected, actual) == "" using github.com/google/go-cmp/cmp +func DiffEq(x interface{}) gomock.Matcher { + return &cmpMatcher{ + x: x, + } +} + +func (c *cmpMatcher) Matches(x interface{}) bool { + c.diff = cmp.Diff(x, c.x) + return c.diff == "" +} + +func (c *cmpMatcher) String() string { + want := fmt.Sprintf("is equal to %v", c.x) + if c.diff != "" { + want = fmt.Sprintf("%s, but difference is %s", want, c.diff) + } + return want +} + +// ErrStrEq will verify the string matches error.Error() +func ErrStrEq(expected string) gomock.Matcher { + return &errStrEq{ + expected: expected, + } +} + +func (e *errStrEq) Matches(y interface{}) bool { + err, ok := y.(error) + if !ok { + return false + } + e.actual = err.Error() + return e.expected == e.actual +} + +func (e *errStrEq) String() string { + return fmt.Sprintf("error.Error() %q, but got %q", e.expected, e.actual) +} diff --git a/internal/test/mock_log/doc.go b/internal/test/mock_log/doc.go new file mode 100644 index 00000000000..f4332233679 --- /dev/null +++ b/internal/test/mock_log/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Run go generate to regenerate this mock. 
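+// The first directive regenerates the logr.Logger mock with mockgen; the
+// second prepends the generated-code boilerplate header to the result.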
+//go:generate ../../../hack/tools/bin/mockgen -destination log_mock.go -package mock_log github.com/go-logr/logr Logger +//go:generate /usr/bin/env bash -c "cat ../../../hack/boilerplate/boilerplate.generatego.txt log_mock.go > _log_mock.go && mv _log_mock.go log_mock.go" +package mock_log //nolint diff --git a/internal/test/mock_log/log_mock.go b/internal/test/mock_log/log_mock.go new file mode 100644 index 00000000000..b86bf4e1add --- /dev/null +++ b/internal/test/mock_log/log_mock.go @@ -0,0 +1,144 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/go-logr/logr (interfaces: Logger) + +// Package mock_log is a generated GoMock package. +package mock_log + +import ( + logr "github.com/go-logr/logr" + gomock "github.com/golang/mock/gomock" + reflect "reflect" +) + +// MockLogger is a mock of Logger interface +type MockLogger struct { + ctrl *gomock.Controller + recorder *MockLoggerMockRecorder +} + +// MockLoggerMockRecorder is the mock recorder for MockLogger +type MockLoggerMockRecorder struct { + mock *MockLogger +} + +// NewMockLogger creates a new mock instance +func NewMockLogger(ctrl *gomock.Controller) *MockLogger { + mock := &MockLogger{ctrl: ctrl} + mock.recorder = &MockLoggerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockLogger) EXPECT() *MockLoggerMockRecorder { + return m.recorder +} + +// Enabled mocks base method +func (m *MockLogger) Enabled() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Enabled") + ret0, _ := ret[0].(bool) + return ret0 +} + +// Enabled indicates an expected call of Enabled +func (mr *MockLoggerMockRecorder) Enabled() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Enabled", reflect.TypeOf((*MockLogger)(nil).Enabled)) +} + +// Error mocks base method +func (m *MockLogger) Error(arg0 error, arg1 string, arg2 ...interface{}) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + m.ctrl.Call(m, "Error", varargs...) +} + +// Error indicates an expected call of Error +func (mr *MockLoggerMockRecorder) Error(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Error", reflect.TypeOf((*MockLogger)(nil).Error), varargs...) +} + +// Info mocks base method +func (m *MockLogger) Info(arg0 string, arg1 ...interface{}) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0} + for _, a := range arg1 { + varargs = append(varargs, a) + } + m.ctrl.Call(m, "Info", varargs...) +} + +// Info indicates an expected call of Info +func (mr *MockLoggerMockRecorder) Info(arg0 interface{}, arg1 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0}, arg1...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Info", reflect.TypeOf((*MockLogger)(nil).Info), varargs...) +} + +// V mocks base method +func (m *MockLogger) V(arg0 int) logr.InfoLogger { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "V", arg0) + ret0, _ := ret[0].(logr.InfoLogger) + return ret0 +} + +// V indicates an expected call of V +func (mr *MockLoggerMockRecorder) V(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "V", reflect.TypeOf((*MockLogger)(nil).V), arg0) +} + +// WithName mocks base method +func (m *MockLogger) WithName(arg0 string) logr.Logger { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WithName", arg0) + ret0, _ := ret[0].(logr.Logger) + return ret0 +} + +// WithName indicates an expected call of WithName +func (mr *MockLoggerMockRecorder) WithName(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithName", reflect.TypeOf((*MockLogger)(nil).WithName), arg0) +} + +// WithValues mocks base method +func (m *MockLogger) WithValues(arg0 ...interface{}) logr.Logger { + m.ctrl.T.Helper() + varargs := []interface{}{} + for _, a := range arg0 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "WithValues", varargs...) + ret0, _ := ret[0].(logr.Logger) + return ret0 +} + +// WithValues indicates an expected call of WithValues +func (mr *MockLoggerMockRecorder) WithValues(arg0 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithValues", reflect.TypeOf((*MockLogger)(nil).WithValues), arg0...) +} diff --git a/main.go b/main.go index e9af9fe8929..3a6fb0aa5c6 100644 --- a/main.go +++ b/main.go @@ -18,6 +18,7 @@ package main import ( "flag" + "fmt" "net/http" _ "net/http/pprof" //nolint "os" @@ -31,11 +32,15 @@ import ( cgrecord "k8s.io/client-go/tools/record" "k8s.io/klog" "k8s.io/klog/klogr" + infrav1alpha2 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha2" - infrastructurev1alpha3 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha3" infrav1alpha3 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha3" "sigs.k8s.io/cluster-api-provider-azure/controllers" + infrav1alpha3exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha3" + infrav1controllersexp "sigs.k8s.io/cluster-api-provider-azure/exp/controllers" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" + clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1alpha3" "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/util/record" ctrl "sigs.k8s.io/controller-runtime" @@ -54,21 +59,23 @@ func init() { _ = clientgoscheme.AddToScheme(scheme) _ = infrav1alpha2.AddToScheme(scheme) _ = infrav1alpha3.AddToScheme(scheme) + _ = infrav1alpha3exp.AddToScheme(scheme) _ = clusterv1.AddToScheme(scheme) - _ = infrastructurev1alpha3.AddToScheme(scheme) + _ = clusterv1exp.AddToScheme(scheme) // +kubebuilder:scaffold:scheme } var ( - metricsAddr string - enableLeaderElection bool - watchNamespace string - profilerAddress string - azureClusterConcurrency int - azureMachineConcurrency int - syncPeriod time.Duration - healthAddr string - webhookPort int + metricsAddr string + enableLeaderElection bool + watchNamespace string + profilerAddress string + azureClusterConcurrency int + azureMachineConcurrency int + azureMachinePoolConcurrency int + syncPeriod time.Duration + healthAddr string + webhookPort int ) func InitFlags(fs *pflag.FlagSet) { @@ -112,6 +119,11 @@ func InitFlags(fs *pflag.FlagSet) { "Number of AzureMachines to 
process simultaneously",
	)
+
+	fs.IntVar(&azureMachinePoolConcurrency,
+		"azuremachinepool-concurrency",
+		10,
+		"Number of AzureMachinePools to process simultaneously")

	fs.DurationVar(&syncPeriod,
		"sync-period",
		10*time.Minute,
@@ -193,19 +205,39 @@ func main() {
 		setupLog.Error(err, "unable to create controller", "controller", "AzureCluster")
 		os.Exit(1)
 	}
+
+	// just use CAPI MachinePool feature flag rather than create a new one
+	setupLog.V(1).Info(fmt.Sprintf("feature gates: %+v", feature.Gates))
+	if feature.Gates.Enabled(feature.MachinePool) {
+		if err = (&infrav1controllersexp.AzureMachinePoolReconciler{
+			Client:   mgr.GetClient(),
+			Log:      ctrl.Log.WithName("controllers").WithName("AzureMachinePool"),
+			Recorder: mgr.GetEventRecorderFor("azuremachinepool-reconciler"),
+		}).SetupWithManager(mgr, controller.Options{MaxConcurrentReconciles: azureMachinePoolConcurrency}); err != nil {
+			setupLog.Error(err, "unable to create controller", "controller", "AzureMachinePool")
+			os.Exit(1)
+		}
+	}
 } else {
-	if err = (&infrastructurev1alpha3.AzureCluster{}).SetupWebhookWithManager(mgr); err != nil {
+	if err = (&infrav1alpha3.AzureCluster{}).SetupWebhookWithManager(mgr); err != nil {
 		setupLog.Error(err, "unable to create webhook", "webhook", "AzureCluster")
 		os.Exit(1)
 	}
-	if err = (&infrastructurev1alpha3.AzureMachine{}).SetupWebhookWithManager(mgr); err != nil {
+	if err = (&infrav1alpha3.AzureMachine{}).SetupWebhookWithManager(mgr); err != nil {
 		setupLog.Error(err, "unable to create webhook", "webhook", "AzureMachine")
 		os.Exit(1)
 	}
-	if err = (&infrastructurev1alpha3.AzureMachineTemplate{}).SetupWebhookWithManager(mgr); err != nil {
+	if err = (&infrav1alpha3.AzureMachineTemplate{}).SetupWebhookWithManager(mgr); err != nil {
 		setupLog.Error(err, "unable to create webhook", "webhook", "AzureMachineTemplate")
 		os.Exit(1)
 	}
+	// just use CAPI MachinePool feature flag rather than create a new one
+	if feature.Gates.Enabled(feature.MachinePool) {
+		if err = (&infrav1alpha3exp.AzureMachinePool{}).SetupWebhookWithManager(mgr); err != nil {
+			setupLog.Error(err, "unable to create webhook", "webhook", "AzureMachinePool")
+			os.Exit(1)
+		}
+	}
 }
 // +kubebuilder:scaffold:builder
diff --git a/templates/cluster-template-machinepool.yaml b/templates/cluster-template-machinepool.yaml
new file mode 100644
index 00000000000..26c33a213cb
--- /dev/null
+++ b/templates/cluster-template-machinepool.yaml
@@ -0,0 +1,193 @@
+apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
+kind: KubeadmConfig
+metadata:
+  name: ${CLUSTER_NAME}-mp-0
+spec:
+  files:
+  - content: |
+      {
+        "cloud": "AzurePublicCloud",
+        "tenantId": "${AZURE_TENANT_ID}",
+        "subscriptionId": "${AZURE_SUBSCRIPTION_ID}",
+        "aadClientId": "${AZURE_CLIENT_ID}",
+        "aadClientSecret": "${AZURE_CLIENT_SECRET}",
+        "resourceGroup": "${CLUSTER_NAME}",
+        "securityGroupName": "${CLUSTER_NAME}-node-nsg",
+        "location": "${AZURE_LOCATION}",
+        "vmType": "vmss",
+        "vnetName": "${CLUSTER_NAME}-vnet",
+        "vnetResourceGroup": "${CLUSTER_NAME}",
+        "subnetName": "${CLUSTER_NAME}-node-subnet",
+        "routeTableName": "${CLUSTER_NAME}-node-routetable",
+        "loadBalancerSku": "standard",
+        "maximumLoadBalancerRuleCount": 250,
+        "useManagedIdentityExtension": false,
+        "useInstanceMetadata": true
+      }
+    owner: root:root
+    path: /etc/kubernetes/azure.json
+    permissions: "0644"
+  joinConfiguration:
+    nodeRegistration:
+      kubeletExtraArgs:
+        cloud-config: /etc/kubernetes/azure.json
+        cloud-provider: azure
+      name: '{{ ds.meta_data["local_hostname"] }}'
+  useExperimentalRetryJoin: true
+---
+apiVersion: 
cluster.x-k8s.io/v1alpha3 +kind: Cluster +metadata: + name: ${CLUSTER_NAME} + namespace: default +spec: + clusterNetwork: + pods: + cidrBlocks: + - 192.168.0.0/16 + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 + kind: KubeadmControlPlane + name: ${CLUSTER_NAME}-control-plane + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: AzureCluster + name: ${CLUSTER_NAME} +--- +apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 +kind: KubeadmControlPlane +metadata: + name: ${CLUSTER_NAME}-control-plane + namespace: default +spec: + infrastructureTemplate: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: AzureMachineTemplate + name: ${CLUSTER_NAME}-control-plane + kubeadmConfigSpec: + clusterConfiguration: + apiServer: + extraArgs: + cloud-config: /etc/kubernetes/azure.json + cloud-provider: azure + extraVolumes: + - hostPath: /etc/kubernetes/azure.json + mountPath: /etc/kubernetes/azure.json + name: cloud-config + readOnly: true + timeoutForControlPlane: 20m + controllerManager: + extraArgs: + allocate-node-cidrs: "false" + cloud-config: /etc/kubernetes/azure.json + cloud-provider: azure + extraVolumes: + - hostPath: /etc/kubernetes/azure.json + mountPath: /etc/kubernetes/azure.json + name: cloud-config + readOnly: true + files: + - content: | + { + "cloud": "AzurePublicCloud", + "tenantId": "${AZURE_TENANT_ID}", + "subscriptionId": "${AZURE_SUBSCRIPTION_ID}", + "aadClientId": "${AZURE_CLIENT_ID}", + "aadClientSecret": "${AZURE_CLIENT_SECRET}", + "resourceGroup": "${AZURE_RESOURCE_GROUP}", + "securityGroupName": "${CLUSTER_NAME}-node-nsg", + "location": "${AZURE_LOCATION}", + "vmType": "standard", + "vnetName": "${CLUSTER_NAME}-vnet", + "vnetResourceGroup": "${CLUSTER_NAME}", + "subnetName": "${CLUSTER_NAME}-node-subnet", + "routeTableName": "${CLUSTER_NAME}-node-routetable", + "userAssignedID": "${CLUSTER_NAME}", + "loadBalancerSku": "standard", + "maximumLoadBalancerRuleCount": 250, + "useManagedIdentityExtension": false, + "useInstanceMetadata": true + } + owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" + initConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-config: /etc/kubernetes/azure.json + cloud-provider: azure + name: '{{ ds.meta_data["local_hostname"] }}' + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-config: /etc/kubernetes/azure.json + cloud-provider: azure + name: '{{ ds.meta_data["local_hostname"] }}' + useExperimentalRetryJoin: true + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + version: ${KUBERNETES_VERSION} +--- +apiVersion: exp.cluster.x-k8s.io/v1alpha3 +kind: MachinePool +metadata: + name: ${CLUSTER_NAME}-mp-0 +spec: + clusterName: ${CLUSTER_NAME} + replicas: ${WORKER_MACHINE_COUNT} + template: + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 + kind: KubeadmConfig + name: ${CLUSTER_NAME}-mp-0 + clusterName: ${CLUSTER_NAME} + infrastructureRef: + apiVersion: exp.infrastructure.cluster.x-k8s.io/v1alpha3 + kind: AzureMachinePool + name: ${CLUSTER_NAME}-mp-0 + version: ${KUBERNETES_VERSION} +--- +apiVersion: exp.infrastructure.cluster.x-k8s.io/v1alpha3 +kind: AzureMachinePool +metadata: + name: ${CLUSTER_NAME}-mp-0 +spec: + location: ${AZURE_LOCATION} + template: + osDisk: + diskSizeGB: 30 + managedDisk: + storageAccountType: Premium_LRS + osType: Linux + sshPublicKey: ${AZURE_SSH_PUBLIC_KEY} + vmSize: ${AZURE_NODE_MACHINE_TYPE} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +kind: AzureCluster +metadata: + 
name: ${CLUSTER_NAME} + namespace: default +spec: + location: ${AZURE_LOCATION} + networkSpec: + vnet: + name: ${AZURE_VNET_NAME} + resourceGroup: ${AZURE_RESOURCE_GROUP} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +kind: AzureMachineTemplate +metadata: + name: ${CLUSTER_NAME}-control-plane + namespace: default +spec: + template: + spec: + location: ${AZURE_LOCATION} + osDisk: + diskSizeGB: 128 + managedDisk: + storageAccountType: Premium_LRS + osType: Linux + sshPublicKey: ${AZURE_SSH_PUBLIC_KEY} + vmSize: ${AZURE_CONTROL_PLANE_MACHINE_TYPE} diff --git a/templates/flavors/machinepool/kustomization.yaml b/templates/flavors/machinepool/kustomization.yaml new file mode 100644 index 00000000000..63d889cfb0e --- /dev/null +++ b/templates/flavors/machinepool/kustomization.yaml @@ -0,0 +1,3 @@ +resources: + - ../base + - machine-pool-deployment.yaml \ No newline at end of file diff --git a/templates/flavors/machinepool/machine-pool-deployment.yaml b/templates/flavors/machinepool/machine-pool-deployment.yaml new file mode 100644 index 00000000000..6087291c6b8 --- /dev/null +++ b/templates/flavors/machinepool/machine-pool-deployment.yaml @@ -0,0 +1,73 @@ +--- +apiVersion: exp.cluster.x-k8s.io/v1alpha3 +kind: MachinePool +metadata: + name: "${CLUSTER_NAME}-mp-0" +spec: + clusterName: "${CLUSTER_NAME}" + replicas: ${WORKER_MACHINE_COUNT} + template: + spec: + clusterName: "${CLUSTER_NAME}" + version: "${KUBERNETES_VERSION}" + bootstrap: + configRef: + name: "${CLUSTER_NAME}-mp-0" + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 + kind: KubeadmConfig + infrastructureRef: + name: "${CLUSTER_NAME}-mp-0" + apiVersion: exp.infrastructure.cluster.x-k8s.io/v1alpha3 + kind: AzureMachinePool +--- +apiVersion: exp.infrastructure.cluster.x-k8s.io/v1alpha3 +kind: AzureMachinePool +metadata: + name: "${CLUSTER_NAME}-mp-0" +spec: + location: ${AZURE_LOCATION} + template: + vmSize: ${AZURE_NODE_MACHINE_TYPE} + osDisk: + osType: "Linux" + diskSizeGB: 30 + managedDisk: + storageAccountType: "Premium_LRS" + sshPublicKey: ${AZURE_SSH_PUBLIC_KEY} +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 +kind: KubeadmConfig +metadata: + name: "${CLUSTER_NAME}-mp-0" +spec: + useExperimentalRetryJoin: true + joinConfiguration: + nodeRegistration: + name: '{{ ds.meta_data["local_hostname"] }}' + kubeletExtraArgs: + cloud-provider: azure + cloud-config: /etc/kubernetes/azure.json + files: + - path: /etc/kubernetes/azure.json + owner: "root:root" + permissions: "0644" + content: | + { + "cloud": "AzurePublicCloud", + "tenantId": "${AZURE_TENANT_ID}", + "subscriptionId": "${AZURE_SUBSCRIPTION_ID}", + "aadClientId": "${AZURE_CLIENT_ID}", + "aadClientSecret": "${AZURE_CLIENT_SECRET}", + "resourceGroup": "${CLUSTER_NAME}", + "securityGroupName": "${CLUSTER_NAME}-node-nsg", + "location": "${AZURE_LOCATION}", + "vmType": "vmss", + "vnetName": "${CLUSTER_NAME}-vnet", + "vnetResourceGroup": "${CLUSTER_NAME}", + "subnetName": "${CLUSTER_NAME}-node-subnet", + "routeTableName": "${CLUSTER_NAME}-node-routetable", + "loadBalancerSku": "standard", + "maximumLoadBalancerRuleCount": 250, + "useManagedIdentityExtension": false, + "useInstanceMetadata": true + }
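+# Note: the AzureMachinePool controller and its webhook are registered in
+# main.go only when the CAPI MachinePool feature gate is enabled, so this
+# flavor assumes that gate is turned on.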