diff --git a/.gitattributes b/.gitattributes index 980d8c0774d..f8edb51ef45 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,2 +1,5 @@ # Specify generated cluster templates as generated files **/cluster-template-*.yaml linguist-generated + +*.sh text eol=lf +*.yaml text eol=lf \ No newline at end of file diff --git a/Makefile b/Makefile index b943699b48b..b9bdbb92142 100644 --- a/Makefile +++ b/Makefile @@ -116,6 +116,10 @@ YQ_VER := v4.14.2 YQ_BIN := yq YQ := $(TOOLS_BIN_DIR)/$(YQ_BIN)-$(YQ_VER) +KIND_VER := v0.14.0 +KIND_BIN := kind +KIND := $(TOOLS_BIN_DIR)/$(KIND_BIN)-$(KIND_VER) + KUBE_APISERVER=$(TOOLS_BIN_DIR)/kube-apiserver ETCD=$(TOOLS_BIN_DIR)/etcd @@ -242,10 +246,10 @@ verify-tiltfile: ## Verify Tiltfile format. ##@ Development: .PHONY: install-tools # populate hack/tools/bin -install-tools: $(ENVSUBST) $(KUSTOMIZE) $(KUBECTL) $(HELM) $(GINKGO) +install-tools: $(ENVSUBST) $(KUSTOMIZE) $(KUBECTL) $(HELM) $(GINKGO) $(KIND) .PHONY: create-management-cluster -create-management-cluster: $(KUSTOMIZE) $(ENVSUBST) ## Create a management cluster. +create-management-cluster: $(KUSTOMIZE) $(ENVSUBST) $(KUBECTL) $(KIND) ## Create a management cluster. # Create kind management cluster. $(MAKE) kind-create @@ -255,61 +259,64 @@ create-management-cluster: $(KUSTOMIZE) $(ENVSUBST) ## Create a management clust # Create secret for AzureClusterIdentity ./hack/create-identity-secret.sh + # Create customized cloud provider configs + ./hack/create-custom-cloud-provider-config.sh + # Deploy CAPI - curl --retry $(CURL_RETRIES) -sSL https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.1.4/cluster-api-components.yaml | $(ENVSUBST) | kubectl apply -f - + curl --retry $(CURL_RETRIES) -sSL https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.1.4/cluster-api-components.yaml | $(ENVSUBST) | $(KUBECTL) apply -f - # Deploy CAPZ - kind load docker-image $(CONTROLLER_IMG)-$(ARCH):$(TAG) --name=capz - $(KUSTOMIZE) build config/default | $(ENVSUBST) | kubectl apply -f - + $(KIND) load docker-image $(CONTROLLER_IMG)-$(ARCH):$(TAG) --name=capz + $(KUSTOMIZE) build config/default | $(ENVSUBST) | $(KUBECTL) apply -f - # Wait for CAPI deployments - kubectl wait --for=condition=Available --timeout=5m -n capi-system deployment -l cluster.x-k8s.io/provider=cluster-api - kubectl wait --for=condition=Available --timeout=5m -n capi-kubeadm-bootstrap-system deployment -l cluster.x-k8s.io/provider=bootstrap-kubeadm - kubectl wait --for=condition=Available --timeout=5m -n capi-kubeadm-control-plane-system deployment -l cluster.x-k8s.io/provider=control-plane-kubeadm + $(KUBECTL) wait --for=condition=Available --timeout=5m -n capi-system deployment -l cluster.x-k8s.io/provider=cluster-api + $(KUBECTL) wait --for=condition=Available --timeout=5m -n capi-kubeadm-bootstrap-system deployment -l cluster.x-k8s.io/provider=bootstrap-kubeadm + $(KUBECTL) wait --for=condition=Available --timeout=5m -n capi-kubeadm-control-plane-system deployment -l cluster.x-k8s.io/provider=control-plane-kubeadm # apply CNI ClusterResourceSets source ./scripts/ci-configmap.sh - kubectl apply -f templates/addons/calico-resource-set.yaml + $(KUBECTL) apply -f templates/addons/calico-resource-set.yaml # Wait for CAPZ deployments - kubectl wait --for=condition=Available --timeout=5m -n capz-system deployment -l cluster.x-k8s.io/provider=infrastructure-azure + $(KUBECTL) wait --for=condition=Available --timeout=5m -n capz-system deployment -l cluster.x-k8s.io/provider=infrastructure-azure # required sleep for when creating 
management and workload cluster simultaneously sleep 10 - @echo 'Set kubectl context to the kind management cluster by running "kubectl config set-context kind-capz"' + @echo 'Set kubectl context to the kind management cluster by running "$(KUBECTL) config set-context kind-capz"' .PHONY: create-workload-cluster -create-workload-cluster: $(ENVSUBST) ## Create a workload cluster. +create-workload-cluster: $(ENVSUBST) $(KUBECTL) ## Create a workload cluster. # Create workload Cluster. @if [ -f "$(TEMPLATES_DIR)/$(CLUSTER_TEMPLATE)" ]; then \ - $(ENVSUBST) < "$(TEMPLATES_DIR)/$(CLUSTER_TEMPLATE)" | kubectl apply -f -; \ + $(ENVSUBST) < "$(TEMPLATES_DIR)/$(CLUSTER_TEMPLATE)" | $(KUBECTL) apply -f -; \ elif [ -f "$(CLUSTER_TEMPLATE)" ]; then \ - $(ENVSUBST) < "$(CLUSTER_TEMPLATE)" | kubectl apply -f -; \ + $(ENVSUBST) < "$(CLUSTER_TEMPLATE)" | $(KUBECTL) apply -f -; \ else \ - curl --retry "$(CURL_RETRIES)" "$(CLUSTER_TEMPLATE)" | "$(ENVSUBST)" | kubectl apply -f -; \ + curl --retry "$(CURL_RETRIES)" "$(CLUSTER_TEMPLATE)" | "$(ENVSUBST)" | $(KUBECTL) apply -f -; \ fi # Wait for the kubeconfig to become available. - timeout --foreground 300 bash -c "while ! kubectl get secrets | grep $(CLUSTER_NAME)-kubeconfig; do sleep 1; done" + timeout --foreground 300 bash -c "while ! $(KUBECTL) get secrets | grep $(CLUSTER_NAME)-kubeconfig; do sleep 1; done" # Get kubeconfig and store it locally. - kubectl get secrets $(CLUSTER_NAME)-kubeconfig -o json | jq -r .data.value | base64 --decode > ./kubeconfig - timeout --foreground 600 bash -c "while ! kubectl --kubeconfig=./kubeconfig get nodes | grep control-plane; do sleep 1; done" + $(KUBECTL) get secrets $(CLUSTER_NAME)-kubeconfig -o json | jq -r .data.value | base64 --decode > ./kubeconfig + timeout --foreground 600 bash -c "while ! $(KUBECTL) --kubeconfig=./kubeconfig get nodes | grep control-plane; do sleep 1; done" - @echo 'run "kubectl --kubeconfig=./kubeconfig ..." to work with the new target cluster' + @echo 'run "$(KUBECTL) --kubeconfig=./kubeconfig ..." to work with the new target cluster' .PHONY: create-aks-cluster -create-aks-cluster: $(KUSTOMIZE) $(ENVSUBST) ## Create a aks cluster. +create-aks-cluster: $(KUSTOMIZE) $(ENVSUBST) $(KUBECTL) ## Create an AKS cluster. # Create managed Cluster. - $(ENVSUBST) < $(TEMPLATES_DIR)/$(MANAGED_CLUSTER_TEMPLATE) | kubectl apply -f - + $(ENVSUBST) < $(TEMPLATES_DIR)/$(MANAGED_CLUSTER_TEMPLATE) | $(KUBECTL) apply -f - # Wait for the kubeconfig to become available. - timeout --foreground 300 bash -c "while ! kubectl get secrets | grep $(CLUSTER_NAME)-kubeconfig; do sleep 1; done" + timeout --foreground 300 bash -c "while ! $(KUBECTL) get secrets | grep $(CLUSTER_NAME)-kubeconfig; do sleep 1; done" # Get kubeconfig and store it locally. - kubectl get secrets $(CLUSTER_NAME)-kubeconfig -o json | jq -r .data.value | base64 --decode > ./kubeconfig - timeout --foreground 600 bash -c "while ! kubectl --kubeconfig=./kubeconfig get nodes | grep control-plane; do sleep 1; done" + $(KUBECTL) get secrets $(CLUSTER_NAME)-kubeconfig -o json | jq -r .data.value | base64 --decode > ./kubeconfig + timeout --foreground 600 bash -c "while ! $(KUBECTL) --kubeconfig=./kubeconfig get nodes | grep control-plane; do sleep 1; done" - @echo 'run "kubectl --kubeconfig=./kubeconfig ..." to work with the new target cluster' + @echo 'run "$(KUBECTL) --kubeconfig=./kubeconfig ..."
to work with the new target cluster' .PHONY: create-cluster @@ -321,9 +328,9 @@ create-cluster: ## Create a workload development Kubernetes cluster on Azure in create-workload-cluster .PHONY: delete-workload-cluster -delete-workload-cluster: ## Deletes the example workload Kubernetes cluster. +delete-workload-cluster: $(KUBECTL) ## Deletes the example workload Kubernetes cluster. @echo 'Your Azure resources will now be deleted, this can take up to 20 minutes' - kubectl delete cluster $(CLUSTER_NAME) + $(KUBECTL) delete cluster $(CLUSTER_NAME) ## -------------------------------------- ## Docker @@ -682,12 +689,12 @@ tilt-up: install-tools kind-create ## Start tilt and build kind cluster if neede .PHONY: delete-cluster delete-cluster: delete-workload-cluster ## Deletes the example kind cluster "capz". - kind delete cluster --name=capz + $(KIND) delete cluster --name=capz .PHONY: kind-reset kind-reset: ## Destroys the "capz" and "capz-e2e" kind clusters. - kind delete cluster --name=capz || true - kind delete cluster --name=capz-e2e || true + $(KIND) delete cluster --name=capz || true + $(KIND) delete cluster --name=capz-e2e || true ## -------------------------------------- ## Tooling Binaries @@ -709,6 +716,7 @@ ginkgo: $(GINKGO) ## Build a local copy of ginkgo. kubectl: $(KUBECTL) ## Build a local copy of kubectl. helm: $(HELM) ## Build a local copy of helm. yq: $(YQ) ## Build a local copy of yq. +kind: $(KIND) ## Build a local copy of kind. $(CONVERSION_VERIFIER): go.mod cd $(TOOLS_DIR); go build -tags=tools -o $@ sigs.k8s.io/cluster-api/hack/tools/conversion-verifier @@ -759,6 +767,9 @@ $(HELM): ## Put helm into tools folder. ln -sf $(HELM) $(TOOLS_BIN_DIR)/$(HELM_BIN) rm -f $(TOOLS_BIN_DIR)/get_helm.sh +$(KIND): ## Build kind into tools folder. + GOBIN=$(TOOLS_BIN_DIR) $(GO_INSTALL) sigs.k8s.io/kind $(KIND_BIN) $(KIND_VER) + .PHONY: $(ENVSUBST_BIN) $(ENVSUBST_BIN): $(ENVSUBST) @@ -776,3 +787,6 @@ $(YQ): ## Build yq from tools folder. .PHONY: $(YQ_BIN) $(YQ_BIN): $(YQ) ## Building yq from the tools folder. 
+ +.PHONY: $(KIND_BIN) +$(KIND_BIN): $(KIND) diff --git a/Tiltfile b/Tiltfile index 9e1baecf7bf..0dca46219c6 100644 --- a/Tiltfile +++ b/Tiltfile @@ -3,6 +3,7 @@ envsubst_cmd = "./hack/tools/bin/envsubst" kubectl_cmd = "./hack/tools/bin/kubectl" helm_cmd = "./hack/tools/bin/helm" +kind_cmd = "./hack/tools/bin/kind" tools_bin = "./hack/tools/bin" #Add tools to path @@ -145,7 +146,7 @@ def observability(): ], )) - internal_kubeconfig = str(local("kind get kubeconfig --name ${KIND_CLUSTER_NAME:-capz} --internal")) + internal_kubeconfig = str(local(kind_cmd + " get kubeconfig --name ${KIND_CLUSTER_NAME:-capz} --internal")) k8s_yaml(helm( "./hack/observability/cluster-api-visualizer/chart", name = "visualize-cluster", diff --git a/api/v1alpha3/azuremachine_conversion.go b/api/v1alpha3/azuremachine_conversion.go index db4636ca6eb..c156bde1e9b 100644 --- a/api/v1alpha3/azuremachine_conversion.go +++ b/api/v1alpha3/azuremachine_conversion.go @@ -59,6 +59,10 @@ func (src *AzureMachine) ConvertTo(dstRaw conversion.Hub) error { dst.Spec.Image.ComputeGallery = restored.Spec.Image.ComputeGallery } + if restored.Spec.AdditionalCapabilities != nil { + dst.Spec.AdditionalCapabilities = restored.Spec.AdditionalCapabilities + } + dst.Spec.SubnetName = restored.Spec.SubnetName dst.Status.LongRunningOperationStates = restored.Status.LongRunningOperationStates diff --git a/api/v1alpha3/azuremachinetemplate_conversion.go b/api/v1alpha3/azuremachinetemplate_conversion.go index 43ab04f3d2e..642dc94390e 100644 --- a/api/v1alpha3/azuremachinetemplate_conversion.go +++ b/api/v1alpha3/azuremachinetemplate_conversion.go @@ -57,6 +57,10 @@ func (src *AzureMachineTemplate) ConvertTo(dstRaw conversion.Hub) error { dst.Spec.Template.Spec.Image.ComputeGallery = restored.Spec.Template.Spec.Image.ComputeGallery } + if restored.Spec.Template.Spec.AdditionalCapabilities != nil { + dst.Spec.Template.Spec.AdditionalCapabilities = restored.Spec.Template.Spec.AdditionalCapabilities + } + dst.Spec.Template.Spec.SubnetName = restored.Spec.Template.Spec.SubnetName dst.Spec.Template.ObjectMeta = restored.Spec.Template.ObjectMeta diff --git a/api/v1alpha3/zz_generated.conversion.go b/api/v1alpha3/zz_generated.conversion.go index 8389016dfee..4823a6e903c 100644 --- a/api/v1alpha3/zz_generated.conversion.go +++ b/api/v1alpha3/zz_generated.conversion.go @@ -899,6 +899,7 @@ func autoConvert_v1beta1_AzureMachineSpec_To_v1alpha3_AzureMachineSpec(in *v1bet } out.SSHPublicKey = in.SSHPublicKey out.AdditionalTags = *(*Tags)(unsafe.Pointer(&in.AdditionalTags)) + // WARNING: in.AdditionalCapabilities requires manual conversion: does not exist in peer-type out.AllocatePublicIP = in.AllocatePublicIP out.EnableIPForwarding = in.EnableIPForwarding out.AcceleratedNetworking = (*bool)(unsafe.Pointer(in.AcceleratedNetworking)) diff --git a/api/v1alpha4/azuremachine_conversion.go b/api/v1alpha4/azuremachine_conversion.go index 9e5b1f2afed..2473c3702fd 100644 --- a/api/v1alpha4/azuremachine_conversion.go +++ b/api/v1alpha4/azuremachine_conversion.go @@ -36,7 +36,6 @@ func (src *AzureMachine) ConvertTo(dstRaw conversion.Hub) error { return err } - if restored.Spec.NetworkInterfaces != nil { dst.Spec.NetworkInterfaces = restored.Spec.NetworkInterfaces } @@ -44,6 +43,11 @@ func (src *AzureMachine) ConvertTo(dstRaw conversion.Hub) error { if restored.Spec.Image != nil && restored.Spec.Image.ComputeGallery != nil { dst.Spec.Image.ComputeGallery = restored.Spec.Image.ComputeGallery } + + if restored.Spec.AdditionalCapabilities != nil { + 
dst.Spec.AdditionalCapabilities = restored.Spec.AdditionalCapabilities + } + return nil } diff --git a/api/v1alpha4/azuremachinetemplate_conversion.go b/api/v1alpha4/azuremachinetemplate_conversion.go index d93c20c87c2..bd84eaa1443 100644 --- a/api/v1alpha4/azuremachinetemplate_conversion.go +++ b/api/v1alpha4/azuremachinetemplate_conversion.go @@ -45,6 +45,10 @@ func (src *AzureMachineTemplate) ConvertTo(dstRaw conversion.Hub) error { dst.Spec.Template.Spec.Image.ComputeGallery = restored.Spec.Template.Spec.Image.ComputeGallery } + if restored.Spec.Template.Spec.AdditionalCapabilities != nil { + dst.Spec.Template.Spec.AdditionalCapabilities = restored.Spec.Template.Spec.AdditionalCapabilities + } + dst.Spec.Template.ObjectMeta = restored.Spec.Template.ObjectMeta return nil diff --git a/api/v1alpha4/zz_generated.conversion.go b/api/v1alpha4/zz_generated.conversion.go index 15cde92c04a..eb4584f5de0 100644 --- a/api/v1alpha4/zz_generated.conversion.go +++ b/api/v1alpha4/zz_generated.conversion.go @@ -1046,6 +1046,7 @@ func autoConvert_v1beta1_AzureMachineSpec_To_v1alpha4_AzureMachineSpec(in *v1bet out.DataDisks = *(*[]DataDisk)(unsafe.Pointer(&in.DataDisks)) out.SSHPublicKey = in.SSHPublicKey out.AdditionalTags = *(*Tags)(unsafe.Pointer(&in.AdditionalTags)) + // WARNING: in.AdditionalCapabilities requires manual conversion: does not exist in peer-type out.AllocatePublicIP = in.AllocatePublicIP out.EnableIPForwarding = in.EnableIPForwarding out.AcceleratedNetworking = (*bool)(unsafe.Pointer(in.AcceleratedNetworking)) diff --git a/api/v1beta1/azureclusteridentity_types.go b/api/v1beta1/azureclusteridentity_types.go index afb9651ba31..6d07dbdc3f5 100644 --- a/api/v1beta1/azureclusteridentity_types.go +++ b/api/v1beta1/azureclusteridentity_types.go @@ -44,10 +44,10 @@ type AllowedNamespaces struct { // AzureClusterIdentitySpec defines the parameters that are used to create an AzureIdentity. type AzureClusterIdentitySpec struct { // Type is the type of Azure Identity used. - // ServicePrincipal, ServicePrincipalCertificate, or ManualServicePrincipal. + // ServicePrincipal, ServicePrincipalCertificate, UserAssignedMSI or ManualServicePrincipal. Type IdentityType `json:"type"` // ResourceID is the Azure resource ID for the User Assigned MSI resource. - // Not currently supported. + // Only applicable when type is UserAssignedMSI. // +optional ResourceID string `json:"resourceID,omitempty"` // ClientID is the service principal client ID. 
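The identity changes above promote `UserAssignedMSI` from "Not currently supported" to a usable identity type. As a rough illustration only (not part of this diff; all IDs and names below are placeholders), an AzureClusterIdentity using the managed-identity type might look like:

```yaml
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: AzureClusterIdentity
metadata:
  name: cluster-identity
  namespace: default
spec:
  type: UserAssignedMSI
  # resourceID is only applicable when type is UserAssignedMSI (per the doc change above)
  resourceID: /subscriptions/<subscription-id>/resourceGroups/<rg>/providers/Microsoft.ManagedIdentity/userAssignedIdentities/<identity-name>
  clientID: <client-id-of-the-managed-identity>
  tenantID: <tenant-id>
  allowedNamespaces: {}
```

Note that `GetClientSecret` (reworked later in this diff) returns an empty string for this type, since a managed identity carries no client secret.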
diff --git a/api/v1beta1/azuremachine_default.go b/api/v1beta1/azuremachine_default.go index 3905f6d65b0..c19d893c1e6 100644 --- a/api/v1beta1/azuremachine_default.go +++ b/api/v1beta1/azuremachine_default.go @@ -19,6 +19,7 @@ package v1beta1 import ( "encoding/base64" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-11-01/compute" "golang.org/x/crypto/ssh" "k8s.io/apimachinery/pkg/util/uuid" utilSSH "sigs.k8s.io/cluster-api-provider-azure/util/ssh" @@ -67,7 +68,12 @@ func (s *AzureMachineSpec) SetDataDisksDefaults() { } } if disk.CachingType == "" { - s.DataDisks[i].CachingType = "ReadWrite" + if s.DataDisks[i].ManagedDisk != nil && + s.DataDisks[i].ManagedDisk.StorageAccountType == string(compute.StorageAccountTypesUltraSSDLRS) { + s.DataDisks[i].CachingType = string(compute.CachingTypesNone) + } else { + s.DataDisks[i].CachingType = string(compute.CachingTypesReadWrite) + } } } } diff --git a/api/v1beta1/azuremachine_default_test.go b/api/v1beta1/azuremachine_default_test.go index 540593f6ee0..dce02b5a724 100644 --- a/api/v1beta1/azuremachine_default_test.go +++ b/api/v1beta1/azuremachine_default_test.go @@ -215,6 +215,14 @@ func TestAzureMachineSpec_SetDataDisksDefaults(t *testing.T) { DiskSizeGB: 30, Lun: to.Int32Ptr(2), }, + { + NameSuffix: "testdisk3", + DiskSizeGB: 30, + ManagedDisk: &ManagedDiskParameters{ + StorageAccountType: "UltraSSD_LRS", + }, + Lun: to.Int32Ptr(3), + }, }, output: []DataDisk{ { @@ -229,6 +237,15 @@ func TestAzureMachineSpec_SetDataDisksDefaults(t *testing.T) { Lun: to.Int32Ptr(2), CachingType: "ReadWrite", }, + { + NameSuffix: "testdisk3", + DiskSizeGB: 30, + Lun: to.Int32Ptr(3), + ManagedDisk: &ManagedDiskParameters{ + StorageAccountType: "UltraSSD_LRS", + }, + CachingType: "None", + }, }, }, } diff --git a/api/v1beta1/azuremachine_types.go b/api/v1beta1/azuremachine_types.go index 39fe2f619f0..135012a741e 100644 --- a/api/v1beta1/azuremachine_types.go +++ b/api/v1beta1/azuremachine_types.go @@ -86,6 +86,10 @@ type AzureMachineSpec struct { // +optional AdditionalTags Tags `json:"additionalTags,omitempty"` + // AdditionalCapabilities specifies additional capabilities enabled or disabled on the virtual machine. + // +optional + AdditionalCapabilities *AdditionalCapabilities `json:"additionalCapabilities,omitempty"` + // AllocatePublicIP allows the ability to create dynamic public ips for machines where this value is true. // +optional AllocatePublicIP bool `json:"allocatePublicIP,omitempty"` @@ -187,6 +191,15 @@ type AzureMachineStatus struct { LongRunningOperationStates Futures `json:"longRunningOperationStates,omitempty"` } +// AdditionalCapabilities enables or disables a capability on the virtual machine. +type AdditionalCapabilities struct { + // UltraSSDEnabled enables or disables Azure UltraSSD capability for the virtual machine. + // Defaults to true if Ultra SSD data disks are specified, + // otherwise it doesn't set the capability on the VM. 
+ // +optional + UltraSSDEnabled *bool `json:"ultraSSDEnabled,omitempty"` +} + // +kubebuilder:object:root=true // +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" // +kubebuilder:printcolumn:name="Reason",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].reason" diff --git a/api/v1beta1/azuremachine_validation.go b/api/v1beta1/azuremachine_validation.go index 20782b3ceb9..c3d81646d50 100644 --- a/api/v1beta1/azuremachine_validation.go +++ b/api/v1beta1/azuremachine_validation.go @@ -154,7 +154,7 @@ func ValidateDataDisks(dataDisks []DataDisk, fieldPath *field.Path) field.ErrorL } // validate cachingType - allErrs = append(allErrs, validateCachingType(disk.CachingType, fieldPath)...) + allErrs = append(allErrs, validateCachingType(disk.CachingType, fieldPath, disk.ManagedDisk)...) } return allErrs } @@ -173,7 +173,7 @@ func ValidateOSDisk(osDisk OSDisk, fieldPath *field.Path) field.ErrorList { allErrs = append(allErrs, field.Required(fieldPath.Child("OSType"), "the OS type cannot be empty")) } - allErrs = append(allErrs, validateCachingType(osDisk.CachingType, fieldPath)...) + allErrs = append(allErrs, validateCachingType(osDisk.CachingType, fieldPath, osDisk.ManagedDisk)...) if osDisk.ManagedDisk != nil { if errs := validateManagedDisk(osDisk.ManagedDisk, fieldPath.Child("managedDisk"), true); len(errs) > 0 { @@ -289,10 +289,16 @@ func validateStorageAccountType(storageAccountType string, fieldPath *field.Path return allErrs } -func validateCachingType(cachingType string, fieldPath *field.Path) field.ErrorList { +func validateCachingType(cachingType string, fieldPath *field.Path, managedDisk *ManagedDiskParameters) field.ErrorList { allErrs := field.ErrorList{} cachingTypeChildPath := fieldPath.Child("CachingType") + if managedDisk != nil && managedDisk.StorageAccountType == string(compute.StorageAccountTypesUltraSSDLRS) { + if cachingType != string(compute.CachingTypesNone) { + allErrs = append(allErrs, field.Invalid(cachingTypeChildPath, cachingType, fmt.Sprintf("cachingType '%s' is not supported when storageAccountType is '%s'. 
Allowed values are: '%s'", cachingType, compute.StorageAccountTypesUltraSSDLRS, compute.CachingTypesNone))) + } + } + for _, possibleCachingType := range compute.PossibleCachingTypesValues() { if string(possibleCachingType) == cachingType { return allErrs diff --git a/api/v1beta1/azuremachine_validation_test.go b/api/v1beta1/azuremachine_validation_test.go index 31044364e30..9a743814e53 100644 --- a/api/v1beta1/azuremachine_validation_test.go +++ b/api/v1beta1/azuremachine_validation_test.go @@ -385,6 +385,51 @@ func TestAzureMachine_ValidateDataDisks(t *testing.T) { }, wantErr: true, }, + { + name: "valid combination of managed disk storage account type UltraSSD_LRS and cachingType None", + disks: []DataDisk{ + { + NameSuffix: "my_disk_1", + DiskSizeGB: 64, + ManagedDisk: &ManagedDiskParameters{ + StorageAccountType: string(compute.StorageAccountTypesUltraSSDLRS), + }, + Lun: to.Int32Ptr(0), + CachingType: string(compute.CachingTypesNone), + }, + }, + wantErr: false, + }, + { + name: "invalid combination of managed disk storage account type UltraSSD_LRS and cachingType ReadWrite", + disks: []DataDisk{ + { + NameSuffix: "my_disk_1", + DiskSizeGB: 64, + ManagedDisk: &ManagedDiskParameters{ + StorageAccountType: string(compute.StorageAccountTypesUltraSSDLRS), + }, + Lun: to.Int32Ptr(0), + CachingType: string(compute.CachingTypesReadWrite), + }, + }, + wantErr: true, + }, + { + name: "invalid combination of managed disk storage account type UltraSSD_LRS and cachingType ReadOnly", + disks: []DataDisk{ + { + NameSuffix: "my_disk_1", + DiskSizeGB: 64, + ManagedDisk: &ManagedDiskParameters{ + StorageAccountType: string(compute.StorageAccountTypesUltraSSDLRS), + }, + Lun: to.Int32Ptr(0), + CachingType: string(compute.CachingTypesReadOnly), + }, + }, + wantErr: true, + }, } for _, test := range testcases { diff --git a/api/v1beta1/types.go b/api/v1beta1/types.go index 40c925b579f..0e6f0d011b4 100644 --- a/api/v1beta1/types.go +++ b/api/v1beta1/types.go @@ -458,11 +458,11 @@ const ( ) // IdentityType represents different types of identities. -// +kubebuilder:validation:Enum=ServicePrincipal;ManualServicePrincipal;ServicePrincipalCertificate +// +kubebuilder:validation:Enum=ServicePrincipal;UserAssignedMSI;ManualServicePrincipal;ServicePrincipalCertificate type IdentityType string const ( - // UserAssignedMSI represents a user-assigned identity. + // UserAssignedMSI represents a user-assigned managed identity. UserAssignedMSI IdentityType = "UserAssignedMSI" // ServicePrincipal represents a service principal using a client password as secret. diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go index 7ad1a57c279..8bc3dc9794d 100644 --- a/api/v1beta1/zz_generated.deepcopy.go +++ b/api/v1beta1/zz_generated.deepcopy.go @@ -29,6 +29,26 @@ import ( "sigs.k8s.io/cluster-api/errors" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdditionalCapabilities) DeepCopyInto(out *AdditionalCapabilities) { + *out = *in + if in.UltraSSDEnabled != nil { + in, out := &in.UltraSSDEnabled, &out.UltraSSDEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalCapabilities. 
+func (in *AdditionalCapabilities) DeepCopy() *AdditionalCapabilities { + if in == nil { + return nil + } + out := new(AdditionalCapabilities) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AddressRecord) DeepCopyInto(out *AddressRecord) { *out = *in @@ -598,6 +618,11 @@ func (in *AzureMachineSpec) DeepCopyInto(out *AzureMachineSpec) { (*out)[key] = val } } + if in.AdditionalCapabilities != nil { + in, out := &in.AdditionalCapabilities, &out.AdditionalCapabilities + *out = new(AdditionalCapabilities) + (*in).DeepCopyInto(*out) + } if in.AcceleratedNetworking != nil { in, out := &in.AcceleratedNetworking, &out.AcceleratedNetworking *out = new(bool) diff --git a/azure/converters/managedagentpool.go b/azure/converters/managedagentpool.go index 370ef823cfc..ab5ee6b5e2e 100644 --- a/azure/converters/managedagentpool.go +++ b/azure/converters/managedagentpool.go @@ -27,7 +27,7 @@ func AgentPoolToManagedClusterAgentPoolProfile(pool azure.AgentPoolSpec) contain return containerservice.ManagedClusterAgentPoolProfile{ Name: &pool.Name, VMSize: &pool.SKU, - OsType: containerservice.OSTypeLinux, + OsType: containerservice.OSType(to.String(pool.OSType)), OsDiskSizeGB: &pool.OSDiskSizeGB, Count: &pool.Replicas, Type: containerservice.AgentPoolTypeVirtualMachineScaleSets, @@ -51,7 +51,7 @@ func AgentPoolToContainerServiceAgentPool(pool azure.AgentPoolSpec) containerser return containerservice.AgentPool{ ManagedClusterAgentPoolProfileProperties: &containerservice.ManagedClusterAgentPoolProfileProperties{ VMSize: &pool.SKU, - OsType: containerservice.OSTypeLinux, + OsType: containerservice.OSType(to.String(pool.OSType)), OsDiskSizeGB: &pool.OSDiskSizeGB, Count: &pool.Replicas, Type: containerservice.AgentPoolTypeVirtualMachineScaleSets, diff --git a/azure/converters/managedagentpool_test.go b/azure/converters/managedagentpool_test.go index eca1234efc2..3fc42a9650a 100644 --- a/azure/converters/managedagentpool_test.go +++ b/azure/converters/managedagentpool_test.go @@ -37,6 +37,7 @@ func Test_AgentPoolToManagedClusterAgentPoolProfile(t *testing.T) { SKU: "Standard_D2s_v3", OSDiskSizeGB: 100, Replicas: 2, + OSType: to.StringPtr(azure.LinuxOS), Version: to.StringPtr("1.22.6"), VnetSubnetID: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg-123/providers/Microsoft.Network/virtualNetworks/vnet-123/subnets/subnet-123", Mode: "User", @@ -57,7 +58,7 @@ func Test_AgentPoolToManagedClusterAgentPoolProfile(t *testing.T) { g.Expect(result).To(Equal(containerservice.ManagedClusterAgentPoolProfile{ Name: to.StringPtr("agentpool1"), VMSize: to.StringPtr("Standard_D2s_v3"), - OsType: containerservice.OSTypeLinux, + OsType: azure.LinuxOS, OsDiskSizeGB: to.Int32Ptr(100), Count: to.Int32Ptr(2), Type: containerservice.AgentPoolTypeVirtualMachineScaleSets, @@ -102,6 +103,7 @@ func Test_AgentPoolToAgentPoolToContainerServiceAgentPool(t *testing.T) { Name: "agentpool1", SKU: "Standard_D2s_v3", OSDiskSizeGB: 100, + OSType: to.StringPtr(azure.LinuxOS), Replicas: 2, Version: to.StringPtr("1.22.6"), VnetSubnetID: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg-123/providers/Microsoft.Network/virtualNetworks/vnet-123/subnets/subnet-123", @@ -122,7 +124,7 @@ func Test_AgentPoolToAgentPoolToContainerServiceAgentPool(t *testing.T) { g.Expect(result).To(Equal(containerservice.AgentPool{ ManagedClusterAgentPoolProfileProperties: 
&containerservice.ManagedClusterAgentPoolProfileProperties{ VMSize: to.StringPtr("Standard_D2s_v3"), - OsType: containerservice.OSTypeLinux, + OsType: azure.LinuxOS, OsDiskSizeGB: to.Int32Ptr(100), Count: to.Int32Ptr(2), Type: containerservice.AgentPoolTypeVirtualMachineScaleSets, diff --git a/azure/scope/cluster.go b/azure/scope/cluster.go index 4f74556af36..acc9b7ba42b 100644 --- a/azure/scope/cluster.go +++ b/azure/scope/cluster.go @@ -269,9 +269,11 @@ func (s *ClusterScope) NatGatewaySpecs() []azure.ResourceSpecGetter { ResourceGroup: s.ResourceGroup(), SubscriptionID: s.SubscriptionID(), Location: s.Location(), + ClusterName: s.ClusterName(), NatGatewayIP: infrav1.PublicIPSpec{ Name: subnet.NatGateway.NatGatewayIP.Name, }, + AdditionalTags: s.AdditionalTags(), }) } } diff --git a/azure/scope/cluster_test.go b/azure/scope/cluster_test.go index 05ed0c85f89..58171ffd912 100644 --- a/azure/scope/cluster_test.go +++ b/azure/scope/cluster_test.go @@ -694,6 +694,11 @@ func TestNatGatewaySpecs(t *testing.T) { { name: "returns specified node NAT gateway if present", clusterScope: ClusterScope{ + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cluster", + }, + }, AzureClients: AzureClients{ EnvironmentSettings: auth.EnvironmentSettings{ Values: map[string]string{ @@ -737,15 +742,22 @@ func TestNatGatewaySpecs(t *testing.T) { ResourceGroup: "my-rg", Location: "centralIndia", SubscriptionID: "123", + ClusterName: "my-cluster", NatGatewayIP: infrav1.PublicIPSpec{ Name: "44.78.67.90", }, + AdditionalTags: make(infrav1.Tags), }, }, }, { name: "returns specified node NAT gateway if present and ignores duplicate", clusterScope: ClusterScope{ + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cluster", + }, + }, AzureClients: AzureClients{ EnvironmentSettings: auth.EnvironmentSettings{ Values: map[string]string{ @@ -807,15 +819,22 @@ func TestNatGatewaySpecs(t *testing.T) { ResourceGroup: "my-rg", Location: "centralIndia", SubscriptionID: "123", + ClusterName: "my-cluster", NatGatewayIP: infrav1.PublicIPSpec{ Name: "44.78.67.90", }, + AdditionalTags: make(infrav1.Tags), }, }, }, { name: "returns specified node NAT gateway if present and ignores control plane nat gateway", clusterScope: ClusterScope{ + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cluster", + }, + }, AzureClients: AzureClients{ EnvironmentSettings: auth.EnvironmentSettings{ Values: map[string]string{ @@ -876,9 +895,11 @@ func TestNatGatewaySpecs(t *testing.T) { ResourceGroup: "my-rg", Location: "centralIndia", SubscriptionID: "123", + ClusterName: "my-cluster", NatGatewayIP: infrav1.PublicIPSpec{ Name: "44.78.67.90", }, + AdditionalTags: make(infrav1.Tags), }, }, }, diff --git a/azure/scope/identity.go b/azure/scope/identity.go index 3a11f6132c2..1693070be31 100644 --- a/azure/scope/identity.go +++ b/azure/scope/identity.go @@ -138,7 +138,7 @@ func (p *ManagedControlPlaneCredentialsProvider) GetAuthorizer(ctx context.Conte func (p *AzureCredentialsProvider) GetAuthorizer(ctx context.Context, resourceManagerEndpoint, activeDirectoryEndpoint string, clusterMeta metav1.ObjectMeta) (autorest.Authorizer, error) { var spt *adal.ServicePrincipalToken switch p.Identity.Spec.Type { - case infrav1.ServicePrincipal, infrav1.ServicePrincipalCertificate: + case infrav1.ServicePrincipal, infrav1.ServicePrincipalCertificate, infrav1.UserAssignedMSI: if err := createAzureIdentityWithBindings(ctx, p.Identity, resourceManagerEndpoint, activeDirectoryEndpoint, clusterMeta, 
p.Client); err != nil { return nil, err } @@ -185,17 +185,20 @@ func (p *AzureCredentialsProvider) GetClientID() string { // NOTE: this only works if the Identity references a Service Principal Client Secret. // If using another type of credentials, such a Certificate, we return an empty string. func (p *AzureCredentialsProvider) GetClientSecret(ctx context.Context) (string, error) { - secretRef := p.Identity.Spec.ClientSecret - key := types.NamespacedName{ - Namespace: secretRef.Namespace, - Name: secretRef.Name, - } - secret := &corev1.Secret{} + if p.hasClientSecret() { + secretRef := p.Identity.Spec.ClientSecret + key := types.NamespacedName{ + Namespace: secretRef.Namespace, + Name: secretRef.Name, + } + secret := &corev1.Secret{} - if err := p.Client.Get(ctx, key, secret); err != nil { - return "", errors.Wrap(err, "Unable to fetch ClientSecret") + if err := p.Client.Get(ctx, key, secret); err != nil { + return "", errors.Wrap(err, "Unable to fetch ClientSecret") + } + return string(secret.Data[azureSecretKey]), nil } - return string(secret.Data[azureSecretKey]), nil + return "", nil } // GetTenantID returns the Tenant ID associated with the AzureCredentialsProvider's Identity. @@ -203,6 +206,12 @@ func (p *AzureCredentialsProvider) GetTenantID() string { return p.Identity.Spec.TenantID } +// hasClientSecret returns true if the identity has a Service Principal Client Secret. +// This does not include service principals with certificates or managed identities. +func (p *AzureCredentialsProvider) hasClientSecret() bool { + return p.Identity.Spec.Type == infrav1.ServicePrincipal || p.Identity.Spec.Type == infrav1.ManualServicePrincipal +} + func createAzureIdentityWithBindings(ctx context.Context, azureIdentity *infrav1.AzureClusterIdentity, resourceManagerEndpoint, activeDirectoryEndpoint string, clusterMeta metav1.ObjectMeta, kubeClient client.Client) error { azureIdentityType, err := getAzureIdentityType(azureIdentity) diff --git a/azure/scope/identity_test.go b/azure/scope/identity_test.go index ed866caf8dc..0b7ae18d348 100644 --- a/azure/scope/identity_test.go +++ b/azure/scope/identity_test.go @@ -294,3 +294,62 @@ func TestCreateAzureIdentityWithBindings(t *testing.T) { }) } } + +func TestHasClientSecret(t *testing.T) { + tests := []struct { + name string + identity *infrav1.AzureClusterIdentity + want bool + }{ + { + name: "user assigned identity", + identity: &infrav1.AzureClusterIdentity{ + Spec: infrav1.AzureClusterIdentitySpec{ + Type: infrav1.UserAssignedMSI, + ResourceID: "my-resource-id", + }, + }, + want: false, + }, + { + name: "service principal with secret", + identity: &infrav1.AzureClusterIdentity{ + Spec: infrav1.AzureClusterIdentitySpec{ + Type: infrav1.ServicePrincipal, + ClientSecret: corev1.SecretReference{Name: "my-client-secret"}, + }, + }, + want: true, + }, + { + name: "service principal with certificate", + identity: &infrav1.AzureClusterIdentity{ + Spec: infrav1.AzureClusterIdentitySpec{ + Type: infrav1.ServicePrincipalCertificate, + ClientSecret: corev1.SecretReference{Name: "my-client-secret"}, + }, + }, + want: false, + }, + { + name: "manual service principal", + identity: &infrav1.AzureClusterIdentity{ + Spec: infrav1.AzureClusterIdentitySpec{ + Type: infrav1.ManualServicePrincipal, + ClientSecret: corev1.SecretReference{Name: "my-client-secret"}, + }, + }, + want: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + p := &AzureCredentialsProvider{ + Identity: tt.identity, + } + if got := p.hasClientSecret(); got != 
tt.want { + t.Errorf("AzureCredentialsProvider.hasClientSecret() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/azure/scope/machine.go b/azure/scope/machine.go index f63b1715654..a133797db21 100644 --- a/azure/scope/machine.go +++ b/azure/scope/machine.go @@ -165,6 +165,7 @@ func (m *MachineScope) VMSpec() azure.ResourceSpecGetter { SpotVMOptions: m.AzureMachine.Spec.SpotVMOptions, SecurityProfile: m.AzureMachine.Spec.SecurityProfile, AdditionalTags: m.AdditionalTags(), + AdditionalCapabilities: m.AzureMachine.Spec.AdditionalCapabilities, ProviderID: m.ProviderID(), } if m.cache != nil { diff --git a/azure/scope/managedcontrolplane_test.go b/azure/scope/managedcontrolplane_test.go index ebfebeac241..7f9b2e495ff 100644 --- a/azure/scope/managedcontrolplane_test.go +++ b/azure/scope/managedcontrolplane_test.go @@ -267,3 +267,135 @@ func TestManagedControlPlaneScope_AddonProfiles(t *testing.T) { }) } } + +func TestManagedControlPlaneScope_OSType(t *testing.T) { + scheme := runtime.NewScheme() + _ = capiv1exp.AddToScheme(scheme) + _ = infrav1.AddToScheme(scheme) + + cases := []struct { + Name string + Input ManagedControlPlaneScopeParams + Expected []azure.AgentPoolSpec + Err string + }{ + { + Name: "with Linux and Windows pools", + Input: ManagedControlPlaneScopeParams{ + AzureClients: AzureClients{ + Authorizer: autorest.NullAuthorizer{}, + }, + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + Namespace: "default", + }, + }, + ControlPlane: &infrav1.AzureManagedControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + Namespace: "default", + }, + Spec: infrav1.AzureManagedControlPlaneSpec{ + Version: "v1.20.1", + SubscriptionID: "00000000-0000-0000-0000-000000000000", + }, + }, + ManagedMachinePools: []ManagedMachinePool{ + { + MachinePool: getMachinePool("pool0"), + InfraMachinePool: getAzureMachinePool("pool0", infrav1.NodePoolModeSystem), + }, + { + MachinePool: getMachinePool("pool1"), + InfraMachinePool: getLinuxAzureMachinePool("pool1"), + }, + { + MachinePool: getMachinePool("pool2"), + InfraMachinePool: getWindowsAzureMachinePool("pool2"), + }, + }, + }, + Expected: []azure.AgentPoolSpec{ + { + Name: "pool0", + SKU: "Standard_D2s_v3", + Mode: "System", + Replicas: 1, + Cluster: "cluster1", + VnetSubnetID: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups//providers/Microsoft.Network/virtualNetworks//subnets/", + }, + { + Name: "pool1", + SKU: "Standard_D2s_v3", + Mode: "User", + Replicas: 1, + Cluster: "cluster1", + VnetSubnetID: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups//providers/Microsoft.Network/virtualNetworks//subnets/", + OSType: to.StringPtr(azure.LinuxOS), + }, + { + Name: "pool2", + SKU: "Standard_D2s_v3", + Mode: "User", + Replicas: 1, + Cluster: "cluster1", + VnetSubnetID: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups//providers/Microsoft.Network/virtualNetworks//subnets/", + OSType: to.StringPtr(azure.WindowsOS), + }, + }, + }, + { + Name: "system pool required", + Input: ManagedControlPlaneScopeParams{ + AzureClients: AzureClients{ + Authorizer: autorest.NullAuthorizer{}, + }, + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + Namespace: "default", + }, + }, + ControlPlane: &infrav1.AzureManagedControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + Namespace: "default", + }, + Spec: infrav1.AzureManagedControlPlaneSpec{ + Version: "v1.20.1", + SubscriptionID: "00000000-0000-0000-0000-000000000000", + 
}, + }, + ManagedMachinePools: []ManagedMachinePool{ + { + MachinePool: getMachinePool("pool0"), + InfraMachinePool: getLinuxAzureMachinePool("pool0"), + }, + { + MachinePool: getMachinePool("pool1"), + InfraMachinePool: getWindowsAzureMachinePool("pool1"), + }, + }, + }, + Err: "failed to fetch azuremanagedMachine pool with mode:System, require at least 1 system node pool", + }, + } + + for _, c := range cases { + c := c + t.Run(c.Name, func(t *testing.T) { + g := NewWithT(t) + fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(c.Input.ControlPlane).Build() + c.Input.Client = fakeClient + s, err := NewManagedControlPlaneScope(context.TODO(), c.Input) + g.Expect(err).To(Succeed()) + agentPools, err := s.GetAllAgentPoolSpecs() + if err != nil { + g.Expect(err.Error()).To(Equal(c.Err)) + } else { + g.Expect(agentPools).To(Equal(c.Expected)) + } + }) + } +} diff --git a/azure/scope/managedmachinepool.go b/azure/scope/managedmachinepool.go index 801d609ad01..d875b192d09 100644 --- a/azure/scope/managedmachinepool.go +++ b/azure/scope/managedmachinepool.go @@ -146,6 +146,7 @@ func buildAgentPoolSpec(managedControlPlane *infrav1exp.AzureManagedControlPlane SKU: managedMachinePool.Spec.SKU, Replicas: replicas, Version: normalizedVersion, + OSType: managedMachinePool.Spec.OSType, VnetSubnetID: azure.SubnetID( managedControlPlane.Spec.SubscriptionID, managedControlPlane.Spec.ResourceGroupName, diff --git a/azure/scope/managedmachinepool_test.go b/azure/scope/managedmachinepool_test.go index 907ca55a31a..709a7f86031 100644 --- a/azure/scope/managedmachinepool_test.go +++ b/azure/scope/managedmachinepool_test.go @@ -574,6 +574,18 @@ func getMachinePool(name string) *capiv1exp.MachinePool { } } +func getLinuxAzureMachinePool(name string) *infrav1.AzureManagedMachinePool { + managedPool := getAzureMachinePool(name, infrav1.NodePoolModeUser) + managedPool.Spec.OSType = to.StringPtr(azure.LinuxOS) + return managedPool +} + +func getWindowsAzureMachinePool(name string) *infrav1.AzureManagedMachinePool { + managedPool := getAzureMachinePool(name, infrav1.NodePoolModeUser) + managedPool.Spec.OSType = to.StringPtr(azure.WindowsOS) + return managedPool +} + func getMachinePoolWithVersion(name, version string) *capiv1exp.MachinePool { machine := getMachinePool(name) machine.Spec.Template.Spec.Version = to.StringPtr(version) diff --git a/azure/services/natgateways/natgateways_test.go b/azure/services/natgateways/natgateways_test.go index 17817f8ed66..2bf9cf9f4fe 100644 --- a/azure/services/natgateways/natgateways_test.go +++ b/azure/services/natgateways/natgateways_test.go @@ -45,6 +45,7 @@ var ( ResourceGroup: "my-rg", SubscriptionID: "my-sub", Location: "westus", + ClusterName: "my-cluster", NatGatewayIP: infrav1.PublicIPSpec{Name: "pip-node-subnet"}, } natGateway1 = network.NatGateway{ diff --git a/azure/services/natgateways/spec.go b/azure/services/natgateways/spec.go index 4cf0ee384fd..07a489bc678 100644 --- a/azure/services/natgateways/spec.go +++ b/azure/services/natgateways/spec.go @@ -23,6 +23,7 @@ import ( "github.com/pkg/errors" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" + "sigs.k8s.io/cluster-api-provider-azure/azure/converters" ) // NatGatewaySpec defines the specification for a NAT gateway. @@ -32,6 +33,8 @@ type NatGatewaySpec struct { SubscriptionID string Location string NatGatewayIP infrav1.PublicIPSpec + ClusterName string + AdditionalTags infrav1.Tags } // ResourceName returns the name of the NAT gateway. 
@@ -74,6 +77,12 @@ func (s *NatGatewaySpec) Parameters(existing interface{}) (params interface{}, e }, }, }, + Tags: converters.TagsToMap(infrav1.Build(infrav1.BuildParams{ + ClusterName: s.ClusterName, + Lifecycle: infrav1.ResourceLifecycleOwned, + Name: to.StringPtr(s.Name), + Additional: s.AdditionalTags, + })), } return natGatewayToCreate, nil diff --git a/azure/services/scalesets/scalesets.go b/azure/services/scalesets/scalesets.go index c4e4fd89a08..b599059897c 100644 --- a/azure/services/scalesets/scalesets.go +++ b/azure/services/scalesets/scalesets.go @@ -336,17 +336,30 @@ func (s *Service) validateSpec(ctx context.Context) error { return azure.WithTerminalError(errors.Errorf("encryption at host is not supported for VM type %s", spec.Size)) } - // check the support for ultra disks based on location and vm size - for _, disks := range spec.DataDisks { - location := s.Scope.Location() - zones, err := s.resourceSKUCache.GetZones(ctx, location) - if err != nil { - return azure.WithTerminalError(errors.Wrapf(err, "failed to get the zones for location %s", location)) - } + // Fetch location and zone to check for their support of ultra disks. + location := s.Scope.Location() + zones, err := s.resourceSKUCache.GetZones(ctx, location) + if err != nil { + return azure.WithTerminalError(errors.Wrapf(err, "failed to get the zones for location %s", location)) + } - for _, zone := range zones { - if disks.ManagedDisk != nil && disks.ManagedDisk.StorageAccountType == string(compute.StorageAccountTypesUltraSSDLRS) && !sku.HasLocationCapability(resourceskus.UltraSSDAvailable, location, zone) { - return azure.WithTerminalError(fmt.Errorf("vm size %s does not support ultra disks in location %s. select a different vm size or disable ultra disks", spec.Size, location)) + for _, zone := range zones { + hasLocationCapability := sku.HasLocationCapability(resourceskus.UltraSSDAvailable, location, zone) + err := fmt.Errorf("vm size %s does not support ultra disks in location %s. select a different vm size or disable ultra disks", spec.Size, location) + + // Check support for ultra disks as data disks. + for _, disks := range spec.DataDisks { + if disks.ManagedDisk != nil && + disks.ManagedDisk.StorageAccountType == string(compute.StorageAccountTypesUltraSSDLRS) && + !hasLocationCapability { + return azure.WithTerminalError(err) + } + } + // Check support for ultra disks as persistent volumes. 
+ if spec.AdditionalCapabilities != nil && spec.AdditionalCapabilities.UltraSSDEnabled != nil { + if *spec.AdditionalCapabilities.UltraSSDEnabled && + !hasLocationCapability { + return azure.WithTerminalError(err) } } } @@ -494,15 +507,7 @@ func (s *Service) buildVMSSFromSpec(ctx context.Context, vmssSpec azure.ScaleSet PublicIPAddressConfiguration: &compute.VirtualMachineScaleSetPublicIPAddressConfiguration{}, }, } - if j == 0 { - ipconfig.Primary = to.BoolPtr(true) - if i == 0 { - // only set Load Balancer Backend Address Pool on primary nic/ipconfig - ipconfig.LoadBalancerBackendAddressPools = &backendAddressPools - } - } else { - ipconfig.Primary = to.BoolPtr(false) - } + ipconfig.Primary = to.BoolPtr(false) ipconfig.Subnet = &compute.APIEntityReference{ ID: to.StringPtr(azure.SubnetID(s.Scope.SubscriptionID(), vmssSpec.VNetResourceGroup, vmssSpec.VNetName, n.SubnetName)), } @@ -519,26 +524,20 @@ func (s *Service) buildVMSSFromSpec(ctx context.Context, vmssSpec azure.ScaleSet }, }, } - if j == 0 { - ipconfig.Primary = to.BoolPtr(true) - if i == 0 { - // only set Load Balancer Backend Address Pool on primary nic/ipconfig - ipconfig.LoadBalancerBackendAddressPools = &backendAddressPools - } - } else { - ipconfig.Primary = to.BoolPtr(false) - } + ipconfig.Primary = to.BoolPtr(false) ipconfig.Subnet = &compute.APIEntityReference{ ID: to.StringPtr(azure.SubnetID(s.Scope.SubscriptionID(), vmssSpec.VNetResourceGroup, vmssSpec.VNetName, n.SubnetName)), } ipconfigs = append(ipconfigs, ipconfig) } + if i == 0 { + ipconfigs[0].LoadBalancerBackendAddressPools = &backendAddressPools + nicConfig.Primary = to.BoolPtr(true) + } + ipconfigs[0].Primary = to.BoolPtr(true) nicConfig.VirtualMachineScaleSetNetworkConfigurationProperties.IPConfigurations = &ipconfigs } } - if i == 0 { - nicConfig.VirtualMachineScaleSetNetworkConfigurationProperties.Primary = to.BoolPtr(true) - } nicConfigs = append(nicConfigs, nicConfig) } vmss.VirtualMachineScaleSetProperties.VirtualMachineProfile.NetworkProfile.NetworkInterfaceConfigurations = &nicConfigs @@ -563,6 +562,8 @@ func (s *Service) buildVMSSFromSpec(ctx context.Context, vmssSpec azure.ScaleSet } } + // Provisionally detect whether there is any Data Disk defined which uses UltraSSDs. + // If that's the case, enable the UltraSSD capability. for _, dataDisk := range vmssSpec.DataDisks { if dataDisk.ManagedDisk != nil && dataDisk.ManagedDisk.StorageAccountType == string(compute.StorageAccountTypesUltraSSDLRS) { vmss.VirtualMachineScaleSetProperties.AdditionalCapabilities = &compute.AdditionalCapabilities{ @@ -571,6 +572,14 @@ func (s *Service) buildVMSSFromSpec(ctx context.Context, vmssSpec azure.ScaleSet } } + // Set Additional Capabilities if any is present on the spec. + if vmssSpec.AdditionalCapabilities != nil { + // Set UltraSSDEnabled if a specific value is set on the spec for it. 
+ if vmssSpec.AdditionalCapabilities.UltraSSDEnabled != nil { + vmss.VirtualMachineScaleSetProperties.AdditionalCapabilities = &compute.AdditionalCapabilities{UltraSSDEnabled: vmssSpec.AdditionalCapabilities.UltraSSDEnabled} + } + } + if vmssSpec.TerminateNotificationTimeout != nil { vmss.VirtualMachineScaleSetProperties.VirtualMachineProfile.ScheduledEventsProfile = &compute.ScheduledEventsProfile{ TerminateNotificationProfile: &compute.TerminateNotificationProfile{ diff --git a/azure/services/scalesets/scalesets_test.go b/azure/services/scalesets/scalesets_test.go index f06ebc7c94f..0d318febd53 100644 --- a/azure/services/scalesets/scalesets_test.go +++ b/azure/services/scalesets/scalesets_test.go @@ -571,7 +571,7 @@ func TestReconcileVMSS(t *testing.T) { }, }, { - name: "fail to create a vm with ultra disk enabled", + name: "fail to create a vm with ultra disk implicitly enabled by data disk, when location not supported", expectedError: "reconcile error that cannot be recovered occurred: vm size VM_SIZE_USSD does not support ultra disks in location test-location. select a different vm size or disable ultra disks. Object will not be requeued", expect: func(g *WithT, s *mock_scalesets.MockScaleSetScopeMockRecorder, m *mock_scalesets.MockClientMockRecorder) { s.ScaleSetSpec().Return(azure.ScaleSetSpec{ Name: defaultVMSSName, Size: "VM_SIZE_USSD", Capacity: 2, SSHKeyData: "ZmFrZXNzaGtleQo=", DataDisks: []infrav1.DataDisk{ { ManagedDisk: &infrav1.ManagedDiskParameters{ StorageAccountType: "UltraSSD_LRS", }, }, }, }) s.Location().AnyTimes().Return("test-location") }, }, @@ -590,6 +590,45 @@ + { + name: "fail to create a vm with ultra disk explicitly enabled via additional capabilities, when location not supported", + expectedError: "reconcile error that cannot be recovered occurred: vm size VM_SIZE_USSD does not support ultra disks in location test-location. select a different vm size or disable ultra disks. Object will not be requeued", + expect: func(g *WithT, s *mock_scalesets.MockScaleSetScopeMockRecorder, m *mock_scalesets.MockClientMockRecorder) { + s.ScaleSetSpec().Return(azure.ScaleSetSpec{ + Name: defaultVMSSName, + Size: "VM_SIZE_USSD", + Capacity: 2, + SSHKeyData: "ZmFrZXNzaGtleQo=", + AdditionalCapabilities: &infrav1.AdditionalCapabilities{ + UltraSSDEnabled: to.BoolPtr(true), + }, + }) + s.Location().AnyTimes().Return("test-location") + }, + }, + { + name: "fail to create a vm with ultra disk implicitly enabled by data disk but explicitly disabled via additional capabilities, when location not supported", + expectedError: "reconcile error that cannot be recovered occurred: vm size VM_SIZE_USSD does not support ultra disks in location test-location. select a different vm size or disable ultra disks. Object will not be requeued", + expect: func(g *WithT, s *mock_scalesets.MockScaleSetScopeMockRecorder, m *mock_scalesets.MockClientMockRecorder) { + s.ScaleSetSpec().Return(azure.ScaleSetSpec{ + Name: defaultVMSSName, + Size: "VM_SIZE_USSD", + Capacity: 2, + SSHKeyData: "ZmFrZXNzaGtleQo=", + DataDisks: []infrav1.DataDisk{ + { + ManagedDisk: &infrav1.ManagedDiskParameters{ + StorageAccountType: "UltraSSD_LRS", + }, + }, + }, + AdditionalCapabilities: &infrav1.AdditionalCapabilities{ + UltraSSDEnabled: to.BoolPtr(false), + }, + }) + s.Location().AnyTimes().Return("test-location") + }, + }, } for _, tc := range testcases { diff --git a/azure/services/virtualmachineimages/cache.go b/azure/services/virtualmachineimages/cache.go index a7aee5a59fe..191b8427147 100644 --- a/azure/services/virtualmachineimages/cache.go +++ b/azure/services/virtualmachineimages/cache.go @@ -61,7 +61,7 @@ func newCache(auth azure.Authorizer) *Cache { } } -// GetCache either creates a new VM images cache or returns an existing one.
+// GetCache either creates a new VM images cache or returns the existing one. func GetCache(auth azure.Authorizer) (*Cache, error) { var err error doOnce.Do(func() { diff --git a/azure/services/virtualmachineimages/images.go b/azure/services/virtualmachineimages/images.go index bd88849c969..7fb12115241 100644 --- a/azure/services/virtualmachineimages/images.go +++ b/azure/services/virtualmachineimages/images.go @@ -52,7 +52,7 @@ func (s *Service) GetDefaultUbuntuImage(ctx context.Context, location, k8sVersio osVersion := getUbuntuOSVersion(v.Major, v.Minor, v.Patch) publisher, offer := azure.DefaultImagePublisherID, azure.DefaultImageOfferID - skuID, version, err := s.getDefaultImageSKUIDAndVersion( + skuID, version, err := s.getSKUAndVersion( ctx, location, publisher, offer, k8sVersion, fmt.Sprintf("ubuntu-%s", osVersion)) if err != nil { return nil, errors.Wrap(err, "failed to get default image") @@ -89,18 +89,18 @@ func (s *Service) GetDefaultWindowsImage(ctx context.Context, location, k8sVersi osAndVersion = azure.DefaultWindowsOsAndVersion } + // Starting with 1.22 we default to containerd for Windows unless the runtime flag is set. + if v.GE(v122) && runtime != "dockershim" && !strings.HasSuffix(osAndVersion, "-containerd") { + osAndVersion += "-containerd" + } + publisher, offer := azure.DefaultImagePublisherID, azure.DefaultWindowsImageOfferID - skuID, version, err := s.getDefaultImageSKUIDAndVersion( + skuID, version, err := s.getSKUAndVersion( ctx, location, publisher, offer, k8sVersion, osAndVersion) if err != nil { return nil, errors.Wrap(err, "failed to get default image") } - // Starting with 1.22 we default to containerd for Windows unless the runtime flag is set. - if v.GTE(v122) && runtime != "dockershim" { - skuID += "-containerd" - } - defaultImage := &infrav1.Image{ Marketplace: &infrav1.AzureMarketplaceImage{ ImagePlan: infrav1.ImagePlan{ @@ -115,12 +115,14 @@ func (s *Service) GetDefaultWindowsImage(ctx context.Context, location, k8sVersi return defaultImage, nil } -// GetDefaultImageSKUID gets the SKU ID and version of the image to use for the provided version of Kubernetes. +// getSKUAndVersion gets the SKU ID and version of the image to use for the provided version of Kubernetes. 
// note: osAndVersion is expected to be in the format of {os}-{version} (ex: ubuntu-2004 or windows-2022) -func (s *Service) getDefaultImageSKUIDAndVersion(ctx context.Context, location, publisher, offer, k8sVersion, osAndVersion string) (string, string, error) { - ctx, _, done := tele.StartSpanWithLogger(ctx, "virtualmachineimages.AzureClient.getDefaultImageSKUIDAndVersion") +func (s *Service) getSKUAndVersion(ctx context.Context, location, publisher, offer, k8sVersion, osAndVersion string) (string, string, error) { + ctx, log, done := tele.StartSpanWithLogger(ctx, "virtualmachineimages.Service.getSKUAndVersion") defer done() + log.Info("Getting VM image SKU and version", "location", location, "publisher", publisher, "offer", offer, "k8sVersion", k8sVersion, "osAndVersion", osAndVersion) + v, err := semver.ParseTolerant(k8sVersion) if err != nil { return "", "", errors.Wrapf(err, "unable to parse Kubernetes version \"%s\" in spec, expected valid SemVer string", k8sVersion) @@ -171,6 +173,8 @@ func (s *Service) getDefaultImageSKUIDAndVersion(ctx context.Context, location, return "", "", errors.Errorf("no VM image found for publisher \"%s\" offer \"%s\" sku \"%s\" with Kubernetes version \"%s\"", publisher, offer, sku, k8sVersion) } + log.Info("Found VM image SKU and version", "location", location, "publisher", publisher, "offer", offer, "sku", sku, "version", version) + return sku, version, nil } diff --git a/azure/services/virtualmachineimages/images_test.go b/azure/services/virtualmachineimages/images_test.go index 305e8b4eada..fa673d3e0eb 100644 --- a/azure/services/virtualmachineimages/images_test.go +++ b/azure/services/virtualmachineimages/images_test.go @@ -473,7 +473,7 @@ func TestGetDefaultImageSKUID(t *testing.T) { List(gomock.Any(), location, azure.DefaultImagePublisherID, offer, gomock.Any()). Return(test.versions, nil) } - id, version, err := svc.getDefaultImageSKUIDAndVersion(context.TODO(), location, azure.DefaultImagePublisherID, + id, version, err := svc.getSKUAndVersion(context.TODO(), location, azure.DefaultImagePublisherID, offer, test.k8sVersion, test.osAndVersion) g := NewWithT(t) diff --git a/azure/services/virtualmachines/spec.go b/azure/services/virtualmachines/spec.go index c245519094b..d77e12113ed 100644 --- a/azure/services/virtualmachines/spec.go +++ b/azure/services/virtualmachines/spec.go @@ -49,6 +49,7 @@ type VMSpec struct { SpotVMOptions *infrav1.SpotVMOptions SecurityProfile *infrav1.SecurityProfile AdditionalTags infrav1.Tags + AdditionalCapabilities *infrav1.AdditionalCapabilities SKU resourceskus.SKU Image *infrav1.Image BootstrapData string @@ -306,6 +307,9 @@ func (s *VMSpec) generateNICRefs() *[]compute.NetworkInterfaceReference { func (s *VMSpec) generateAdditionalCapabilities() *compute.AdditionalCapabilities { var capabilities *compute.AdditionalCapabilities + + // Provisionally detect whether there is any Data Disk defined which uses UltraSSDs. + // If that's the case, enable the UltraSSD capability. for _, dataDisk := range s.DataDisks { if dataDisk.ManagedDisk != nil && dataDisk.ManagedDisk.StorageAccountType == string(compute.StorageAccountTypesUltraSSDLRS) { capabilities = &compute.AdditionalCapabilities{ @@ -314,6 +318,18 @@ func (s *VMSpec) generateAdditionalCapabilities() *compute.AdditionalCapabilitie break } } + + // Set Additional Capabilities if any is present on the spec. 
+ if s.AdditionalCapabilities != nil { + if capabilities == nil { + capabilities = &compute.AdditionalCapabilities{} + } + // Set UltraSSDEnabled if a specific value is set on the spec for it. + if s.AdditionalCapabilities.UltraSSDEnabled != nil { + capabilities.UltraSSDEnabled = s.AdditionalCapabilities.UltraSSDEnabled + } + } + return capabilities } diff --git a/azure/services/virtualmachines/spec_test.go b/azure/services/virtualmachines/spec_test.go index 60e03345843..f08a1248529 100644 --- a/azure/services/virtualmachines/spec_test.go +++ b/azure/services/virtualmachines/spec_test.go @@ -661,6 +661,184 @@ func TestParameters(t *testing.T) { }, expectedError: "reconcile error that cannot be recovered occurred: vm size Standard_D2v3 does not support ultra disks in location test-location. select a different vm size or disable ultra disks. Object will not be requeued", }, + { + name: "creates a vm with AdditionalCapabilities.UltraSSDEnabled false, if an ultra disk is specified as data disk but AdditionalCapabilities.UltraSSDEnabled is false", + spec: &VMSpec{ + Name: "my-ultra-ssd-vm", + Role: infrav1.Node, + NICIDs: []string{"my-nic"}, + SSHKeyData: "fakesshpublickey", + Size: "Standard_D2v3", + Location: "test-location", + Zone: "1", + Image: &infrav1.Image{ID: to.StringPtr("fake-image-id")}, + AdditionalCapabilities: &infrav1.AdditionalCapabilities{ + UltraSSDEnabled: to.BoolPtr(false), + }, + DataDisks: []infrav1.DataDisk{ + { + NameSuffix: "myDiskWithUltraDisk", + DiskSizeGB: 128, + Lun: to.Int32Ptr(1), + ManagedDisk: &infrav1.ManagedDiskParameters{ + StorageAccountType: "UltraSSD_LRS", + }, + }, + }, + SKU: validSKUWithUltraSSD, + }, + existing: nil, + expect: func(g *WithT, result interface{}) { + g.Expect(result).To(BeAssignableToTypeOf(compute.VirtualMachine{})) + g.Expect(result.(compute.VirtualMachine).AdditionalCapabilities.UltraSSDEnabled).To(Equal(to.BoolPtr(false))) + expectedDataDisks := &[]compute.DataDisk{ + { + Lun: to.Int32Ptr(1), + Name: to.StringPtr("my-ultra-ssd-vm_myDiskWithUltraDisk"), + CreateOption: "Empty", + DiskSizeGB: to.Int32Ptr(128), + ManagedDisk: &compute.ManagedDiskParameters{ + StorageAccountType: "UltraSSD_LRS", + }, + }, + } + g.Expect(gomockinternal.DiffEq(expectedDataDisks).Matches(result.(compute.VirtualMachine).StorageProfile.DataDisks)).To(BeTrue(), cmp.Diff(expectedDataDisks, result.(compute.VirtualMachine).StorageProfile.DataDisks)) + }, + expectedError: "", + }, + { + name: "creates a vm with AdditionalCapabilities.UltraSSDEnabled true, if an ultra disk is specified as data disk and no AdditionalCapabilities.UltraSSDEnabled is set", + spec: &VMSpec{ + Name: "my-ultra-ssd-vm", + Role: infrav1.Node, + NICIDs: []string{"my-nic"}, + SSHKeyData: "fakesshpublickey", + Size: "Standard_D2v3", + Location: "test-location", + Zone: "1", + Image: &infrav1.Image{ID: to.StringPtr("fake-image-id")}, + DataDisks: []infrav1.DataDisk{ + { + NameSuffix: "myDiskWithUltraDisk", + DiskSizeGB: 128, + Lun: to.Int32Ptr(1), + ManagedDisk: &infrav1.ManagedDiskParameters{ + StorageAccountType: "UltraSSD_LRS", + }, + }, + }, + SKU: validSKUWithUltraSSD, + }, + existing: nil, + expect: func(g *WithT, result interface{}) { + g.Expect(result).To(BeAssignableToTypeOf(compute.VirtualMachine{})) + g.Expect(result.(compute.VirtualMachine).AdditionalCapabilities.UltraSSDEnabled).To(Equal(to.BoolPtr(true))) + expectedDataDisks := &[]compute.DataDisk{ + { + Lun: to.Int32Ptr(1), + Name: to.StringPtr("my-ultra-ssd-vm_myDiskWithUltraDisk"), + CreateOption: "Empty", + DiskSizeGB: 
to.Int32Ptr(128), + ManagedDisk: &compute.ManagedDiskParameters{ + StorageAccountType: "UltraSSD_LRS", + }, + }, + } + g.Expect(gomockinternal.DiffEq(expectedDataDisks).Matches(result.(compute.VirtualMachine).StorageProfile.DataDisks)).To(BeTrue(), cmp.Diff(expectedDataDisks, result.(compute.VirtualMachine).StorageProfile.DataDisks)) + }, + expectedError: "", + }, + { + name: "creates a vm with AdditionalCapabilities.UltraSSDEnabled true, if an ultra disk is specified as data disk and AdditionalCapabilities.UltraSSDEnabled is true", + spec: &VMSpec{ + Name: "my-ultra-ssd-vm", + Role: infrav1.Node, + NICIDs: []string{"my-nic"}, + SSHKeyData: "fakesshpublickey", + Size: "Standard_D2v3", + Location: "test-location", + Zone: "1", + Image: &infrav1.Image{ID: to.StringPtr("fake-image-id")}, + AdditionalCapabilities: &infrav1.AdditionalCapabilities{ + UltraSSDEnabled: to.BoolPtr(true), + }, + DataDisks: []infrav1.DataDisk{ + { + NameSuffix: "myDiskWithUltraDisk", + DiskSizeGB: 128, + Lun: to.Int32Ptr(1), + ManagedDisk: &infrav1.ManagedDiskParameters{ + StorageAccountType: "UltraSSD_LRS", + }, + }, + }, + SKU: validSKUWithUltraSSD, + }, + existing: nil, + expect: func(g *WithT, result interface{}) { + g.Expect(result).To(BeAssignableToTypeOf(compute.VirtualMachine{})) + g.Expect(result.(compute.VirtualMachine).AdditionalCapabilities.UltraSSDEnabled).To(Equal(to.BoolPtr(true))) + expectedDataDisks := &[]compute.DataDisk{ + { + Lun: to.Int32Ptr(1), + Name: to.StringPtr("my-ultra-ssd-vm_myDiskWithUltraDisk"), + CreateOption: "Empty", + DiskSizeGB: to.Int32Ptr(128), + ManagedDisk: &compute.ManagedDiskParameters{ + StorageAccountType: "UltraSSD_LRS", + }, + }, + } + g.Expect(gomockinternal.DiffEq(expectedDataDisks).Matches(result.(compute.VirtualMachine).StorageProfile.DataDisks)).To(BeTrue(), cmp.Diff(expectedDataDisks, result.(compute.VirtualMachine).StorageProfile.DataDisks)) + }, + expectedError: "", + }, + { + name: "creates a vm with AdditionalCapabilities.UltraSSDEnabled true, if no ultra disk is specified as data disk and AdditionalCapabilities.UltraSSDEnabled is true", + spec: &VMSpec{ + Name: "my-ultra-ssd-vm", + Role: infrav1.Node, + NICIDs: []string{"my-nic"}, + SSHKeyData: "fakesshpublickey", + Size: "Standard_D2v3", + Location: "test-location", + Zone: "1", + Image: &infrav1.Image{ID: to.StringPtr("fake-image-id")}, + AdditionalCapabilities: &infrav1.AdditionalCapabilities{ + UltraSSDEnabled: to.BoolPtr(true), + }, + SKU: validSKUWithUltraSSD, + }, + existing: nil, + expect: func(g *WithT, result interface{}) { + g.Expect(result).To(BeAssignableToTypeOf(compute.VirtualMachine{})) + g.Expect(result.(compute.VirtualMachine).AdditionalCapabilities.UltraSSDEnabled).To(Equal(to.BoolPtr(true))) + }, + expectedError: "", + }, + { + name: "creates a vm with AdditionalCapabilities.UltraSSDEnabled false, if no ultra disk is specified as data disk and AdditionalCapabilities.UltraSSDEnabled is false", + spec: &VMSpec{ + Name: "my-ultra-ssd-vm", + Role: infrav1.Node, + NICIDs: []string{"my-nic"}, + SSHKeyData: "fakesshpublickey", + Size: "Standard_D2v3", + Location: "test-location", + Zone: "1", + Image: &infrav1.Image{ID: to.StringPtr("fake-image-id")}, + AdditionalCapabilities: &infrav1.AdditionalCapabilities{ + UltraSSDEnabled: to.BoolPtr(false), + }, + SKU: validSKUWithUltraSSD, + }, + existing: nil, + expect: func(g *WithT, result interface{}) { + g.Expect(result).To(BeAssignableToTypeOf(compute.VirtualMachine{})) + 
g.Expect(result.(compute.VirtualMachine).AdditionalCapabilities.UltraSSDEnabled).To(Equal(to.BoolPtr(false))) + }, + expectedError: "", + }, } for _, tc := range testcases { tc := tc diff --git a/azure/types.go b/azure/types.go index a598375063c..28933176860 100644 --- a/azure/types.go +++ b/azure/types.go @@ -105,6 +105,9 @@ type AgentPoolSpec struct { // EnableUltraSSD enables the storage type UltraSSD_LRS for the agent pool. EnableUltraSSD *bool `json:"enableUltraSSD,omitempty"` + + // OSType specifies the operating system for the node pool. Allowed values are 'Linux' and 'Windows' + OSType *string `json:"osType,omitempty"` } // ScaleSetSpec defines the specification for a Scale Set. @@ -126,6 +129,7 @@ type ScaleSetSpec struct { UserAssignedIdentities []infrav1.UserAssignedIdentity SecurityProfile *infrav1.SecurityProfile SpotVMOptions *infrav1.SpotVMOptions + AdditionalCapabilities *infrav1.AdditionalCapabilities FailureDomains []string NetworkInterfaces []infrav1.AzureNetworkInterface } diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_azureclusteridentities.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_azureclusteridentities.yaml index 22517fe9ac5..761c7552a58 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_azureclusteridentities.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_azureclusteridentities.yaml @@ -421,16 +421,17 @@ spec: type: object resourceID: description: ResourceID is the Azure resource ID for the User Assigned - MSI resource. Not currently supported. + MSI resource. Only applicable when type is UserAssignedMSI. type: string tenantID: description: TenantID is the service principal primary tenant id. type: string type: description: Type is the type of Azure Identity used. ServicePrincipal, - ServicePrincipalCertificate, or ManualServicePrincipal. + ServicePrincipalCertificate, UserAssignedMSI or ManualServicePrincipal. enum: - ServicePrincipal + - UserAssignedMSI - ManualServicePrincipal - ServicePrincipalCertificate type: string diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremachines.yaml index 04a6a140ca9..26918c6ba7f 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremachines.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremachines.yaml @@ -1050,6 +1050,17 @@ spec: is set to true with a VMSize that does not support it, Azure will return an error. type: boolean + additionalCapabilities: + description: AdditionalCapabilities specifies additional capabilities + enabled or disabled on the virtual machine. + properties: + ultraSSDEnabled: + description: UltraSSDEnabled enables or disables Azure UltraSSD + capability for the virtual machine. Defaults to true if Ultra + SSD data disks are specified, otherwise it doesn't set the capability + on the VM. + type: boolean + type: object additionalTags: additionalProperties: type: string diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremachinetemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremachinetemplates.yaml index 0c9b126785b..680b621b5eb 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremachinetemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremachinetemplates.yaml @@ -818,6 +818,17 @@ spec: If AcceleratedNetworking is set to true with a VMSize that does not support it, Azure will return an error. 
              type: boolean
+            additionalCapabilities:
+              description: AdditionalCapabilities specifies additional capabilities
+                enabled or disabled on the virtual machine.
+              properties:
+                ultraSSDEnabled:
+                  description: UltraSSDEnabled enables or disables Azure UltraSSD
+                    capability for the virtual machine. Defaults to true if Ultra
+                    SSD data disks are specified, otherwise it doesn't set the capability
+                    on the VM.
+                  type: boolean
+              type: object
             additionalTags:
               additionalProperties:
                 type: string
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedmachinepools.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedmachinepools.yaml
index 425b15f688a..ed3eb1dcc92 100644
--- a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedmachinepools.yaml
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedmachinepools.yaml
@@ -245,6 +245,13 @@ spec:
                 - Ephemeral
                 - Managed
                 type: string
+              osType:
+                description: 'OSType specifies the virtual machine operating system.
+                  Defaults to Linux. Possible values include: ''Linux'', ''Windows'''
+                enum:
+                - Linux
+                - Windows
+                type: string
               providerIDList:
                 description: ProviderIDList is the unique identifier as specified
                   by the cloud provider.
diff --git a/docs/book/src/developers/development.md b/docs/book/src/developers/development.md
index 08659ce7e74..be0140c1f50 100644
--- a/docs/book/src/developers/development.md
+++ b/docs/book/src/developers/development.md
@@ -56,7 +56,7 @@
    - `sudo apt install gettext` on Windows + WSL2.
    - `sudo apt install gettext` on Ubuntu Linux.
 4. Install [KIND][kind]
-   - `GO111MODULE="on" go get sigs.k8s.io/kind@v0.9.0`.
+   - `GO111MODULE="on" go get sigs.k8s.io/kind@v0.14.0`.
 5. Install [Kustomize][kustomize]
    - `brew install kustomize` on macOS.
    - [install instructions](https://kubectl.docs.kubernetes.io/installation/kustomize/) on Windows + WSL2.
diff --git a/docs/book/src/topics/data-disks.md b/docs/book/src/topics/data-disks.md
index 605de04efe0..d5e76aeb8af 100644
--- a/docs/book/src/topics/data-disks.md
+++ b/docs/book/src/topics/data-disks.md
@@ -31,6 +31,24 @@ To check all available vm-sizes in a given region which supports availability zo
 ```bash
 az vm list-skus -l <location> -z -s <vm-size>
 ```
+
+Provided that the chosen region and zone support Ultra disks, Azure Machine objects that specify Ultra disks as data disks will have their virtual machines created with the `AdditionalCapabilities.UltraSSDEnabled` additional capability set to `true`. This capability can also be set manually on the Azure Machine spec, in which case it overrides the automatically chosen value (if any).
+
+When the chosen StorageAccountType is `UltraSSD_LRS`, caching is not supported for the disk and the corresponding `cachingType` field must be set to `None`. In this configuration, if no value is set, `cachingType` defaults to `None`.
+
+See [Ultra disk](https://docs.microsoft.com/en-us/azure/virtual-machines/disks-types#ultra-disk) for ultra disk performance and GA scope.
+
+### Ultra disk support for Persistent Volumes
+First, to list all available vm-sizes in a given region that support availability zones and have the `UltraSSDAvailable` capability, execute the following Azure CLI command:
+```bash
+az vm list-skus -l <location> -z -s <vm-size>
+```
+
+Provided that the chosen region and zone support Ultra disks, Ultra disk based Persistent Volumes can be attached to Pods scheduled on specific Azure Machines, as long as the spec field `.spec.additionalCapabilities.ultraSSDEnabled` on those Machines has been set to `true`.
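+
+For example, a minimal `AzureMachineTemplate` snippet that sets the capability explicitly might look like the following; this is an illustrative sketch and the resource name is a placeholder, not taken from this change:
+
+```yaml
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: AzureMachineTemplate
+metadata:
+  name: example-machine-template
+spec:
+  template:
+    spec:
+      vmSize: Standard_D2s_v3
+      additionalCapabilities:
+        ultraSSDEnabled: true
+```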
+NOTE: A misconfiguration or lack of this field on the targeted Node's Machine will result in the Pod using the PV being unable to reach the Running phase.
+
+See [Use ultra disks dynamically with a storage class](https://docs.microsoft.com/en-us/azure/aks/use-ultra-disks#use-ultra-disks-dynamically-with-a-storage-class) for more information on how to configure an Ultra disk based StorageClass and PersistentVolumeClaim.
+
 See [Ultra disk](https://docs.microsoft.com/en-us/azure/virtual-machines/disks-types#ultra-disk) for ultra disk performance and GA scope.
 
 ## Configuring partitions, file systems and mounts
@@ -101,4 +119,4 @@ spec:
       - nameSuffix: mydisk
         diskSizeGB: 128
         lun: 1
-````
\ No newline at end of file
+````
diff --git a/docs/book/src/topics/managedcluster.md b/docs/book/src/topics/managedcluster.md
index 0075742736a..8ea3ff3a22f 100644
--- a/docs/book/src/topics/managedcluster.md
+++ b/docs/book/src/topics/managedcluster.md
@@ -338,6 +338,25 @@ spec:
     value: kafka
 ```
 
+### AKS Node Pool OS Type
+If your cluster uses the Azure network plugin (`AzureManagedControlPlane.networkPlugin`), you can set the operating system
+for your User node pools. The `osType` field is immutable and can only be set at creation time; it defaults to `Linux` and
+can be either `Linux` or `Windows`. Note that Windows node pool names cannot be longer than 6 characters.
+
+```yaml
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: AzureManagedMachinePool
+metadata:
+  name: pool0
+spec:
+  mode: User
+  osDiskSizeGB: 30
+  sku: Standard_D2s_v3
+  osDiskType: "Ephemeral"
+  osType: Windows
+```
+
+
 ### Enable AKS features with custom headers (--aks-custom-headers)
 
 To enable some AKS cluster / node pool features you need to pass special headers to the cluster / node pool create request.
 For example, to [add a node pool for GPU nodes](https://docs.microsoft.com/en-us/azure/aks/gpu-cluster#add-a-node-pool-for-gpu-nodes),
@@ -419,24 +438,25 @@ those can only be set during the creation time.
Following is the list of immutable fields for managed clusters: -| CRD | jsonPath | Comment | -|--------------------------|------------------------------------|---------------------------| -| AzureManagedControlPlane | .spec.subscriptionID | | -| AzureManagedControlPlane | .spec.resourceGroupName | | -| AzureManagedControlPlane | .spec.nodeResourceGroupName | | -| AzureManagedControlPlane | .spec.location | | -| AzureManagedControlPlane | .spec.sshPublicKey | | -| AzureManagedControlPlane | .spec.dnsServiceIP | | -| AzureManagedControlPlane | .spec.networkPlugin | | -| AzureManagedControlPlane | .spec.networkPolicy | | -| AzureManagedControlPlane | .spec.loadBalancerSKU | | -| AzureManagedControlPlane | .spec.apiServerAccessProfile | except AuthorizedIPRanges | -| AzureManagedMachinePool | .spec.sku | | -| AzureManagedMachinePool | .spec.osDiskSizeGB | | -| AzureManagedMachinePool | .spec.osDiskType | | -| AzureManagedMachinePool | .spec.taints | | -| AzureManagedMachinePool | .spec.availabilityZones | | -| AzureManagedMachinePool | .spec.maxPods | | +| CRD | jsonPath | Comment | +|--------------------------|------------------------------|---------------------------| +| AzureManagedControlPlane | .spec.subscriptionID | | +| AzureManagedControlPlane | .spec.resourceGroupName | | +| AzureManagedControlPlane | .spec.nodeResourceGroupName | | +| AzureManagedControlPlane | .spec.location | | +| AzureManagedControlPlane | .spec.sshPublicKey | | +| AzureManagedControlPlane | .spec.dnsServiceIP | | +| AzureManagedControlPlane | .spec.networkPlugin | | +| AzureManagedControlPlane | .spec.networkPolicy | | +| AzureManagedControlPlane | .spec.loadBalancerSKU | | +| AzureManagedControlPlane | .spec.apiServerAccessProfile | except AuthorizedIPRanges | +| AzureManagedMachinePool | .spec.sku | | +| AzureManagedMachinePool | .spec.osDiskSizeGB | | +| AzureManagedMachinePool | .spec.osDiskType | | +| AzureManagedMachinePool | .spec.taints | | +| AzureManagedMachinePool | .spec.availabilityZones | | +| AzureManagedMachinePool | .spec.maxPods | | +| AzureManagedMachinePool | .spec.osType | | ## Features diff --git a/docs/book/src/topics/multitenancy.md b/docs/book/src/topics/multitenancy.md index 5c00b221d72..223b9981eb8 100644 --- a/docs/book/src/topics/multitenancy.md +++ b/docs/book/src/topics/multitenancy.md @@ -4,7 +4,9 @@ To enable single controller multi-tenancy, a different Identity can be added to This is achieved using the [aad-pod-identity](https://azure.github.io/aad-pod-identity) library. -## Service Principal With Client Password +## Identity Types + +### Service Principal With Client Password Once a new SP Identity is created in Azure, the corresponding values should be used to create an `AzureClusterIdentity` resource: @@ -42,7 +44,7 @@ data: clientSecret: ``` -## Service Principal With Certificate +### Service Principal With Certificate Once a new SP Identity is created in Azure, the corresponding values should be used to create an `AzureClusterIdentity` resource: @@ -87,7 +89,47 @@ data: password: PASSWORD ``` -## Manual Service Principal Identity +### User-Assigned Managed Identity + + + +#### Prerequisites + +1. [Create](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/how-manage-user-assigned-managed-identities?pivots=identity-mi-methods-azp#create-a-user-assigned-managed-identity) a user-assigned managed identity in Azure. +2. 
   [Create a role assignment](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/howto-assign-access-portal#use-azure-rbac-to-assign-a-managed-identity-access-to-another-resource) to give the identity Contributor access to the Azure subscription where the workload cluster will be created.
+3. Configure the identity on the management cluster nodes by adding it to each worker node VM. If using AKS as the management cluster, see [these instructions](https://docs.microsoft.com/en-us/azure/aks/use-managed-identity).
+
+#### Creating the AzureClusterIdentity
+
+After a user-assigned managed identity is created in Azure and assigned to the management cluster, the corresponding values should be used to create an `AzureClusterIdentity` resource (a sketch of how the identity is then referenced from an `AzureCluster` appears at the end of this topic):
+
+```yaml
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: AzureClusterIdentity
+metadata:
+  name: example-identity
+  namespace: default
+spec:
+  type: UserAssignedMSI
+  tenantID: <tenant-id>
+  clientID: <client-id>
+  resourceID: <resource-id>
+  allowedNamespaces:
+    list:
+    - <cluster-namespace>
+```
+
+#### Assigning VM identities for cloud-provider authentication
+
+When using a user-assigned managed identity to create the workload cluster, a VM identity should also be assigned to each control-plane machine in the workload cluster for Cloud Provider to use. See [here](../topics/vm-identity.md#managed-identities) for more information.
+
+### Manual Service Principal Identity
 
 Manual Service Principal Identity is similar to [Service Principal Identity](https://capz.sigs.k8s.io/topics/multitenancy.html#service-principal-identity) except that the service principal's `clientSecret` is directly fetched from the secret containing it. To use this type of identity, set the identity type as `ManualServicePrincipal` in `AzureClusterIdentity`. For example,
 
@@ -143,7 +185,3 @@ spec:
 ```
 
 For more details on how aad-pod-identity works, please check the guide [here](https://azure.github.io/aad-pod-identity/docs/).
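+
+As a closing sketch, an `AzureClusterIdentity` of any of the types above is put to use by referencing it from the workload cluster's `AzureCluster` via `identityRef`; the cluster name, location, and resource group here are illustrative:
+
+```yaml
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: AzureCluster
+metadata:
+  name: example-cluster
+  namespace: default
+spec:
+  location: eastus
+  resourceGroup: example-cluster-rg
+  identityRef:
+    apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+    kind: AzureClusterIdentity
+    name: example-identity
+    namespace: default
+```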
- -## User Assigned Identity - -_will be supported in a future release_ \ No newline at end of file diff --git a/exp/api/v1alpha3/azuremanagedmachinepool_conversion.go b/exp/api/v1alpha3/azuremanagedmachinepool_conversion.go index 503fa8b4bf6..659c27ffd2b 100644 --- a/exp/api/v1alpha3/azuremanagedmachinepool_conversion.go +++ b/exp/api/v1alpha3/azuremanagedmachinepool_conversion.go @@ -42,6 +42,7 @@ func (src *AzureManagedMachinePool) ConvertTo(dstRaw conversion.Hub) error { dst.Spec.AvailabilityZones = restored.Spec.AvailabilityZones dst.Spec.MaxPods = restored.Spec.MaxPods dst.Spec.OsDiskType = restored.Spec.OsDiskType + dst.Spec.OSType = restored.Spec.OSType dst.Spec.NodeLabels = restored.Spec.NodeLabels dst.Spec.EnableUltraSSD = restored.Spec.EnableUltraSSD diff --git a/exp/api/v1alpha3/zz_generated.conversion.go b/exp/api/v1alpha3/zz_generated.conversion.go index 00ae36ca362..398144d856f 100644 --- a/exp/api/v1alpha3/zz_generated.conversion.go +++ b/exp/api/v1alpha3/zz_generated.conversion.go @@ -890,6 +890,7 @@ func autoConvert_v1beta1_AzureManagedMachinePoolSpec_To_v1alpha3_AzureManagedMac // WARNING: in.MaxPods requires manual conversion: does not exist in peer-type // WARNING: in.OsDiskType requires manual conversion: does not exist in peer-type // WARNING: in.EnableUltraSSD requires manual conversion: does not exist in peer-type + // WARNING: in.OSType requires manual conversion: does not exist in peer-type return nil } diff --git a/exp/api/v1alpha4/azuremanagedmachinepool_conversion.go b/exp/api/v1alpha4/azuremanagedmachinepool_conversion.go index 8d329b9df95..f7215d530a4 100644 --- a/exp/api/v1alpha4/azuremanagedmachinepool_conversion.go +++ b/exp/api/v1alpha4/azuremanagedmachinepool_conversion.go @@ -42,6 +42,7 @@ func (src *AzureManagedMachinePool) ConvertTo(dstRaw conversion.Hub) error { dst.Spec.AvailabilityZones = restored.Spec.AvailabilityZones dst.Spec.MaxPods = restored.Spec.MaxPods dst.Spec.OsDiskType = restored.Spec.OsDiskType + dst.Spec.OSType = restored.Spec.OSType dst.Spec.NodeLabels = restored.Spec.NodeLabels dst.Spec.EnableUltraSSD = restored.Spec.EnableUltraSSD diff --git a/exp/api/v1alpha4/zz_generated.conversion.go b/exp/api/v1alpha4/zz_generated.conversion.go index 857184a61fc..6843b3f5a7e 100644 --- a/exp/api/v1alpha4/zz_generated.conversion.go +++ b/exp/api/v1alpha4/zz_generated.conversion.go @@ -1190,6 +1190,7 @@ func autoConvert_v1beta1_AzureManagedMachinePoolSpec_To_v1alpha4_AzureManagedMac // WARNING: in.MaxPods requires manual conversion: does not exist in peer-type // WARNING: in.OsDiskType requires manual conversion: does not exist in peer-type // WARNING: in.EnableUltraSSD requires manual conversion: does not exist in peer-type + // WARNING: in.OSType requires manual conversion: does not exist in peer-type return nil } diff --git a/exp/api/v1beta1/azuremanagedmachinepool_types.go b/exp/api/v1beta1/azuremanagedmachinepool_types.go index cb07ea973ca..fb82771a1d8 100644 --- a/exp/api/v1beta1/azuremanagedmachinepool_types.go +++ b/exp/api/v1beta1/azuremanagedmachinepool_types.go @@ -19,6 +19,7 @@ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" + "sigs.k8s.io/cluster-api-provider-azure/azure" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" capierrors "sigs.k8s.io/cluster-api/errors" ) @@ -32,6 +33,9 @@ const ( // NodePoolModeUser represents mode user for azuremachinepool. 
	NodePoolModeUser NodePoolMode = "User"
+
+	// DefaultOSType represents the default operating system for AzureManagedMachinePool node pools.
+	DefaultOSType string = azure.LinuxOS
 )
 
 // NodePoolMode enumerates the values for agent pool mode.
@@ -89,6 +93,11 @@ type AzureManagedMachinePoolSpec struct {
 	// EnableUltraSSD enables the storage type UltraSSD_LRS for the agent pool.
 	// +optional
 	EnableUltraSSD *bool `json:"enableUltraSSD,omitempty"`
+
+	// OSType specifies the virtual machine operating system. Defaults to Linux. Possible values include: 'Linux', 'Windows'
+	// +kubebuilder:validation:Enum=Linux;Windows
+	// +optional
+	OSType *string `json:"osType,omitempty"`
 }
 
 // ManagedMachinePoolScaling specifies scaling options.
diff --git a/exp/api/v1beta1/azuremanagedmachinepool_webhook.go b/exp/api/v1beta1/azuremanagedmachinepool_webhook.go
index 17259f2ff8d..7cf9a6f3bb8 100644
--- a/exp/api/v1beta1/azuremanagedmachinepool_webhook.go
+++ b/exp/api/v1beta1/azuremanagedmachinepool_webhook.go
@@ -45,6 +45,10 @@ func (m *AzureManagedMachinePool) Default(client client.Client) {
 	if m.Spec.Name == nil || *m.Spec.Name == "" {
 		m.Spec.Name = &m.Name
 	}
+
+	if m.Spec.OSType == nil {
+		m.Spec.OSType = to.StringPtr(DefaultOSType)
+	}
 }
 
 //+kubebuilder:webhook:verbs=update;delete,path=/validate-infrastructure-cluster-x-k8s-io-v1beta1-azuremanagedmachinepool,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=azuremanagedmachinepools,versions=v1beta1,name=validation.azuremanagedmachinepools.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1
@@ -53,6 +57,8 @@ func (m *AzureManagedMachinePool) ValidateCreate(client client.Client) error {
 	validators := []func() error{
 		m.validateMaxPods,
+		m.validateOSType,
+		m.validateName,
 	}
 
 	var errs []error
@@ -269,6 +275,31 @@ func (m *AzureManagedMachinePool) validateMaxPods() error {
 	return nil
 }
 
+func (m *AzureManagedMachinePool) validateOSType() error {
+	if m.Spec.Mode == string(NodePoolModeSystem) {
+		if m.Spec.OSType != nil && *m.Spec.OSType != azure.LinuxOS {
+			return field.Forbidden(
+				field.NewPath("Spec", "OSType"),
+				"System node pool must have OSType 'Linux'")
+		}
+	}
+
+	return nil
+}
+
+func (m *AzureManagedMachinePool) validateName() error {
+	if m.Spec.OSType != nil && *m.Spec.OSType == azure.WindowsOS {
+		if len(m.Name) > 6 {
+			return field.Invalid(
+				field.NewPath("Name"),
+				m.Name,
+				"Windows agent pool name cannot be longer than 6 characters.")
+		}
+	}
+
+	return nil
+}
+
 func ensureStringSlicesAreEqual(a []string, b []string) bool {
 	if len(a) != len(b) {
 		return false
diff --git a/exp/api/v1beta1/azuremanagedmachinepool_webhook_test.go b/exp/api/v1beta1/azuremanagedmachinepool_webhook_test.go
index 006f03926ed..462033df2dc 100644
--- a/exp/api/v1beta1/azuremanagedmachinepool_webhook_test.go
+++ b/exp/api/v1beta1/azuremanagedmachinepool_webhook_test.go
@@ -23,6 +23,7 @@ import (
 
 	"github.com/Azure/go-autorest/autorest/to"
 	.
"github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -47,6 +48,7 @@ func TestAzureManagedMachinePoolDefaultingWebhook(t *testing.T) { g.Expect(ok).To(BeTrue()) g.Expect(val).To(Equal("System")) g.Expect(*ammp.Spec.Name).To(Equal("fooName")) + g.Expect(*ammp.Spec.OSType).To(Equal(azure.LinuxOS)) t.Logf("Testing ammp defaulting webhook with empty string name specified in Spec") emptyName := "" @@ -511,6 +513,54 @@ func TestAzureManagedMachinePool_ValidateCreate(t *testing.T) { wantErr: true, errorLen: 1, }, + { + name: "ostype Windows with System mode not allowed", + ammp: &AzureManagedMachinePool{ + Spec: AzureManagedMachinePoolSpec{ + Mode: "System", + OSType: to.StringPtr(azure.WindowsOS), + }, + }, + wantErr: true, + errorLen: 1, + }, + { + name: "ostype windows with User mode", + ammp: &AzureManagedMachinePool{ + Spec: AzureManagedMachinePoolSpec{ + Mode: "User", + OSType: to.StringPtr(azure.WindowsOS), + }, + }, + wantErr: false, + }, + { + name: "Windows clusters with 6char or less name", + ammp: &AzureManagedMachinePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pool0", + }, + Spec: AzureManagedMachinePoolSpec{ + Mode: "User", + OSType: to.StringPtr(azure.WindowsOS), + }, + }, + wantErr: false, + }, + { + name: "Windows clusters with more than 6char names are not allowed", + ammp: &AzureManagedMachinePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pool0-name-too-long", + }, + Spec: AzureManagedMachinePoolSpec{ + Mode: "User", + OSType: to.StringPtr(azure.WindowsOS), + }, + }, + wantErr: true, + errorLen: 1, + }, } var client client.Client for _, tc := range tests { diff --git a/exp/api/v1beta1/zz_generated.deepcopy.go b/exp/api/v1beta1/zz_generated.deepcopy.go index 5d3953ec4a3..a96951774f8 100644 --- a/exp/api/v1beta1/zz_generated.deepcopy.go +++ b/exp/api/v1beta1/zz_generated.deepcopy.go @@ -846,6 +846,11 @@ func (in *AzureManagedMachinePoolSpec) DeepCopyInto(out *AzureManagedMachinePool *out = new(bool) **out = **in } + if in.OSType != nil { + in, out := &in.OSType, &out.OSType + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureManagedMachinePoolSpec. diff --git a/go.mod b/go.mod index da964344bb6..ed7f42cb9c3 100644 --- a/go.mod +++ b/go.mod @@ -34,6 +34,7 @@ require ( go.opentelemetry.io/otel/trace v1.4.0 golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871 golang.org/x/mod v0.5.1 + golang.org/x/text v0.3.7 helm.sh/helm/v3 v3.8.1 k8s.io/api v0.23.5 k8s.io/apimachinery v0.23.5 @@ -180,7 +181,6 @@ require ( golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect - golang.org/x/text v0.3.7 // indirect golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect diff --git a/hack/create-custom-cloud-provider-config.sh b/hack/create-custom-cloud-provider-config.sh new file mode 100755 index 00000000000..86231a14624 --- /dev/null +++ b/hack/create-custom-cloud-provider-config.sh @@ -0,0 +1,33 @@ +#!/bin/bash +# Copyright 2022 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -o errexit
+set -o nounset
+set -o pipefail
+set +o xtrace
+
+# Install kubectl
+REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
+KUBECTL="${REPO_ROOT}/hack/tools/bin/kubectl"
+make --directory="${REPO_ROOT}" "${KUBECTL##*/}"
+
+if [[ -n "${CUSTOM_CLOUD_PROVIDER_CONFIG:-}" ]]; then
+    curl -sL -o tmp_azure_json "${CUSTOM_CLOUD_PROVIDER_CONFIG}"
+    envsubst < tmp_azure_json > azure_json
+    "${KUBECTL}" create secret generic "${CLUSTER_NAME}-control-plane-azure-json" \
+        --from-file=control-plane-azure.json=azure_json \
+        --from-file=worker-node-azure.json=azure_json
+    rm tmp_azure_json azure_json
+fi
diff --git a/hack/create-dev-cluster.sh b/hack/create-dev-cluster.sh
index 8adf6838e42..c186d30655d 100755
--- a/hack/create-dev-cluster.sh
+++ b/hack/create-dev-cluster.sh
@@ -48,7 +48,7 @@ export CONTROL_PLANE_MACHINE_COUNT=${CONTROL_PLANE_MACHINE_COUNT:-3}
 export AZURE_CONTROL_PLANE_MACHINE_TYPE="${CONTROL_PLANE_MACHINE_TYPE:-Standard_D2s_v3}"
 export AZURE_NODE_MACHINE_TYPE="${NODE_MACHINE_TYPE:-Standard_D2s_v3}"
 export WORKER_MACHINE_COUNT=${WORKER_MACHINE_COUNT:-2}
-export KUBERNETES_VERSION="${KUBERNETES_VERSION:-v1.22.1}"
+export KUBERNETES_VERSION="${KUBERNETES_VERSION:-v1.22.11}"
 export CLUSTER_TEMPLATE="${CLUSTER_TEMPLATE:-cluster-template.yaml}"
 
 # identity secret settings.
diff --git a/hack/create-identity-secret.sh b/hack/create-identity-secret.sh
index ee0190d475b..3c768d542ba 100755
--- a/hack/create-identity-secret.sh
+++ b/hack/create-identity-secret.sh
@@ -21,7 +21,7 @@ set +o xtrace
 # Install kubectl
 REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
 KUBECTL="${REPO_ROOT}/hack/tools/bin/kubectl"
-cd "${REPO_ROOT}" && make "${KUBECTL##*/}"
+make --directory="${REPO_ROOT}" "${KUBECTL##*/}"
 
 # shellcheck source=hack/parse-prow-creds.sh
 source "${REPO_ROOT}/hack/parse-prow-creds.sh"
diff --git a/hack/ensure-kind.sh b/hack/ensure-kind.sh
deleted file mode 100755
index d75dc1cc737..00000000000
--- a/hack/ensure-kind.sh
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright 2019 The Kubernetes Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -o errexit
-set -o nounset
-set -o pipefail
-
-GOPATH_BIN="$(go env GOPATH)/bin/"
-MINIMUM_KIND_VERSION=v0.10.0
-goarch="$(go env GOARCH)"
-goos="$(go env GOOS)"
-
-# Ensure the kind tool exists and is a viable version, or installs it
-verify_kind_version() {
-
-    # If kind is not available on the path, get it
-    if ! [ -x "$(command -v kind)" ]; then
-        if [ "$goos" == "linux" ] || [ "$goos" == "darwin" ]; then
-            echo 'kind not found, installing'
-            if !
[ -d "${GOPATH_BIN}" ]; then - mkdir -p "${GOPATH_BIN}" - fi - curl -sLo "${GOPATH_BIN}/kind" "https://github.com/kubernetes-sigs/kind/releases/download/${MINIMUM_KIND_VERSION}/kind-${goos}-${goarch}" - chmod +x "${GOPATH_BIN}/kind" - else - echo "Missing required binary in path: kind" - return 2 - fi - fi - - local kind_version - IFS=" " read -ra kind_version <<< "$(kind version)" - if [[ "${MINIMUM_KIND_VERSION}" != $(echo -e "${MINIMUM_KIND_VERSION}\n${kind_version[1]}" | sort -s -t. -k 1,1 -k 2,2n -k 3,3n | head -n1) ]]; then - cat </dev/null +make --directory="${REPO_ROOT}" "${YQ##*/}" &>/dev/null KEYS=() while IFS='' read -r line; do KEYS+=("$line"); done < <(${YQ} e '.aliases["cluster-api-azure-maintainers"][]' OWNERS_ALIASES) diff --git a/hack/install-cert-manager.sh b/hack/install-cert-manager.sh index 9955d058b9c..90bebaf63d1 100755 --- a/hack/install-cert-manager.sh +++ b/hack/install-cert-manager.sh @@ -49,7 +49,7 @@ END # Install kubectl REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. KUBECTL="${REPO_ROOT}/hack/tools/bin/kubectl" -cd "${REPO_ROOT}" && make "${KUBECTL##*/}" +make --directory="${REPO_ROOT}" "${KUBECTL##*/}" ## Install cert manager and wait for availability "${KUBECTL}" apply -f https://github.com/jetstack/cert-manager/releases/download/v1.5.0/cert-manager.yaml diff --git a/hack/kustomize-sub.sh b/hack/kustomize-sub.sh index 35cea480199..56102d02b99 100755 --- a/hack/kustomize-sub.sh +++ b/hack/kustomize-sub.sh @@ -17,5 +17,9 @@ set -o errexit set -o nounset set -o pipefail -root=$(dirname "${BASH_SOURCE[0]}") -"$root/tools/bin/kustomize" build "$1" | "$root/tools/bin/envsubst" \ No newline at end of file +root=$(dirname "${BASH_SOURCE[0]}")/.. +kustomize="${root}/hack/tools/bin/kustomize" +envsubst="${root}/hack/tools/bin/envsubst" +make --directory="${root}" "${kustomize##*/}" "${envsubst##*/}" + +"${kustomize}" build "$1" | "${envsubst}" diff --git a/hack/observability/jaeger/chart/templates/ingress.yaml b/hack/observability/jaeger/chart/templates/ingress.yaml index 04fbb9f407d..6a0781204e2 100644 --- a/hack/observability/jaeger/chart/templates/ingress.yaml +++ b/hack/observability/jaeger/chart/templates/ingress.yaml @@ -1,43 +1,43 @@ -{{- if .Values.enabled }} -{{- if .Values.ingress.enabled -}} -{{- $fullName := include "jaeger-all-in-one.fullname" . -}} -{{- $svcPort := .Values.service.port -}} -{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} -apiVersion: networking.k8s.io/v1beta1 -{{- else -}} -apiVersion: extensions/v1beta1 -{{- end }} -kind: Ingress -metadata: - name: {{ $fullName }} - labels: - {{- include "jaeger-all-in-one.labels" . | nindent 4 }} - {{- with .Values.ingress.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -spec: -{{- if .Values.ingress.tls }} - tls: - {{- range .Values.ingress.tls }} - - hosts: - {{- range .hosts }} - - {{ . | quote }} - {{- end }} - secretName: {{ .secretName }} - {{- end }} -{{- end }} - rules: - {{- range .Values.ingress.hosts }} - - host: {{ .host | quote }} - http: - paths: - {{- range .paths }} - - path: {{ . }} - backend: - serviceName: {{ $fullName }} - servicePort: {{ $svcPort }} - {{- end }} - {{- end }} -{{- end }} +{{- if .Values.enabled }} +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "jaeger-all-in-one.fullname" . 
-}} +{{- $svcPort := .Values.service.port -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "jaeger-all-in-one.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: +{{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . }} + backend: + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} +{{- end }} {{- end }} \ No newline at end of file diff --git a/hack/observability/jaeger/chart/templates/jaeger-volume.yaml b/hack/observability/jaeger/chart/templates/jaeger-volume.yaml index 2c06c6d0dc3..b9e93b3dc8b 100644 --- a/hack/observability/jaeger/chart/templates/jaeger-volume.yaml +++ b/hack/observability/jaeger/chart/templates/jaeger-volume.yaml @@ -1,17 +1,17 @@ -{{- if .Values.enabled }} -{{- if .Values.volume.enabled -}} -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: {{ include "jaeger-all-in-one.fullname" . }} -spec: - {{- if .Values.volume.className }} - storageClassName: {{ .Values.volume.className }} - {{- end }} - accessModes: - - ReadWriteOnce - resources: - requests: - storage: {{ .Values.volume.size }} -{{- end }} +{{- if .Values.enabled }} +{{- if .Values.volume.enabled -}} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ include "jaeger-all-in-one.fullname" . }} +spec: + {{- if .Values.volume.className }} + storageClassName: {{ .Values.volume.className }} + {{- end }} + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ .Values.volume.size }} +{{- end }} {{- end }} \ No newline at end of file diff --git a/hack/observability/jaeger/chart/templates/service-headless.yaml b/hack/observability/jaeger/chart/templates/service-headless.yaml index 05d8f7b1366..f77017a50a8 100644 --- a/hack/observability/jaeger/chart/templates/service-headless.yaml +++ b/hack/observability/jaeger/chart/templates/service-headless.yaml @@ -1,55 +1,55 @@ -{{- if .Values.enabled }} -apiVersion: v1 -kind: Service -metadata: - name: {{ include "jaeger-all-in-one.fullname" . }}-headless - labels: - {{- include "jaeger-all-in-one.labels" . | nindent 4 }} - {{- with .Values.service.headless.annotations }} - annotations: - {{- toYaml . 
| nindent 4 }} - {{- end }} -spec: - clusterIP: None - ports: - - port: 6831 - targetPort: udp-com-thr - protocol: UDP - name: udp-com-thr - - port: 6832 - targetPort: udp-bin-thr - protocol: UDP - name: udp-bin-thr - - port: 5775 - targetPort: udp-bin-thr-o - protocol: UDP - name: udp-bin-thr-o - - port: 5778 - targetPort: http-configs - protocol: TCP - name: http-configs - - port: {{ .Values.service.port }} - targetPort: http-ui - protocol: TCP - name: http-ui - - port: 14250 - targetPort: grpc-proto - protocol: TCP - name: grpc-proto - - port: 14268 - targetPort: http-bin-thr - protocol: TCP - name: http-bin-thr - - port: 14269 - targetPort: http-admin - protocol: TCP - name: http-admin - {{- if .Values.enableHttpZipkinCollector }} - - port: 9411 - targetPort: http-zipkin - protocol: TCP - name: http-zipkin - {{- end }} - selector: - {{- include "jaeger-all-in-one.selectorLabels" . | nindent 4 }} +{{- if .Values.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "jaeger-all-in-one.fullname" . }}-headless + labels: + {{- include "jaeger-all-in-one.labels" . | nindent 4 }} + {{- with .Values.service.headless.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + clusterIP: None + ports: + - port: 6831 + targetPort: udp-com-thr + protocol: UDP + name: udp-com-thr + - port: 6832 + targetPort: udp-bin-thr + protocol: UDP + name: udp-bin-thr + - port: 5775 + targetPort: udp-bin-thr-o + protocol: UDP + name: udp-bin-thr-o + - port: 5778 + targetPort: http-configs + protocol: TCP + name: http-configs + - port: {{ .Values.service.port }} + targetPort: http-ui + protocol: TCP + name: http-ui + - port: 14250 + targetPort: grpc-proto + protocol: TCP + name: grpc-proto + - port: 14268 + targetPort: http-bin-thr + protocol: TCP + name: http-bin-thr + - port: 14269 + targetPort: http-admin + protocol: TCP + name: http-admin + {{- if .Values.enableHttpZipkinCollector }} + - port: 9411 + targetPort: http-zipkin + protocol: TCP + name: http-zipkin + {{- end }} + selector: + {{- include "jaeger-all-in-one.selectorLabels" . | nindent 4 }} {{- end }} \ No newline at end of file diff --git a/hack/observability/jaeger/chart/templates/service.yaml b/hack/observability/jaeger/chart/templates/service.yaml index b0fd23daca6..df7a7903222 100644 --- a/hack/observability/jaeger/chart/templates/service.yaml +++ b/hack/observability/jaeger/chart/templates/service.yaml @@ -1,55 +1,55 @@ -{{- if .Values.enabled }} -apiVersion: v1 -kind: Service -metadata: - name: {{ include "jaeger-all-in-one.fullname" . }} - labels: - {{- include "jaeger-all-in-one.labels" . | nindent 4 }} - {{- with .Values.service.annotations }} - annotations: - {{- toYaml . 
| nindent 4 }} - {{- end }} -spec: - type: {{ .Values.service.type }} - ports: - - port: 6831 - targetPort: udp-com-thr - protocol: UDP - name: udp-com-thr - - port: 6832 - targetPort: udp-bin-thr - protocol: UDP - name: udp-bin-thr - - port: 5775 - targetPort: udp-bin-thr-o - protocol: UDP - name: udp-bin-thr-o - - port: 5778 - targetPort: http-configs - protocol: TCP - name: http-configs - - port: {{ .Values.service.port }} - targetPort: http-ui - protocol: TCP - name: http-ui - - port: 14250 - targetPort: grpc-proto - protocol: TCP - name: grpc-proto - - port: 14268 - targetPort: http-bin-thr - protocol: TCP - name: http-bin-thr - - port: 14269 - targetPort: http-admin - protocol: TCP - name: http-admin - {{- if .Values.enableHttpZipkinCollector }} - - port: 9411 - targetPort: http-zipkin - protocol: TCP - name: http-zipkin - {{- end }} - selector: - {{- include "jaeger-all-in-one.selectorLabels" . | nindent 4 }} +{{- if .Values.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "jaeger-all-in-one.fullname" . }} + labels: + {{- include "jaeger-all-in-one.labels" . | nindent 4 }} + {{- with .Values.service.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.service.type }} + ports: + - port: 6831 + targetPort: udp-com-thr + protocol: UDP + name: udp-com-thr + - port: 6832 + targetPort: udp-bin-thr + protocol: UDP + name: udp-bin-thr + - port: 5775 + targetPort: udp-bin-thr-o + protocol: UDP + name: udp-bin-thr-o + - port: 5778 + targetPort: http-configs + protocol: TCP + name: http-configs + - port: {{ .Values.service.port }} + targetPort: http-ui + protocol: TCP + name: http-ui + - port: 14250 + targetPort: grpc-proto + protocol: TCP + name: grpc-proto + - port: 14268 + targetPort: http-bin-thr + protocol: TCP + name: http-bin-thr + - port: 14269 + targetPort: http-admin + protocol: TCP + name: http-admin + {{- if .Values.enableHttpZipkinCollector }} + - port: 9411 + targetPort: http-zipkin + protocol: TCP + name: http-zipkin + {{- end }} + selector: + {{- include "jaeger-all-in-one.selectorLabels" . | nindent 4 }} {{- end }} \ No newline at end of file diff --git a/hack/observability/jaeger/chart/templates/serviceaccount.yaml b/hack/observability/jaeger/chart/templates/serviceaccount.yaml index 40e6566eda7..4d60e1566df 100644 --- a/hack/observability/jaeger/chart/templates/serviceaccount.yaml +++ b/hack/observability/jaeger/chart/templates/serviceaccount.yaml @@ -1,10 +1,10 @@ -{{- if .Values.enabled }} -{{- if .Values.serviceAccount.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ include "jaeger-all-in-one.serviceAccountName" . }} - labels: -{{ include "jaeger-all-in-one.labels" . | nindent 4 }} -{{- end -}} +{{- if .Values.enabled }} +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "jaeger-all-in-one.serviceAccountName" . }} + labels: +{{ include "jaeger-all-in-one.labels" . | nindent 4 }} +{{- end -}} {{- end }} \ No newline at end of file diff --git a/hack/observability/jaeger/chart/templates/statefulset.yaml b/hack/observability/jaeger/chart/templates/statefulset.yaml index 62cf8741357..c32881ed62c 100644 --- a/hack/observability/jaeger/chart/templates/statefulset.yaml +++ b/hack/observability/jaeger/chart/templates/statefulset.yaml @@ -1,109 +1,109 @@ -{{- if .Values.enabled }} -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: {{ include "jaeger-all-in-one.fullname" . 
}} - labels: - {{- include "jaeger-all-in-one.labels" . | nindent 4 }} -spec: - serviceName: {{ include "jaeger-all-in-one.fullname" . }}-headless - replicas: {{ .Values.replicaCount }} - selector: - matchLabels: - {{- include "jaeger-all-in-one.selectorLabels" . | nindent 6 }} - template: - metadata: - labels: - {{- include "jaeger-all-in-one.selectorLabels" . | nindent 8 }} - {{- with .Values.podAnnotations }} - annotations: - {{- toYaml . | nindent 8 }} - {{- end }} - spec: - terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} - {{- with .Values.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . | nindent 8 }} - {{- end }} - serviceAccountName: {{ include "jaeger-all-in-one.serviceAccountName" . }} - securityContext: - {{- toYaml .Values.podSecurityContext | nindent 8 }} - {{- if .Values.volume.enabled }} - volumes: - - name: jaeger-volume - persistentVolumeClaim: - claimName: {{ include "jaeger-all-in-one.fullname" . }} - {{- end }} - containers: - - name: {{ .Chart.Name }} - securityContext: - {{- toYaml .Values.securityContext | nindent 12 }} - image: "{{ .Values.image.repository }}:{{ .Chart.AppVersion }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - ports: - - name: udp-com-thr - containerPort: 6831 - protocol: UDP - - name: udp-bin-thr - containerPort: 6832 - protocol: UDP - - name: udp-bin-thr-o - containerPort: 5775 - protocol: UDP - - name: http-configs - containerPort: 5778 - protocol: TCP - - name: http-ui - containerPort: 16686 - protocol: TCP - - name: grpc-proto - containerPort: 14250 - protocol: TCP - - name: http-bin-thr - containerPort: 14268 - protocol: TCP - - name: http-admin - containerPort: 14269 - protocol: TCP - {{- if .Values.enableHttpZipkinCollector }} - - name: http-zipkin - containerPort: 9411 - protocol: TCP - {{- end }} - {{- if .Values.volume.enabled }} - volumeMounts: - - mountPath: "/badger" - name: jaeger-volume - {{- end }} - livenessProbe: - httpGet: - path: {{ .Values.healthCheckUrl | quote }} - port: http-admin - readinessProbe: - httpGet: - path: {{ .Values.healthCheckUrl | quote }} - port: http-admin - resources: - {{- toYaml .Values.resources | nindent 12 }} - env: - {{- range $key, $value := .Values.environmentVariables }} - - name: {{ $key }} - value: {{ $value | quote }} - {{- end }} - {{- if .Values.enableHttpZipkinCollector }} - - name: COLLECTOR_ZIPKIN_HOST_PORT - value: "9411" - {{- end }} - {{- with .Values.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: - {{- toYaml . | nindent 8 }} - {{- end }} -{{- end }} +{{- if .Values.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "jaeger-all-in-one.fullname" . }} + labels: + {{- include "jaeger-all-in-one.labels" . | nindent 4 }} +spec: + serviceName: {{ include "jaeger-all-in-one.fullname" . }}-headless + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + {{- include "jaeger-all-in-one.selectorLabels" . | nindent 6 }} + template: + metadata: + labels: + {{- include "jaeger-all-in-one.selectorLabels" . | nindent 8 }} + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . 
| nindent 8 }} + {{- end }} + serviceAccountName: {{ include "jaeger-all-in-one.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + {{- if .Values.volume.enabled }} + volumes: + - name: jaeger-volume + persistentVolumeClaim: + claimName: {{ include "jaeger-all-in-one.fullname" . }} + {{- end }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: udp-com-thr + containerPort: 6831 + protocol: UDP + - name: udp-bin-thr + containerPort: 6832 + protocol: UDP + - name: udp-bin-thr-o + containerPort: 5775 + protocol: UDP + - name: http-configs + containerPort: 5778 + protocol: TCP + - name: http-ui + containerPort: 16686 + protocol: TCP + - name: grpc-proto + containerPort: 14250 + protocol: TCP + - name: http-bin-thr + containerPort: 14268 + protocol: TCP + - name: http-admin + containerPort: 14269 + protocol: TCP + {{- if .Values.enableHttpZipkinCollector }} + - name: http-zipkin + containerPort: 9411 + protocol: TCP + {{- end }} + {{- if .Values.volume.enabled }} + volumeMounts: + - mountPath: "/badger" + name: jaeger-volume + {{- end }} + livenessProbe: + httpGet: + path: {{ .Values.healthCheckUrl | quote }} + port: http-admin + readinessProbe: + httpGet: + path: {{ .Values.healthCheckUrl | quote }} + port: http-admin + resources: + {{- toYaml .Values.resources | nindent 12 }} + env: + {{- range $key, $value := .Values.environmentVariables }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + {{- if .Values.enableHttpZipkinCollector }} + - name: COLLECTOR_ZIPKIN_HOST_PORT + value: "9411" + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} +{{- end }} diff --git a/hack/observability/jaeger/chart/templates/tests/test-connection.yaml b/hack/observability/jaeger/chart/templates/tests/test-connection.yaml index 458831992e8..ed0bbc9e68b 100644 --- a/hack/observability/jaeger/chart/templates/tests/test-connection.yaml +++ b/hack/observability/jaeger/chart/templates/tests/test-connection.yaml @@ -1,15 +1,15 @@ -apiVersion: v1 -kind: Pod -metadata: - name: "{{ include "jaeger-all-in-one.fullname" . }}-test-connection" - labels: -{{ include "jaeger-all-in-one.labels" . | nindent 4 }} - annotations: - "helm.sh/hook": test-success -spec: - containers: - - name: wget - image: busybox - command: ['wget'] - args: ['{{ include "jaeger-all-in-one.fullname" . }}:{{ .Values.service.port }}'] - restartPolicy: Never +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "jaeger-all-in-one.fullname" . }}-test-connection" + labels: +{{ include "jaeger-all-in-one.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test-success +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "jaeger-all-in-one.fullname" . }}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/hack/observability/jaeger/chart/values.yaml b/hack/observability/jaeger/chart/values.yaml index 4d61ab60989..acc581f55c6 100644 --- a/hack/observability/jaeger/chart/values.yaml +++ b/hack/observability/jaeger/chart/values.yaml @@ -1,95 +1,95 @@ -# Default values for jaeger-all-in-one. 
-# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -enabled: true -replicaCount: 1 - -image: - repository: jaegertracing/all-in-one - pullPolicy: IfNotPresent - -healthCheckUrl: / -imagePullSecrets: [] -nameOverride: "" -fullnameOverride: "" -terminationGracePeriodSeconds: 10 -environmentVariables: - MEMORY_MAX_TRACES: 100000 - SPAN_STORAGE_TYPE: badger - BADGER_EPHEMERAL: false - BADGER_DIRECTORY_VALUE: /badger/data - BADGER_DIRECTORY_KEY: /badger/key - -enableHttpZipkinCollector: false - -serviceAccount: - # Specifies whether a service account should be created - create: true - # The name of the service account to use. - # If not set and create is true, a name is generated using the fullname template - name: - -podAnnotations: - prometheus.io/scrape: "true" - prometheus.io/path: "/metrics" - prometheus.io/port: "14269" - -podSecurityContext: {} - # fsGroup: 2000 - -securityContext: {} - # capabilities: - # drop: - # - ALL - # readOnlyRootFilesystem: true - # runAsNonRoot: true - # runAsUser: 1000 - -service: - type: ClusterIP - port: 16686 - annotations: - prometheus.io/probe: "true" - prometheus.io/probe-path: "/" - headless: - annotations: {} - -ingress: - enabled: false - annotations: {} - # kubernetes.io/ingress.class: nginx - # cert-manager.io/cluster-issuer: letsencrypt - # nginx.ingress.kubernetes.io/force-ssl-redirect: "true" - # nginx.ingress.kubernetes.io/from-to-www-redirect: "true" - hosts: [] - # - host: jaeger.localhost - # paths: - # - / - tls: [] - # - secretName: tls-secret - # hosts: - # - jaeger.localhost - -resources: {} - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - # limits: - # cpu: 100m - # memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi - -nodeSelector: {} - -tolerations: [] - -affinity: {} - -volume: - enabled: true - className: "" +# Default values for jaeger-all-in-one. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +enabled: true +replicaCount: 1 + +image: + repository: jaegertracing/all-in-one + pullPolicy: IfNotPresent + +healthCheckUrl: / +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" +terminationGracePeriodSeconds: 10 +environmentVariables: + MEMORY_MAX_TRACES: 100000 + SPAN_STORAGE_TYPE: badger + BADGER_EPHEMERAL: false + BADGER_DIRECTORY_VALUE: /badger/data + BADGER_DIRECTORY_KEY: /badger/key + +enableHttpZipkinCollector: false + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: + +podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/path: "/metrics" + prometheus.io/port: "14269" + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 16686 + annotations: + prometheus.io/probe: "true" + prometheus.io/probe-path: "/" + headless: + annotations: {} + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # cert-manager.io/cluster-issuer: letsencrypt + # nginx.ingress.kubernetes.io/force-ssl-redirect: "true" + # nginx.ingress.kubernetes.io/from-to-www-redirect: "true" + hosts: [] + # - host: jaeger.localhost + # paths: + # - / + tls: [] + # - secretName: tls-secret + # hosts: + # - jaeger.localhost + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +volume: + enabled: true + className: "" size: 3Gi \ No newline at end of file diff --git a/pkg/record/recorder.go b/pkg/record/recorder.go index e573654e6d6..a3c838b9f6c 100644 --- a/pkg/record/recorder.go +++ b/pkg/record/recorder.go @@ -17,9 +17,10 @@ limitations under the License. package record import ( - "strings" "sync" + "golang.org/x/text/cases" + "golang.org/x/text/language" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/tools/record" @@ -28,10 +29,12 @@ import ( var ( initOnce sync.Once defaultRecorder record.EventRecorder + eng cases.Caser ) func init() { defaultRecorder = new(record.FakeRecorder) + eng = cases.Title(language.English) } // InitFromRecorder initializes the global default recorder. It can only be called once. @@ -44,20 +47,20 @@ func InitFromRecorder(recorder record.EventRecorder) { // Event constructs an event from the given information and puts it in the queue for sending. func Event(object runtime.Object, reason, message string) { - defaultRecorder.Event(object, corev1.EventTypeNormal, strings.Title(reason), message) + defaultRecorder.Event(object, corev1.EventTypeNormal, eng.String(reason), message) } // Eventf is just like Event, but with Sprintf for the message field. func Eventf(object runtime.Object, reason, message string, args ...interface{}) { - defaultRecorder.Eventf(object, corev1.EventTypeNormal, strings.Title(reason), message, args...) + defaultRecorder.Eventf(object, corev1.EventTypeNormal, eng.String(reason), message, args...) } // Warn constructs a warning event from the given information and puts it in the queue for sending. func Warn(object runtime.Object, reason, message string) { - defaultRecorder.Event(object, corev1.EventTypeWarning, strings.Title(reason), message) + defaultRecorder.Event(object, corev1.EventTypeWarning, eng.String(reason), message) } // Warnf is just like Warn, but with Sprintf for the message field. 
 func Warnf(object runtime.Object, reason, message string, args ...interface{}) {
-	defaultRecorder.Eventf(object, corev1.EventTypeWarning, strings.Title(reason), message, args...)
+	defaultRecorder.Eventf(object, corev1.EventTypeWarning, eng.String(reason), message, args...)
 }
diff --git a/scripts/ci-apidiff.sh b/scripts/ci-apidiff.sh
index 3eb1a71c973..b041d93e803 100755
--- a/scripts/ci-apidiff.sh
+++ b/scripts/ci-apidiff.sh
@@ -22,7 +22,7 @@ REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
 
 APIDIFF="${REPO_ROOT}/hack/tools/bin/go-apidiff"
 
-cd "${REPO_ROOT}" && make "${APIDIFF##*/}"
+make --directory="${REPO_ROOT}" "${APIDIFF##*/}"
 
 echo "*** Running go-apidiff ***"
-${APIDIFF} "${PULL_BASE_SHA}" --print-compatible
\ No newline at end of file
+${APIDIFF} "${PULL_BASE_SHA}" --print-compatible
diff --git a/scripts/ci-build.sh b/scripts/ci-build.sh
index 448ac7f2761..e769a1deece 100755
--- a/scripts/ci-build.sh
+++ b/scripts/ci-build.sh
@@ -22,4 +22,4 @@ REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
 # shellcheck source=hack/ensure-go.sh
 source "${REPO_ROOT}/hack/ensure-go.sh"
 
-cd "${REPO_ROOT}" && make binaries
+make --directory="${REPO_ROOT}" binaries
diff --git a/scripts/ci-configmap.sh b/scripts/ci-configmap.sh
old mode 100644
new mode 100755
index 5ce04f4b6c4..27296df1aee
--- a/scripts/ci-configmap.sh
+++ b/scripts/ci-configmap.sh
@@ -19,9 +19,11 @@ set -o nounset
 set -o pipefail
 
 REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
+KUBECTL="${REPO_ROOT}/hack/tools/bin/kubectl"
+make --directory="${REPO_ROOT}" "${KUBECTL##*/}"
 
 CM_NAMES=("calico-addon" "calico-ipv6-addon" "calico-dual-stack-addon" "calico-windows-addon")
 CM_FILES=("calico.yaml" "calico-ipv6.yaml" "calico-dual-stack.yaml" "windows/calico")
 for i in "${!CM_NAMES[@]}"; do
-    kubectl create configmap "${CM_NAMES[i]}" --from-file="${REPO_ROOT}/templates/addons/${CM_FILES[i]}" --dry-run -o yaml | kubectl apply -f -
+    "${KUBECTL}" create configmap "${CM_NAMES[i]}" --from-file="${REPO_ROOT}/templates/addons/${CM_FILES[i]}" --dry-run -o yaml | "${KUBECTL}" apply -f -
 done
diff --git a/scripts/ci-conformance.sh b/scripts/ci-conformance.sh
index 40e21fb06c8..03cf09b5a0c 100755
--- a/scripts/ci-conformance.sh
+++ b/scripts/ci-conformance.sh
@@ -26,14 +26,12 @@ set -o pipefail
 # Install kubectl
 REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
 KUBECTL="${REPO_ROOT}/hack/tools/bin/kubectl"
-cd "${REPO_ROOT}" && make "${KUBECTL##*/}"
+KIND="${REPO_ROOT}/hack/tools/bin/kind"
+KUSTOMIZE="${REPO_ROOT}/hack/tools/bin/kustomize"
+make --directory="${REPO_ROOT}" "${KUBECTL##*/}" "${KIND##*/}" "${KUSTOMIZE##*/}"
 
 # shellcheck source=hack/ensure-go.sh
 source "${REPO_ROOT}/hack/ensure-go.sh"
-# shellcheck source=hack/ensure-kind.sh
-source "${REPO_ROOT}/hack/ensure-kind.sh"
-# shellcheck source=hack/ensure-kustomize.sh
-source "${REPO_ROOT}/hack/ensure-kustomize.sh"
 # shellcheck source=hack/ensure-tags.sh
 source "${REPO_ROOT}/hack/ensure-tags.sh"
 # shellcheck source=hack/parse-prow-creds.sh
diff --git a/scripts/ci-e2e.sh b/scripts/ci-e2e.sh
index ede10020bfa..a975c4262a5 100755
--- a/scripts/ci-e2e.sh
+++ b/scripts/ci-e2e.sh
@@ -26,12 +26,11 @@ set -o pipefail
 
 # Install kubectl
 REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
KUBECTL="${REPO_ROOT}/hack/tools/bin/kubectl" -cd "${REPO_ROOT}" && make "${KUBECTL##*/}" +KIND="${REPO_ROOT}/hack/tools/bin/kind" +make --directory="${REPO_ROOT}" "${KUBECTL##*/}" "${KIND##*/}" # shellcheck source=hack/ensure-go.sh source "${REPO_ROOT}/hack/ensure-go.sh" -# shellcheck source=hack/ensure-kind.sh -source "${REPO_ROOT}/hack/ensure-kind.sh" # shellcheck source=hack/ensure-tags.sh source "${REPO_ROOT}/hack/ensure-tags.sh" # shellcheck source=hack/parse-prow-creds.sh diff --git a/scripts/ci-entrypoint.sh b/scripts/ci-entrypoint.sh index 48310e89d71..8799e7a9a26 100755 --- a/scripts/ci-entrypoint.sh +++ b/scripts/ci-entrypoint.sh @@ -22,21 +22,19 @@ set -o errexit set -o nounset set -o pipefail -# Install kubectl and helm +# Install kubectl, helm and kustomize REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. KUBECTL="${REPO_ROOT}/hack/tools/bin/kubectl" HELM="${REPO_ROOT}/hack/tools/bin/helm" -cd "${REPO_ROOT}" && make "${KUBECTL##*/}"; make "${HELM##*/}" +KIND="${REPO_ROOT}/hack/tools/bin/kind" +KUSTOMIZE="${REPO_ROOT}/hack/tools/bin/kustomize" +make --directory="${REPO_ROOT}" "${KUBECTL##*/}" "${HELM##*/}" "${KIND##*/}" "${KUSTOMIZE##*/}" # export the variables so they are available in bash -c wait_for_nodes below export KUBECTL export HELM # shellcheck source=hack/ensure-go.sh source "${REPO_ROOT}/hack/ensure-go.sh" -# shellcheck source=hack/ensure-kind.sh -source "${REPO_ROOT}/hack/ensure-kind.sh" -# shellcheck source=hack/ensure-kustomize.sh -source "${REPO_ROOT}/hack/ensure-kustomize.sh" # shellcheck source=hack/ensure-tags.sh source "${REPO_ROOT}/hack/ensure-tags.sh" # shellcheck source=hack/parse-prow-creds.sh diff --git a/scripts/ci-test-coverage.sh b/scripts/ci-test-coverage.sh index da6e10c4318..9f8dc48e115 100755 --- a/scripts/ci-test-coverage.sh +++ b/scripts/ci-test-coverage.sh @@ -22,5 +22,4 @@ REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. # shellcheck source=hack/ensure-go.sh source "${REPO_ROOT}/hack/ensure-go.sh" -cd "${REPO_ROOT}" && \ - make test-cover +make --directory="${REPO_ROOT}" test-cover diff --git a/scripts/ci-test.sh b/scripts/ci-test.sh index 45fb64df210..0458392f49a 100755 --- a/scripts/ci-test.sh +++ b/scripts/ci-test.sh @@ -22,5 +22,4 @@ REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. # shellcheck source=hack/ensure-go.sh source "${REPO_ROOT}/hack/ensure-go.sh" -cd "${REPO_ROOT}" && \ - make test +make --directory="${REPO_ROOT}" test diff --git a/scripts/kind-with-registry.sh b/scripts/kind-with-registry.sh index d3cccf0aa67..91454ca90ed 100755 --- a/scripts/kind-with-registry.sh +++ b/scripts/kind-with-registry.sh @@ -20,12 +20,13 @@ set -o pipefail # Install kubectl REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. KUBECTL="${REPO_ROOT}/hack/tools/bin/kubectl" -cd "${REPO_ROOT}" && make "${KUBECTL##*/}" +KIND="${REPO_ROOT}/hack/tools/bin/kind" +make --directory="${REPO_ROOT}" "${KUBECTL##*/}" "${KIND##*/}" # desired cluster name; default is "kind" KIND_CLUSTER_NAME="${KIND_CLUSTER_NAME:-capz}" -if [[ "$(kind get clusters)" =~ .*"${KIND_CLUSTER_NAME}".* ]]; then +if [[ "$("${KIND}" get clusters)" =~ .*"${KIND_CLUSTER_NAME}".* ]]; then echo "cluster already exists, moving on" exit 0 fi @@ -40,7 +41,7 @@ if [ "${running}" != 'true' ]; then fi # create a cluster with the local registry enabled in containerd -cat < 0 { - skus[i] = *sku.Name + // New SKUs don't contain the Kubernetes version and are named like "ubuntu-2004-gen1". 
+ if match := capiSku.FindStringSubmatch(*sku.Name); len(match) > 0 { + for _, vmImage := range *res.Value { + // Versions are named like "121.13.20220601", for Kubernetes v1.21.13 published on June 1, 2022. + if match := capiVersion.FindStringSubmatch(*vmImage.Name); len(match) > 0 { + stringVer := fmt.Sprintf("%s.%s.%s", match[1], match[2], match[3]) + versions[stringVer] = semver.MustParse(stringVer) + } + } + continue + } + // Old SKUs before 1.21.12, 1.22.9, or 1.23.6 are named like "k8s-1dot21dot2-ubuntu-2004". + if match := oldCapiSku.FindStringSubmatch(*sku.Name); len(match) > 0 { + stringVer := fmt.Sprintf("%s.%s.%s", match[1], match[2], match[3]) + versions[stringVer] = semver.MustParse(stringVer) + } } } } - return skus -} - -// parseImageSkuNames parses SKU names in format "k8s-1dot17dot2-os-123" to extract the Kubernetes version. -// it returns a sorted list of all k8s versions found. -func parseImageSkuNames(skus []string) map[string]semver.Version { - capiSku := regexp.MustCompile(`^k8s-(0|[1-9][0-9]*)dot(0|[1-9][0-9]*)dot(0|[1-9][0-9]*)-[a-z]*.*$`) - versions := make(map[string]semver.Version, len(skus)) - for _, sku := range skus { - match := capiSku.FindStringSubmatch(sku) - if len(match) != 0 { - stringVer := fmt.Sprintf("%s.%s.%s", match[1], match[2], match[3]) - versions[stringVer] = semver.MustParse(stringVer) - } - } return versions } @@ -843,11 +846,13 @@ func InstallHelmChart(ctx context.Context, input clusterctl.ApplyClusterTemplate settings := helmCli.New() settings.KubeConfig = kubeConfigPath actionConfig := new(helmAction.Configuration) - err := actionConfig.Init(settings.RESTClientGetter(), "default", "secret", Logf) + ns := "default" + err := actionConfig.Init(settings.RESTClientGetter(), ns, "secret", Logf) Expect(err).To(BeNil()) i := helmAction.NewInstall(actionConfig) i.RepoURL = repoURL i.ReleaseName = releaseName + i.Namespace = ns Eventually(func() error { cp, err := i.ChartPathOptions.LocateChart(chartName, helmCli.New()) if err != nil { @@ -922,3 +927,20 @@ func podListHasNumPods(numPods int) func(pl *corev1.PodList) error { return nil } } + +// podListHasAtLeastNumPods fulfills the cluster-api PodListCondition type spec. +// Given a list of pods, it validates that at least numPods of them are in the Running state. +func podListHasAtLeastNumPods(numPods int) func(pl *corev1.PodList) error { + return func(pl *corev1.PodList) error { + var runningPods int + for _, p := range pl.Items { + if p.Status.Phase == corev1.PodRunning { + runningPods++ + } + } + if runningPods < numPods { + return errors.Errorf("expected at least %d Running pods, got %d", numPods, runningPods) + } + return nil + } +} diff --git a/test/e2e/kubernetes/deployment/deployment.go b/test/e2e/kubernetes/deployment/deployment.go index 4b888cc7abc..38de238874d 100644 --- a/test/e2e/kubernetes/deployment/deployment.go +++ b/test/e2e/kubernetes/deployment/deployment.go @@ -132,6 +132,23 @@ func (d *Builder) AddContainerPort(name, portName string, portNumber int32, prot } } +// AddPVC sets the pod template's volumes to a single volume named "managed", +// backed by the named PersistentVolumeClaim (replacing any previously set volumes). +func (d *Builder) AddPVC(pvcName string) *Builder { + volumes := []corev1.Volume{ + { + Name: "managed", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: pvcName, + }, + }, + }, + } + d.deployment.Spec.Template.Spec.Volumes = volumes + return d +} + func (d *Builder) Deploy(ctx context.Context, clientset *kubernetes.Clientset) (*appsv1.Deployment, error) { var deployment *appsv1.Deployment Eventually(func() error {
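An editorial sketch, not part of the patch: how the new builder method and the new pod-list condition might compose inside the e2e suite. The claim name is illustrative, and d is assumed to be a *deployment.Builder prepared by the package's existing helpers.

// Mount the (hypothetical) claim and create the Deployment via the builder above.
if _, err := d.AddPVC("dd-managed-hdd-5g").Deploy(ctx, clientset); err != nil {
	// fail the test here; error handling depends on the surrounding framework
}
// podListHasAtLeastNumPods(1) then fulfills the cluster-api framework's
// PodListCondition, e.g. as the Condition field of a
// framework.WaitForPodListConditionInput, to wait for at least one Running replica.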
diff --git a/test/e2e/kubernetes/pvc/pvc.go b/test/e2e/kubernetes/pvc/pvc.go new file mode 100644 index 00000000000..27b4ed6cbb0 --- /dev/null +++ b/test/e2e/kubernetes/pvc/pvc.go @@ -0,0 +1,100 @@ +//go:build e2e +// +build e2e + +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pvc + +import ( + "context" + "log" + "time" + + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" +) + +const ( + pvcOperationTimeout = 30 * time.Second + pvcOperationSleepBetweenRetries = 3 * time.Second +) + +/* +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: dd-managed-hdd-5g + annotations: + volume.beta.kubernetes.io/storage-class: managedhdd +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 5Gi + */ + +// Builder provides a helper interface for building a PersistentVolumeClaim manifest. +type Builder struct { + pvc *corev1.PersistentVolumeClaim +} + +// Create returns a PVC builder for a claim with the given name and storage request (for example "5Gi"). +func Create(pvcName string, storageRequest string) (*Builder, error) { + quantity, err := resource.ParseQuantity(storageRequest) + if err != nil { + return nil, err + } + pvcBuilder := &Builder{ + pvc: &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: pvcName, + Annotations: map[string]string{ + "volume.beta.kubernetes.io/storage-class": "managedhdd", + }, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.ResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceStorage: quantity, + }, + }, + }, + }, + } + return pvcBuilder, nil +} + +// DeployPVC creates the PersistentVolumeClaim in the default namespace, retrying on transient failures. +func (b *Builder) DeployPVC(clientset *kubernetes.Clientset) error { + Eventually(func() error { + _, err := clientset.CoreV1().PersistentVolumeClaims("default").Create(context.TODO(), b.pvc, metav1.CreateOptions{}) + if err != nil { + log.Printf("Error trying to deploy PVC %s in namespace default:%s\n", b.pvc.Name, err.Error()) + return err + } + return nil + }, pvcOperationTimeout, pvcOperationSleepBetweenRetries).Should(Succeed()) + + return nil +} \ No newline at end of file
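A hedged usage sketch for the new pvc package (not part of the patch; the claim name and size are illustrative):

// Build a 5Gi claim annotated for the "managedhdd" class, then create it.
claim, err := pvc.Create("dd-managed-hdd-5g", "5Gi")
if err != nil {
	// an unparsable storage quantity is the only error path in Create
}
_ = claim.DeployPVC(clientset) // retries internally via gomega's Eventually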
"github.com/onsi/gomega" + storagev1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" +) + +/* +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: managedhdd +provisioner: kubernetes.io/azure-disk +volumeBindingMode: WaitForFirstConsumer +parameters: + storageaccounttype: Standard_LRS + kind: Managed + */ + +const ( + scOperationTimeout = 30 * time.Second + scOperationSleepBetweenRetries = 3 * time.Second + AzureDiskProvisioner = "kubernetes.io/azure-disk" +) + +// Builder provides a helper interface for building storage class manifest +type Builder struct { + sc *storagev1.StorageClass +} + +// Create creates a storage class builder manifest +func Create(scName string) *Builder { + scBuilder:= &Builder{ + sc: &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: scName, + }, + Provisioner: AzureDiskProvisioner, + Parameters: map[string]string{ + "storageaccounttype":"Standard_LRS", + "kind": "managed", + }, + }, + } + return scBuilder +} + +// WithWaitForFirstConsumer sets volume binding on first consumer +func (d *Builder)WithWaitForFirstConsumer() *Builder { + volumeBinding:= storagev1.VolumeBindingWaitForFirstConsumer + d.sc.VolumeBindingMode = &volumeBinding + return d +} + +// DeployStorageClass creates a storage class on the k8s cluster +func (d *Builder)DeployStorageClass(clientset *kubernetes.Clientset) { + Eventually(func() error { + _,err := clientset.StorageV1().StorageClasses().Create(context.TODO(),d.sc,metav1.CreateOptions{}) + if err != nil { + log.Printf("Error trying to deploy storage class %s in namespace %s:%s\n", d.sc.Name, d.sc.ObjectMeta.Namespace, err.Error()) + return err + } + return nil + }, scOperationTimeout, scOperationSleepBetweenRetries).Should(Succeed()) +} diff --git a/test/e2e/workloads/policies/backend-policy-allow-ingress-pod-label.yaml b/test/e2e/workloads/policies/backend-policy-allow-ingress-pod-label.yaml index d569d4a1496..d15e7c3f7f0 100644 --- a/test/e2e/workloads/policies/backend-policy-allow-ingress-pod-label.yaml +++ b/test/e2e/workloads/policies/backend-policy-allow-ingress-pod-label.yaml @@ -1,17 +1,17 @@ -kind: NetworkPolicy -apiVersion: networking.k8s.io/v1 -metadata: - name: backend-allow-ingress-pod-label - namespace: development -spec: - podSelector: - matchLabels: - app: webapp - role: backend - ingress: - - from: - - namespaceSelector: {} - podSelector: - matchLabels: - app: webapp - role: frontend +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: backend-allow-ingress-pod-label + namespace: development +spec: + podSelector: + matchLabels: + app: webapp + role: backend + ingress: + - from: + - namespaceSelector: {} + podSelector: + matchLabels: + app: webapp + role: frontend diff --git a/test/e2e/workloads/policies/backend-policy-allow-ingress-pod-namespace-label.yaml b/test/e2e/workloads/policies/backend-policy-allow-ingress-pod-namespace-label.yaml index a2f06fd5754..7b5bcfecdaa 100644 --- a/test/e2e/workloads/policies/backend-policy-allow-ingress-pod-namespace-label.yaml +++ b/test/e2e/workloads/policies/backend-policy-allow-ingress-pod-namespace-label.yaml @@ -1,19 +1,19 @@ -kind: NetworkPolicy -apiVersion: networking.k8s.io/v1 -metadata: - name: backend-policy-allow-ingress-pod-namespace-label - namespace: development -spec: - podSelector: - matchLabels: - app: webapp - role: backend - ingress: - - from: - - namespaceSelector: - matchLabels: - purpose: development - podSelector: - matchLabels: - app: webapp - role: 
diff --git a/test/e2e/workloads/policies/backend-policy-allow-ingress-pod-label.yaml b/test/e2e/workloads/policies/backend-policy-allow-ingress-pod-label.yaml index d569d4a1496..d15e7c3f7f0 100644 --- a/test/e2e/workloads/policies/backend-policy-allow-ingress-pod-label.yaml +++ b/test/e2e/workloads/policies/backend-policy-allow-ingress-pod-label.yaml @@ -1,17 +1,17 @@ -kind: NetworkPolicy -apiVersion: networking.k8s.io/v1 -metadata: - name: backend-allow-ingress-pod-label - namespace: development -spec: - podSelector: - matchLabels: - app: webapp - role: backend - ingress: - - from: - - namespaceSelector: {} - podSelector: - matchLabels: - app: webapp - role: frontend +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: backend-allow-ingress-pod-label + namespace: development +spec: + podSelector: + matchLabels: + app: webapp + role: backend + ingress: + - from: + - namespaceSelector: {} + podSelector: + matchLabels: + app: webapp + role: frontend diff --git a/test/e2e/workloads/policies/backend-policy-allow-ingress-pod-namespace-label.yaml b/test/e2e/workloads/policies/backend-policy-allow-ingress-pod-namespace-label.yaml index a2f06fd5754..7b5bcfecdaa 100644 --- a/test/e2e/workloads/policies/backend-policy-allow-ingress-pod-namespace-label.yaml +++ b/test/e2e/workloads/policies/backend-policy-allow-ingress-pod-namespace-label.yaml @@ -1,19 +1,19 @@ -kind: NetworkPolicy -apiVersion: networking.k8s.io/v1 -metadata: - name: backend-policy-allow-ingress-pod-namespace-label - namespace: development -spec: - podSelector: - matchLabels: - app: webapp - role: backend - ingress: - - from: - - namespaceSelector: - matchLabels: - purpose: development - podSelector: - matchLabels: - app: webapp - role: frontend +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: backend-policy-allow-ingress-pod-namespace-label + namespace: development +spec: + podSelector: + matchLabels: + app: webapp + role: backend + ingress: + - from: + - namespaceSelector: + matchLabels: + purpose: development + podSelector: + matchLabels: + app: webapp + role: frontend diff --git a/test/e2e/workloads/policies/backend-policy-deny-ingress.yaml b/test/e2e/workloads/policies/backend-policy-deny-ingress.yaml index 58f42e13d60..de04a85d3fe 100644 --- a/test/e2e/workloads/policies/backend-policy-deny-ingress.yaml +++ b/test/e2e/workloads/policies/backend-policy-deny-ingress.yaml @@ -1,11 +1,11 @@ -kind: NetworkPolicy -apiVersion: networking.k8s.io/v1 -metadata: - name: backend-deny-ingress - namespace: development -spec: - podSelector: - matchLabels: - app: webapp - role: backend - ingress: [] +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: backend-deny-ingress + namespace: development +spec: + podSelector: + matchLabels: + app: webapp + role: backend + ingress: []
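The three policy hunks above re-add identical YAML content and appear to change only line endings. For reference, an editorial client-go sketch (not part of the patch) of the backend-deny-ingress policy; assumed imports are networkingv1 "k8s.io/api/networking/v1", metav1 "k8s.io/apimachinery/pkg/apis/meta/v1", and "k8s.io/client-go/kubernetes":

// An empty, non-nil ingress rule list selects the pods and denies them all inbound traffic.
func denyBackendIngress(ctx context.Context, clientset *kubernetes.Clientset) error {
	policy := &networkingv1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "backend-deny-ingress", Namespace: "development"},
		Spec: networkingv1.NetworkPolicySpec{
			PodSelector: metav1.LabelSelector{
				MatchLabels: map[string]string{"app": "webapp", "role": "backend"},
			},
			Ingress: []networkingv1.NetworkPolicyIngressRule{},
		},
	}
	_, err := clientset.NetworkingV1().NetworkPolicies(policy.Namespace).Create(ctx, policy, metav1.CreateOptions{})
	return err
}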