diff --git a/.dockerignore b/.dockerignore index db1740e41b3..6d0544c6e30 100644 --- a/.dockerignore +++ b/.dockerignore @@ -6,7 +6,8 @@ !/cloud/** !/controllers/** !/exp/** +!/feature/** !/pkg/** !/main.go !/go.mod -!/go.sum \ No newline at end of file +!/go.sum diff --git a/Makefile b/Makefile index 3a9a4091ce7..dfd4bcc88df 100644 --- a/Makefile +++ b/Makefile @@ -73,6 +73,7 @@ RBAC_ROOT ?= $(MANIFEST_ROOT)/rbac PULL_POLICY ?= Always CLUSTER_TEMPLATE ?= cluster-template.yaml +MANAGED_CLUSTER_TEMPLATE ?= cluster-template-aks.yaml ## -------------------------------------- ## Help @@ -319,7 +320,7 @@ create-management-cluster: $(KUSTOMIZE) $(ENVSUBST) $(MAKE) kind-create # Install cert manager and wait for availability - kubectl create -f https://github.com/jetstack/cert-manager/releases/download/v0.11.1/cert-manager.yaml + kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v0.11.1/cert-manager.yaml kubectl wait --for=condition=Available --timeout=5m apiservice v1beta1.webhook.cert-manager.io # Deploy CAPI @@ -347,16 +348,30 @@ create-workload-cluster: $(ENVSUBST) $(ENVSUBST) < $(TEMPLATES_DIR)/$(CLUSTER_TEMPLATE) | kubectl apply -f - # Wait for the kubeconfig to become available. - timeout 300 bash -c "while ! kubectl get secrets | grep $(CLUSTER_NAME)-kubeconfig; do sleep 1; done" + timeout --foreground 300 bash -c "while ! kubectl get secrets | grep $(CLUSTER_NAME)-kubeconfig; do sleep 1; done" # Get kubeconfig and store it locally. kubectl get secrets $(CLUSTER_NAME)-kubeconfig -o json | jq -r .data.value | base64 --decode > ./kubeconfig - timeout 600 bash -c "while ! kubectl --kubeconfig=./kubeconfig get nodes | grep master; do sleep 1; done" + timeout --foreground 600 bash -c "while ! kubectl --kubeconfig=./kubeconfig get nodes | grep master; do sleep 1; done" # Deploy calico kubectl --kubeconfig=./kubeconfig apply -f templates/addons/calico.yaml @echo 'run "kubectl --kubeconfig=./kubeconfig ..." to work with the new target cluster' +.PHONY: create-aks-cluster +create-aks-cluster: $(KUSTOMIZE) $(ENVSUBST) + # Create managed Cluster. + $(ENVSUBST) < $(TEMPLATES_DIR)/$(MANAGED_CLUSTER_TEMPLATE) | kubectl apply -f - + + # Wait for the kubeconfig to become available. + timeout --foreground 300 bash -c "while ! kubectl get secrets | grep $(CLUSTER_NAME)-kubeconfig; do sleep 1; done" + # Get kubeconfig and store it locally. + kubectl get secrets $(CLUSTER_NAME)-kubeconfig -o json | jq -r .data.value | base64 --decode > ./kubeconfig + timeout --foreground 600 bash -c "while ! kubectl --kubeconfig=./kubeconfig get nodes | grep master; do sleep 1; done" + + @echo 'run "kubectl --kubeconfig=./kubeconfig ..." to work with the new target cluster' + + .PHONY: create-cluster create-cluster: create-management-cluster create-workload-cluster ## Create a workload development Kubernetes cluster on Azure in a kind management cluster. diff --git a/cloud/interfaces.go b/cloud/interfaces.go index 8ddf4df9cbc..28954037705 100644 --- a/cloud/interfaces.go +++ b/cloud/interfaces.go @@ -35,3 +35,10 @@ type GetterService interface { Reconcile(ctx context.Context, spec interface{}) error Delete(ctx context.Context, spec interface{}) error } + +// CredentialGetter is a GetterService which knows how to retrieve credentials for an Azure +// resource in a resource group. 
+type CredentialGetter interface { + GetterService + GetCredentials(ctx context.Context, group string, cluster string) ([]byte, error) +} diff --git a/cloud/scope/machinepool.go b/cloud/scope/machinepool.go index 329d20a7294..dbe469b2494 100644 --- a/cloud/scope/machinepool.go +++ b/cloud/scope/machinepool.go @@ -53,6 +53,7 @@ type ( // MachinePoolScope defines a scope defined around a machine pool and its cluster. MachinePoolScope struct { logr.Logger + AzureClients client client.Client patchHelper *patch.Helper Cluster *capiv1.Cluster @@ -85,6 +86,10 @@ func NewMachinePoolScope(params MachinePoolScopeParams) (*MachinePoolScope, erro params.Logger = klogr.New() } + if err := params.AzureClients.setCredentials(params.AzureCluster.Spec.SubscriptionID); err != nil { + return nil, errors.Wrap(err, "failed to create Azure session") + } + helper, err := patch.NewHelper(params.AzureMachinePool, params.Client) if err != nil { return nil, errors.Wrap(err, "failed to init patch helper") @@ -95,6 +100,7 @@ func NewMachinePoolScope(params MachinePoolScopeParams) (*MachinePoolScope, erro MachinePool: params.MachinePool, AzureCluster: params.AzureCluster, AzureMachinePool: params.AzureMachinePool, + AzureClients: params.AzureClients, Logger: params.Logger, patchHelper: helper, }, nil diff --git a/cloud/scope/managedcontrolplane.go b/cloud/scope/managedcontrolplane.go new file mode 100644 index 00000000000..9c80e636e2e --- /dev/null +++ b/cloud/scope/managedcontrolplane.go @@ -0,0 +1,100 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scope + +import ( + "context" + + "github.com/go-logr/logr" + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/klog/klogr" + infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha3" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" + expv1 "sigs.k8s.io/cluster-api/exp/api/v1alpha3" + + "sigs.k8s.io/cluster-api/util/patch" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ManagedControlPlaneScopeParams defines the input parameters used to create a new +type ManagedControlPlaneScopeParams struct { + AzureClients + Client client.Client + Logger logr.Logger + Cluster *clusterv1.Cluster + ControlPlane *infrav1exp.AzureManagedControlPlane + InfraMachinePool *infrav1exp.AzureManagedMachinePool + MachinePool *expv1.MachinePool + PatchTarget runtime.Object +} + +// NewManagedControlPlaneScope creates a new Scope from the supplied parameters. +// This is meant to be called for each reconcile iteration. 
+func NewManagedControlPlaneScope(params ManagedControlPlaneScopeParams) (*ManagedControlPlaneScope, error) {
+	if params.Cluster == nil {
+		return nil, errors.New("failed to generate new scope from nil Cluster")
+	}
+
+	if params.ControlPlane == nil {
+		return nil, errors.New("failed to generate new scope from nil ControlPlane")
+	}
+
+	if params.Logger == nil {
+		params.Logger = klogr.New()
+	}
+
+	if err := params.AzureClients.setCredentials(params.ControlPlane.Spec.SubscriptionID); err != nil {
+		return nil, errors.Wrap(err, "failed to create Azure session")
+	}
+
+	helper, err := patch.NewHelper(params.PatchTarget, params.Client)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to init patch helper")
+	}
+
+	return &ManagedControlPlaneScope{
+		Logger:           params.Logger,
+		Client:           params.Client,
+		AzureClients:     params.AzureClients,
+		Cluster:          params.Cluster,
+		ControlPlane:     params.ControlPlane,
+		MachinePool:      params.MachinePool,
+		InfraMachinePool: params.InfraMachinePool,
+		PatchTarget:      params.PatchTarget,
+		patchHelper:      helper,
+	}, nil
+}
+
+// ManagedControlPlaneScope defines the basic context for an actuator to operate upon.
+type ManagedControlPlaneScope struct {
+	logr.Logger
+	Client      client.Client
+	patchHelper *patch.Helper
+
+	AzureClients
+	Cluster          *clusterv1.Cluster
+	MachinePool      *expv1.MachinePool
+	ControlPlane     *infrav1exp.AzureManagedControlPlane
+	InfraMachinePool *infrav1exp.AzureManagedMachinePool
+	PatchTarget      runtime.Object
+}
+
+// PatchObject persists the cluster configuration and status.
+func (s *ManagedControlPlaneScope) PatchObject(ctx context.Context) error {
+	return s.patchHelper.Patch(ctx, s.PatchTarget)
+}
diff --git a/cloud/services/agentpools/agentpools.go b/cloud/services/agentpools/agentpools.go
new file mode 100644
index 00000000000..fbb19e3a80c
--- /dev/null
+++ b/cloud/services/agentpools/agentpools.go
@@ -0,0 +1,133 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package agentpools
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice"
+	"github.com/google/go-cmp/cmp"
+	"github.com/pkg/errors"
+	"k8s.io/klog"
+	azure "sigs.k8s.io/cluster-api-provider-azure/cloud"
+)
+
+// Spec contains properties to create an agent pool.
+type Spec struct {
+	Name          string
+	ResourceGroup string
+	Cluster       string
+	Version       *string
+	SKU           string
+	Replicas      int32
+	OSDiskSizeGB  int32
+}
+
+// Get fetches an agent pool from Azure.
+func (s *Service) Get(ctx context.Context, spec interface{}) (interface{}, error) {
+	agentPoolSpec, ok := spec.(*Spec)
+	if !ok {
+		return containerservice.AgentPool{}, errors.New("expected agent pool specification")
+	}
+	return s.Client.Get(ctx, agentPoolSpec.ResourceGroup, agentPoolSpec.Cluster, agentPoolSpec.Name)
+}
+
+// Reconcile idempotently creates or updates an agent pool, if possible.
+func (s *Service) Reconcile(ctx context.Context, spec interface{}) error {
+	agentPoolSpec, ok := spec.(*Spec)
+	if !ok {
+		return errors.New("expected agent pool specification")
+	}
+
+	profile := containerservice.AgentPool{
+		ManagedClusterAgentPoolProfileProperties: &containerservice.ManagedClusterAgentPoolProfileProperties{
+			VMSize:              containerservice.VMSizeTypes(agentPoolSpec.SKU),
+			OsDiskSizeGB:        &agentPoolSpec.OSDiskSizeGB,
+			Count:               &agentPoolSpec.Replicas,
+			Type:                containerservice.VirtualMachineScaleSets,
+			OrchestratorVersion: agentPoolSpec.Version,
+		},
+	}
+
+	existingSpec, err := s.Get(ctx, spec)
+	if err != nil && !azure.ResourceNotFound(err) {
+		return errors.Wrapf(err, "failed to get existing agent pool")
+	}
+	existingPool, ok := existingSpec.(containerservice.AgentPool)
+	if !ok {
+		return errors.New("expected agent pool specification")
+	}
+
+	// For updates, we want to pass whatever we find in the existing
+	// cluster, normalized to reflect the input we originally provided.
+	// AKS will populate defaults and read-only values, which we want
+	// to strip/clean to match what we expect.
+	isCreate := azure.ResourceNotFound(err)
+	if isCreate {
+		err = s.Client.CreateOrUpdate(ctx, agentPoolSpec.ResourceGroup, agentPoolSpec.Cluster, agentPoolSpec.Name, profile)
+		if err != nil {
+			return fmt.Errorf("failed to create or update agent pool, %#+v", err)
+		}
+	} else {
+		// Normalize individual agent pools to diff in case we need to update
+		existingProfile := containerservice.AgentPool{
+			ManagedClusterAgentPoolProfileProperties: &containerservice.ManagedClusterAgentPoolProfileProperties{
+				VMSize:              existingPool.ManagedClusterAgentPoolProfileProperties.VMSize,
+				OsDiskSizeGB:        existingPool.ManagedClusterAgentPoolProfileProperties.OsDiskSizeGB,
+				Count:               existingPool.ManagedClusterAgentPoolProfileProperties.Count,
+				Type:                containerservice.VirtualMachineScaleSets,
+				OrchestratorVersion: existingPool.ManagedClusterAgentPoolProfileProperties.OrchestratorVersion,
+			},
+		}
+
+		// Diff and check if we require an update
+		diff := cmp.Diff(profile, existingProfile)
+		if diff != "" {
+			klog.V(2).Infof("Update required (+new -old):\n%s", diff)
+			err = s.Client.CreateOrUpdate(ctx, agentPoolSpec.ResourceGroup, agentPoolSpec.Cluster, agentPoolSpec.Name, profile)
+			if err != nil {
+				return fmt.Errorf("failed to create or update agent pool, %#+v", err.Error())
+			}
+		} else {
+			klog.V(2).Infof("Normalized and desired agent pool matched, no update needed")
+		}
+	}
+
+	return nil
+}
+
+// Delete deletes the agent pool with the provided name.
+func (s *Service) Delete(ctx context.Context, spec interface{}) error {
+	agentPoolSpec, ok := spec.(*Spec)
+	if !ok {
+		return errors.New("expected agent pool specification")
+	}
+
+	klog.V(2).Infof("deleting agent pool %s ", agentPoolSpec.Name)
+	err := s.Client.Delete(ctx, agentPoolSpec.ResourceGroup, agentPoolSpec.Cluster, agentPoolSpec.Name)
+	if err != nil {
+		if azure.ResourceNotFound(err) {
+			// already deleted
+			return nil
+		}
+		return errors.Wrapf(err, "failed to delete agent pool %s in resource group %s", agentPoolSpec.Name, agentPoolSpec.ResourceGroup)
+	}
+
+	klog.V(2).Infof("Successfully deleted agent pool %s ", agentPoolSpec.Name)
+	return nil
+}
diff --git a/cloud/services/agentpools/client.go b/cloud/services/agentpools/client.go
new file mode 100644
index 00000000000..b62cc7ac26f
--- /dev/null
+++ b/cloud/services/agentpools/client.go
@@ -0,0 +1,85 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package agentpools + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice" + "github.com/Azure/go-autorest/autorest" + "github.com/pkg/errors" + azure "sigs.k8s.io/cluster-api-provider-azure/cloud" +) + +// Client wraps go-sdk +type Client interface { + Get(context.Context, string, string, string) (containerservice.AgentPool, error) + CreateOrUpdate(context.Context, string, string, string, containerservice.AgentPool) error + Delete(context.Context, string, string, string) error +} + +// AzureClient contains the Azure go-sdk Client +type AzureClient struct { + agentpools containerservice.AgentPoolsClient +} + +var _ Client = &AzureClient{} + +// NewClient creates a new agent pools client from subscription ID. +func NewClient(subscriptionID string, authorizer autorest.Authorizer) *AzureClient { + c := newAgentPoolsClient(subscriptionID, authorizer) + return &AzureClient{c} +} + +// newAgentPoolsClient creates a new agent pool client from subscription ID. +func newAgentPoolsClient(subscriptionID string, authorizer autorest.Authorizer) containerservice.AgentPoolsClient { + agentPoolsClient := containerservice.NewAgentPoolsClient(subscriptionID) + agentPoolsClient.Authorizer = authorizer + agentPoolsClient.AddToUserAgent(azure.UserAgent) + return agentPoolsClient +} + +// Get gets an agent pool. +func (ac *AzureClient) Get(ctx context.Context, resourceGroupName, cluster, name string) (containerservice.AgentPool, error) { + return ac.agentpools.Get(ctx, resourceGroupName, cluster, name) +} + +// CreateOrUpdate creates or updates an agent pool. +func (ac *AzureClient) CreateOrUpdate(ctx context.Context, resourceGroupName, cluster, name string, properties containerservice.AgentPool) error { + future, err := ac.agentpools.CreateOrUpdate(ctx, resourceGroupName, cluster, name, properties) + if err != nil { + return errors.Wrap(err, "failed to begin operation") + } + if err := future.WaitForCompletionRef(ctx, ac.agentpools.Client); err != nil { + return errors.Wrap(err, "failed to end operation") + } + _, err = future.Result(ac.agentpools) + return err +} + +// Delete deletes an agent pool. +func (ac *AzureClient) Delete(ctx context.Context, resourceGroupName, cluster, name string) error { + future, err := ac.agentpools.Delete(ctx, resourceGroupName, cluster, name) + if err != nil { + return errors.Wrap(err, "failed to begin operation") + } + if err := future.WaitForCompletionRef(ctx, ac.agentpools.Client); err != nil { + return errors.Wrap(err, "failed to end operation") + } + _, err = future.Result(ac.agentpools) + return err +} diff --git a/cloud/services/agentpools/service.go b/cloud/services/agentpools/service.go new file mode 100644 index 00000000000..496b3eb1ca1 --- /dev/null +++ b/cloud/services/agentpools/service.go @@ -0,0 +1,33 @@ +/* +Copyright 2020 The Kubernetes Authors. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package agentpools
+
+import (
+	"github.com/Azure/go-autorest/autorest"
+)
+
+// Service provides operations on azure resources
+type Service struct {
+	Client
+}
+
+// NewService creates a new service.
+func NewService(authorizer autorest.Authorizer, subscriptionID string) *Service {
+	return &Service{
+		Client: NewClient(subscriptionID, authorizer),
+	}
+}
diff --git a/cloud/services/managedclusters/client.go b/cloud/services/managedclusters/client.go
new file mode 100644
index 00000000000..43891dd9627
--- /dev/null
+++ b/cloud/services/managedclusters/client.go
@@ -0,0 +1,101 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package managedclusters
+
+import (
+	"context"
+
+	"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice"
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/pkg/errors"
+	azure "sigs.k8s.io/cluster-api-provider-azure/cloud"
+)
+
+// Client wraps go-sdk
+type Client interface {
+	Get(context.Context, string, string) (containerservice.ManagedCluster, error)
+	GetCredentials(context.Context, string, string) ([]byte, error)
+	CreateOrUpdate(context.Context, string, string, containerservice.ManagedCluster) error
+	Delete(context.Context, string, string) error
+}
+
+// AzureClient contains the Azure go-sdk Client
+type AzureClient struct {
+	managedclusters containerservice.ManagedClustersClient
+}
+
+var _ Client = &AzureClient{}
+
+// NewClient creates a new managed clusters client from subscription ID.
+func NewClient(subscriptionID string, authorizer autorest.Authorizer) *AzureClient {
+	return &AzureClient{
+		managedclusters: newManagedClustersClient(subscriptionID, authorizer),
+	}
+}
+
+// newManagedClustersClient creates a new managed clusters client from subscription ID.
+func newManagedClustersClient(subscriptionID string, authorizer autorest.Authorizer) containerservice.ManagedClustersClient {
+	managedClustersClient := containerservice.NewManagedClustersClient(subscriptionID)
+	managedClustersClient.Authorizer = authorizer
+	managedClustersClient.AddToUserAgent(azure.UserAgent)
+	return managedClustersClient
+}
+
+// Get gets a managed cluster.
+func (ac *AzureClient) Get(ctx context.Context, resourceGroupName, name string) (containerservice.ManagedCluster, error) {
+	return ac.managedclusters.Get(ctx, resourceGroupName, name)
+}
+
+// GetCredentials fetches the admin kubeconfig for a managed cluster.
+func (ac *AzureClient) GetCredentials(ctx context.Context, resourceGroupName, name string) ([]byte, error) {
+	credentialList, err := ac.managedclusters.ListClusterAdminCredentials(ctx, resourceGroupName, name)
+	if err != nil {
+		return nil, err
+	}
+
+	if credentialList.Kubeconfigs == nil || len(*credentialList.Kubeconfigs) < 1 {
+		return nil, errors.New("no kubeconfigs available for the managed cluster")
+	}
+
+	return *(*credentialList.Kubeconfigs)[0].Value, nil
+}
+
+// CreateOrUpdate creates or updates a managed cluster.
+func (ac *AzureClient) CreateOrUpdate(ctx context.Context, resourceGroupName, name string, cluster containerservice.ManagedCluster) error {
+	future, err := ac.managedclusters.CreateOrUpdate(ctx, resourceGroupName, name, cluster)
+	if err != nil {
+		return errors.Wrapf(err, "failed to begin operation")
+	}
+	if err := future.WaitForCompletionRef(ctx, ac.managedclusters.Client); err != nil {
+		return errors.Wrapf(err, "failed to end operation")
+	}
+	_, err = future.Result(ac.managedclusters)
+	return err
+}
+
+// Delete deletes a managed cluster.
+func (ac *AzureClient) Delete(ctx context.Context, resourceGroupName, name string) error {
+	future, err := ac.managedclusters.Delete(ctx, resourceGroupName, name)
+	if err != nil {
+		return errors.Wrapf(err, "failed to begin operation")
+	}
+	if err := future.WaitForCompletionRef(ctx, ac.managedclusters.Client); err != nil {
+		return errors.Wrapf(err, "failed to end operation")
+	}
+	_, err = future.Result(ac.managedclusters)
+	return err
+}
diff --git a/cloud/services/managedclusters/managedclusters.go b/cloud/services/managedclusters/managedclusters.go
new file mode 100644
index 00000000000..70d6190e0de
--- /dev/null
+++ b/cloud/services/managedclusters/managedclusters.go
@@ -0,0 +1,208 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package managedclusters
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"strings"
+
+	"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice"
+	"github.com/pkg/errors"
+	"k8s.io/klog"
+	azure "sigs.k8s.io/cluster-api-provider-azure/cloud"
+)
+
+var (
+	defaultUser     string = "azureuser"
+	managedIdentity string = "msi"
+)
+
+// Spec contains properties to create a managed cluster.
+type Spec struct {
+	// Name is the name of this AKS Cluster.
+	Name string
+
+	// ResourceGroup is the name of the Azure resource group for this AKS Cluster.
+	ResourceGroup string
+
+	// Location is a string matching one of the canonical Azure region names. Examples: "westus2", "eastus".
+	Location string
+
+	// Tags is a set of tags to add to this cluster.
+	Tags map[string]string
+
+	// Version defines the desired Kubernetes version.
+	Version string
+
+	// LoadBalancerSKU for the managed cluster. Possible values include: 'Standard', 'Basic'. Defaults to standard.
+	LoadBalancerSKU *string
+
+	// NetworkPlugin used for building Kubernetes network. Possible values include: 'Azure', 'Kubenet'. Defaults to Azure.
+ NetworkPlugin *string + + // NetworkPolicy used for building Kubernetes network. Possible values include: 'Calico', 'Azure'. Defaults to Azure. + NetworkPolicy *string + + // SSHPublicKey is a string literal containing an ssh public key. Will autogenerate and discard if not provided. + SSHPublicKey string + + // AgentPools is the list of agent pool specifications in this cluster. + AgentPools []PoolSpec + + // PodCIDR is the CIDR block for IP addresses distributed to pods + PodCIDR string + + // ServiceCIDR is the CIDR block for IP addresses distributed to services + ServiceCIDR string +} + +type PoolSpec struct { + Name string + SKU string + Replicas int32 + OSDiskSizeGB int32 +} + +// Get fetches a managed cluster from Azure. +func (s *Service) Get(ctx context.Context, spec interface{}) (interface{}, error) { + managedClusterSpec, ok := spec.(*Spec) + if !ok { + return nil, errors.New("expected managed cluster specification") + } + return s.Client.Get(ctx, managedClusterSpec.ResourceGroup, managedClusterSpec.Name) +} + +// Get fetches a managed cluster kubeconfig from Azure. +func (s *Service) GetCredentials(ctx context.Context, group, name string) ([]byte, error) { + return s.Client.GetCredentials(ctx, group, name) +} + +// Reconcile idempotently creates or updates a managed cluster, if possible. +func (s *Service) Reconcile(ctx context.Context, spec interface{}) error { + managedClusterSpec, ok := spec.(*Spec) + if !ok { + return errors.New("expected managed cluster specification") + } + + properties := containerservice.ManagedCluster{ + Identity: &containerservice.ManagedClusterIdentity{ + Type: containerservice.SystemAssigned, + }, + Location: &managedClusterSpec.Location, + ManagedClusterProperties: &containerservice.ManagedClusterProperties{ + DNSPrefix: &managedClusterSpec.Name, + KubernetesVersion: &managedClusterSpec.Version, + LinuxProfile: &containerservice.LinuxProfile{ + AdminUsername: &defaultUser, + SSH: &containerservice.SSHConfiguration{ + PublicKeys: &[]containerservice.SSHPublicKey{ + { + KeyData: &managedClusterSpec.SSHPublicKey, + }, + }, + }, + }, + ServicePrincipalProfile: &containerservice.ManagedClusterServicePrincipalProfile{ + ClientID: &managedIdentity, + }, + AgentPoolProfiles: &[]containerservice.ManagedClusterAgentPoolProfile{}, + NetworkProfile: &containerservice.NetworkProfileType{ + NetworkPlugin: containerservice.Azure, + LoadBalancerSku: containerservice.Standard, + }, + }, + } + + if managedClusterSpec.NetworkPlugin != nil { + properties.NetworkProfile.NetworkPlugin = containerservice.NetworkPlugin(*managedClusterSpec.NetworkPlugin) + } + + if managedClusterSpec.PodCIDR != "" { + properties.NetworkProfile.PodCidr = &managedClusterSpec.PodCIDR + } + + if managedClusterSpec.ServiceCIDR != "" { + properties.NetworkProfile.ServiceCidr = &managedClusterSpec.ServiceCIDR + ip, _, err := net.ParseCIDR(managedClusterSpec.ServiceCIDR) + if err != nil { + return fmt.Errorf("failed to parse service cidr: %w", err) + } + // HACK: set the last octet of the IP to .10 + // This ensures the dns IP is valid in the service cidr without forcing the user + // to specify it in both the Capi cluster and the Azure control plane. 
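+		// For example (illustrative values only): a ServiceCIDR of "10.0.0.0/16" yields a
+		// DNSServiceIP of "10.0.0.10", since ParseCIDR returns a 16-byte IP for IPv4
+		// addresses and index 15 is the last octet.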
+		// https://golang.org/src/net/ip.go#L48
+		ip[15] = byte(10)
+		dnsIP := ip.String()
+		properties.NetworkProfile.DNSServiceIP = &dnsIP
+
+	}
+
+	if managedClusterSpec.NetworkPolicy != nil {
+		if strings.EqualFold(*managedClusterSpec.NetworkPolicy, "Azure") {
+			properties.NetworkProfile.NetworkPolicy = containerservice.NetworkPolicyAzure
+		} else if strings.EqualFold(*managedClusterSpec.NetworkPolicy, "Calico") {
+			properties.NetworkProfile.NetworkPolicy = containerservice.NetworkPolicyCalico
+		} else {
+			return fmt.Errorf("invalid network policy: '%s'. Allowed options are 'calico' and 'azure'", *managedClusterSpec.NetworkPolicy)
+		}
+	}
+
+	if managedClusterSpec.LoadBalancerSKU != nil {
+		properties.NetworkProfile.LoadBalancerSku = containerservice.LoadBalancerSku(*managedClusterSpec.LoadBalancerSKU)
+	}
+
+	for _, pool := range managedClusterSpec.AgentPools {
+		profile := containerservice.ManagedClusterAgentPoolProfile{
+			Name:         &pool.Name,
+			VMSize:       containerservice.VMSizeTypes(pool.SKU),
+			OsDiskSizeGB: &pool.OSDiskSizeGB,
+			Count:        &pool.Replicas,
+			Type:         containerservice.VirtualMachineScaleSets,
+		}
+		*properties.AgentPoolProfiles = append(*properties.AgentPoolProfiles, profile)
+	}
+
+	err := s.Client.CreateOrUpdate(ctx, managedClusterSpec.ResourceGroup, managedClusterSpec.Name, properties)
+	if err != nil {
+		return fmt.Errorf("failed to create or update managed cluster, %#+v", err)
+	}
+
+	return nil
+}
+
+// Delete deletes the managed cluster with the provided name.
+func (s *Service) Delete(ctx context.Context, spec interface{}) error {
+	managedClusterSpec, ok := spec.(*Spec)
+	if !ok {
+		return errors.New("expected managed cluster specification")
+	}
+
+	klog.V(2).Infof("Deleting managed cluster %s ", managedClusterSpec.Name)
+	err := s.Client.Delete(ctx, managedClusterSpec.ResourceGroup, managedClusterSpec.Name)
+	if err != nil {
+		if azure.ResourceNotFound(err) {
+			// already deleted
+			return nil
+		}
+		return errors.Wrapf(err, "failed to delete managed cluster %s in resource group %s", managedClusterSpec.Name, managedClusterSpec.ResourceGroup)
+	}
+
+	klog.V(2).Infof("successfully deleted managed cluster %s ", managedClusterSpec.Name)
+	return nil
+}
diff --git a/cloud/services/managedclusters/service.go b/cloud/services/managedclusters/service.go
new file mode 100644
index 00000000000..a188fab24b4
--- /dev/null
+++ b/cloud/services/managedclusters/service.go
@@ -0,0 +1,33 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package managedclusters
+
+import (
+	"github.com/Azure/go-autorest/autorest"
+)
+
+// Service provides operations on azure resources
+type Service struct {
+	Client
+}
+
+// NewService creates a new service.
+func NewService(authorizer autorest.Authorizer, subscriptionID string) *Service { + return &Service{ + Client: NewClient(subscriptionID, authorizer), + } +} diff --git a/cloud/services/publicips/publicips.go b/cloud/services/publicips/publicips.go index c206965ab94..dbc135a6e5c 100644 --- a/cloud/services/publicips/publicips.go +++ b/cloud/services/publicips/publicips.go @@ -29,7 +29,8 @@ import ( // Spec specification for public ip type Spec struct { - Name string + Name string + DNSName string } // Get provides information about a public ip. @@ -70,7 +71,7 @@ func (s *Service) Reconcile(ctx context.Context, spec interface{}) error { PublicIPAllocationMethod: network.Static, DNSSettings: &network.PublicIPAddressDNSSettings{ DomainNameLabel: to.StringPtr(strings.ToLower(ipName)), - Fqdn: to.StringPtr(s.Scope.Network().APIServerIP.DNSName), + Fqdn: &publicIPSpec.DNSName, }, }, }, diff --git a/cloud/services/scalesets/client.go b/cloud/services/scalesets/client.go index 2e2cae063b3..29d93ddc6b4 100644 --- a/cloud/services/scalesets/client.go +++ b/cloud/services/scalesets/client.go @@ -29,6 +29,7 @@ import ( // Client wraps go-sdk type Client interface { + List(context.Context, string) ([]compute.VirtualMachineScaleSet, error) ListInstances(context.Context, string, string) ([]compute.VirtualMachineScaleSetVM, error) Get(context.Context, string, string) (compute.VirtualMachineScaleSet, error) CreateOrUpdate(context.Context, string, string, compute.VirtualMachineScaleSet) error @@ -86,6 +87,20 @@ func (ac *AzureClient) ListInstances(ctx context.Context, resourceGroupName, vms } var instances []compute.VirtualMachineScaleSetVM + for ; itr.NotDone(); err = itr.NextWithContext(ctx) { + if err != nil { + return nil, fmt.Errorf("failed to iterate vm scale set vms [%w]", err) + } + vm := itr.Value() + instances = append(instances, vm) + } + return instances, nil +} + +// Lists all scale sets in a resource group. 
+func (ac *AzureClient) List(ctx context.Context, resourceGroupName string) ([]compute.VirtualMachineScaleSet, error) { + itr, err := ac.scalesets.ListComplete(ctx, resourceGroupName) + var instances []compute.VirtualMachineScaleSet for ; itr.NotDone(); err = itr.NextWithContext(ctx) { if err != nil { return nil, fmt.Errorf("failed to iterate vm scale sets [%w]", err) diff --git a/cloud/services/scalesets/mock_scalesets/scalesets_mock.go b/cloud/services/scalesets/mock_scalesets/scalesets_mock.go index ade5de9bfb9..9624174246b 100644 --- a/cloud/services/scalesets/mock_scalesets/scalesets_mock.go +++ b/cloud/services/scalesets/mock_scalesets/scalesets_mock.go @@ -51,6 +51,21 @@ func (m *MockClient) EXPECT() *MockClientMockRecorder { return m.recorder } +// List mocks base method +func (m *MockClient) List(arg0 context.Context, arg1 string) ([]compute.VirtualMachineScaleSet, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "List", arg0, arg1) + ret0, _ := ret[0].([]compute.VirtualMachineScaleSet) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// List indicates an expected call of List +func (mr *MockClientMockRecorder) List(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockClient)(nil).List), arg0, arg1) +} + // ListInstances mocks base method func (m *MockClient) ListInstances(arg0 context.Context, arg1, arg2 string) ([]compute.VirtualMachineScaleSetVM, error) { m.ctrl.T.Helper() diff --git a/cloud/services/scalesets/service.go b/cloud/services/scalesets/service.go index 8fd594463cf..7ed0121b2a6 100644 --- a/cloud/services/scalesets/service.go +++ b/cloud/services/scalesets/service.go @@ -13,25 +13,20 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ - package scalesets import ( - "sigs.k8s.io/cluster-api-provider-azure/cloud/scope" + "github.com/Azure/go-autorest/autorest" ) // Service provides operations on azure resources type Service struct { Client - Scope *scope.ClusterScope - MachinePoolScope *scope.MachinePoolScope } // NewService creates a new service. 
-func NewService(scope *scope.ClusterScope, machinePoolScope *scope.MachinePoolScope) *Service { +func NewService(authorizer autorest.Authorizer, subscriptionID string) *Service { return &Service{ - Scope: scope, - MachinePoolScope: machinePoolScope, - Client: NewClient(scope.SubscriptionID, scope.Authorizer), + Client: NewClient(subscriptionID, authorizer), } } diff --git a/cloud/services/scalesets/vmss.go b/cloud/services/scalesets/vmss.go index 1177a65c4e4..369398b286f 100644 --- a/cloud/services/scalesets/vmss.go +++ b/cloud/services/scalesets/vmss.go @@ -34,13 +34,19 @@ import ( // Spec input specification for Get/CreateOrUpdate/Delete calls type ( Spec struct { - Name string - Sku string - Capacity int64 - SSHKeyData string - Image *infrav1.Image - OSDisk infrav1.OSDisk - CustomData string + Name string + ResourceGroup string + Location string + ClusterName string + MachinePoolName string + Sku string + Capacity int64 + SSHKeyData string + Image *infrav1.Image + OSDisk infrav1.OSDisk + CustomData string + SubnetID string + AdditionalTags infrav1.Tags } ) @@ -50,12 +56,12 @@ func (s *Service) Get(ctx context.Context, spec interface{}) (interface{}, error return compute.VirtualMachineScaleSet{}, errors.New("invalid VMSS specification") } - vmss, err := s.Client.Get(ctx, s.Scope.ResourceGroup(), vmssSpec.Name) + vmss, err := s.Client.Get(ctx, vmssSpec.ResourceGroup, vmssSpec.Name) if err != nil { return vmss, err } - vmssInstances, err := s.Client.ListInstances(ctx, s.Scope.ResourceGroup(), vmssSpec.Name) + vmssInstances, err := s.Client.ListInstances(ctx, vmssSpec.ResourceGroup, vmssSpec.Name) if err != nil { return vmss, err } @@ -75,18 +81,20 @@ func (s *Service) Reconcile(ctx context.Context, spec interface{}) error { } // Make sure to use the MachineScope here to get the merger of AzureCluster and AzureMachine tags - additionalTags := s.MachinePoolScope.AdditionalTags() // Set the cloud provider tag - additionalTags[infrav1.ClusterAzureCloudProviderTagKey(s.MachinePoolScope.Name())] = string(infrav1.ResourceLifecycleOwned) + if vmssSpec.AdditionalTags == nil { + vmssSpec.AdditionalTags = make(infrav1.Tags) + } + vmssSpec.AdditionalTags[infrav1.ClusterAzureCloudProviderTagKey(vmssSpec.MachinePoolName)] = string(infrav1.ResourceLifecycleOwned) vmss := compute.VirtualMachineScaleSet{ - Location: to.StringPtr(s.Scope.Location()), + Location: to.StringPtr(vmssSpec.Location), Tags: converters.TagsToMap(infrav1.Build(infrav1.BuildParams{ - ClusterName: s.Scope.Name(), + ClusterName: vmssSpec.ClusterName, Lifecycle: infrav1.ResourceLifecycleOwned, - Name: to.StringPtr(s.MachinePoolScope.Name()), + Name: to.StringPtr(vmssSpec.MachinePoolName), Role: to.StringPtr(infrav1.Node), - Additional: additionalTags, + Additional: vmssSpec.AdditionalTags, })), Sku: &compute.Sku{ Name: to.StringPtr(vmssSpec.Sku), @@ -127,7 +135,7 @@ func (s *Service) Reconcile(ctx context.Context, spec interface{}) error { Name: to.StringPtr(vmssSpec.Name + "-ipconfig"), VirtualMachineScaleSetIPConfigurationProperties: &compute.VirtualMachineScaleSetIPConfigurationProperties{ Subnet: &compute.APIEntityReference{ - ID: to.StringPtr(s.Scope.AzureCluster.Spec.NetworkSpec.Subnets[0].ID), + ID: to.StringPtr(vmssSpec.SubnetID), }, Primary: to.BoolPtr(true), PrivateIPAddressVersion: compute.IPv4, @@ -144,7 +152,7 @@ func (s *Service) Reconcile(ctx context.Context, spec interface{}) error { err = s.Client.CreateOrUpdate( ctx, - s.Scope.ResourceGroup(), + vmssSpec.ResourceGroup, vmssSpec.Name, vmss) if err != nil { @@ -156,21 
+164,21 @@ func (s *Service) Reconcile(ctx context.Context, spec interface{}) error { } func (s *Service) Delete(ctx context.Context, spec interface{}) error { - vmSpec, ok := spec.(*Spec) + vmssSpec, ok := spec.(*Spec) if !ok { return errors.New("invalid VMSS specification") } - klog.V(2).Infof("deleting VMSS %s ", vmSpec.Name) - err := s.Client.Delete(ctx, s.Scope.ResourceGroup(), vmSpec.Name) - if err != nil && azure.ResourceNotFound(err) { - // already deleted - return nil - } + klog.V(2).Infof("deleting VMSS %s ", vmssSpec.Name) + err := s.Client.Delete(ctx, vmssSpec.ResourceGroup, vmssSpec.Name) if err != nil { - return errors.Wrapf(err, "failed to delete VMSS %s in resource group %s", vmSpec.Name, s.Scope.ResourceGroup()) + if azure.ResourceNotFound(err) { + // already deleted + return nil + } + return errors.Wrapf(err, "failed to delete VMSS %s in resource group %s", vmssSpec.Name, vmssSpec.ResourceGroup) } - klog.V(2).Infof("successfully deleted VMSS %s ", vmSpec.Name) + klog.V(2).Infof("successfully deleted VMSS %s ", vmssSpec.Name) return nil } diff --git a/cloud/services/scalesets/vmss_test.go b/cloud/services/scalesets/vmss_test.go index 0bd0820347a..d1f96d392e8 100644 --- a/cloud/services/scalesets/vmss_test.go +++ b/cloud/services/scalesets/vmss_test.go @@ -79,22 +79,20 @@ func TestNewService(t *testing.T) { AzureMachinePool: new(infrav1exp.AzureMachinePool), }) g.Expect(err).ToNot(gomega.HaveOccurred()) - actual := NewService(s, mps) + actual := NewService(s.Authorizer, mps.AzureClients.SubscriptionID) g.Expect(actual).ToNot(gomega.BeNil()) - g.Expect(actual.MachinePoolScope).To(gomega.Equal(mps)) - g.Expect(actual.Scope).To(gomega.Equal(s)) } func TestService_Get(t *testing.T) { cases := []struct { Name string - SpecFactory func(g *gomega.GomegaWithT, svc *Service) interface{} - Setup func(ctx context.Context, g *gomega.GomegaWithT, svc *Service) + SpecFactory func(g *gomega.GomegaWithT, scope *scope.ClusterScope, mpScope *scope.MachinePoolScope) interface{} + Setup func(ctx context.Context, g *gomega.GomegaWithT, svc *Service, scope *scope.ClusterScope, mpScope *scope.MachinePoolScope) Expect func(ctx context.Context, g *gomega.GomegaWithT, result interface{}, err error) }{ { Name: "WithInvalidSepcType", - SpecFactory: func(g *gomega.GomegaWithT, _ *Service) interface{} { + SpecFactory: func(g *gomega.GomegaWithT, _ *scope.ClusterScope, _ *scope.MachinePoolScope) interface{} { return "bin" }, Expect: func(_ context.Context, g *gomega.GomegaWithT, result interface{}, err error) { @@ -103,16 +101,21 @@ func TestService_Get(t *testing.T) { }, { Name: "WithValidSpecBut404FromAzureOnVMSS", - SpecFactory: func(g *gomega.GomegaWithT, svc *Service) interface{} { + SpecFactory: func(g *gomega.GomegaWithT, scope *scope.ClusterScope, mpScope *scope.MachinePoolScope) interface{} { return &Spec{ - Name: svc.MachinePoolScope.Name(), + Name: mpScope.Name(), + ResourceGroup: scope.AzureCluster.Spec.ResourceGroup, + Location: scope.AzureCluster.Spec.Location, + ClusterName: scope.Cluster.Name, + SubnetID: scope.AzureCluster.Spec.NetworkSpec.Subnets[0].ID, + MachinePoolName: mpScope.Name(), } }, - Setup: func(ctx context.Context, g *gomega.GomegaWithT, svc *Service) { + Setup: func(ctx context.Context, g *gomega.GomegaWithT, svc *Service, scope *scope.ClusterScope, mpScope *scope.MachinePoolScope) { mockCtrl := gomock.NewController(t) vmssMock := mock_scalesets.NewMockClient(mockCtrl) svc.Client = vmssMock - vmssMock.EXPECT().Get(gomock.Any(), svc.Scope.AzureCluster.Spec.ResourceGroup, 
svc.MachinePoolScope.Name()).Return(compute.VirtualMachineScaleSet{}, autorest.DetailedError{ + vmssMock.EXPECT().Get(gomock.Any(), scope.AzureCluster.Spec.ResourceGroup, mpScope.Name()).Return(compute.VirtualMachineScaleSet{}, autorest.DetailedError{ StatusCode: 404, }) }, @@ -124,17 +127,22 @@ func TestService_Get(t *testing.T) { }, { Name: "WithValidSpecBut404FromAzureOnInstances", - SpecFactory: func(g *gomega.GomegaWithT, svc *Service) interface{} { + SpecFactory: func(g *gomega.GomegaWithT, scope *scope.ClusterScope, mpScope *scope.MachinePoolScope) interface{} { return &Spec{ - Name: svc.MachinePoolScope.Name(), + Name: mpScope.Name(), + ResourceGroup: scope.AzureCluster.Spec.ResourceGroup, + Location: scope.AzureCluster.Spec.Location, + ClusterName: scope.Cluster.Name, + SubnetID: scope.AzureCluster.Spec.NetworkSpec.Subnets[0].ID, + MachinePoolName: mpScope.Name(), } }, - Setup: func(ctx context.Context, g *gomega.GomegaWithT, svc *Service) { + Setup: func(ctx context.Context, g *gomega.GomegaWithT, svc *Service, scope *scope.ClusterScope, mpScope *scope.MachinePoolScope) { mockCtrl := gomock.NewController(t) vmssMock := mock_scalesets.NewMockClient(mockCtrl) svc.Client = vmssMock - vmssMock.EXPECT().Get(gomock.Any(), svc.Scope.AzureCluster.Spec.ResourceGroup, svc.MachinePoolScope.Name()).Return(compute.VirtualMachineScaleSet{}, nil) - vmssMock.EXPECT().ListInstances(gomock.Any(), svc.Scope.AzureCluster.Spec.ResourceGroup, svc.MachinePoolScope.Name()).Return([]compute.VirtualMachineScaleSetVM{}, autorest.DetailedError{ + vmssMock.EXPECT().Get(gomock.Any(), scope.AzureCluster.Spec.ResourceGroup, mpScope.Name()).Return(compute.VirtualMachineScaleSet{}, nil) + vmssMock.EXPECT().ListInstances(gomock.Any(), scope.AzureCluster.Spec.ResourceGroup, mpScope.Name()).Return([]compute.VirtualMachineScaleSetVM{}, autorest.DetailedError{ StatusCode: 404, }) }, @@ -146,17 +154,22 @@ func TestService_Get(t *testing.T) { }, { Name: "WithValidSpecWithVMSSAndInstancesReturned", - SpecFactory: func(g *gomega.GomegaWithT, svc *Service) interface{} { + SpecFactory: func(g *gomega.GomegaWithT, scope *scope.ClusterScope, mpScope *scope.MachinePoolScope) interface{} { return &Spec{ - Name: svc.MachinePoolScope.Name(), + Name: mpScope.Name(), + ResourceGroup: scope.AzureCluster.Spec.ResourceGroup, + Location: scope.AzureCluster.Spec.Location, + ClusterName: scope.Cluster.Name, + SubnetID: scope.AzureCluster.Spec.NetworkSpec.Subnets[0].ID, + MachinePoolName: mpScope.Name(), } }, - Setup: func(ctx context.Context, g *gomega.GomegaWithT, svc *Service) { + Setup: func(ctx context.Context, g *gomega.GomegaWithT, svc *Service, scope *scope.ClusterScope, mpScope *scope.MachinePoolScope) { mockCtrl := gomock.NewController(t) vmssMock := mock_scalesets.NewMockClient(mockCtrl) svc.Client = vmssMock - vmssMock.EXPECT().Get(gomock.Any(), svc.Scope.AzureCluster.Spec.ResourceGroup, svc.MachinePoolScope.Name()).Return(compute.VirtualMachineScaleSet{ - Name: to.StringPtr(svc.MachinePoolScope.Name()), + vmssMock.EXPECT().Get(gomock.Any(), scope.AzureCluster.Spec.ResourceGroup, mpScope.Name()).Return(compute.VirtualMachineScaleSet{ + Name: to.StringPtr(mpScope.Name()), Sku: &compute.Sku{ Capacity: to.Int64Ptr(1), Name: to.StringPtr("Standard"), @@ -165,7 +178,7 @@ func TestService_Get(t *testing.T) { ProvisioningState: to.StringPtr("Succeeded"), }, }, nil) - vmssMock.EXPECT().ListInstances(gomock.Any(), svc.Scope.AzureCluster.Spec.ResourceGroup, svc.MachinePoolScope.Name()).Return([]compute.VirtualMachineScaleSetVM{ + 
vmssMock.EXPECT().ListInstances(gomock.Any(), scope.AzureCluster.Spec.ResourceGroup, mpScope.Name()).Return([]compute.VirtualMachineScaleSetVM{ { Name: to.StringPtr("vm0"), InstanceID: to.StringPtr("0"), @@ -199,12 +212,13 @@ func TestService_Get(t *testing.T) { t.Run(c.Name, func(t *testing.T) { t.Parallel() g := gomega.NewGomegaWithT(t) - svc := getNewService(g) - spec := c.SpecFactory(g, svc) + s, mps := getScopes(g) + svc := NewService(s.Authorizer, mps.AzureClients.SubscriptionID) + spec := c.SpecFactory(g, s, mps) ctx, cancel := context.WithCancel(context.Background()) defer cancel() if c.Setup != nil { - c.Setup(ctx, g, svc) + c.Setup(ctx, g, svc, s, mps) } res, err := svc.Get(context.Background(), spec) c.Expect(ctx, g, res, err) @@ -215,13 +229,13 @@ func TestService_Get(t *testing.T) { func TestService_Reconcile(t *testing.T) { cases := []struct { Name string - SpecFactory func(g *gomega.GomegaWithT, svc *Service) interface{} - Setup func(ctx context.Context, g *gomega.GomegaWithT, svc *Service, spec *Spec) + SpecFactory func(g *gomega.GomegaWithT, scope *scope.ClusterScope, mpScope *scope.MachinePoolScope) interface{} + Setup func(ctx context.Context, g *gomega.GomegaWithT, svc *Service, scope *scope.ClusterScope, mpScope *scope.MachinePoolScope, spec *Spec) Expect func(ctx context.Context, g *gomega.GomegaWithT, err error) }{ { Name: "WithInvalidSepcType", - SpecFactory: func(g *gomega.GomegaWithT, _ *Service) interface{} { + SpecFactory: func(g *gomega.GomegaWithT, scope *scope.ClusterScope, mpScope *scope.MachinePoolScope) interface{} { return "bazz" }, Expect: func(_ context.Context, g *gomega.GomegaWithT, err error) { @@ -230,12 +244,17 @@ func TestService_Reconcile(t *testing.T) { }, { Name: "WithValidSpec", - SpecFactory: func(g *gomega.GomegaWithT, svc *Service) interface{} { + SpecFactory: func(g *gomega.GomegaWithT, scope *scope.ClusterScope, mpScope *scope.MachinePoolScope) interface{} { return &Spec{ - Name: svc.MachinePoolScope.Name(), - Sku: "skuName", - Capacity: 2, - SSHKeyData: "sshKeyData", + Name: mpScope.Name(), + ResourceGroup: scope.AzureCluster.Spec.ResourceGroup, + Location: scope.AzureCluster.Spec.Location, + ClusterName: scope.Cluster.Name, + SubnetID: scope.AzureCluster.Spec.NetworkSpec.Subnets[0].ID, + MachinePoolName: mpScope.Name(), + Sku: "skuName", + Capacity: 2, + SSHKeyData: "sshKeyData", OSDisk: infrav1.OSDisk{ OSType: "Linux", DiskSizeGB: 120, @@ -249,7 +268,7 @@ func TestService_Reconcile(t *testing.T) { CustomData: "customData", } }, - Setup: func(ctx context.Context, g *gomega.GomegaWithT, svc *Service, spec *Spec) { + Setup: func(ctx context.Context, g *gomega.GomegaWithT, svc *Service, scope *scope.ClusterScope, mpScope *scope.MachinePoolScope, spec *Spec) { mockCtrl := gomock.NewController(t) vmssMock := mock_scalesets.NewMockClient(mockCtrl) svc.Client = vmssMock @@ -258,7 +277,7 @@ func TestService_Reconcile(t *testing.T) { g.Expect(err).ToNot(gomega.HaveOccurred()) vmss := compute.VirtualMachineScaleSet{ - Location: to.StringPtr(svc.Scope.Location()), + Location: to.StringPtr(scope.Location()), Tags: map[string]*string{ "Name": to.StringPtr("capz-mp-0"), "kubernetes.io_cluster_capz-mp-0": to.StringPtr("owned"), @@ -304,7 +323,7 @@ func TestService_Reconcile(t *testing.T) { Name: to.StringPtr(spec.Name + "-ipconfig"), VirtualMachineScaleSetIPConfigurationProperties: &compute.VirtualMachineScaleSetIPConfigurationProperties{ Subnet: &compute.APIEntityReference{ - ID: to.StringPtr(svc.Scope.AzureCluster.Spec.NetworkSpec.Subnets[0].ID), + 
ID: to.StringPtr(scope.AzureCluster.Spec.NetworkSpec.Subnets[0].ID), }, Primary: to.BoolPtr(true), PrivateIPAddressVersion: compute.IPv4, @@ -319,7 +338,7 @@ func TestService_Reconcile(t *testing.T) { }, } - vmssMock.EXPECT().CreateOrUpdate(gomock.Any(), svc.Scope.AzureCluster.Spec.ResourceGroup, spec.Name, matchers.DiffEq(vmss)).Return(nil) + vmssMock.EXPECT().CreateOrUpdate(gomock.Any(), scope.AzureCluster.Spec.ResourceGroup, spec.Name, matchers.DiffEq(vmss)).Return(nil) }, Expect: func(ctx context.Context, g *gomega.GomegaWithT, err error) { g.Expect(err).ToNot(gomega.HaveOccurred()) @@ -332,12 +351,13 @@ func TestService_Reconcile(t *testing.T) { t.Run(c.Name, func(t *testing.T) { t.Parallel() g := gomega.NewGomegaWithT(t) - svc := getNewService(g) - spec := c.SpecFactory(g, svc) + s, mps := getScopes(g) + svc := NewService(s.Authorizer, mps.AzureClients.SubscriptionID) + spec := c.SpecFactory(g, s, mps) ctx, cancel := context.WithCancel(context.Background()) defer cancel() if c.Setup != nil { - c.Setup(ctx, g, svc, spec.(*Spec)) + c.Setup(ctx, g, svc, s, mps, spec.(*Spec)) } err := svc.Reconcile(context.Background(), spec) c.Expect(ctx, g, err) @@ -348,13 +368,13 @@ func TestService_Reconcile(t *testing.T) { func TestService_Delete(t *testing.T) { cases := []struct { Name string - SpecFactory func(g *gomega.GomegaWithT, svc *Service) interface{} - Setup func(ctx context.Context, g *gomega.GomegaWithT, svc *Service) + SpecFactory func(g *gomega.GomegaWithT, scope *scope.ClusterScope, mpScope *scope.MachinePoolScope) interface{} + Setup func(ctx context.Context, g *gomega.GomegaWithT, svc *Service, scope *scope.ClusterScope, mpScope *scope.MachinePoolScope) Expect func(ctx context.Context, g *gomega.GomegaWithT, err error) }{ { Name: "WithInvalidSepcType", - SpecFactory: func(g *gomega.GomegaWithT, _ *Service) interface{} { + SpecFactory: func(g *gomega.GomegaWithT, scope *scope.ClusterScope, mpScope *scope.MachinePoolScope) interface{} { return "foo" }, Expect: func(_ context.Context, g *gomega.GomegaWithT, err error) { @@ -363,16 +383,22 @@ func TestService_Delete(t *testing.T) { }, { Name: "WithValidSpecBut404FromAzureOnVMSSAssumeAlreadyDeleted", - SpecFactory: func(g *gomega.GomegaWithT, svc *Service) interface{} { + SpecFactory: func(g *gomega.GomegaWithT, scope *scope.ClusterScope, mpScope *scope.MachinePoolScope) interface{} { return &Spec{ - Name: svc.MachinePoolScope.Name(), + Name: mpScope.Name(), + ResourceGroup: scope.AzureCluster.Spec.ResourceGroup, + Location: scope.AzureCluster.Spec.Location, + ClusterName: scope.Cluster.Name, + SubnetID: scope.AzureCluster.Spec.NetworkSpec.Subnets[0].ID, + MachinePoolName: mpScope.Name(), } }, - Setup: func(ctx context.Context, g *gomega.GomegaWithT, svc *Service) { + Setup: func(ctx context.Context, g *gomega.GomegaWithT, svc *Service, scope *scope.ClusterScope, mpScope *scope.MachinePoolScope) { mockCtrl := gomock.NewController(t) vmssMock := mock_scalesets.NewMockClient(mockCtrl) svc.Client = vmssMock - vmssMock.EXPECT().Delete(gomock.Any(), svc.Scope.AzureCluster.Spec.ResourceGroup, svc.MachinePoolScope.Name()).Return(autorest.DetailedError{ + + vmssMock.EXPECT().Delete(gomock.Any(), scope.AzureCluster.Spec.ResourceGroup, mpScope.Name()).Return(autorest.DetailedError{ StatusCode: 404, }) }, @@ -382,16 +408,21 @@ func TestService_Delete(t *testing.T) { }, { Name: "WithValidSpecAndSuccessfulDelete", - SpecFactory: func(g *gomega.GomegaWithT, svc *Service) interface{} { + SpecFactory: func(g *gomega.GomegaWithT, scope 
*scope.ClusterScope, mpScope *scope.MachinePoolScope) interface{} { return &Spec{ - Name: svc.MachinePoolScope.Name(), + Name: mpScope.Name(), + ResourceGroup: scope.AzureCluster.Spec.ResourceGroup, + Location: scope.AzureCluster.Spec.Location, + ClusterName: scope.Cluster.Name, + SubnetID: scope.AzureCluster.Spec.NetworkSpec.Subnets[0].ID, + MachinePoolName: mpScope.Name(), } }, - Setup: func(ctx context.Context, g *gomega.GomegaWithT, svc *Service) { + Setup: func(ctx context.Context, g *gomega.GomegaWithT, svc *Service, scope *scope.ClusterScope, mpScope *scope.MachinePoolScope) { mockCtrl := gomock.NewController(t) vmssMock := mock_scalesets.NewMockClient(mockCtrl) svc.Client = vmssMock - vmssMock.EXPECT().Delete(gomock.Any(), svc.Scope.AzureCluster.Spec.ResourceGroup, svc.MachinePoolScope.Name()).Return(nil) + vmssMock.EXPECT().Delete(gomock.Any(), scope.AzureCluster.Spec.ResourceGroup, mpScope.Name()).Return(nil) }, Expect: func(ctx context.Context, g *gomega.GomegaWithT, err error) { g.Expect(err).ToNot(gomega.HaveOccurred()) @@ -404,12 +435,13 @@ func TestService_Delete(t *testing.T) { t.Run(c.Name, func(t *testing.T) { t.Parallel() g := gomega.NewGomegaWithT(t) - svc := getNewService(g) - spec := c.SpecFactory(g, svc) + s, mps := getScopes(g) + svc := NewService(s.Authorizer, mps.AzureClients.SubscriptionID) + spec := c.SpecFactory(g, s, mps) ctx, cancel := context.WithCancel(context.Background()) defer cancel() if c.Setup != nil { - c.Setup(ctx, g, svc) + c.Setup(ctx, g, svc, s, mps) } err := svc.Delete(context.Background(), spec) c.Expect(ctx, g, err) @@ -417,7 +449,7 @@ func TestService_Delete(t *testing.T) { } } -func getNewService(g *gomega.GomegaWithT) *Service { +func getScopes(g *gomega.GomegaWithT) (*scope.ClusterScope, *scope.MachinePoolScope) { cluster := &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"}, } @@ -445,7 +477,6 @@ func getNewService(g *gomega.GomegaWithT) *Service { }, }) g.Expect(err).ToNot(gomega.HaveOccurred()) - mps, err := scope.NewMachinePoolScope(scope.MachinePoolScopeParams{ AzureClients: s.AzureClients, Client: client, @@ -461,5 +492,5 @@ func getNewService(g *gomega.GomegaWithT) *Service { }) g.Expect(err).ToNot(gomega.HaveOccurred()) - return NewService(s, mps) + return s, mps } diff --git a/config/crd/bases/exp.infrastructure.cluster.x-k8s.io_azuremachinepools.yaml b/config/crd/bases/exp.infrastructure.cluster.x-k8s.io_azuremachinepools.yaml index b273c43338c..c0580a48c65 100644 --- a/config/crd/bases/exp.infrastructure.cluster.x-k8s.io_azuremachinepools.yaml +++ b/config/crd/bases/exp.infrastructure.cluster.x-k8s.io_azuremachinepools.yaml @@ -10,9 +10,13 @@ metadata: spec: group: exp.infrastructure.cluster.x-k8s.io names: + categories: + - cluster-api kind: AzureMachinePool listKind: AzureMachinePoolList plural: azuremachinepools + shortNames: + - amp singular: azuremachinepool scope: Namespaced versions: diff --git a/config/crd/bases/exp.infrastructure.cluster.x-k8s.io_azuremanagedclusters.yaml b/config/crd/bases/exp.infrastructure.cluster.x-k8s.io_azuremanagedclusters.yaml new file mode 100644 index 00000000000..555bf3d1684 --- /dev/null +++ b/config/crd/bases/exp.infrastructure.cluster.x-k8s.io_azuremanagedclusters.yaml @@ -0,0 +1,77 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.2.9 + creationTimestamp: null + name: azuremanagedclusters.exp.infrastructure.cluster.x-k8s.io +spec: + group: 
exp.infrastructure.cluster.x-k8s.io + names: + categories: + - cluster-api + kind: AzureManagedCluster + listKind: AzureManagedClusterList + plural: azuremanagedclusters + shortNames: + - amc + singular: azuremanagedcluster + scope: Namespaced + versions: + - name: v1alpha3 + schema: + openAPIV3Schema: + description: AzureManagedCluster is the Schema for the azuremanagedclusters + API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: AzureManagedClusterSpec defines the desired state of AzureManagedCluster + properties: + controlPlaneEndpoint: + description: ControlPlaneEndpoint represents the endpoint used to + communicate with the control plane. + properties: + host: + description: The hostname on which the API server is serving. + type: string + port: + description: The port on which the API server is serving. + format: int32 + type: integer + required: + - host + - port + type: object + type: object + status: + description: AzureManagedClusterStatus defines the observed state of AzureManagedCluster + properties: + ready: + description: Ready is true when the provider resource is ready. + type: boolean + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/bases/exp.infrastructure.cluster.x-k8s.io_azuremanagedcontrolplanes.yaml b/config/crd/bases/exp.infrastructure.cluster.x-k8s.io_azuremanagedcontrolplanes.yaml new file mode 100644 index 00000000000..4340e67f578 --- /dev/null +++ b/config/crd/bases/exp.infrastructure.cluster.x-k8s.io_azuremanagedcontrolplanes.yaml @@ -0,0 +1,150 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.2.9 + creationTimestamp: null + name: azuremanagedcontrolplanes.exp.infrastructure.cluster.x-k8s.io +spec: + group: exp.infrastructure.cluster.x-k8s.io + names: + categories: + - cluster-api + kind: AzureManagedControlPlane + listKind: AzureManagedControlPlaneList + plural: azuremanagedcontrolplanes + shortNames: + - amcp + singular: azuremanagedcontrolplane + scope: Namespaced + versions: + - name: v1alpha3 + schema: + openAPIV3Schema: + description: AzureManagedControlPlane is the Schema for the azuremanagedcontrolplanes + API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. 
Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: AzureManagedControlPlaneSpec defines the desired state of + AzureManagedControlPlane + properties: + additionalTags: + additionalProperties: + type: string + description: AdditionalTags is an optional set of tags to add to Azure + resources managed by the Azure provider, in addition to the ones + added by default. + type: object + controlPlaneEndpoint: + description: ControlPlaneEndpoint represents the endpoint used to + communicate with the control plane. + properties: + host: + description: The hostname on which the API server is serving. + type: string + port: + description: The port on which the API server is serving. + format: int32 + type: integer + required: + - host + - port + type: object + defaultPoolRef: + description: 'DefaultPoolRef is the specification for the default + pool, without which an AKS cluster cannot be created. TODO(ace): + consider defaulting and making optional pointer?' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + loadBalancerSku: + description: 'LoadBalancerSKU for the managed cluster. Possible values + include: ''Standard'', ''Basic''. Defaults to standard.' + enum: + - Standard + - Basic + type: string + location: + description: 'Location is a string matching one of the canonical Azure + region names. Examples: "westus2", "eastus".' + type: string + networkPlugin: + description: 'NetworkPlugin used for building Kubernetes network. + Possible values include: ''Azure'', ''Kubenet''. Defaults to Azure.' + enum: + - Azure + - Kubenet + type: string + networkPolicy: + description: 'NetworkPolicy used for building Kubernetes network. + Possible values include: ''Calico'', ''Azure''' + enum: + - Calico + - Azure + type: string + resourceGroup: + description: ResourceGroup is the name of the Azure resource group + for this AKS Cluster. + type: string + sshPublicKey: + description: SSHPublicKey is a string literal containing an ssh public + key. + type: string + subscriptionID: + description: SubscriotionID is the GUID of the Azure subscription + to hold this cluster. + type: string + version: + description: Version defines the desired Kubernetes version. + minLength: 2 + pattern: ^(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)([-0-9a-zA-Z_\.+]*)?$ + type: string + required: + - defaultPoolRef + - location + - resourceGroup + - sshPublicKey + - version + type: object + status: + description: AzureManagedControlPlaneStatus defines the observed state + of AzureManagedControlPlane + properties: + initialized: + description: Initialized is true when the the control plane is available + for initial contact. This may occur before the control plane is + fully ready. In the AzureManagedControlPlane implementation, these + are identical. + type: boolean + ready: + description: Ready is true when the provider resource is ready. 
+ type: boolean + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/bases/exp.infrastructure.cluster.x-k8s.io_azuremanagedmachinepools.yaml b/config/crd/bases/exp.infrastructure.cluster.x-k8s.io_azuremanagedmachinepools.yaml new file mode 100644 index 00000000000..e7ee641dd3a --- /dev/null +++ b/config/crd/bases/exp.infrastructure.cluster.x-k8s.io_azuremanagedmachinepools.yaml @@ -0,0 +1,95 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.2.9 + creationTimestamp: null + name: azuremanagedmachinepools.exp.infrastructure.cluster.x-k8s.io +spec: + group: exp.infrastructure.cluster.x-k8s.io + names: + categories: + - cluster-api + kind: AzureManagedMachinePool + listKind: AzureManagedMachinePoolList + plural: azuremanagedmachinepools + shortNames: + - ammp + singular: azuremanagedmachinepool + scope: Namespaced + versions: + - name: v1alpha3 + schema: + openAPIV3Schema: + description: AzureManagedMachinePool is the Schema for the azuremanagedmachinepools + API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: AzureManagedMachinePoolSpec defines the desired state of + AzureManagedMachinePool + properties: + osDiskSizeGB: + description: OSDiskSizeGB is the disk size for every machine in this + master/agent pool. If you specify 0, it will apply the default osDisk + size according to the vmSize specified. + format: int32 + type: integer + providerIDList: + description: ProviderIDList is the unique identifier as specified + by the cloud provider. + items: + type: string + type: array + sku: + description: SKU is the size of the VMs in the node pool. + type: string + required: + - sku + type: object + status: + description: AzureManagedMachinePoolStatus defines the observed state + of AzureManagedMachinePool + properties: + errorMessage: + description: Any transient errors that occur during the reconciliation + of Machines can be added as events to the Machine object and/or + logged in the controller's output. + type: string + errorReason: + description: Any transient errors that occur during the reconciliation + of Machines can be added as events to the Machine object and/or + logged in the controller's output. + type: string + ready: + description: Ready is true when the provider resource is ready. + type: boolean + replicas: + description: Replicas is the most recently observed number of replicas. 
+ format: int32 + type: integer + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index f69440c90e0..8cf3f4ded60 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -9,8 +9,12 @@ resources: - bases/infrastructure.cluster.x-k8s.io_azureclusters.yaml - bases/infrastructure.cluster.x-k8s.io_azuremachinetemplates.yaml - bases/exp.infrastructure.cluster.x-k8s.io_azuremachinepools.yaml + - bases/exp.infrastructure.cluster.x-k8s.io_azuremanagedmachinepools.yaml + - bases/exp.infrastructure.cluster.x-k8s.io_azuremanagedclusters.yaml + - bases/exp.infrastructure.cluster.x-k8s.io_azuremanagedcontrolplanes.yaml # +kubebuilder:scaffold:crdkustomizeresource + patchesStrategicMerge: # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. # patches here are for enabling the conversion webhook for each CRD @@ -18,6 +22,9 @@ patchesStrategicMerge: - patches/webhook_in_azureclusters.yaml - patches/webhook_in_azuremachinetemplates.yaml - patches/webhook_in_azuremachinepools.yaml + # - patches/webhook_in_azuremanagedmachinepools.yaml + # - patches/webhook_in_azuremanagedclusters.yaml + # - patches/webhook_in_azuremanagedcontrolplanes.yaml # +kubebuilder:scaffold:crdkustomizewebhookpatch # [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix. @@ -26,6 +33,9 @@ patchesStrategicMerge: - patches/cainjection_in_azureclusters.yaml - patches/cainjection_in_azuremachinetemplates.yaml - patches/cainjection_in_azuremachinepools.yaml + # - patches/cainjection_in_azuremanagedmachinepools.yaml + # - patches/cainjection_in_azuremanagedclusters.yaml + # - patches/cainjection_in_azuremanagedcontrolplanes.yaml # +kubebuilder:scaffold:crdkustomizecainjectionpatch # the following config is for teaching kustomize how to do kustomization for CRDs. diff --git a/config/crd/patches/cainjection_in_azuremanagedclusters.yaml b/config/crd/patches/cainjection_in_azuremanagedclusters.yaml new file mode 100644 index 00000000000..9c6698e9079 --- /dev/null +++ b/config/crd/patches/cainjection_in_azuremanagedclusters.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: azuremanagedclusters.exp.infrastructure.cluster.x-k8s.io diff --git a/config/crd/patches/cainjection_in_azuremanagedcontrolplanes.yaml b/config/crd/patches/cainjection_in_azuremanagedcontrolplanes.yaml new file mode 100644 index 00000000000..39c28853c97 --- /dev/null +++ b/config/crd/patches/cainjection_in_azuremanagedcontrolplanes.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. 
+apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: azuremanagedcontrolplanes.exp.infrastructure.cluster.x-k8s.io diff --git a/config/crd/patches/cainjection_in_azuremanagedmachinepools.yaml b/config/crd/patches/cainjection_in_azuremanagedmachinepools.yaml new file mode 100644 index 00000000000..4a31400c001 --- /dev/null +++ b/config/crd/patches/cainjection_in_azuremanagedmachinepools.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: azuremanagedmachinepools.exp.infrastructure.cluster.x-k8s.io diff --git a/config/crd/patches/webhook_in_azuremanagedclusters.yaml b/config/crd/patches/webhook_in_azuremanagedclusters.yaml new file mode 100644 index 00000000000..b31cf1822fe --- /dev/null +++ b/config/crd/patches/webhook_in_azuremanagedclusters.yaml @@ -0,0 +1,19 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: azuremanagedclusters.exp.infrastructure.cluster.x-k8s.io +spec: + conversion: + strategy: Webhook + webhook: + conversionReviewVersions: ["v1", "v1beta1"] + clientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert \ No newline at end of file diff --git a/config/crd/patches/webhook_in_azuremanagedcontrolplanes.yaml b/config/crd/patches/webhook_in_azuremanagedcontrolplanes.yaml new file mode 100644 index 00000000000..ff148a43f39 --- /dev/null +++ b/config/crd/patches/webhook_in_azuremanagedcontrolplanes.yaml @@ -0,0 +1,19 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: azuremanagedcontrolplanes.exp.infrastructure.cluster.x-k8s.io +spec: + conversion: + strategy: Webhook + webhook: + conversionReviewVersions: ["v1", "v1beta1"] + clientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert diff --git a/config/crd/patches/webhook_in_azuremanagedmachinepools.yaml b/config/crd/patches/webhook_in_azuremanagedmachinepools.yaml new file mode 100644 index 00000000000..f03a7a2d482 --- /dev/null +++ b/config/crd/patches/webhook_in_azuremanagedmachinepools.yaml @@ -0,0 +1,19 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. 
+apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: azuremanagedmachinepools.exp.infrastructure.cluster.x-k8s.io +spec: + conversion: + strategy: Webhook + webhook: + conversionReviewVersions: ["v1", "v1beta1"] + clientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 932e5f42a52..329b7e27c7b 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -22,8 +22,12 @@ rules: resources: - secrets verbs: + - create + - delete - get - list + - patch + - update - watch - apiGroups: - cluster.x-k8s.io @@ -33,6 +37,7 @@ rules: verbs: - get - list + - patch - watch - apiGroups: - cluster.x-k8s.io @@ -72,6 +77,66 @@ rules: - get - patch - update +- apiGroups: + - exp.infrastructure.cluster.x-k8s.io + resources: + - azuremanagedclusters + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - exp.infrastructure.cluster.x-k8s.io + resources: + - azuremanagedclusters/status + verbs: + - get + - patch + - update +- apiGroups: + - exp.infrastructure.cluster.x-k8s.io + resources: + - azuremanagedcontrolplanes + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - exp.infrastructure.cluster.x-k8s.io + resources: + - azuremanagedcontrolplanes/status + verbs: + - get + - patch + - update +- apiGroups: + - exp.infrastructure.cluster.x-k8s.io + resources: + - azuremanagedmachinepools + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - exp.infrastructure.cluster.x-k8s.io + resources: + - azuremanagedmachinepools/status + verbs: + - get + - patch + - update - apiGroups: - infrastructure.cluster.x-k8s.io resources: diff --git a/controllers/azurecluster_reconciler.go b/controllers/azurecluster_reconciler.go index 915a0a8eb9c..14897af35b2 100644 --- a/controllers/azurecluster_reconciler.go +++ b/controllers/azurecluster_reconciler.go @@ -205,7 +205,8 @@ func (r *azureClusterReconciler) Reconcile() error { } publicIPSpec := &publicips.Spec{ - Name: r.scope.Network().APIServerIP.Name, + Name: r.scope.Network().APIServerIP.Name, + DNSName: r.scope.Network().APIServerIP.DNSName, } if err := r.publicIPSvc.Reconcile(r.scope.Context, publicIPSpec); err != nil { return errors.Wrapf(err, "failed to reconcile control plane public ip for cluster %s", r.scope.Name()) diff --git a/exp/api/v1alpha3/azuremachinepool_types.go b/exp/api/v1alpha3/azuremachinepool_types.go index d0188a7ad33..a1a55708313 100644 --- a/exp/api/v1alpha3/azuremachinepool_types.go +++ b/exp/api/v1alpha3/azuremachinepool_types.go @@ -121,6 +121,7 @@ type ( // +kubebuilder:object:root=true // +kubebuilder:subresource:status + // +kubebuilder:resource:path=azuremachinepools,scope=Namespaced,categories=cluster-api,shortName=amp // +kubebuilder:printcolumn:name="Replicas",type="string",JSONPath=".status.replicas",description="AzureMachinePool replicas count" // +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="AzureMachinePool replicas count" // +kubebuilder:printcolumn:name="State",type="string",JSONPath=".status.provisioningState",description="Azure VMSS provisioning state" diff --git a/exp/api/v1alpha3/azuremanagedcluster_types.go 
b/exp/api/v1alpha3/azuremanagedcluster_types.go new file mode 100644 index 00000000000..3163f4b437b --- /dev/null +++ b/exp/api/v1alpha3/azuremanagedcluster_types.go @@ -0,0 +1,63 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha3 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" +) + +// AzureManagedClusterSpec defines the desired state of AzureManagedCluster +type AzureManagedClusterSpec struct { + // ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. + // +optional + ControlPlaneEndpoint clusterv1.APIEndpoint `json:"controlPlaneEndpoint"` +} + +// AzureManagedClusterStatus defines the observed state of AzureManagedCluster +type AzureManagedClusterStatus struct { + // Ready is true when the provider resource is ready. + // +optional + Ready bool `json:"ready,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=azuremanagedclusters,scope=Namespaced,categories=cluster-api,shortName=amc +// +kubebuilder:storageversion +// +kubebuilder:subresource:status + +// AzureManagedCluster is the Schema for the azuremanagedclusters API +type AzureManagedCluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec AzureManagedClusterSpec `json:"spec,omitempty"` + Status AzureManagedClusterStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// AzureManagedClusterList contains a list of AzureManagedCluster +type AzureManagedClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AzureManagedCluster `json:"items"` +} + +func init() { + SchemeBuilder.Register(&AzureManagedCluster{}, &AzureManagedClusterList{}) +} diff --git a/exp/api/v1alpha3/azuremanagedcontrolplane_types.go b/exp/api/v1alpha3/azuremanagedcontrolplane_types.go new file mode 100644 index 00000000000..ccc7558e8d1 --- /dev/null +++ b/exp/api/v1alpha3/azuremanagedcontrolplane_types.go @@ -0,0 +1,108 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha3 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" +) + +// AzureManagedControlPlaneSpec defines the desired state of AzureManagedControlPlane +type AzureManagedControlPlaneSpec struct { + // Version defines the desired Kubernetes version. 
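+	// For example, "1.17.8" (illustrative value, not taken from this change); the value must match the SemVer pattern in the validation marker below.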
+	// +kubebuilder:validation:MinLength:=2
+	// +kubebuilder:validation:Pattern:=^(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)([-0-9a-zA-Z_\.+]*)?$
+	Version string `json:"version"`
+
+	// ResourceGroup is the name of the Azure resource group for this AKS Cluster.
+	ResourceGroup string `json:"resourceGroup"`
+
+	// SubscriptionID is the GUID of the Azure subscription to hold this cluster.
+	SubscriptionID string `json:"subscriptionID,omitempty"`
+
+	// Location is a string matching one of the canonical Azure region names. Examples: "westus2", "eastus".
+	Location string `json:"location"`
+
+	// ControlPlaneEndpoint represents the endpoint used to communicate with the control plane.
+	// +optional
+	ControlPlaneEndpoint clusterv1.APIEndpoint `json:"controlPlaneEndpoint"`
+
+	// AdditionalTags is an optional set of tags to add to Azure resources managed by the Azure provider, in addition to the
+	// ones added by default.
+	// +optional
+	AdditionalTags map[string]string `json:"additionalTags,omitempty"`
+
+	// LoadBalancerSKU for the managed cluster. Possible values include: 'Standard', 'Basic'. Defaults to standard.
+	// +kubebuilder:validation:Enum=Standard;Basic
+	LoadBalancerSKU *string `json:"loadBalancerSku,omitempty"`
+
+	// NetworkPlugin used for building Kubernetes network. Possible values include: 'Azure', 'Kubenet'. Defaults to Azure.
+	// +kubebuilder:validation:Enum=Azure;Kubenet
+	NetworkPlugin *string `json:"networkPlugin,omitempty"`
+
+	// NetworkPolicy used for building Kubernetes network. Possible values include: 'Calico', 'Azure'
+	// +kubebuilder:validation:Enum=Calico;Azure
+	NetworkPolicy *string `json:"networkPolicy,omitempty"`
+
+	// SSHPublicKey is a string literal containing an ssh public key.
+	SSHPublicKey string `json:"sshPublicKey"`
+
+	// DefaultPoolRef is the specification for the default pool, without which an AKS cluster cannot be created.
+	// TODO(ace): consider defaulting and making optional pointer?
+	DefaultPoolRef corev1.LocalObjectReference `json:"defaultPoolRef"`
+}
+
+// AzureManagedControlPlaneStatus defines the observed state of AzureManagedControlPlane
+type AzureManagedControlPlaneStatus struct {
+	// Ready is true when the provider resource is ready.
+	// +optional
+	Ready bool `json:"ready,omitempty"`
+
+	// Initialized is true when the control plane is available for initial contact.
+	// This may occur before the control plane is fully ready.
+	// In the AzureManagedControlPlane implementation, these are identical.
+ // +optional + Initialized bool `json:"initialized,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=azuremanagedcontrolplanes,scope=Namespaced,categories=cluster-api,shortName=amcp +// +kubebuilder:storageversion +// +kubebuilder:subresource:status + +// AzureManagedControlPlane is the Schema for the azuremanagedcontrolplanes API +type AzureManagedControlPlane struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec AzureManagedControlPlaneSpec `json:"spec,omitempty"` + Status AzureManagedControlPlaneStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// AzureManagedControlPlaneList contains a list of AzureManagedControlPlane +type AzureManagedControlPlaneList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AzureManagedControlPlane `json:"items"` +} + +func init() { + SchemeBuilder.Register(&AzureManagedControlPlane{}, &AzureManagedControlPlaneList{}) +} diff --git a/exp/api/v1alpha3/azuremanagedmachinepool_types.go b/exp/api/v1alpha3/azuremanagedmachinepool_types.go new file mode 100644 index 00000000000..94ec82e8e4e --- /dev/null +++ b/exp/api/v1alpha3/azuremanagedmachinepool_types.go @@ -0,0 +1,86 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha3 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + capierrors "sigs.k8s.io/cluster-api/errors" +) + +// AzureManagedMachinePoolSpec defines the desired state of AzureManagedMachinePool +type AzureManagedMachinePoolSpec struct { + // SKU is the size of the VMs in the node pool. + SKU string `json:"sku"` + + // OSDiskSizeGB is the disk size for every machine in this master/agent pool. + // If you specify 0, it will apply the default osDisk size according to the vmSize specified. + OSDiskSizeGB *int32 `json:"osDiskSizeGB,omitempty"` + + // ProviderIDList is the unique identifier as specified by the cloud provider. + // +optional + ProviderIDList []string `json:"providerIDList,omitempty"` +} + +// AzureManagedMachinePoolStatus defines the observed state of AzureManagedMachinePool +type AzureManagedMachinePoolStatus struct { + // Ready is true when the provider resource is ready. + // +optional + Ready bool `json:"ready"` + + // Replicas is the most recently observed number of replicas. + // +optional + Replicas int32 `json:"replicas"` + + // Any transient errors that occur during the reconciliation of Machines + // can be added as events to the Machine object and/or logged in the + // controller's output. + // +optional + ErrorReason *capierrors.MachineStatusError `json:"errorReason,omitempty"` + + // Any transient errors that occur during the reconciliation of Machines + // can be added as events to the Machine object and/or logged in the + // controller's output. 
+ // +optional + ErrorMessage *string `json:"errorMessage,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=azuremanagedmachinepools,scope=Namespaced,categories=cluster-api,shortName=ammp +// +kubebuilder:storageversion +// +kubebuilder:subresource:status + +// AzureManagedMachinePool is the Schema for the azuremanagedmachinepools API +type AzureManagedMachinePool struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec AzureManagedMachinePoolSpec `json:"spec,omitempty"` + Status AzureManagedMachinePoolStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// AzureManagedMachinePoolList contains a list of AzureManagedMachinePool +type AzureManagedMachinePoolList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AzureManagedMachinePool `json:"items"` +} + +func init() { + SchemeBuilder.Register(&AzureManagedMachinePool{}, &AzureManagedMachinePoolList{}) +} diff --git a/exp/api/v1alpha3/doc.go b/exp/api/v1alpha3/doc.go new file mode 100644 index 00000000000..e072c3ecbea --- /dev/null +++ b/exp/api/v1alpha3/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha3 diff --git a/exp/api/v1alpha3/zz_generated.deepcopy.go b/exp/api/v1alpha3/zz_generated.deepcopy.go index 29a91021640..8ba8f224f1e 100644 --- a/exp/api/v1alpha3/zz_generated.deepcopy.go +++ b/exp/api/v1alpha3/zz_generated.deepcopy.go @@ -164,6 +164,318 @@ func (in *AzureMachineTemplate) DeepCopy() *AzureMachineTemplate { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureManagedCluster) DeepCopyInto(out *AzureManagedCluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureManagedCluster. +func (in *AzureManagedCluster) DeepCopy() *AzureManagedCluster { + if in == nil { + return nil + } + out := new(AzureManagedCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AzureManagedCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AzureManagedClusterList) DeepCopyInto(out *AzureManagedClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AzureManagedCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureManagedClusterList. +func (in *AzureManagedClusterList) DeepCopy() *AzureManagedClusterList { + if in == nil { + return nil + } + out := new(AzureManagedClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AzureManagedClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureManagedClusterSpec) DeepCopyInto(out *AzureManagedClusterSpec) { + *out = *in + out.ControlPlaneEndpoint = in.ControlPlaneEndpoint +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureManagedClusterSpec. +func (in *AzureManagedClusterSpec) DeepCopy() *AzureManagedClusterSpec { + if in == nil { + return nil + } + out := new(AzureManagedClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureManagedClusterStatus) DeepCopyInto(out *AzureManagedClusterStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureManagedClusterStatus. +func (in *AzureManagedClusterStatus) DeepCopy() *AzureManagedClusterStatus { + if in == nil { + return nil + } + out := new(AzureManagedClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureManagedControlPlane) DeepCopyInto(out *AzureManagedControlPlane) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureManagedControlPlane. +func (in *AzureManagedControlPlane) DeepCopy() *AzureManagedControlPlane { + if in == nil { + return nil + } + out := new(AzureManagedControlPlane) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AzureManagedControlPlane) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureManagedControlPlaneList) DeepCopyInto(out *AzureManagedControlPlaneList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AzureManagedControlPlane, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureManagedControlPlaneList. 
+func (in *AzureManagedControlPlaneList) DeepCopy() *AzureManagedControlPlaneList { + if in == nil { + return nil + } + out := new(AzureManagedControlPlaneList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AzureManagedControlPlaneList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureManagedControlPlaneSpec) DeepCopyInto(out *AzureManagedControlPlaneSpec) { + *out = *in + out.ControlPlaneEndpoint = in.ControlPlaneEndpoint + if in.AdditionalTags != nil { + in, out := &in.AdditionalTags, &out.AdditionalTags + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.LoadBalancerSKU != nil { + in, out := &in.LoadBalancerSKU, &out.LoadBalancerSKU + *out = new(string) + **out = **in + } + if in.NetworkPlugin != nil { + in, out := &in.NetworkPlugin, &out.NetworkPlugin + *out = new(string) + **out = **in + } + if in.NetworkPolicy != nil { + in, out := &in.NetworkPolicy, &out.NetworkPolicy + *out = new(string) + **out = **in + } + out.DefaultPoolRef = in.DefaultPoolRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureManagedControlPlaneSpec. +func (in *AzureManagedControlPlaneSpec) DeepCopy() *AzureManagedControlPlaneSpec { + if in == nil { + return nil + } + out := new(AzureManagedControlPlaneSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureManagedControlPlaneStatus) DeepCopyInto(out *AzureManagedControlPlaneStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureManagedControlPlaneStatus. +func (in *AzureManagedControlPlaneStatus) DeepCopy() *AzureManagedControlPlaneStatus { + if in == nil { + return nil + } + out := new(AzureManagedControlPlaneStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureManagedMachinePool) DeepCopyInto(out *AzureManagedMachinePool) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureManagedMachinePool. +func (in *AzureManagedMachinePool) DeepCopy() *AzureManagedMachinePool { + if in == nil { + return nil + } + out := new(AzureManagedMachinePool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AzureManagedMachinePool) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AzureManagedMachinePoolList) DeepCopyInto(out *AzureManagedMachinePoolList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AzureManagedMachinePool, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureManagedMachinePoolList. +func (in *AzureManagedMachinePoolList) DeepCopy() *AzureManagedMachinePoolList { + if in == nil { + return nil + } + out := new(AzureManagedMachinePoolList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AzureManagedMachinePoolList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureManagedMachinePoolSpec) DeepCopyInto(out *AzureManagedMachinePoolSpec) { + *out = *in + if in.OSDiskSizeGB != nil { + in, out := &in.OSDiskSizeGB, &out.OSDiskSizeGB + *out = new(int32) + **out = **in + } + if in.ProviderIDList != nil { + in, out := &in.ProviderIDList, &out.ProviderIDList + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureManagedMachinePoolSpec. +func (in *AzureManagedMachinePoolSpec) DeepCopy() *AzureManagedMachinePoolSpec { + if in == nil { + return nil + } + out := new(AzureManagedMachinePoolSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureManagedMachinePoolStatus) DeepCopyInto(out *AzureManagedMachinePoolStatus) { + *out = *in + if in.ErrorReason != nil { + in, out := &in.ErrorReason, &out.ErrorReason + *out = new(errors.MachineStatusError) + **out = **in + } + if in.ErrorMessage != nil { + in, out := &in.ErrorMessage, &out.ErrorMessage + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureManagedMachinePoolStatus. +func (in *AzureManagedMachinePoolStatus) DeepCopy() *AzureManagedMachinePoolStatus { + if in == nil { + return nil + } + out := new(AzureManagedMachinePoolStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *VMSS) DeepCopyInto(out *VMSS) { *out = *in diff --git a/exp/controllers/azuremachinepool_controller.go b/exp/controllers/azuremachinepool_controller.go index 8968d56fe8a..55e190ccd9d 100644 --- a/exp/controllers/azuremachinepool_controller.go +++ b/exp/controllers/azuremachinepool_controller.go @@ -45,12 +45,12 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" - "sigs.k8s.io/cluster-api-provider-azure/cloud/services/scalesets" capzcntr "sigs.k8s.io/cluster-api-provider-azure/controllers" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha3" azure "sigs.k8s.io/cluster-api-provider-azure/cloud" "sigs.k8s.io/cluster-api-provider-azure/cloud/scope" + "sigs.k8s.io/cluster-api-provider-azure/cloud/services/scalesets" infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha3" ) @@ -372,7 +372,7 @@ func (r *AzureMachinePoolReconciler) reconcileTags(machinePoolScope *scope.Machi vmssSpec := &scalesets.Spec{ Name: machinePoolScope.Name(), } - svc := scalesets.NewService(clusterScope, machinePoolScope) + svc := scalesets.NewService(machinePoolScope.AzureClients.Authorizer, machinePoolScope.AzureClients.SubscriptionID) vm, err := svc.Client.Get(clusterScope.Context, clusterScope.ResourceGroup(), machinePoolScope.Name()) if err != nil { return errors.Wrapf(err, "failed to query AzureMachine VMSS") @@ -463,7 +463,7 @@ func newAzureMachinePoolService(machinePoolScope *scope.MachinePoolScope, cluste return &azureMachinePoolService{ machinePoolScope: machinePoolScope, clusterScope: clusterScope, - virtualMachinesScaleSetSvc: scalesets.NewService(clusterScope, machinePoolScope), + virtualMachinesScaleSetSvc: scalesets.NewService(machinePoolScope.AzureClients.Authorizer, machinePoolScope.AzureClients.SubscriptionID), } } @@ -490,13 +490,19 @@ func (s *azureMachinePoolService) CreateOrUpdate() (*infrav1exp.VMSS, error) { } vmssSpec := &scalesets.Spec{ - Name: s.machinePoolScope.Name(), - Sku: ampSpec.Template.VMSize, - Capacity: replicas, - SSHKeyData: string(decoded), - Image: image, - OSDisk: ampSpec.Template.OSDisk, - CustomData: bootstrapData, + Name: s.machinePoolScope.Name(), + ResourceGroup: s.clusterScope.ResourceGroup(), + Location: s.clusterScope.Location(), + ClusterName: s.clusterScope.Name(), + MachinePoolName: s.machinePoolScope.Name(), + Sku: ampSpec.Template.VMSize, + Capacity: replicas, + SSHKeyData: string(decoded), + Image: image, + OSDisk: ampSpec.Template.OSDisk, + CustomData: bootstrapData, + AdditionalTags: s.machinePoolScope.AdditionalTags(), + SubnetID: s.clusterScope.AzureCluster.Spec.NetworkSpec.Subnets[0].ID, } err = s.virtualMachinesScaleSetSvc.Reconcile(context.TODO(), vmssSpec) @@ -567,15 +573,14 @@ func (s *azureMachinePoolService) Get() (*infrav1exp.VMSS, error) { func getOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*capiv1exp.MachinePool, error) { for _, ref := range obj.OwnerReferences { if ref.Kind == "MachinePool" && ref.APIVersion == capiv1exp.GroupVersion.String() { - - return getMachineByName(ctx, c, obj.Namespace, ref.Name) + return getMachinePoolByName(ctx, c, obj.Namespace, ref.Name) } } return nil, nil } // getMachinePoolByName finds and return a Machine object using the specified params. 
-func getMachineByName(ctx context.Context, c client.Client, namespace, name string) (*capiv1exp.MachinePool, error) { +func getMachinePoolByName(ctx context.Context, c client.Client, namespace, name string) (*capiv1exp.MachinePool, error) { m := &capiv1exp.MachinePool{} key := client.ObjectKey{Name: name, Namespace: namespace} if err := c.Get(ctx, key, m); err != nil { diff --git a/exp/controllers/azuremanagedcluster_controller.go b/exp/controllers/azuremanagedcluster_controller.go new file mode 100644 index 00000000000..06129005db4 --- /dev/null +++ b/exp/controllers/azuremanagedcluster_controller.go @@ -0,0 +1,111 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + + "github.com/go-logr/logr" + "github.com/pkg/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha3" + + "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/patch" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// AzureManagedClusterReconciler reconciles a AzureManagedCluster object +type AzureManagedClusterReconciler struct { + client.Client + Log logr.Logger + Recorder record.EventRecorder +} + +func (r *AzureManagedClusterReconciler) SetupWithManager(mgr ctrl.Manager, options controller.Options) error { + return ctrl.NewControllerManagedBy(mgr). + WithOptions(options). + For(&infrav1exp.AzureManagedCluster{}). + Complete(r) +} + +// +kubebuilder:rbac:groups=exp.infrastructure.cluster.x-k8s.io,resources=azuremanagedclusters,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=exp.infrastructure.cluster.x-k8s.io,resources=azuremanagedclusters/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch +// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;patch;delete + +func (r *AzureManagedClusterReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, reterr error) { + ctx := context.TODO() + log := r.Log.WithValues("namespace", req.Namespace, "aksCluster", req.Name) + + // Fetch the AzureManagedCluster instance + aksCluster := &infrav1exp.AzureManagedCluster{} + err := r.Get(ctx, req.NamespacedName, aksCluster) + if err != nil { + if apierrors.IsNotFound(err) { + return reconcile.Result{}, nil + } + return reconcile.Result{}, err + } + + // Fetch the Cluster. 
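+	// util.GetOwnerCluster returns a nil Cluster without error when the Cluster controller
+	// has not set the owner reference yet; that case is handled just below by returning early.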
+ cluster, err := util.GetOwnerCluster(ctx, r.Client, aksCluster.ObjectMeta) + if err != nil { + return reconcile.Result{}, err + } + if cluster == nil { + log.Info("Cluster Controller has not yet set OwnerRef") + return reconcile.Result{}, nil + } + + controlPlane := &infrav1exp.AzureManagedControlPlane{} + controlPlaneRef := types.NamespacedName{ + Name: cluster.Spec.ControlPlaneRef.Name, + Namespace: cluster.Namespace, + } + + log = log.WithValues("cluster", cluster.Name) + + if err := r.Get(ctx, controlPlaneRef, controlPlane); err != nil { + return reconcile.Result{}, errors.Wrap(err, "failed to get control plane ref") + } + + log = log.WithValues("controlPlane", controlPlaneRef.Name) + + patchhelper, err := patch.NewHelper(aksCluster, r.Client) + if err != nil { + return reconcile.Result{}, errors.Wrap(err, "failed to init patch helper") + } + + // Match whatever the control plane says. We should also enqueue + // requests from control plane to infra cluster to keep this accurate + aksCluster.Status.Ready = controlPlane.Status.Ready + aksCluster.Spec.ControlPlaneEndpoint = controlPlane.Spec.ControlPlaneEndpoint + + if err := patchhelper.Patch(ctx, aksCluster); err != nil { + return reconcile.Result{}, err + } + + log.Info("Successfully reconciled") + + return reconcile.Result{}, nil +} diff --git a/exp/controllers/azuremanagedmachinepool_controller.go b/exp/controllers/azuremanagedmachinepool_controller.go new file mode 100644 index 00000000000..d36fd705e70 --- /dev/null +++ b/exp/controllers/azuremanagedmachinepool_controller.go @@ -0,0 +1,161 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + + "github.com/go-logr/logr" + "github.com/pkg/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/client-go/tools/record" + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha3" + "sigs.k8s.io/cluster-api-provider-azure/cloud/scope" + infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha3" + + "sigs.k8s.io/cluster-api/util" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// AzureManagedMachinePoolReconciler reconciles a AzureManagedMachinePool object +type AzureManagedMachinePoolReconciler struct { + client.Client + Log logr.Logger + Recorder record.EventRecorder +} + +func (r *AzureManagedMachinePoolReconciler) SetupWithManager(mgr ctrl.Manager, options controller.Options) error { + return ctrl.NewControllerManagedBy(mgr). + WithOptions(options). + For(&infrav1exp.AzureManagedMachinePool{}). 
+ Complete(r) +} + +// +kubebuilder:rbac:groups=exp.infrastructure.cluster.x-k8s.io,resources=azuremanagedmachinepools,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=exp.infrastructure.cluster.x-k8s.io,resources=azuremanagedmachinepools/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch;patch +// +kubebuilder:rbac:groups=exp.cluster.x-k8s.io,resources=machinepools;machinepools/status,verbs=get;list;watch + +func (r *AzureManagedMachinePoolReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, reterr error) { + ctx := context.TODO() + log := r.Log.WithValues("namespace", req.Namespace, "infraPool", req.Name) + + // Fetch the AzureManagedMachinePool instance + infraPool := &infrav1exp.AzureManagedMachinePool{} + err := r.Get(ctx, req.NamespacedName, infraPool) + if err != nil { + if apierrors.IsNotFound(err) { + return reconcile.Result{}, nil + } + return reconcile.Result{}, err + } + + // Fetch the owning MachinePool. + ownerPool, err := getOwnerMachinePool(ctx, r.Client, infraPool.ObjectMeta) + if err != nil { + return reconcile.Result{}, err + } + if ownerPool == nil { + log.Info("MachinePool Controller has not yet set OwnerRef") + return reconcile.Result{}, nil + } + + // Fetch the Cluster. + ownerCluster, err := util.GetOwnerCluster(ctx, r.Client, ownerPool.ObjectMeta) + if err != nil { + return reconcile.Result{}, err + } + if ownerCluster == nil { + log.Info("Cluster Controller has not yet set OwnerRef") + return reconcile.Result{}, nil + } + + log = log.WithValues("ownerCluster", ownerCluster.Name) + + // Fetch the corresponding control plane which has all the interesting data. + controlPlane := &infrav1exp.AzureManagedControlPlane{} + controlPlaneName := client.ObjectKey{ + Namespace: ownerCluster.Spec.ControlPlaneRef.Namespace, + Name: ownerCluster.Spec.ControlPlaneRef.Name, + } + if err := r.Client.Get(ctx, controlPlaneName, controlPlane); err != nil { + return reconcile.Result{}, err + } + + // Create the scope. + mcpScope, err := scope.NewManagedControlPlaneScope(scope.ManagedControlPlaneScopeParams{ + Client: r.Client, + Logger: log, + ControlPlane: controlPlane, + Cluster: ownerCluster, + MachinePool: ownerPool, + InfraMachinePool: infraPool, + PatchTarget: infraPool, + }) + if err != nil { + return reconcile.Result{}, errors.Errorf("failed to create scope: %+v", err) + } + + // Handle deleted clusters + if !infraPool.DeletionTimestamp.IsZero() { + return r.reconcileDelete(ctx, mcpScope) + } + + // Handle non-deleted clusters + return r.reconcileNormal(ctx, mcpScope) +} + +func (r *AzureManagedMachinePoolReconciler) reconcileNormal(ctx context.Context, scope *scope.ManagedControlPlaneScope) (reconcile.Result, error) { + scope.Logger.Info("Reconciling AzureManagedMachinePool") + + // If the AzureManagedMachinePool doesn't have our finalizer, add it. 
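+	// controllerutil.AddFinalizer is idempotent, so it is safe to call on every reconcile.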
+ controllerutil.AddFinalizer(scope.InfraMachinePool, infrav1.ClusterFinalizer) + // Register the finalizer immediately to avoid orphaning Azure resources on delete + if err := scope.PatchObject(ctx); err != nil { + return reconcile.Result{}, err + } + + if err := newAzureManagedMachinePoolReconciler(scope).Reconcile(ctx, scope); err != nil { + return reconcile.Result{}, errors.Wrapf(err, "error creating AzureManagedMachinePool %s/%s", scope.InfraMachinePool.Namespace, scope.InfraMachinePool.Name) + } + + // No errors, so mark us ready so the Cluster API Cluster Controller can pull it + scope.InfraMachinePool.Status.Ready = true + + return reconcile.Result{}, nil +} + +func (r *AzureManagedMachinePoolReconciler) reconcileDelete(ctx context.Context, scope *scope.ManagedControlPlaneScope) (reconcile.Result, error) { + scope.Logger.Info("Reconciling AzureManagedMachinePool delete") + + if err := newAzureManagedMachinePoolReconciler(scope).Delete(ctx, scope); err != nil { + return reconcile.Result{}, errors.Wrapf(err, "error deleting AzureManagedMachinePool %s/%s", scope.InfraMachinePool.Namespace, scope.InfraMachinePool.Name) + } + + // Cluster is deleted so remove the finalizer. + controllerutil.RemoveFinalizer(scope.InfraMachinePool, infrav1.ClusterFinalizer) + + if err := scope.PatchObject(ctx); err != nil { + return reconcile.Result{}, err + } + + return reconcile.Result{}, nil +} diff --git a/exp/controllers/azuremanagedmachinepool_reconciler.go b/exp/controllers/azuremanagedmachinepool_reconciler.go new file mode 100644 index 00000000000..fef47491f3b --- /dev/null +++ b/exp/controllers/azuremanagedmachinepool_reconciler.go @@ -0,0 +1,130 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + "fmt" + + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute" + "github.com/pkg/errors" + azure "sigs.k8s.io/cluster-api-provider-azure/cloud" + "sigs.k8s.io/cluster-api-provider-azure/cloud/scope" + "sigs.k8s.io/cluster-api-provider-azure/cloud/services/agentpools" + "sigs.k8s.io/cluster-api-provider-azure/cloud/services/scalesets" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// azureManagedMachinePoolReconciler are list of services required by cluster controller +type azureManagedMachinePoolReconciler struct { + kubeclient client.Client + agentPoolsSvc azure.GetterService + scaleSetsSvc NodeLister +} + +// NodeLister is a service interface for returning generic lists. 
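+// It is satisfied by the scalesets service and is used below to discover the VMSS instances
+// backing an AKS node pool so their provider IDs can be recorded on the AzureManagedMachinePool.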
+type NodeLister interface {
+	ListInstances(context.Context, string, string) ([]compute.VirtualMachineScaleSetVM, error)
+	List(context.Context, string) ([]compute.VirtualMachineScaleSet, error)
+}
+
+// newAzureManagedMachinePoolReconciler populates all the services based on input scope
+func newAzureManagedMachinePoolReconciler(scope *scope.ManagedControlPlaneScope) *azureManagedMachinePoolReconciler {
+	return &azureManagedMachinePoolReconciler{
+		kubeclient:    scope.Client,
+		agentPoolsSvc: agentpools.NewService(scope.AzureClients.Authorizer, scope.AzureClients.SubscriptionID),
+		scaleSetsSvc:  scalesets.NewService(scope.AzureClients.Authorizer, scope.AzureClients.SubscriptionID),
+	}
+}
+
+// Reconcile reconciles all the services in predetermined order
+func (r *azureManagedMachinePoolReconciler) Reconcile(ctx context.Context, scope *scope.ManagedControlPlaneScope) error {
+	scope.Logger.Info("reconciling machine pool")
+	agentPoolSpec := &agentpools.Spec{
+		Name:          scope.InfraMachinePool.Name,
+		ResourceGroup: scope.ControlPlane.Spec.ResourceGroup,
+		Cluster:       scope.ControlPlane.Name,
+		SKU:           scope.InfraMachinePool.Spec.SKU,
+		Replicas:      1,
+		Version:       scope.MachinePool.Spec.Template.Spec.Version,
+	}
+
+	if scope.InfraMachinePool.Spec.OSDiskSizeGB != nil {
+		agentPoolSpec.OSDiskSizeGB = *scope.InfraMachinePool.Spec.OSDiskSizeGB
+	}
+
+	if scope.MachinePool.Spec.Replicas != nil {
+		agentPoolSpec.Replicas = *scope.MachinePool.Spec.Replicas
+	}
+
+	if err := r.agentPoolsSvc.Reconcile(ctx, agentPoolSpec); err != nil {
+		return errors.Wrapf(err, "failed to reconcile machine pool %s", scope.InfraMachinePool.Name)
+	}
+
+	nodeResourceGroup := fmt.Sprintf("MC_%s_%s_%s", scope.ControlPlane.Spec.ResourceGroup, scope.ControlPlane.Name, scope.ControlPlane.Spec.Location)
+	vmss, err := r.scaleSetsSvc.List(ctx, nodeResourceGroup)
+	if err != nil {
+		return errors.Wrapf(err, "failed to list vmss in resource group %s", nodeResourceGroup)
+	}
+
+	var match *compute.VirtualMachineScaleSet
+	for _, ss := range vmss {
+		ss := ss
+		if ss.Tags["poolName"] != nil && *ss.Tags["poolName"] == scope.InfraMachinePool.Name {
+			match = &ss
+			break
+		}
+	}
+
+	if match == nil {
+		return errors.New("failed to find vm scale set matching pool")
+	}
+
+	instances, err := r.scaleSetsSvc.ListInstances(ctx, nodeResourceGroup, *match.Name)
+	if err != nil {
+		return errors.Wrapf(err, "failed to reconcile machine pool %s", scope.InfraMachinePool.Name)
+	}
+
+	var providerIDs = make([]string, 0, len(instances))
+	for _, vm := range instances {
+		vm := vm
+		providerIDs = append(providerIDs, fmt.Sprintf("azure://%s", *vm.ID))
+	}
+
+	scope.InfraMachinePool.Spec.ProviderIDList = providerIDs
+	scope.InfraMachinePool.Status.Replicas = int32(len(providerIDs))
+	scope.InfraMachinePool.Status.Ready = true
+
+	scope.Logger.Info("reconciled machine pool successfully")
+	return nil
+}
+
+// Delete deletes all the services in predetermined order
+func (r *azureManagedMachinePoolReconciler) Delete(ctx context.Context, scope *scope.ManagedControlPlaneScope) error {
+	agentPoolSpec := &agentpools.Spec{
+		Name:          scope.InfraMachinePool.Name,
+		ResourceGroup: scope.ControlPlane.Spec.ResourceGroup,
+		Cluster:       scope.ControlPlane.Name,
+		SKU:           scope.InfraMachinePool.Spec.SKU,
+	}
+
+	if err := r.agentPoolsSvc.Delete(ctx, agentPoolSpec); err != nil {
+		return errors.Wrapf(err, "failed to delete machine pool %s", scope.InfraMachinePool.Name)
+	}
+
+	return nil
+}
diff --git a/exp/controllers/azuremangedcontrolplane_controller.go
b/exp/controllers/azuremangedcontrolplane_controller.go new file mode 100644 index 00000000000..167837631a4 --- /dev/null +++ b/exp/controllers/azuremangedcontrolplane_controller.go @@ -0,0 +1,170 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + + "github.com/go-logr/logr" + "github.com/pkg/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/client-go/tools/record" + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha3" + infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha3" + + "sigs.k8s.io/cluster-api-provider-azure/cloud/scope" + "sigs.k8s.io/cluster-api/util" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// AzureManagedControlPlaneReconciler reconciles a AzureManagedControlPlane object +type AzureManagedControlPlaneReconciler struct { + client.Client + Log logr.Logger + Recorder record.EventRecorder +} + +func (r *AzureManagedControlPlaneReconciler) SetupWithManager(mgr ctrl.Manager, options controller.Options) error { + return ctrl.NewControllerManagedBy(mgr). + WithOptions(options). + For(&infrav1exp.AzureManagedControlPlane{}). + Complete(r) +} + +// +kubebuilder:rbac:groups=exp.infrastructure.cluster.x-k8s.io,resources=azuremanagedcontrolplanes,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=exp.infrastructure.cluster.x-k8s.io,resources=azuremanagedcontrolplanes/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch + +func (r *AzureManagedControlPlaneReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, reterr error) { + ctx := context.TODO() + log := r.Log.WithValues("namespace", req.Namespace, "azureManagedControlPlanes", req.Name) + + // Fetch the AzureManagedControlPlane instance + azureControlPlane := &infrav1exp.AzureManagedControlPlane{} + err := r.Get(ctx, req.NamespacedName, azureControlPlane) + if err != nil { + if apierrors.IsNotFound(err) { + return reconcile.Result{}, nil + } + return reconcile.Result{}, err + } + + // Fetch the Cluster. 
+ cluster, err := util.GetOwnerCluster(ctx, r.Client, azureControlPlane.ObjectMeta) + if err != nil { + return reconcile.Result{}, err + } + if cluster == nil { + log.Info("Cluster Controller has not yet set OwnerRef") + return reconcile.Result{}, nil + } + + log = log.WithValues("cluster", cluster.Name) + + // fetch default pool + defaultPoolKey := client.ObjectKey{ + Name: azureControlPlane.Spec.DefaultPoolRef.Name, + Namespace: azureControlPlane.Namespace, + } + defaultPool := &infrav1exp.AzureManagedMachinePool{} + if err := r.Client.Get(ctx, defaultPoolKey, defaultPool); err != nil { + return reconcile.Result{}, errors.Wrapf(err, "failed to fetch default pool reference") + } + + log = log.WithValues("azureManagedMachinePool", defaultPoolKey.Name) + + // fetch owner of default pool + // TODO(ace): create a helper in util for this + // Fetch the owning MachinePool. + ownerPool, err := getOwnerMachinePool(ctx, r.Client, defaultPool.ObjectMeta) + if err != nil { + return reconcile.Result{}, err + } + if ownerPool == nil { + log.Info("failed to fetch owner ref for default pool") + return reconcile.Result{}, nil + } + + log = log.WithValues("machinePool", ownerPool.Name) + + // Create the scope. + mcpScope, err := scope.NewManagedControlPlaneScope(scope.ManagedControlPlaneScopeParams{ + Client: r.Client, + Logger: log, + Cluster: cluster, + ControlPlane: azureControlPlane, + MachinePool: ownerPool, + InfraMachinePool: defaultPool, + PatchTarget: azureControlPlane, + }) + if err != nil { + return reconcile.Result{}, errors.Errorf("failed to create scope: %+v", err) + } + + // Always patch when exiting so we can persist changes to finalizers and status + defer func() { + if err := mcpScope.PatchObject(ctx); err != nil && reterr == nil { + reterr = err + } + }() + + // Handle deleted clusters + if !azureControlPlane.DeletionTimestamp.IsZero() { + return r.reconcileDelete(ctx, mcpScope) + } + + // Handle non-deleted clusters + return r.reconcileNormal(ctx, mcpScope) +} + +func (r *AzureManagedControlPlaneReconciler) reconcileNormal(ctx context.Context, scope *scope.ManagedControlPlaneScope) (reconcile.Result, error) { + scope.Logger.Info("Reconciling AzureManagedControlPlane") + + // If the AzureManagedControlPlane doesn't have our finalizer, add it. + controllerutil.AddFinalizer(scope.ControlPlane, infrav1.ClusterFinalizer) + // Register the finalizer immediately to avoid orphaning Azure resources on delete + if err := scope.PatchObject(ctx); err != nil { + return reconcile.Result{}, err + } + + if err := newAzureManagedControlPlaneReconciler(scope).Reconcile(ctx, scope); err != nil { + return reconcile.Result{}, errors.Wrapf(err, "error creating AzureManagedControlPlane %s/%s", scope.ControlPlane.Namespace, scope.ControlPlane.Name) + } + + // No errors, so mark us ready so the Cluster API Cluster Controller can pull it + scope.ControlPlane.Status.Ready = true + scope.ControlPlane.Status.Initialized = true + + return reconcile.Result{}, nil +} + +func (r *AzureManagedControlPlaneReconciler) reconcileDelete(ctx context.Context, scope *scope.ManagedControlPlaneScope) (reconcile.Result, error) { + scope.Logger.Info("Reconciling AzureManagedControlPlane delete") + + if err := newAzureManagedControlPlaneReconciler(scope).Delete(ctx, scope); err != nil { + return reconcile.Result{}, errors.Wrapf(err, "error deleting AzureManagedControlPlane %s/%s", scope.ControlPlane.Namespace, scope.ControlPlane.Name) + } + + // Cluster is deleted so remove the finalizer. 
+ controllerutil.RemoveFinalizer(scope.ControlPlane, infrav1.ClusterFinalizer) + + return reconcile.Result{}, nil +} diff --git a/exp/controllers/azuremangedcontrolplane_reconciler.go b/exp/controllers/azuremangedcontrolplane_reconciler.go new file mode 100644 index 00000000000..e198fc6bb40 --- /dev/null +++ b/exp/controllers/azuremangedcontrolplane_reconciler.go @@ -0,0 +1,221 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + "fmt" + + "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + azure "sigs.k8s.io/cluster-api-provider-azure/cloud" + "sigs.k8s.io/cluster-api-provider-azure/cloud/scope" + "sigs.k8s.io/cluster-api-provider-azure/cloud/services/managedclusters" + infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha3" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" + "sigs.k8s.io/cluster-api/util/secret" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +// azureManagedControlPlaneReconciler are list of services required by cluster controller +type azureManagedControlPlaneReconciler struct { + kubeclient client.Client + managedClustersSvc azure.CredentialGetter +} + +// newAzureManagedControlPlaneReconciler populates all the services based on input scope +func newAzureManagedControlPlaneReconciler(scope *scope.ManagedControlPlaneScope) *azureManagedControlPlaneReconciler { + return &azureManagedControlPlaneReconciler{ + kubeclient: scope.Client, + managedClustersSvc: managedclusters.NewService(scope.AzureClients.Authorizer, scope.AzureClients.SubscriptionID), + } +} + +// Reconcile reconciles all the services in pre determined order +func (r *azureManagedControlPlaneReconciler) Reconcile(ctx context.Context, scope *scope.ManagedControlPlaneScope) error { + managedClusterSpec := &managedclusters.Spec{ + Name: scope.ControlPlane.Name, + ResourceGroup: scope.ControlPlane.Spec.ResourceGroup, + Location: scope.ControlPlane.Spec.Location, + Tags: scope.ControlPlane.Spec.AdditionalTags, + Version: scope.ControlPlane.Spec.Version, + LoadBalancerSKU: scope.ControlPlane.Spec.LoadBalancerSKU, + NetworkPlugin: scope.ControlPlane.Spec.NetworkPlugin, + NetworkPolicy: scope.ControlPlane.Spec.NetworkPolicy, + SSHPublicKey: scope.ControlPlane.Spec.SSHPublicKey, + } + + scope.Logger.V(2).Info("Reconciling managed cluster") + if err := r.reconcileManagedCluster(ctx, scope, managedClusterSpec); err != nil { + return errors.Wrapf(err, "failed to reconcile managed cluster") + } + + scope.Logger.V(2).Info("Reconciling endpoint") + if err := r.reconcileEndpoint(ctx, scope, managedClusterSpec); err != nil { + return errors.Wrapf(err, "failed to reconcile control plane endpoint") + } + + scope.Logger.V(2).Info("Reconciling kubeconfig") + if err := r.reconcileKubeconfig(ctx, scope, managedClusterSpec); err != 
nil { + return errors.Wrapf(err, "failed to reconcile kubeconfig secret") + } + + return nil +} + +// Delete reconciles all the services in a predetermined order +func (r *azureManagedControlPlaneReconciler) Delete(ctx context.Context, scope *scope.ManagedControlPlaneScope) error { + managedClusterSpec := &managedclusters.Spec{ + Name: scope.ControlPlane.Name, + ResourceGroup: scope.ControlPlane.Spec.ResourceGroup, + Location: scope.ControlPlane.Spec.Location, + Tags: scope.ControlPlane.Spec.AdditionalTags, + Version: scope.ControlPlane.Spec.Version, + LoadBalancerSKU: scope.ControlPlane.Spec.LoadBalancerSKU, + NetworkPlugin: scope.ControlPlane.Spec.NetworkPlugin, + NetworkPolicy: scope.ControlPlane.Spec.NetworkPolicy, + SSHPublicKey: scope.ControlPlane.Spec.SSHPublicKey, + } + + if err := r.managedClustersSvc.Delete(ctx, managedClusterSpec); err != nil { + return errors.Wrapf(err, "failed to delete managed cluster %s", scope.ControlPlane.Name) + } + + return nil +} + +func (r *azureManagedControlPlaneReconciler) reconcileManagedCluster(ctx context.Context, scope *scope.ManagedControlPlaneScope, managedClusterSpec *managedclusters.Spec) error { + if net := scope.Cluster.Spec.ClusterNetwork; net != nil { + if net.Services != nil { + // A user may provide zero or one CIDR blocks. If they provide an empty array, + // we ignore it and use the default. AKS doesn't support > 1 Service/Pod CIDR. + if len(net.Services.CIDRBlocks) > 1 { + return errors.New("managed control planes only allow one service cidr") + } + if len(net.Services.CIDRBlocks) == 1 { + managedClusterSpec.ServiceCIDR = net.Services.CIDRBlocks[0] + } + } + if net.Pods != nil { + // A user may provide zero or one CIDR blocks. If they provide an empty array, + // we ignore it and use the default. AKS doesn't support > 1 Service/Pod CIDR. + if len(net.Pods.CIDRBlocks) > 1 { + return errors.New("managed control planes only allow one pod cidr") + } + if len(net.Pods.CIDRBlocks) == 1 { + managedClusterSpec.PodCIDR = net.Pods.CIDRBlocks[0] + } + } + } + + _, err := r.managedClustersSvc.Get(ctx, managedClusterSpec) + // Transient or other failure not due to 404 + if err != nil && !azure.ResourceNotFound(err) { + return errors.Wrapf(err, "failed to fetch existing managed cluster") + } + + // We are creating this cluster for the first time. + // Configure the default pool; the rest will be handled by the machine pool controller. + // We do this here because AKS will only let us mutate agent pools via the managed + // clusters API at create time, not update. + if azure.ResourceNotFound(err) { + defaultPoolSpec := managedclusters.PoolSpec{ + Name: scope.InfraMachinePool.Name, + SKU: scope.InfraMachinePool.Spec.SKU, + Replicas: 1, + OSDiskSizeGB: 0, + } + + // Set optional values + if scope.InfraMachinePool.Spec.OSDiskSizeGB != nil { + defaultPoolSpec.OSDiskSizeGB = *scope.InfraMachinePool.Spec.OSDiskSizeGB + } + if scope.MachinePool.Spec.Replicas != nil { + defaultPoolSpec.Replicas = *scope.MachinePool.Spec.Replicas + } + + // Add to cluster spec + managedClusterSpec.AgentPools = []managedclusters.PoolSpec{defaultPoolSpec} + } + + // Send to Azure for create/update.
+ if err := r.managedClustersSvc.Reconcile(ctx, managedClusterSpec); err != nil { + return errors.Wrapf(err, "failed to reconcile managed cluster %s", scope.ControlPlane.Name) + } + return nil +} + +func (r *azureManagedControlPlaneReconciler) reconcileEndpoint(ctx context.Context, scope *scope.ManagedControlPlaneScope, managedClusterSpec *managedclusters.Spec) error { + // Fetch newly updated cluster + managedClusterResult, err := r.managedClustersSvc.Get(ctx, managedClusterSpec) + if err != nil { + return err + } + + managedCluster, ok := managedClusterResult.(containerservice.ManagedCluster) + if !ok { + return fmt.Errorf("expected containerservice ManagedCluster object") + } + + old := scope.ControlPlane.DeepCopyObject() + + scope.ControlPlane.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{ + Host: *managedCluster.ManagedClusterProperties.Fqdn, + Port: 443, + } + + if err := r.kubeclient.Patch(ctx, scope.ControlPlane, client.MergeFrom(old)); err != nil { + return errors.Wrapf(err, "failed to set control plane endpoint") + } + + return nil +} + +func (r *azureManagedControlPlaneReconciler) reconcileKubeconfig(ctx context.Context, scope *scope.ManagedControlPlaneScope, managedClusterSpec *managedclusters.Spec) error { + // Always fetch credentials in case of rotation + data, err := r.managedClustersSvc.GetCredentials(ctx, managedClusterSpec.ResourceGroup, managedClusterSpec.Name) + if err != nil { + return errors.Wrapf(err, "failed to get credentials for managed cluster") + } + + // Construct and store secret + kubeconfig := makeKubeconfig(scope.Cluster, scope.ControlPlane) + if _, err := controllerutil.CreateOrUpdate(ctx, r.kubeclient, kubeconfig, func() error { + kubeconfig.Data = map[string][]byte{ + secret.KubeconfigDataName: data, + } + return nil + }); err != nil { + return errors.Wrapf(err, "failed to reconcile kubeconfig secret for cluster") + } + return nil +} + +func makeKubeconfig(cluster *clusterv1.Cluster, controlPlane *infrav1exp.AzureManagedControlPlane) *corev1.Secret { + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secret.Name(cluster.Name, secret.Kubeconfig), + Namespace: cluster.Namespace, + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(controlPlane, infrav1exp.GroupVersion.WithKind("AzureManagedControlPlane")), + }, + }, + } +} diff --git a/feature/feature.go b/feature/feature.go new file mode 100644 index 00000000000..384974ff45f --- /dev/null +++ b/feature/feature.go @@ -0,0 +1,45 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package feature + +import ( + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/component-base/featuregate" +) + +const ( + // Every capz-specific feature gate should add method here following this template: + // + // // owner: @username + // // alpha: v1.X + // MyFeature featuregate.Feature = "MyFeature" + + // owner: @alexeldeib + // alpha: v0.4 + AKS featuregate.Feature = "AKS" +) + +func init() { + runtime.Must(MutableGates.Add(defaultCAPZFeatureGates)) +} + +// defaultCAPZFeatureGates consists of all known capz-specific feature keys. +// To add a new feature, define a key for it above and add it here. +var defaultCAPZFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{ + // Every feature should be initiated here: + AKS: {Default: false, PreRelease: featuregate.Alpha}, +} diff --git a/feature/gates.go b/feature/gates.go new file mode 100644 index 00000000000..bf73fb3ff0b --- /dev/null +++ b/feature/gates.go @@ -0,0 +1,34 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package feature + +import ( + "k8s.io/component-base/featuregate" + "sigs.k8s.io/cluster-api/feature" +) + +var ( + // MutableGates is a mutable version of DefaultFeatureGate. + // Only top-level commands/options setup and the k8s.io/component-base/featuregate/testing package should make use of this. + // Tests that need to modify featuregate gates for the duration of their test should use: + // defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.<FeatureName>, <value>)() + MutableGates featuregate.MutableFeatureGate = feature.MutableGates + + // Gates is a shared global FeatureGate. + // Top-level commands/options setup that needs to modify this featuregate gate should use DefaultMutableFeatureGate. + Gates featuregate.FeatureGate = MutableGates +) diff --git a/go.mod b/go.mod index 9c58f3394e6..ff6b4d88978 100644 --- a/go.mod +++ b/go.mod @@ -24,6 +24,7 @@ require ( k8s.io/api v0.17.2 k8s.io/apimachinery v0.17.2 k8s.io/client-go v0.17.2 + k8s.io/component-base v0.17.2 k8s.io/klog v1.0.0 k8s.io/utils v0.0.0-20200229041039-0a110f9eb7ab sigs.k8s.io/cluster-api v0.3.5 diff --git a/main.go b/main.go index 3a6fb0aa5c6..d40af16f82c 100644 --- a/main.go +++ b/main.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright 2020 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
@@ -38,10 +38,11 @@ import ( "sigs.k8s.io/cluster-api-provider-azure/controllers" infrav1alpha3exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha3" infrav1controllersexp "sigs.k8s.io/cluster-api-provider-azure/exp/controllers" + capifeature "sigs.k8s.io/cluster-api/feature" + "sigs.k8s.io/cluster-api-provider-azure/feature" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1alpha3" - "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/util/record" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -205,10 +206,9 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "AzureCluster") os.Exit(1) } - // just use CAPI MachinePool feature flag rather than create a new one setupLog.V(1).Info(fmt.Sprintf("%+v\n", feature.Gates)) - if feature.Gates.Enabled(feature.MachinePool) { + if feature.Gates.Enabled(capifeature.MachinePool) { if err = (&infrav1controllersexp.AzureMachinePoolReconciler{ Client: mgr.GetClient(), Log: ctrl.Log.WithName("controllers").WithName("AzureMachinePool"), @@ -217,7 +217,34 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "AzureMachinePool") os.Exit(1) } + if feature.Gates.Enabled(feature.AKS) { + if err = (&infrav1controllersexp.AzureManagedMachinePoolReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("AzureManagedMachinePool"), + Recorder: mgr.GetEventRecorderFor("azuremachine-reconciler"), + }).SetupWithManager(mgr, controller.Options{MaxConcurrentReconciles: azureMachineConcurrency}); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "AzureManagedMachinePool") + os.Exit(1) + } + if err = (&infrav1controllersexp.AzureManagedClusterReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("AzureManagedCluster"), + Recorder: mgr.GetEventRecorderFor("azuremanagedcluster-reconciler"), + }).SetupWithManager(mgr, controller.Options{MaxConcurrentReconciles: azureClusterConcurrency}); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "AzureManagedCluster") + os.Exit(1) + } + if err = (&infrav1controllersexp.AzureManagedControlPlaneReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("AzureManagedControlPlane"), + Recorder: mgr.GetEventRecorderFor("azuremanagedcontrolplane-reconciler"), + }).SetupWithManager(mgr, controller.Options{MaxConcurrentReconciles: azureClusterConcurrency}); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "AzureManagedControlPlane") + os.Exit(1) + } + } } + // } } else { if err = (&infrav1alpha3.AzureCluster{}).SetupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "AzureCluster") @@ -232,7 +259,7 @@ func main() { os.Exit(1) } // just use CAPI MachinePool feature flag rather than create a new one - if feature.Gates.Enabled(feature.MachinePool) { + if feature.Gates.Enabled(capifeature.MachinePool) { if err = (&infrav1alpha3exp.AzureMachinePool{}).SetupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "AzureMachinePool") os.Exit(1) diff --git a/templates/cluster-template-aks.yaml b/templates/cluster-template-aks.yaml new file mode 100644 index 00000000000..401105e80ec --- /dev/null +++ b/templates/cluster-template-aks.yaml @@ -0,0 +1,94 @@ +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: Cluster +metadata: + name: 
${CLUSTER_NAME} + namespace: default +spec: + clusterNetwork: + services: + cidrBlocks: + - 192.168.0.0/16 + controlPlaneRef: + apiVersion: exp.infrastructure.cluster.x-k8s.io/v1alpha3 + kind: AzureManagedControlPlane + name: ${CLUSTER_NAME}-control-plane + infrastructureRef: + apiVersion: exp.infrastructure.cluster.x-k8s.io/v1alpha3 + kind: AzureManagedCluster + name: ${CLUSTER_NAME} +--- +apiVersion: exp.infrastructure.cluster.x-k8s.io/v1alpha3 +kind: AzureManagedControlPlane +metadata: + name: ${CLUSTER_NAME}-control-plane +spec: + defaultPoolRef: + name: agentpool0 + location: ${AZURE_LOCATION} + resourceGroup: ${AZURE_RESOURCE_GROUP} + sshPublicKey: ${AZURE_SSH_PUBLIC_KEY} + subscriptionID: ${AZURE_SUBSCRIPTION_ID} + version: ${KUBERNETES_VERSION} +--- +apiVersion: exp.infrastructure.cluster.x-k8s.io/v1alpha3 +kind: AzureManagedCluster +metadata: + name: ${CLUSTER_NAME} +spec: + subscriptionID: ${AZURE_SUBSCRIPTION_ID} +--- +apiVersion: exp.cluster.x-k8s.io/v1alpha3 +kind: MachinePool +metadata: + name: agentpool0 +spec: + clusterName: ${CLUSTER_NAME} + replicas: ${WORKER_MACHINE_COUNT} + template: + metadata: {} + spec: + bootstrap: + dataSecretName: "" + clusterName: ${CLUSTER_NAME} + infrastructureRef: + apiVersion: exp.infrastructure.cluster.x-k8s.io/v1alpha3 + kind: AzureManagedMachinePool + name: agentpool0 + namespace: default + version: ${KUBERNETES_VERSION} +--- +apiVersion: exp.infrastructure.cluster.x-k8s.io/v1alpha3 +kind: AzureManagedMachinePool +metadata: + name: agentpool0 +spec: + osDiskSizeGB: 512 + sku: ${AZURE_NODE_MACHINE_TYPE} +--- +apiVersion: exp.cluster.x-k8s.io/v1alpha3 +kind: MachinePool +metadata: + name: agentpool1 +spec: + clusterName: ${CLUSTER_NAME} + replicas: ${WORKER_MACHINE_COUNT} + template: + metadata: {} + spec: + bootstrap: + dataSecretName: "" + clusterName: ${CLUSTER_NAME} + infrastructureRef: + apiVersion: exp.infrastructure.cluster.x-k8s.io/v1alpha3 + kind: AzureManagedMachinePool + name: agentpool1 + namespace: default + version: ${KUBERNETES_VERSION} +--- +apiVersion: exp.infrastructure.cluster.x-k8s.io/v1alpha3 +kind: AzureManagedMachinePool +metadata: + name: agentpool1 +spec: + osDiskSizeGB: 1024 + sku: ${AZURE_NODE_MACHINE_TYPE} diff --git a/templates/flavors/aks/cluster-template.yaml b/templates/flavors/aks/cluster-template.yaml new file mode 100644 index 00000000000..4aa523e9d6f --- /dev/null +++ b/templates/flavors/aks/cluster-template.yaml @@ -0,0 +1,110 @@ +--- +# The Cluster object is the top level owner of all resources. +# It coordinates between the control plane and the infrastructure/machines. +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: Cluster +metadata: + name: ${CLUSTER_NAME} + namespace: default +spec: + clusterNetwork: + services: + cidrBlocks: + - 192.168.0.0/16 + controlPlaneRef: + apiVersion: exp.infrastructure.cluster.x-k8s.io/v1alpha3 + kind: AzureManagedControlPlane + name: ${CLUSTER_NAME}-control-plane + infrastructureRef: + apiVersion: exp.infrastructure.cluster.x-k8s.io/v1alpha3 + kind: AzureManagedCluster + name: ${CLUSTER_NAME} +--- +# The control plane abstracts readiness and provisioning of an AKS cluster. +# Because AKS requires a default pool, this also requires a reference to the +# default machine pool. 
+apiVersion: exp.infrastructure.cluster.x-k8s.io/v1alpha3 +kind: AzureManagedControlPlane +metadata: + name: ${CLUSTER_NAME}-control-plane +spec: + subscriptionID: ${AZURE_SUBSCRIPTION_ID} + resourceGroup: "${AZURE_RESOURCE_GROUP}" + location: "${AZURE_LOCATION}" + defaultPoolRef: + name: "agentpool0" + sshPublicKey: "${AZURE_SSH_PUBLIC_KEY}" + version: "${KUBERNETES_VERSION}" +--- +# Due to the nature of managed Kubernetes and the control plane implementation, +# the infrastructure provider for an AKS cluster is essentially a no-op. +# It sets itself to ready as soon as it sees the control plane ready. +apiVersion: exp.infrastructure.cluster.x-k8s.io/v1alpha3 +kind: AzureManagedCluster +metadata: + name: ${CLUSTER_NAME} +spec: + subscriptionID: ${AZURE_SUBSCRIPTION_ID} +--- +# We provision a default machine pool with no bootstrap data (AKS will provide it). +# We specify an AzureManagedMachinePool as the infrastructure reference, which +# will be reflected in Azure as a VMSS node pool attached to the AKS cluster. +apiVersion: exp.cluster.x-k8s.io/v1alpha3 +kind: MachinePool +metadata: + name: "agentpool0" +spec: + clusterName: "${CLUSTER_NAME}" + replicas: ${WORKER_MACHINE_COUNT} + template: + metadata: {} + spec: + bootstrap: + dataSecretName: "" + clusterName: "${CLUSTER_NAME}" + infrastructureRef: + apiVersion: exp.infrastructure.cluster.x-k8s.io/v1alpha3 + kind: AzureManagedMachinePool + name: "agentpool0" + namespace: default + version: "${KUBERNETES_VERSION}" +--- +# The Azure-specific machine pool implementation drives the configuration of the +# VMSS instances backing the pool. +apiVersion: exp.infrastructure.cluster.x-k8s.io/v1alpha3 +kind: AzureManagedMachinePool +metadata: + name: "agentpool0" +spec: + osDiskSizeGB: 512 + sku: "${AZURE_NODE_MACHINE_TYPE}" +--- +# Deploy a second agent pool with the same number of machines, but using potentially different infrastructure. +apiVersion: exp.cluster.x-k8s.io/v1alpha3 +kind: MachinePool +metadata: + name: "agentpool1" +spec: + clusterName: "${CLUSTER_NAME}" + replicas: ${WORKER_MACHINE_COUNT} + template: + metadata: {} + spec: + bootstrap: + dataSecretName: "" + clusterName: "${CLUSTER_NAME}" + infrastructureRef: + apiVersion: exp.infrastructure.cluster.x-k8s.io/v1alpha3 + kind: AzureManagedMachinePool + name: "agentpool1" + namespace: default + version: "${KUBERNETES_VERSION}" +--- +# The infrastructure backing the second pool will use the same VM sku, but a larger OS disk. +apiVersion: exp.infrastructure.cluster.x-k8s.io/v1alpha3 +kind: AzureManagedMachinePool +metadata: + name: "agentpool1" +spec: + osDiskSizeGB: 1024 + sku: "${AZURE_NODE_MACHINE_TYPE}" diff --git a/templates/flavors/aks/kustomization.yaml b/templates/flavors/aks/kustomization.yaml new file mode 100644 index 00000000000..bc7ab04c05c --- /dev/null +++ b/templates/flavors/aks/kustomization.yaml @@ -0,0 +1,4 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +- cluster-template.yaml diff --git a/tilt-provider.json b/tilt-provider.json index 57c53b17dca..4684dc5d6f1 100644 --- a/tilt-provider.json +++ b/tilt-provider.json @@ -9,7 +9,8 @@ "api", "cloud", "controllers", - "pkg" + "pkg", + "exp" ] } -} +} \ No newline at end of file
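
Editor's note: as a brief illustration of the feature package introduced in this change, the comment in feature/gates.go describes a defer pattern tests can use to toggle gates, and the sketch below applies it to the AKS gate defined in feature/feature.go. This is an assumed example and not part of the diff: the test file and test name are hypothetical, and it assumes the SetFeatureGateDuringTest helper from the pinned k8s.io/component-base v0.17.2, which returns a restore function suitable for defer.

package feature_test

import (
	"testing"

	featuregatetesting "k8s.io/component-base/featuregate/testing"

	"sigs.k8s.io/cluster-api-provider-azure/feature"
)

// TestAKSGateToggle (hypothetical) shows the defer pattern from the gates.go comment:
// enable the alpha AKS gate for the duration of one test, then restore its previous value.
func TestAKSGateToggle(t *testing.T) {
	// AKS is alpha and defaults to false, so the managed-cluster controllers stay disabled.
	if feature.Gates.Enabled(feature.AKS) {
		t.Fatal("expected the AKS feature gate to default to false")
	}

	// Flip the gate for this test only; the returned func restores the original value.
	defer featuregatetesting.SetFeatureGateDuringTest(t, feature.Gates, feature.AKS, true)()

	if !feature.Gates.Enabled(feature.AKS) {
		t.Fatal("expected the AKS feature gate to be enabled for the duration of the test")
	}
}

Outside of tests, this is the same gate main.go consults via feature.Gates.Enabled(feature.AKS) before registering the AzureManagedCluster, AzureManagedControlPlane, and AzureManagedMachinePool controllers.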