diff --git a/api/v1alpha3/azuremanagedcluster_types.go b/api/v1alpha3/azuremanagedcluster_types.go new file mode 100644 index 00000000000..8651b6f9aa6 --- /dev/null +++ b/api/v1alpha3/azuremanagedcluster_types.go @@ -0,0 +1,99 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha3 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" +) + +// AzureManagedClusterSpec defines the desired state of AzureManagedCluster +type AzureManagedClusterSpec struct { + // Version defines the desired Kubernetes version. + // +kubebuilder:validation:MinLength:=2 + // +kubebuilder:validation:Pattern:=^(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)([-0-9a-zA-Z_\.+]*)?$ + Version string `json:"version"` + + // ResourceGroup is the name of the Azure resource group for this AKS Cluster. + ResourceGroup string `json:"resourceGroup"` + + // Location is a string matching one of the canonical Azure region names. Examples: "westus2", "eastus". + Location string `json:"location"` + + // ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. + // +optional + ControlPlaneEndpoint clusterv1.APIEndpoint `json:"controlPlaneEndpoint"` + + // AdditionalTags is an optional set of tags to add to Azure resources managed by the Azure provider, in addition to the + // ones added by default. 
+ // +optional + AdditionalTags Tags `json:"additionalTags,omitempty"` + + // LoadBalancerSKU for the managed cluster. Possible values include: 'Standard', 'Basic'. Defaults to standard. + // +kubebuilder:validation:Enum=Standard;Basic + LoadBalancerSKU *string `json:"loadBalancerSku,omitempty"` + + // NetworkPlugin used for building Kubernetes network. Possible values include: 'Azure', 'Kubenet'. Defaults to Azure. + // +kubebuilder:validation:Enum=Azure;Kubenet + NetworkPlugin *string `json:"networkPlugin,omitempty"` + + // NetworkPolicy used for building Kubernetes network. Possible values include: 'NetworkPolicyCalico', 'NetworkPolicyAzure' + // +kubebuilder:validation:Enum=NetworkPolicyCalico;NetworkPolicyAzure + NetworkPolicy *string `json:"networkPolicy,omitempty"` + + // SSHPublicKey is a string literal containing an ssh public key. + SSHPublicKey string `json:"sshPublicKey"` + + // DefaultPoolRef is the specification for the default pool, without which an AKS cluster cannot be created. + // TODO(ace): consider defaulting and making optional pointer? + DefaultPoolRef corev1.LocalObjectReference `json:"defaultPoolRef"` +} + +// AzureManagedClusterStatus defines the observed state of AzureManagedCluster +type AzureManagedClusterStatus struct { + // Ready is true when the provider resource is ready. 
+ // +optional + Ready bool `json:"ready,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=azuremanagedclusters,scope=Namespaced,categories=cluster-api,shortName=amc +// +kubebuilder:storageversion +// +kubebuilder:subresource:status + +// AzureManagedCluster is the Schema for the azuremanagedclusters API +type AzureManagedCluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec AzureManagedClusterSpec `json:"spec,omitempty"` + Status AzureManagedClusterStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// AzureManagedClusterList contains a list of AzureManagedCluster +type AzureManagedClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AzureManagedCluster `json:"items"` +} + +func init() { + SchemeBuilder.Register(&AzureManagedCluster{}, &AzureManagedClusterList{}) +} diff --git a/api/v1alpha3/azuremanagedmachinepool_types.go b/api/v1alpha3/azuremanagedmachinepool_types.go new file mode 100644 index 00000000000..94ec82e8e4e --- /dev/null +++ b/api/v1alpha3/azuremanagedmachinepool_types.go @@ -0,0 +1,86 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha3 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + capierrors "sigs.k8s.io/cluster-api/errors" +) + +// AzureManagedMachinePoolSpec defines the desired state of AzureManagedMachinePool +type AzureManagedMachinePoolSpec struct { + // SKU is the size of the VMs in the node pool. + SKU string `json:"sku"` + + // OSDiskSizeGB is the disk size for every machine in this master/agent pool. + // If you specify 0, it will apply the default osDisk size according to the vmSize specified. + OSDiskSizeGB *int32 `json:"osDiskSizeGB,omitempty"` + + // ProviderIDList is the unique identifier as specified by the cloud provider. + // +optional + ProviderIDList []string `json:"providerIDList,omitempty"` +} + +// AzureManagedMachinePoolStatus defines the observed state of AzureManagedMachinePool +type AzureManagedMachinePoolStatus struct { + // Ready is true when the provider resource is ready. + // +optional + Ready bool `json:"ready"` + + // Replicas is the most recently observed number of replicas. + // +optional + Replicas int32 `json:"replicas"` + + // Any transient errors that occur during the reconciliation of Machines + // can be added as events to the Machine object and/or logged in the + // controller's output. + // +optional + ErrorReason *capierrors.MachineStatusError `json:"errorReason,omitempty"` + + // Any transient errors that occur during the reconciliation of Machines + // can be added as events to the Machine object and/or logged in the + // controller's output. 
+ // +optional + ErrorMessage *string `json:"errorMessage,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=azuremanagedmachinepools,scope=Namespaced,categories=cluster-api,shortName=ammp +// +kubebuilder:storageversion +// +kubebuilder:subresource:status + +// AzureManagedMachinePool is the Schema for the azuremanagedmachinepools API +type AzureManagedMachinePool struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec AzureManagedMachinePoolSpec `json:"spec,omitempty"` + Status AzureManagedMachinePoolStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// AzureManagedMachinePoolList contains a list of AzureManagedMachinePool +type AzureManagedMachinePoolList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AzureManagedMachinePool `json:"items"` +} + +func init() { + SchemeBuilder.Register(&AzureManagedMachinePool{}, &AzureManagedMachinePoolList{}) +} diff --git a/api/v1alpha3/zz_generated.deepcopy.go b/api/v1alpha3/zz_generated.deepcopy.go index dc0be6bb61e..c955aa0e153 100644 --- a/api/v1alpha3/zz_generated.deepcopy.go +++ b/api/v1alpha3/zz_generated.deepcopy.go @@ -386,6 +386,228 @@ func (in *AzureMachineTemplateSpec) DeepCopy() *AzureMachineTemplateSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureManagedCluster) DeepCopyInto(out *AzureManagedCluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureManagedCluster. 
+func (in *AzureManagedCluster) DeepCopy() *AzureManagedCluster { + if in == nil { + return nil + } + out := new(AzureManagedCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AzureManagedCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureManagedClusterList) DeepCopyInto(out *AzureManagedClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AzureManagedCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureManagedClusterList. +func (in *AzureManagedClusterList) DeepCopy() *AzureManagedClusterList { + if in == nil { + return nil + } + out := new(AzureManagedClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AzureManagedClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AzureManagedClusterSpec) DeepCopyInto(out *AzureManagedClusterSpec) { + *out = *in + out.ControlPlaneEndpoint = in.ControlPlaneEndpoint + if in.AdditionalTags != nil { + in, out := &in.AdditionalTags, &out.AdditionalTags + *out = make(Tags, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.LoadBalancerSKU != nil { + in, out := &in.LoadBalancerSKU, &out.LoadBalancerSKU + *out = new(string) + **out = **in + } + if in.NetworkPlugin != nil { + in, out := &in.NetworkPlugin, &out.NetworkPlugin + *out = new(string) + **out = **in + } + if in.NetworkPolicy != nil { + in, out := &in.NetworkPolicy, &out.NetworkPolicy + *out = new(string) + **out = **in + } + out.DefaultPoolRef = in.DefaultPoolRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureManagedClusterSpec. +func (in *AzureManagedClusterSpec) DeepCopy() *AzureManagedClusterSpec { + if in == nil { + return nil + } + out := new(AzureManagedClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureManagedClusterStatus) DeepCopyInto(out *AzureManagedClusterStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureManagedClusterStatus. +func (in *AzureManagedClusterStatus) DeepCopy() *AzureManagedClusterStatus { + if in == nil { + return nil + } + out := new(AzureManagedClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AzureManagedMachinePool) DeepCopyInto(out *AzureManagedMachinePool) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureManagedMachinePool. +func (in *AzureManagedMachinePool) DeepCopy() *AzureManagedMachinePool { + if in == nil { + return nil + } + out := new(AzureManagedMachinePool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AzureManagedMachinePool) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureManagedMachinePoolList) DeepCopyInto(out *AzureManagedMachinePoolList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AzureManagedMachinePool, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureManagedMachinePoolList. +func (in *AzureManagedMachinePoolList) DeepCopy() *AzureManagedMachinePoolList { + if in == nil { + return nil + } + out := new(AzureManagedMachinePoolList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AzureManagedMachinePoolList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AzureManagedMachinePoolSpec) DeepCopyInto(out *AzureManagedMachinePoolSpec) { + *out = *in + if in.OSDiskSizeGB != nil { + in, out := &in.OSDiskSizeGB, &out.OSDiskSizeGB + *out = new(int32) + **out = **in + } + if in.ProviderIDList != nil { + in, out := &in.ProviderIDList, &out.ProviderIDList + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureManagedMachinePoolSpec. +func (in *AzureManagedMachinePoolSpec) DeepCopy() *AzureManagedMachinePoolSpec { + if in == nil { + return nil + } + out := new(AzureManagedMachinePoolSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureManagedMachinePoolStatus) DeepCopyInto(out *AzureManagedMachinePoolStatus) { + *out = *in + if in.ErrorReason != nil { + in, out := &in.ErrorReason, &out.ErrorReason + *out = new(errors.MachineStatusError) + **out = **in + } + if in.ErrorMessage != nil { + in, out := &in.ErrorMessage, &out.ErrorMessage + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureManagedMachinePoolStatus. +func (in *AzureManagedMachinePoolStatus) DeepCopy() *AzureManagedMachinePoolStatus { + if in == nil { + return nil + } + out := new(AzureManagedMachinePoolStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *AzureMarketplaceImage) DeepCopyInto(out *AzureMarketplaceImage) { *out = *in diff --git a/cloud/interfaces.go b/cloud/interfaces.go index 8ddf4df9cbc..4b0bc45bdcc 100644 --- a/cloud/interfaces.go +++ b/cloud/interfaces.go @@ -35,3 +35,8 @@ type GetterService interface { Reconcile(ctx context.Context, spec interface{}) error Delete(ctx context.Context, spec interface{}) error } + +type CredentialGetter interface { + GetterService + GetCredentials(ctx context.Context, spec interface{}) ([]byte, error) +} diff --git a/cloud/scope/managedcluster.go b/cloud/scope/managedcluster.go new file mode 100644 index 00000000000..5933ef9139d --- /dev/null +++ b/cloud/scope/managedcluster.go @@ -0,0 +1,133 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scope + +import ( + "context" + + "github.com/go-logr/logr" + "github.com/pkg/errors" + "k8s.io/klog/klogr" + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha3" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" + "sigs.k8s.io/cluster-api/util/patch" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// TODO(ace): cleanup this file + +// ManagedClusterScopeParams defines the input parameters used to create a new Scope. 
+type ManagedClusterScopeParams struct { + AzureClients + Client client.Client + Logger logr.Logger + Cluster *clusterv1.Cluster + AzureManagedCluster *infrav1.AzureManagedCluster + Context context.Context +} + +// NewManagedClusterScope creates a new Scope from the supplied parameters. +// This is meant to be called for each reconcile iteration. +func NewManagedClusterScope(params ManagedClusterScopeParams) (*ManagedClusterScope, error) { + if params.Cluster == nil { + return nil, errors.New("failed to generate new scope from nil Cluster") + } + if params.AzureManagedCluster == nil { + return nil, errors.New("failed to generate new scope from nil AzureManagedCluster") + } + + if params.Logger == nil { + params.Logger = klogr.New() + } + + err := params.AzureClients.setCredentials() + if err != nil { + return nil, errors.Wrap(err, "failed to create Azure session") + } + + helper, err := patch.NewHelper(params.AzureManagedCluster, params.Client) + if err != nil { + return nil, errors.Wrap(err, "failed to init patch helper") + } + return &ManagedClusterScope{ + Logger: params.Logger, + client: params.Client, + AzureClients: params.AzureClients, + Cluster: params.Cluster, + AzureManagedCluster: params.AzureManagedCluster, + patchHelper: helper, + Context: context.Background(), + }, nil +} + +// ManagedClusterScope defines the basic context for an actuator to operate upon. +type ManagedClusterScope struct { + logr.Logger + client client.Client + patchHelper *patch.Helper + + AzureClients + Cluster *clusterv1.Cluster + AzureManagedCluster *infrav1.AzureManagedCluster + Context context.Context +} + +// ResourceGroup returns the cluster resource group. +func (s *ManagedClusterScope) ResourceGroup() string { + return s.AzureManagedCluster.Spec.ResourceGroup +} + +// Name returns the cluster name. +func (s *ManagedClusterScope) Name() string { + return s.Cluster.Name +} + +// Namespace returns the cluster namespace. 
+func (s *ManagedClusterScope) Namespace() string { + return s.Cluster.Namespace +} + +// Location returns the cluster location. +func (s *ManagedClusterScope) Location() string { + return s.AzureManagedCluster.Spec.Location +} + +// ListOptionsLabelSelector returns a ListOptions with a label selector for clusterName. +func (s *ManagedClusterScope) ListOptionsLabelSelector() client.ListOption { + return client.MatchingLabels(map[string]string{ + clusterv1.ClusterLabelName: s.Cluster.Name, + }) +} + +// PatchObject persists the cluster configuration and status. +func (s *ManagedClusterScope) PatchObject() error { + return s.patchHelper.Patch(context.TODO(), s.AzureManagedCluster) +} + +// Close closes the current scope persisting the cluster configuration and status. +func (s *ManagedClusterScope) Close() error { + return s.patchHelper.Patch(context.TODO(), s.AzureManagedCluster) +} + +// AdditionalTags returns AdditionalTags from the scope's AzureManagedCluster. +func (s *ManagedClusterScope) AdditionalTags() infrav1.Tags { + tags := make(infrav1.Tags) + if s.AzureManagedCluster.Spec.AdditionalTags != nil { + tags = s.AzureManagedCluster.Spec.AdditionalTags.DeepCopy() + } + return tags +} diff --git a/cloud/services/agentpools/agentpools.go b/cloud/services/agentpools/agentpools.go new file mode 100644 index 00000000000..ac8a3253ff6 --- /dev/null +++ b/cloud/services/agentpools/agentpools.go @@ -0,0 +1,120 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package agentpools + +import ( + "context" + "fmt" + + "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice" + "github.com/google/go-cmp/cmp" + "github.com/pkg/errors" + "k8s.io/klog" + azure "sigs.k8s.io/cluster-api-provider-azure/cloud" +) + +// Spec contains properties to create a agent pool. +type Spec struct { + Name string + ResourceGroup string + Cluster string + SKU string + Replicas int32 + OSDiskSizeGB int32 +} + +// Get fetches a agent pool from Azure. +func (s *Service) Get(ctx context.Context, spec interface{}) (interface{}, error) { + agentPoolSpec, ok := spec.(*Spec) + if !ok { + return containerservice.AgentPool{}, errors.New("expected agent pool specification") + } + return s.Client.Get(ctx, agentPoolSpec.ResourceGroup, agentPoolSpec.Cluster, agentPoolSpec.Name) +} + +// Reconcile idempotently creates or updates a agent pool, if possible. 
+func (s *Service) Reconcile(ctx context.Context, spec interface{}) error { + agentPoolSpec, ok := spec.(*Spec) + if !ok { + return errors.New("expected agent pool specification") + } + + profile := containerservice.AgentPool{ + ManagedClusterAgentPoolProfileProperties: &containerservice.ManagedClusterAgentPoolProfileProperties{ + VMSize: containerservice.VMSizeTypes(agentPoolSpec.SKU), + OsDiskSizeGB: &agentPoolSpec.OSDiskSizeGB, + Count: &agentPoolSpec.Replicas, + Type: containerservice.VirtualMachineScaleSets, + }, + } + + existingSpec, err := s.Get(ctx, spec) + existingPool, ok := existingSpec.(containerservice.AgentPool) + if !ok { + return errors.New("expected agent pool specification") + } + + if err == nil { + existingProfile := containerservice.AgentPool{ + ManagedClusterAgentPoolProfileProperties: &containerservice.ManagedClusterAgentPoolProfileProperties{ + VMSize: existingPool.ManagedClusterAgentPoolProfileProperties.VMSize, + OsDiskSizeGB: existingPool.ManagedClusterAgentPoolProfileProperties.OsDiskSizeGB, + Count: existingPool.ManagedClusterAgentPoolProfileProperties.Count, + Type: containerservice.VirtualMachineScaleSets, + }, + } + + diff := cmp.Diff(profile, existingProfile) + if diff != "" { + fmt.Printf("update required (+new -old):\n%s", diff) + err = s.Client.CreateOrUpdate(ctx, agentPoolSpec.ResourceGroup, agentPoolSpec.Cluster, agentPoolSpec.Name, profile) + if err != nil { + return fmt.Errorf("failed to create or update agent pool, %v", err) + } + } else { + fmt.Println("normalized and desired managed cluster matched, no update needed (go-cmp)") + } + } else if azure.ResourceNotFound(err) { + err = s.Client.CreateOrUpdate(ctx, agentPoolSpec.ResourceGroup, agentPoolSpec.Cluster, agentPoolSpec.Name, profile) + if err != nil { + return fmt.Errorf("failed to create or update agent pool, %v", err) + } + } + + return nil +} + +// Delete deletes the virtual network with the provided name. 
+func (s *Service) Delete(ctx context.Context, spec interface{}) error { + agentPoolSpec, ok := spec.(*Spec) + if !ok { + return errors.New("expected agent pool specification") + } + + klog.V(2).Infof("deleting agent pool %s ", agentPoolSpec.Name) + err := s.Client.Delete(ctx, agentPoolSpec.ResourceGroup, agentPoolSpec.Cluster, agentPoolSpec.Name) + if err != nil && azure.ResourceNotFound(err) { + // already deleted + return nil + } + if err != nil { + return errors.Wrapf(err, "failed to delete agent pool %s in resource group %s", agentPoolSpec.Name, agentPoolSpec.ResourceGroup) + } + + klog.V(2).Infof("successfully deleted agent pool %s ", agentPoolSpec.Name) + return nil +} diff --git a/cloud/services/agentpools/client.go b/cloud/services/agentpools/client.go new file mode 100644 index 00000000000..b00a5d82a44 --- /dev/null +++ b/cloud/services/agentpools/client.go @@ -0,0 +1,86 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package agentpools + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice" + "github.com/Azure/go-autorest/autorest" + azure "sigs.k8s.io/cluster-api-provider-azure/cloud" +) + +// Client wraps go-sdk +type Client interface { + Get(context.Context, string, string, string) (containerservice.AgentPool, error) + CreateOrUpdate(context.Context, string, string, string, containerservice.AgentPool) error + Delete(context.Context, string, string, string) error +} + +// AzureClient contains the Azure go-sdk Client +type AzureClient struct { + agentpools containerservice.AgentPoolsClient +} + +var _ Client = &AzureClient{} + +// NewClient creates a new agent pools client from subscription ID. +func NewClient(subscriptionID string, authorizer autorest.Authorizer) *AzureClient { + c := newAgentPoolsClient(subscriptionID, authorizer) + return &AzureClient{c} +} + +// newAgentPoolsClient creates a new agent pool client from subscription ID. +func newAgentPoolsClient(subscriptionID string, authorizer autorest.Authorizer) containerservice.AgentPoolsClient { + agentPoolsClient := containerservice.NewAgentPoolsClient(subscriptionID) + agentPoolsClient.Authorizer = authorizer + agentPoolsClient.AddToUserAgent(azure.UserAgent) + return agentPoolsClient +} + +// Get gets a agent pool. +func (ac *AzureClient) Get(ctx context.Context, resourceGroupName, cluster, name string) (containerservice.AgentPool, error) { + return ac.agentpools.Get(ctx, resourceGroupName, cluster, name) +} + +// CreateOrUpdate creates or updates a agent pool. 
+func (ac *AzureClient) CreateOrUpdate(ctx context.Context, resourceGroupName, cluster, name string, properties containerservice.AgentPool) error { + future, err := ac.agentpools.CreateOrUpdate(ctx, resourceGroupName, cluster, name, properties) + if err != nil { + return err + } + err = future.WaitForCompletionRef(ctx, ac.agentpools.Client) + if err != nil { + return err + } + _, err = future.Result(ac.agentpools) + return err +} + +// Delete deletes a agent pool. +func (ac *AzureClient) Delete(ctx context.Context, resourceGroupName, cluster, name string) error { + future, err := ac.agentpools.Delete(ctx, resourceGroupName, cluster, name) + if err != nil { + return err + } + err = future.WaitForCompletionRef(ctx, ac.agentpools.Client) + if err != nil { + return err + } + _, err = future.Result(ac.agentpools) + return err +} diff --git a/cloud/services/agentpools/service.go b/cloud/services/agentpools/service.go new file mode 100644 index 00000000000..496b3eb1ca1 --- /dev/null +++ b/cloud/services/agentpools/service.go @@ -0,0 +1,33 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package agentpools + +import ( + "github.com/Azure/go-autorest/autorest" +) + +// Service provides operations on azure resources +type Service struct { + Client +} + +// NewService creates a new service. 
+func NewService(authorizer autorest.Authorizer, subscriptionID string) *Service { + return &Service{ + Client: NewClient(subscriptionID, authorizer), + } +} diff --git a/cloud/services/managedclusters/client.go b/cloud/services/managedclusters/client.go new file mode 100644 index 00000000000..809c528882e --- /dev/null +++ b/cloud/services/managedclusters/client.go @@ -0,0 +1,102 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package managedclusters + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice" + "github.com/Azure/go-autorest/autorest" + "github.com/pkg/errors" + azure "sigs.k8s.io/cluster-api-provider-azure/cloud" +) + +// Client wraps go-sdk +type Client interface { + Get(context.Context, string, string) (containerservice.ManagedCluster, error) + GetCredentials(context.Context, string, string) ([]byte, error) + CreateOrUpdate(context.Context, string, string, containerservice.ManagedCluster) error + Delete(context.Context, string, string) error +} + +// AzureClient contains the Azure go-sdk Client +type AzureClient struct { + managedclusters containerservice.ManagedClustersClient +} + +var _ Client = &AzureClient{} + +// NewClient creates a new VM client from subscription ID. 
+func NewClient(subscriptionID string, authorizer autorest.Authorizer) *AzureClient { + c := newManagedClustersClient(subscriptionID, authorizer) + return &AzureClient{c} +} + +// newManagedClustersClient creates a new managed clusters client from subscription ID. +func newManagedClustersClient(subscriptionID string, authorizer autorest.Authorizer) containerservice.ManagedClustersClient { + managedClustersClient := containerservice.NewManagedClustersClient(subscriptionID) + managedClustersClient.Authorizer = authorizer + managedClustersClient.AddToUserAgent(azure.UserAgent) + return managedClustersClient +} + +// Get gets a managed cluster. +func (ac *AzureClient) Get(ctx context.Context, resourceGroupName, name string) (containerservice.ManagedCluster, error) { + return ac.managedclusters.Get(ctx, resourceGroupName, name) +} + +// GetCredentials fetches the admin kubeconfig for a managed cluster. +func (ac *AzureClient) GetCredentials(ctx context.Context, resourceGroupName, name string) ([]byte, error) { + credentialList, err := ac.managedclusters.ListClusterAdminCredentials(ctx, resourceGroupName, name) + if err != nil { + return nil, err + } + + if credentialList.Kubeconfigs == nil || len(*credentialList.Kubeconfigs) < 1 { + return nil, errors.New("no kubeconfigs available for the managed cluster cluster") + } + + return *(*credentialList.Kubeconfigs)[0].Value, nil +} + +// CreateOrUpdate creates or updates a managed cluster. +func (ac *AzureClient) CreateOrUpdate(ctx context.Context, resourceGroupName, name string, cluster containerservice.ManagedCluster) error { + future, err := ac.managedclusters.CreateOrUpdate(ctx, resourceGroupName, name, cluster) + if err != nil { + return err + } + err = future.WaitForCompletionRef(ctx, ac.managedclusters.Client) + if err != nil { + return err + } + _, err = future.Result(ac.managedclusters) + return err +} + +// Delete deletes a managed cluster. 
+func (ac *AzureClient) Delete(ctx context.Context, resourceGroupName, name string) error { + future, err := ac.managedclusters.Delete(ctx, resourceGroupName, name) + if err != nil { + return err + } + err = future.WaitForCompletionRef(ctx, ac.managedclusters.Client) + if err != nil { + return err + } + _, err = future.Result(ac.managedclusters) + return err +} diff --git a/cloud/services/managedclusters/managedclusters.go b/cloud/services/managedclusters/managedclusters.go new file mode 100644 index 00000000000..7574e97e53a --- /dev/null +++ b/cloud/services/managedclusters/managedclusters.go @@ -0,0 +1,184 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package managedclusters + +import ( + "context" + "fmt" + + "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice" + "github.com/Azure/go-autorest/autorest/azure/auth" + "github.com/pkg/errors" + "k8s.io/klog" + azure "sigs.k8s.io/cluster-api-provider-azure/cloud" +) + +var defaultUser string = "azureuser" + +// Spec contains properties to create a managed cluster. +type Spec struct { + // Name is the name of this AKS Cluster. + Name string + + // ResourceGroup is the name of the Azure resource group for this AKS Cluster. + ResourceGroup string + + // Location is a string matching one of the canonical Azure region names. Examples: "westus2", "eastus". + Location string + + // Tags is a set of tags to add to this cluster. 
+ // +optional
+ Tags map[string]string
+
+ // Version defines the desired Kubernetes version.
+ Version string
+
+ // LoadBalancerSKU for the managed cluster. Possible values include: 'Standard', 'Basic'. Defaults to standard.
+ LoadBalancerSKU *string
+
+ // NetworkPlugin used for building Kubernetes network. Possible values include: 'Azure', 'Kubenet'. Defaults to Azure.
+ NetworkPlugin *string
+
+ // NetworkPolicy used for building Kubernetes network. Possible values include: 'NetworkPolicyCalico', 'NetworkPolicyAzure'. Defaults to Azure.
+ NetworkPolicy *string
+
+ // SSHPublicKey is a string literal containing an ssh public key. Will autogenerate and discard if not provided.
+ SSHPublicKey string
+
+ // AgentPools is the list of agent pool specifications in this cluster.
+ AgentPools []PoolSpec
+}
+
+type PoolSpec struct {
+ Name string
+ SKU string
+ Replicas int32
+ OSDiskSizeGB int32
+}
+
+// Get fetches a managed cluster from Azure.
+func (s *Service) Get(ctx context.Context, spec interface{}) (interface{}, error) {
+ managedClusterSpec, ok := spec.(*Spec)
+ if !ok {
+ return containerservice.ManagedCluster{}, errors.New("expected managed cluster specification")
+ }
+ return s.Client.Get(ctx, managedClusterSpec.ResourceGroup, managedClusterSpec.Name)
+}
+
+// GetCredentials fetches a managed cluster kubeconfig from Azure.
+func (s *Service) GetCredentials(ctx context.Context, spec interface{}) ([]byte, error) {
+ managedClusterSpec, ok := spec.(*Spec)
+ if !ok {
+ return nil, errors.New("expected managed cluster specification")
+ }
+ return s.Client.GetCredentials(ctx, managedClusterSpec.ResourceGroup, managedClusterSpec.Name)
+}
+
+// Reconcile idempotently creates or updates a managed cluster, if possible.
+func (s *Service) Reconcile(ctx context.Context, spec interface{}) error { + managedClusterSpec, ok := spec.(*Spec) + if !ok { + return errors.New("expected managed cluster specification") + } + + // ****WARNING**** + // ****WARNING**** + // TODO(ace): probably don't do this. + settings, err := auth.GetSettingsFromEnvironment() + if err != nil { + return fmt.Errorf("failed to parse azure environment settings: %v", err) + } + aadClientID, aadClientSecret := settings.Values[auth.ClientID], settings.Values[auth.ClientSecret] + + properties := containerservice.ManagedCluster{ + Location: &managedClusterSpec.Location, + ManagedClusterProperties: &containerservice.ManagedClusterProperties{ + DNSPrefix: &managedClusterSpec.Name, + KubernetesVersion: &managedClusterSpec.Version, + LinuxProfile: &containerservice.LinuxProfile{ + AdminUsername: &defaultUser, + SSH: &containerservice.SSHConfiguration{ + PublicKeys: &[]containerservice.SSHPublicKey{ + { + KeyData: &managedClusterSpec.SSHPublicKey, + }, + }, + }, + }, + ServicePrincipalProfile: &containerservice.ManagedClusterServicePrincipalProfile{ + ClientID: &aadClientID, + Secret: &aadClientSecret, + }, + AgentPoolProfiles: &[]containerservice.ManagedClusterAgentPoolProfile{}, + NetworkProfile: &containerservice.NetworkProfileType{ + NetworkPlugin: containerservice.Azure, + LoadBalancerSku: containerservice.Standard, + }, + }, + } + + if managedClusterSpec.NetworkPlugin != nil { + properties.NetworkProfile.NetworkPlugin = containerservice.NetworkPlugin(*managedClusterSpec.NetworkPlugin) + } + + if managedClusterSpec.NetworkPolicy != nil { + properties.NetworkProfile.NetworkPolicy = containerservice.NetworkPolicy(*managedClusterSpec.NetworkPolicy) + } + + if managedClusterSpec.LoadBalancerSKU != nil { + properties.NetworkProfile.LoadBalancerSku = containerservice.LoadBalancerSku(*managedClusterSpec.LoadBalancerSKU) + } + + for _, pool := range managedClusterSpec.AgentPools { + profile := 
containerservice.ManagedClusterAgentPoolProfile{
+ Name: &pool.Name,
+ VMSize: containerservice.VMSizeTypes(pool.SKU),
+ OsDiskSizeGB: &pool.OSDiskSizeGB,
+ Count: &pool.Replicas,
+ Type: containerservice.VirtualMachineScaleSets,
+ }
+ *properties.AgentPoolProfiles = append(*properties.AgentPoolProfiles, profile)
+ }
+
+ err = s.Client.CreateOrUpdate(ctx, managedClusterSpec.ResourceGroup, managedClusterSpec.Name, properties)
+ if err != nil {
+ return fmt.Errorf("failed to create or update managed cluster, %v", err)
+ }
+
+ return nil
+}
+
+// Delete deletes the managed cluster with the provided name.
+func (s *Service) Delete(ctx context.Context, spec interface{}) error {
+ managedClusterSpec, ok := spec.(*Spec)
+ if !ok {
+ return errors.New("expected managed cluster specification")
+ }
+
+ klog.V(2).Infof("deleting managed cluster %s ", managedClusterSpec.Name)
+ err := s.Client.Delete(ctx, managedClusterSpec.ResourceGroup, managedClusterSpec.Name)
+ if err != nil && azure.ResourceNotFound(err) {
+ // already deleted
+ return nil
+ }
+ if err != nil {
+ return errors.Wrapf(err, "failed to delete managed cluster %s in resource group %s", managedClusterSpec.Name, managedClusterSpec.ResourceGroup)
+ }
+
+ klog.V(2).Infof("successfully deleted managed cluster %s ", managedClusterSpec.Name)
+ return nil
+}
diff --git a/cloud/services/managedclusters/service.go b/cloud/services/managedclusters/service.go
new file mode 100644
index 00000000000..a188fab24b4
--- /dev/null
+++ b/cloud/services/managedclusters/service.go
@@ -0,0 +1,33 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package managedclusters + +import ( + "github.com/Azure/go-autorest/autorest" +) + +// Service provides operations on azure resources +type Service struct { + Client +} + +// NewService creates a new service. +func NewService(authorizer autorest.Authorizer, subscriptionID string) *Service { + return &Service{ + Client: NewClient(subscriptionID, authorizer), + } +} diff --git a/cloud/services/scalesets/client.go b/cloud/services/scalesets/client.go new file mode 100644 index 00000000000..aa82dfe73ec --- /dev/null +++ b/cloud/services/scalesets/client.go @@ -0,0 +1,65 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package scalesets + +import ( + "context" + "fmt" + + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute" + "github.com/Azure/go-autorest/autorest" + azure "sigs.k8s.io/cluster-api-provider-azure/cloud" +) + +// Client wraps go-sdk +type Client interface { + List(context.Context, string) ([]interface{}, error) +} + +// AzureClient contains the Azure go-sdk Client +type AzureClient struct { + scalesets compute.VirtualMachineScaleSetsClient +} + +var _ Client = &AzureClient{} + +// NewClient creates a new VMSS client from subscription ID. +func NewClient(subscriptionID string, authorizer autorest.Authorizer) *AzureClient { + c := newVirtualMachineScaleSetsClient(subscriptionID, authorizer) + return &AzureClient{c} +} + +// newVirtualMachineScaleSetsClient creates a new vmss client from subscription ID. +func newVirtualMachineScaleSetsClient(subscriptionID string, authorizer autorest.Authorizer) compute.VirtualMachineScaleSetsClient { + scaleSetsClient := compute.NewVirtualMachineScaleSetsClient(subscriptionID) + scaleSetsClient.Authorizer = authorizer + scaleSetsClient.AddToUserAgent(azure.UserAgent) + return scaleSetsClient +} + +// Lists all scale sets in a resource group. +func (ac *AzureClient) List(ctx context.Context, resourceGroupName string) ([]interface{}, error) { + itr, err := ac.scalesets.ListComplete(ctx, resourceGroupName) + var instances []interface{} + for ; itr.NotDone(); err = itr.NextWithContext(ctx) { + if err != nil { + return nil, fmt.Errorf("failed to iterate vm scale sets [%w]", err) + } + instances = append(instances, itr.Value()) + } + return instances, nil +} diff --git a/cloud/services/scalesets/service.go b/cloud/services/scalesets/service.go new file mode 100644 index 00000000000..68eaae1500f --- /dev/null +++ b/cloud/services/scalesets/service.go @@ -0,0 +1,31 @@ +/* +Copyright 2020 The Kubernetes Authors. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package scalesets
+
+import "github.com/Azure/go-autorest/autorest"
+
+// Service provides operations on azure resources
+type Service struct {
+ Client
+}
+
+// NewService creates a new service.
+func NewService(authorizer autorest.Authorizer, subscriptionID string) *Service {
+ return &Service{
+ Client: NewClient(subscriptionID, authorizer),
+ }
+}
diff --git a/cloud/services/scalesets/vmss.go b/cloud/services/scalesets/vmss.go
new file mode 100644
index 00000000000..d2f6674bc76
--- /dev/null
+++ b/cloud/services/scalesets/vmss.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package scalesets
+
+import (
+ "context"
+ "errors"
+)
+
+// Spec contains properties to identify VMSS in a resource group.
+type Spec struct {
+ ResourceGroup string
+}
+
+// List returns a list of all VM scale sets in the given resource group.
+func (s *Service) List(ctx context.Context, spec interface{}) ([]interface{}, error) { + scaleSetsSpec, ok := spec.(*Spec) + if !ok { + return nil, errors.New("expected scale set specification") + } + return s.Client.List(ctx, scaleSetsSpec.ResourceGroup) +} diff --git a/cloud/services/scalesetvms/client.go b/cloud/services/scalesetvms/client.go new file mode 100644 index 00000000000..f572017a757 --- /dev/null +++ b/cloud/services/scalesetvms/client.go @@ -0,0 +1,66 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scalesetvms + +import ( + "context" + "fmt" + + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute" + "github.com/Azure/go-autorest/autorest" + azure "sigs.k8s.io/cluster-api-provider-azure/cloud" +) + +// Client wraps go-sdk +type Client interface { + ListInstances(context.Context, string, string) ([]string, error) +} + +// AzureClient contains the Azure go-sdk Client +type AzureClient struct { + scalesetvms compute.VirtualMachineScaleSetVMsClient +} + +var _ Client = &AzureClient{} + +// NewClient creates a new VMSS client from subscription ID. +func NewClient(subscriptionID string, authorizer autorest.Authorizer) *AzureClient { + c := newVirtualMachineScaleSetVMsClient(subscriptionID, authorizer) + return &AzureClient{c} +} + +// newVirtualMachineScaleSetVMsClient creates a new vmss client from subscription ID. 
+func newVirtualMachineScaleSetVMsClient(subscriptionID string, authorizer autorest.Authorizer) compute.VirtualMachineScaleSetVMsClient { + scaleSetVMsClient := compute.NewVirtualMachineScaleSetVMsClient(subscriptionID) + scaleSetVMsClient.Authorizer = authorizer + scaleSetVMsClient.AddToUserAgent(azure.UserAgent) + return scaleSetVMsClient +} + +// Lists all instance IDs in a VM scale set. +func (ac *AzureClient) ListInstances(ctx context.Context, resourceGroupName, name string) ([]string, error) { + itr, err := ac.scalesetvms.ListComplete(ctx, resourceGroupName, name, "", "", "") + var instances []string + for ; itr.NotDone(); err = itr.NextWithContext(ctx) { + if err != nil { + return nil, fmt.Errorf("failed to iterate vm scale sets [%w]", err) + } + vm := itr.Value() + instances = append(instances, fmt.Sprintf("azure://%s", *vm.ID)) + } + return instances, nil +} diff --git a/cloud/services/scalesetvms/service.go b/cloud/services/scalesetvms/service.go new file mode 100644 index 00000000000..8414b5e154c --- /dev/null +++ b/cloud/services/scalesetvms/service.go @@ -0,0 +1,31 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scalesetvms + +import "github.com/Azure/go-autorest/autorest" + +// Service provides operations on azure resources +type Service struct { + Client +} + +// NewService creates a new service. 
+func NewService(authorizer autorest.Authorizer, subscriptionID string) *Service {
+ return &Service{
+ Client: NewClient(subscriptionID, authorizer),
+ }
+}
diff --git a/cloud/services/scalesetvms/vmss.go b/cloud/services/scalesetvms/vmss.go
new file mode 100644
index 00000000000..bbd9a180836
--- /dev/null
+++ b/cloud/services/scalesetvms/vmss.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package scalesetvms
+
+import (
+ "context"
+ "errors"
+)
+
+// Spec contains properties to identify a VM scale set in a resource group.
+type Spec struct {
+ Name string
+ ResourceGroup string
+}
+
+// ListInstances returns a list of provider IDs for the given VM scale set.
+func (s *Service) ListInstances(ctx context.Context, spec interface{}) ([]string, error) { + scaleSetVMsSpec, ok := spec.(*Spec) + if !ok { + return nil, errors.New("expected scale set vms specification") + } + return s.Client.ListInstances(ctx, scaleSetVMsSpec.ResourceGroup, scaleSetVMsSpec.Name) +} diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedclusters.yaml new file mode 100644 index 00000000000..0ed43ac0641 --- /dev/null +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedclusters.yaml @@ -0,0 +1,138 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.2.6 + creationTimestamp: null + name: azuremanagedclusters.infrastructure.cluster.x-k8s.io +spec: + group: infrastructure.cluster.x-k8s.io + names: + categories: + - cluster-api + kind: AzureManagedCluster + listKind: AzureManagedClusterList + plural: azuremanagedclusters + shortNames: + - amc + singular: azuremanagedcluster + scope: Namespaced + versions: + - name: v1alpha3 + schema: + openAPIV3Schema: + description: AzureManagedCluster is the Schema for the azuremanagedclusters + API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: AzureManagedClusterSpec defines the desired state of AzureManagedCluster + properties: + additionalTags: + additionalProperties: + type: string + description: AdditionalTags is an optional set of tags to add to Azure + resources managed by the Azure provider, in addition to the ones + added by default. + type: object + controlPlaneEndpoint: + description: ControlPlaneEndpoint represents the endpoint used to + communicate with the control plane. + properties: + host: + description: The hostname on which the API server is serving. + type: string + port: + description: The port on which the API server is serving. + format: int32 + type: integer + required: + - host + - port + type: object + defaultPoolRef: + description: 'DefaultPoolRef is the specification for the default + pool, without which an AKS cluster cannot be created. TODO(ace): + consider defaulting and making optional pointer?' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + loadBalancerSku: + description: 'LoadBalancerSKU for the managed cluster. Possible values + include: ''Standard'', ''Basic''. Defaults to standard.' + enum: + - Standard + - Basic + type: string + location: + description: 'Location is a string matching one of the canonical Azure + region names. Examples: "westus2", "eastus".' + type: string + networkPlugin: + description: 'NetworkPlugin used for building Kubernetes network. + Possible values include: ''Azure'', ''Kubenet''. Defaults to Azure.' + enum: + - Azure + - Kubenet + type: string + networkPolicy: + description: 'NetworkPolicy used for building Kubernetes network. 
+ Possible values include: ''NetworkPolicyCalico'', ''NetworkPolicyAzure''' + enum: + - NetworkPolicyCalico + - NetworkPolicyAzure + type: string + resourceGroup: + description: ResourceGroup is the name of the Azure resource group + for this AKS Cluster. + type: string + sshPublicKey: + description: SSHPublicKey is a string literal containing an ssh public + key. + type: string + version: + description: Version defines the desired Kubernetes version. + minLength: 2 + pattern: ^(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)([-0-9a-zA-Z_\.+]*)?$ + type: string + required: + - defaultPoolRef + - location + - resourceGroup + - sshPublicKey + - version + type: object + status: + description: AzureManagedClusterStatus defines the observed state of AzureManagedCluster + properties: + ready: + description: Ready is true when the provider resource is ready. + type: boolean + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedmachinepools.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedmachinepools.yaml new file mode 100644 index 00000000000..f987a149698 --- /dev/null +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedmachinepools.yaml @@ -0,0 +1,95 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.2.6 + creationTimestamp: null + name: azuremanagedmachinepools.infrastructure.cluster.x-k8s.io +spec: + group: infrastructure.cluster.x-k8s.io + names: + categories: + - cluster-api + kind: AzureManagedMachinePool + listKind: AzureManagedMachinePoolList + plural: azuremanagedmachinepools + shortNames: + - ammp + singular: azuremanagedmachinepool + scope: Namespaced + versions: + - name: v1alpha3 + schema: + openAPIV3Schema: + description: 
AzureManagedMachinePool is the Schema for the azuremanagedmachinepools + API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: AzureManagedMachinePoolSpec defines the desired state of + AzureManagedMachinePool + properties: + osDiskSizeGB: + description: OSDiskSizeGB is the disk size for every machine in this + master/agent pool. If you specify 0, it will apply the default osDisk + size according to the vmSize specified. + format: int32 + type: integer + providerIDList: + description: ProviderIDList is the unique identifier as specified + by the cloud provider. + items: + type: string + type: array + sku: + description: SKU is the size of the VMs in the node pool. + type: string + required: + - sku + type: object + status: + description: AzureManagedMachinePoolStatus defines the observed state + of AzureManagedMachinePool + properties: + errorMessage: + description: Any transient errors that occur during the reconciliation + of Machines can be added as events to the Machine object and/or + logged in the controller's output. + type: string + errorReason: + description: Any transient errors that occur during the reconciliation + of Machines can be added as events to the Machine object and/or + logged in the controller's output. 
+ type: string + ready: + description: Ready is true when the provider resource is ready. + type: boolean + replicas: + description: Replicas is the most recently observed number of replicas. + format: int32 + type: integer + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 214dd10da50..fb62ec0effe 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -8,14 +8,19 @@ resources: - bases/infrastructure.cluster.x-k8s.io_azuremachines.yaml - bases/infrastructure.cluster.x-k8s.io_azureclusters.yaml - bases/infrastructure.cluster.x-k8s.io_azuremachinetemplates.yaml + - bases/infrastructure.cluster.x-k8s.io_azuremanagedmachinepools.yaml + - bases/infrastructure.cluster.x-k8s.io_azuremanagedclusters.yaml # +kubebuilder:scaffold:crdkustomizeresource + patchesStrategicMerge: # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. # patches here are for enabling the conversion webhook for each CRD - patches/webhook_in_azuremachines.yaml - patches/webhook_in_azureclusters.yaml - patches/webhook_in_azuremachinetemplates.yaml + # - patches/webhook_in_azuremanagedmachinepools.yaml + # - patches/webhook_in_azuremanagedclusters.yaml # +kubebuilder:scaffold:crdkustomizewebhookpatch # [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix. @@ -23,6 +28,8 @@ patchesStrategicMerge: - patches/cainjection_in_azuremachines.yaml - patches/cainjection_in_azureclusters.yaml - patches/cainjection_in_azuremachinetemplates.yaml + # - patches/cainjection_in_azuremanagedmachinepools.yaml + # - patches/cainjection_in_azuremanagedclusters.yaml # +kubebuilder:scaffold:crdkustomizecainjectionpatch # the following config is for teaching kustomize how to do kustomization for CRDs. 
diff --git a/config/crd/patches/cainjection_in_azuremanagedclusters.yaml b/config/crd/patches/cainjection_in_azuremanagedclusters.yaml new file mode 100644 index 00000000000..8a7eb576593 --- /dev/null +++ b/config/crd/patches/cainjection_in_azuremanagedclusters.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: azuremanagedclusters.infrastructure.cluster.x-k8s.io diff --git a/config/crd/patches/cainjection_in_azuremanagedmachinepools.yaml b/config/crd/patches/cainjection_in_azuremanagedmachinepools.yaml new file mode 100644 index 00000000000..dc9610ffc73 --- /dev/null +++ b/config/crd/patches/cainjection_in_azuremanagedmachinepools.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: azuremanagedmachinepools.infrastructure.cluster.x-k8s.io diff --git a/config/crd/patches/webhook_in_azuremanagedclusters.yaml b/config/crd/patches/webhook_in_azuremanagedclusters.yaml new file mode 100644 index 00000000000..8bdb4c52ee4 --- /dev/null +++ b/config/crd/patches/webhook_in_azuremanagedclusters.yaml @@ -0,0 +1,19 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. 
+apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: azuremanagedclusters.infrastructure.cluster.x-k8s.io +spec: + conversion: + strategy: Webhook + webhook: + conversionReviewVersions: ["v1", "v1beta1"] + clientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert \ No newline at end of file diff --git a/config/crd/patches/webhook_in_azuremanagedmachinepools.yaml b/config/crd/patches/webhook_in_azuremanagedmachinepools.yaml new file mode 100644 index 00000000000..ee0dca7d9e5 --- /dev/null +++ b/config/crd/patches/webhook_in_azuremanagedmachinepools.yaml @@ -0,0 +1,19 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: azuremanagedmachinepools.infrastructure.cluster.x-k8s.io +spec: + conversion: + strategy: Webhook + webhook: + conversionReviewVersions: ["v1", "v1beta1"] + clientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert \ No newline at end of file diff --git a/config/manager/manager_image_patch.yaml b/config/manager/manager_image_patch.yaml index 0876a1db40d..97b19f11c52 100644 --- a/config/manager/manager_image_patch.yaml +++ b/config/manager/manager_image_patch.yaml @@ -8,5 +8,5 @@ spec: spec: containers: # Change the value of image field below to your controller image URL - - image: gcr.io/k8s-staging-cluster-api-azure/cluster-api-azure-controller:latest + - image: 
docker.io/alexeldeib/cluster-api-azure-controller:latest name: manager diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 97c8d491a93..0a3db520da8 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -22,8 +22,10 @@ rules: resources: - secrets verbs: + - create - get - list + - patch - watch - apiGroups: - cluster.x-k8s.io @@ -33,6 +35,7 @@ rules: verbs: - get - list + - patch - watch - apiGroups: - cluster.x-k8s.io @@ -43,6 +46,15 @@ rules: - get - list - watch +- apiGroups: + - exp.cluster.x-k8s.io + resources: + - machinepools + - machinepools/status + verbs: + - get + - list + - watch - apiGroups: - infrastructure.cluster.x-k8s.io resources: @@ -83,3 +95,43 @@ rules: - get - patch - update +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - azuremanagedclusters + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - azuremanagedclusters/status + verbs: + - get + - patch + - update +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - azuremanagedmachinepools + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - azuremanagedmachinepools/status + verbs: + - get + - patch + - update diff --git a/controllers/azurecluster_controller.go b/controllers/azurecluster_controller.go index d14ce28ae20..fe4f4395a12 100644 --- a/controllers/azurecluster_controller.go +++ b/controllers/azurecluster_controller.go @@ -52,6 +52,7 @@ func (r *AzureClusterReconciler) SetupWithManager(mgr ctrl.Manager, options cont // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=azureclusters,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=azureclusters/status,verbs=get;update;patch // 
+kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch +// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;patch func (r *AzureClusterReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, reterr error) { ctx := context.TODO() diff --git a/controllers/azuremanagedcluster_controller.go b/controllers/azuremanagedcluster_controller.go new file mode 100644 index 00000000000..f828d36034d --- /dev/null +++ b/controllers/azuremanagedcluster_controller.go @@ -0,0 +1,207 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllers + +import ( + "context" + "os" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure/auth" + "github.com/go-logr/logr" + "github.com/pkg/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/client-go/tools/record" + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha3" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" + expv1 "sigs.k8s.io/cluster-api/exp/api/v1alpha3" + + "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/patch" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// TODO(ace): put this back in the scope package +type ManagedClusterContext struct { + log logr.Logger + patchhelper *patch.Helper + aksCluster *infrav1.AzureManagedCluster + ownerCluster *clusterv1.Cluster + infraPool *infrav1.AzureManagedMachinePool + ownerPool *expv1.MachinePool +} + +// AzureManagedClusterReconciler reconciles a AzureManagedCluster object +type AzureManagedClusterReconciler struct { + client.Client + Log logr.Logger + Recorder record.EventRecorder +} + +func (r *AzureManagedClusterReconciler) SetupWithManager(mgr ctrl.Manager, options controller.Options) error { + return ctrl.NewControllerManagedBy(mgr). + WithOptions(options). + For(&infrav1.AzureManagedCluster{}). 
+ Complete(r) +} + +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=azuremanagedclusters,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=azuremanagedclusters/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch + +func (r *AzureManagedClusterReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, reterr error) { + ctx := context.TODO() + log := r.Log.WithValues("namespace", req.Namespace, "aksCluster", req.Name) + + // Fetch the AzureManagedCluster instance + aksCluster := &infrav1.AzureManagedCluster{} + err := r.Get(ctx, req.NamespacedName, aksCluster) + if err != nil { + if apierrors.IsNotFound(err) { + return reconcile.Result{}, nil + } + return reconcile.Result{}, err + } + + // Fetch the Cluster. + cluster, err := util.GetOwnerCluster(ctx, r.Client, aksCluster.ObjectMeta) + if err != nil { + return reconcile.Result{}, err + } + if cluster == nil { + log.Info("Cluster Controller has not yet set OwnerRef") + return reconcile.Result{}, nil + } + + log = log.WithValues("cluster", cluster.Name) + + // initialize patch helper + patchhelper, err := patch.NewHelper(aksCluster, r.Client) + if err != nil { + return reconcile.Result{}, errors.Wrap(err, "failed to init patchhelper") + } + + // Always close the scope when exiting this function so we can persist any changes. + defer func() { + if err := patchhelper.Patch(ctx, aksCluster); err != nil && reterr == nil { + reterr = err + } + }() + + // extract subscription ID from environment + // TODO(ace): don't do this here + subscriptionID := os.Getenv("AZURE_SUBSCRIPTION_ID") + if subscriptionID == "" { + return reconcile.Result{}, errors.New("error creating azure services. Environment variable AZURE_SUBSCRIPTION_ID is not set") + } + + // fetch azure authorizer + // TODO(ace): probably use a secret ref/object ref instead? 
+ authorizer, err := auth.NewAuthorizerFromEnvironment() + if err != nil { + return reconcile.Result{}, errors.Wrap(err, "failed to get authorizer from environment") + } + + // fetch default pool + defaultPoolKey := client.ObjectKey{ + Name: aksCluster.Spec.DefaultPoolRef.Name, + Namespace: aksCluster.ObjectMeta.Namespace, + } + defaultPool := &infrav1.AzureManagedMachinePool{} + if err := r.Client.Get(ctx, defaultPoolKey, defaultPool); err != nil { + return reconcile.Result{}, errors.Wrapf(err, "failed to fetch default pool reference") + } + + // fetch owner of default pool + // TODO(ace): create a helper in util for this + // Fetch the owning MachinePool. + ownerPool, err := getOwnerMachinePool(ctx, r.Client, defaultPool.ObjectMeta) + if err != nil { + return reconcile.Result{}, err + } + if ownerPool == nil { + log.Info("failed to fetch owner ref for default pool") + return reconcile.Result{}, nil + } + + // TODO(ace): move to scope + scope := &ManagedClusterContext{ + log: log, + patchhelper: patchhelper, + aksCluster: aksCluster, + ownerCluster: cluster, + infraPool: defaultPool, + ownerPool: ownerPool, + } + + // Handle deleted clusters + if !aksCluster.DeletionTimestamp.IsZero() { + return r.reconcileDelete(ctx, scope, subscriptionID, authorizer) + } + + // Handle non-deleted clusters + return r.reconcileNormal(ctx, scope, subscriptionID, authorizer) +} + +func (r *AzureManagedClusterReconciler) reconcileNormal(ctx context.Context, scope *ManagedClusterContext, subscriptionID string, authorizer autorest.Authorizer) (reconcile.Result, error) { + scope.log.Info("Reconciling AzureManagedCluster") + + // If the AzureManagedCluster doesn't have our finalizer, add it. 
+ controllerutil.AddFinalizer(scope.aksCluster, infrav1.ClusterFinalizer) + // Register the finalizer immediately to avoid orphaning Azure resources on delete + if err := scope.patchhelper.Patch(ctx, scope.aksCluster); err != nil { + return reconcile.Result{}, err + } + + if err := newAzureManagedClusterReconciler(subscriptionID, authorizer, r.Client).Reconcile(ctx, scope); err != nil { + return reconcile.Result{}, errors.Wrapf(err, "error creating AzureManagedCluster %s/%s", scope.aksCluster.Namespace, scope.aksCluster.Name) + } + + // No errors, so mark us ready so the Cluster API Cluster Controller can pull it + scope.aksCluster.Status.Ready = true + scope.ownerCluster.Status.ControlPlaneInitialized = true + scope.ownerCluster.Status.ControlPlaneReady = true + + if err := scope.patchhelper.Patch(ctx, scope.aksCluster); err != nil { + return reconcile.Result{}, err + } + + if err := scope.patchhelper.Patch(ctx, scope.ownerCluster); err != nil { + return reconcile.Result{}, err + } + + scope.log.Info("Successfully reconciled") + + return reconcile.Result{}, nil +} + +func (r *AzureManagedClusterReconciler) reconcileDelete(ctx context.Context, scope *ManagedClusterContext, subscriptionID string, authorizer autorest.Authorizer) (reconcile.Result, error) { + scope.log.Info("Reconciling AzureManagedCluster delete") + + if err := newAzureManagedClusterReconciler(subscriptionID, authorizer, r.Client).Delete(ctx, scope); err != nil { + return reconcile.Result{}, errors.Wrapf(err, "error deleting AzureManagedCluster %s/%s", scope.aksCluster.Namespace, scope.aksCluster.Name) + } + + // Cluster is deleted so remove the finalizer. 
+ controllerutil.RemoveFinalizer(scope.aksCluster, infrav1.ClusterFinalizer) + + return reconcile.Result{}, nil +} diff --git a/controllers/azuremanagedcluster_reconciler.go b/controllers/azuremanagedcluster_reconciler.go new file mode 100644 index 00000000000..919788975ca --- /dev/null +++ b/controllers/azuremanagedcluster_reconciler.go @@ -0,0 +1,158 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + "fmt" + + "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice" + "github.com/Azure/go-autorest/autorest" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + azure "sigs.k8s.io/cluster-api-provider-azure/cloud" + "sigs.k8s.io/cluster-api-provider-azure/cloud/services/managedclusters" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" + "sigs.k8s.io/cluster-api/util/secret" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +// azureManagedClusterReconciler are list of services required by cluster controller +type azureManagedClusterReconciler struct { + kubeclient client.Client + managedClustersSvc azure.CredentialGetter +} + +// newAzureManagedClusterReconciler populates all the services based on input scope +func newAzureManagedClusterReconciler(subscriptionID string, authorizer autorest.Authorizer, kubeclient client.Client) 
*azureManagedClusterReconciler { + return &azureManagedClusterReconciler{ + kubeclient: kubeclient, + managedClustersSvc: managedclusters.NewService(authorizer, subscriptionID), + } +} + +// Reconcile reconciles all the services in pre determined order +func (r *azureManagedClusterReconciler) Reconcile(ctx context.Context, scope *ManagedClusterContext) error { + scope.log.Info("reconciling cluster") + managedClusterSpec := &managedclusters.Spec{ + Name: scope.aksCluster.Name, + ResourceGroup: scope.aksCluster.Spec.ResourceGroup, + Location: scope.aksCluster.Spec.Location, + Tags: scope.aksCluster.Spec.AdditionalTags, + Version: scope.aksCluster.Spec.Version, + LoadBalancerSKU: scope.aksCluster.Spec.LoadBalancerSKU, + NetworkPlugin: scope.aksCluster.Spec.NetworkPlugin, + NetworkPolicy: scope.aksCluster.Spec.NetworkPolicy, + SSHPublicKey: scope.aksCluster.Spec.SSHPublicKey, + } + + _, err := r.managedClustersSvc.Get(ctx, managedClusterSpec) + if err != nil && !azure.ResourceNotFound(err) { + return errors.Wrapf(err, "failed to reconcile managed cluster %s", scope.aksCluster.Name) + } + + // Configure the default pool, rest will be handled by machinepool controller + defaultPoolSpec := managedclusters.PoolSpec{ + Name: scope.infraPool.Name, + SKU: scope.infraPool.Spec.SKU, + Replicas: 1, + OSDiskSizeGB: 0, + } + + if scope.infraPool.Spec.OSDiskSizeGB != nil { + defaultPoolSpec.OSDiskSizeGB = *scope.infraPool.Spec.OSDiskSizeGB + } + if scope.ownerPool.Spec.Replicas != nil { + defaultPoolSpec.Replicas = *scope.ownerPool.Spec.Replicas + } + + managedClusterSpec.AgentPools = []managedclusters.PoolSpec{defaultPoolSpec} + + if azure.ResourceNotFound(err) { + // We are creating this cluster for the first time. 
+ if err := r.managedClustersSvc.Reconcile(ctx, managedClusterSpec); err != nil { + return errors.Wrapf(err, "failed to reconcile managed cluster %s", scope.aksCluster.Name) + } + scope.log.Info("reconciled managed cluster successfully") + return nil + } + + // Fetched newly created credentials + managedClusterResult, err := r.managedClustersSvc.Get(ctx, managedClusterSpec) + if err != nil { + return err + } + + managedCluster, ok := managedClusterResult.(containerservice.ManagedCluster) + if !ok { + return fmt.Errorf("expected containerservice ManagedCluster object") + } + + scope.aksCluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{ + Host: *managedCluster.ManagedClusterProperties.Fqdn, + Port: 443, + } + + // Fetched newly created credentials + data, err := r.managedClustersSvc.GetCredentials(ctx, managedClusterSpec) + if err != nil { + return err + } + + // Construct and store secret + kubeconfig := makeKubeconfig(scope.ownerCluster) + _, err = controllerutil.CreateOrUpdate(ctx, r.kubeclient, kubeconfig, func() error { + kubeconfig.Data = map[string][]byte{ + secret.KubeconfigDataName: data, + } + return nil + }) + + // if err != nil { + // scope.log.Error(err) + // } + + return err +} + +// Delete reconciles all the services in pre determined order +func (r *azureManagedClusterReconciler) Delete(ctx context.Context, scope *ManagedClusterContext) error { + if err := r.managedClustersSvc.Delete(ctx, nil); err != nil { + return errors.Wrapf(err, "failed to delete managed cluster %s", scope.aksCluster.Name) + } + + return nil +} + +func makeKubeconfig(cluster *clusterv1.Cluster) *corev1.Secret { + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secret.Name(cluster.Name, secret.Kubeconfig), + Namespace: cluster.Namespace, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: clusterv1.GroupVersion.String(), + Kind: "Cluster", + Name: cluster.Name, + UID: cluster.UID, + }, + }, + }, + } +} diff --git 
a/controllers/azuremanagedmachinepool_controller.go b/controllers/azuremanagedmachinepool_controller.go new file mode 100644 index 00000000000..0b32619a590 --- /dev/null +++ b/controllers/azuremanagedmachinepool_controller.go @@ -0,0 +1,208 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + "os" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure/auth" + "github.com/go-logr/logr" + "github.com/pkg/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha3" + expv1 "sigs.k8s.io/cluster-api/exp/api/v1alpha3" + + "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/patch" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// AzureManagedMachinePoolReconciler reconciles a AzureManagedMachinePool object +type AzureManagedMachinePoolReconciler struct { + client.Client + Log logr.Logger + Recorder record.EventRecorder +} + +func (r *AzureManagedMachinePoolReconciler) SetupWithManager(mgr ctrl.Manager, options controller.Options) error { + return ctrl.NewControllerManagedBy(mgr). + WithOptions(options). 
+ For(&infrav1.AzureManagedMachinePool{}). + Complete(r) +} + +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=azuremanagedmachinepools,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=azuremanagedmachinepools/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch;patch +// +kubebuilder:rbac:groups=exp.cluster.x-k8s.io,resources=machinepools;machinepools/status,verbs=get;list;watch + +func (r *AzureManagedMachinePoolReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, reterr error) { + ctx := context.TODO() + log := r.Log.WithValues("namespace", req.Namespace, "infraPool", req.Name) + + // Fetch the AzureManagedMachinePool instance + infraPool := &infrav1.AzureManagedMachinePool{} + err := r.Get(ctx, req.NamespacedName, infraPool) + if err != nil { + if apierrors.IsNotFound(err) { + return reconcile.Result{}, nil + } + return reconcile.Result{}, err + } + + // Fetch the owning MachinePool. + ownerPool, err := getOwnerMachinePool(ctx, r.Client, infraPool.ObjectMeta) + if err != nil { + return reconcile.Result{}, err + } + if ownerPool == nil { + log.Info("MachinePool Controller has not yet set OwnerRef") + return reconcile.Result{}, nil + } + + // Fetch the Cluster. 
+ ownerCluster, err := util.GetOwnerCluster(ctx, r.Client, ownerPool.ObjectMeta) + if err != nil { + return reconcile.Result{}, err + } + if ownerCluster == nil { + log.Info("Cluster Controller has not yet set OwnerRef") + return reconcile.Result{}, nil + } + + log = log.WithValues("ownerCluster", ownerCluster.Name) + + // Fetch the managed cluster + infraCluster := &infrav1.AzureManagedCluster{} + infraClusterName := client.ObjectKey{ + Namespace: infraPool.Namespace, + Name: ownerCluster.Spec.InfrastructureRef.Name, + } + if err := r.Client.Get(ctx, infraClusterName, infraCluster); err != nil { + return reconcile.Result{}, err + } + + // initialize patch helper + patchhelper, err := patch.NewHelper(infraPool, r.Client) + if err != nil { + return reconcile.Result{}, errors.Wrap(err, "failed to init patchhelper") + } + + // Always close the scope when exiting this function so we can persist any changes. + defer func() { + if err := patchhelper.Patch(ctx, infraPool); err != nil && reterr == nil { + reterr = err + } + }() + + // extract subscription ID from environment + // TODO(ace): don't do this here, probably put it in the spec + subscriptionID := os.Getenv("AZURE_SUBSCRIPTION_ID") + if subscriptionID == "" { + return reconcile.Result{}, errors.New("error creating azure services. Environment variable AZURE_SUBSCRIPTION_ID is not set") + } + + // fetch azure authorizer + // TODO(ace): probably use a secret ref/object ref instead? 
+ authorizer, err := auth.NewAuthorizerFromEnvironment() + if err != nil { + return reconcile.Result{}, errors.Wrap(err, "failed to get authorizer from environment") + } + + // TODO(ace): put this back in the scope package + scope := &ManagedClusterContext{ + log: log, + patchhelper: patchhelper, + aksCluster: infraCluster, + infraPool: infraPool, + ownerCluster: ownerCluster, + ownerPool: ownerPool, + } + + // Handle deleted clusters + if !infraPool.DeletionTimestamp.IsZero() { + return r.reconcileDelete(ctx, scope, subscriptionID, authorizer) + } + + // Handle non-deleted clusters + return r.reconcileNormal(ctx, scope, subscriptionID, authorizer) +} + +func (r *AzureManagedMachinePoolReconciler) reconcileNormal(ctx context.Context, scope *ManagedClusterContext, subscriptionID string, authorizer autorest.Authorizer) (reconcile.Result, error) { + scope.log.Info("Reconciling AzureManagedMachinePool") + + // If the AzureManagedMachinePool doesn't have our finalizer, add it. + controllerutil.AddFinalizer(scope.infraPool, infrav1.ClusterFinalizer) + // Register the finalizer immediately to avoid orphaning Azure resources on delete + if err := scope.patchhelper.Patch(ctx, scope.infraPool); err != nil { + return reconcile.Result{}, err + } + + if err := newAzureManagedMachinePoolReconciler(subscriptionID, authorizer, r.Client).Reconcile(ctx, scope); err != nil { + return reconcile.Result{}, errors.Wrapf(err, "error creating AzureManagedMachinePool %s/%s", scope.infraPool.Namespace, scope.infraPool.Name) + } + + // No errors, so mark us ready so the Cluster API Cluster Controller can pull it + scope.infraPool.Status.Ready = true + + if err := scope.patchhelper.Patch(ctx, scope.infraPool); err != nil { + return reconcile.Result{}, err + } + + return reconcile.Result{}, nil +} + +func (r *AzureManagedMachinePoolReconciler) reconcileDelete(ctx context.Context, scope *ManagedClusterContext, subscriptionID string, authorizer autorest.Authorizer) (reconcile.Result, error) { + 
scope.log.Info("Reconciling AzureManagedMachinePool delete") + + if err := newAzureManagedMachinePoolReconciler(subscriptionID, authorizer, r.Client).Delete(ctx, scope); err != nil { + return reconcile.Result{}, errors.Wrapf(err, "error deleting AzureManagedMachinePool %s/%s", scope.infraPool.Namespace, scope.infraPool.Name) + } + + // Cluster is deleted so remove the finalizer. + controllerutil.RemoveFinalizer(scope.infraPool, infrav1.ClusterFinalizer) + + return reconcile.Result{}, nil +} + +// getOwnerMachinePool returns the MachinePool object owning the current resource. +func getOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*expv1.MachinePool, error) { + for _, ref := range obj.OwnerReferences { + if ref.Kind == "MachinePool" && ref.APIVersion == expv1.GroupVersion.String() { + return getMachinePoolByName(ctx, c, obj.Namespace, ref.Name) + } + } + return nil, nil +} + +// getMachinePoolByName finds and return a MachinePool object using the specified params. +func getMachinePoolByName(ctx context.Context, c client.Client, namespace, name string) (*expv1.MachinePool, error) { + m := &expv1.MachinePool{} + key := client.ObjectKey{Name: name, Namespace: namespace} + if err := c.Get(ctx, key, m); err != nil { + return nil, err + } + return m, nil +} diff --git a/controllers/azuremanagedmachinepool_reconciler.go b/controllers/azuremanagedmachinepool_reconciler.go new file mode 100644 index 00000000000..ca360177328 --- /dev/null +++ b/controllers/azuremanagedmachinepool_reconciler.go @@ -0,0 +1,138 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + "fmt" + + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute" + "github.com/Azure/go-autorest/autorest" + "github.com/pkg/errors" + azure "sigs.k8s.io/cluster-api-provider-azure/cloud" + "sigs.k8s.io/cluster-api-provider-azure/cloud/services/agentpools" + "sigs.k8s.io/cluster-api-provider-azure/cloud/services/scalesets" + "sigs.k8s.io/cluster-api-provider-azure/cloud/services/scalesetvms" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// azureManagedMachinePoolReconciler are list of services required by cluster controller +type azureManagedMachinePoolReconciler struct { + kubeclient client.Client + agentPoolsSvc azure.GetterService + scaleSetVMsSvc NodeLister + scaleSetsSvc Lister +} + +// NodeLister is a service interface exclusively for returning a list of VMSS instance provider IDs. +type NodeLister interface { + ListInstances(context.Context, interface{}) ([]string, error) +} + +// Lister is a service interface for returning generic lists. 
+type Lister interface { + List(context.Context, interface{}) ([]interface{}, error) +} + +// newAzureManagedMachinePoolReconciler populates all the services based on input scope +func newAzureManagedMachinePoolReconciler(subscriptionID string, authorizer autorest.Authorizer, kubeclient client.Client) *azureManagedMachinePoolReconciler { + return &azureManagedMachinePoolReconciler{ + kubeclient: kubeclient, + agentPoolsSvc: agentpools.NewService(authorizer, subscriptionID), + scaleSetVMsSvc: scalesetvms.NewService(authorizer, subscriptionID), + scaleSetsSvc: scalesets.NewService(authorizer, subscriptionID), + } +} + +// Reconcile reconciles all the services in pre determined order +func (r *azureManagedMachinePoolReconciler) Reconcile(ctx context.Context, scope *ManagedClusterContext) error { + scope.log.Info("reconciling machine pool") + agentPoolSpec := &agentpools.Spec{ + Name: scope.infraPool.Name, + ResourceGroup: scope.aksCluster.Spec.ResourceGroup, + Cluster: scope.aksCluster.Name, + SKU: scope.infraPool.Spec.SKU, + Replicas: 1, + } + + if scope.infraPool.Spec.OSDiskSizeGB != nil { + agentPoolSpec.OSDiskSizeGB = *scope.infraPool.Spec.OSDiskSizeGB + } + + if scope.ownerPool.Spec.Replicas != nil { + agentPoolSpec.Replicas = *scope.ownerPool.Spec.Replicas + } + + if err := r.agentPoolsSvc.Reconcile(ctx, agentPoolSpec); err != nil { + return errors.Wrapf(err, "failed to reconcile machine pool %s", scope.infraPool.Name) + } + + nodeResourceGroup := fmt.Sprintf("MC_%s_%s_%s", scope.aksCluster.Spec.ResourceGroup, scope.aksCluster.Name, scope.aksCluster.Spec.Location) + vmss, err := r.scaleSetsSvc.List(ctx, &scalesets.Spec{ResourceGroup: nodeResourceGroup}) + if err != nil { + return errors.Wrapf(err, "failed to list vmss in resource group %s", nodeResourceGroup) + } + + var match *compute.VirtualMachineScaleSet + for _, ss := range vmss { + ss := ss + switch scaleset := ss.(type) { + case compute.VirtualMachineScaleSet: + if scaleset.Tags["poolName"] != nil && 
*scaleset.Tags["poolName"] == scope.infraPool.Name { + match = &scaleset + } + default: + return errors.New("expected vmss but found wrong interface type") + } + if match != nil { break } + } + + if match == nil { + return errors.New("failed to find vm scale set matching pool") + } + + providerIDs, err := r.scaleSetVMsSvc.ListInstances(ctx, &scalesetvms.Spec{ + Name: *match.Name, + ResourceGroup: nodeResourceGroup, + }) + if err != nil { + return errors.Wrapf(err, "failed to reconcile machine pool %s", scope.infraPool.Name) + } + + scope.infraPool.Spec.ProviderIDList = providerIDs + scope.infraPool.Status.Replicas = int32(len(providerIDs)) + scope.infraPool.Status.Ready = true + + scope.log.Info("reconciled machine pool successfully") + return nil +} + +// Delete reconciles all the services in pre determined order +func (r *azureManagedMachinePoolReconciler) Delete(ctx context.Context, scope *ManagedClusterContext) error { + agentPoolSpec := &agentpools.Spec{ + Name: scope.infraPool.Name, + ResourceGroup: scope.aksCluster.Spec.ResourceGroup, + Cluster: scope.aksCluster.Name, + SKU: scope.infraPool.Spec.SKU, + } + + if err := r.agentPoolsSvc.Delete(ctx, agentPoolSpec); err != nil { + return errors.Wrapf(err, "failed to delete machine pool %s", scope.infraPool.Name) + } + + return nil +} diff --git a/go.mod b/go.mod index c06487ade2b..8b350d86b44 100644 --- a/go.mod +++ b/go.mod @@ -11,6 +11,7 @@ require ( github.com/blang/semver v3.5.1+incompatible github.com/go-logr/logr v0.1.0 github.com/golang/mock v1.4.0 + github.com/google/go-cmp v0.4.0 github.com/google/gofuzz v1.1.0 github.com/onsi/ginkgo v1.12.0 github.com/onsi/gomega v1.9.0 diff --git a/main.go b/main.go index e9af9fe8929..182800763d9 100644 --- a/main.go +++ b/main.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright 2020 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -36,6 +36,7 @@ import ( infrav1alpha3 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha3" "sigs.k8s.io/cluster-api-provider-azure/controllers" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" + expv1 "sigs.k8s.io/cluster-api/exp/api/v1alpha3" "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/util/record" ctrl "sigs.k8s.io/controller-runtime" @@ -56,6 +57,8 @@ func init() { _ = infrav1alpha3.AddToScheme(scheme) _ = clusterv1.AddToScheme(scheme) _ = infrastructurev1alpha3.AddToScheme(scheme) + _ = expv1.AddToScheme(scheme) + // +kubebuilder:scaffold:scheme } @@ -193,6 +196,22 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "AzureCluster") os.Exit(1) } + if err = (&controllers.AzureManagedMachinePoolReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("AzureManagedMachinePool"), + Recorder: mgr.GetEventRecorderFor("azuremachine-reconciler"), + }).SetupWithManager(mgr, controller.Options{MaxConcurrentReconciles: azureMachineConcurrency}); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "AzureManagedMachinePool") + os.Exit(1) + } + if err = (&controllers.AzureManagedClusterReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("AzureManagedCluster"), + Recorder: mgr.GetEventRecorderFor("azurecluster-reconciler"), + }).SetupWithManager(mgr, controller.Options{MaxConcurrentReconciles: azureClusterConcurrency}); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "AzureManagedCluster") + os.Exit(1) + } } else { if err = (&infrastructurev1alpha3.AzureCluster{}).SetupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "AzureCluster")