feat: aks provider #482

Merged: 8 commits, May 14, 2020
Changes from 7 commits
3 changes: 2 additions & 1 deletion .dockerignore
@@ -6,7 +6,8 @@
!/cloud/**
!/controllers/**
!/exp/**
!/feature/**
!/pkg/**
!/main.go
!/go.mod
-!/go.sum
+!/go.sum
21 changes: 18 additions & 3 deletions Makefile
@@ -73,6 +73,7 @@ RBAC_ROOT ?= $(MANIFEST_ROOT)/rbac
PULL_POLICY ?= Always

CLUSTER_TEMPLATE ?= cluster-template.yaml
MANAGED_CLUSTER_TEMPLATE ?= cluster-template-aks.yaml

## --------------------------------------
## Help
@@ -319,7 +320,7 @@ create-management-cluster: $(KUSTOMIZE) $(ENVSUBST)
$(MAKE) kind-create

# Install cert manager and wait for availability
-kubectl create -f https://github.com/jetstack/cert-manager/releases/download/v0.11.1/cert-manager.yaml
+kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v0.11.1/cert-manager.yaml
kubectl wait --for=condition=Available --timeout=5m apiservice v1beta1.webhook.cert-manager.io

# Deploy CAPI
@@ -347,16 +348,30 @@ create-workload-cluster: $(ENVSUBST)
$(ENVSUBST) < $(TEMPLATES_DIR)/$(CLUSTER_TEMPLATE) | kubectl apply -f -

# Wait for the kubeconfig to become available.
-timeout 300 bash -c "while ! kubectl get secrets | grep $(CLUSTER_NAME)-kubeconfig; do sleep 1; done"
+timeout --foreground 300 bash -c "while ! kubectl get secrets | grep $(CLUSTER_NAME)-kubeconfig; do sleep 1; done"
# Get kubeconfig and store it locally.
kubectl get secrets $(CLUSTER_NAME)-kubeconfig -o json | jq -r .data.value | base64 --decode > ./kubeconfig
-timeout 600 bash -c "while ! kubectl --kubeconfig=./kubeconfig get nodes | grep master; do sleep 1; done"
+timeout --foreground 600 bash -c "while ! kubectl --kubeconfig=./kubeconfig get nodes | grep master; do sleep 1; done"

# Deploy calico
kubectl --kubeconfig=./kubeconfig apply -f templates/addons/calico.yaml

@echo 'run "kubectl --kubeconfig=./kubeconfig ..." to work with the new target cluster'

.PHONY: create-aks-cluster
create-aks-cluster: $(KUSTOMIZE) $(ENVSUBST)
# Create managed Cluster.
$(ENVSUBST) < $(TEMPLATES_DIR)/$(MANAGED_CLUSTER_TEMPLATE) | kubectl apply -f -

# Wait for the kubeconfig to become available.
timeout --foreground 300 bash -c "while ! kubectl get secrets | grep $(CLUSTER_NAME)-kubeconfig; do sleep 1; done"
# Get kubeconfig and store it locally.
kubectl get secrets $(CLUSTER_NAME)-kubeconfig -o json | jq -r .data.value | base64 --decode > ./kubeconfig
timeout --foreground 600 bash -c "while ! kubectl --kubeconfig=./kubeconfig get nodes | grep master; do sleep 1; done"

@echo 'run "kubectl --kubeconfig=./kubeconfig ..." to work with the new target cluster'


.PHONY: create-cluster
create-cluster: create-management-cluster create-workload-cluster ## Create a workload development Kubernetes cluster on Azure in a kind management cluster.

5 changes: 5 additions & 0 deletions cloud/interfaces.go
@@ -35,3 +35,8 @@ type GetterService interface {
Reconcile(ctx context.Context, spec interface{}) error
Delete(ctx context.Context, spec interface{}) error
}

type CredentialGetter interface {
GetterService
GetCredentials(ctx context.Context, spec interface{}) ([]byte, error)
}
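
For context on how CredentialGetter is meant to be satisfied: a managed cluster service keeps the usual GetterService behavior and additionally hands back access credentials, i.e. the AKS kubeconfig. Below is a minimal sketch of a conforming service; the package name, Spec fields, and credentialsClient are hypothetical stand-ins, not part of this diff.

```go
package managedclusters

import (
	"context"

	"github.com/pkg/errors"
)

// Spec is a hypothetical managed cluster specification.
type Spec struct {
	Name          string
	ResourceGroup string
}

// credentialsClient is an assumed low-level client; in the PR this role is
// played by wrappers around the containerservice SDK.
type credentialsClient interface {
	GetCredentials(ctx context.Context, group, name string) ([]byte, error)
}

// Service sketches a type satisfying the CredentialGetter contract.
type Service struct {
	Client credentialsClient
}

// Get, Reconcile, and Delete satisfy the embedded GetterService interface
// (bodies elided for the sketch).
func (s *Service) Get(ctx context.Context, spec interface{}) (interface{}, error) { return nil, nil }
func (s *Service) Reconcile(ctx context.Context, spec interface{}) error          { return nil }
func (s *Service) Delete(ctx context.Context, spec interface{}) error             { return nil }

// GetCredentials returns the kubeconfig bytes for the cluster described by spec.
func (s *Service) GetCredentials(ctx context.Context, spec interface{}) ([]byte, error) {
	clusterSpec, ok := spec.(*Spec)
	if !ok {
		return nil, errors.New("expected managed cluster specification")
	}
	return s.Client.GetCredentials(ctx, clusterSpec.ResourceGroup, clusterSpec.Name)
}
```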
2 changes: 2 additions & 0 deletions cloud/scope/machinepool.go
@@ -53,6 +53,7 @@ type (
// MachinePoolScope defines the scope around a machine pool and its cluster.
MachinePoolScope struct {
logr.Logger
AzureClients
client client.Client
patchHelper *patch.Helper
Cluster *capiv1.Cluster
@@ -95,6 +96,7 @@ func NewMachinePoolScope(params MachinePoolScopeParams) (*MachinePoolScope, erro
MachinePool: params.MachinePool,
AzureCluster: params.AzureCluster,
AzureMachinePool: params.AzureMachinePool,
AzureClients: params.AzureClients,
Logger: params.Logger,
patchHelper: helper,
}, nil
101 changes: 101 additions & 0 deletions cloud/scope/managedcontrolplane.go
@@ -0,0 +1,101 @@
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package scope

import (
"context"

"github.com/go-logr/logr"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog/klogr"
infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha3"
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
expv1 "sigs.k8s.io/cluster-api/exp/api/v1alpha3"

"sigs.k8s.io/cluster-api/util/patch"
"sigs.k8s.io/controller-runtime/pkg/client"
)

// ManagedControlPlaneScopeParams defines the input parameters used to create a new ManagedControlPlaneScope.
type ManagedControlPlaneScopeParams struct {
AzureClients
Client client.Client
Logger logr.Logger
Cluster *clusterv1.Cluster
ControlPlane *infrav1exp.AzureManagedControlPlane
InfraMachinePool *infrav1exp.AzureManagedMachinePool
MachinePool *expv1.MachinePool
PatchTarget runtime.Object
}

// NewManagedControlPlaneScope creates a new Scope from the supplied parameters.
// This is meant to be called for each reconcile iteration.
func NewManagedControlPlaneScope(params ManagedControlPlaneScopeParams) (*ManagedControlPlaneScope, error) {
if params.Cluster == nil {
return nil, errors.New("failed to generate new scope from nil Cluster")
}

if params.ControlPlane == nil {
return nil, errors.New("failed to generate new scope from nil ControlPlane")
}

if params.Logger == nil {
params.Logger = klogr.New()
}

err := params.AzureClients.setCredentials(params.ControlPlane.Spec.SubscriptionID)
if err != nil {
return nil, errors.Wrap(err, "failed to create Azure session")
}

helper, err := patch.NewHelper(params.PatchTarget, params.Client)
if err != nil {
return nil, errors.Wrap(err, "failed to init patch helper")
}

return &ManagedControlPlaneScope{
Logger: params.Logger,
Client: params.Client,
AzureClients: params.AzureClients,
Cluster: params.Cluster,
ControlPlane: params.ControlPlane,
MachinePool: params.MachinePool,
InfraMachinePool: params.InfraMachinePool,
PatchTarget: params.PatchTarget,
patchHelper: helper,
}, nil
}

// ManagedControlPlaneScope defines the basic context for an actuator to operate upon.
type ManagedControlPlaneScope struct {
logr.Logger
Client client.Client
patchHelper *patch.Helper

AzureClients
Cluster *clusterv1.Cluster
MachinePool *expv1.MachinePool
ControlPlane *infrav1exp.AzureManagedControlPlane
InfraMachinePool *infrav1exp.AzureManagedMachinePool
PatchTarget runtime.Object
}

// PatchObject persists the cluster configuration and status.
func (s *ManagedControlPlaneScope) PatchObject(ctx context.Context) error {
return s.patchHelper.Patch(ctx, s.PatchTarget)
}
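
For orientation on how this scope is consumed: a controller typically builds it at the top of each reconcile and defers PatchObject so spec and status changes are persisted even when reconciliation fails partway through. A minimal sketch of that pattern, assuming a controller-runtime reconciler and that Azure credentials are available in the environment for setCredentials to resolve; the reconciler type is illustrative, not part of this diff.

```go
package controllers

import (
	"context"

	"sigs.k8s.io/cluster-api-provider-azure/cloud/scope"
	infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha3"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// exampleReconciler is an illustrative stand-in for the real controller.
type exampleReconciler struct {
	Client client.Client
}

func (r *exampleReconciler) reconcile(ctx context.Context, cluster *clusterv1.Cluster, cp *infrav1exp.AzureManagedControlPlane) (reterr error) {
	s, err := scope.NewManagedControlPlaneScope(scope.ManagedControlPlaneScopeParams{
		Client:       r.Client,
		Cluster:      cluster,
		ControlPlane: cp,
		PatchTarget:  cp,
	})
	if err != nil {
		return err
	}
	// Always persist changes to the patch target, even on a failed reconcile.
	defer func() {
		if err := s.PatchObject(ctx); err != nil && reterr == nil {
			reterr = err
		}
	}()

	// ... drive the managed cluster and agent pool services using s ...
	return nil
}
```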
123 changes: 123 additions & 0 deletions cloud/services/agentpools/agentpools.go
@@ -0,0 +1,123 @@
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package agentpools

import (
"context"
"fmt"

"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice"
"github.com/google/go-cmp/cmp"
"github.com/pkg/errors"
"k8s.io/klog"
azure "sigs.k8s.io/cluster-api-provider-azure/cloud"
)

// Spec contains properties to create an agent pool.
type Spec struct {
Name string
ResourceGroup string
Cluster string
Version *string
SKU string
Replicas int32
OSDiskSizeGB int32
}

// Get fetches an agent pool from Azure.
func (s *Service) Get(ctx context.Context, spec interface{}) (interface{}, error) {
agentPoolSpec, ok := spec.(*Spec)
if !ok {
return containerservice.AgentPool{}, errors.New("expected agent pool specification")
}
return s.Client.Get(ctx, agentPoolSpec.ResourceGroup, agentPoolSpec.Cluster, agentPoolSpec.Name)
}

// Reconcile idempotently creates or updates an agent pool, if possible.
func (s *Service) Reconcile(ctx context.Context, spec interface{}) error {
agentPoolSpec, ok := spec.(*Spec)
if !ok {
return errors.New("expected agent pool specification")
}

profile := containerservice.AgentPool{
ManagedClusterAgentPoolProfileProperties: &containerservice.ManagedClusterAgentPoolProfileProperties{
VMSize: containerservice.VMSizeTypes(agentPoolSpec.SKU),
OsDiskSizeGB: &agentPoolSpec.OSDiskSizeGB,
Count: &agentPoolSpec.Replicas,
Type: containerservice.VirtualMachineScaleSets,
OrchestratorVersion: agentPoolSpec.Version,
},
}

existingSpec, err := s.Get(ctx, spec)
existingPool, ok := existingSpec.(containerservice.AgentPool)
if !ok {
return errors.New("expected agent pool specification")
}

if err == nil {
[review thread on the error handling above]

Member: Are we missing the generic err != nil case, or do we not care about it?

Member: The following paths are a little hard to read; maybe use a switch with some comments on what the logic is doing?

Author (alexeldeib): Nice catch. I originally left it off because we wouldn't care about transient errors, but I should have added it back when I differentiated between Create and Update. I'll also add a doc comment noting the reason for differentiating the paths: you can't use the ManagedClusters API to create agent pools on update, only at creation time.

existingProfile := containerservice.AgentPool{
ManagedClusterAgentPoolProfileProperties: &containerservice.ManagedClusterAgentPoolProfileProperties{
VMSize: existingPool.ManagedClusterAgentPoolProfileProperties.VMSize,
OsDiskSizeGB: existingPool.ManagedClusterAgentPoolProfileProperties.OsDiskSizeGB,
Count: existingPool.ManagedClusterAgentPoolProfileProperties.Count,
Type: containerservice.VirtualMachineScaleSets,
OrchestratorVersion: existingPool.ManagedClusterAgentPoolProfileProperties.OrchestratorVersion,
},
}

diff := cmp.Diff(profile, existingProfile)
if diff != "" {
klog.V(2).Infof("update required (+new -old):\n%s", diff)
err = s.Client.CreateOrUpdate(ctx, agentPoolSpec.ResourceGroup, agentPoolSpec.Cluster, agentPoolSpec.Name, profile)
if err != nil {
return fmt.Errorf("failed to create or update agent pool, %#+v", err)
}
} else {
klog.V(2).Infof("normalized and desired managed cluster matched, no update needed")
}
} else if azure.ResourceNotFound(err) {
err = s.Client.CreateOrUpdate(ctx, agentPoolSpec.ResourceGroup, agentPoolSpec.Cluster, agentPoolSpec.Name, profile)
if err != nil {
return fmt.Errorf("failed to create or update agent pool, %#+v", err)
}
}

return nil
}

// Delete deletes the agent pool with the provided name.
func (s *Service) Delete(ctx context.Context, spec interface{}) error {
agentPoolSpec, ok := spec.(*Spec)
if !ok {
return errors.New("expected agent pool specification")
}

klog.V(2).Infof("deleting agent pool %s ", agentPoolSpec.Name)
err := s.Client.Delete(ctx, agentPoolSpec.ResourceGroup, agentPoolSpec.Cluster, agentPoolSpec.Name)
if err != nil && azure.ResourceNotFound(err) {
// already deleted
return nil
}
if err != nil {
return errors.Wrapf(err, "failed to delete agent pool %s in resource group %s", agentPoolSpec.Name, agentPoolSpec.ResourceGroup)
}

klog.V(2).Infof("successfully deleted agent pool %s ", agentPoolSpec.Name)
return nil
}
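
Tying back to the review thread above: one way to fold the missing generic error check in and flatten the branches is a single switch over the Get result. This is a hedged sketch of that restructure, reusing the same package and imports as agentpools.go; it is not the code that was merged.

```go
// reconcileSketch shows the switch-based flow suggested in review (illustrative).
func (s *Service) reconcileSketch(ctx context.Context, agentPoolSpec *Spec, profile containerservice.AgentPool) error {
	existing, err := s.Get(ctx, agentPoolSpec)
	switch {
	case err != nil && !azure.ResourceNotFound(err):
		// The generic error path the review called out: surface transient failures.
		return errors.Wrap(err, "failed to get existing agent pool")
	case err != nil:
		// Not found: the pool does not exist yet, so create it. Per the author's
		// comment, initial pools are created with the cluster via the
		// ManagedClusters API; only later pools are created through this one.
		return s.Client.CreateOrUpdate(ctx, agentPoolSpec.ResourceGroup, agentPoolSpec.Cluster, agentPoolSpec.Name, profile)
	default:
		// The pool exists: normalize the existing profile, then update only on drift.
		existingPool, ok := existing.(containerservice.AgentPool)
		if !ok {
			return errors.New("expected agent pool")
		}
		normalized := containerservice.AgentPool{
			ManagedClusterAgentPoolProfileProperties: &containerservice.ManagedClusterAgentPoolProfileProperties{
				VMSize:              existingPool.ManagedClusterAgentPoolProfileProperties.VMSize,
				OsDiskSizeGB:        existingPool.ManagedClusterAgentPoolProfileProperties.OsDiskSizeGB,
				Count:               existingPool.ManagedClusterAgentPoolProfileProperties.Count,
				Type:                containerservice.VirtualMachineScaleSets,
				OrchestratorVersion: existingPool.ManagedClusterAgentPoolProfileProperties.OrchestratorVersion,
			},
		}
		if diff := cmp.Diff(profile, normalized); diff != "" {
			klog.V(2).Infof("update required (+new -old):\n%s", diff)
			return errors.Wrap(
				s.Client.CreateOrUpdate(ctx, agentPoolSpec.ResourceGroup, agentPoolSpec.Cluster, agentPoolSpec.Name, profile),
				"failed to update agent pool",
			)
		}
		return nil
	}
}
```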
86 changes: 86 additions & 0 deletions cloud/services/agentpools/client.go
@@ -0,0 +1,86 @@
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package agentpools

import (
"context"

"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice"
"github.com/Azure/go-autorest/autorest"
azure "sigs.k8s.io/cluster-api-provider-azure/cloud"
)

// Client wraps the go-sdk agent pools API.
type Client interface {
Get(context.Context, string, string, string) (containerservice.AgentPool, error)
CreateOrUpdate(context.Context, string, string, string, containerservice.AgentPool) error
Delete(context.Context, string, string, string) error
}

// AzureClient contains the Azure go-sdk Client
type AzureClient struct {
agentpools containerservice.AgentPoolsClient
}

var _ Client = &AzureClient{}

// NewClient creates a new agent pools client from subscription ID.
func NewClient(subscriptionID string, authorizer autorest.Authorizer) *AzureClient {
c := newAgentPoolsClient(subscriptionID, authorizer)
return &AzureClient{c}
}

// newAgentPoolsClient creates a new agent pool client from subscription ID.
func newAgentPoolsClient(subscriptionID string, authorizer autorest.Authorizer) containerservice.AgentPoolsClient {
agentPoolsClient := containerservice.NewAgentPoolsClient(subscriptionID)
agentPoolsClient.Authorizer = authorizer
agentPoolsClient.AddToUserAgent(azure.UserAgent)
return agentPoolsClient
}

// Get gets an agent pool.
func (ac *AzureClient) Get(ctx context.Context, resourceGroupName, cluster, name string) (containerservice.AgentPool, error) {
return ac.agentpools.Get(ctx, resourceGroupName, cluster, name)
}

// CreateOrUpdate creates or updates an agent pool.
func (ac *AzureClient) CreateOrUpdate(ctx context.Context, resourceGroupName, cluster, name string, properties containerservice.AgentPool) error {
future, err := ac.agentpools.CreateOrUpdate(ctx, resourceGroupName, cluster, name, properties)
if err != nil {
return err
}
err = future.WaitForCompletionRef(ctx, ac.agentpools.Client)
if err != nil {
return err
}
_, err = future.Result(ac.agentpools)
return err
}

// Delete deletes an agent pool.
func (ac *AzureClient) Delete(ctx context.Context, resourceGroupName, cluster, name string) error {
future, err := ac.agentpools.Delete(ctx, resourceGroupName, cluster, name)
if err != nil {
return err
}
err = future.WaitForCompletionRef(ctx, ac.agentpools.Client)
if err != nil {
return err
}
_, err = future.Result(ac.agentpools)
return err
}
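
To round out the client: constructing and exercising it looks roughly like the following. The environment-based authorizer and the resource names are assumptions of this sketch, not something the diff prescribes.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/go-autorest/autorest/azure/auth"
	"sigs.k8s.io/cluster-api-provider-azure/cloud/services/agentpools"
)

func main() {
	// Assumes AZURE_CLIENT_ID, AZURE_CLIENT_SECRET, and AZURE_TENANT_ID are set.
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}

	client := agentpools.NewClient("<subscription-id>", authorizer)

	pool, err := client.Get(context.Background(), "my-resource-group", "my-aks-cluster", "pool0")
	if err != nil {
		log.Fatal(err)
	}
	if pool.Name != nil {
		fmt.Println(*pool.Name)
	}
}
```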