Skip to content

Commit

Permalink
Fix the issue where ControlPlaneEndpoint was not updated on AzureManag…
Browse files Browse the repository at this point in the history
…edControlPlane correctly.

Before this fix, the issue occurred when the create request failed or timed out: the later update flow did not read from the cluster fetched from the cloud, so read-only fields such as Fqdn were never propagated.
  • Loading branch information
karthikbalasub committed Feb 28, 2022
1 parent 7f605d5 commit a6843ee
Show file tree
Hide file tree
Showing 2 changed files with 111 additions and 0 deletions.
8 changes: 8 additions & 0 deletions azure/services/managedclusters/managedclusters.go
Original file line number Diff line number Diff line change
Expand Up @@ -329,6 +329,11 @@ func (s *Service) Reconcile(ctx context.Context) error {
if err != nil {
return fmt.Errorf("failed to update managed cluster, %w", err)
}
} else {
// No update required, but use the MC fetched from Azure for reading fields below.
// This is to ensure the read-only fields like Fqdn from the existing MC are used for updating the
// AzureManagedCluster.
managedCluster = existingMC
}
}

Expand All @@ -339,6 +344,9 @@ func (s *Service) Reconcile(ctx context.Context) error {
Port: 443,
}
s.Scope.SetControlPlaneEndpoint(endpoint)
} else {
// Fail if cluster api endpoint is not available.
return fmt.Errorf("failed to get API endpoint for managed cluster")
}

// Update kubeconfig data
Expand Down
103 changes: 103 additions & 0 deletions azure/services/managedclusters/managedclusters_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,9 +26,11 @@ import (
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
"k8s.io/utils/pointer"

"sigs.k8s.io/cluster-api-provider-azure/azure"
"sigs.k8s.io/cluster-api-provider-azure/azure/services/managedclusters/mock_managedclusters"
gomockinternal "sigs.k8s.io/cluster-api-provider-azure/internal/test/matchers/gomock"
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4"
)

func TestReconcile(t *testing.T) {
Expand Down Expand Up @@ -123,16 +125,113 @@ func TestReconcile(t *testing.T) {
{
name: "no managedcluster exists",
expectedError: "",
expect: func(m *mock_managedclusters.MockClientMockRecorder, s *mock_managedclusters.MockManagedClusterScopeMockRecorder) {
m.CreateOrUpdate(gomockinternal.AContext(), "my-rg", "my-managedcluster", gomock.Any(), gomock.Any()).Return(containerservice.ManagedCluster{ManagedClusterProperties: &containerservice.ManagedClusterProperties{
Fqdn: pointer.String("my-managedcluster-fqdn"),
ProvisioningState: pointer.String("Succeeded"),
}}, nil).Times(1)
m.Get(gomockinternal.AContext(), "my-rg", "my-managedcluster").Return(containerservice.ManagedCluster{}, autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 404}, "Not Found"))
m.GetCredentials(gomockinternal.AContext(), "my-rg", "my-managedcluster").Times(1)
s.ClusterName().AnyTimes().Return("my-managedcluster")
s.ResourceGroup().AnyTimes().Return("my-rg")
s.ManagedClusterSpec().AnyTimes().Return(azure.ManagedClusterSpec{
Name: "my-managedcluster",
ResourceGroupName: "my-rg",
}, nil)
s.GetAllAgentPoolSpecs(gomockinternal.AContext()).AnyTimes().Return([]azure.AgentPoolSpec{
{
Name: "my-agentpool",
SKU: "Standard_D4s_v3",
Replicas: 1,
OSDiskSizeGB: 0,
},
}, nil)
s.SetControlPlaneEndpoint(gomock.Eq(clusterv1.APIEndpoint{
Host: "my-managedcluster-fqdn",
Port: 443,
})).Times(1)
s.SetKubeConfigData(gomock.Any()).Times(1)
},
},
{
name: "missing fqdn after create",
expectedError: "failed to get API endpoint for managed cluster",
expect: func(m *mock_managedclusters.MockClientMockRecorder, s *mock_managedclusters.MockManagedClusterScopeMockRecorder) {
m.CreateOrUpdate(gomockinternal.AContext(), "my-rg", "my-managedcluster", gomock.Any(), gomock.Any()).Return(containerservice.ManagedCluster{ManagedClusterProperties: &containerservice.ManagedClusterProperties{}}, nil)
m.Get(gomockinternal.AContext(), "my-rg", "my-managedcluster").Return(containerservice.ManagedCluster{}, autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 404}, "Not Found"))
s.ClusterName().AnyTimes().Return("my-managedcluster")
s.ResourceGroup().AnyTimes().Return("my-rg")
s.ManagedClusterSpec().AnyTimes().Return(azure.ManagedClusterSpec{
Name: "my-managedcluster",
ResourceGroupName: "my-rg",
}, nil)
s.GetAllAgentPoolSpecs(gomockinternal.AContext()).AnyTimes().Return([]azure.AgentPoolSpec{
{
Name: "my-agentpool",
SKU: "Standard_D4s_v3",
Replicas: 1,
OSDiskSizeGB: 0,
},
}, nil)
},
},
{
name: "set correct ControlPlaneEndpoint using fqdn from existing MC after update",
expectedError: "",
expect: func(m *mock_managedclusters.MockClientMockRecorder, s *mock_managedclusters.MockManagedClusterScopeMockRecorder) {
m.CreateOrUpdate(gomockinternal.AContext(), "my-rg", "my-managedcluster", gomock.Any(), gomock.Any()).Return(containerservice.ManagedCluster{
ManagedClusterProperties: &containerservice.ManagedClusterProperties {
Fqdn: pointer.String("my-managedcluster-fqdn"),
ProvisioningState: pointer.String("Succeeded"),
},
}, nil)
m.Get(gomockinternal.AContext(), "my-rg", "my-managedcluster").Return(containerservice.ManagedCluster{
ManagedClusterProperties: &containerservice.ManagedClusterProperties {
Fqdn: pointer.String("my-managedcluster-fqdn"),
ProvisioningState: pointer.String("Succeeded"),
},
}, nil).Times(1)
m.GetCredentials(gomockinternal.AContext(), "my-rg", "my-managedcluster").Times(1)
s.ClusterName().AnyTimes().Return("my-managedcluster")
s.ResourceGroup().AnyTimes().Return("my-rg")
s.ManagedClusterSpec().AnyTimes().Return(azure.ManagedClusterSpec{
Name: "my-managedcluster",
ResourceGroupName: "my-rg",
}, nil)
s.GetAllAgentPoolSpecs(gomockinternal.AContext()).AnyTimes().Return([]azure.AgentPoolSpec{
{
Name: "my-agentpool",
SKU: "Standard_D4s_v3",
Replicas: 1,
OSDiskSizeGB: 0,
},
}, nil)
s.SetControlPlaneEndpoint(gomock.Eq(clusterv1.APIEndpoint{
Host: "my-managedcluster-fqdn",
Port: 443,
})).Times(1)
s.SetKubeConfigData(gomock.Any()).Times(1)
},
},
{
name: "no update needed - set correct ControlPlaneEndpoint using fqdn from existing MC",
expectedError: "",
expect: func(m *mock_managedclusters.MockClientMockRecorder, s *mock_managedclusters.MockManagedClusterScopeMockRecorder) {
m.Get(gomockinternal.AContext(), "my-rg", "my-managedcluster").Return(containerservice.ManagedCluster{
ManagedClusterProperties: &containerservice.ManagedClusterProperties {
Fqdn: pointer.String("my-managedcluster-fqdn"),
ProvisioningState: pointer.String("Succeeded"),
KubernetesVersion: pointer.String("1.1"),
},
}, nil).Times(1)
m.GetCredentials(gomockinternal.AContext(), "my-rg", "my-managedcluster").Times(1)
s.ClusterName().AnyTimes().Return("my-managedcluster")
s.ResourceGroup().AnyTimes().Return("my-rg")
s.ManagedClusterAnnotations().Times(1).Return(map[string]string{})
s.ManagedClusterSpec().AnyTimes().Return(azure.ManagedClusterSpec{
Name: "my-managedcluster",
ResourceGroupName: "my-rg",
Version: "1.1",
}, nil)
s.GetAllAgentPoolSpecs(gomockinternal.AContext()).AnyTimes().Return([]azure.AgentPoolSpec{
{
Expand All @@ -142,6 +241,10 @@ func TestReconcile(t *testing.T) {
OSDiskSizeGB: 0,
},
}, nil)
s.SetControlPlaneEndpoint(gomock.Eq(clusterv1.APIEndpoint{
Host: "my-managedcluster-fqdn",
Port: 443,
})).Times(1)
s.SetKubeConfigData(gomock.Any()).Times(1)
},
},
Expand Down

0 comments on commit a6843ee

Please sign in to comment.