Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

update the node labels for existing AKS nodepools #2559

Merged
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions azure/services/agentpools/agentpools.go
Original file line number Diff line number Diff line change
Expand Up @@ -111,6 +111,7 @@ func (s *Service) Reconcile(ctx context.Context) error {
EnableAutoScaling: existingPool.EnableAutoScaling,
MinCount: existingPool.MinCount,
MaxCount: existingPool.MaxCount,
NodeLabels: existingPool.NodeLabels,
},
}

Expand All @@ -122,6 +123,7 @@ func (s *Service) Reconcile(ctx context.Context) error {
EnableAutoScaling: profile.EnableAutoScaling,
MinCount: profile.MinCount,
MaxCount: profile.MaxCount,
NodeLabels: profile.NodeLabels,
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Are there any existing unit tests that can be modified to test the behavior this PR fixes?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think the agent pool reconcile test could be modified for this. I will update the PR.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Added a separate test for the normalized diff, as I expect that test to grow as we keep adding more changes — with a few immutable and some mutable properties — to the agent pool.

},
}

Expand Down
145 changes: 145 additions & 0 deletions azure/services/agentpools/agentpools_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -231,6 +231,79 @@ func TestReconcile(t *testing.T) {
m.CreateOrUpdate(gomockinternal.AContext(), "my-rg", "my-cluster", "my-agent-pool", gomock.AssignableToTypeOf(containerservice.AgentPool{}), gomock.Any()).Return(autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 500}, "Internal Server Error"))
},
},
}

for _, tc := range testcases {
t.Logf("Testing " + tc.name)
tc := tc
t.Run(tc.name, func(t *testing.T) {
g := NewWithT(t)
t.Parallel()
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()

replicas := tc.agentPoolsSpec.Replicas
osDiskSizeGB := tc.agentPoolsSpec.OSDiskSizeGB

agentpoolsMock := mock_agentpools.NewMockClient(mockCtrl)
machinePoolScope := &scope.ManagedMachinePoolScope{
ControlPlane: &infrav1exp.AzureManagedControlPlane{
ObjectMeta: metav1.ObjectMeta{
Name: tc.agentPoolsSpec.Cluster,
},
Spec: infrav1exp.AzureManagedControlPlaneSpec{
ResourceGroupName: tc.agentPoolsSpec.ResourceGroup,
},
},
MachinePool: &expv1.MachinePool{
Spec: expv1.MachinePoolSpec{
Replicas: &replicas,
Template: clusterv1.MachineTemplateSpec{
Spec: clusterv1.MachineSpec{
Version: tc.agentPoolsSpec.Version,
},
},
},
},
InfraMachinePool: &infrav1exp.AzureManagedMachinePool{
ObjectMeta: metav1.ObjectMeta{
Name: tc.agentPoolsSpec.Name,
},
Spec: infrav1exp.AzureManagedMachinePoolSpec{
Name: &tc.agentPoolsSpec.Name,
SKU: tc.agentPoolsSpec.SKU,
OSDiskSizeGB: &osDiskSizeGB,
MaxPods: to.Int32Ptr(12),
OsDiskType: to.StringPtr(string(containerservice.OSDiskTypeManaged)),
},
},
}

tc.expect(agentpoolsMock.EXPECT())

s := &Service{
Client: agentpoolsMock,
scope: machinePoolScope,
}

err := s.Reconcile(context.TODO())
if tc.expectedError != "" {
g.Expect(err).To(HaveOccurred())
g.Expect(err).To(MatchError(tc.expectedError))
} else {
g.Expect(err).NotTo(HaveOccurred())
}
})
}
}

func TestNormalizedDiff(t *testing.T) {
testcases := []struct {
name string
agentPoolsSpec azure.AgentPoolSpec
expectedError string
expect func(m *mock_agentpools.MockClientMockRecorder)
}{
{
name: "no update needed on Agent Pool",
agentPoolsSpec: azure.AgentPoolSpec{
Expand Down Expand Up @@ -261,6 +334,78 @@ func TestReconcile(t *testing.T) {
}, nil)
},
},
{
name: "update needed on autoscaler configuration change",
agentPoolsSpec: azure.AgentPoolSpec{
Name: "my-agent-pool",
ResourceGroup: "my-rg",
Cluster: "my-cluster",
SKU: "Standard_D2s_v3",
Version: to.StringPtr("9.99.9999"),
EnableAutoScaling: to.BoolPtr(true),
MinCount: to.Int32Ptr(1),
MaxCount: to.Int32Ptr(3),
OSDiskSizeGB: 100,
MaxPods: to.Int32Ptr(12),
OsDiskType: to.StringPtr(string(containerservice.OSDiskTypeEphemeral)),
},
expectedError: "",
expect: func(m *mock_agentpools.MockClientMockRecorder) {
m.Get(gomockinternal.AContext(), "my-rg", "my-cluster", "my-agent-pool").Return(containerservice.AgentPool{
ManagedClusterAgentPoolProfileProperties: &containerservice.ManagedClusterAgentPoolProfileProperties{
EnableAutoScaling: to.BoolPtr(true),
MinCount: to.Int32Ptr(1),
MaxCount: to.Int32Ptr(5),
OsDiskSizeGB: to.Int32Ptr(100),
VMSize: to.StringPtr(string(containerservice.VMSizeTypesStandardD2sV3)),
OsType: containerservice.OSTypeLinux,
OrchestratorVersion: to.StringPtr("9.99.9999"),
ProvisioningState: to.StringPtr("Succeeded"),
VnetSubnetID: to.StringPtr(""),
MaxPods: to.Int32Ptr(12),
OsDiskType: containerservice.OSDiskTypeEphemeral,
},
}, nil)
m.CreateOrUpdate(gomockinternal.AContext(), "my-rg", "my-cluster", "my-agent-pool", gomock.AssignableToTypeOf(containerservice.AgentPool{}), gomock.Any()).Return(nil)
},
},
{
name: "update needed on nodepool labels change",
agentPoolsSpec: azure.AgentPoolSpec{
Name: "my-agent-pool",
ResourceGroup: "my-rg",
Cluster: "my-cluster",
SKU: "Standard_D2s_v3",
Version: to.StringPtr("9.99.9999"),
EnableAutoScaling: to.BoolPtr(true),
MinCount: to.Int32Ptr(1),
MaxCount: to.Int32Ptr(3),
OSDiskSizeGB: 100,
MaxPods: to.Int32Ptr(12),
OsDiskType: to.StringPtr(string(containerservice.OSDiskTypeEphemeral)),
NodeLabels: map[string]*string{"workload": to.StringPtr("stateless")},
},
expectedError: "",
expect: func(m *mock_agentpools.MockClientMockRecorder) {
m.Get(gomockinternal.AContext(), "my-rg", "my-cluster", "my-agent-pool").Return(containerservice.AgentPool{
ManagedClusterAgentPoolProfileProperties: &containerservice.ManagedClusterAgentPoolProfileProperties{
EnableAutoScaling: to.BoolPtr(true),
MinCount: to.Int32Ptr(1),
MaxCount: to.Int32Ptr(3),
OsDiskSizeGB: to.Int32Ptr(100),
VMSize: to.StringPtr(string(containerservice.VMSizeTypesStandardD2sV3)),
OsType: containerservice.OSTypeLinux,
OrchestratorVersion: to.StringPtr("9.99.9999"),
ProvisioningState: to.StringPtr("Succeeded"),
VnetSubnetID: to.StringPtr(""),
MaxPods: to.Int32Ptr(12),
OsDiskType: containerservice.OSDiskTypeEphemeral,
NodeLabels: map[string]*string{"workload": to.StringPtr("all")},
},
}, nil)
m.CreateOrUpdate(gomockinternal.AContext(), "my-rg", "my-cluster", "my-agent-pool", gomock.AssignableToTypeOf(containerservice.AgentPool{}), gomock.Any()).Return(nil)
},
},
}

for _, tc := range testcases {
Expand Down