diff --git a/azure/services/managedclusters/managedclusters.go b/azure/services/managedclusters/managedclusters.go
index 3b3fd23cd36..232f3e43c7b 100644
--- a/azure/services/managedclusters/managedclusters.go
+++ b/azure/services/managedclusters/managedclusters.go
@@ -98,6 +98,14 @@ func (s *Service) Reconcile(ctx context.Context) error {
 			Host: ptr.Deref(managedCluster.Properties.Fqdn, ""),
 			Port: 443,
 		}
+		if managedCluster.Properties.APIServerAccessProfile != nil &&
+			ptr.Deref(managedCluster.Properties.APIServerAccessProfile.EnablePrivateCluster, false) &&
+			!ptr.Deref(managedCluster.Properties.APIServerAccessProfile.EnablePrivateClusterPublicFQDN, false) {
+			endpoint = clusterv1.APIEndpoint{
+				Host: ptr.Deref(managedCluster.Properties.PrivateFQDN, ""),
+				Port: 443,
+			}
+		}
 		s.Scope.SetControlPlaneEndpoint(endpoint)
 
 		// Update kubeconfig data
diff --git a/azure/services/managedclusters/managedclusters_test.go b/azure/services/managedclusters/managedclusters_test.go
index ef0a592b6e4..de62c753e64 100644
--- a/azure/services/managedclusters/managedclusters_test.go
+++ b/azure/services/managedclusters/managedclusters_test.go
@@ -90,6 +90,44 @@ func TestReconcile(t *testing.T) {
 				s.UpdatePutStatus(infrav1.ManagedClusterRunningCondition, serviceName, nil)
 			},
 		},
+		{
+			name:          "create private managed cluster succeeds",
+			expectedError: "",
+			expect: func(m *mock_managedclusters.MockCredentialGetterMockRecorder, s *mock_managedclusters.MockManagedClusterScopeMockRecorder, r *mock_async.MockReconcilerMockRecorder) {
+				s.ManagedClusterSpec().Return(fakeManagedClusterSpec)
+				r.CreateOrUpdateResource(gomockinternal.AContext(), fakeManagedClusterSpec, serviceName).Return(armcontainerservice.ManagedCluster{
+					Properties: &armcontainerservice.ManagedClusterProperties{
+						APIServerAccessProfile: &armcontainerservice.ManagedClusterAPIServerAccessProfile{
+							EnablePrivateCluster:           ptr.To(true),
+							EnablePrivateClusterPublicFQDN: ptr.To(false),
+						},
+						PrivateFQDN:       ptr.To("my-managedcluster-fqdn.private"),
+						ProvisioningState: ptr.To("Succeeded"),
+						IdentityProfile: map[string]*armcontainerservice.UserAssignedIdentity{
+							kubeletIdentityKey: {
+								ResourceID: ptr.To("kubelet-id"),
+							},
+						},
+						OidcIssuerProfile: &armcontainerservice.ManagedClusterOIDCIssuerProfile{
+							Enabled:   ptr.To(true),
+							IssuerURL: ptr.To("oidc issuer url"),
+						},
+					},
+				}, nil)
+				s.SetControlPlaneEndpoint(clusterv1.APIEndpoint{
+					Host: "my-managedcluster-fqdn.private",
+					Port: 443,
+				})
+				m.GetCredentials(gomockinternal.AContext(), "my-rg", "my-managedcluster").Return([]byte("credentials"), nil)
+				s.SetKubeConfigData([]byte("credentials"))
+				s.SetKubeletIdentity("kubelet-id")
+				s.SetOIDCIssuerProfileStatus(nil)
+				s.SetOIDCIssuerProfileStatus(&infrav1.OIDCIssuerProfileStatus{
+					IssuerURL: ptr.To("oidc issuer url"),
+				})
+				s.UpdatePutStatus(infrav1.ManagedClusterRunningCondition, serviceName, nil)
+			},
+		},
 		{
 			name:          "fail to get managed cluster credentials",
 			expectedError: "failed to get credentials for managed cluster: internal server error",