From 34b748e5723d372f1aa8e500d9948df96683186e Mon Sep 17 00:00:00 2001 From: Jon Huhn Date: Sun, 22 Sep 2024 09:46:57 -0700 Subject: [PATCH] cache credentials from AzureClusterIdentity --- azure/scope/cluster.go | 13 ++- azure/scope/cluster_test.go | 7 +- azure/scope/identity.go | 33 +++--- azure/scope/identity_test.go | 91 ++++++++++++--- azure/scope/managedcontrolplane.go | 3 +- azure/scope/managedcontrolplane_test.go | 1 + azure/scope/workload_identity.go | 107 +----------------- controllers/aso_credential_cache.go | 13 +-- controllers/asosecret_controller.go | 20 ++-- controllers/asosecret_controller_test.go | 6 +- controllers/azurecluster_controller.go | 13 ++- controllers/azurecluster_controller_test.go | 13 ++- controllers/azurejson_machine_controller.go | 11 +- .../azurejson_machine_controller_test.go | 6 +- .../azurejson_machinepool_controller.go | 4 +- .../azurejson_machinepool_controller_test.go | 12 +- .../azurejson_machinetemplate_controller.go | 11 +- ...urejson_machinetemplate_controller_test.go | 6 +- controllers/azuremachine_controller.go | 13 ++- controllers/azuremachine_controller_test.go | 24 ++-- .../azuremanagedcontrolplane_controller.go | 2 + ...zuremanagedcontrolplane_controller_test.go | 1 + .../azuremanagedmachinepool_controller.go | 13 ++- ...azuremanagedmachinepool_controller_test.go | 2 +- controllers/helpers.go | 20 ++-- controllers/helpers_test.go | 15 ++- controllers/suite_test.go | 11 +- .../azuremachinepool_controller.go | 6 +- .../azuremachinepool_controller_test.go | 5 +- .../azuremachinepoolmachine_controller.go | 6 +- ...azuremachinepoolmachine_controller_test.go | 2 +- exp/controllers/suite_test.go | 4 +- internal/test/env/env.go | 23 ++-- main.go | 10 ++ 34 files changed, 274 insertions(+), 253 deletions(-) diff --git a/azure/scope/cluster.go b/azure/scope/cluster.go index 3f12564fe7e..725d993f98d 100644 --- a/azure/scope/cluster.go +++ b/azure/scope/cluster.go @@ -58,11 +58,12 @@ import ( // ClusterScopeParams defines the input parameters used to create a new Scope. type ClusterScopeParams struct { AzureClients - Client client.Client - Cluster *clusterv1.Cluster - AzureCluster *infrav1.AzureCluster - Cache *ClusterCache - Timeouts azure.AsyncReconciler + Client client.Client + Cluster *clusterv1.Cluster + AzureCluster *infrav1.AzureCluster + Cache *ClusterCache + Timeouts azure.AsyncReconciler + CredentialCache azure.CredentialCache } // NewClusterScope creates a new Scope from the supplied parameters. 
@@ -78,7 +79,7 @@ func NewClusterScope(ctx context.Context, params ClusterScopeParams) (*ClusterSc return nil, errors.New("failed to generate new scope from nil AzureCluster") } - credentialsProvider, err := NewAzureCredentialsProvider(ctx, params.Client, params.AzureCluster.Spec.IdentityRef, params.AzureCluster.Namespace) + credentialsProvider, err := NewAzureCredentialsProvider(ctx, params.CredentialCache, params.Client, params.AzureCluster.Spec.IdentityRef, params.AzureCluster.Namespace) if err != nil { return nil, errors.Wrap(err, "failed to init credentials provider") } diff --git a/azure/scope/cluster_test.go b/azure/scope/cluster_test.go index 2f773244ad5..4bd147e81d0 100644 --- a/azure/scope/cluster_test.go +++ b/azure/scope/cluster_test.go @@ -116,9 +116,10 @@ func TestNewClusterScope(t *testing.T) { fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(initObjects...).Build() _, err := NewClusterScope(context.TODO(), ClusterScopeParams{ - Cluster: cluster, - AzureCluster: azureCluster, - Client: fakeClient, + Cluster: cluster, + AzureCluster: azureCluster, + Client: fakeClient, + CredentialCache: azure.NewCredentialCache(), }) g.Expect(err).NotTo(HaveOccurred()) } diff --git a/azure/scope/identity.go b/azure/scope/identity.go index cac8c5594c3..180b1c1fe81 100644 --- a/azure/scope/identity.go +++ b/azure/scope/identity.go @@ -32,6 +32,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" + "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/pkg/ot" "sigs.k8s.io/cluster-api-provider-azure/util/tele" ) @@ -52,10 +53,12 @@ type CredentialsProvider interface { type AzureCredentialsProvider struct { Client client.Client Identity *infrav1.AzureClusterIdentity + + cache azure.CredentialCache } // NewAzureCredentialsProvider creates a new AzureClusterCredentialsProvider from the supplied inputs. -func NewAzureCredentialsProvider(ctx context.Context, kubeClient client.Client, identityRef *corev1.ObjectReference, defaultNamespace string) (*AzureCredentialsProvider, error) { +func NewAzureCredentialsProvider(ctx context.Context, cache azure.CredentialCache, kubeClient client.Client, identityRef *corev1.ObjectReference, defaultNamespace string) (*AzureCredentialsProvider, error) { if identityRef == nil { return nil, errors.New("failed to generate new AzureClusterCredentialsProvider from empty identityName") } @@ -74,6 +77,7 @@ func NewAzureCredentialsProvider(ctx context.Context, kubeClient client.Client, return &AzureCredentialsProvider{ Client: kubeClient, Identity: identity, + cache: cache, }, nil } @@ -93,15 +97,14 @@ func (p *AzureCredentialsProvider) GetTokenCredential(ctx context.Context, resou switch p.Identity.Spec.Type { case infrav1.WorkloadIdentity: - azwiCredOptions, err := NewWorkloadIdentityCredentialOptions(). - WithTenantID(p.Identity.Spec.TenantID). - WithClientID(p.Identity.Spec.ClientID). 
- WithDefaults() - if err != nil { - return nil, errors.Wrapf(err, "failed to setup azwi options for identity %s", p.Identity.Name) - } - azwiCredOptions.ClientOptions.TracingProvider = tracingProvider - cred, authErr = NewWorkloadIdentityCredential(azwiCredOptions) + cred, authErr = p.cache.GetOrStoreWorkloadIdentity(&azidentity.WorkloadIdentityCredentialOptions{ + ClientOptions: azcore.ClientOptions{ + TracingProvider: tracingProvider, + }, + TenantID: p.Identity.Spec.TenantID, + ClientID: p.Identity.Spec.ClientID, + TokenFilePath: GetProjectedTokenPath(), + }) case infrav1.ManualServicePrincipal: log.Info("Identity type ManualServicePrincipal is deprecated and will be removed in a future release. See https://capz.sigs.k8s.io/topics/identities to find a supported identity type.") @@ -125,7 +128,7 @@ func (p *AzureCredentialsProvider) GetTokenCredential(ctx context.Context, resou }, }, } - cred, authErr = azidentity.NewClientSecretCredential(p.GetTenantID(), p.Identity.Spec.ClientID, clientSecret, &options) + cred, authErr = p.cache.GetOrStoreClientSecret(p.GetTenantID(), p.Identity.Spec.ClientID, clientSecret, &options) case infrav1.ServicePrincipalCertificate: var certsContent []byte @@ -141,11 +144,7 @@ func (p *AzureCredentialsProvider) GetTokenCredential(ctx context.Context, resou } certsContent = []byte(clientSecret) } - certs, key, err := azidentity.ParseCertificates(certsContent, nil) - if err != nil { - return nil, errors.Wrap(err, "failed to parse certificate data") - } - cred, authErr = azidentity.NewClientCertificateCredential(p.GetTenantID(), p.Identity.Spec.ClientID, certs, key, &azidentity.ClientCertificateCredentialOptions{ + cred, authErr = p.cache.GetOrStoreClientCert(p.GetTenantID(), p.Identity.Spec.ClientID, certsContent, nil, &azidentity.ClientCertificateCredentialOptions{ ClientOptions: azcore.ClientOptions{ TracingProvider: tracingProvider, }, @@ -158,7 +157,7 @@ func (p *AzureCredentialsProvider) GetTokenCredential(ctx context.Context, resou }, ID: azidentity.ClientID(p.Identity.Spec.ClientID), } - cred, authErr = azidentity.NewManagedIdentityCredential(&options) + cred, authErr = p.cache.GetOrStoreManagedIdentity(&options) default: return nil, errors.Errorf("identity type %s not supported", p.Identity.Spec.Type) diff --git a/azure/scope/identity_test.go b/azure/scope/identity_test.go index 02b87646c73..9cb9a66c8fe 100644 --- a/azure/scope/identity_test.go +++ b/azure/scope/identity_test.go @@ -18,16 +18,21 @@ package scope import ( "context" - "encoding/base64" + "os" + "reflect" "testing" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" . 
"github.com/onsi/gomega" + "go.uber.org/mock/gomock" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/client/fake" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" + "sigs.k8s.io/cluster-api-provider-azure/azure/mock_azure" ) func TestAllowedNamespaces(t *testing.T) { @@ -202,13 +207,7 @@ func TestHasClientSecret(t *testing.T) { } func TestGetTokenCredential(t *testing.T) { - g := NewWithT(t) - - // Test cert data was generated with this command: - // openssl req -x509 -noenc -days 3650 -newkey rsa:2048 --keyout - -subj /CN=localhost | base64 - encodedCertData := "LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRRGpyZEVyOVAwVGFVRVMKZHNwRTZjeW8yMk5VOHloUnJiWWxWOVZIMnZXdm5Qc1RoWGN4aG5kK2NVcWRORUJzd2h3Z0ZsVVFjZy9lU1Z4dwpyciszbmgrYkZUWldQY1krMUxRWXhmcEtHc3JDWFFmQjgyTERKSVpEWDRnSFlyV2YzWjI3MmpYTjFYZUZBS3RpCndES2dEWFh1UEg3cjVsSDd2QzNSWGVBZmZxTHdRSmhaZitOb0hOdHY5TUg5SWRVa1FmbURGWnRJL0NRekNyYjYKK3ZPUzZFbVVEL1EyRk5IQnpneENndUdxZ055QmNRYnhKOVFuZytaaklGdWhHWVhKbHN5UlV0ZXh5elRSNS92MApWTks4VXNaZ1JCRmhYcXJCdi9Sb0NDRyt4VkpZdG1kMFFzcnZOekRxRzZRbmpVQjIxelZYcXpLRWtXMmdSdGpYCmN3NHZZUWVoQWdNQkFBRUNnZ0VBUzZ4dGpnMG5Bb2trMGpTK1pPcEtsa01aQUZhemEzWnZ5SGlwa0hEejRQTXQKdGw3UmI1b1FaR3ZXVDJyYkVPcnhleTdCQmk3TEhHaEl1OEV4UXAvaFJHUG9CQUVUUDdYbHlDZ2hXUGtQdEV0RQpkVS9tWHhMb04wTnN6SHVmLzJzaTdwbUg4WXFHWjZRQjB0Z3IyMnV0NjBtYksrQUpGc0VFZjRhU3BCVXNwZXBKCjI4MDBzUUhzcVBFNkw2a1lrZloyR1JSWTFWOXZVcllFT0RLWnBXek1oTjNVQTluQUtIOVBCNnh2UDJPZHlNTmgKaEtnbVVVTU5JRnR3cjhwWmxKbjYwY2YwVXJXcmM1Q3ZxUUx1YUdZbHpEZ1VRR1Y0SkVWanFtOUY2bE1mRVBVdwplTjcwTVZlMXBjTGVMcTJyR0NWV1UzZ2FraC9IdkpxbFIvc2E1NDZIZ3dLQmdRRHlmMXZreVg0dzVzYm9pNkRKCmNsNWRNVUx0TU1ScEIxT2FNRlZPSmpJOWdaSjhtQ2RSanFYZFlvNWFTMktJcXhpZTh0R0c5K1NvaHhEQVdsNHQKbFNVdERzRTQ0ZlNtSUxxQzV6SWF3TlJRbm5rdjBYOEx3bVl1MFFkN1lBakpNbExUV3lEUnNqRDlYUnE0bnNSKwptSlZ3cnQ4NWlTcFM1VUZ5cnlFelBiRmowd0tCZ1FEd1d6cmFlTjBFY2NmMWlJWW1Rc1l5K3lNRUFsSE5SNXlpCmdQWHVBaFN5YnYySlJlUmhkVWIzOWhMci9Mdkt3MFplWGlMV1htWVVHcGJ5elB5WEltMHMrUEwzTFdsNjVHVEYKbCtjZlY1d2ZBZERrazZyQWRFUEVFMnB4Tjg1Q2h5YVBZUG9ZcjBvaG1WOTdWUWNZYzVGcVkrajF0TTZSMVJEdAovZldCU2E4aU93S0JnUUNwYTFkdFdXVERqNGdxVWRyc3d1MndtRWtVNDd4bFVJd1ZMbTE2NHU2NHovemk5WDZLCjJXbUNhV2ZoSjhmWWlnanlpOXpkT2ZYVDFFRmMwZ1g0UExvelo1cVJQalFwbUxZVjNLYkIwRFRGZW1KYWlUZ0UKcERXMXdhNURnUTNDVzFsSWR1TlAvZm1DR2ZrZ1FUUXc2ak9GL1hiUmdNWkVFZzJPclZJNXRZRm9wd0tCZ0VSOQppcWpFdGg1VkdlakNqWStMaVpUdmNVdnNLVWs0dGM2c3R1ZXFtaUU2ZFc3UGhzT3F1cDFmOW9aZWoxaTVDbTFMCm45dThMSlJmKzFHV3pnZDNIT3NxeVhsYjdHbkRlVi9BNkhCSzg4YjJLb05uL01rNG1ETGdZWDEvckh2U3JVOUEKRUNSR2x2WTZFVFpBeFhQWFFzR3hWS25uYXRHdGlGUjVBS05senMwUEFvR0FhNStYK0RVcUdoOWFFNUlEM3dydgpqa2p4UTJLTEZKQ05TcThmOUdTdXZwdmdYc3RIaDZ3S29NNnZNd0lTaGpnWHVVUkg4VWI0dWhSc1dueE1pbGRGCjdFRStRYVdVOWpuQ20ySFFZQXJmWHJBV3c2REJ1ZGlTa0JxZ0tjNkhqREh1bjVmWGxZVW84VWVzTk1RT3JnN2IKYnlkUVo1LzRWLzFvU1dQRVRrN2pTcjA9Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0KLS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURDVENDQWZHZ0F3SUJBZ0lVRlNudEVuK1R2NkhNMnhKUmVFQ0pwSmNDN2lVd0RRWUpLb1pJaHZjTkFRRUwKQlFBd0ZERVNNQkFHQTFVRUF3d0piRzlqWVd4b2IzTjBNQjRYRFRJME1ERXdPREU1TlRReE5Gb1hEVE0wTURFdwpOVEU1TlRReE5Gb3dGREVTTUJBR0ExVUVBd3dKYkc5allXeG9iM04wTUlJQklqQU5CZ2txaGtpRzl3MEJBUUVGCkFBT0NBUThBTUlJQkNnS0NBUUVBNDYzUksvVDlFMmxCRW5iS1JPbk1xTnRqVlBNb1VhMjJKVmZWUjlyMXI1ejcKRTRWM01ZWjNmbkZLblRSQWJNSWNJQlpWRUhJUDNrbGNjSzYvdDU0Zm14VTJWajNHUHRTMEdNWDZTaHJLd2wwSAp3Zk5pd3lTR1ExK0lCMksxbjkyZHU5bzF6ZFYzaFFDcllzQXlvQTExN2p4KzYrWlIrN3d0MFYzZ0gzNmk4RUNZCldYL2phQnpiYi9UQi9TSFZKRUg1Z3hXYlNQd2tNd3EyK3Zye
mt1aEpsQS8wTmhUUndjNE1Rb0xocW9EY2dYRUcKOFNmVUo0UG1ZeUJib1JtRnlaYk1rVkxYc2NzMDBlZjc5RlRTdkZMR1lFUVJZVjZxd2IvMGFBZ2h2c1ZTV0xabgpkRUxLN3pjdzZodWtKNDFBZHRjMVY2c3loSkZ0b0ViWTEzTU9MMkVIb1FJREFRQUJvMU13VVRBZEJnTlZIUTRFCkZnUVVmcnkvS0R0YW13TWxSUXNGUGJCaHpkdjJVNWN3SHdZRFZSMGpCQmd3Rm9BVWZyeS9LRHRhbXdNbFJRc0YKUGJCaHpkdjJVNWN3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBeVlzdApWdmV3S1JScHVZUldjNFhHNlduWXBoVWR5WkxNb0lscTBzeVoxYWo2WWJxb0s5Tk1IQVlFbkN2U292NnpJWk9hCnRyaHVVY2Y5R0Z6NWUwaUoyeklsRGMzMTJJd3N2NDF4aUMvYnMxNmtFbjhZZi9TdWpFWGFzajd2bUEzSHJGV2YKd1pUSC95Rkw1YXpvL2YrbEExUTI4WXdxRnBIbWxlMHkwTzUzVXRoNHAwdG13bG51K0NyTzlmSHAza1RsYjdmRAo2bXFmazlOcnQ4dE9DNGFIWURvcXRZVWdaaHg1OHhzSE1PVGV0S2VSbHA4SE1GOW9ST3RyaXo0blltNkloVHdvCjVrMUExM1MzQmpheGtaQ3lQWENnWHNzdVhhZ05MYXNycjVRcStWZ2RiL25EaFZlaFY4K1o0SjBZbnp5OU1ac0UKSDFOMU5mTXRzQStQRXF0UFhBPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=" - certPEM, err := base64.StdEncoding.DecodeString(encodedCertData) - g.Expect(err).NotTo(HaveOccurred()) + testCertPath := "../../test/setup/certificate" tests := []struct { name string @@ -216,6 +215,7 @@ func TestGetTokenCredential(t *testing.T) { secret *corev1.Secret identity *infrav1.AzureClusterIdentity ActiveDirectoryAuthorityHost string + cacheExpect func(*mock_azure.MockCredentialCache) }{ { name: "workload identity", @@ -235,6 +235,14 @@ func TestGetTokenCredential(t *testing.T) { TenantID: fakeTenantID, }, }, + cacheExpect: func(cache *mock_azure.MockCredentialCache) { + cache.EXPECT().GetOrStoreWorkloadIdentity(gomock.Cond(func(opts *azidentity.WorkloadIdentityCredentialOptions) bool { + // ignore tracing provider + return opts.TenantID == fakeTenantID && + opts.ClientID == fakeClientID && + opts.TokenFilePath == GetProjectedTokenPath() + })) + }, }, { name: "manual service principal", @@ -251,6 +259,7 @@ func TestGetTokenCredential(t *testing.T) { Spec: infrav1.AzureClusterIdentitySpec{ Type: infrav1.ManualServicePrincipal, TenantID: fakeTenantID, + ClientID: fakeClientID, ClientSecret: corev1.SecretReference{ Name: "test-identity-secret", }, @@ -265,6 +274,20 @@ func TestGetTokenCredential(t *testing.T) { }, }, ActiveDirectoryAuthorityHost: "https://login.microsoftonline.com", + cacheExpect: func(cache *mock_azure.MockCredentialCache) { + cache.EXPECT().GetOrStoreClientSecret(fakeTenantID, fakeClientID, "fooSecret", gomock.Cond(func(opts *azidentity.ClientSecretCredentialOptions) bool { + // ignore tracing provider + return reflect.DeepEqual(opts.ClientOptions.Cloud, cloud.Configuration{ + ActiveDirectoryAuthorityHost: "https://login.microsoftonline.com", + Services: map[cloud.ServiceName]cloud.ServiceConfiguration{ + cloud.ResourceManager: { + Audience: "", + Endpoint: "", + }, + }, + }) + })) + }, }, { name: "service principal", @@ -281,6 +304,7 @@ func TestGetTokenCredential(t *testing.T) { Spec: infrav1.AzureClusterIdentitySpec{ Type: infrav1.ServicePrincipal, TenantID: fakeTenantID, + ClientID: fakeClientID, ClientSecret: corev1.SecretReference{ Name: "test-identity-secret", }, @@ -295,6 +319,20 @@ func TestGetTokenCredential(t *testing.T) { }, }, ActiveDirectoryAuthorityHost: "https://login.microsoftonline.com", + cacheExpect: func(cache *mock_azure.MockCredentialCache) { + cache.EXPECT().GetOrStoreClientSecret(fakeTenantID, fakeClientID, "fooSecret", gomock.Cond(func(opts *azidentity.ClientSecretCredentialOptions) bool { + // ignore tracing provider + return reflect.DeepEqual(opts.ClientOptions.Cloud, cloud.Configuration{ + ActiveDirectoryAuthorityHost: "https://login.microsoftonline.com", + Services: 
map[cloud.ServiceName]cloud.ServiceConfiguration{ + cloud.ResourceManager: { + Audience: "", + Endpoint: "", + }, + }, + }) + })) + }, }, { name: "service principal certificate", @@ -311,7 +349,10 @@ func TestGetTokenCredential(t *testing.T) { Spec: infrav1.AzureClusterIdentitySpec{ Type: infrav1.ServicePrincipalCertificate, TenantID: fakeTenantID, - CertPath: "../../test/setup/certificate", + ClientID: fakeClientID, + ClientSecret: corev1.SecretReference{ + Name: "test-identity-secret", + }, }, }, secret: &corev1.Secret{ @@ -319,9 +360,12 @@ func TestGetTokenCredential(t *testing.T) { Name: "test-identity-secret", }, Data: map[string][]byte{ - "clientSecret": certPEM, + "clientSecret": []byte("fooSecret"), }, }, + cacheExpect: func(cache *mock_azure.MockCredentialCache) { + cache.EXPECT().GetOrStoreClientCert(fakeTenantID, fakeClientID, []byte("fooSecret"), gomock.Nil(), gomock.Any()) + }, }, { name: "service principal certificate with certificate filepath", @@ -338,9 +382,17 @@ func TestGetTokenCredential(t *testing.T) { Spec: infrav1.AzureClusterIdentitySpec{ Type: infrav1.ServicePrincipalCertificate, TenantID: fakeTenantID, - CertPath: "../../test/setup/certificate", + ClientID: fakeClientID, + CertPath: testCertPath, }, }, + cacheExpect: func(cache *mock_azure.MockCredentialCache) { + expectedCert, err := os.ReadFile(testCertPath) + if err != nil { + panic(err) + } + cache.EXPECT().GetOrStoreClientCert(fakeTenantID, fakeClientID, expectedCert, gomock.Nil(), gomock.Any()) + }, }, { name: "user-assigned identity", @@ -357,8 +409,15 @@ func TestGetTokenCredential(t *testing.T) { Spec: infrav1.AzureClusterIdentitySpec{ Type: infrav1.UserAssignedMSI, TenantID: fakeTenantID, + ClientID: fakeClientID, }, }, + cacheExpect: func(cache *mock_azure.MockCredentialCache) { + cache.EXPECT().GetOrStoreManagedIdentity(gomock.Cond(func(opts *azidentity.ManagedIdentityCredentialOptions) bool { + // ignore tracing provider + return opts.ID == azidentity.ClientID(fakeClientID) + })) + }, }, } @@ -377,11 +436,15 @@ func TestGetTokenCredential(t *testing.T) { initObjects = append(initObjects, tt.secret) } fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(initObjects...).Build() - provider, err := NewAzureCredentialsProvider(context.Background(), fakeClient, tt.cluster.Spec.IdentityRef, "") + + mockCtrl := gomock.NewController(t) + cache := mock_azure.NewMockCredentialCache(mockCtrl) + tt.cacheExpect(cache) + + provider, err := NewAzureCredentialsProvider(context.Background(), cache, fakeClient, tt.cluster.Spec.IdentityRef, "") g.Expect(err).NotTo(HaveOccurred()) - cred, err := provider.GetTokenCredential(context.Background(), "", tt.ActiveDirectoryAuthorityHost, "") + _, err = provider.GetTokenCredential(context.Background(), "", tt.ActiveDirectoryAuthorityHost, "") g.Expect(err).NotTo(HaveOccurred()) - g.Expect(cred).NotTo(BeNil()) }) } } diff --git a/azure/scope/managedcontrolplane.go b/azure/scope/managedcontrolplane.go index 62760b7b83b..0956cf6478a 100644 --- a/azure/scope/managedcontrolplane.go +++ b/azure/scope/managedcontrolplane.go @@ -75,6 +75,7 @@ type ManagedControlPlaneScopeParams struct { ManagedMachinePools []ManagedMachinePool Cache *ManagedControlPlaneCache Timeouts azure.AsyncReconciler + CredentialCache azure.CredentialCache } // NewManagedControlPlaneScope creates a new Scope from the supplied parameters. 
@@ -91,7 +92,7 @@ func NewManagedControlPlaneScope(ctx context.Context, params ManagedControlPlane return nil, errors.New("failed to generate new scope from nil ControlPlane") } - credentialsProvider, err := NewAzureCredentialsProvider(ctx, params.Client, params.ControlPlane.Spec.IdentityRef, params.ControlPlane.Namespace) + credentialsProvider, err := NewAzureCredentialsProvider(ctx, params.CredentialCache, params.Client, params.ControlPlane.Spec.IdentityRef, params.ControlPlane.Namespace) if err != nil { return nil, errors.Wrap(err, "failed to init credentials provider") } diff --git a/azure/scope/managedcontrolplane_test.go b/azure/scope/managedcontrolplane_test.go index 10de5b2685a..c5158edc248 100644 --- a/azure/scope/managedcontrolplane_test.go +++ b/azure/scope/managedcontrolplane_test.go @@ -70,6 +70,7 @@ func TestNewManagedControlPlaneScope(t *testing.T) { }, }, }, + CredentialCache: azure.NewCredentialCache(), } fakeIdentity := &infrav1.AzureClusterIdentity{ ObjectMeta: metav1.ObjectMeta{ diff --git a/azure/scope/workload_identity.go b/azure/scope/workload_identity.go index 745c1d302ab..6384cfda6c1 100644 --- a/azure/scope/workload_identity.go +++ b/azure/scope/workload_identity.go @@ -17,15 +17,8 @@ limitations under the License. package scope import ( - "context" "os" "strings" - "time" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - "github.com/pkg/errors" ) /* @@ -33,15 +26,10 @@ import ( For workload identity to work we need the following. |-----------------------------------------------------------------------------------| -|AZURE_AUTHORITY_HOST | The Azure Active Directory (AAD) endpoint. | -|AZURE_CLIENT_ID | The client ID of the Azure AD | -| | application or user-assigned managed identity. | -|AZURE_TENANT_ID | The tenant ID of the Azure subscription. | |AZURE_FEDERATED_TOKEN_FILE | The path of the projected service account token file. | |-----------------------------------------------------------------------------------| -With the current implementation, AZURE_CLIENT_ID and AZURE_TENANT_ID are read via AzureClusterIdentity -object and fallback to reading from env variables if not found on AzureClusterIdentity. +With the current implementation, AZURE_CLIENT_ID and AZURE_TENANT_ID are read via AzureClusterIdentity. AZURE_FEDERATED_TOKEN_FILE is the path of the projected service account token which is by default "/var/run/secrets/azure/tokens/azure-identity-token". @@ -52,104 +40,15 @@ The path can be overridden by setting "AZURE_FEDERATED_TOKEN_FILE" env variable. const ( // azureFederatedTokenFileEnvKey is the env key for AZURE_FEDERATED_TOKEN_FILE. azureFederatedTokenFileEnvKey = "AZURE_FEDERATED_TOKEN_FILE" - // azureClientIDEnvKey is the env key for AZURE_CLIENT_ID. - azureClientIDEnvKey = "AZURE_CLIENT_ID" - // azureTenantIDEnvKey is the env key for AZURE_TENANT_ID. - azureTenantIDEnvKey = "AZURE_TENANT_ID" // azureTokenFilePath is the path of the projected token. azureTokenFilePath = "/var/run/secrets/azure/tokens/azure-identity-token" // #nosec G101 - // azureFederatedTokenFileRefreshTime is the time interval after which it should be read again. - azureFederatedTokenFileRefreshTime = 5 * time.Minute ) -type workloadIdentityCredential struct { - assertion string - file string - cred *azidentity.ClientAssertionCredential - lastRead time.Time -} - -// WorkloadIdentityCredentialOptions contains the configurable options for azwi. 
-type WorkloadIdentityCredentialOptions struct { - azcore.ClientOptions - ClientID string - TenantID string - TokenFilePath string -} - -// NewWorkloadIdentityCredentialOptions returns an empty instance of WorkloadIdentityCredentialOptions. -func NewWorkloadIdentityCredentialOptions() *WorkloadIdentityCredentialOptions { - return &WorkloadIdentityCredentialOptions{} -} - -// WithClientID sets client ID to WorkloadIdentityCredentialOptions. -func (w *WorkloadIdentityCredentialOptions) WithClientID(clientID string) *WorkloadIdentityCredentialOptions { - w.ClientID = strings.TrimSpace(clientID) - return w -} - -// WithTenantID sets tenant ID to WorkloadIdentityCredentialOptions. -func (w *WorkloadIdentityCredentialOptions) WithTenantID(tenantID string) *WorkloadIdentityCredentialOptions { - w.TenantID = strings.TrimSpace(tenantID) - return w -} - -// getProjectedTokenPath return projected token file path from the env variable. -func getProjectedTokenPath() string { +// GetProjectedTokenPath return projected token file path from the env variable. +func GetProjectedTokenPath() string { tokenPath := strings.TrimSpace(os.Getenv(azureFederatedTokenFileEnvKey)) if tokenPath == "" { return azureTokenFilePath } return tokenPath } - -// WithDefaults sets token file path. It also sets the client tenant ID from injected env in -// case empty values are passed. -func (w *WorkloadIdentityCredentialOptions) WithDefaults() (*WorkloadIdentityCredentialOptions, error) { - w.TokenFilePath = getProjectedTokenPath() - - // Fallback to using client ID from env variable if not set. - if w.ClientID == "" { - w.ClientID = strings.TrimSpace(os.Getenv(azureClientIDEnvKey)) - if w.ClientID == "" { - return nil, errors.New("empty client ID") - } - } - - // Fallback to using tenant ID from env variable. - if w.TenantID == "" { - w.TenantID = strings.TrimSpace(os.Getenv(azureTenantIDEnvKey)) - if w.TenantID == "" { - return nil, errors.New("empty tenant ID") - } - } - return w, nil -} - -// NewWorkloadIdentityCredential returns a workload identity credential. -func NewWorkloadIdentityCredential(options *WorkloadIdentityCredentialOptions) (azcore.TokenCredential, error) { - w := &workloadIdentityCredential{file: options.TokenFilePath} - cred, err := azidentity.NewClientAssertionCredential(options.TenantID, options.ClientID, w.getAssertion, &azidentity.ClientAssertionCredentialOptions{ClientOptions: options.ClientOptions}) - if err != nil { - return nil, err - } - w.cred = cred - return w, nil -} - -// GetToken returns the token for azwi. 
-func (w *workloadIdentityCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - return w.cred.GetToken(ctx, opts) -} - -func (w *workloadIdentityCredential) getAssertion(context.Context) (string, error) { - if now := time.Now(); w.lastRead.Add(azureFederatedTokenFileRefreshTime).Before(now) { - content, err := os.ReadFile(w.file) - if err != nil { - return "", err - } - w.assertion = string(content) - w.lastRead = now - } - return w.assertion, nil -} diff --git a/controllers/aso_credential_cache.go b/controllers/aso_credential_cache.go index 2a84334044c..fc6c3a0cbb5 100644 --- a/controllers/aso_credential_cache.go +++ b/controllers/aso_credential_cache.go @@ -18,7 +18,6 @@ package controllers import ( "context" - "os" "strconv" "github.com/Azure/azure-sdk-for-go/sdk/azcore" @@ -31,6 +30,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/cluster-api-provider-azure/azure" + "sigs.k8s.io/cluster-api-provider-azure/azure/scope" "sigs.k8s.io/cluster-api-provider-azure/pkg/ot" "sigs.k8s.io/cluster-api-provider-azure/util/tele" ) @@ -163,7 +163,7 @@ func (c *asoCredentialCache) authTokenForScopedASOSecret(secret *corev1.Secret, ClientOptions: clientOpts, TenantID: string(d[config.AzureTenantID]), ClientID: string(d[config.AzureClientID]), - TokenFilePath: federatedTokenFilePath(), + TokenFilePath: scope.GetProjectedTokenPath(), }, ) } @@ -177,7 +177,7 @@ func (c *asoCredentialCache) authTokenForGlobalASOSecret(secret *corev1.Secret, ClientOptions: clientOpts, TenantID: string(d[config.AzureTenantID]), ClientID: string(d[config.AzureClientID]), - TokenFilePath: federatedTokenFilePath(), + TokenFilePath: scope.GetProjectedTokenPath(), }, ) } @@ -212,10 +212,3 @@ func (c *asoCredentialCache) authTokenForGlobalASOSecret(secret *corev1.Secret, }, ) } - -func federatedTokenFilePath() string { - if env, ok := os.LookupEnv("AZURE_FEDERATED_TOKEN_FILE"); ok { - return env - } - return "/var/run/secrets/azure/tokens/azure-identity-token" -} diff --git a/controllers/asosecret_controller.go b/controllers/asosecret_controller.go index b97fa08f0c5..74b8851b674 100644 --- a/controllers/asosecret_controller.go +++ b/controllers/asosecret_controller.go @@ -41,6 +41,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" + "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/azure/scope" "sigs.k8s.io/cluster-api-provider-azure/util/aso" "sigs.k8s.io/cluster-api-provider-azure/util/reconciler" @@ -53,6 +54,7 @@ type ASOSecretReconciler struct { Recorder record.EventRecorder Timeouts reconciler.Timeouts WatchFilterValue string + CredentialCache azure.CredentialCache } // SetupWithManager initializes this controller with a manager. @@ -167,10 +169,11 @@ func (asos *ASOSecretReconciler) Reconcile(ctx context.Context, req ctrl.Request // Create the scope. clusterScope, err := scope.NewClusterScope(ctx, scope.ClusterScopeParams{ - Client: asos.Client, - Cluster: cluster, - AzureCluster: ownerType, - Timeouts: asos.Timeouts, + Client: asos.Client, + Cluster: cluster, + AzureCluster: ownerType, + Timeouts: asos.Timeouts, + CredentialCache: asos.CredentialCache, }) if err != nil { return reconcile.Result{}, errors.Wrap(err, "failed to create scope") @@ -193,10 +196,11 @@ func (asos *ASOSecretReconciler) Reconcile(ctx context.Context, req ctrl.Request // Create the scope. 
clusterScope, err := scope.NewManagedControlPlaneScope(ctx, scope.ManagedControlPlaneScopeParams{ - Client: asos.Client, - Cluster: cluster, - ControlPlane: ownerType, - Timeouts: asos.Timeouts, + Client: asos.Client, + Cluster: cluster, + ControlPlane: ownerType, + Timeouts: asos.Timeouts, + CredentialCache: asos.CredentialCache, }) if err != nil { return reconcile.Result{}, errors.Wrap(err, "failed to create scope") diff --git a/controllers/asosecret_controller_test.go b/controllers/asosecret_controller_test.go index 423a35e35f5..bde3e1c8a9b 100644 --- a/controllers/asosecret_controller_test.go +++ b/controllers/asosecret_controller_test.go @@ -35,6 +35,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" + "sigs.k8s.io/cluster-api-provider-azure/azure" ) func TestASOSecretReconcile(t *testing.T) { @@ -317,8 +318,9 @@ func TestASOSecretReconcile(t *testing.T) { clientBuilder := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(tc.objects...).Build() reconciler := &ASOSecretReconciler{ - Client: clientBuilder, - Recorder: record.NewFakeRecorder(128), + Client: clientBuilder, + Recorder: record.NewFakeRecorder(128), + CredentialCache: azure.NewCredentialCache(), } _, err := reconciler.Reconcile(context.Background(), ctrl.Request{ diff --git a/controllers/azurecluster_controller.go b/controllers/azurecluster_controller.go index b32e17ab987..fc0ff18abec 100644 --- a/controllers/azurecluster_controller.go +++ b/controllers/azurecluster_controller.go @@ -50,18 +50,20 @@ type AzureClusterReconciler struct { Recorder record.EventRecorder Timeouts reconciler.Timeouts WatchFilterValue string + CredentialCache azure.CredentialCache createAzureClusterService azureClusterServiceCreator } type azureClusterServiceCreator func(clusterScope *scope.ClusterScope) (*azureClusterService, error) // NewAzureClusterReconciler returns a new AzureClusterReconciler instance. -func NewAzureClusterReconciler(client client.Client, recorder record.EventRecorder, timeouts reconciler.Timeouts, watchFilterValue string) *AzureClusterReconciler { +func NewAzureClusterReconciler(client client.Client, recorder record.EventRecorder, timeouts reconciler.Timeouts, watchFilterValue string, credCache azure.CredentialCache) *AzureClusterReconciler { acr := &AzureClusterReconciler{ Client: client, Recorder: recorder, Timeouts: timeouts, WatchFilterValue: watchFilterValue, + CredentialCache: credCache, } acr.createAzureClusterService = newAzureClusterService @@ -151,10 +153,11 @@ func (acr *AzureClusterReconciler) Reconcile(ctx context.Context, req ctrl.Reque // Create the scope. 
clusterScope, err := scope.NewClusterScope(ctx, scope.ClusterScopeParams{ - Client: acr.Client, - Cluster: cluster, - AzureCluster: azureCluster, - Timeouts: acr.Timeouts, + Client: acr.Client, + Cluster: cluster, + AzureCluster: azureCluster, + Timeouts: acr.Timeouts, + CredentialCache: acr.CredentialCache, }) if err != nil { err = errors.Wrap(err, "failed to create scope") diff --git a/controllers/azurecluster_controller_test.go b/controllers/azurecluster_controller_test.go index 13609761814..74e0f9c04cf 100644 --- a/controllers/azurecluster_controller_test.go +++ b/controllers/azurecluster_controller_test.go @@ -69,7 +69,7 @@ var _ = Describe("AzureClusterReconciler", func() { Context("Reconcile an AzureCluster", func() { It("should not error with minimal set up", func() { - reconciler := NewAzureClusterReconciler(testEnv, testEnv.GetEventRecorderFor("azurecluster-reconciler"), reconciler.Timeouts{}, "") + reconciler := NewAzureClusterReconciler(testEnv, testEnv.GetEventRecorderFor("azurecluster-reconciler"), reconciler.Timeouts{}, "", azure.NewCredentialCache()) By("Calling reconcile") name := test.RandomName("foo", 10) instance := &infrav1.AzureCluster{ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace}} @@ -276,7 +276,7 @@ func TestAzureClusterReconcilePaused(t *testing.T) { recorder := record.NewFakeRecorder(1) - reconciler := NewAzureClusterReconciler(c, recorder, reconciler.Timeouts{}, "") + reconciler := NewAzureClusterReconciler(c, recorder, reconciler.Timeouts{}, "", azure.NewCredentialCache()) name := test.RandomName("paused", 10) namespace := namespace @@ -506,10 +506,11 @@ func getClusterReconcileInputs(tc TestClusterReconcileInput) (*AzureClusterRecon } clusterScope, err := scope.NewClusterScope(context.Background(), scope.ClusterScopeParams{ - Client: client, - Cluster: cluster, - AzureCluster: azureCluster, - Cache: tc.cache, + Client: client, + Cluster: cluster, + AzureCluster: azureCluster, + Cache: tc.cache, + CredentialCache: azure.NewCredentialCache(), }) if err != nil { return nil, nil, err diff --git a/controllers/azurejson_machine_controller.go b/controllers/azurejson_machine_controller.go index 68fa53287c2..c4bbb198d8f 100644 --- a/controllers/azurejson_machine_controller.go +++ b/controllers/azurejson_machine_controller.go @@ -42,6 +42,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" + "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/azure/scope" "sigs.k8s.io/cluster-api-provider-azure/azure/services/identities" azureutil "sigs.k8s.io/cluster-api-provider-azure/util/azure" @@ -55,6 +56,7 @@ type AzureJSONMachineReconciler struct { Recorder record.EventRecorder Timeouts reconciler.Timeouts WatchFilterValue string + CredentialCache azure.CredentialCache } // SetupWithManager initializes this controller with a manager. @@ -189,10 +191,11 @@ func (r *AzureJSONMachineReconciler) Reconcile(ctx context.Context, req ctrl.Req // Create the scope. 
clusterScope, err := scope.NewClusterScope(ctx, scope.ClusterScopeParams{ - Client: r.Client, - Cluster: cluster, - AzureCluster: azureCluster, - Timeouts: r.Timeouts, + Client: r.Client, + Cluster: cluster, + AzureCluster: azureCluster, + Timeouts: r.Timeouts, + CredentialCache: r.CredentialCache, }) if err != nil { return reconcile.Result{}, errors.Wrap(err, "failed to create scope") diff --git a/controllers/azurejson_machine_controller_test.go b/controllers/azurejson_machine_controller_test.go index 5a1e32e8faa..77ff583151b 100644 --- a/controllers/azurejson_machine_controller_test.go +++ b/controllers/azurejson_machine_controller_test.go @@ -34,6 +34,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/event" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" + "sigs.k8s.io/cluster-api-provider-azure/azure" infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1" ) @@ -230,8 +231,9 @@ func TestAzureJSONMachineReconciler(t *testing.T) { client := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(tc.objects...).Build() reconciler := &AzureJSONMachineReconciler{ - Client: client, - Recorder: record.NewFakeRecorder(128), + Client: client, + Recorder: record.NewFakeRecorder(128), + CredentialCache: azure.NewCredentialCache(), } _, err := reconciler.Reconcile(context.Background(), ctrl.Request{ diff --git a/controllers/azurejson_machinepool_controller.go b/controllers/azurejson_machinepool_controller.go index cce4fdc6123..b92382d87a1 100644 --- a/controllers/azurejson_machinepool_controller.go +++ b/controllers/azurejson_machinepool_controller.go @@ -37,6 +37,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" + "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/azure/services/identities" infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1" azureutil "sigs.k8s.io/cluster-api-provider-azure/util/azure" @@ -50,6 +51,7 @@ type AzureJSONMachinePoolReconciler struct { Recorder record.EventRecorder Timeouts reconciler.Timeouts WatchFilterValue string + CredentialCache azure.CredentialCache } // SetupWithManager initializes this controller with a manager. 
@@ -135,7 +137,7 @@ func (r *AzureJSONMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl return ctrl.Result{}, nil } - clusterScope, err := GetClusterScoper(ctx, log, r.Client, cluster, r.Timeouts) + clusterScope, err := GetClusterScoper(ctx, log, r.Client, cluster, r.Timeouts, r.CredentialCache) if err != nil { return reconcile.Result{}, errors.Wrapf(err, "failed to create cluster scope for cluster %s/%s", cluster.Namespace, cluster.Name) } diff --git a/controllers/azurejson_machinepool_controller_test.go b/controllers/azurejson_machinepool_controller_test.go index 547c67137d9..154941429fb 100644 --- a/controllers/azurejson_machinepool_controller_test.go +++ b/controllers/azurejson_machinepool_controller_test.go @@ -220,8 +220,9 @@ func TestAzureJSONPoolReconciler(t *testing.T) { client := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(tc.objects...).Build() reconciler := &AzureJSONMachinePoolReconciler{ - Client: client, - Recorder: record.NewFakeRecorder(128), + Client: client, + Recorder: record.NewFakeRecorder(128), + CredentialCache: azure.NewCredentialCache(), } _, err := reconciler.Reconcile(context.Background(), ctrl.Request{ @@ -375,9 +376,10 @@ func TestAzureJSONPoolReconcilerUserAssignedIdentities(t *testing.T) { client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(azureMP, ownerMP, cluster, azureCluster, sec, fakeIdentity).Build() rec := AzureJSONMachinePoolReconciler{ - Client: client, - Recorder: record.NewFakeRecorder(42), - Timeouts: reconciler.Timeouts{}, + Client: client, + Recorder: record.NewFakeRecorder(42), + Timeouts: reconciler.Timeouts{}, + CredentialCache: azure.NewCredentialCache(), } id := "azure:///subscriptions/123/resourceGroups/test-rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/fake-provider-id" getClient = func(auth azure.Authorizer) (identities.Client, error) { diff --git a/controllers/azurejson_machinetemplate_controller.go b/controllers/azurejson_machinetemplate_controller.go index be801d35131..f00dd284333 100644 --- a/controllers/azurejson_machinetemplate_controller.go +++ b/controllers/azurejson_machinetemplate_controller.go @@ -39,6 +39,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" + "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/azure/scope" "sigs.k8s.io/cluster-api-provider-azure/azure/services/identities" azureutil "sigs.k8s.io/cluster-api-provider-azure/util/azure" @@ -52,6 +53,7 @@ type AzureJSONTemplateReconciler struct { Recorder record.EventRecorder Timeouts reconciler.Timeouts WatchFilterValue string + CredentialCache azure.CredentialCache } // SetupWithManager initializes this controller with a manager. @@ -149,10 +151,11 @@ func (r *AzureJSONTemplateReconciler) Reconcile(ctx context.Context, req ctrl.Re // Create the scope. 
clusterScope, err := scope.NewClusterScope(ctx, scope.ClusterScopeParams{ - Client: r.Client, - Cluster: cluster, - AzureCluster: azureCluster, - Timeouts: r.Timeouts, + Client: r.Client, + Cluster: cluster, + AzureCluster: azureCluster, + Timeouts: r.Timeouts, + CredentialCache: r.CredentialCache, }) if err != nil { return reconcile.Result{}, errors.Wrap(err, "failed to create scope") diff --git a/controllers/azurejson_machinetemplate_controller_test.go b/controllers/azurejson_machinetemplate_controller_test.go index 69dd8d6e315..53101ca5a4a 100644 --- a/controllers/azurejson_machinetemplate_controller_test.go +++ b/controllers/azurejson_machinetemplate_controller_test.go @@ -31,6 +31,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" + "sigs.k8s.io/cluster-api-provider-azure/azure" ) func TestAzureJSONTemplateReconciler(t *testing.T) { @@ -163,8 +164,9 @@ func TestAzureJSONTemplateReconciler(t *testing.T) { client := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(tc.objects...).Build() reconciler := &AzureJSONTemplateReconciler{ - Client: client, - Recorder: record.NewFakeRecorder(128), + Client: client, + Recorder: record.NewFakeRecorder(128), + CredentialCache: azure.NewCredentialCache(), } _, err := reconciler.Reconcile(context.Background(), ctrl.Request{ diff --git a/controllers/azuremachine_controller.go b/controllers/azuremachine_controller.go index ff921250ea2..1e50a13a80f 100644 --- a/controllers/azuremachine_controller.go +++ b/controllers/azuremachine_controller.go @@ -51,18 +51,20 @@ type AzureMachineReconciler struct { Recorder record.EventRecorder Timeouts reconciler.Timeouts WatchFilterValue string + CredentialCache azure.CredentialCache createAzureMachineService azureMachineServiceCreator } type azureMachineServiceCreator func(machineScope *scope.MachineScope) (*azureMachineService, error) // NewAzureMachineReconciler returns a new AzureMachineReconciler instance. 
-func NewAzureMachineReconciler(client client.Client, recorder record.EventRecorder, timeouts reconciler.Timeouts, watchFilterValue string) *AzureMachineReconciler { +func NewAzureMachineReconciler(client client.Client, recorder record.EventRecorder, timeouts reconciler.Timeouts, watchFilterValue string, credCache azure.CredentialCache) *AzureMachineReconciler { amr := &AzureMachineReconciler{ Client: client, Recorder: recorder, Timeouts: timeouts, WatchFilterValue: watchFilterValue, + CredentialCache: credCache, } amr.createAzureMachineService = newAzureMachineService @@ -187,10 +189,11 @@ func (amr *AzureMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reque // Create the cluster scope clusterScope, err := scope.NewClusterScope(ctx, scope.ClusterScopeParams{ - Client: amr.Client, - Cluster: cluster, - AzureCluster: azureCluster, - Timeouts: amr.Timeouts, + Client: amr.Client, + Cluster: cluster, + AzureCluster: azureCluster, + Timeouts: amr.Timeouts, + CredentialCache: amr.CredentialCache, }) if err != nil { amr.Recorder.Eventf(azureCluster, corev1.EventTypeWarning, "Error creating the cluster scope", err.Error()) diff --git a/controllers/azuremachine_controller_test.go b/controllers/azuremachine_controller_test.go index 34a1d01e080..44445375bdc 100644 --- a/controllers/azuremachine_controller_test.go +++ b/controllers/azuremachine_controller_test.go @@ -147,8 +147,9 @@ func TestAzureMachineReconcile(t *testing.T) { g.Expect(fakeClient.Get(context.TODO(), key, resultIdentity)).To(Succeed()) reconciler := &AzureMachineReconciler{ - Client: fakeClient, - Recorder: record.NewFakeRecorder(128), + Client: fakeClient, + Recorder: record.NewFakeRecorder(128), + CredentialCache: azure.NewCredentialCache(), } _, err := reconciler.Reconcile(context.Background(), ctrl.Request{ @@ -402,16 +403,19 @@ func getMachineReconcileInputs(tc TestMachineReconcileInput) (*AzureMachineRecon ). 
Build() + credCache := azure.NewCredentialCache() reconciler := &AzureMachineReconciler{ Client: client, Recorder: record.NewFakeRecorder(128), createAzureMachineService: tc.createAzureMachineService, + CredentialCache: credCache, } clusterScope, err := scope.NewClusterScope(context.Background(), scope.ClusterScopeParams{ - Client: client, - Cluster: cluster, - AzureCluster: azureCluster, + Client: client, + Cluster: cluster, + AzureCluster: azureCluster, + CredentialCache: credCache, }) if err != nil { return nil, nil, nil, err @@ -785,12 +789,14 @@ func TestConditions(t *testing.T) { g.Expect(fakeClient.Get(context.TODO(), key, resultIdentity)).To(Succeed()) recorder := record.NewFakeRecorder(10) - reconciler := NewAzureMachineReconciler(fakeClient, recorder, reconciler.Timeouts{}, "") + credCache := azure.NewCredentialCache() + reconciler := NewAzureMachineReconciler(fakeClient, recorder, reconciler.Timeouts{}, "", credCache) clusterScope, err := scope.NewClusterScope(context.TODO(), scope.ClusterScopeParams{ - Client: fakeClient, - Cluster: cluster, - AzureCluster: azureCluster, + Client: fakeClient, + Cluster: cluster, + AzureCluster: azureCluster, + CredentialCache: credCache, }) g.Expect(err).NotTo(HaveOccurred()) diff --git a/controllers/azuremanagedcontrolplane_controller.go b/controllers/azuremanagedcontrolplane_controller.go index e315fa4406e..7f2143008ce 100644 --- a/controllers/azuremanagedcontrolplane_controller.go +++ b/controllers/azuremanagedcontrolplane_controller.go @@ -51,6 +51,7 @@ type AzureManagedControlPlaneReconciler struct { Recorder record.EventRecorder Timeouts reconciler.Timeouts WatchFilterValue string + CredentialCache azure.CredentialCache getNewAzureManagedControlPlaneReconciler func(scope *scope.ManagedControlPlaneScope) (*azureManagedControlPlaneService, error) } @@ -184,6 +185,7 @@ func (amcpr *AzureManagedControlPlaneReconciler) Reconcile(ctx context.Context, ControlPlane: azureControlPlane, ManagedMachinePools: pools, Timeouts: amcpr.Timeouts, + CredentialCache: amcpr.CredentialCache, }) if err != nil { return reconcile.Result{}, errors.Wrap(err, "failed to create scope") diff --git a/controllers/azuremanagedcontrolplane_controller_test.go b/controllers/azuremanagedcontrolplane_controller_test.go index b76af1a3570..3c8de1d39f9 100644 --- a/controllers/azuremanagedcontrolplane_controller_test.go +++ b/controllers/azuremanagedcontrolplane_controller_test.go @@ -125,6 +125,7 @@ func TestAzureManagedControlPlaneReconcilePaused(t *testing.T) { Recorder: recorder, Timeouts: reconciler.Timeouts{}, WatchFilterValue: "", + CredentialCache: azure.NewCredentialCache(), getNewAzureManagedControlPlaneReconciler: newAzureManagedControlPlaneReconciler, } name := test.RandomName("paused", 10) diff --git a/controllers/azuremanagedmachinepool_controller.go b/controllers/azuremanagedmachinepool_controller.go index 214a7d78452..96e8595c107 100644 --- a/controllers/azuremanagedmachinepool_controller.go +++ b/controllers/azuremanagedmachinepool_controller.go @@ -51,18 +51,20 @@ type AzureManagedMachinePoolReconciler struct { Recorder record.EventRecorder Timeouts reconciler.Timeouts WatchFilterValue string + CredentialCache azure.CredentialCache createAzureManagedMachinePoolService azureManagedMachinePoolServiceCreator } type azureManagedMachinePoolServiceCreator func(managedMachinePoolScope *scope.ManagedMachinePoolScope, apiCallTimeout time.Duration) (*azureManagedMachinePoolService, error) // NewAzureManagedMachinePoolReconciler returns a new 
AzureManagedMachinePoolReconciler instance. -func NewAzureManagedMachinePoolReconciler(client client.Client, recorder record.EventRecorder, timeouts reconciler.Timeouts, watchFilterValue string) *AzureManagedMachinePoolReconciler { +func NewAzureManagedMachinePoolReconciler(client client.Client, recorder record.EventRecorder, timeouts reconciler.Timeouts, watchFilterValue string, credCache azure.CredentialCache) *AzureManagedMachinePoolReconciler { ampr := &AzureManagedMachinePoolReconciler{ Client: client, Recorder: recorder, Timeouts: timeouts, WatchFilterValue: watchFilterValue, + CredentialCache: credCache, } ampr.createAzureManagedMachinePoolService = newAzureManagedMachinePoolService @@ -192,10 +194,11 @@ func (ammpr *AzureManagedMachinePoolReconciler) Reconcile(ctx context.Context, r // create the managed control plane scope managedControlPlaneScope, err := scope.NewManagedControlPlaneScope(ctx, scope.ManagedControlPlaneScopeParams{ - Client: ammpr.Client, - ControlPlane: controlPlane, - Cluster: ownerCluster, - Timeouts: ammpr.Timeouts, + Client: ammpr.Client, + ControlPlane: controlPlane, + Cluster: ownerCluster, + Timeouts: ammpr.Timeouts, + CredentialCache: ammpr.CredentialCache, }) if err != nil { return reconcile.Result{}, errors.Wrap(err, "failed to create ManagedControlPlane scope") diff --git a/controllers/azuremanagedmachinepool_controller_test.go b/controllers/azuremanagedmachinepool_controller_test.go index dfcfc7a5236..98d8195c9db 100644 --- a/controllers/azuremanagedmachinepool_controller_test.go +++ b/controllers/azuremanagedmachinepool_controller_test.go @@ -177,7 +177,7 @@ func TestAzureManagedMachinePoolReconcile(t *testing.T) { defer mockCtrl.Finish() c.Setup(cb, reconciler, agentpools.EXPECT(), nodelister.EXPECT()) - controller := NewAzureManagedMachinePoolReconciler(cb.Build(), nil, reconcilerutils.Timeouts{}, "foo") + controller := NewAzureManagedMachinePoolReconciler(cb.Build(), nil, reconcilerutils.Timeouts{}, "foo", azure.NewCredentialCache()) controller.createAzureManagedMachinePoolService = func(_ *scope.ManagedMachinePoolScope, _ time.Duration) (*azureManagedMachinePoolService, error) { return &azureManagedMachinePoolService{ scope: agentpools, diff --git a/controllers/helpers.go b/controllers/helpers.go index 9ecd9c3301f..0265a4898a7 100644 --- a/controllers/helpers.go +++ b/controllers/helpers.go @@ -1078,7 +1078,7 @@ func ClusterPauseChangeAndInfrastructureReady(log logr.Logger) predicate.Funcs { } // GetClusterScoper returns a ClusterScoper for the given cluster using the infra ref pointing to either an AzureCluster or an AzureManagedCluster. 
-func GetClusterScoper(ctx context.Context, logger logr.Logger, c client.Client, cluster *clusterv1.Cluster, timeouts reconciler.Timeouts) (ClusterScoper, error) { +func GetClusterScoper(ctx context.Context, logger logr.Logger, c client.Client, cluster *clusterv1.Cluster, timeouts reconciler.Timeouts, credCache azure.CredentialCache) (ClusterScoper, error) { infraRef := cluster.Spec.InfrastructureRef switch infraRef.Kind { case "AzureCluster": @@ -1095,10 +1095,11 @@ func GetClusterScoper(ctx context.Context, logger logr.Logger, c client.Client, // Create the cluster scope return scope.NewClusterScope(ctx, scope.ClusterScopeParams{ - Client: c, - Cluster: cluster, - AzureCluster: azureCluster, - Timeouts: timeouts, + Client: c, + Cluster: cluster, + AzureCluster: azureCluster, + Timeouts: timeouts, + CredentialCache: credCache, }) case "AzureManagedCluster": @@ -1115,10 +1116,11 @@ func GetClusterScoper(ctx context.Context, logger logr.Logger, c client.Client, // Create the control plane scope return scope.NewManagedControlPlaneScope(ctx, scope.ManagedControlPlaneScopeParams{ - Client: c, - Cluster: cluster, - ControlPlane: azureManagedControlPlane, - Timeouts: timeouts, + Client: c, + Cluster: cluster, + ControlPlane: azureManagedControlPlane, + Timeouts: timeouts, + CredentialCache: credCache, }) } diff --git a/controllers/helpers_test.go b/controllers/helpers_test.go index 2c030a70c4e..f393dfefced 100644 --- a/controllers/helpers_test.go +++ b/controllers/helpers_test.go @@ -44,6 +44,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" + "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/azure/scope" "sigs.k8s.io/cluster-api-provider-azure/internal/test/mock_log" ) @@ -200,9 +201,10 @@ func TestGetCloudProviderConfig(t *testing.T) { g.Expect(fakeClient.Get(context.Background(), key, resultSecret)).To(Succeed()) clusterScope, err := scope.NewClusterScope(context.Background(), scope.ClusterScopeParams{ - Cluster: tc.cluster, - AzureCluster: tc.azureCluster, - Client: fakeClient, + Cluster: tc.cluster, + AzureCluster: tc.azureCluster, + Client: fakeClient, + CredentialCache: azure.NewCredentialCache(), }) g.Expect(err).NotTo(HaveOccurred()) @@ -319,9 +321,10 @@ func TestReconcileAzureSecret(t *testing.T) { kubeclient := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(initObjects...).Build() clusterScope, err := scope.NewClusterScope(context.Background(), scope.ClusterScopeParams{ - Cluster: cluster, - AzureCluster: azureCluster, - Client: kubeclient, + Cluster: cluster, + AzureCluster: azureCluster, + Client: kubeclient, + CredentialCache: azure.NewCredentialCache(), }) g.Expect(err).NotTo(HaveOccurred()) diff --git a/controllers/suite_test.go b/controllers/suite_test.go index 96219bc85ac..0ea17b56925 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -45,10 +45,10 @@ func TestAPIs(t *testing.T) { var _ = BeforeSuite(func() { By("bootstrapping test environment") testEnv = env.NewTestEnvironment() - Expect(NewAzureClusterReconciler(testEnv, testEnv.GetEventRecorderFor("azurecluster-reconciler"), reconciler.Timeouts{}, ""). + Expect(NewAzureClusterReconciler(testEnv, testEnv.GetEventRecorderFor("azurecluster-reconciler"), reconciler.Timeouts{}, "", testEnv.CredentialCache). 
 		SetupWithManager(context.Background(), testEnv.Manager, Options{Options: controller.Options{MaxConcurrentReconciles: 1}})).To(Succeed())
 
-	Expect(NewAzureMachineReconciler(testEnv, testEnv.GetEventRecorderFor("azuremachine-reconciler"), reconciler.Timeouts{}, "").
+	Expect(NewAzureMachineReconciler(testEnv, testEnv.GetEventRecorderFor("azuremachine-reconciler"), reconciler.Timeouts{}, "", testEnv.CredentialCache).
 		SetupWithManager(context.Background(), testEnv.Manager, Options{Options: controller.Options{MaxConcurrentReconciles: 1}})).To(Succeed())
 
 	Expect((&AzureManagedClusterReconciler{
@@ -57,12 +57,13 @@ var _ = BeforeSuite(func() {
 	}).SetupWithManager(context.Background(), testEnv.Manager, Options{Options: controller.Options{MaxConcurrentReconciles: 1}})).To(Succeed())
 
 	Expect((&AzureManagedControlPlaneReconciler{
-		Client:   testEnv,
-		Recorder: testEnv.GetEventRecorderFor("azuremanagedcontrolplane-reconciler"),
+		Client:          testEnv,
+		Recorder:        testEnv.GetEventRecorderFor("azuremanagedcontrolplane-reconciler"),
+		CredentialCache: testEnv.CredentialCache,
 	}).SetupWithManager(context.Background(), testEnv.Manager, Options{Options: controller.Options{MaxConcurrentReconciles: 1}})).To(Succeed())
 
 	Expect(NewAzureManagedMachinePoolReconciler(testEnv, testEnv.GetEventRecorderFor("azuremanagedmachinepool-reconciler"),
-		reconciler.Timeouts{}, "").SetupWithManager(context.Background(), testEnv.Manager, Options{Options: controller.Options{MaxConcurrentReconciles: 1}})).To(Succeed())
+		reconciler.Timeouts{}, "", testEnv.CredentialCache).SetupWithManager(context.Background(), testEnv.Manager, Options{Options: controller.Options{MaxConcurrentReconciles: 1}})).To(Succeed())
 
 	// +kubebuilder:scaffold:scheme
diff --git a/exp/controllers/azuremachinepool_controller.go b/exp/controllers/azuremachinepool_controller.go
index 042d36b3137..edb82b3384e 100644
--- a/exp/controllers/azuremachinepool_controller.go
+++ b/exp/controllers/azuremachinepool_controller.go
@@ -65,6 +65,7 @@ type (
 		WatchFilterValue              string
 		createAzureMachinePoolService azureMachinePoolServiceCreator
 		BootstrapConfigGVK            schema.GroupVersionKind
+		CredentialCache               azure.CredentialCache
 	}
 
 	// annotationReaderWriter provides an interface to read and write annotations.
@@ -77,7 +78,7 @@ type (
 type azureMachinePoolServiceCreator func(machinePoolScope *scope.MachinePoolScope) (*azureMachinePoolService, error)
 
 // NewAzureMachinePoolReconciler returns a new AzureMachinePoolReconciler instance.
-func NewAzureMachinePoolReconciler(client client.Client, recorder record.EventRecorder, timeouts reconciler.Timeouts, watchFilterValue, bootstrapConfigGVK string) *AzureMachinePoolReconciler {
+func NewAzureMachinePoolReconciler(client client.Client, recorder record.EventRecorder, timeouts reconciler.Timeouts, watchFilterValue, bootstrapConfigGVK string, credCache azure.CredentialCache) *AzureMachinePoolReconciler {
 	gvk := schema.FromAPIVersionAndKind(kubeadmv1.GroupVersion.String(), reflect.TypeOf((*kubeadmv1.KubeadmConfig)(nil)).Elem().Name())
 	userGVK, _ := schema.ParseKindArg(bootstrapConfigGVK)
 
@@ -91,6 +92,7 @@ func NewAzureMachinePoolReconciler(client client.Client, recorder record.EventRe
 		Timeouts:           timeouts,
 		WatchFilterValue:   watchFilterValue,
 		BootstrapConfigGVK: gvk,
+		CredentialCache:    credCache,
 	}
 
 	ampr.createAzureMachinePoolService = newAzureMachinePoolService
@@ -228,7 +230,7 @@ func (ampr *AzureMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.
 	logger = logger.WithValues("cluster", cluster.Name)
 
-	clusterScope, err := infracontroller.GetClusterScoper(ctx, logger, ampr.Client, cluster, ampr.Timeouts)
+	clusterScope, err := infracontroller.GetClusterScoper(ctx, logger, ampr.Client, cluster, ampr.Timeouts, ampr.CredentialCache)
 	if err != nil {
 		return reconcile.Result{}, errors.Wrapf(err, "failed to create cluster scope for cluster %s/%s", cluster.Namespace, cluster.Name)
 	}
diff --git a/exp/controllers/azuremachinepool_controller_test.go b/exp/controllers/azuremachinepool_controller_test.go
index 20cd390066e..3196f9d518d 100644
--- a/exp/controllers/azuremachinepool_controller_test.go
+++ b/exp/controllers/azuremachinepool_controller_test.go
@@ -33,6 +33,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
 
 	infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
+	"sigs.k8s.io/cluster-api-provider-azure/azure"
 	infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1"
 	"sigs.k8s.io/cluster-api-provider-azure/internal/test"
 	"sigs.k8s.io/cluster-api-provider-azure/util/reconciler"
@@ -45,7 +46,7 @@ var _ = Describe("AzureMachinePoolReconciler", func() {
 	Context("Reconcile an AzureMachinePool", func() {
 		It("should not error with minimal set up", func() {
 			reconciler := NewAzureMachinePoolReconciler(testEnv, testEnv.GetEventRecorderFor("azuremachinepool-reconciler"),
-				reconciler.Timeouts{}, "", "")
+				reconciler.Timeouts{}, "", "", testEnv.CredentialCache)
 			By("Calling reconcile")
 			instance := &infrav1exp.AzureMachinePool{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}}
 			result, err := reconciler.Reconcile(context.Background(), ctrl.Request{
@@ -80,7 +81,7 @@ func TestAzureMachinePoolReconcilePaused(t *testing.T) {
 	recorder := record.NewFakeRecorder(1)
 
-	reconciler := NewAzureMachinePoolReconciler(c, recorder, reconciler.Timeouts{}, "", "")
+	reconciler := NewAzureMachinePoolReconciler(c, recorder, reconciler.Timeouts{}, "", "", azure.NewCredentialCache())
 
 	name := test.RandomName("paused", 10)
 	namespace := "default"
diff --git a/exp/controllers/azuremachinepoolmachine_controller.go b/exp/controllers/azuremachinepoolmachine_controller.go
index 63e32885bab..9f7416c844c 100644
--- a/exp/controllers/azuremachinepoolmachine_controller.go
+++ b/exp/controllers/azuremachinepoolmachine_controller.go
@@ -61,6 +61,7 @@ type (
 		Timeouts          reconciler.Timeouts
 		WatchFilterValue  string
 		reconcilerFactory azureMachinePoolMachineReconcilerFactory
+		CredentialCache   azure.CredentialCache
 	}
 
 	azureMachinePoolMachineReconciler struct {
@@ -70,13 +71,14 @@ type (
 )
 
 // NewAzureMachinePoolMachineController creates a new AzureMachinePoolMachineController to handle updates to Azure Machine Pool Machines.
-func NewAzureMachinePoolMachineController(c client.Client, recorder record.EventRecorder, timeouts reconciler.Timeouts, watchFilterValue string) *AzureMachinePoolMachineController {
+func NewAzureMachinePoolMachineController(c client.Client, recorder record.EventRecorder, timeouts reconciler.Timeouts, watchFilterValue string, credCache azure.CredentialCache) *AzureMachinePoolMachineController {
 	return &AzureMachinePoolMachineController{
 		Client:            c,
 		Recorder:          recorder,
 		Timeouts:          timeouts,
 		WatchFilterValue:  watchFilterValue,
 		reconcilerFactory: newAzureMachinePoolMachineReconciler,
+		CredentialCache:   credCache,
 	}
 }
 
@@ -168,7 +170,7 @@ func (ampmr *AzureMachinePoolMachineController) Reconcile(ctx context.Context, r
 		return ctrl.Result{}, nil
 	}
 
-	clusterScope, err := infracontroller.GetClusterScoper(ctx, logger, ampmr.Client, cluster, ampmr.Timeouts)
+	clusterScope, err := infracontroller.GetClusterScoper(ctx, logger, ampmr.Client, cluster, ampmr.Timeouts, ampmr.CredentialCache)
 	if err != nil {
 		return reconcile.Result{}, errors.Wrapf(err, "failed to create cluster scope for cluster %s/%s", cluster.Namespace, cluster.Name)
 	}
diff --git a/exp/controllers/azuremachinepoolmachine_controller_test.go b/exp/controllers/azuremachinepoolmachine_controller_test.go
index 96239ef405e..c508d43123d 100644
--- a/exp/controllers/azuremachinepoolmachine_controller_test.go
+++ b/exp/controllers/azuremachinepoolmachine_controller_test.go
@@ -136,7 +136,7 @@ func TestAzureMachinePoolMachineReconciler_Reconcile(t *testing.T) {
 			c.Setup(cb, reconciler.EXPECT())
 			cl := cb.Build()
 
-			controller := NewAzureMachinePoolMachineController(cl, nil, reconcilerutils.Timeouts{}, "foo")
+			controller := NewAzureMachinePoolMachineController(cl, nil, reconcilerutils.Timeouts{}, "foo", azure.NewCredentialCache())
 			controller.reconcilerFactory = func(_ *scope.MachinePoolMachineScope) (azure.Reconciler, error) {
 				return reconciler, nil
 			}
diff --git a/exp/controllers/suite_test.go b/exp/controllers/suite_test.go
index 6064e99e233..35f3270c54f 100644
--- a/exp/controllers/suite_test.go
+++ b/exp/controllers/suite_test.go
@@ -55,10 +55,10 @@ var _ = BeforeSuite(func() {
 	ctx = log.IntoContext(ctx, logr.New(testEnv.Log))
 
 	Expect(NewAzureMachinePoolReconciler(testEnv, testEnv.GetEventRecorderFor("azuremachinepool-reconciler"),
-		reconciler.Timeouts{}, "", "").SetupWithManager(ctx, testEnv.Manager, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: 1}})).To(Succeed())
+		reconciler.Timeouts{}, "", "", testEnv.CredentialCache).SetupWithManager(ctx, testEnv.Manager, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: 1}})).To(Succeed())
 
 	Expect(NewAzureMachinePoolMachineController(testEnv, testEnv.GetEventRecorderFor("azuremachinepoolmachine-reconciler"),
-		reconciler.Timeouts{}, "").SetupWithManager(ctx, testEnv.Manager, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: 1}})).To(Succeed())
+		reconciler.Timeouts{}, "", testEnv.CredentialCache).SetupWithManager(ctx, testEnv.Manager, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: 1}})).To(Succeed())
 
 	// +kubebuilder:scaffold:scheme
diff --git a/internal/test/env/env.go b/internal/test/env/env.go
index b9520458680..d41cd8b4003 100644
--- a/internal/test/env/env.go
+++ b/internal/test/env/env.go
@@ -43,6 +43,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/metrics/server"
 
 	infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
+	"sigs.k8s.io/cluster-api-provider-azure/azure"
 	infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1"
 	"sigs.k8s.io/cluster-api-provider-azure/internal/test/record"
 )
@@ -86,10 +87,11 @@ type (
 	TestEnvironment struct {
 		manager.Manager
 		client.Client
-		Config      *rest.Config
-		Log         logr.LogSink
-		LogRecorder *record.Logger
-		doneMgr     chan struct{}
+		Config          *rest.Config
+		Log             logr.LogSink
+		LogRecorder     *record.Logger
+		CredentialCache azure.CredentialCache
+		doneMgr         chan struct{}
 	}
 )
 
@@ -113,12 +115,13 @@ func NewTestEnvironment() *TestEnvironment {
 	}
 
 	return &TestEnvironment{
-		Manager:     mgr,
-		Client:      mgr.GetClient(),
-		Config:      mgr.GetConfig(),
-		LogRecorder: logger,
-		Log:         logger,
-		doneMgr:     make(chan struct{}),
+		Manager:         mgr,
+		Client:          mgr.GetClient(),
+		Config:          mgr.GetConfig(),
+		LogRecorder:     logger,
+		Log:             logger,
+		CredentialCache: azure.NewCredentialCache(),
+		doneMgr:         make(chan struct{}),
 	}
 }
diff --git a/main.go b/main.go
index 2a37ade5a70..ce7484ba5c7 100644
--- a/main.go
+++ b/main.go
@@ -380,6 +380,7 @@ func registerControllers(ctx context.Context, mgr manager.Manager) {
 		mgr.GetEventRecorderFor("azuremachine-reconciler"),
 		timeouts,
 		watchFilterValue,
+		credCache,
 	).SetupWithManager(ctx, mgr, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: azureMachineConcurrency}, Cache: machineCache}); err != nil {
 		setupLog.Error(err, "unable to create controller", "controller", "AzureMachine")
 		os.Exit(1)
@@ -394,6 +395,7 @@ func registerControllers(ctx context.Context, mgr manager.Manager) {
 		mgr.GetEventRecorderFor("azurecluster-reconciler"),
 		timeouts,
 		watchFilterValue,
+		credCache,
 	).SetupWithManager(ctx, mgr, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: azureClusterConcurrency}, Cache: clusterCache}); err != nil {
 		setupLog.Error(err, "unable to create controller", "controller", "AzureCluster")
 		os.Exit(1)
@@ -404,6 +406,7 @@ func registerControllers(ctx context.Context, mgr manager.Manager) {
 		Recorder:         mgr.GetEventRecorderFor("azurejsontemplate-reconciler"),
 		Timeouts:         timeouts,
 		WatchFilterValue: watchFilterValue,
+		CredentialCache:  credCache,
 	}).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: azureMachineConcurrency}); err != nil {
 		setupLog.Error(err, "unable to create controller", "controller", "AzureJSONTemplate")
 		os.Exit(1)
@@ -414,6 +417,7 @@ func registerControllers(ctx context.Context, mgr manager.Manager) {
 		Recorder:         mgr.GetEventRecorderFor("azurejsonmachine-reconciler"),
 		Timeouts:         timeouts,
 		WatchFilterValue: watchFilterValue,
+		CredentialCache:  credCache,
 	}).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: azureMachineConcurrency}); err != nil {
 		setupLog.Error(err, "unable to create controller", "controller", "AzureJSONMachine")
 		os.Exit(1)
@@ -424,6 +428,7 @@ func registerControllers(ctx context.Context, mgr manager.Manager) {
 		Recorder:         mgr.GetEventRecorderFor("asosecret-reconciler"),
 		Timeouts:         timeouts,
 		WatchFilterValue: watchFilterValue,
+		CredentialCache:  credCache,
 	}).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: azureClusterConcurrency}); err != nil {
 		setupLog.Error(err, "unable to create controller", "controller", "ASOSecret")
 		os.Exit(1)
@@ -443,6 +448,7 @@ func registerControllers(ctx context.Context, mgr manager.Manager) {
 			timeouts,
 			watchFilterValue,
 			azureBootrapConfigGVK,
+			credCache,
 		).SetupWithManager(ctx, mgr, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: azureMachinePoolConcurrency}, Cache: mpCache}); err != nil {
 			setupLog.Error(err, "unable to create controller", "controller", "AzureMachinePool")
 			os.Exit(1)
@@ -458,6 +464,7 @@ func registerControllers(ctx context.Context, mgr manager.Manager) {
 			mgr.GetEventRecorderFor("azuremachinepoolmachine-reconciler"),
 			timeouts,
 			watchFilterValue,
+			credCache,
 		).SetupWithManager(ctx, mgr, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: azureMachinePoolMachineConcurrency}, Cache: mpmCache}); err != nil {
 			setupLog.Error(err, "unable to create controller", "controller", "AzureMachinePoolMachine")
 			os.Exit(1)
@@ -468,6 +475,7 @@ func registerControllers(ctx context.Context, mgr manager.Manager) {
 			Recorder:         mgr.GetEventRecorderFor("azurejsonmachinepool-reconciler"),
 			Timeouts:         timeouts,
 			WatchFilterValue: watchFilterValue,
+			CredentialCache:  credCache,
 		}).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: azureMachinePoolConcurrency}); err != nil {
 			setupLog.Error(err, "unable to create controller", "controller", "AzureJSONMachinePool")
 			os.Exit(1)
@@ -483,6 +491,7 @@ func registerControllers(ctx context.Context, mgr manager.Manager) {
 			mgr.GetEventRecorderFor("azuremanagedmachinepoolmachine-reconciler"),
 			timeouts,
 			watchFilterValue,
+			credCache,
 		).SetupWithManager(ctx, mgr, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: azureMachinePoolConcurrency}, Cache: mmpmCache}); err != nil {
 			setupLog.Error(err, "unable to create controller", "controller", "AzureManagedMachinePool")
 			os.Exit(1)
@@ -513,6 +522,7 @@ func registerControllers(ctx context.Context, mgr manager.Manager) {
 		Recorder:         mgr.GetEventRecorderFor("azuremanagedcontrolplane-reconciler"),
 		Timeouts:         timeouts,
 		WatchFilterValue: watchFilterValue,
+		CredentialCache:  credCache,
 	}).SetupWithManager(ctx, mgr, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: azureClusterConcurrency}, Cache: mcpCache}); err != nil {
 		setupLog.Error(err, "unable to create controller", "controller", "AzureManagedControlPlane")
 		os.Exit(1)