diff --git a/api/v1beta1/azurecluster_webhook.go b/api/v1beta1/azurecluster_webhook.go index f3b5b0c6d81..3ca704c1a03 100644 --- a/api/v1beta1/azurecluster_webhook.go +++ b/api/v1beta1/azurecluster_webhook.go @@ -23,13 +23,9 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/validation/field" ctrl "sigs.k8s.io/controller-runtime" - logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/webhook" ) -// log is for logging in this package. -var clusterlog = logf.Log.WithName("azurecluster-resource") - // SetupWebhookWithManager sets up and registers the webhook with the manager. func (c *AzureCluster) SetupWebhookWithManager(mgr ctrl.Manager) error { return ctrl.NewWebhookManagedBy(mgr). @@ -45,21 +41,16 @@ var _ webhook.Defaulter = &AzureCluster{} // Default implements webhook.Defaulter so a webhook will be registered for the type. func (c *AzureCluster) Default() { - clusterlog.Info("default", "name", c.Name) - c.setDefaults() } // ValidateCreate implements webhook.Validator so a webhook will be registered for the type. func (c *AzureCluster) ValidateCreate() error { - clusterlog.Info("validate create", "name", c.Name) - return c.validateCluster(nil) } // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type. func (c *AzureCluster) ValidateUpdate(oldRaw runtime.Object) error { - clusterlog.Info("validate update", "name", c.Name) var allErrs field.ErrorList old := oldRaw.(*AzureCluster) @@ -132,7 +123,5 @@ func (c *AzureCluster) ValidateUpdate(oldRaw runtime.Object) error { // ValidateDelete implements webhook.Validator so a webhook will be registered for the type. 
func (c *AzureCluster) ValidateDelete() error { - clusterlog.Info("validate delete", "name", c.Name) - return nil } diff --git a/api/v1beta1/azuremachine_default.go b/api/v1beta1/azuremachine_default.go index ee7cfbfa0f8..591d2aa1928 100644 --- a/api/v1beta1/azuremachine_default.go +++ b/api/v1beta1/azuremachine_default.go @@ -19,9 +19,9 @@ package v1beta1 import ( "encoding/base64" - "github.com/go-logr/logr" "golang.org/x/crypto/ssh" "k8s.io/apimachinery/pkg/util/uuid" + ctrl "sigs.k8s.io/controller-runtime" utilSSH "sigs.k8s.io/cluster-api-provider-azure/util/ssh" ) @@ -85,10 +85,10 @@ func (s *AzureMachineSpec) SetIdentityDefaults() { } // SetDefaults sets to the defaults for the AzureMachineSpec. -func (s *AzureMachineSpec) SetDefaults(log logr.Logger) { +func (s *AzureMachineSpec) SetDefaults() { err := s.SetDefaultSSHPublicKey() if err != nil { - log.Error(err, "SetDefaultSshPublicKey failed") + ctrl.Log.WithName("SetDefault").Error(err, "SetDefaultSshPublicKey failed") } s.SetDefaultCachingType() s.SetDataDisksDefaults() diff --git a/api/v1beta1/azuremachine_webhook.go b/api/v1beta1/azuremachine_webhook.go index e2b2f1bdd5f..d27cea2774e 100644 --- a/api/v1beta1/azuremachine_webhook.go +++ b/api/v1beta1/azuremachine_webhook.go @@ -23,13 +23,9 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/validation/field" ctrl "sigs.k8s.io/controller-runtime" - logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/webhook" ) -// log is for logging in this package. -var machinelog = logf.Log.WithName("azuremachine-resource") - // SetupWebhookWithManager sets up and registers the webhook with the manager. func (m *AzureMachine) SetupWebhookWithManager(mgr ctrl.Manager) error { return ctrl.NewWebhookManagedBy(mgr). @@ -44,8 +40,6 @@ var _ webhook.Validator = &AzureMachine{} // ValidateCreate implements webhook.Validator so a webhook will be registered for the type. 
func (m *AzureMachine) ValidateCreate() error { - machinelog.Info("validate create", "name", m.Name) - if allErrs := ValidateAzureMachineSpec(m.Spec); len(allErrs) > 0 { return apierrors.NewInvalid(GroupVersion.WithKind("AzureMachine").GroupKind(), m.Name, allErrs) } @@ -55,7 +49,6 @@ func (m *AzureMachine) ValidateCreate() error { // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type. func (m *AzureMachine) ValidateUpdate(oldRaw runtime.Object) error { - machinelog.Info("validate update", "name", m.Name) var allErrs field.ErrorList old := oldRaw.(*AzureMachine) @@ -151,13 +144,10 @@ func (m *AzureMachine) ValidateUpdate(oldRaw runtime.Object) error { // ValidateDelete implements webhook.Validator so a webhook will be registered for the type. func (m *AzureMachine) ValidateDelete() error { - machinelog.Info("validate delete", "name", m.Name) - return nil } // Default implements webhookutil.defaulter so a webhook will be registered for the type. func (m *AzureMachine) Default() { - machinelog.Info("default", "name", m.Name) - m.Spec.SetDefaults(machinelog) + m.Spec.SetDefaults() } diff --git a/api/v1beta1/azuremachinetemplate_webhook.go b/api/v1beta1/azuremachinetemplate_webhook.go index 4ca25d9deac..42b4bfff74f 100644 --- a/api/v1beta1/azuremachinetemplate_webhook.go +++ b/api/v1beta1/azuremachinetemplate_webhook.go @@ -23,16 +23,12 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/validation/field" ctrl "sigs.k8s.io/controller-runtime" - logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/webhook" ) // AzureMachineTemplateImmutableMsg ... const AzureMachineTemplateImmutableMsg = "AzureMachineTemplate spec.template.spec field is immutable. Please create new resource instead. ref doc: https://cluster-api.sigs.k8s.io/tasks/change-machine-template.html" -// log is for logging in this package. 
-var machinetemplatelog = logf.Log.WithName("azuremachinetemplate-resource") - // SetupWebhookWithManager sets up and registers the webhook with the manager. func (r *AzureMachineTemplate) SetupWebhookWithManager(mgr ctrl.Manager) error { return ctrl.NewWebhookManagedBy(mgr). @@ -48,7 +44,6 @@ var _ webhook.Validator = &AzureMachineTemplate{} // ValidateCreate implements webhook.Validator so a webhook will be registered for the type. func (r *AzureMachineTemplate) ValidateCreate() error { - machinetemplatelog.Info("validate create", "name", r.Name) spec := r.Spec.Template.Spec if allErrs := ValidateAzureMachineSpec(spec); len(allErrs) > 0 { @@ -59,7 +54,6 @@ func (r *AzureMachineTemplate) ValidateCreate() error { // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type. func (r *AzureMachineTemplate) ValidateUpdate(oldRaw runtime.Object) error { - machinetemplatelog.Info("validate update", "name", r.Name) var allErrs field.ErrorList old := oldRaw.(*AzureMachineTemplate) @@ -98,6 +92,5 @@ func (r *AzureMachineTemplate) ValidateDelete() error { // Default implements webhookutil.defaulter so a webhook will be registered for the type. 
func (r *AzureMachineTemplate) Default() { - machinetemplatelog.Info("default", "name", r.Name) - r.Spec.Template.Spec.SetDefaults(machinetemplatelog) + r.Spec.Template.Spec.SetDefaults() } diff --git a/azure/defaults.go b/azure/defaults.go index fb8e2b4608b..7a019fd6102 100644 --- a/azure/defaults.go +++ b/azure/defaults.go @@ -20,12 +20,10 @@ import ( "fmt" "net/http" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "github.com/blang/semver" "github.com/pkg/errors" - infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/util/tele" "sigs.k8s.io/cluster-api-provider-azure/version" diff --git a/azure/scope/cluster.go b/azure/scope/cluster.go index 4e09fc96453..637d7150397 100644 --- a/azure/scope/cluster.go +++ b/azure/scope/cluster.go @@ -26,9 +26,7 @@ import ( "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/to" - "github.com/go-logr/logr" "github.com/pkg/errors" - "k8s.io/klog/v2/klogr" "k8s.io/utils/net" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util/conditions" @@ -47,7 +45,6 @@ import ( type ClusterScopeParams struct { AzureClients Client client.Client - Logger logr.Logger Cluster *clusterv1.Cluster AzureCluster *infrav1.AzureCluster } @@ -65,10 +62,6 @@ func NewClusterScope(ctx context.Context, params ClusterScopeParams) (*ClusterSc return nil, errors.New("failed to generate new scope from nil AzureCluster") } - if params.Logger == nil { - params.Logger = klogr.New() - } - if params.AzureCluster.Spec.IdentityRef == nil { err := params.AzureClients.setCredentials(params.AzureCluster.Spec.SubscriptionID, params.AzureCluster.Spec.AzureEnvironment) if err != nil { @@ -91,7 +84,6 @@ func NewClusterScope(ctx context.Context, params ClusterScopeParams) (*ClusterSc } return &ClusterScope{ - Logger: params.Logger, Client: params.Client, AzureClients: params.AzureClients, 
Cluster: params.Cluster, @@ -102,7 +94,6 @@ func NewClusterScope(ctx context.Context, params ClusterScopeParams) (*ClusterSc // ClusterScope defines the basic context for an actuator to operate upon. type ClusterScope struct { - logr.Logger Client client.Client patchHelper *patch.Helper @@ -219,7 +210,7 @@ func (s *ClusterScope) LBSpecs() []azure.LBSpec { // RouteTableSpecs returns the node route table. func (s *ClusterScope) RouteTableSpecs() []azure.RouteTableSpec { - routetables := []azure.RouteTableSpec{} + var routetables []azure.RouteTableSpec for _, subnet := range s.AzureCluster.Spec.NetworkSpec.Subnets { if subnet.RouteTable.Name != "" { routetables = append(routetables, azure.RouteTableSpec{Name: subnet.RouteTable.Name, Subnet: subnet}) @@ -231,7 +222,7 @@ func (s *ClusterScope) RouteTableSpecs() []azure.RouteTableSpec { // NatGatewaySpecs returns the node nat gateway. func (s *ClusterScope) NatGatewaySpecs() []azure.NatGatewaySpec { - natGateways := []azure.NatGatewaySpec{} + var natGateways []azure.NatGatewaySpec // We ignore the control plane nat gateway, as we will always use a LB to enable egress on the control plane. for _, subnet := range s.NodeSubnets() { @@ -251,12 +242,12 @@ func (s *ClusterScope) NatGatewaySpecs() []azure.NatGatewaySpec { // NSGSpecs returns the security group specs. func (s *ClusterScope) NSGSpecs() []azure.NSGSpec { - nsgspecs := []azure.NSGSpec{} - for _, subnet := range s.AzureCluster.Spec.NetworkSpec.Subnets { - nsgspecs = append(nsgspecs, azure.NSGSpec{ + nsgspecs := make([]azure.NSGSpec, len(s.AzureCluster.Spec.NetworkSpec.Subnets)) + for i, subnet := range s.AzureCluster.Spec.NetworkSpec.Subnets { + nsgspecs[i] = azure.NSGSpec{ Name: subnet.SecurityGroup.Name, SecurityRules: subnet.SecurityGroup.SecurityRules, - }) + } } return nsgspecs @@ -264,7 +255,12 @@ func (s *ClusterScope) NSGSpecs() []azure.NSGSpec { // SubnetSpecs returns the subnets specs. 
func (s *ClusterScope) SubnetSpecs() []azure.SubnetSpec { - subnetSpecs := []azure.SubnetSpec{} + numberOfSubnets := len(s.AzureCluster.Spec.NetworkSpec.Subnets) + if s.AzureCluster.Spec.BastionSpec.AzureBastion != nil { + numberOfSubnets++ + } + + subnetSpecs := make([]azure.SubnetSpec, 0, numberOfSubnets) for _, subnet := range s.AzureCluster.Spec.NetworkSpec.Subnets { subnetSpec := azure.SubnetSpec{ Name: subnet.Name, diff --git a/azure/scope/machine.go b/azure/scope/machine.go index e2216f63499..f57b948e741 100644 --- a/azure/scope/machine.go +++ b/azure/scope/machine.go @@ -24,11 +24,9 @@ import ( "time" "github.com/Azure/go-autorest/autorest/to" - "github.com/go-logr/logr" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/klog/v2/klogr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/controllers/noderefutil" capierrors "sigs.k8s.io/cluster-api/errors" @@ -49,7 +47,6 @@ import ( // MachineScopeParams defines the input parameters used to create a new MachineScope. 
type MachineScopeParams struct { Client client.Client - Logger logr.Logger ClusterScope azure.ClusterScoper Machine *clusterv1.Machine AzureMachine *infrav1.AzureMachine @@ -68,9 +65,6 @@ func NewMachineScope(params MachineScopeParams) (*MachineScope, error) { if params.AzureMachine == nil { return nil, errors.New("azure machine is required when creating a MachineScope") } - if params.Logger == nil { - params.Logger = klogr.New() - } helper, err := patch.NewHelper(params.AzureMachine, params.Client) if err != nil { @@ -81,7 +75,6 @@ func NewMachineScope(params MachineScopeParams) (*MachineScope, error) { client: params.Client, Machine: params.Machine, AzureMachine: params.AzureMachine, - Logger: params.Logger, patchHelper: helper, ClusterScoper: params.ClusterScope, cache: params.Cache, @@ -90,7 +83,6 @@ func NewMachineScope(params MachineScopeParams) (*MachineScope, error) { // MachineScope defines a scope defined around a machine and its cluster. type MachineScope struct { - logr.Logger client client.Client patchHelper *patch.Helper @@ -121,7 +113,7 @@ func (m *MachineScope) InitMachineCache(ctx context.Context) error { return err } - m.cache.VMImage, err = m.GetVMImage() + m.cache.VMImage, err = m.GetVMImage(ctx) if err != nil { return err } @@ -442,18 +434,21 @@ func (m *MachineScope) SetFailureReason(v capierrors.MachineStatusError) { } // SetBootstrapConditions sets the AzureMachine BootstrapSucceeded condition based on the extension provisioning states. 
-func (m *MachineScope) SetBootstrapConditions(provisioningState string, extensionName string) error { +func (m *MachineScope) SetBootstrapConditions(ctx context.Context, provisioningState string, extensionName string) error { + _, log, done := tele.StartSpanWithLogger(ctx, "scope.MachineScope.SetBootstrapConditions") + defer done() + switch infrav1.ProvisioningState(provisioningState) { case infrav1.Succeeded: - m.V(4).Info("extension provisioning state is succeeded", "vm extension", extensionName, "virtual machine", m.Name()) + log.V(4).Info("extension provisioning state is succeeded", "vm extension", extensionName, "virtual machine", m.Name()) conditions.MarkTrue(m.AzureMachine, infrav1.BootstrapSucceededCondition) return nil case infrav1.Creating: - m.V(4).Info("extension provisioning state is creating", "vm extension", extensionName, "virtual machine", m.Name()) + log.V(4).Info("extension provisioning state is creating", "vm extension", extensionName, "virtual machine", m.Name()) conditions.MarkFalse(m.AzureMachine, infrav1.BootstrapSucceededCondition, infrav1.BootstrapInProgressReason, clusterv1.ConditionSeverityInfo, "") return azure.WithTransientError(errors.New("extension is still in provisioning state. This likely means that bootstrapping has not yet completed on the VM"), 30*time.Second) case infrav1.Failed: - m.V(4).Info("extension provisioning state is failed", "vm extension", extensionName, "virtual machine", m.Name()) + log.V(4).Info("extension provisioning state is failed", "vm extension", extensionName, "virtual machine", m.Name()) conditions.MarkFalse(m.AzureMachine, infrav1.BootstrapSucceededCondition, infrav1.BootstrapFailedReason, clusterv1.ConditionSeverityError, "") return azure.WithTerminalError(errors.New("extension state failed. This likely means the Kubernetes node bootstrapping process failed or timed out. 
Check VM boot diagnostics logs to learn more")) default: @@ -542,6 +537,9 @@ func (m *MachineScope) AdditionalTags() infrav1.Tags { // GetBootstrapData returns the bootstrap data from the secret in the Machine's bootstrap.dataSecretName. func (m *MachineScope) GetBootstrapData(ctx context.Context) (string, error) { + ctx, _, done := tele.StartSpanWithLogger(ctx, "scope.MachineScope.GetBootstrapData") + defer done() + if m.Machine.Spec.Bootstrap.DataSecretName == nil { return "", errors.New("error retrieving bootstrap data: linked Machine's bootstrap.dataSecretName is nil") } @@ -559,7 +557,10 @@ func (m *MachineScope) GetBootstrapData(ctx context.Context) (string, error) { } // GetVMImage returns the image from the machine configuration, or a default one. -func (m *MachineScope) GetVMImage() (*infrav1.Image, error) { +func (m *MachineScope) GetVMImage(ctx context.Context) (*infrav1.Image, error) { + _, log, done := tele.StartSpanWithLogger(ctx, "scope.MachineScope.GetVMImage") + defer done() + // Use custom Marketplace image, Image ID or a Shared Image Gallery image if provided if m.AzureMachine.Spec.Image != nil { return m.AzureMachine.Spec.Image, nil @@ -567,11 +568,11 @@ func (m *MachineScope) GetVMImage() (*infrav1.Image, error) { if m.AzureMachine.Spec.OSDisk.OSType == azure.WindowsOS { runtime := m.AzureMachine.Annotations["runtime"] - m.Info("No image specified for machine, using default Windows Image", "machine", m.AzureMachine.GetName(), "runtime", runtime) + log.Info("No image specified for machine, using default Windows Image", "machine", m.AzureMachine.GetName(), "runtime", runtime) return azure.GetDefaultWindowsImage(to.String(m.Machine.Spec.Version), runtime) } - m.Info("No image specified for machine, using default Linux Image", "machine", m.AzureMachine.GetName()) + log.Info("No image specified for machine, using default Linux Image", "machine", m.AzureMachine.GetName()) return azure.GetDefaultUbuntuImage(to.String(m.Machine.Spec.Version)) } diff 
--git a/azure/scope/machine_test.go b/azure/scope/machine_test.go index 5db946275c8..2a137a1410e 100644 --- a/azure/scope/machine_test.go +++ b/azure/scope/machine_test.go @@ -17,22 +17,21 @@ limitations under the License. package scope import ( + "context" "reflect" "testing" - "k8s.io/klog/v2/klogr" - autorestazure "github.com/Azure/go-autorest/autorest/azure" "github.com/Azure/go-autorest/autorest/azure/auth" "github.com/Azure/go-autorest/autorest/to" . "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/pointer" - "sigs.k8s.io/cluster-api-provider-azure/azure" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" + "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/azure/services/disks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) func TestMachineScope_Name(t *testing.T) { @@ -1115,7 +1114,6 @@ func TestMachineScope_GetVMImage(t *testing.T) { { name: "if no image is specified and os specified is windows with version below 1.22, returns windows dockershim image", machineScope: MachineScope{ - Logger: klogr.New(), Machine: &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "machine-name", @@ -1144,7 +1142,6 @@ func TestMachineScope_GetVMImage(t *testing.T) { { name: "if no image is specified and os specified is windows with version is 1.22+ with no annotation, returns windows containerd image", machineScope: MachineScope{ - Logger: klogr.New(), Machine: &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "machine-name", @@ -1173,7 +1170,6 @@ func TestMachineScope_GetVMImage(t *testing.T) { { name: "if no image is specified and os specified is windows with version is 1.22+ with annotation dockershim, returns windows dockershim image", machineScope: MachineScope{ - Logger: klogr.New(), Machine: &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "machine-name", @@ -1205,7 +1201,6 @@ func TestMachineScope_GetVMImage(t 
*testing.T) { { name: "if no image is specified and os specified is windows with version is less and 1.22 with annotation dockershim, returns windows dockershim image", machineScope: MachineScope{ - Logger: klogr.New(), Machine: &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "machine-name", @@ -1237,7 +1232,6 @@ func TestMachineScope_GetVMImage(t *testing.T) { { name: "if no image is specified and os specified is windows with version is less and 1.22 with annotation containerd, returns error", machineScope: MachineScope{ - Logger: klogr.New(), Machine: &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "machine-name", @@ -1266,7 +1260,6 @@ func TestMachineScope_GetVMImage(t *testing.T) { { name: "if no image and OS is specified, returns linux image", machineScope: MachineScope{ - Logger: klogr.New(), Machine: &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "machine-name", @@ -1290,7 +1283,7 @@ func TestMachineScope_GetVMImage(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - gotImage, err := tt.machineScope.GetVMImage() + gotImage, err := tt.machineScope.GetVMImage(context.TODO()) gotError := false if err != nil { gotError = true diff --git a/azure/scope/machinepool.go b/azure/scope/machinepool.go index dd0b7ca4d40..4174d70c074 100644 --- a/azure/scope/machinepool.go +++ b/azure/scope/machinepool.go @@ -25,18 +25,11 @@ import ( "sigs.k8s.io/cluster-api-provider-azure/util/futures" "github.com/Azure/go-autorest/autorest/to" - "github.com/go-logr/logr" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/klog/v2/klogr" "k8s.io/utils/pointer" - infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" - "sigs.k8s.io/cluster-api-provider-azure/azure" - machinepool "sigs.k8s.io/cluster-api-provider-azure/azure/scope/strategies/machinepool_deployments" - infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1" - 
"sigs.k8s.io/cluster-api-provider-azure/util/tele" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/controllers/noderefutil" capierrors "sigs.k8s.io/cluster-api/errors" @@ -45,6 +38,12 @@ import ( "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" + "sigs.k8s.io/cluster-api-provider-azure/azure" + machinepool "sigs.k8s.io/cluster-api-provider-azure/azure/scope/strategies/machinepool_deployments" + infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1" + "sigs.k8s.io/cluster-api-provider-azure/util/tele" ) // ScalesetsServiceName is the name of the scalesets service. @@ -56,7 +55,6 @@ type ( // MachinePoolScopeParams defines the input parameters used to create a new MachinePoolScope. MachinePoolScopeParams struct { Client client.Client - Logger logr.Logger MachinePool *capiv1exp.MachinePool AzureMachinePool *infrav1exp.AzureMachinePool ClusterScope azure.ClusterScoper @@ -65,7 +63,6 @@ type ( // MachinePoolScope defines a scope defined around a machine pool and its cluster. 
MachinePoolScope struct { azure.ClusterScoper - logr.Logger AzureMachinePool *infrav1exp.AzureMachinePool MachinePool *capiv1exp.MachinePool client client.Client @@ -95,10 +92,6 @@ func NewMachinePoolScope(params MachinePoolScopeParams) (*MachinePoolScope, erro return nil, errors.New("azure machine pool is required when creating a MachinePoolScope") } - if params.Logger == nil { - params.Logger = klogr.New() - } - helper, err := patch.NewHelper(params.AzureMachinePool, params.Client) if err != nil { return nil, errors.Wrap(err, "failed to init patch helper") @@ -108,7 +101,6 @@ func NewMachinePoolScope(params MachinePoolScopeParams) (*MachinePoolScope, erro client: params.Client, MachinePool: params.MachinePool, AzureMachinePool: params.AzureMachinePool, - Logger: params.Logger, patchHelper: helper, ClusterScoper: params.ClusterScope, }, nil @@ -235,10 +227,7 @@ func (m *MachinePoolScope) updateReplicasAndProviderIDs(ctx context.Context) err } func (m *MachinePoolScope) getMachinePoolMachines(ctx context.Context) ([]infrav1exp.AzureMachinePoolMachine, error) { - ctx, _, done := tele.StartSpanWithLogger( - ctx, - "scope.MachinePoolScope.getMachinePoolMachines", - ) + ctx, _, done := tele.StartSpanWithLogger(ctx, "scope.MachinePoolScope.getMachinePoolMachines") defer done() labels := map[string]string{ @@ -254,14 +243,11 @@ func (m *MachinePoolScope) getMachinePoolMachines(ctx context.Context) ([]infrav } func (m *MachinePoolScope) applyAzureMachinePoolMachines(ctx context.Context) error { - ctx, _, done := tele.StartSpanWithLogger( - ctx, - "scope.MachinePoolScope.applyAzureMachinePoolMachines", - ) + ctx, log, done := tele.StartSpanWithLogger(ctx, "scope.MachinePoolScope.applyAzureMachinePoolMachines") defer done() if m.vmssState == nil { - m.Info("vmssState is nil") + log.Info("vmssState is nil") return nil } @@ -283,7 +269,7 @@ func (m *MachinePoolScope) applyAzureMachinePoolMachines(ctx context.Context) er azureMachinesByProviderID := 
m.vmssState.InstancesByProviderID() for key, val := range azureMachinesByProviderID { if _, ok := existingMachinesByProviderID[key]; !ok { - m.V(4).Info("creating AzureMachinePoolMachine", "providerID", key) + log.V(4).Info("creating AzureMachinePoolMachine", "providerID", key) if err := m.createMachine(ctx, val); err != nil { return errors.Wrap(err, "failed creating AzureMachinePoolMachine") } @@ -297,7 +283,7 @@ func (m *MachinePoolScope) applyAzureMachinePoolMachines(ctx context.Context) er machine := machine if _, ok := azureMachinesByProviderID[key]; !ok { deleted = true - m.V(4).Info("deleting AzureMachinePoolMachine because it no longer exists in the VMSS", "providerID", key) + log.V(4).Info("deleting AzureMachinePoolMachine because it no longer exists in the VMSS", "providerID", key) delete(existingMachinesByProviderID, key) if err := m.client.Delete(ctx, &machine); err != nil { return errors.Wrap(err, "failed deleting AzureMachinePoolMachine to reduce replica count") @@ -306,20 +292,20 @@ func (m *MachinePoolScope) applyAzureMachinePoolMachines(ctx context.Context) er } if deleted { - m.V(4).Info("exiting early due to finding AzureMachinePoolMachine(s) that were deleted because they no longer exist in the VMSS") + log.V(4).Info("exiting early due to finding AzureMachinePoolMachine(s) that were deleted because they no longer exist in the VMSS") // exit early to be less greedy about delete return nil } if futures.Has(m.AzureMachinePool, m.Name(), ScalesetsServiceName) { - m.V(4).Info("exiting early due an in-progress long running operation on the ScaleSet") + log.V(4).Info("exiting early due an in-progress long running operation on the ScaleSet") // exit early to be less greedy about delete return nil } deleteSelector := m.getDeploymentStrategy() if deleteSelector == nil { - m.V(4).Info("can not select AzureMachinePoolMachines to delete because no deployment strategy is specified") + log.V(4).Info("can not select AzureMachinePoolMachines to delete because no 
deployment strategy is specified") return nil } @@ -331,13 +317,13 @@ func (m *MachinePoolScope) applyAzureMachinePoolMachines(ctx context.Context) er for _, machine := range toDelete { machine := machine - m.Info("deleting selected AzureMachinePoolMachine", "providerID", machine.Spec.ProviderID) + log.Info("deleting selected AzureMachinePoolMachine", "providerID", machine.Spec.ProviderID) if err := m.client.Delete(ctx, &machine); err != nil { return errors.Wrap(err, "failed deleting AzureMachinePoolMachine to reduce replica count") } } - m.V(4).Info("done reconciling AzureMachinePoolMachine(s)") + log.V(4).Info("done reconciling AzureMachinePoolMachine(s)") return nil } @@ -456,18 +442,21 @@ func (m *MachinePoolScope) SetFailureReason(v capierrors.MachineStatusError) { } // SetBootstrapConditions sets the AzureMachinePool BootstrapSucceeded condition based on the extension provisioning states. -func (m *MachinePoolScope) SetBootstrapConditions(provisioningState string, extensionName string) error { +func (m *MachinePoolScope) SetBootstrapConditions(ctx context.Context, provisioningState string, extensionName string) error { + _, log, done := tele.StartSpanWithLogger(ctx, "scope.MachinePoolScope.SetBootstrapConditions") + defer done() + switch infrav1.ProvisioningState(provisioningState) { case infrav1.Succeeded: - m.V(4).Info("extension provisioning state is succeeded", "vm extension", extensionName, "scale set", m.Name()) + log.V(4).Info("extension provisioning state is succeeded", "vm extension", extensionName, "scale set", m.Name()) conditions.MarkTrue(m.AzureMachinePool, infrav1.BootstrapSucceededCondition) return nil case infrav1.Creating: - m.V(4).Info("extension provisioning state is creating", "vm extension", extensionName, "scale set", m.Name()) + log.V(4).Info("extension provisioning state is creating", "vm extension", extensionName, "scale set", m.Name()) conditions.MarkFalse(m.AzureMachinePool, infrav1.BootstrapSucceededCondition, 
infrav1.BootstrapInProgressReason, clusterv1.ConditionSeverityInfo, "") return azure.WithTransientError(errors.New("extension is still in provisioning state. This likely means that bootstrapping has not yet completed on the VM"), 30*time.Second) case infrav1.Failed: - m.V(4).Info("extension provisioning state is failed", "vm extension", extensionName, "scale set", m.Name()) + log.V(4).Info("extension provisioning state is failed", "vm extension", extensionName, "scale set", m.Name()) conditions.MarkFalse(m.AzureMachinePool, infrav1.BootstrapSucceededCondition, infrav1.BootstrapFailedReason, clusterv1.ConditionSeverityError, "") return azure.WithTerminalError(errors.New("extension state failed. This likely means the Kubernetes node bootstrapping process failed or timed out. Check VM boot diagnostics logs to learn more")) default: @@ -499,10 +488,7 @@ func (m *MachinePoolScope) SetAnnotation(key, value string) { // PatchObject persists the machine spec and status. func (m *MachinePoolScope) PatchObject(ctx context.Context) error { - ctx, _, done := tele.StartSpanWithLogger( - ctx, - "scope.MachinePoolScope.PatchObject", - ) + ctx, _, done := tele.StartSpanWithLogger(ctx, "scope.MachinePoolScope.PatchObject") defer done() return m.patchHelper.Patch(ctx, m.AzureMachinePool) @@ -510,12 +496,12 @@ func (m *MachinePoolScope) PatchObject(ctx context.Context) error { // Close the MachineScope by updating the machine spec, machine status. 
func (m *MachinePoolScope) Close(ctx context.Context) error { - ctx, _, done := tele.StartSpanWithLogger(ctx, "scope.MachinePoolScope.Close") + ctx, log, done := tele.StartSpanWithLogger(ctx, "scope.MachinePoolScope.Close") defer done() if m.vmssState != nil { if err := m.applyAzureMachinePoolMachines(ctx); err != nil { - m.Error(err, "failed to apply changes to the AzureMachinePoolMachines") + log.Error(err, "failed to apply changes to the AzureMachinePoolMachines") return errors.Wrap(err, "failed to apply changes to AzureMachinePoolMachines") } @@ -530,10 +516,7 @@ func (m *MachinePoolScope) Close(ctx context.Context) error { // GetBootstrapData returns the bootstrap data from the secret in the Machine's bootstrap.dataSecretName. func (m *MachinePoolScope) GetBootstrapData(ctx context.Context) (string, error) { - ctx, _, done := tele.StartSpanWithLogger( - ctx, - "scope.MachinePoolScope.GetBootstrapData", - ) + ctx, _, done := tele.StartSpanWithLogger(ctx, "scope.MachinePoolScope.GetBootstrapData") defer done() dataSecretName := m.MachinePool.Spec.Template.Spec.Bootstrap.DataSecretName @@ -554,7 +537,10 @@ func (m *MachinePoolScope) GetBootstrapData(ctx context.Context) (string, error) } // GetVMImage picks an image from the machine configuration, or uses a default one. 
-func (m *MachinePoolScope) GetVMImage() (*infrav1.Image, error) { +func (m *MachinePoolScope) GetVMImage(ctx context.Context) (*infrav1.Image, error) { + _, log, done := tele.StartSpanWithLogger(ctx, "scope.MachinePoolScope.GetVMImage") + defer done() + // Use custom Marketplace image, Image ID or a Shared Image Gallery image if provided if m.AzureMachinePool.Spec.Template.Image != nil { return m.AzureMachinePool.Spec.Template.Image, nil @@ -566,7 +552,7 @@ func (m *MachinePoolScope) GetVMImage() (*infrav1.Image, error) { ) if m.AzureMachinePool.Spec.Template.OSDisk.OSType == azure.WindowsOS { runtime := m.AzureMachinePool.Annotations["runtime"] - m.V(4).Info("No image specified for machine, using default Windows Image", "machine", m.MachinePool.GetName(), "runtime", runtime) + log.V(4).Info("No image specified for machine, using default Windows Image", "machine", m.MachinePool.GetName(), "runtime", runtime) defaultImage, err = azure.GetDefaultWindowsImage(to.String(m.MachinePool.Spec.Template.Spec.Version), runtime) } else { defaultImage, err = azure.GetDefaultUbuntuImage(to.String(m.MachinePool.Spec.Template.Spec.Version)) diff --git a/azure/scope/machinepool_test.go b/azure/scope/machinepool_test.go index 5f72777af06..344008e13b5 100644 --- a/azure/scope/machinepool_test.go +++ b/azure/scope/machinepool_test.go @@ -31,7 +31,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/klog/v2/klogr" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1" @@ -154,9 +153,8 @@ func TestMachinePoolScope_SetBootstrapConditions(t *testing.T) { state, name := c.Setup() s := &MachinePoolScope{ AzureMachinePool: &infrav1exp.AzureMachinePool{}, - Logger: klogr.New(), } - err := s.SetBootstrapConditions(state, name) + err := s.SetBootstrapConditions(context.TODO(), state, 
name) c.Verify(g, s.AzureMachinePool, err) }) } @@ -255,7 +253,6 @@ func TestMachinePoolScope_MaxSurge(t *testing.T) { s := &MachinePoolScope{ MachinePool: mp, AzureMachinePool: amp, - Logger: klogr.New(), } surge, err := s.MaxSurge() c.Verify(g, surge, err) @@ -282,7 +279,6 @@ func TestMachinePoolScope_SaveVMImageToStatus(t *testing.T) { } s = &MachinePoolScope{ AzureMachinePool: amp, - Logger: klogr.New(), } image = &infrav1.Image{ Marketplace: &infrav1.AzureMarketplaceImage{ @@ -391,9 +387,8 @@ func TestMachinePoolScope_GetVMImage(t *testing.T) { s := &MachinePoolScope{ MachinePool: mp, AzureMachinePool: amp, - Logger: klogr.New(), } - image, err := s.GetVMImage() + image, err := s.GetVMImage(context.TODO()) c.Verify(g, amp, image, err) }) } @@ -513,7 +508,6 @@ func TestMachinePoolScope_NeedsRequeue(t *testing.T) { vmssState: vmssState, MachinePool: mp, AzureMachinePool: amp, - Logger: klogr.New(), } c.Verify(g, s.NeedsRequeue()) }) @@ -619,7 +613,6 @@ func TestMachinePoolScope_updateReplicasAndProviderIDs(t *testing.T) { Cluster: cluster, }, AzureMachinePool: amp, - Logger: klogr.New(), } err := s.updateReplicasAndProviderIDs(context.TODO()) c.Verify(g, s.AzureMachinePool, err) diff --git a/azure/scope/machinepoolmachine.go b/azure/scope/machinepoolmachine.go index 3d23d094acd..9cda7062fa9 100644 --- a/azure/scope/machinepoolmachine.go +++ b/azure/scope/machinepoolmachine.go @@ -22,13 +22,11 @@ import ( "reflect" "time" - "github.com/go-logr/logr" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/client-go/kubernetes" "k8s.io/klog/v2" - "k8s.io/klog/v2/klogr" kubedrain "k8s.io/kubectl/pkg/drain" "k8s.io/utils/pointer" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -69,7 +67,6 @@ type ( AzureMachinePoolMachine *infrav1exp.AzureMachinePoolMachine Client client.Client ClusterScope azure.ClusterScoper - Logger logr.Logger MachinePool *capiv1exp.MachinePool // workloadNodeGetter is only used for testing 
purposes and provides a way for mocking requests to the workload cluster @@ -79,7 +76,6 @@ type ( // MachinePoolMachineScope defines a scope defined around a machine pool machine. MachinePoolMachineScope struct { azure.ClusterScoper - logr.Logger AzureMachinePoolMachine *infrav1exp.AzureMachinePoolMachine AzureMachinePool *infrav1exp.AzureMachinePool MachinePool *capiv1exp.MachinePool @@ -126,13 +122,8 @@ func NewMachinePoolMachineScope(params MachinePoolMachineScopeParams) (*MachineP ) } - if params.Logger == nil { - params.Logger = klogr.New() - } - mpScope, err := NewMachinePoolScope(MachinePoolScopeParams{ Client: params.Client, - Logger: params.Logger, MachinePool: params.MachinePool, AzureMachinePool: params.AzureMachinePool, ClusterScope: params.ClusterScope, @@ -150,7 +141,6 @@ func NewMachinePoolMachineScope(params MachinePoolMachineScopeParams) (*MachineP AzureMachinePool: params.AzureMachinePool, AzureMachinePoolMachine: params.AzureMachinePoolMachine, ClusterScoper: params.ClusterScope, - Logger: params.Logger, MachinePool: params.MachinePool, MachinePoolScope: mpScope, client: params.Client, @@ -315,7 +305,7 @@ func (s *MachinePoolMachineScope) UpdateStatus(ctx context.Context) error { } if s.instance != nil { - hasLatestModel, err := s.hasLatestModelApplied() + hasLatestModel, err := s.hasLatestModelApplied(ctx) if err != nil { return errors.Wrap(err, "failed to determine if the VMSS instance has the latest model") } @@ -329,7 +319,7 @@ func (s *MachinePoolMachineScope) UpdateStatus(ctx context.Context) error { // CordonAndDrain will cordon and drain the Kubernetes node associated with this AzureMachinePoolMachine. 
func (s *MachinePoolMachineScope) CordonAndDrain(ctx context.Context) error { - ctx, _, done := tele.StartSpanWithLogger( + ctx, log, done := tele.StartSpanWithLogger( ctx, "scope.MachinePoolMachineScope.CordonAndDrain", ) @@ -365,7 +355,7 @@ func (s *MachinePoolMachineScope) CordonAndDrain(ctx context.Context) error { return errors.Wrap(err, "failed to build a patchHelper when draining node") } - s.V(4).Info("Draining node", "node", node.Name) + log.V(4).Info("Draining node", "node", node.Name) // The DrainingSucceededCondition never exists before the node is drained for the first time, // so its transition time can be used to record the first time draining. // This `if` condition prevents the transition time to be changed more than once. @@ -395,7 +385,7 @@ func (s *MachinePoolMachineScope) CordonAndDrain(ctx context.Context) error { } func (s *MachinePoolMachineScope) drainNode(ctx context.Context, node *corev1.Node) error { - ctx, _, done := tele.StartSpanWithLogger( + ctx, log, done := tele.StartSpanWithLogger( ctx, "scope.MachinePoolMachineScope.drainNode", ) @@ -407,13 +397,13 @@ func (s *MachinePoolMachineScope) drainNode(ctx context.Context, node *corev1.No }) if err != nil { - s.Error(err, "Error creating a remote client while deleting Machine, won't retry") + log.Error(err, "Error creating a remote client while deleting Machine, won't retry") return nil } kubeClient, err := kubernetes.NewForConfig(restConfig) if err != nil { - s.Error(err, "Error creating a remote client while deleting Machine, won't retry") + log.Error(err, "Error creating a remote client while deleting Machine, won't retry") return nil } @@ -432,7 +422,7 @@ func (s *MachinePoolMachineScope) drainNode(ctx context.Context, node *corev1.No if usingEviction { verbStr = "Evicted" } - s.V(4).Info(fmt.Sprintf("%s pod from Node", verbStr), + log.V(4).Info(fmt.Sprintf("%s pod from Node", verbStr), "pod", fmt.Sprintf("%s/%s", pod.Name, pod.Namespace)) }, Out: writer{klog.Info}, @@ -454,7 +444,7 
@@ func (s *MachinePoolMachineScope) drainNode(ctx context.Context, node *corev1.No return azure.WithTransientError(errors.Wrap(err, "Drain failed, retry in 20s"), 20*time.Second) } - s.V(4).Info("Drain successful") + log.V(4).Info("Drain successful") return nil } @@ -491,12 +481,18 @@ func (s *MachinePoolMachineScope) nodeDrainTimeoutExceeded() bool { return diff.Seconds() >= s.AzureMachinePool.Spec.NodeDrainTimeout.Seconds() } -func (s *MachinePoolMachineScope) hasLatestModelApplied() (bool, error) { +func (s *MachinePoolMachineScope) hasLatestModelApplied(ctx context.Context) (bool, error) { + ctx, _, done := tele.StartSpanWithLogger( + ctx, + "scope.MachinePoolMachineScope.hasLatestModelApplied", + ) + defer done() + if s.instance == nil { return false, errors.New("instance must not be nil") } - image, err := s.MachinePoolScope.GetVMImage() + image, err := s.MachinePoolScope.GetVMImage(ctx) if err != nil { return false, errors.Wrap(err, "unable to build vm image information from MachinePoolScope") } diff --git a/azure/scope/machinepoolmachine_test.go b/azure/scope/machinepoolmachine_test.go index 8bb1f9aa1a6..69f83991157 100644 --- a/azure/scope/machinepoolmachine_test.go +++ b/azure/scope/machinepoolmachine_test.go @@ -27,7 +27,6 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/klog/v2/klogr" "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" mock_scope "sigs.k8s.io/cluster-api-provider-azure/azure/scope/mocks" @@ -398,7 +397,6 @@ func TestMachinePoolMachineScope_CordonAndDrain(t *testing.T) { s, err := NewMachinePoolMachineScope(params) g.Expect(err).NotTo(HaveOccurred()) g.Expect(s).ToNot(BeNil()) - s.Logger = klogr.New() s.workloadNodeGetter = mockClient err = s.CordonAndDrain(context.TODO()) diff --git a/azure/scope/managedcontrolplane.go b/azure/scope/managedcontrolplane.go index e01880f3821..800a85313e7 100644 --- 
a/azure/scope/managedcontrolplane.go +++ b/azure/scope/managedcontrolplane.go @@ -26,11 +26,9 @@ import ( "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/to" - "github.com/go-logr/logr" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/klog/v2/klogr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" capiexputil "sigs.k8s.io/cluster-api/exp/util" @@ -43,6 +41,7 @@ import ( "sigs.k8s.io/cluster-api-provider-azure/azure/services/groups" infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/util/futures" + "sigs.k8s.io/cluster-api-provider-azure/util/tele" ) // ManagedControlPlaneScopeParams defines the input parameters used to create a new managed @@ -50,7 +49,6 @@ import ( type ManagedControlPlaneScopeParams struct { AzureClients Client client.Client - Logger logr.Logger Cluster *clusterv1.Cluster ControlPlane *infrav1exp.AzureManagedControlPlane InfraMachinePool *infrav1exp.AzureManagedMachinePool @@ -61,6 +59,9 @@ type ManagedControlPlaneScopeParams struct { // NewManagedControlPlaneScope creates a new Scope from the supplied parameters. // This is meant to be called for each reconcile iteration. 
func NewManagedControlPlaneScope(ctx context.Context, params ManagedControlPlaneScopeParams) (*ManagedControlPlaneScope, error) { + ctx, _, done := tele.StartSpanWithLogger(ctx, "scope.NewManagedControlPlaneScope") + defer done() + if params.Cluster == nil { return nil, errors.New("failed to generate new scope from nil Cluster") } @@ -69,10 +70,6 @@ func NewManagedControlPlaneScope(ctx context.Context, params ManagedControlPlane return nil, errors.New("failed to generate new scope from nil ControlPlane") } - if params.Logger == nil { - params.Logger = klogr.New() - } - if params.ControlPlane.Spec.IdentityRef == nil { if err := params.AzureClients.setCredentials(params.ControlPlane.Spec.SubscriptionID, ""); err != nil { return nil, errors.Wrap(err, "failed to create Azure session") @@ -94,7 +91,6 @@ func NewManagedControlPlaneScope(ctx context.Context, params ManagedControlPlane } return &ManagedControlPlaneScope{ - Logger: params.Logger, Client: params.Client, AzureClients: params.AzureClients, Cluster: params.Cluster, @@ -108,7 +104,6 @@ func NewManagedControlPlaneScope(ctx context.Context, params ManagedControlPlane // ManagedControlPlaneScope defines the basic context for an actuator to operate upon. type ManagedControlPlaneScope struct { - logr.Logger Client client.Client patchHelper *patch.Helper kubeConfigData []byte @@ -183,11 +178,17 @@ func (s *ManagedControlPlaneScope) Authorizer() autorest.Authorizer { // PatchObject persists the cluster configuration and status. func (s *ManagedControlPlaneScope) PatchObject(ctx context.Context) error { + ctx, _, done := tele.StartSpanWithLogger(ctx, "scope.ManagedControlPlaneScope.PatchObject") + defer done() + return s.patchHelper.Patch(ctx, s.PatchTarget) } // Close closes the current scope persisting the cluster configuration and status. 
func (s *ManagedControlPlaneScope) Close(ctx context.Context) error { + ctx, _, done := tele.StartSpanWithLogger(ctx, "scope.ManagedControlPlaneScope.Close") + defer done() + return s.PatchObject(ctx) } @@ -260,7 +261,7 @@ func (s *ManagedControlPlaneScope) NodeSubnet() infrav1.SubnetSpec { // SetSubnet sets the passed subnet spec into the scope. // This is not used when using a managed control plane. -func (s *ManagedControlPlaneScope) SetSubnet(subnetSpec infrav1.SubnetSpec) { +func (s *ManagedControlPlaneScope) SetSubnet(_ infrav1.SubnetSpec) { // no-op } @@ -307,7 +308,7 @@ func (s *ManagedControlPlaneScope) APIServerLBName() string { } // APIServerLBPoolName returns the API Server LB backend pool name. -func (s *ManagedControlPlaneScope) APIServerLBPoolName(loadBalancerName string) string { +func (s *ManagedControlPlaneScope) APIServerLBPoolName(_ string) string { return "" // does not apply for AKS } @@ -378,25 +379,25 @@ func (s *ManagedControlPlaneScope) ManagedClusterSpec() (azure.ManagedClusterSpe managedClusterSpec.LoadBalancerSKU = *s.ControlPlane.Spec.LoadBalancerSKU } - if net := s.Cluster.Spec.ClusterNetwork; net != nil { - if net.Services != nil { + if clusterNetwork := s.Cluster.Spec.ClusterNetwork; clusterNetwork != nil { + if clusterNetwork.Services != nil { // A user may provide zero or one CIDR blocks. If they provide an empty array, // we ignore it and use the default. AKS doesn't support > 1 Service/Pod CIDR. - if len(net.Services.CIDRBlocks) > 1 { + if len(clusterNetwork.Services.CIDRBlocks) > 1 { return azure.ManagedClusterSpec{}, errors.New("managed control planes only allow one service cidr") } - if len(net.Services.CIDRBlocks) == 1 { - managedClusterSpec.ServiceCIDR = net.Services.CIDRBlocks[0] + if len(clusterNetwork.Services.CIDRBlocks) == 1 { + managedClusterSpec.ServiceCIDR = clusterNetwork.Services.CIDRBlocks[0] } } - if net.Pods != nil { + if clusterNetwork.Pods != nil { // A user may provide zero or one CIDR blocks. 
If they provide an empty array, // we ignore it and use the default. AKS doesn't support > 1 Service/Pod CIDR. - if len(net.Pods.CIDRBlocks) > 1 { + if len(clusterNetwork.Pods.CIDRBlocks) > 1 { return azure.ManagedClusterSpec{}, errors.New("managed control planes only allow one service cidr") } - if len(net.Pods.CIDRBlocks) == 1 { - managedClusterSpec.PodCIDR = net.Pods.CIDRBlocks[0] + if len(clusterNetwork.Pods.CIDRBlocks) == 1 { + managedClusterSpec.PodCIDR = clusterNetwork.Pods.CIDRBlocks[0] } } } @@ -453,6 +454,9 @@ func (s *ManagedControlPlaneScope) ManagedClusterSpec() (azure.ManagedClusterSpe // GetAgentPoolSpecs gets a slice of azure.AgentPoolSpec for the list of agent pools. func (s *ManagedControlPlaneScope) GetAgentPoolSpecs(ctx context.Context) ([]azure.AgentPoolSpec, error) { + ctx, log, done := tele.StartSpanWithLogger(ctx, "scope.ManagedControlPlaneScope.GetAgentPoolSpecs") + defer done() + if len(s.AllNodePools) == 0 { opt1 := client.InNamespace(s.ControlPlane.Namespace) opt2 := client.MatchingLabels(map[string]string{ @@ -468,19 +472,20 @@ func (s *ManagedControlPlaneScope) GetAgentPoolSpecs(ctx context.Context) ([]azu s.AllNodePools = ammpList.Items } - ammps := []azure.AgentPoolSpec{} - - foundSystemPool := false + var ( + ammps = make([]azure.AgentPoolSpec, 0, len(s.AllNodePools)) + foundSystemPool = false + ) for _, pool := range s.AllNodePools { // Fetch the owning MachinePool. 
ownerPool, err := capiexputil.GetOwnerMachinePool(ctx, s.Client, pool.ObjectMeta) if err != nil { - s.Logger.Error(err, "failed to fetch owner ref for system pool: %s", pool.Name) + log.Error(err, "failed to fetch owner ref for system pool: %s", pool.Name) continue } if ownerPool == nil { - s.Logger.Info("failed to fetch owner ref for system pool") + log.Info("failed to fetch owner ref for system pool") continue } diff --git a/azure/services/agentpools/agentpools.go b/azure/services/agentpools/agentpools.go index 02395b35cd6..bc5c9fc6136 100644 --- a/azure/services/agentpools/agentpools.go +++ b/azure/services/agentpools/agentpools.go @@ -22,9 +22,9 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2021-05-01/containerservice" - "github.com/go-logr/logr" "github.com/google/go-cmp/cmp" "github.com/pkg/errors" + infrav1alpha4 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/util/tele" @@ -32,7 +32,6 @@ import ( // ManagedMachinePoolScope defines the scope interface for a managed machine pool. type ManagedMachinePoolScope interface { - logr.Logger azure.ClusterDescriber NodeResourceGroup() string diff --git a/azure/services/agentpools/agentpools_test.go b/azure/services/agentpools/agentpools_test.go index 68b4756e38d..62346a598a3 100644 --- a/azure/services/agentpools/agentpools_test.go +++ b/azure/services/agentpools/agentpools_test.go @@ -27,14 +27,13 @@ import ( "github.com/golang/mock/gomock" . 
"github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - capi "sigs.k8s.io/cluster-api/api/v1beta1" - capiexp "sigs.k8s.io/cluster-api/exp/api/v1beta1" - "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/azure/scope" "sigs.k8s.io/cluster-api-provider-azure/azure/services/agentpools/mock_agentpools" infraexpv1 "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1" gomockinternal "sigs.k8s.io/cluster-api-provider-azure/internal/test/matchers/gomock" + capi "sigs.k8s.io/cluster-api/api/v1beta1" + capiexp "sigs.k8s.io/cluster-api/exp/api/v1beta1" ) func TestReconcile(t *testing.T) { diff --git a/azure/services/async/async.go b/azure/services/async/async.go index 5977d7c375e..d7a374a336c 100644 --- a/azure/services/async/async.go +++ b/azure/services/async/async.go @@ -22,6 +22,7 @@ import ( azureautorest "github.com/Azure/go-autorest/autorest/azure" "github.com/pkg/errors" + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/azure/converters" @@ -32,12 +33,12 @@ import ( // processOngoingOperation is a helper function that will process an ongoing operation to check if it is done. // If it is not done, it will return a transient error. 
func processOngoingOperation(ctx context.Context, scope FutureScope, client FutureHandler, resourceName string, serviceName string) (interface{}, error) { - ctx, _, done := tele.StartSpanWithLogger(ctx, "async.Service.processOngoingOperation") + ctx, log, done := tele.StartSpanWithLogger(ctx, "async.Service.processOngoingOperation") defer done() future := scope.GetLongRunningOperationState(resourceName, serviceName) if future == nil { - scope.V(2).Info("no long running operation found", "service", serviceName, "resource", resourceName) + log.V(2).Info("no long running operation found", "service", serviceName, "resource", resourceName) return nil, nil } sdkFuture, err := converters.FutureToSDK(*future) @@ -48,6 +49,7 @@ func processOngoingOperation(ctx context.Context, scope FutureScope, client Futu scope.DeleteLongRunningOperationState(resourceName, serviceName) return nil, errors.Wrap(err, "could not decode future data, resetting long-running operation state") } + isDone, err := client.IsDone(ctx, sdkFuture) if err != nil { return nil, errors.Wrap(err, "failed checking if the operation was complete") @@ -55,12 +57,12 @@ func processOngoingOperation(ctx context.Context, scope FutureScope, client Futu if !isDone { // Operation is still in progress, update conditions and requeue. - scope.V(2).Info("long running operation is still ongoing", "service", serviceName, "resource", resourceName) + log.V(2).Info("long running operation is still ongoing", "service", serviceName, "resource", resourceName) return nil, azure.WithTransientError(azure.NewOperationNotDoneError(future), retryAfter(sdkFuture)) } // Resource has been created/deleted/updated. 
- scope.V(2).Info("long running operation has completed", "service", serviceName, "resource", resourceName) + log.V(2).Info("long running operation has completed", "service", serviceName, "resource", resourceName) result, err := client.Result(ctx, sdkFuture, future.Type) if err == nil { scope.DeleteLongRunningOperationState(resourceName, serviceName) @@ -70,7 +72,7 @@ func processOngoingOperation(ctx context.Context, scope FutureScope, client Futu // CreateResource implements the logic for creating a resource Asynchronously. func CreateResource(ctx context.Context, scope FutureScope, client Creator, spec azure.ResourceSpecGetter, serviceName string) (interface{}, error) { - ctx, _, done := tele.StartSpanWithLogger(ctx, "async.Service.CreateResource") + ctx, log, done := tele.StartSpanWithLogger(ctx, "async.Service.CreateResource") defer done() resourceName := spec.ResourceName() @@ -83,7 +85,7 @@ func CreateResource(ctx context.Context, scope FutureScope, client Creator, spec } // No long running operation is active, so create the resource. 
- scope.V(2).Info("creating resource", "service", serviceName, "resource", resourceName, "resourceGroup", rgName) + log.V(2).Info("creating resource", "service", serviceName, "resource", resourceName, "resourceGroup", rgName) result, sdkFuture, err := client.CreateOrUpdateAsync(ctx, spec) if sdkFuture != nil { future, err := converters.SDKToFuture(sdkFuture, infrav1.PutFuture, serviceName, resourceName, rgName) @@ -96,13 +98,13 @@ func CreateResource(ctx context.Context, scope FutureScope, client Creator, spec return nil, errors.Wrapf(err, "failed to create resource %s/%s (service: %s)", rgName, resourceName, serviceName) } - scope.V(2).Info("successfully created resource", "service", serviceName, "resource", resourceName, "resourceGroup", rgName) + log.V(2).Info("successfully created resource", "service", serviceName, "resource", resourceName, "resourceGroup", rgName) return result, nil } // DeleteResource implements the logic for deleting a resource Asynchronously. func DeleteResource(ctx context.Context, scope FutureScope, client Deleter, spec azure.ResourceSpecGetter, serviceName string) error { - ctx, _, done := tele.StartSpanWithLogger(ctx, "async.Service.DeleteResource") + ctx, log, done := tele.StartSpanWithLogger(ctx, "async.Service.DeleteResource") defer done() resourceName := spec.ResourceName() @@ -116,7 +118,7 @@ func DeleteResource(ctx context.Context, scope FutureScope, client Deleter, spec } // No long running operation is active, so delete the resource. 
- scope.V(2).Info("deleting resource", "service", serviceName, "resource", resourceName, "resourceGroup", rgName) + log.V(2).Info("deleting resource", "service", serviceName, "resource", resourceName, "resourceGroup", rgName) sdkFuture, err := client.DeleteAsync(ctx, spec) if sdkFuture != nil { future, err := converters.SDKToFuture(sdkFuture, infrav1.DeleteFuture, serviceName, resourceName, rgName) @@ -133,7 +135,7 @@ func DeleteResource(ctx context.Context, scope FutureScope, client Deleter, spec return errors.Wrapf(err, "failed to delete resource %s/%s (service: %s)", rgName, resourceName, serviceName) } - scope.V(2).Info("successfully deleted resource", "service", serviceName, "resource", resourceName, "resourceGroup", rgName) + log.V(2).Info("successfully deleted resource", "service", serviceName, "resource", resourceName, "resourceGroup", rgName) return nil } diff --git a/azure/services/async/async_test.go b/azure/services/async/async_test.go index 96e264ffca0..4a7166b0f49 100644 --- a/azure/services/async/async_test.go +++ b/azure/services/async/async_test.go @@ -28,8 +28,6 @@ import ( "github.com/Azure/go-autorest/autorest/to" "github.com/golang/mock/gomock" . 
"github.com/onsi/gomega" - "k8s.io/klog/v2/klogr" - infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure/mock_azure" "sigs.k8s.io/cluster-api-provider-azure/azure/services/async/mock_async" @@ -78,7 +76,6 @@ func TestProcessOngoingOperation(t *testing.T) { resourceName: "test-resource", serviceName: "test-service", expect: func(s *mock_async.MockFutureScopeMockRecorder, c *mock_async.MockFutureHandlerMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.GetLongRunningOperationState("test-resource", "test-service").Return(nil) }, }, @@ -88,7 +85,6 @@ func TestProcessOngoingOperation(t *testing.T) { resourceName: "test-resource", serviceName: "test-service", expect: func(s *mock_async.MockFutureScopeMockRecorder, c *mock_async.MockFutureHandlerMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.GetLongRunningOperationState("test-resource", "test-service").Return(&invalidFuture) s.DeleteLongRunningOperationState("test-resource", "test-service") }, @@ -99,7 +95,6 @@ func TestProcessOngoingOperation(t *testing.T) { resourceName: "test-resource", serviceName: "test-service", expect: func(s *mock_async.MockFutureScopeMockRecorder, c *mock_async.MockFutureHandlerMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.GetLongRunningOperationState("test-resource", "test-service").Return(&validDeleteFuture) c.IsDone(gomockinternal.AContext(), gomock.AssignableToTypeOf(&azureautorest.Future{})).Return(false, fakeError) }, @@ -110,7 +105,6 @@ func TestProcessOngoingOperation(t *testing.T) { resourceName: "test-resource", serviceName: "test-service", expect: func(s *mock_async.MockFutureScopeMockRecorder, c *mock_async.MockFutureHandlerMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.GetLongRunningOperationState("test-resource", "test-service").Return(&validDeleteFuture) 
c.IsDone(gomockinternal.AContext(), gomock.AssignableToTypeOf(&azureautorest.Future{})).Return(false, nil) }, @@ -122,7 +116,6 @@ func TestProcessOngoingOperation(t *testing.T) { resourceName: "test-resource", serviceName: "test-service", expect: func(s *mock_async.MockFutureScopeMockRecorder, c *mock_async.MockFutureHandlerMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.GetLongRunningOperationState("test-resource", "test-service").Return(&validDeleteFuture) c.IsDone(gomockinternal.AContext(), gomock.AssignableToTypeOf(&azureautorest.Future{})).Return(true, nil) s.DeleteLongRunningOperationState("test-resource", "test-service") @@ -174,7 +167,6 @@ func TestCreateResource(t *testing.T) { expectedError: "operation type PUT on Azure resource test-group/test-resource is not done. Object will be requeued after 15s", serviceName: "test-service", expect: func(s *mock_async.MockFutureScopeMockRecorder, c *mock_async.MockCreatorMockRecorder, r *mock_azure.MockResourceSpecGetterMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) r.ResourceName().Return("test-resource") r.ResourceGroupName().Return("test-group") s.GetLongRunningOperationState("test-resource", "test-service").Times(2).Return(&validCreateFuture) @@ -187,7 +179,6 @@ func TestCreateResource(t *testing.T) { expectedResult: "test-resource", serviceName: "test-service", expect: func(s *mock_async.MockFutureScopeMockRecorder, c *mock_async.MockCreatorMockRecorder, r *mock_azure.MockResourceSpecGetterMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) r.ResourceName().Return("test-resource") r.ResourceGroupName().Return("test-group") s.GetLongRunningOperationState("test-resource", "test-service").Return(nil) @@ -199,7 +190,6 @@ func TestCreateResource(t *testing.T) { expectedError: "failed to create resource test-group/test-resource (service: test-service)", serviceName: "test-service", expect: func(s 
*mock_async.MockFutureScopeMockRecorder, c *mock_async.MockCreatorMockRecorder, r *mock_azure.MockResourceSpecGetterMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) r.ResourceName().Return("test-resource") r.ResourceGroupName().Return("test-group") s.GetLongRunningOperationState("test-resource", "test-service").Return(nil) @@ -211,7 +201,6 @@ func TestCreateResource(t *testing.T) { expectedError: "operation type PUT on Azure resource test-group/test-resource is not done. Object will be requeued after 15s", serviceName: "test-service", expect: func(s *mock_async.MockFutureScopeMockRecorder, c *mock_async.MockCreatorMockRecorder, r *mock_azure.MockResourceSpecGetterMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) r.ResourceName().Return("test-resource") r.ResourceGroupName().Return("test-group") s.GetLongRunningOperationState("test-resource", "test-service").Return(nil) @@ -260,7 +249,6 @@ func TestDeleteResource(t *testing.T) { expectedError: "operation type DELETE on Azure resource test-group/test-resource is not done. 
Object will be requeued after 15s", serviceName: "test-service", expect: func(s *mock_async.MockFutureScopeMockRecorder, c *mock_async.MockDeleterMockRecorder, r *mock_azure.MockResourceSpecGetterMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) r.ResourceName().Return("test-resource") r.ResourceGroupName().Return("test-group") s.GetLongRunningOperationState("test-resource", "test-service").Times(2).Return(&validDeleteFuture) @@ -272,7 +260,6 @@ func TestDeleteResource(t *testing.T) { expectedError: "", serviceName: "test-service", expect: func(s *mock_async.MockFutureScopeMockRecorder, c *mock_async.MockDeleterMockRecorder, r *mock_azure.MockResourceSpecGetterMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) r.ResourceName().Return("test-resource") r.ResourceGroupName().Return("test-group") s.GetLongRunningOperationState("test-resource", "test-service").Return(nil) @@ -284,7 +271,6 @@ func TestDeleteResource(t *testing.T) { expectedError: "failed to delete resource test-group/test-resource (service: test-service)", serviceName: "test-service", expect: func(s *mock_async.MockFutureScopeMockRecorder, c *mock_async.MockDeleterMockRecorder, r *mock_azure.MockResourceSpecGetterMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) r.ResourceName().Return("test-resource") r.ResourceGroupName().Return("test-group") s.GetLongRunningOperationState("test-resource", "test-service").Return(nil) @@ -296,7 +282,6 @@ func TestDeleteResource(t *testing.T) { expectedError: "operation type DELETE on Azure resource test-group/test-resource is not done. 
Object will be requeued after 15s", serviceName: "test-service", expect: func(s *mock_async.MockFutureScopeMockRecorder, c *mock_async.MockDeleterMockRecorder, r *mock_azure.MockResourceSpecGetterMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) r.ResourceName().Return("test-resource") r.ResourceGroupName().Return("test-group") s.GetLongRunningOperationState("test-resource", "test-service").Return(nil) diff --git a/azure/services/async/interfaces.go b/azure/services/async/interfaces.go index b68b7b0cea4..21fb893b85a 100644 --- a/azure/services/async/interfaces.go +++ b/azure/services/async/interfaces.go @@ -20,13 +20,11 @@ import ( "context" azureautorest "github.com/Azure/go-autorest/autorest/azure" - "github.com/go-logr/logr" "sigs.k8s.io/cluster-api-provider-azure/azure" ) // FutureScope is a scope that can perform store futures and conditions in Status. type FutureScope interface { - logr.Logger azure.AsyncStatusUpdater } diff --git a/azure/services/async/mock_async/async_mock.go b/azure/services/async/mock_async/async_mock.go index de490f7564a..dd5595034f3 100644 --- a/azure/services/async/mock_async/async_mock.go +++ b/azure/services/async/mock_async/async_mock.go @@ -25,7 +25,6 @@ import ( reflect "reflect" azure "github.com/Azure/go-autorest/autorest/azure" - logr "github.com/go-logr/logr" gomock "github.com/golang/mock/gomock" v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" azure0 "sigs.k8s.io/cluster-api-provider-azure/azure" @@ -67,37 +66,6 @@ func (mr *MockFutureScopeMockRecorder) DeleteLongRunningOperationState(arg0, arg return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteLongRunningOperationState", reflect.TypeOf((*MockFutureScope)(nil).DeleteLongRunningOperationState), arg0, arg1) } -// Enabled mocks base method. 
-func (m *MockFutureScope) Enabled() bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Enabled") - ret0, _ := ret[0].(bool) - return ret0 -} - -// Enabled indicates an expected call of Enabled. -func (mr *MockFutureScopeMockRecorder) Enabled() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Enabled", reflect.TypeOf((*MockFutureScope)(nil).Enabled)) -} - -// Error mocks base method. -func (m *MockFutureScope) Error(err error, msg string, keysAndValues ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{err, msg} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Error", varargs...) -} - -// Error indicates an expected call of Error. -func (mr *MockFutureScopeMockRecorder) Error(err, msg interface{}, keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{err, msg}, keysAndValues...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Error", reflect.TypeOf((*MockFutureScope)(nil).Error), varargs...) -} - // GetLongRunningOperationState mocks base method. func (m *MockFutureScope) GetLongRunningOperationState(arg0, arg1 string) *v1beta1.Future { m.ctrl.T.Helper() @@ -112,23 +80,6 @@ func (mr *MockFutureScopeMockRecorder) GetLongRunningOperationState(arg0, arg1 i return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLongRunningOperationState", reflect.TypeOf((*MockFutureScope)(nil).GetLongRunningOperationState), arg0, arg1) } -// Info mocks base method. -func (m *MockFutureScope) Info(msg string, keysAndValues ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{msg} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Info", varargs...) -} - -// Info indicates an expected call of Info. 
-func (mr *MockFutureScopeMockRecorder) Info(msg interface{}, keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{msg}, keysAndValues...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Info", reflect.TypeOf((*MockFutureScope)(nil).Info), varargs...) -} - // SetLongRunningOperationState mocks base method. func (m *MockFutureScope) SetLongRunningOperationState(arg0 *v1beta1.Future) { m.ctrl.T.Helper() @@ -177,52 +128,6 @@ func (mr *MockFutureScopeMockRecorder) UpdatePutStatus(arg0, arg1, arg2 interfac return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdatePutStatus", reflect.TypeOf((*MockFutureScope)(nil).UpdatePutStatus), arg0, arg1, arg2) } -// V mocks base method. -func (m *MockFutureScope) V(level int) logr.Logger { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "V", level) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// V indicates an expected call of V. -func (mr *MockFutureScopeMockRecorder) V(level interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "V", reflect.TypeOf((*MockFutureScope)(nil).V), level) -} - -// WithName mocks base method. -func (m *MockFutureScope) WithName(name string) logr.Logger { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WithName", name) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// WithName indicates an expected call of WithName. -func (mr *MockFutureScopeMockRecorder) WithName(name interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithName", reflect.TypeOf((*MockFutureScope)(nil).WithName), name) -} - -// WithValues mocks base method. -func (m *MockFutureScope) WithValues(keysAndValues ...interface{}) logr.Logger { - m.ctrl.T.Helper() - varargs := []interface{}{} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "WithValues", varargs...) 
- ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// WithValues indicates an expected call of WithValues. -func (mr *MockFutureScopeMockRecorder) WithValues(keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithValues", reflect.TypeOf((*MockFutureScope)(nil).WithValues), keysAndValues...) -} - // MockFutureHandler is a mock of FutureHandler interface. type MockFutureHandler struct { ctrl *gomock.Controller diff --git a/azure/services/availabilitysets/availabilitysets.go b/azure/services/availabilitysets/availabilitysets.go index a6cfa35f434..a7952e1f588 100644 --- a/azure/services/availabilitysets/availabilitysets.go +++ b/azure/services/availabilitysets/availabilitysets.go @@ -22,9 +22,7 @@ import ( "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-04-01/compute" "github.com/Azure/go-autorest/autorest/to" - "github.com/go-logr/logr" "github.com/pkg/errors" - infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/azure/converters" @@ -34,7 +32,6 @@ import ( // AvailabilitySetScope defines the scope interface for a availability sets service. type AvailabilitySetScope interface { - logr.Logger azure.ClusterDescriber AvailabilitySet() (string, bool) } @@ -57,7 +54,7 @@ func New(scope AvailabilitySetScope, skuCache *resourceskus.Cache) *Service { // Reconcile creates or updates availability sets. 
func (s *Service) Reconcile(ctx context.Context) error { - ctx, _, done := tele.StartSpanWithLogger( + ctx, log, done := tele.StartSpanWithLogger( ctx, "availabilitysets.Service.Reconcile", ) @@ -83,7 +80,7 @@ func (s *Service) Reconcile(ctx context.Context) error { return errors.Wrap(err, "failed to determine max fault domain count") } - s.Scope.V(2).Info("creating availability set", "availability set", availabilitySetName) + log.V(2).Info("creating availability set", "availability set", availabilitySetName) asParams := compute.AvailabilitySet{ Sku: &compute.Sku{ @@ -107,14 +104,14 @@ func (s *Service) Reconcile(ctx context.Context) error { return errors.Wrapf(err, "failed to create availability set %s", availabilitySetName) } - s.Scope.V(2).Info("successfully created availability set", "availability set", availabilitySetName) + log.V(2).Info("successfully created availability set", "availability set", availabilitySetName) return nil } // Delete deletes availability sets. func (s *Service) Delete(ctx context.Context) error { - ctx, _, done := tele.StartSpanWithLogger(ctx, "availabilitysets.Service.Delete") + ctx, log, done := tele.StartSpanWithLogger(ctx, "availabilitysets.Service.Delete") defer done() availabilitySetName, ok := s.Scope.AvailabilitySet() @@ -137,7 +134,7 @@ func (s *Service) Delete(ctx context.Context) error { return nil } - s.Scope.V(2).Info("deleting availability set", "availability set", availabilitySetName) + log.V(2).Info("deleting availability set", "availability set", availabilitySetName) err = s.Client.Delete(ctx, s.Scope.ResourceGroup(), availabilitySetName) if err != nil && azure.ResourceNotFound(err) { // already deleted @@ -148,7 +145,7 @@ func (s *Service) Delete(ctx context.Context) error { return errors.Wrapf(err, "failed to delete availability set %s in resource group %s", availabilitySetName, s.Scope.ResourceGroup()) } - s.Scope.V(2).Info("successfully delete availability set", "availability set", availabilitySetName) + 
log.V(2).Info("successfully delete availability set", "availability set", availabilitySetName) return nil } diff --git a/azure/services/availabilitysets/availabilitysets_test.go b/azure/services/availabilitysets/availabilitysets_test.go index 7f08dd58dc2..d9b59c6f7a6 100644 --- a/azure/services/availabilitysets/availabilitysets_test.go +++ b/azure/services/availabilitysets/availabilitysets_test.go @@ -27,11 +27,9 @@ import ( "github.com/golang/mock/gomock" . "github.com/onsi/gomega" "k8s.io/utils/pointer" + "sigs.k8s.io/cluster-api-provider-azure/azure/services/availabilitysets/mock_availabilitysets" "sigs.k8s.io/cluster-api-provider-azure/azure/services/resourceskus" gomockinternal "sigs.k8s.io/cluster-api-provider-azure/internal/test/matchers/gomock" - - "k8s.io/klog/v2/klogr" - "sigs.k8s.io/cluster-api-provider-azure/azure/services/availabilitysets/mock_availabilitysets" ) func TestReconcileAvailabilitySets(t *testing.T) { @@ -45,7 +43,6 @@ func TestReconcileAvailabilitySets(t *testing.T) { name: "create or update availability set", expectedError: "", expect: func(s *mock_availabilitysets.MockAvailabilitySetScopeMockRecorder, m *mock_availabilitysets.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).MinTimes(2).Return(klogr.New()) s.AvailabilitySet().Return("as-name", true) s.ResourceGroup().Return("my-rg") s.ClusterName().Return("cl-name") @@ -92,7 +89,6 @@ func TestReconcileAvailabilitySets(t *testing.T) { name: "return error", expectedError: "failed to create availability set as-name: something went wrong", expect: func(s *mock_availabilitysets.MockAvailabilitySetScopeMockRecorder, m *mock_availabilitysets.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.AvailabilitySet().Return("as-name", true) s.ResourceGroup().Return("my-rg") s.ClusterName().Return("cl-name") @@ -159,7 +155,6 @@ func TestDeleteAvailabilitySets(t *testing.T) { name: "deletes availability set", expectedError: "", expect: func(s 
*mock_availabilitysets.MockAvailabilitySetScopeMockRecorder, m *mock_availabilitysets.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.AvailabilitySet().Return("as-name", true) s.ResourceGroup().Return("my-rg").Times(2) m.Get(gomockinternal.AContext(), "my-rg", "as-name"). @@ -189,7 +184,6 @@ func TestDeleteAvailabilitySets(t *testing.T) { name: "noop if availability set is already deleted - get returns 404", expectedError: "", expect: func(s *mock_availabilitysets.MockAvailabilitySetScopeMockRecorder, m *mock_availabilitysets.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.AvailabilitySet().Return("as-name", true) s.ResourceGroup().Return("my-rg") m.Get(gomockinternal.AContext(), "my-rg", "as-name").Return(compute.AvailabilitySet{}, @@ -200,7 +194,6 @@ func TestDeleteAvailabilitySets(t *testing.T) { name: "noop if availability set is already deleted - delete returns 404", expectedError: "", expect: func(s *mock_availabilitysets.MockAvailabilitySetScopeMockRecorder, m *mock_availabilitysets.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.AvailabilitySet().Return("as-name", true) s.ResourceGroup().Return("my-rg").Times(2) m.Get(gomockinternal.AContext(), "my-rg", "as-name").Return(compute.AvailabilitySet{}, nil) @@ -221,7 +214,6 @@ func TestDeleteAvailabilitySets(t *testing.T) { name: "returns error when delete fails", expectedError: "failed to delete availability set as-name in resource group my-rg: something went wrong", expect: func(s *mock_availabilitysets.MockAvailabilitySetScopeMockRecorder, m *mock_availabilitysets.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.AvailabilitySet().Return("as-name", true) s.ResourceGroup().Return("my-rg").Times(3) m.Get(gomockinternal.AContext(), "my-rg", "as-name").Return(compute.AvailabilitySet{}, nil) diff --git 
a/azure/services/availabilitysets/mock_availabilitysets/availabilitysets_mock.go b/azure/services/availabilitysets/mock_availabilitysets/availabilitysets_mock.go index dc3ab411970..baeb90044b7 100644 --- a/azure/services/availabilitysets/mock_availabilitysets/availabilitysets_mock.go +++ b/azure/services/availabilitysets/mock_availabilitysets/availabilitysets_mock.go @@ -24,7 +24,6 @@ import ( reflect "reflect" autorest "github.com/Azure/go-autorest/autorest" - logr "github.com/go-logr/logr" gomock "github.com/golang/mock/gomock" v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" ) @@ -193,37 +192,6 @@ func (mr *MockAvailabilitySetScopeMockRecorder) ClusterName() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClusterName", reflect.TypeOf((*MockAvailabilitySetScope)(nil).ClusterName)) } -// Enabled mocks base method. -func (m *MockAvailabilitySetScope) Enabled() bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Enabled") - ret0, _ := ret[0].(bool) - return ret0 -} - -// Enabled indicates an expected call of Enabled. -func (mr *MockAvailabilitySetScopeMockRecorder) Enabled() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Enabled", reflect.TypeOf((*MockAvailabilitySetScope)(nil).Enabled)) -} - -// Error mocks base method. -func (m *MockAvailabilitySetScope) Error(err error, msg string, keysAndValues ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{err, msg} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Error", varargs...) -} - -// Error indicates an expected call of Error. -func (mr *MockAvailabilitySetScopeMockRecorder) Error(err, msg interface{}, keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{err, msg}, keysAndValues...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Error", reflect.TypeOf((*MockAvailabilitySetScope)(nil).Error), varargs...) 
-} - // FailureDomains mocks base method. func (m *MockAvailabilitySetScope) FailureDomains() []string { m.ctrl.T.Helper() @@ -252,23 +220,6 @@ func (mr *MockAvailabilitySetScopeMockRecorder) HashKey() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HashKey", reflect.TypeOf((*MockAvailabilitySetScope)(nil).HashKey)) } -// Info mocks base method. -func (m *MockAvailabilitySetScope) Info(msg string, keysAndValues ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{msg} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Info", varargs...) -} - -// Info indicates an expected call of Info. -func (mr *MockAvailabilitySetScopeMockRecorder) Info(msg interface{}, keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{msg}, keysAndValues...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Info", reflect.TypeOf((*MockAvailabilitySetScope)(nil).Info), varargs...) -} - // Location mocks base method. func (m *MockAvailabilitySetScope) Location() string { m.ctrl.T.Helper() @@ -324,49 +275,3 @@ func (mr *MockAvailabilitySetScopeMockRecorder) TenantID() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TenantID", reflect.TypeOf((*MockAvailabilitySetScope)(nil).TenantID)) } - -// V mocks base method. -func (m *MockAvailabilitySetScope) V(level int) logr.Logger { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "V", level) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// V indicates an expected call of V. -func (mr *MockAvailabilitySetScopeMockRecorder) V(level interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "V", reflect.TypeOf((*MockAvailabilitySetScope)(nil).V), level) -} - -// WithName mocks base method. 
-func (m *MockAvailabilitySetScope) WithName(name string) logr.Logger { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WithName", name) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// WithName indicates an expected call of WithName. -func (mr *MockAvailabilitySetScopeMockRecorder) WithName(name interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithName", reflect.TypeOf((*MockAvailabilitySetScope)(nil).WithName), name) -} - -// WithValues mocks base method. -func (m *MockAvailabilitySetScope) WithValues(keysAndValues ...interface{}) logr.Logger { - m.ctrl.T.Helper() - varargs := []interface{}{} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "WithValues", varargs...) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// WithValues indicates an expected call of WithValues. -func (mr *MockAvailabilitySetScopeMockRecorder) WithValues(keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithValues", reflect.TypeOf((*MockAvailabilitySetScope)(nil).WithValues), keysAndValues...) 
-} diff --git a/azure/services/bastionhosts/azurebastion.go b/azure/services/bastionhosts/azurebastion.go index eca7c33abc7..6ddc1bbfd1b 100644 --- a/azure/services/bastionhosts/azurebastion.go +++ b/azure/services/bastionhosts/azurebastion.go @@ -24,6 +24,7 @@ import ( "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network" "github.com/Azure/go-autorest/autorest/to" "github.com/pkg/errors" + "sigs.k8s.io/cluster-api-provider-azure/util/tele" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" @@ -31,19 +32,22 @@ import ( ) func (s *Service) ensureAzureBastion(ctx context.Context, azureBastionSpec azure.AzureBastionSpec) error { - s.Scope.V(2).Info("getting azure bastion public IP", "publicIP", azureBastionSpec.PublicIPName) + ctx, log, done := tele.StartSpanWithLogger(ctx, "bastionhosts.Service.ensureAzureBastion") + defer done() + + log.V(2).Info("getting azure bastion public IP", "publicIP", azureBastionSpec.PublicIPName) publicIP, err := s.publicIPsClient.Get(ctx, s.Scope.ResourceGroup(), azureBastionSpec.PublicIPName) if err != nil { return errors.Wrap(err, "failed to get public IP for azure bastion") } - s.Scope.V(2).Info("getting azure bastion subnet", "subnet", azureBastionSpec.SubnetSpec) + log.V(2).Info("getting azure bastion subnet", "subnet", azureBastionSpec.SubnetSpec) subnet, err := s.subnetsClient.Get(ctx, s.Scope.ResourceGroup(), azureBastionSpec.VNetName, azureBastionSpec.SubnetSpec.Name) if err != nil { return errors.Wrap(err, "failed to get subnet for azure bastion") } - s.Scope.V(2).Info("creating bastion host", "bastion", azureBastionSpec.Name) + log.V(2).Info("creating bastion host", "bastion", azureBastionSpec.Name) bastionHostIPConfigName := fmt.Sprintf("%s-%s", azureBastionSpec.Name, "bastionIP") err = s.client.CreateOrUpdate( ctx, @@ -81,12 +85,15 @@ func (s *Service) ensureAzureBastion(ctx context.Context, azureBastionSpec azure return errors.Wrap(err, "cannot 
create Azure Bastion") } - s.Scope.V(2).Info("successfully created bastion host", "bastion", azureBastionSpec.Name) + log.V(2).Info("successfully created bastion host", "bastion", azureBastionSpec.Name) return nil } func (s *Service) ensureAzureBastionDeleted(ctx context.Context, azureBastionSpec azure.AzureBastionSpec) error { - s.Scope.V(2).Info("deleting bastion host", "bastion", azureBastionSpec.Name) + ctx, log, done := tele.StartSpanWithLogger(ctx, "bastionhosts.Service.ensureAzureBastionDeleted") + defer done() + + log.V(2).Info("deleting bastion host", "bastion", azureBastionSpec.Name) err := s.client.Delete(ctx, s.Scope.ResourceGroup(), azureBastionSpec.Name) if err != nil && azure.ResourceNotFound(err) { @@ -95,7 +102,7 @@ func (s *Service) ensureAzureBastionDeleted(ctx context.Context, azureBastionSpe return errors.Wrapf(err, "failed to delete Azure Bastion %s in resource group %s", azureBastionSpec.Name, s.Scope.ResourceGroup()) } - s.Scope.V(2).Info("successfully deleted bastion host", "bastion", azureBastionSpec.Name) + log.V(2).Info("successfully deleted bastion host", "bastion", azureBastionSpec.Name) return nil } diff --git a/azure/services/bastionhosts/bastionhosts.go b/azure/services/bastionhosts/bastionhosts.go index d7b34305584..c365ec9cd6b 100644 --- a/azure/services/bastionhosts/bastionhosts.go +++ b/azure/services/bastionhosts/bastionhosts.go @@ -19,11 +19,8 @@ package bastionhosts import ( "context" - "github.com/go-logr/logr" "github.com/pkg/errors" - "sigs.k8s.io/cluster-api-provider-azure/azure" - "sigs.k8s.io/cluster-api-provider-azure/azure/services/publicips" "sigs.k8s.io/cluster-api-provider-azure/azure/services/subnets" "sigs.k8s.io/cluster-api-provider-azure/util/tele" @@ -31,7 +28,6 @@ import ( // BastionScope defines the scope interface for a bastion host service. 
type BastionScope interface { - logr.Logger azure.ClusterDescriber azure.NetworkDescriber BastionSpec() azure.BastionSpec @@ -57,10 +53,7 @@ func New(scope BastionScope) *Service { // Reconcile gets/creates/updates a bastion host. func (s *Service) Reconcile(ctx context.Context) error { - ctx, _, done := tele.StartSpanWithLogger( - ctx, - "bastionhosts.Service.Reconcile", - ) + ctx, _, done := tele.StartSpanWithLogger(ctx, "bastionhosts.Service.Reconcile") defer done() azureBastionSpec := s.Scope.BastionSpec().AzureBastion diff --git a/azure/services/bastionhosts/bastionhosts_test.go b/azure/services/bastionhosts/bastionhosts_test.go index f6c6fb986a6..b101ab17fb6 100644 --- a/azure/services/bastionhosts/bastionhosts_test.go +++ b/azure/services/bastionhosts/bastionhosts_test.go @@ -21,21 +21,17 @@ import ( "net/http" "testing" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network" + "github.com/Azure/go-autorest/autorest" + "github.com/golang/mock/gomock" . "github.com/onsi/gomega" - + "k8s.io/client-go/kubernetes/scheme" "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" mock_bastionhosts "sigs.k8s.io/cluster-api-provider-azure/azure/services/bastionhosts/mocks_bastionhosts" "sigs.k8s.io/cluster-api-provider-azure/azure/services/publicips/mock_publicips" "sigs.k8s.io/cluster-api-provider-azure/azure/services/subnets/mock_subnets" gomockinternal "sigs.k8s.io/cluster-api-provider-azure/internal/test/matchers/gomock" - - "github.com/Azure/go-autorest/autorest" - "github.com/golang/mock/gomock" - - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network" - "k8s.io/client-go/kubernetes/scheme" - "k8s.io/klog/v2/klogr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) @@ -59,7 +55,6 @@ func TestReconcileBastionHosts(t *testing.T) { m *mock_bastionhosts.MockclientMockRecorder, mSubnet *mock_subnets.MockClientMockRecorder, mPublicIP *mock_publicips.MockClientMockRecorder) { - 
s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.BastionSpec().Return(azure.BastionSpec{ AzureBastion: &azure.AzureBastionSpec{ Name: "my-bastion", @@ -81,7 +76,6 @@ func TestReconcileBastionHosts(t *testing.T) { m *mock_bastionhosts.MockclientMockRecorder, mSubnet *mock_subnets.MockClientMockRecorder, mPublicIP *mock_publicips.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.BastionSpec().Return(azure.BastionSpec{ AzureBastion: &azure.AzureBastionSpec{ Name: "my-bastion", @@ -107,7 +101,6 @@ func TestReconcileBastionHosts(t *testing.T) { m *mock_bastionhosts.MockclientMockRecorder, mSubnet *mock_subnets.MockClientMockRecorder, mPublicIP *mock_publicips.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.BastionSpec().Return(azure.BastionSpec{ AzureBastion: &azure.AzureBastionSpec{ Name: "my-bastion", @@ -135,7 +128,6 @@ func TestReconcileBastionHosts(t *testing.T) { m *mock_bastionhosts.MockclientMockRecorder, mSubnet *mock_subnets.MockClientMockRecorder, mPublicIP *mock_publicips.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.BastionSpec().Return(azure.BastionSpec{ AzureBastion: &azure.AzureBastionSpec{ Name: "my-bastion", @@ -207,7 +199,6 @@ func TestDeleteBastionHost(t *testing.T) { m *mock_bastionhosts.MockclientMockRecorder, mSubnet *mock_subnets.MockClientMockRecorder, mPublicIP *mock_publicips.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.BastionSpec().Return(azure.BastionSpec{ AzureBastion: &azure.AzureBastionSpec{ Name: "my-bastionhost", @@ -230,7 +221,6 @@ func TestDeleteBastionHost(t *testing.T) { m *mock_bastionhosts.MockclientMockRecorder, mSubnet *mock_subnets.MockClientMockRecorder, mPublicIP *mock_publicips.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.BastionSpec().Return(azure.BastionSpec{ 
AzureBastion: &azure.AzureBastionSpec{ Name: "my-bastionhost", @@ -254,7 +244,6 @@ func TestDeleteBastionHost(t *testing.T) { m *mock_bastionhosts.MockclientMockRecorder, mSubnet *mock_subnets.MockClientMockRecorder, mPublicIP *mock_publicips.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.BastionSpec().Return(azure.BastionSpec{ AzureBastion: &azure.AzureBastionSpec{ Name: "my-bastionhost", diff --git a/azure/services/bastionhosts/mocks_bastionhosts/bastionhosts_mock.go b/azure/services/bastionhosts/mocks_bastionhosts/bastionhosts_mock.go index aca6e9d9eba..0e88cd26deb 100644 --- a/azure/services/bastionhosts/mocks_bastionhosts/bastionhosts_mock.go +++ b/azure/services/bastionhosts/mocks_bastionhosts/bastionhosts_mock.go @@ -24,7 +24,6 @@ import ( reflect "reflect" autorest "github.com/Azure/go-autorest/autorest" - logr "github.com/go-logr/logr" gomock "github.com/golang/mock/gomock" v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" azure "sigs.k8s.io/cluster-api-provider-azure/azure" @@ -249,37 +248,6 @@ func (mr *MockBastionScopeMockRecorder) ControlPlaneSubnet() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ControlPlaneSubnet", reflect.TypeOf((*MockBastionScope)(nil).ControlPlaneSubnet)) } -// Enabled mocks base method. -func (m *MockBastionScope) Enabled() bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Enabled") - ret0, _ := ret[0].(bool) - return ret0 -} - -// Enabled indicates an expected call of Enabled. -func (mr *MockBastionScopeMockRecorder) Enabled() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Enabled", reflect.TypeOf((*MockBastionScope)(nil).Enabled)) -} - -// Error mocks base method. 
-func (m *MockBastionScope) Error(err error, msg string, keysAndValues ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{err, msg} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Error", varargs...) -} - -// Error indicates an expected call of Error. -func (mr *MockBastionScopeMockRecorder) Error(err, msg interface{}, keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{err, msg}, keysAndValues...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Error", reflect.TypeOf((*MockBastionScope)(nil).Error), varargs...) -} - // FailureDomains mocks base method. func (m *MockBastionScope) FailureDomains() []string { m.ctrl.T.Helper() @@ -322,23 +290,6 @@ func (mr *MockBastionScopeMockRecorder) HashKey() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HashKey", reflect.TypeOf((*MockBastionScope)(nil).HashKey)) } -// Info mocks base method. -func (m *MockBastionScope) Info(msg string, keysAndValues ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{msg} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Info", varargs...) -} - -// Info indicates an expected call of Info. -func (mr *MockBastionScopeMockRecorder) Info(msg interface{}, keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{msg}, keysAndValues...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Info", reflect.TypeOf((*MockBastionScope)(nil).Info), varargs...) -} - // IsAPIServerPrivate mocks base method. func (m *MockBastionScope) IsAPIServerPrivate() bool { m.ctrl.T.Helper() @@ -519,20 +470,6 @@ func (mr *MockBastionScopeMockRecorder) TenantID() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TenantID", reflect.TypeOf((*MockBastionScope)(nil).TenantID)) } -// V mocks base method. 
-func (m *MockBastionScope) V(level int) logr.Logger { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "V", level) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// V indicates an expected call of V. -func (mr *MockBastionScopeMockRecorder) V(level interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "V", reflect.TypeOf((*MockBastionScope)(nil).V), level) -} - // Vnet mocks base method. func (m *MockBastionScope) Vnet() *v1beta1.VnetSpec { m.ctrl.T.Helper() @@ -546,35 +483,3 @@ func (mr *MockBastionScopeMockRecorder) Vnet() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Vnet", reflect.TypeOf((*MockBastionScope)(nil).Vnet)) } - -// WithName mocks base method. -func (m *MockBastionScope) WithName(name string) logr.Logger { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WithName", name) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// WithName indicates an expected call of WithName. -func (mr *MockBastionScopeMockRecorder) WithName(name interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithName", reflect.TypeOf((*MockBastionScope)(nil).WithName), name) -} - -// WithValues mocks base method. -func (m *MockBastionScope) WithValues(keysAndValues ...interface{}) logr.Logger { - m.ctrl.T.Helper() - varargs := []interface{}{} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "WithValues", varargs...) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// WithValues indicates an expected call of WithValues. -func (mr *MockBastionScopeMockRecorder) WithValues(keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithValues", reflect.TypeOf((*MockBastionScope)(nil).WithValues), keysAndValues...) 
-} diff --git a/azure/services/disks/disks.go b/azure/services/disks/disks.go index c415c124e71..a37b4e1653d 100644 --- a/azure/services/disks/disks.go +++ b/azure/services/disks/disks.go @@ -19,8 +19,6 @@ package disks import ( "context" - "github.com/go-logr/logr" - infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/azure/services/async" @@ -32,7 +30,6 @@ const serviceName = "disks" // DiskScope defines the scope interface for a disk service. type DiskScope interface { - logr.Logger azure.ClusterDescriber azure.AsyncStatusUpdater DiskSpecs() []azure.ResourceSpecGetter diff --git a/azure/services/disks/disks_test.go b/azure/services/disks/disks_test.go index 88947d99469..2e170aa5cea 100644 --- a/azure/services/disks/disks_test.go +++ b/azure/services/disks/disks_test.go @@ -25,7 +25,7 @@ import ( "github.com/Azure/go-autorest/autorest" "github.com/golang/mock/gomock" . "github.com/onsi/gomega" - "k8s.io/klog/v2/klogr" + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/azure/services/disks/mock_disks" @@ -62,7 +62,6 @@ func TestDeleteDisk(t *testing.T) { name: "delete the disk", expectedError: "", expect: func(s *mock_disks.MockDiskScopeMockRecorder, m *mock_disks.MockclientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.DiskSpecs().Return(fakeDiskSpecs) gomock.InOrder( s.GetLongRunningOperationState("my-disk-1", serviceName), @@ -77,7 +76,6 @@ func TestDeleteDisk(t *testing.T) { name: "disk already deleted", expectedError: "", expect: func(s *mock_disks.MockDiskScopeMockRecorder, m *mock_disks.MockclientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.DiskSpecs().Return(fakeDiskSpecs) gomock.InOrder( s.GetLongRunningOperationState("my-disk-1", serviceName), @@ -92,7 +90,6 @@ func TestDeleteDisk(t 
*testing.T) { name: "error while trying to delete the disk", expectedError: "failed to delete resource my-group/my-disk-1 (service: disks): #: Internal Server Error: StatusCode=500", expect: func(s *mock_disks.MockDiskScopeMockRecorder, m *mock_disks.MockclientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.DiskSpecs().Return(fakeDiskSpecs) gomock.InOrder( s.GetLongRunningOperationState("my-disk-1", serviceName), diff --git a/azure/services/disks/mock_disks/disks_mock.go b/azure/services/disks/mock_disks/disks_mock.go index f037c1bfc60..0d6e14b30bb 100644 --- a/azure/services/disks/mock_disks/disks_mock.go +++ b/azure/services/disks/mock_disks/disks_mock.go @@ -24,7 +24,6 @@ import ( reflect "reflect" autorest "github.com/Azure/go-autorest/autorest" - logr "github.com/go-logr/logr" gomock "github.com/golang/mock/gomock" v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" azure "sigs.k8s.io/cluster-api-provider-azure/azure" @@ -206,37 +205,6 @@ func (mr *MockDiskScopeMockRecorder) DiskSpecs() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DiskSpecs", reflect.TypeOf((*MockDiskScope)(nil).DiskSpecs)) } -// Enabled mocks base method. -func (m *MockDiskScope) Enabled() bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Enabled") - ret0, _ := ret[0].(bool) - return ret0 -} - -// Enabled indicates an expected call of Enabled. -func (mr *MockDiskScopeMockRecorder) Enabled() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Enabled", reflect.TypeOf((*MockDiskScope)(nil).Enabled)) -} - -// Error mocks base method. -func (m *MockDiskScope) Error(err error, msg string, keysAndValues ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{err, msg} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Error", varargs...) -} - -// Error indicates an expected call of Error. 
-func (mr *MockDiskScopeMockRecorder) Error(err, msg interface{}, keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{err, msg}, keysAndValues...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Error", reflect.TypeOf((*MockDiskScope)(nil).Error), varargs...) -} - // FailureDomains mocks base method. func (m *MockDiskScope) FailureDomains() []string { m.ctrl.T.Helper() @@ -279,23 +247,6 @@ func (mr *MockDiskScopeMockRecorder) HashKey() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HashKey", reflect.TypeOf((*MockDiskScope)(nil).HashKey)) } -// Info mocks base method. -func (m *MockDiskScope) Info(msg string, keysAndValues ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{msg} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Info", varargs...) -} - -// Info indicates an expected call of Info. -func (mr *MockDiskScopeMockRecorder) Info(msg interface{}, keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{msg}, keysAndValues...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Info", reflect.TypeOf((*MockDiskScope)(nil).Info), varargs...) -} - // Location mocks base method. func (m *MockDiskScope) Location() string { m.ctrl.T.Helper() @@ -399,49 +350,3 @@ func (mr *MockDiskScopeMockRecorder) UpdatePutStatus(arg0, arg1, arg2 interface{ mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdatePutStatus", reflect.TypeOf((*MockDiskScope)(nil).UpdatePutStatus), arg0, arg1, arg2) } - -// V mocks base method. -func (m *MockDiskScope) V(level int) logr.Logger { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "V", level) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// V indicates an expected call of V. 
-func (mr *MockDiskScopeMockRecorder) V(level interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "V", reflect.TypeOf((*MockDiskScope)(nil).V), level) -} - -// WithName mocks base method. -func (m *MockDiskScope) WithName(name string) logr.Logger { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WithName", name) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// WithName indicates an expected call of WithName. -func (mr *MockDiskScopeMockRecorder) WithName(name interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithName", reflect.TypeOf((*MockDiskScope)(nil).WithName), name) -} - -// WithValues mocks base method. -func (m *MockDiskScope) WithValues(keysAndValues ...interface{}) logr.Logger { - m.ctrl.T.Helper() - varargs := []interface{}{} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "WithValues", varargs...) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// WithValues indicates an expected call of WithValues. -func (mr *MockDiskScopeMockRecorder) WithValues(keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithValues", reflect.TypeOf((*MockDiskScope)(nil).WithValues), keysAndValues...) -} diff --git a/azure/services/groups/groups.go b/azure/services/groups/groups.go index f86901fb6ae..7f3d33deb95 100644 --- a/azure/services/groups/groups.go +++ b/azure/services/groups/groups.go @@ -19,9 +19,7 @@ package groups import ( "context" - "github.com/go-logr/logr" "github.com/pkg/errors" - infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/azure/converters" @@ -40,7 +38,6 @@ type Service struct { // GroupScope defines the scope interface for a group service. 
type GroupScope interface { - logr.Logger azure.Authorizer azure.AsyncStatusUpdater GroupSpec() azure.ResourceSpecGetter @@ -72,7 +69,7 @@ func (s *Service) Reconcile(ctx context.Context) error { // Delete deletes the resource group if it is managed by capz. func (s *Service) Delete(ctx context.Context) error { - ctx, _, done := tele.StartSpanWithLogger(ctx, "groups.Service.Delete") + ctx, log, done := tele.StartSpanWithLogger(ctx, "groups.Service.Delete") defer done() ctx, cancel := context.WithTimeout(ctx, reconciler.DefaultAzureServiceReconcileTimeout) @@ -92,7 +89,7 @@ func (s *Service) Delete(ctx context.Context) error { return errors.Wrap(err, "could not get resource group management state") } if !managed { - s.Scope.V(2).Info("Should not delete resource group in unmanaged mode") + log.V(2).Info("Should not delete resource group in unmanaged mode") return azure.ErrNotOwned } diff --git a/azure/services/groups/groups_test.go b/azure/services/groups/groups_test.go index 02e6650c2dc..2cd47e02d88 100644 --- a/azure/services/groups/groups_test.go +++ b/azure/services/groups/groups_test.go @@ -29,8 +29,6 @@ import ( "github.com/Azure/go-autorest/autorest/to" "github.com/golang/mock/gomock" . 
"github.com/onsi/gomega" - "k8s.io/klog/v2/klogr" - infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/azure/services/groups/mock_groups" @@ -78,7 +76,6 @@ func TestReconcileGroups(t *testing.T) { name: "create group succeeds", expectedError: "", expect: func(s *mock_groups.MockGroupScopeMockRecorder, m *mock_groups.MockclientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.GroupSpec().Return(&fakeGroupSpec) s.GetLongRunningOperationState("test-group", serviceName) m.CreateOrUpdateAsync(gomockinternal.AContext(), &fakeGroupSpec).Return(nil, nil, nil) @@ -89,7 +86,6 @@ func TestReconcileGroups(t *testing.T) { name: "create resource group fails", expectedError: "failed to create resource test-group/test-group (service: group): #: Internal Server Error: StatusCode=500", expect: func(s *mock_groups.MockGroupScopeMockRecorder, m *mock_groups.MockclientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.GroupSpec().Return(&fakeGroupSpec) s.GetLongRunningOperationState("test-group", serviceName) m.CreateOrUpdateAsync(gomockinternal.AContext(), &fakeGroupSpec).Return(nil, nil, internalError) @@ -137,7 +133,6 @@ func TestDeleteGroups(t *testing.T) { name: "long running delete operation is done", expectedError: "", expect: func(s *mock_groups.MockGroupScopeMockRecorder, m *mock_groups.MockclientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.GroupSpec().AnyTimes().Return(&fakeGroupSpec) m.Get(gomockinternal.AContext(), "test-group").Return(sampleManagedGroup, nil) s.ClusterName().Return("test-cluster") @@ -152,7 +147,6 @@ func TestDeleteGroups(t *testing.T) { name: "long running delete operation is not done", expectedError: "operation type DELETE on Azure resource test-group/test-group is not done. 
Object will be requeued after 15s", expect: func(s *mock_groups.MockGroupScopeMockRecorder, m *mock_groups.MockclientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.GroupSpec().AnyTimes().Return(&fakeGroupSpec) m.Get(gomockinternal.AContext(), "test-group").Return(sampleManagedGroup, nil) s.ClusterName().Return("test-cluster") @@ -165,7 +159,6 @@ func TestDeleteGroups(t *testing.T) { name: "resource group is not managed by capz", expectedError: azure.ErrNotOwned.Error(), expect: func(s *mock_groups.MockGroupScopeMockRecorder, m *mock_groups.MockclientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.GroupSpec().AnyTimes().Return(&fakeGroupSpec) m.Get(gomockinternal.AContext(), "test-group").Return(sampleBYOGroup, nil) s.ClusterName().Return("test-cluster") @@ -175,7 +168,6 @@ func TestDeleteGroups(t *testing.T) { name: "fail to check if resource group is managed", expectedError: "could not get resource group management state", expect: func(s *mock_groups.MockGroupScopeMockRecorder, m *mock_groups.MockclientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.GroupSpec().AnyTimes().Return(&fakeGroupSpec) m.Get(gomockinternal.AContext(), "test-group").Return(resources.Group{}, internalError) }, @@ -184,7 +176,6 @@ func TestDeleteGroups(t *testing.T) { name: "resource group doesn't exist", expectedError: "", expect: func(s *mock_groups.MockGroupScopeMockRecorder, m *mock_groups.MockclientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.GroupSpec().AnyTimes().Return(&fakeGroupSpec) m.Get(gomockinternal.AContext(), "test-group").Return(resources.Group{}, notFoundError) s.DeleteLongRunningOperationState("test-group", serviceName) @@ -195,7 +186,6 @@ func TestDeleteGroups(t *testing.T) { name: "error occurs when deleting resource group", expectedError: "failed to delete resource test-group/test-group (service: group): #: Internal 
Server Error: StatusCode=500", expect: func(s *mock_groups.MockGroupScopeMockRecorder, m *mock_groups.MockclientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.GroupSpec().AnyTimes().Return(&fakeGroupSpec) s.GetLongRunningOperationState("test-group", serviceName).Return(nil) m.Get(gomockinternal.AContext(), "test-group").Return(sampleManagedGroup, nil) @@ -208,7 +198,6 @@ func TestDeleteGroups(t *testing.T) { name: "context deadline exceeded while deleting resource group", expectedError: "operation type DELETE on Azure resource test-group/test-group is not done. Object will be requeued after 15s", expect: func(s *mock_groups.MockGroupScopeMockRecorder, m *mock_groups.MockclientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.GroupSpec().AnyTimes().Return(&fakeGroupSpec) s.GetLongRunningOperationState("test-group", serviceName).Return(nil) m.Get(gomockinternal.AContext(), "test-group").Return(sampleManagedGroup, nil) @@ -222,7 +211,6 @@ func TestDeleteGroups(t *testing.T) { name: "delete the resource group successfully", expectedError: "", expect: func(s *mock_groups.MockGroupScopeMockRecorder, m *mock_groups.MockclientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.GroupSpec().AnyTimes().Return(&fakeGroupSpec) s.GetLongRunningOperationState("test-group", serviceName).Return(nil) m.Get(gomockinternal.AContext(), "test-group").Return(sampleManagedGroup, nil) diff --git a/azure/services/groups/mock_groups/groups_mock.go b/azure/services/groups/mock_groups/groups_mock.go index 32455bf73e3..6f109b0e4f7 100644 --- a/azure/services/groups/mock_groups/groups_mock.go +++ b/azure/services/groups/mock_groups/groups_mock.go @@ -24,7 +24,6 @@ import ( reflect "reflect" autorest "github.com/Azure/go-autorest/autorest" - logr "github.com/go-logr/logr" gomock "github.com/golang/mock/gomock" v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" azure 
"sigs.k8s.io/cluster-api-provider-azure/azure" @@ -150,37 +149,6 @@ func (mr *MockGroupScopeMockRecorder) DeleteLongRunningOperationState(arg0, arg1 return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteLongRunningOperationState", reflect.TypeOf((*MockGroupScope)(nil).DeleteLongRunningOperationState), arg0, arg1) } -// Enabled mocks base method. -func (m *MockGroupScope) Enabled() bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Enabled") - ret0, _ := ret[0].(bool) - return ret0 -} - -// Enabled indicates an expected call of Enabled. -func (mr *MockGroupScopeMockRecorder) Enabled() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Enabled", reflect.TypeOf((*MockGroupScope)(nil).Enabled)) -} - -// Error mocks base method. -func (m *MockGroupScope) Error(err error, msg string, keysAndValues ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{err, msg} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Error", varargs...) -} - -// Error indicates an expected call of Error. -func (mr *MockGroupScopeMockRecorder) Error(err, msg interface{}, keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{err, msg}, keysAndValues...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Error", reflect.TypeOf((*MockGroupScope)(nil).Error), varargs...) -} - // GetLongRunningOperationState mocks base method. func (m *MockGroupScope) GetLongRunningOperationState(arg0, arg1 string) *v1beta1.Future { m.ctrl.T.Helper() @@ -223,23 +191,6 @@ func (mr *MockGroupScopeMockRecorder) HashKey() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HashKey", reflect.TypeOf((*MockGroupScope)(nil).HashKey)) } -// Info mocks base method. 
-func (m *MockGroupScope) Info(msg string, keysAndValues ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{msg} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Info", varargs...) -} - -// Info indicates an expected call of Info. -func (mr *MockGroupScopeMockRecorder) Info(msg interface{}, keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{msg}, keysAndValues...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Info", reflect.TypeOf((*MockGroupScope)(nil).Info), varargs...) -} - // SetLongRunningOperationState mocks base method. func (m *MockGroupScope) SetLongRunningOperationState(arg0 *v1beta1.Future) { m.ctrl.T.Helper() @@ -315,49 +266,3 @@ func (mr *MockGroupScopeMockRecorder) UpdatePutStatus(arg0, arg1, arg2 interface mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdatePutStatus", reflect.TypeOf((*MockGroupScope)(nil).UpdatePutStatus), arg0, arg1, arg2) } - -// V mocks base method. -func (m *MockGroupScope) V(level int) logr.Logger { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "V", level) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// V indicates an expected call of V. -func (mr *MockGroupScopeMockRecorder) V(level interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "V", reflect.TypeOf((*MockGroupScope)(nil).V), level) -} - -// WithName mocks base method. -func (m *MockGroupScope) WithName(name string) logr.Logger { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WithName", name) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// WithName indicates an expected call of WithName. -func (mr *MockGroupScopeMockRecorder) WithName(name interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithName", reflect.TypeOf((*MockGroupScope)(nil).WithName), name) -} - -// WithValues mocks base method. 
-func (m *MockGroupScope) WithValues(keysAndValues ...interface{}) logr.Logger { - m.ctrl.T.Helper() - varargs := []interface{}{} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "WithValues", varargs...) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// WithValues indicates an expected call of WithValues. -func (mr *MockGroupScopeMockRecorder) WithValues(keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithValues", reflect.TypeOf((*MockGroupScope)(nil).WithValues), keysAndValues...) -} diff --git a/azure/services/inboundnatrules/inboundnatrules.go b/azure/services/inboundnatrules/inboundnatrules.go index edbf11d08f3..553af9724ea 100644 --- a/azure/services/inboundnatrules/inboundnatrules.go +++ b/azure/services/inboundnatrules/inboundnatrules.go @@ -21,9 +21,7 @@ import ( "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network" "github.com/Azure/go-autorest/autorest/to" - "github.com/go-logr/logr" "github.com/pkg/errors" - "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/azure/services/loadbalancers" "sigs.k8s.io/cluster-api-provider-azure/util/tele" @@ -31,7 +29,6 @@ import ( // InboundNatScope defines the scope interface for an inbound NAT service. type InboundNatScope interface { - logr.Logger azure.ClusterDescriber InboundNatSpecs() []azure.InboundNatSpec } @@ -54,11 +51,11 @@ func New(scope InboundNatScope) *Service { // Reconcile gets/creates/updates an inbound NAT rule. 
func (s *Service) Reconcile(ctx context.Context) error { - ctx, _, done := tele.StartSpanWithLogger(ctx, "inboundnatrules.Service.Reconcile") + ctx, log, done := tele.StartSpanWithLogger(ctx, "inboundnatrules.Service.Reconcile") defer done() for _, inboundNatSpec := range s.Scope.InboundNatSpecs() { - s.Scope.V(2).Info("creating inbound NAT rule", "NAT rule", inboundNatSpec.Name) + log.V(2).Info("creating inbound NAT rule", "NAT rule", inboundNatSpec.Name) lb, err := s.loadBalancersClient.Get(ctx, s.Scope.ResourceGroup(), inboundNatSpec.LoadBalancerName) if err != nil { @@ -70,12 +67,12 @@ func (s *Service) Reconcile(ctx context.Context) error { } ports := make(map[int32]struct{}) - if s.natRuleExists(ports)(*lb.InboundNatRules, inboundNatSpec.Name) { + if s.natRuleExists(ports)(ctx, *lb.InboundNatRules, inboundNatSpec.Name) { // Inbound NAT Rule already exists, nothing to do here. continue } - sshFrontendPort, err := s.getAvailablePort(ports) + sshFrontendPort, err := s.getAvailablePort(ctx, ports) if err != nil { return errors.Wrapf(err, "failed to find available SSH Frontend port for NAT Rule %s in load balancer %s", inboundNatSpec.Name, to.String(lb.Name)) } @@ -93,40 +90,43 @@ func (s *Service) Reconcile(ctx context.Context) error { FrontendPort: &sshFrontendPort, }, } - s.Scope.V(3).Info("Creating rule %s using port %d", "NAT rule", inboundNatSpec.Name, "port", sshFrontendPort) + log.V(3).Info("Creating rule %s using port %d", "NAT rule", inboundNatSpec.Name, "port", sshFrontendPort) err = s.client.CreateOrUpdate(ctx, s.Scope.ResourceGroup(), to.String(lb.Name), inboundNatSpec.Name, rule) if err != nil { return errors.Wrapf(err, "failed to create inbound NAT rule %s", inboundNatSpec.Name) } - s.Scope.V(2).Info("successfully created inbound NAT rule", "NAT rule", inboundNatSpec.Name) + log.V(2).Info("successfully created inbound NAT rule", "NAT rule", inboundNatSpec.Name) } return nil } // Delete deletes the inbound NAT rule with the provided name. 
func (s *Service) Delete(ctx context.Context) error { - ctx, _, done := tele.StartSpanWithLogger(ctx, "inboundnatrules.Service.Delete") + ctx, log, done := tele.StartSpanWithLogger(ctx, "inboundnatrules.Service.Delete") defer done() for _, inboundNatSpec := range s.Scope.InboundNatSpecs() { - s.Scope.V(2).Info("deleting inbound NAT rule", "NAT rule", inboundNatSpec.Name) + log.V(2).Info("deleting inbound NAT rule", "NAT rule", inboundNatSpec.Name) err := s.client.Delete(ctx, s.Scope.ResourceGroup(), inboundNatSpec.LoadBalancerName, inboundNatSpec.Name) if err != nil && !azure.ResourceNotFound(err) { return errors.Wrapf(err, "failed to delete inbound NAT rule %s", inboundNatSpec.Name) } - s.Scope.V(2).Info("successfully deleted inbound NAT rule", "NAT rule", inboundNatSpec.Name) + log.V(2).Info("successfully deleted inbound NAT rule", "NAT rule", inboundNatSpec.Name) } return nil } -func (s *Service) natRuleExists(ports map[int32]struct{}) func([]network.InboundNatRule, string) bool { - return func(rules []network.InboundNatRule, name string) bool { +func (s *Service) natRuleExists(ports map[int32]struct{}) func(context.Context, []network.InboundNatRule, string) bool { + return func(ctx context.Context, rules []network.InboundNatRule, name string) bool { + _, log, done := tele.StartSpanWithLogger(ctx, "inboundnatrules.Service.natRuleExists") + defer done() + for _, v := range rules { if to.String(v.Name) == name { - s.Scope.V(2).Info("NAT rule already exists", "NAT rule", name) + log.V(2).Info("NAT rule already exists", "NAT rule", name) return true } ports[*v.InboundNatRulePropertiesFormat.FrontendPort] = struct{}{} @@ -135,17 +135,20 @@ func (s *Service) natRuleExists(ports map[int32]struct{}) func([]network.Inbound } } -func (s *Service) getAvailablePort(ports map[int32]struct{}) (int32, error) { +func (s *Service) getAvailablePort(ctx context.Context, ports map[int32]struct{}) (int32, error) { + _, log, done := tele.StartSpanWithLogger(ctx, 
"inboundnatrules.Service.getAvailablePort") + defer done() + var i int32 = 22 if _, ok := ports[22]; ok { for i = 2201; i < 2220; i++ { if _, ok := ports[i]; !ok { - s.Scope.V(2).Info("Found available port", "port", i) + log.V(2).Info("Found available port", "port", i) return i, nil } } return i, errors.Errorf("No available SSH Frontend ports") } - s.Scope.V(2).Info("Found available port", "port", i) + log.V(2).Info("Found available port", "port", i) return i, nil } diff --git a/azure/services/inboundnatrules/inboundnatrules_test.go b/azure/services/inboundnatrules/inboundnatrules_test.go index 37fddafa130..47a3d3200e5 100644 --- a/azure/services/inboundnatrules/inboundnatrules_test.go +++ b/azure/services/inboundnatrules/inboundnatrules_test.go @@ -26,9 +26,7 @@ import ( "github.com/Azure/go-autorest/autorest/to" "github.com/golang/mock/gomock" . "github.com/onsi/gomega" - "k8s.io/klog/v2/klogr" "k8s.io/utils/pointer" - "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/azure/services/inboundnatrules/mock_inboundnatrules" "sigs.k8s.io/cluster-api-provider-azure/azure/services/loadbalancers/mock_loadbalancers" @@ -57,7 +55,6 @@ func TestReconcileInboundNATRule(t *testing.T) { }) s.ResourceGroup().AnyTimes().Return("my-rg") s.Location().AnyTimes().Return("fake-location") - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) gomock.InOrder( mLoadBalancer.Get(gomockinternal.AContext(), "my-rg", "my-lb").Return(network.LoadBalancer{ Name: to.StringPtr("my-lb"), @@ -99,7 +96,6 @@ func TestReconcileInboundNATRule(t *testing.T) { }) s.ResourceGroup().AnyTimes().Return("my-rg") s.Location().AnyTimes().Return("fake-location") - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) gomock.InOrder( mLoadBalancer.Get(gomockinternal.AContext(), "my-rg", "my-public-lb"). 
Return(network.LoadBalancer{}, autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 500}, "Internal Server Error"))) @@ -119,7 +115,6 @@ func TestReconcileInboundNATRule(t *testing.T) { }) s.ResourceGroup().AnyTimes().Return("my-rg") s.Location().AnyTimes().Return("fake-location") - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) gomock.InOrder( mLoadBalancer.Get(gomockinternal.AContext(), "my-rg", "my-public-lb").Return(network.LoadBalancer{ Name: to.StringPtr("my-public-lb"), @@ -181,7 +176,6 @@ func TestReconcileInboundNATRule(t *testing.T) { }) s.ResourceGroup().AnyTimes().Return("my-rg") s.Location().AnyTimes().Return("fake-location") - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) gomock.InOrder( mLoadBalancer.Get(gomockinternal.AContext(), "my-rg", "my-public-lb").Return(network.LoadBalancer{ Name: to.StringPtr("my-public-lb"), @@ -273,7 +267,6 @@ func TestDeleteNetworkInterface(t *testing.T) { LoadBalancerName: "my-public-lb", }, }) - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.ResourceGroup().AnyTimes().Return("my-rg") m.Delete(gomockinternal.AContext(), "my-rg", "my-public-lb", "azure-md-0") }, @@ -290,7 +283,6 @@ func TestDeleteNetworkInterface(t *testing.T) { }, }) s.ResourceGroup().AnyTimes().Return("my-rg") - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) m.Delete(gomockinternal.AContext(), "my-rg", "my-public-lb", "azure-md-1"). Return(autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 404}, "Not found")) }, @@ -306,7 +298,6 @@ func TestDeleteNetworkInterface(t *testing.T) { LoadBalancerName: "my-public-lb", }, }) - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.ResourceGroup().AnyTimes().Return("my-rg") m.Delete(gomockinternal.AContext(), "my-rg", "my-public-lb", "azure-md-2"). 
Return(autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 500}, "Internal Server Error")) @@ -374,7 +365,6 @@ func TestNatRuleExists(t *testing.T) { expectedResult: true, expect: func(s *mock_inboundnatrules.MockInboundNatScopeMockRecorder, m *mock_inboundnatrules.MockclientMockRecorder, mLoadBalancer *mock_loadbalancers.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).Return(klogr.New()) }, }, { @@ -435,7 +425,7 @@ func TestNatRuleExists(t *testing.T) { } ports := make(map[int32]struct{}) - exists := s.natRuleExists(ports)(tc.existingRules, tc.ruleName) + exists := s.natRuleExists(ports)(context.TODO(), tc.existingRules, tc.ruleName) g.Expect(exists).To(Equal(tc.expectedResult)) if !exists { g.Expect(ports).To(Equal(tc.expectedPorts)) @@ -460,7 +450,6 @@ func TestGetAvailablePort(t *testing.T) { expectedPortResult: 22, expect: func(s *mock_inboundnatrules.MockInboundNatScopeMockRecorder, m *mock_inboundnatrules.MockclientMockRecorder, mLoadBalancer *mock_loadbalancers.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).Return(klogr.New()) }, }, { @@ -472,7 +461,6 @@ func TestGetAvailablePort(t *testing.T) { expectedPortResult: 2201, expect: func(s *mock_inboundnatrules.MockInboundNatScopeMockRecorder, m *mock_inboundnatrules.MockclientMockRecorder, mLoadBalancer *mock_loadbalancers.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).Return(klogr.New()) }, }, { @@ -487,7 +475,6 @@ func TestGetAvailablePort(t *testing.T) { expectedPortResult: 2203, expect: func(s *mock_inboundnatrules.MockInboundNatScopeMockRecorder, m *mock_inboundnatrules.MockclientMockRecorder, mLoadBalancer *mock_loadbalancers.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).Return(klogr.New()) }, }, { @@ -520,7 +507,7 @@ func TestGetAvailablePort(t *testing.T) { loadBalancersClient: loadBalancerMock, } - res, err := s.getAvailablePort(tc.portsInput) + res, err := s.getAvailablePort(context.TODO(), tc.portsInput) if tc.expectedError != 
"" { g.Expect(err).To(HaveOccurred()) g.Expect(err).To(MatchError(tc.expectedError)) diff --git a/azure/services/inboundnatrules/mock_inboundnatrules/inboundnatrules_mock.go b/azure/services/inboundnatrules/mock_inboundnatrules/inboundnatrules_mock.go index f847ce8cfd3..72b637170b1 100644 --- a/azure/services/inboundnatrules/mock_inboundnatrules/inboundnatrules_mock.go +++ b/azure/services/inboundnatrules/mock_inboundnatrules/inboundnatrules_mock.go @@ -24,7 +24,6 @@ import ( reflect "reflect" autorest "github.com/Azure/go-autorest/autorest" - logr "github.com/go-logr/logr" gomock "github.com/golang/mock/gomock" v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" azure "sigs.k8s.io/cluster-api-provider-azure/azure" @@ -179,37 +178,6 @@ func (mr *MockInboundNatScopeMockRecorder) ClusterName() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClusterName", reflect.TypeOf((*MockInboundNatScope)(nil).ClusterName)) } -// Enabled mocks base method. -func (m *MockInboundNatScope) Enabled() bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Enabled") - ret0, _ := ret[0].(bool) - return ret0 -} - -// Enabled indicates an expected call of Enabled. -func (mr *MockInboundNatScopeMockRecorder) Enabled() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Enabled", reflect.TypeOf((*MockInboundNatScope)(nil).Enabled)) -} - -// Error mocks base method. -func (m *MockInboundNatScope) Error(err error, msg string, keysAndValues ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{err, msg} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Error", varargs...) -} - -// Error indicates an expected call of Error. -func (mr *MockInboundNatScopeMockRecorder) Error(err, msg interface{}, keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{err, msg}, keysAndValues...) 
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Error", reflect.TypeOf((*MockInboundNatScope)(nil).Error), varargs...) -} - // FailureDomains mocks base method. func (m *MockInboundNatScope) FailureDomains() []string { m.ctrl.T.Helper() @@ -252,23 +220,6 @@ func (mr *MockInboundNatScopeMockRecorder) InboundNatSpecs() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InboundNatSpecs", reflect.TypeOf((*MockInboundNatScope)(nil).InboundNatSpecs)) } -// Info mocks base method. -func (m *MockInboundNatScope) Info(msg string, keysAndValues ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{msg} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Info", varargs...) -} - -// Info indicates an expected call of Info. -func (mr *MockInboundNatScopeMockRecorder) Info(msg interface{}, keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{msg}, keysAndValues...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Info", reflect.TypeOf((*MockInboundNatScope)(nil).Info), varargs...) -} - // Location mocks base method. func (m *MockInboundNatScope) Location() string { m.ctrl.T.Helper() @@ -324,49 +275,3 @@ func (mr *MockInboundNatScopeMockRecorder) TenantID() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TenantID", reflect.TypeOf((*MockInboundNatScope)(nil).TenantID)) } - -// V mocks base method. -func (m *MockInboundNatScope) V(level int) logr.Logger { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "V", level) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// V indicates an expected call of V. -func (mr *MockInboundNatScopeMockRecorder) V(level interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "V", reflect.TypeOf((*MockInboundNatScope)(nil).V), level) -} - -// WithName mocks base method. 
-func (m *MockInboundNatScope) WithName(name string) logr.Logger { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WithName", name) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// WithName indicates an expected call of WithName. -func (mr *MockInboundNatScopeMockRecorder) WithName(name interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithName", reflect.TypeOf((*MockInboundNatScope)(nil).WithName), name) -} - -// WithValues mocks base method. -func (m *MockInboundNatScope) WithValues(keysAndValues ...interface{}) logr.Logger { - m.ctrl.T.Helper() - varargs := []interface{}{} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "WithValues", varargs...) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// WithValues indicates an expected call of WithValues. -func (mr *MockInboundNatScopeMockRecorder) WithValues(keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithValues", reflect.TypeOf((*MockInboundNatScope)(nil).WithValues), keysAndValues...) -} diff --git a/azure/services/loadbalancers/loadbalancers.go b/azure/services/loadbalancers/loadbalancers.go index 0950b0f7c75..64b89bd5de1 100644 --- a/azure/services/loadbalancers/loadbalancers.go +++ b/azure/services/loadbalancers/loadbalancers.go @@ -21,9 +21,7 @@ import ( "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network" "github.com/Azure/go-autorest/autorest/to" - "github.com/go-logr/logr" "github.com/pkg/errors" - infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/azure/converters" @@ -39,7 +37,6 @@ const ( // LBScope defines the scope interface for a load balancer service. 
type LBScope interface { - logr.Logger azure.ClusterDescriber azure.NetworkDescriber LBSpecs() []azure.LBSpec @@ -63,7 +60,7 @@ func New(scope LBScope) *Service { // Reconcile gets/creates/updates a load balancer. func (s *Service) Reconcile(ctx context.Context) error { - ctx, _, done := tele.StartSpanWithLogger(ctx, "loadbalancers.Service.Reconcile") + ctx, log, done := tele.StartSpanWithLogger(ctx, "loadbalancers.Service.Reconcile") defer done() for _, lbSpec := range s.Scope.LBSpecs() { @@ -83,7 +80,7 @@ func (s *Service) Reconcile(ctx context.Context) error { return errors.Wrapf(err, "failed to get LB %s in %s", lbSpec.Name, s.Scope.ResourceGroup()) case err == nil: // LB already exists - s.Scope.V(2).Info("found existing load balancer, checking if updates are needed", "load balancer", lbSpec.Name) + log.V(2).Info("found existing load balancer, checking if updates are needed", "load balancer", lbSpec.Name) // We append the existing LB etag to the header to ensure we only apply the updates if the LB has not been modified. 
etag = existingLB.Etag update := false @@ -132,11 +129,11 @@ func (s *Service) Reconcile(ctx context.Context) error { if !update { // Skip update for LB as the required defaults are present - s.Scope.V(2).Info("LB exists and no defaults are missing, skipping update", "load balancer", lbSpec.Name) + log.V(2).Info("LB exists and no defaults are missing, skipping update", "load balancer", lbSpec.Name) continue } default: - s.Scope.V(2).Info("creating load balancer", "load balancer", lbSpec.Name) + log.V(2).Info("creating load balancer", "load balancer", lbSpec.Name) frontendIPConfigs, frontendIDs = s.getFrontendIPConfigs(lbSpec) loadBalancingRules = s.getLoadBalancingRules(lbSpec, frontendIDs) backendAddressPools = s.getBackendAddressPools(lbSpec) @@ -169,18 +166,18 @@ func (s *Service) Reconcile(ctx context.Context) error { return errors.Wrapf(err, "failed to create load balancer \"%s\"", lbSpec.Name) } - s.Scope.V(2).Info("successfully created load balancer", "load balancer", lbSpec.Name) + log.V(2).Info("successfully created load balancer", "load balancer", lbSpec.Name) } return nil } // Delete deletes the public load balancer with the provided name. 
func (s *Service) Delete(ctx context.Context) error { - ctx, _, done := tele.StartSpanWithLogger(ctx, "loadbalancers.Service.Delete") + ctx, log, done := tele.StartSpanWithLogger(ctx, "loadbalancers.Service.Delete") defer done() for _, lbSpec := range s.Scope.LBSpecs() { - s.Scope.V(2).Info("deleting load balancer", "load balancer", lbSpec.Name) + log.V(2).Info("deleting load balancer", "load balancer", lbSpec.Name) err := s.Client.Delete(ctx, s.Scope.ResourceGroup(), lbSpec.Name) if err != nil && azure.ResourceNotFound(err) { // already deleted @@ -190,7 +187,7 @@ func (s *Service) Delete(ctx context.Context) error { return errors.Wrapf(err, "failed to delete load balancer %s in resource group %s", lbSpec.Name, s.Scope.ResourceGroup()) } - s.Scope.V(2).Info("deleted public load balancer", "load balancer", lbSpec.Name) + log.V(2).Info("deleted public load balancer", "load balancer", lbSpec.Name) } return nil } diff --git a/azure/services/loadbalancers/loadbalancers_test.go b/azure/services/loadbalancers/loadbalancers_test.go index ee711920c95..8eab2346f35 100644 --- a/azure/services/loadbalancers/loadbalancers_test.go +++ b/azure/services/loadbalancers/loadbalancers_test.go @@ -26,8 +26,6 @@ import ( "github.com/Azure/go-autorest/autorest/to" "github.com/golang/mock/gomock" . 
"github.com/onsi/gomega" - "k8s.io/klog/v2/klogr" - infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/azure/services/loadbalancers/mock_loadbalancers" @@ -294,7 +292,6 @@ func TestDeleteLoadBalancer(t *testing.T) { name: "successfully delete an existing load balancer", expectedError: "", expect: func(s *mock_loadbalancers.MockLBScopeMockRecorder, m *mock_loadbalancers.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.LBSpecs().Return([]azure.LBSpec{ { Name: "my-internallb", @@ -312,7 +309,6 @@ func TestDeleteLoadBalancer(t *testing.T) { name: "load balancer already deleted", expectedError: "", expect: func(s *mock_loadbalancers.MockLBScopeMockRecorder, m *mock_loadbalancers.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.LBSpecs().Return([]azure.LBSpec{ { Name: "my-publiclb", @@ -327,7 +323,6 @@ func TestDeleteLoadBalancer(t *testing.T) { name: "load balancer deletion fails", expectedError: "failed to delete load balancer my-publiclb in resource group my-rg: #: Internal Server Error: StatusCode=500", expect: func(s *mock_loadbalancers.MockLBScopeMockRecorder, m *mock_loadbalancers.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.LBSpecs().Return([]azure.LBSpec{ { Name: "my-publiclb", @@ -554,7 +549,6 @@ func newDefaultInternalAPIServerLB() network.LoadBalancer { } func setupDefaultLBExpectations(s *mock_loadbalancers.MockLBScopeMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.SubscriptionID().AnyTimes().Return("123") s.ResourceGroup().AnyTimes().Return("my-rg") s.Location().AnyTimes().Return("testlocation") diff --git a/azure/services/loadbalancers/mock_loadbalancers/loadbalancers_mock.go b/azure/services/loadbalancers/mock_loadbalancers/loadbalancers_mock.go index f41464c4c2c..10031a3d074 100644 
--- a/azure/services/loadbalancers/mock_loadbalancers/loadbalancers_mock.go +++ b/azure/services/loadbalancers/mock_loadbalancers/loadbalancers_mock.go @@ -24,7 +24,6 @@ import ( reflect "reflect" autorest "github.com/Azure/go-autorest/autorest" - logr "github.com/go-logr/logr" gomock "github.com/golang/mock/gomock" v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" azure "sigs.k8s.io/cluster-api-provider-azure/azure" @@ -235,37 +234,6 @@ func (mr *MockLBScopeMockRecorder) ControlPlaneSubnet() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ControlPlaneSubnet", reflect.TypeOf((*MockLBScope)(nil).ControlPlaneSubnet)) } -// Enabled mocks base method. -func (m *MockLBScope) Enabled() bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Enabled") - ret0, _ := ret[0].(bool) - return ret0 -} - -// Enabled indicates an expected call of Enabled. -func (mr *MockLBScopeMockRecorder) Enabled() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Enabled", reflect.TypeOf((*MockLBScope)(nil).Enabled)) -} - -// Error mocks base method. -func (m *MockLBScope) Error(err error, msg string, keysAndValues ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{err, msg} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Error", varargs...) -} - -// Error indicates an expected call of Error. -func (mr *MockLBScopeMockRecorder) Error(err, msg interface{}, keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{err, msg}, keysAndValues...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Error", reflect.TypeOf((*MockLBScope)(nil).Error), varargs...) -} - // FailureDomains mocks base method. 
func (m *MockLBScope) FailureDomains() []string { m.ctrl.T.Helper() @@ -308,23 +276,6 @@ func (mr *MockLBScopeMockRecorder) HashKey() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HashKey", reflect.TypeOf((*MockLBScope)(nil).HashKey)) } -// Info mocks base method. -func (m *MockLBScope) Info(msg string, keysAndValues ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{msg} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Info", varargs...) -} - -// Info indicates an expected call of Info. -func (mr *MockLBScopeMockRecorder) Info(msg interface{}, keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{msg}, keysAndValues...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Info", reflect.TypeOf((*MockLBScope)(nil).Info), varargs...) -} - // IsAPIServerPrivate mocks base method. func (m *MockLBScope) IsAPIServerPrivate() bool { m.ctrl.T.Helper() @@ -519,20 +470,6 @@ func (mr *MockLBScopeMockRecorder) TenantID() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TenantID", reflect.TypeOf((*MockLBScope)(nil).TenantID)) } -// V mocks base method. -func (m *MockLBScope) V(level int) logr.Logger { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "V", level) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// V indicates an expected call of V. -func (mr *MockLBScopeMockRecorder) V(level interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "V", reflect.TypeOf((*MockLBScope)(nil).V), level) -} - // Vnet mocks base method. func (m *MockLBScope) Vnet() *v1beta1.VnetSpec { m.ctrl.T.Helper() @@ -546,35 +483,3 @@ func (mr *MockLBScopeMockRecorder) Vnet() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Vnet", reflect.TypeOf((*MockLBScope)(nil).Vnet)) } - -// WithName mocks base method. 
-func (m *MockLBScope) WithName(name string) logr.Logger { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WithName", name) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// WithName indicates an expected call of WithName. -func (mr *MockLBScopeMockRecorder) WithName(name interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithName", reflect.TypeOf((*MockLBScope)(nil).WithName), name) -} - -// WithValues mocks base method. -func (m *MockLBScope) WithValues(keysAndValues ...interface{}) logr.Logger { - m.ctrl.T.Helper() - varargs := []interface{}{} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "WithValues", varargs...) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// WithValues indicates an expected call of WithValues. -func (mr *MockLBScopeMockRecorder) WithValues(keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithValues", reflect.TypeOf((*MockLBScope)(nil).WithValues), keysAndValues...) -} diff --git a/azure/services/managedclusters/managedclusters.go b/azure/services/managedclusters/managedclusters.go index 331afbf1f90..ef4361dce26 100644 --- a/azure/services/managedclusters/managedclusters.go +++ b/azure/services/managedclusters/managedclusters.go @@ -24,7 +24,6 @@ import ( "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2021-05-01/containerservice" "github.com/Azure/go-autorest/autorest/to" - "github.com/go-logr/logr" "github.com/google/go-cmp/cmp" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" @@ -43,7 +42,6 @@ var ( // ManagedClusterScope defines the scope interface for a managed cluster. 
type ManagedClusterScope interface { - logr.Logger azure.ClusterDescriber ManagedClusterSpec() (azure.ManagedClusterSpec, error) GetAgentPoolSpecs(ctx context.Context) ([]azure.AgentPoolSpec, error) diff --git a/azure/services/managedclusters/mock_managedclusters/managedclusters_mock.go b/azure/services/managedclusters/mock_managedclusters/managedclusters_mock.go index 7a7e4439d8a..2fe2e8efef5 100644 --- a/azure/services/managedclusters/mock_managedclusters/managedclusters_mock.go +++ b/azure/services/managedclusters/mock_managedclusters/managedclusters_mock.go @@ -25,7 +25,6 @@ import ( reflect "reflect" autorest "github.com/Azure/go-autorest/autorest" - logr "github.com/go-logr/logr" gomock "github.com/golang/mock/gomock" v1 "k8s.io/api/core/v1" v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" @@ -182,37 +181,6 @@ func (mr *MockManagedClusterScopeMockRecorder) ClusterName() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClusterName", reflect.TypeOf((*MockManagedClusterScope)(nil).ClusterName)) } -// Enabled mocks base method. -func (m *MockManagedClusterScope) Enabled() bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Enabled") - ret0, _ := ret[0].(bool) - return ret0 -} - -// Enabled indicates an expected call of Enabled. -func (mr *MockManagedClusterScopeMockRecorder) Enabled() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Enabled", reflect.TypeOf((*MockManagedClusterScope)(nil).Enabled)) -} - -// Error mocks base method. -func (m *MockManagedClusterScope) Error(err error, msg string, keysAndValues ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{err, msg} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Error", varargs...) -} - -// Error indicates an expected call of Error. 
-func (mr *MockManagedClusterScopeMockRecorder) Error(err, msg interface{}, keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{err, msg}, keysAndValues...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Error", reflect.TypeOf((*MockManagedClusterScope)(nil).Error), varargs...) -} - // FailureDomains mocks base method. func (m *MockManagedClusterScope) FailureDomains() []string { m.ctrl.T.Helper() @@ -270,23 +238,6 @@ func (mr *MockManagedClusterScopeMockRecorder) HashKey() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HashKey", reflect.TypeOf((*MockManagedClusterScope)(nil).HashKey)) } -// Info mocks base method. -func (m *MockManagedClusterScope) Info(msg string, keysAndValues ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{msg} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Info", varargs...) -} - -// Info indicates an expected call of Info. -func (mr *MockManagedClusterScopeMockRecorder) Info(msg interface{}, keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{msg}, keysAndValues...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Info", reflect.TypeOf((*MockManagedClusterScope)(nil).Info), varargs...) -} - // Location mocks base method. func (m *MockManagedClusterScope) Location() string { m.ctrl.T.Helper() @@ -395,49 +346,3 @@ func (mr *MockManagedClusterScopeMockRecorder) TenantID() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TenantID", reflect.TypeOf((*MockManagedClusterScope)(nil).TenantID)) } - -// V mocks base method. -func (m *MockManagedClusterScope) V(level int) logr.Logger { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "V", level) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// V indicates an expected call of V. 
-func (mr *MockManagedClusterScopeMockRecorder) V(level interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "V", reflect.TypeOf((*MockManagedClusterScope)(nil).V), level) -} - -// WithName mocks base method. -func (m *MockManagedClusterScope) WithName(name string) logr.Logger { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WithName", name) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// WithName indicates an expected call of WithName. -func (mr *MockManagedClusterScopeMockRecorder) WithName(name interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithName", reflect.TypeOf((*MockManagedClusterScope)(nil).WithName), name) -} - -// WithValues mocks base method. -func (m *MockManagedClusterScope) WithValues(keysAndValues ...interface{}) logr.Logger { - m.ctrl.T.Helper() - varargs := []interface{}{} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "WithValues", varargs...) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// WithValues indicates an expected call of WithValues. -func (mr *MockManagedClusterScopeMockRecorder) WithValues(keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithValues", reflect.TypeOf((*MockManagedClusterScope)(nil).WithValues), keysAndValues...) 
-} diff --git a/azure/services/natgateways/mock_natgateways/natgateways_mock.go b/azure/services/natgateways/mock_natgateways/natgateways_mock.go index e7cfd3eb621..c7f1878ab87 100644 --- a/azure/services/natgateways/mock_natgateways/natgateways_mock.go +++ b/azure/services/natgateways/mock_natgateways/natgateways_mock.go @@ -24,7 +24,6 @@ import ( reflect "reflect" autorest "github.com/Azure/go-autorest/autorest" - logr "github.com/go-logr/logr" gomock "github.com/golang/mock/gomock" v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" azure "sigs.k8s.io/cluster-api-provider-azure/azure" @@ -235,37 +234,6 @@ func (mr *MockNatGatewayScopeMockRecorder) ControlPlaneSubnet() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ControlPlaneSubnet", reflect.TypeOf((*MockNatGatewayScope)(nil).ControlPlaneSubnet)) } -// Enabled mocks base method. -func (m *MockNatGatewayScope) Enabled() bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Enabled") - ret0, _ := ret[0].(bool) - return ret0 -} - -// Enabled indicates an expected call of Enabled. -func (mr *MockNatGatewayScopeMockRecorder) Enabled() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Enabled", reflect.TypeOf((*MockNatGatewayScope)(nil).Enabled)) -} - -// Error mocks base method. -func (m *MockNatGatewayScope) Error(err error, msg string, keysAndValues ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{err, msg} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Error", varargs...) -} - -// Error indicates an expected call of Error. -func (mr *MockNatGatewayScopeMockRecorder) Error(err, msg interface{}, keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{err, msg}, keysAndValues...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Error", reflect.TypeOf((*MockNatGatewayScope)(nil).Error), varargs...) 
-} - // FailureDomains mocks base method. func (m *MockNatGatewayScope) FailureDomains() []string { m.ctrl.T.Helper() @@ -308,23 +276,6 @@ func (mr *MockNatGatewayScopeMockRecorder) HashKey() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HashKey", reflect.TypeOf((*MockNatGatewayScope)(nil).HashKey)) } -// Info mocks base method. -func (m *MockNatGatewayScope) Info(msg string, keysAndValues ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{msg} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Info", varargs...) -} - -// Info indicates an expected call of Info. -func (mr *MockNatGatewayScopeMockRecorder) Info(msg interface{}, keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{msg}, keysAndValues...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Info", reflect.TypeOf((*MockNatGatewayScope)(nil).Info), varargs...) -} - // IsAPIServerPrivate mocks base method. func (m *MockNatGatewayScope) IsAPIServerPrivate() bool { m.ctrl.T.Helper() @@ -519,20 +470,6 @@ func (mr *MockNatGatewayScopeMockRecorder) TenantID() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TenantID", reflect.TypeOf((*MockNatGatewayScope)(nil).TenantID)) } -// V mocks base method. -func (m *MockNatGatewayScope) V(level int) logr.Logger { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "V", level) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// V indicates an expected call of V. -func (mr *MockNatGatewayScopeMockRecorder) V(level interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "V", reflect.TypeOf((*MockNatGatewayScope)(nil).V), level) -} - // Vnet mocks base method. 
func (m *MockNatGatewayScope) Vnet() *v1beta1.VnetSpec { m.ctrl.T.Helper() @@ -546,35 +483,3 @@ func (mr *MockNatGatewayScopeMockRecorder) Vnet() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Vnet", reflect.TypeOf((*MockNatGatewayScope)(nil).Vnet)) } - -// WithName mocks base method. -func (m *MockNatGatewayScope) WithName(name string) logr.Logger { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WithName", name) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// WithName indicates an expected call of WithName. -func (mr *MockNatGatewayScopeMockRecorder) WithName(name interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithName", reflect.TypeOf((*MockNatGatewayScope)(nil).WithName), name) -} - -// WithValues mocks base method. -func (m *MockNatGatewayScope) WithValues(keysAndValues ...interface{}) logr.Logger { - m.ctrl.T.Helper() - varargs := []interface{}{} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "WithValues", varargs...) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// WithValues indicates an expected call of WithValues. -func (mr *MockNatGatewayScopeMockRecorder) WithValues(keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithValues", reflect.TypeOf((*MockNatGatewayScope)(nil).WithValues), keysAndValues...) 
-} diff --git a/azure/services/natgateways/natgateways.go b/azure/services/natgateways/natgateways.go index bd79d52d006..8e18950a364 100644 --- a/azure/services/natgateways/natgateways.go +++ b/azure/services/natgateways/natgateways.go @@ -23,9 +23,7 @@ import ( "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network" autorest "github.com/Azure/go-autorest/autorest/azure" "github.com/Azure/go-autorest/autorest/to" - "github.com/go-logr/logr" "github.com/pkg/errors" - infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/util/tele" @@ -33,7 +31,6 @@ import ( // NatGatewayScope defines the scope interface for nat gateway service. type NatGatewayScope interface { - logr.Logger azure.ClusterScoper NatGatewaySpecs() []azure.NatGatewaySpec } @@ -55,11 +52,11 @@ func New(scope NatGatewayScope) *Service { // Reconcile gets/creates/updates a nat gateway. // Only when the Nat Gateway 'Name' property is defined we create the Nat Gateway: it's opt-in. 
func (s *Service) Reconcile(ctx context.Context) error { - ctx, _, done := tele.StartSpanWithLogger(ctx, "natgateways.Service.Reconcile") + ctx, log, done := tele.StartSpanWithLogger(ctx, "natgateways.Service.Reconcile") defer done() if !s.Scope.Vnet().IsManaged(s.Scope.ClusterName()) { - s.Scope.V(4).Info("Skipping nat gateways reconcile in custom vnet mode") + log.V(4).Info("Skipping nat gateways reconcile in custom vnet mode") return nil } @@ -71,21 +68,21 @@ func (s *Service) Reconcile(ctx context.Context) error { return errors.Wrapf(err, "failed to get nat gateway %s in %s", natGatewaySpec.Name, s.Scope.ResourceGroup()) case err == nil: // nat gateway already exists - s.Scope.V(4).Info("nat gateway already exists", "nat gateway", natGatewaySpec.Name) + log.V(4).Info("nat gateway already exists", "nat gateway", natGatewaySpec.Name) natGatewaySpec.Subnet.NatGateway.ID = existingNatGateway.ID if existingNatGateway.NatGatewayIP.Name == natGatewaySpec.NatGatewayIP.Name { // Skip update for Nat Gateway as it exists with expected values - s.Scope.V(4).Info("Nat Gateway exists with expected values, skipping update", "nat gateway", natGatewaySpec.Name) + log.V(4).Info("Nat Gateway exists with expected values, skipping update", "nat gateway", natGatewaySpec.Name) natGatewaySpec.Subnet.NatGateway = *existingNatGateway s.Scope.SetSubnet(natGatewaySpec.Subnet) continue } else { - s.Scope.V(2).Info("updating NAT gateway IP name to match the spec", "old name", existingNatGateway.NatGatewayIP.Name, "desired name", natGatewaySpec.NatGatewayIP.Name) + log.V(2).Info("updating NAT gateway IP name to match the spec", "old name", existingNatGateway.NatGatewayIP.Name, "desired name", natGatewaySpec.NatGatewayIP.Name) } default: // nat gateway doesn't exist but its name was specified in the subnet, let's create it - s.Scope.V(2).Info("nat gateway doesn't exist yet, creating it", "nat gateway", natGatewaySpec.Name) + log.V(2).Info("nat gateway doesn't exist yet, creating it", "nat 
gateway", natGatewaySpec.Name) } natGatewayToCreate := network.NatGateway{ @@ -103,7 +100,7 @@ func (s *Service) Reconcile(ctx context.Context) error { if err != nil { return errors.Wrapf(err, "failed to create nat gateway %s in resource group %s", natGatewaySpec.Name, s.Scope.ResourceGroup()) } - s.Scope.V(2).Info("successfully created nat gateway", "nat gateway", natGatewaySpec.Name) + log.V(2).Info("successfully created nat gateway", "nat gateway", natGatewaySpec.Name) natGateway := infrav1.NatGateway{ ID: azure.NatGatewayID(s.Scope.SubscriptionID(), s.Scope.ResourceGroup(), natGatewaySpec.Name), Name: natGatewaySpec.Name, @@ -118,6 +115,9 @@ func (s *Service) Reconcile(ctx context.Context) error { } func (s *Service) getExisting(ctx context.Context, spec azure.NatGatewaySpec) (*infrav1.NatGateway, error) { + ctx, _, done := tele.StartSpanWithLogger(ctx, "natgateways.Service.getExisting") + defer done() + existingNatGateway, err := s.Get(ctx, s.Scope.ResourceGroup(), spec.Name) if err != nil { return nil, err @@ -149,15 +149,15 @@ func (s *Service) getExisting(ctx context.Context, spec azure.NatGatewaySpec) (* // Delete deletes the nat gateway with the provided name. 
func (s *Service) Delete(ctx context.Context) error { - ctx, _, done := tele.StartSpanWithLogger(ctx, "natgateways.Service.Delete") + ctx, log, done := tele.StartSpanWithLogger(ctx, "natgateways.Service.Delete") defer done() if !s.Scope.Vnet().IsManaged(s.Scope.ClusterName()) { - s.Scope.V(4).Info("Skipping nat gateway deletion in custom vnet mode") + log.V(4).Info("Skipping nat gateway deletion in custom vnet mode") return nil } for _, natGatewaySpec := range s.Scope.NatGatewaySpecs() { - s.Scope.V(2).Info("deleting nat gateway", "nat gateway", natGatewaySpec.Name) + log.V(2).Info("deleting nat gateway", "nat gateway", natGatewaySpec.Name) err := s.client.Delete(ctx, s.Scope.ResourceGroup(), natGatewaySpec.Name) if err != nil && azure.ResourceNotFound(err) { // already deleted @@ -167,7 +167,7 @@ func (s *Service) Delete(ctx context.Context) error { return errors.Wrapf(err, "failed to delete nat gateway %s in resource group %s", natGatewaySpec.Name, s.Scope.ResourceGroup()) } - s.Scope.V(2).Info("successfully deleted nat gateway", "nat gateway", natGatewaySpec.Name) + log.V(2).Info("successfully deleted nat gateway", "nat gateway", natGatewaySpec.Name) } return nil } diff --git a/azure/services/natgateways/natgateways_test.go b/azure/services/natgateways/natgateways_test.go index d41d743a7bf..16db094c4ba 100644 --- a/azure/services/natgateways/natgateways_test.go +++ b/azure/services/natgateways/natgateways_test.go @@ -27,13 +27,11 @@ import ( "github.com/golang/mock/gomock" . 
"github.com/onsi/gomega" "k8s.io/client-go/kubernetes/scheme" - "k8s.io/klog/v2/klogr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" - infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/azure/services/natgateways/mock_natgateways" gomockinternal "sigs.k8s.io/cluster-api-provider-azure/internal/test/matchers/gomock" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" ) func init() { @@ -60,7 +58,6 @@ func TestReconcileNatGateways(t *testing.T) { ID: "1234", Name: "my-vnet", }) - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.ClusterName() }, }, @@ -76,7 +73,6 @@ func TestReconcileNatGateways(t *testing.T) { s.Vnet().Return(&infrav1.VnetSpec{ Name: "my-vnet", }) - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.ClusterName() s.NatGatewaySpecs().Return([]azure.NatGatewaySpec{ { @@ -119,7 +115,6 @@ func TestReconcileNatGateways(t *testing.T) { s.Vnet().Return(&infrav1.VnetSpec{ Name: "my-vnet", }) - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.ClusterName() s.NatGatewaySpecs().Return([]azure.NatGatewaySpec{ { @@ -170,7 +165,6 @@ func TestReconcileNatGateways(t *testing.T) { s.Vnet().Return(&infrav1.VnetSpec{ Name: "my-vnet", }) - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.ClusterName() s.NatGatewaySpecs().Return([]azure.NatGatewaySpec{ { @@ -223,7 +217,6 @@ func TestReconcileNatGateways(t *testing.T) { s.Vnet().Return(&infrav1.VnetSpec{ Name: "my-vnet", }) - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.ClusterName() s.NatGatewaySpecs().Return([]azure.NatGatewaySpec{ { @@ -251,7 +244,6 @@ func TestReconcileNatGateways(t *testing.T) { s.Vnet().Return(&infrav1.VnetSpec{ Name: "my-vnet", }) - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.ClusterName() s.NatGatewaySpecs().Return([]azure.NatGatewaySpec{ { @@ -319,7 +311,6 @@ func 
TestDeleteNatGateway(t *testing.T) { ID: "1234", Name: "my-vnet", }) - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.ClusterName() }, }, @@ -335,7 +326,6 @@ func TestDeleteNatGateway(t *testing.T) { s.Vnet().Return(&infrav1.VnetSpec{ Name: "my-vnet", }) - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.ClusterName() s.NatGatewaySpecs().Return([]azure.NatGatewaySpec{ { @@ -362,7 +352,6 @@ func TestDeleteNatGateway(t *testing.T) { s.Vnet().Return(&infrav1.VnetSpec{ Name: "my-vnet", }) - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.ClusterName() s.NatGatewaySpecs().Return([]azure.NatGatewaySpec{ { @@ -391,7 +380,6 @@ func TestDeleteNatGateway(t *testing.T) { s.Vnet().Return(&infrav1.VnetSpec{ Name: "my-vnet", }) - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.ClusterName() s.NatGatewaySpecs().Return([]azure.NatGatewaySpec{ { diff --git a/azure/services/networkinterfaces/mock_networkinterfaces/networkinterfaces_mock.go b/azure/services/networkinterfaces/mock_networkinterfaces/networkinterfaces_mock.go index 4c1188753a0..1b36d6c0e5e 100644 --- a/azure/services/networkinterfaces/mock_networkinterfaces/networkinterfaces_mock.go +++ b/azure/services/networkinterfaces/mock_networkinterfaces/networkinterfaces_mock.go @@ -24,7 +24,6 @@ import ( reflect "reflect" autorest "github.com/Azure/go-autorest/autorest" - logr "github.com/go-logr/logr" gomock "github.com/golang/mock/gomock" v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" azure "sigs.k8s.io/cluster-api-provider-azure/azure" @@ -179,37 +178,6 @@ func (mr *MockNICScopeMockRecorder) ClusterName() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClusterName", reflect.TypeOf((*MockNICScope)(nil).ClusterName)) } -// Enabled mocks base method. 
-func (m *MockNICScope) Enabled() bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Enabled") - ret0, _ := ret[0].(bool) - return ret0 -} - -// Enabled indicates an expected call of Enabled. -func (mr *MockNICScopeMockRecorder) Enabled() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Enabled", reflect.TypeOf((*MockNICScope)(nil).Enabled)) -} - -// Error mocks base method. -func (m *MockNICScope) Error(err error, msg string, keysAndValues ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{err, msg} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Error", varargs...) -} - -// Error indicates an expected call of Error. -func (mr *MockNICScopeMockRecorder) Error(err, msg interface{}, keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{err, msg}, keysAndValues...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Error", reflect.TypeOf((*MockNICScope)(nil).Error), varargs...) -} - // FailureDomains mocks base method. func (m *MockNICScope) FailureDomains() []string { m.ctrl.T.Helper() @@ -238,23 +206,6 @@ func (mr *MockNICScopeMockRecorder) HashKey() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HashKey", reflect.TypeOf((*MockNICScope)(nil).HashKey)) } -// Info mocks base method. -func (m *MockNICScope) Info(msg string, keysAndValues ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{msg} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Info", varargs...) -} - -// Info indicates an expected call of Info. -func (mr *MockNICScopeMockRecorder) Info(msg interface{}, keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{msg}, keysAndValues...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Info", reflect.TypeOf((*MockNICScope)(nil).Info), varargs...) 
-} - // Location mocks base method. func (m *MockNICScope) Location() string { m.ctrl.T.Helper() @@ -324,49 +275,3 @@ func (mr *MockNICScopeMockRecorder) TenantID() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TenantID", reflect.TypeOf((*MockNICScope)(nil).TenantID)) } - -// V mocks base method. -func (m *MockNICScope) V(level int) logr.Logger { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "V", level) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// V indicates an expected call of V. -func (mr *MockNICScopeMockRecorder) V(level interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "V", reflect.TypeOf((*MockNICScope)(nil).V), level) -} - -// WithName mocks base method. -func (m *MockNICScope) WithName(name string) logr.Logger { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WithName", name) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// WithName indicates an expected call of WithName. -func (mr *MockNICScopeMockRecorder) WithName(name interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithName", reflect.TypeOf((*MockNICScope)(nil).WithName), name) -} - -// WithValues mocks base method. -func (m *MockNICScope) WithValues(keysAndValues ...interface{}) logr.Logger { - m.ctrl.T.Helper() - varargs := []interface{}{} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "WithValues", varargs...) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// WithValues indicates an expected call of WithValues. -func (mr *MockNICScopeMockRecorder) WithValues(keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithValues", reflect.TypeOf((*MockNICScope)(nil).WithValues), keysAndValues...) 
-} diff --git a/azure/services/networkinterfaces/networkinterfaces.go b/azure/services/networkinterfaces/networkinterfaces.go index 9a3c62f82e5..62a6703c81e 100644 --- a/azure/services/networkinterfaces/networkinterfaces.go +++ b/azure/services/networkinterfaces/networkinterfaces.go @@ -21,7 +21,6 @@ import ( "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network" "github.com/Azure/go-autorest/autorest/to" - "github.com/go-logr/logr" "github.com/pkg/errors" "sigs.k8s.io/cluster-api-provider-azure/azure" @@ -31,7 +30,6 @@ import ( // NICScope defines the scope interface for a network interfaces service. type NICScope interface { - logr.Logger azure.ClusterDescriber NICSpecs() []azure.NICSpec } @@ -54,7 +52,7 @@ func New(scope NICScope, skuCache *resourceskus.Cache) *Service { // Reconcile gets/creates/updates a network interface. func (s *Service) Reconcile(ctx context.Context) error { - ctx, _, done := tele.StartSpanWithLogger(ctx, "networkinterfaces.Service.Reconcile") + ctx, log, done := tele.StartSpanWithLogger(ctx, "networkinterfaces.Service.Reconcile") defer done() for _, nicSpec := range s.Scope.NICSpecs() { @@ -155,7 +153,7 @@ func (s *Service) Reconcile(ctx context.Context) error { if err != nil { return errors.Wrapf(err, "failed to create network interface %s in resource group %s", nicSpec.Name, s.Scope.ResourceGroup()) } - s.Scope.V(2).Info("successfully created network interface", "network interface", nicSpec.Name) + log.V(2).Info("successfully created network interface", "network interface", nicSpec.Name) } } return nil @@ -163,16 +161,16 @@ func (s *Service) Reconcile(ctx context.Context) error { // Delete deletes the network interface with the provided name. 
func (s *Service) Delete(ctx context.Context) error { - ctx, _, done := tele.StartSpanWithLogger(ctx, "networkinterfaces.Service.Delete") + ctx, log, done := tele.StartSpanWithLogger(ctx, "networkinterfaces.Service.Delete") defer done() for _, nicSpec := range s.Scope.NICSpecs() { - s.Scope.V(2).Info("deleting network interface", "network interface", nicSpec.Name) + log.V(2).Info("deleting network interface", "network interface", nicSpec.Name) err := s.Client.Delete(ctx, s.Scope.ResourceGroup(), nicSpec.Name) if err != nil && !azure.ResourceNotFound(err) { return errors.Wrapf(err, "failed to delete network interface %s in resource group %s", nicSpec.Name, s.Scope.ResourceGroup()) } - s.Scope.V(2).Info("successfully deleted NIC", "network interface", nicSpec.Name) + log.V(2).Info("successfully deleted NIC", "network interface", nicSpec.Name) } return nil } diff --git a/azure/services/networkinterfaces/networkinterfaces_test.go b/azure/services/networkinterfaces/networkinterfaces_test.go index 09e096c2e20..8cda24560fa 100644 --- a/azure/services/networkinterfaces/networkinterfaces_test.go +++ b/azure/services/networkinterfaces/networkinterfaces_test.go @@ -22,9 +22,6 @@ import ( "net/http" "testing" - "sigs.k8s.io/cluster-api-provider-azure/azure" - gomockinternal "sigs.k8s.io/cluster-api-provider-azure/internal/test/matchers/gomock" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-04-01/compute" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network" "github.com/Azure/go-autorest/autorest" @@ -32,9 +29,10 @@ import ( "github.com/golang/mock/gomock" "github.com/google/go-cmp/cmp" . 
"github.com/onsi/gomega" - "k8s.io/klog/v2/klogr" + "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/azure/services/networkinterfaces/mock_networkinterfaces" "sigs.k8s.io/cluster-api-provider-azure/azure/services/resourceskus" + gomockinternal "sigs.k8s.io/cluster-api-provider-azure/internal/test/matchers/gomock" ) func TestReconcileNetworkInterface(t *testing.T) { @@ -117,11 +115,9 @@ func TestReconcileNetworkInterface(t *testing.T) { AcceleratedNetworking: nil, }, }) - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.SubscriptionID().AnyTimes().Return("123") s.ResourceGroup().AnyTimes().Return("my-rg") s.Location().AnyTimes().Return("fake-location") - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) m.Get(gomockinternal.AContext(), "my-rg", "my-net-interface"). Return(network.Interface{}, autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 404}, "Not found")) m.CreateOrUpdate(gomockinternal.AContext(), "my-rg", "my-net-interface", gomockinternal.DiffEq(network.Interface{ @@ -164,7 +160,6 @@ func TestReconcileNetworkInterface(t *testing.T) { s.SubscriptionID().AnyTimes().Return("123") s.ResourceGroup().AnyTimes().Return("my-rg") s.Location().AnyTimes().Return("fake-location") - s.V(gomock.AssignableToTypeOf(3)).AnyTimes().Return(klogr.New()) gomock.InOrder( m.Get(gomockinternal.AContext(), "my-rg", "my-net-interface"). Return(network.Interface{}, autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 404}, "Not found")), @@ -210,7 +205,6 @@ func TestReconcileNetworkInterface(t *testing.T) { s.SubscriptionID().AnyTimes().Return("123") s.ResourceGroup().AnyTimes().Return("my-rg") s.Location().AnyTimes().Return("fake-location") - s.V(gomock.AssignableToTypeOf(3)).AnyTimes().Return(klogr.New()) m.Get(gomockinternal.AContext(), "my-rg", "my-net-interface"). 
Return(network.Interface{}, autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 404}, "Not found")) m.CreateOrUpdate(gomockinternal.AContext(), "my-rg", "my-net-interface", gomockinternal.DiffEq(network.Interface{ @@ -254,7 +248,6 @@ func TestReconcileNetworkInterface(t *testing.T) { s.SubscriptionID().AnyTimes().Return("123") s.ResourceGroup().AnyTimes().Return("my-rg") s.Location().AnyTimes().Return("fake-location") - s.V(gomock.AssignableToTypeOf(3)).AnyTimes().Return(klogr.New()) m.Get(gomockinternal.AContext(), "my-rg", "my-public-net-interface"). Return(network.Interface{}, autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 404}, "Not found")) m.CreateOrUpdate(gomockinternal.AContext(), "my-rg", "my-public-net-interface", gomock.AssignableToTypeOf(network.Interface{})) @@ -279,7 +272,6 @@ func TestReconcileNetworkInterface(t *testing.T) { s.SubscriptionID().AnyTimes().Return("123") s.ResourceGroup().AnyTimes().Return("my-rg") s.Location().AnyTimes().Return("fake-location") - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) m.Get(gomockinternal.AContext(), "my-rg", "my-net-interface"). Return(network.Interface{}, autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 404}, "Not found")) m.CreateOrUpdate(gomockinternal.AContext(), "my-rg", "my-net-interface", gomockinternal.DiffEq(network.Interface{ @@ -320,9 +312,7 @@ func TestReconcileNetworkInterface(t *testing.T) { }) s.SubscriptionID().AnyTimes().Return("123") s.ResourceGroup().AnyTimes().Return("my-rg") - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.Location().AnyTimes().Return("fake-location") - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) m.Get(gomockinternal.AContext(), "my-rg", "my-net-interface"). 
Return(network.Interface{}, autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 404}, "Not found")) m.CreateOrUpdate(gomockinternal.AContext(), "my-rg", "my-net-interface", gomockinternal.DiffEq(network.Interface{ @@ -365,7 +355,6 @@ func TestReconcileNetworkInterface(t *testing.T) { s.SubscriptionID().AnyTimes().Return("123") s.ResourceGroup().AnyTimes().Return("my-rg") s.Location().AnyTimes().Return("fake-location") - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) gomock.InOrder( m.Get(gomockinternal.AContext(), "my-rg", "my-net-interface"). Return(network.Interface{}, autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 404}, "Not found")), @@ -467,7 +456,6 @@ func TestDeleteNetworkInterface(t *testing.T) { MachineName: "azure-test1", }, }) - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.ResourceGroup().AnyTimes().Return("my-rg") m.Delete(gomockinternal.AContext(), "my-rg", "my-net-interface") }, @@ -483,7 +471,6 @@ func TestDeleteNetworkInterface(t *testing.T) { MachineName: "azure-test1", }, }) - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.ResourceGroup().AnyTimes().Return("my-rg") m.Delete(gomockinternal.AContext(), "my-rg", "my-net-interface"). Return(autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 404}, "Not found")) @@ -501,7 +488,6 @@ func TestDeleteNetworkInterface(t *testing.T) { }, }) s.ResourceGroup().AnyTimes().Return("my-rg") - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) m.Delete(gomockinternal.AContext(), "my-rg", "my-net-interface"). 
Return(autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 500}, "Internal Server Error")) }, diff --git a/azure/services/privatedns/mock_privatedns/privatedns_mock.go b/azure/services/privatedns/mock_privatedns/privatedns_mock.go index 8b35c34f5ac..b071e4e5e1f 100644 --- a/azure/services/privatedns/mock_privatedns/privatedns_mock.go +++ b/azure/services/privatedns/mock_privatedns/privatedns_mock.go @@ -24,7 +24,6 @@ import ( reflect "reflect" autorest "github.com/Azure/go-autorest/autorest" - logr "github.com/go-logr/logr" gomock "github.com/golang/mock/gomock" v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" azure "sigs.k8s.io/cluster-api-provider-azure/azure" @@ -179,37 +178,6 @@ func (mr *MockScopeMockRecorder) ClusterName() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClusterName", reflect.TypeOf((*MockScope)(nil).ClusterName)) } -// Enabled mocks base method. -func (m *MockScope) Enabled() bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Enabled") - ret0, _ := ret[0].(bool) - return ret0 -} - -// Enabled indicates an expected call of Enabled. -func (mr *MockScopeMockRecorder) Enabled() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Enabled", reflect.TypeOf((*MockScope)(nil).Enabled)) -} - -// Error mocks base method. -func (m *MockScope) Error(err error, msg string, keysAndValues ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{err, msg} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Error", varargs...) -} - -// Error indicates an expected call of Error. -func (mr *MockScopeMockRecorder) Error(err, msg interface{}, keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{err, msg}, keysAndValues...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Error", reflect.TypeOf((*MockScope)(nil).Error), varargs...) 
-} - // FailureDomains mocks base method. func (m *MockScope) FailureDomains() []string { m.ctrl.T.Helper() @@ -238,23 +206,6 @@ func (mr *MockScopeMockRecorder) HashKey() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HashKey", reflect.TypeOf((*MockScope)(nil).HashKey)) } -// Info mocks base method. -func (m *MockScope) Info(msg string, keysAndValues ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{msg} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Info", varargs...) -} - -// Info indicates an expected call of Info. -func (mr *MockScopeMockRecorder) Info(msg interface{}, keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{msg}, keysAndValues...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Info", reflect.TypeOf((*MockScope)(nil).Info), varargs...) -} - // Location mocks base method. func (m *MockScope) Location() string { m.ctrl.T.Helper() @@ -324,49 +275,3 @@ func (mr *MockScopeMockRecorder) TenantID() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TenantID", reflect.TypeOf((*MockScope)(nil).TenantID)) } - -// V mocks base method. -func (m *MockScope) V(level int) logr.Logger { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "V", level) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// V indicates an expected call of V. -func (mr *MockScopeMockRecorder) V(level interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "V", reflect.TypeOf((*MockScope)(nil).V), level) -} - -// WithName mocks base method. -func (m *MockScope) WithName(name string) logr.Logger { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WithName", name) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// WithName indicates an expected call of WithName. 
-func (mr *MockScopeMockRecorder) WithName(name interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithName", reflect.TypeOf((*MockScope)(nil).WithName), name) -} - -// WithValues mocks base method. -func (m *MockScope) WithValues(keysAndValues ...interface{}) logr.Logger { - m.ctrl.T.Helper() - varargs := []interface{}{} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "WithValues", varargs...) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// WithValues indicates an expected call of WithValues. -func (mr *MockScopeMockRecorder) WithValues(keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithValues", reflect.TypeOf((*MockScope)(nil).WithValues), keysAndValues...) -} diff --git a/azure/services/privatedns/privatedns.go b/azure/services/privatedns/privatedns.go index f4aced09871..c33c4bde653 100644 --- a/azure/services/privatedns/privatedns.go +++ b/azure/services/privatedns/privatedns.go @@ -21,7 +21,6 @@ import ( "github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns" "github.com/Azure/go-autorest/autorest/to" - "github.com/go-logr/logr" "github.com/pkg/errors" "sigs.k8s.io/cluster-api-provider-azure/azure" @@ -31,7 +30,6 @@ import ( // Scope defines the scope interface for a private dns service. type Scope interface { - logr.Logger azure.ClusterDescriber PrivateDNSSpec() *azure.PrivateDNSSpec } @@ -52,22 +50,22 @@ func New(scope Scope) *Service { // Reconcile creates or updates the private zone, links it to the vnet, and creates DNS records. 
func (s *Service) Reconcile(ctx context.Context) error { - ctx, _, done := tele.StartSpanWithLogger(ctx, "privatedns.Service.Reconcile") + ctx, log, done := tele.StartSpanWithLogger(ctx, "privatedns.Service.Reconcile") defer done() zoneSpec := s.Scope.PrivateDNSSpec() if zoneSpec != nil { // Create the private DNS zone. - s.Scope.V(2).Info("creating private DNS zone", "private dns zone", zoneSpec.ZoneName) + log.V(2).Info("creating private DNS zone", "private dns zone", zoneSpec.ZoneName) err := s.client.CreateOrUpdateZone(ctx, s.Scope.ResourceGroup(), zoneSpec.ZoneName, privatedns.PrivateZone{Location: to.StringPtr(azure.Global)}) if err != nil { return errors.Wrapf(err, "failed to create private DNS zone %s", zoneSpec.ZoneName) } - s.Scope.V(2).Info("successfully created private DNS zone", "private dns zone", zoneSpec.ZoneName) + log.V(2).Info("successfully created private DNS zone", "private dns zone", zoneSpec.ZoneName) for _, linkSpec := range zoneSpec.Links { // Link each virtual network. - s.Scope.V(2).Info("creating a virtual network link", "virtual network", linkSpec.VNetName, "private dns zone", zoneSpec.ZoneName) + log.V(2).Info("creating a virtual network link", "virtual network", linkSpec.VNetName, "private dns zone", zoneSpec.ZoneName) link := privatedns.VirtualNetworkLink{ VirtualNetworkLinkProperties: &privatedns.VirtualNetworkLinkProperties{ VirtualNetwork: &privatedns.SubResource{ @@ -81,12 +79,12 @@ func (s *Service) Reconcile(ctx context.Context) error { if err != nil { return errors.Wrapf(err, "failed to create virtual network link %s", linkSpec.LinkName) } - s.Scope.V(2).Info("successfully created virtual network link", "virtual network", linkSpec.VNetName, "private dns zone", zoneSpec.ZoneName) + log.V(2).Info("successfully created virtual network link", "virtual network", linkSpec.VNetName, "private dns zone", zoneSpec.ZoneName) } // Create the record(s). 
for _, record := range zoneSpec.Records { - s.Scope.V(2).Info("creating record set", "private dns zone", zoneSpec.ZoneName, "record", record.Hostname) + log.V(2).Info("creating record set", "private dns zone", zoneSpec.ZoneName, "record", record.Hostname) set := privatedns.RecordSet{ RecordSetProperties: &privatedns.RecordSetProperties{ TTL: to.Int64Ptr(300), @@ -106,7 +104,7 @@ func (s *Service) Reconcile(ctx context.Context) error { if err != nil { return errors.Wrapf(err, "failed to create record %s in private DNS zone %s", record.Hostname, zoneSpec.ZoneName) } - s.Scope.V(2).Info("successfully created record set", "private dns zone", zoneSpec.ZoneName, "record", record.Hostname) + log.V(2).Info("successfully created record set", "private dns zone", zoneSpec.ZoneName, "record", record.Hostname) } } return nil @@ -114,14 +112,14 @@ func (s *Service) Reconcile(ctx context.Context) error { // Delete deletes the private zone. func (s *Service) Delete(ctx context.Context) error { - ctx, _, done := tele.StartSpanWithLogger(ctx, "privatedns.Service.Delete") + ctx, log, done := tele.StartSpanWithLogger(ctx, "privatedns.Service.Delete") defer done() zoneSpec := s.Scope.PrivateDNSSpec() if zoneSpec != nil { for _, linkSpec := range zoneSpec.Links { // Remove each virtual network link. 
- s.Scope.V(2).Info("removing virtual network link", "virtual network", linkSpec.VNetName, "private dns zone", zoneSpec.ZoneName) + log.V(2).Info("removing virtual network link", "virtual network", linkSpec.VNetName, "private dns zone", zoneSpec.ZoneName) err := s.client.DeleteLink(ctx, s.Scope.ResourceGroup(), zoneSpec.ZoneName, linkSpec.LinkName) if err != nil && !azure.ResourceNotFound(err) { return errors.Wrapf(err, "failed to delete virtual network link %s with zone %s in resource group %s", linkSpec.VNetName, zoneSpec.ZoneName, s.Scope.ResourceGroup()) @@ -129,7 +127,7 @@ func (s *Service) Delete(ctx context.Context) error { } // Delete the private DNS zone, which also deletes all records. - s.Scope.V(2).Info("deleting private dns zone", "private dns zone", zoneSpec.ZoneName) + log.V(2).Info("deleting private dns zone", "private dns zone", zoneSpec.ZoneName) err := s.client.DeleteZone(ctx, s.Scope.ResourceGroup(), zoneSpec.ZoneName) if err != nil && azure.ResourceNotFound(err) { // already deleted @@ -138,7 +136,7 @@ func (s *Service) Delete(ctx context.Context) error { if err != nil && !azure.ResourceNotFound(err) { return errors.Wrapf(err, "failed to delete private dns zone %s in resource group %s", zoneSpec.ZoneName, s.Scope.ResourceGroup()) } - s.Scope.V(2).Info("successfully deleted private dns zone", "private dns zone", zoneSpec.ZoneName) + log.V(2).Info("successfully deleted private dns zone", "private dns zone", zoneSpec.ZoneName) } return nil } diff --git a/azure/services/privatedns/privatedns_test.go b/azure/services/privatedns/privatedns_test.go index c318bb9497d..f0f55885311 100644 --- a/azure/services/privatedns/privatedns_test.go +++ b/azure/services/privatedns/privatedns_test.go @@ -22,17 +22,14 @@ import ( "testing" "github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns" - "github.com/Azure/go-autorest/autorest/to" - infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" - 
"sigs.k8s.io/cluster-api-provider-azure/azure/services/privatedns/mock_privatedns" - gomockinternal "sigs.k8s.io/cluster-api-provider-azure/internal/test/matchers/gomock" - "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/to" "github.com/golang/mock/gomock" . "github.com/onsi/gomega" + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" - - "k8s.io/klog/v2/klogr" + "sigs.k8s.io/cluster-api-provider-azure/azure/services/privatedns/mock_privatedns" + gomockinternal "sigs.k8s.io/cluster-api-provider-azure/internal/test/matchers/gomock" ) func TestReconcilePrivateDNS(t *testing.T) { @@ -52,7 +49,6 @@ func TestReconcilePrivateDNS(t *testing.T) { name: "create ipv4 private dns successfully", expectedError: "", expect: func(s *mock_privatedns.MockScopeMockRecorder, m *mock_privatedns.MockclientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.PrivateDNSSpec().Return(&azure.PrivateDNSSpec{ ZoneName: "my-dns-zone", Links: []azure.PrivateDNSLinkSpec{ @@ -97,7 +93,6 @@ func TestReconcilePrivateDNS(t *testing.T) { name: "create multiple ipv4 private dns successfully", expectedError: "", expect: func(s *mock_privatedns.MockScopeMockRecorder, m *mock_privatedns.MockclientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.PrivateDNSSpec().Return(&azure.PrivateDNSSpec{ ZoneName: "my-dns-zone", Links: []azure.PrivateDNSLinkSpec{ @@ -156,7 +151,6 @@ func TestReconcilePrivateDNS(t *testing.T) { name: "create ipv6 private dns successfully", expectedError: "", expect: func(s *mock_privatedns.MockScopeMockRecorder, m *mock_privatedns.MockclientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.PrivateDNSSpec().Return(&azure.PrivateDNSSpec{ ZoneName: "my-dns-zone", Links: []azure.PrivateDNSLinkSpec{ @@ -201,7 +195,6 @@ func TestReconcilePrivateDNS(t *testing.T) { name: "create multiple ipv6 private 
dns successfully", expectedError: "", expect: func(s *mock_privatedns.MockScopeMockRecorder, m *mock_privatedns.MockclientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.PrivateDNSSpec().Return(&azure.PrivateDNSSpec{ ZoneName: "my-dns-zone", Links: []azure.PrivateDNSLinkSpec{ @@ -260,7 +253,6 @@ func TestReconcilePrivateDNS(t *testing.T) { name: "link creation fails", expectedError: "failed to create virtual network link my-link: #: Internal Server Error: StatusCode=500", expect: func(s *mock_privatedns.MockScopeMockRecorder, m *mock_privatedns.MockclientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.PrivateDNSSpec().Return(&azure.PrivateDNSSpec{ ZoneName: "my-dns-zone", Links: []azure.PrivateDNSLinkSpec{ @@ -295,7 +287,6 @@ func TestReconcilePrivateDNS(t *testing.T) { name: "creating multiple links fails", expectedError: "failed to create virtual network link my-link-2: #: Internal Server Error: StatusCode=500", expect: func(s *mock_privatedns.MockScopeMockRecorder, m *mock_privatedns.MockclientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.PrivateDNSSpec().Return(&azure.PrivateDNSSpec{ ZoneName: "my-dns-zone", Links: []azure.PrivateDNSLinkSpec{ @@ -386,7 +377,6 @@ func TestDeletePrivateDNS(t *testing.T) { name: "no private dns", expectedError: "", expect: func(s *mock_privatedns.MockScopeMockRecorder, m *mock_privatedns.MockclientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.PrivateDNSSpec().Return(nil) }, }, @@ -394,7 +384,6 @@ func TestDeletePrivateDNS(t *testing.T) { name: "delete the dns zone", expectedError: "", expect: func(s *mock_privatedns.MockScopeMockRecorder, m *mock_privatedns.MockclientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.PrivateDNSSpec().Return(&azure.PrivateDNSSpec{ ZoneName: "my-dns-zone", Links: []azure.PrivateDNSLinkSpec{ @@ -420,7 +409,6 @@ func 
TestDeletePrivateDNS(t *testing.T) { name: "delete the dns zone with multiple links", expectedError: "", expect: func(s *mock_privatedns.MockScopeMockRecorder, m *mock_privatedns.MockclientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.PrivateDNSSpec().Return(&azure.PrivateDNSSpec{ ZoneName: "my-dns-zone", Links: []azure.PrivateDNSLinkSpec{ @@ -458,7 +446,6 @@ func TestDeletePrivateDNS(t *testing.T) { name: "link already deleted", expectedError: "", expect: func(s *mock_privatedns.MockScopeMockRecorder, m *mock_privatedns.MockclientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.PrivateDNSSpec().Return(&azure.PrivateDNSSpec{ ZoneName: "my-dns-zone", Links: []azure.PrivateDNSLinkSpec{ @@ -485,7 +472,6 @@ func TestDeletePrivateDNS(t *testing.T) { name: "one link already deleted with multiple links", expectedError: "", expect: func(s *mock_privatedns.MockScopeMockRecorder, m *mock_privatedns.MockclientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.PrivateDNSSpec().Return(&azure.PrivateDNSSpec{ ZoneName: "my-dns-zone", Links: []azure.PrivateDNSLinkSpec{ @@ -524,7 +510,6 @@ func TestDeletePrivateDNS(t *testing.T) { name: "zone already deleted", expectedError: "", expect: func(s *mock_privatedns.MockScopeMockRecorder, m *mock_privatedns.MockclientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.PrivateDNSSpec().Return(&azure.PrivateDNSSpec{ ZoneName: "my-dns-zone", Links: []azure.PrivateDNSLinkSpec{ @@ -552,7 +537,6 @@ func TestDeletePrivateDNS(t *testing.T) { name: "error while trying to delete the link", expectedError: "failed to delete virtual network link my-vnet with zone my-dns-zone in resource group my-rg: #: Internal Server Error: StatusCode=500", expect: func(s *mock_privatedns.MockScopeMockRecorder, m *mock_privatedns.MockclientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) 
s.PrivateDNSSpec().Return(&azure.PrivateDNSSpec{ ZoneName: "my-dns-zone", Links: []azure.PrivateDNSLinkSpec{ @@ -578,7 +562,6 @@ func TestDeletePrivateDNS(t *testing.T) { name: "error while trying to delete one link with multiple links", expectedError: "failed to delete virtual network link my-vnet-2 with zone my-dns-zone in resource group my-rg: #: Internal Server Error: StatusCode=500", expect: func(s *mock_privatedns.MockScopeMockRecorder, m *mock_privatedns.MockclientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.PrivateDNSSpec().Return(&azure.PrivateDNSSpec{ ZoneName: "my-dns-zone", Links: []azure.PrivateDNSLinkSpec{ @@ -615,7 +598,6 @@ func TestDeletePrivateDNS(t *testing.T) { name: "error while trying to delete the zone with one link", expectedError: "failed to delete private dns zone my-dns-zone in resource group my-rg: #: Internal Server Error: StatusCode=500", expect: func(s *mock_privatedns.MockScopeMockRecorder, m *mock_privatedns.MockclientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.PrivateDNSSpec().Return(&azure.PrivateDNSSpec{ ZoneName: "my-dns-zone", Links: []azure.PrivateDNSLinkSpec{ @@ -642,7 +624,6 @@ func TestDeletePrivateDNS(t *testing.T) { name: "error while trying to delete the zone with multiple links", expectedError: "failed to delete private dns zone my-dns-zone in resource group my-rg: #: Internal Server Error: StatusCode=500", expect: func(s *mock_privatedns.MockScopeMockRecorder, m *mock_privatedns.MockclientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.PrivateDNSSpec().Return(&azure.PrivateDNSSpec{ ZoneName: "my-dns-zone", Links: []azure.PrivateDNSLinkSpec{ diff --git a/azure/services/publicips/mock_publicips/publicips_mock.go b/azure/services/publicips/mock_publicips/publicips_mock.go index d64ce9a9d40..3e2c9c22195 100644 --- a/azure/services/publicips/mock_publicips/publicips_mock.go +++ 
b/azure/services/publicips/mock_publicips/publicips_mock.go @@ -24,7 +24,6 @@ import ( reflect "reflect" autorest "github.com/Azure/go-autorest/autorest" - logr "github.com/go-logr/logr" gomock "github.com/golang/mock/gomock" v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" azure "sigs.k8s.io/cluster-api-provider-azure/azure" @@ -179,37 +178,6 @@ func (mr *MockPublicIPScopeMockRecorder) ClusterName() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClusterName", reflect.TypeOf((*MockPublicIPScope)(nil).ClusterName)) } -// Enabled mocks base method. -func (m *MockPublicIPScope) Enabled() bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Enabled") - ret0, _ := ret[0].(bool) - return ret0 -} - -// Enabled indicates an expected call of Enabled. -func (mr *MockPublicIPScopeMockRecorder) Enabled() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Enabled", reflect.TypeOf((*MockPublicIPScope)(nil).Enabled)) -} - -// Error mocks base method. -func (m *MockPublicIPScope) Error(err error, msg string, keysAndValues ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{err, msg} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Error", varargs...) -} - -// Error indicates an expected call of Error. -func (mr *MockPublicIPScopeMockRecorder) Error(err, msg interface{}, keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{err, msg}, keysAndValues...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Error", reflect.TypeOf((*MockPublicIPScope)(nil).Error), varargs...) -} - // FailureDomains mocks base method. 
func (m *MockPublicIPScope) FailureDomains() []string { m.ctrl.T.Helper() @@ -238,23 +206,6 @@ func (mr *MockPublicIPScopeMockRecorder) HashKey() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HashKey", reflect.TypeOf((*MockPublicIPScope)(nil).HashKey)) } -// Info mocks base method. -func (m *MockPublicIPScope) Info(msg string, keysAndValues ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{msg} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Info", varargs...) -} - -// Info indicates an expected call of Info. -func (mr *MockPublicIPScopeMockRecorder) Info(msg interface{}, keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{msg}, keysAndValues...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Info", reflect.TypeOf((*MockPublicIPScope)(nil).Info), varargs...) -} - // Location mocks base method. func (m *MockPublicIPScope) Location() string { m.ctrl.T.Helper() @@ -324,49 +275,3 @@ func (mr *MockPublicIPScopeMockRecorder) TenantID() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TenantID", reflect.TypeOf((*MockPublicIPScope)(nil).TenantID)) } - -// V mocks base method. -func (m *MockPublicIPScope) V(level int) logr.Logger { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "V", level) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// V indicates an expected call of V. -func (mr *MockPublicIPScopeMockRecorder) V(level interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "V", reflect.TypeOf((*MockPublicIPScope)(nil).V), level) -} - -// WithName mocks base method. -func (m *MockPublicIPScope) WithName(name string) logr.Logger { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WithName", name) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// WithName indicates an expected call of WithName. 
-func (mr *MockPublicIPScopeMockRecorder) WithName(name interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithName", reflect.TypeOf((*MockPublicIPScope)(nil).WithName), name) -} - -// WithValues mocks base method. -func (m *MockPublicIPScope) WithValues(keysAndValues ...interface{}) logr.Logger { - m.ctrl.T.Helper() - varargs := []interface{}{} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "WithValues", varargs...) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// WithValues indicates an expected call of WithValues. -func (mr *MockPublicIPScopeMockRecorder) WithValues(keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithValues", reflect.TypeOf((*MockPublicIPScope)(nil).WithValues), keysAndValues...) -} diff --git a/azure/services/publicips/publicips.go b/azure/services/publicips/publicips.go index 7d57b69d7ce..a9be7749476 100644 --- a/azure/services/publicips/publicips.go +++ b/azure/services/publicips/publicips.go @@ -22,9 +22,7 @@ import ( "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network" "github.com/Azure/go-autorest/autorest/to" - "github.com/go-logr/logr" "github.com/pkg/errors" - infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/azure/converters" @@ -33,7 +31,6 @@ import ( // PublicIPScope defines the scope interface for a public IP service. type PublicIPScope interface { - logr.Logger azure.ClusterDescriber PublicIPSpecs() []azure.PublicIPSpec } @@ -54,11 +51,11 @@ func New(scope PublicIPScope) *Service { // Reconcile gets/creates/updates a public ip. 
func (s *Service) Reconcile(ctx context.Context) error { - ctx, _, done := tele.StartSpanWithLogger(ctx, "publicips.Service.Reconcile") + ctx, log, done := tele.StartSpanWithLogger(ctx, "publicips.Service.Reconcile") defer done() for _, ip := range s.Scope.PublicIPSpecs() { - s.Scope.V(2).Info("creating public IP", "public ip", ip.Name) + log.V(2).Info("creating public IP", "public ip", ip.Name) // only set DNS properties if there is a DNS name specified addressVersion := network.IPVersionIPv4 @@ -102,7 +99,7 @@ func (s *Service) Reconcile(ctx context.Context) error { return errors.Wrap(err, "cannot create public IP") } - s.Scope.V(2).Info("successfully created public IP", "public ip", ip.Name) + log.V(2).Info("successfully created public IP", "public ip", ip.Name) } return nil @@ -110,7 +107,7 @@ func (s *Service) Reconcile(ctx context.Context) error { // Delete deletes the public IP with the provided scope. func (s *Service) Delete(ctx context.Context) error { - ctx, _, done := tele.StartSpanWithLogger(ctx, "publicips.Service.Delete") + ctx, log, done := tele.StartSpanWithLogger(ctx, "publicips.Service.Delete") defer done() for _, ip := range s.Scope.PublicIPSpecs() { @@ -120,11 +117,11 @@ func (s *Service) Delete(ctx context.Context) error { } if !managed { - s.Scope.V(2).Info("Skipping IP deletion for unmanaged public IP", "public ip", ip.Name) + log.V(2).Info("Skipping IP deletion for unmanaged public IP", "public ip", ip.Name) continue } - s.Scope.V(2).Info("deleting public IP", "public ip", ip.Name) + log.V(2).Info("deleting public IP", "public ip", ip.Name) err = s.Client.Delete(ctx, s.Scope.ResourceGroup(), ip.Name) if err != nil && azure.ResourceNotFound(err) { // already deleted @@ -134,7 +131,7 @@ func (s *Service) Delete(ctx context.Context) error { return errors.Wrapf(err, "failed to delete public IP %s in resource group %s", ip.Name, s.Scope.ResourceGroup()) } - s.Scope.V(2).Info("deleted public IP", "public ip", ip.Name) + log.V(2).Info("deleted 
public IP", "public ip", ip.Name) } return nil } @@ -142,6 +139,9 @@ func (s *Service) Delete(ctx context.Context) error { // isIPManaged returns true if the IP has an owned tag with the cluster name as value, // meaning that the IP's lifecycle is managed. func (s *Service) isIPManaged(ctx context.Context, ipName string) (bool, error) { + ctx, _, done := tele.StartSpanWithLogger(ctx, "publicips.Service.isIPManaged") + defer done() + ip, err := s.Client.Get(ctx, s.Scope.ResourceGroup(), ipName) if err != nil { return false, err diff --git a/azure/services/publicips/publicips_test.go b/azure/services/publicips/publicips_test.go index 863183f70d4..e26dff5b6e3 100644 --- a/azure/services/publicips/publicips_test.go +++ b/azure/services/publicips/publicips_test.go @@ -21,22 +21,16 @@ import ( "net/http" "testing" - infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" - - gomockinternal "sigs.k8s.io/cluster-api-provider-azure/internal/test/matchers/gomock" - - "sigs.k8s.io/cluster-api-provider-azure/azure" - - . "github.com/onsi/gomega" - "sigs.k8s.io/cluster-api-provider-azure/azure/services/publicips/mock_publicips" - + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/to" "github.com/golang/mock/gomock" - - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network" + . 
"github.com/onsi/gomega" "k8s.io/client-go/kubernetes/scheme" - "k8s.io/klog/v2/klogr" + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" + "sigs.k8s.io/cluster-api-provider-azure/azure" + "sigs.k8s.io/cluster-api-provider-azure/azure/services/publicips/mock_publicips" + gomockinternal "sigs.k8s.io/cluster-api-provider-azure/internal/test/matchers/gomock" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) @@ -54,7 +48,6 @@ func TestReconcilePublicIP(t *testing.T) { name: "can create public IPs", expectedError: "", expect: func(s *mock_publicips.MockPublicIPScopeMockRecorder, m *mock_publicips.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.PublicIPSpecs().Return([]azure.PublicIPSpec{ { Name: "my-publicip", @@ -154,7 +147,6 @@ func TestReconcilePublicIP(t *testing.T) { name: "fail to create a public IP", expectedError: "cannot create public IP: #: Internal Server Error: StatusCode=500", expect: func(s *mock_publicips.MockPublicIPScopeMockRecorder, m *mock_publicips.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.PublicIPSpecs().Return([]azure.PublicIPSpec{ { Name: "my-publicip", @@ -210,7 +202,6 @@ func TestDeletePublicIP(t *testing.T) { name: "successfully delete two existing public IP", expectedError: "", expect: func(s *mock_publicips.MockPublicIPScopeMockRecorder, m *mock_publicips.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.PublicIPSpecs().Return([]azure.PublicIPSpec{ { Name: "my-publicip", @@ -243,7 +234,6 @@ func TestDeletePublicIP(t *testing.T) { name: "public ip already deleted", expectedError: "", expect: func(s *mock_publicips.MockPublicIPScopeMockRecorder, m *mock_publicips.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.PublicIPSpecs().Return([]azure.PublicIPSpec{ { Name: "my-publicip", @@ -269,7 +259,6 @@ func TestDeletePublicIP(t *testing.T) { name: 
"public ip deletion fails", expectedError: "failed to delete public IP my-publicip in resource group my-rg: #: Internal Server Error: StatusCode=500", expect: func(s *mock_publicips.MockPublicIPScopeMockRecorder, m *mock_publicips.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.PublicIPSpecs().Return([]azure.PublicIPSpec{ { Name: "my-publicip", @@ -292,7 +281,6 @@ func TestDeletePublicIP(t *testing.T) { name: "skip unmanaged public ip deletion", expectedError: "", expect: func(s *mock_publicips.MockPublicIPScopeMockRecorder, m *mock_publicips.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.PublicIPSpecs().Return([]azure.PublicIPSpec{ { Name: "my-publicip", diff --git a/azure/services/roleassignments/mock_roleassignments/roleassignments_mock.go b/azure/services/roleassignments/mock_roleassignments/roleassignments_mock.go index 8c0070d9958..5f2b86d03f4 100644 --- a/azure/services/roleassignments/mock_roleassignments/roleassignments_mock.go +++ b/azure/services/roleassignments/mock_roleassignments/roleassignments_mock.go @@ -24,7 +24,6 @@ import ( reflect "reflect" autorest "github.com/Azure/go-autorest/autorest" - logr "github.com/go-logr/logr" gomock "github.com/golang/mock/gomock" v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" azure "sigs.k8s.io/cluster-api-provider-azure/azure" @@ -179,37 +178,6 @@ func (mr *MockRoleAssignmentScopeMockRecorder) ClusterName() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClusterName", reflect.TypeOf((*MockRoleAssignmentScope)(nil).ClusterName)) } -// Enabled mocks base method. -func (m *MockRoleAssignmentScope) Enabled() bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Enabled") - ret0, _ := ret[0].(bool) - return ret0 -} - -// Enabled indicates an expected call of Enabled. 
-func (mr *MockRoleAssignmentScopeMockRecorder) Enabled() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Enabled", reflect.TypeOf((*MockRoleAssignmentScope)(nil).Enabled)) -} - -// Error mocks base method. -func (m *MockRoleAssignmentScope) Error(err error, msg string, keysAndValues ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{err, msg} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Error", varargs...) -} - -// Error indicates an expected call of Error. -func (mr *MockRoleAssignmentScopeMockRecorder) Error(err, msg interface{}, keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{err, msg}, keysAndValues...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Error", reflect.TypeOf((*MockRoleAssignmentScope)(nil).Error), varargs...) -} - // FailureDomains mocks base method. func (m *MockRoleAssignmentScope) FailureDomains() []string { m.ctrl.T.Helper() @@ -238,23 +206,6 @@ func (mr *MockRoleAssignmentScopeMockRecorder) HashKey() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HashKey", reflect.TypeOf((*MockRoleAssignmentScope)(nil).HashKey)) } -// Info mocks base method. -func (m *MockRoleAssignmentScope) Info(msg string, keysAndValues ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{msg} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Info", varargs...) -} - -// Info indicates an expected call of Info. -func (mr *MockRoleAssignmentScopeMockRecorder) Info(msg interface{}, keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{msg}, keysAndValues...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Info", reflect.TypeOf((*MockRoleAssignmentScope)(nil).Info), varargs...) -} - // Location mocks base method. 
func (m *MockRoleAssignmentScope) Location() string { m.ctrl.T.Helper() @@ -324,49 +275,3 @@ func (mr *MockRoleAssignmentScopeMockRecorder) TenantID() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TenantID", reflect.TypeOf((*MockRoleAssignmentScope)(nil).TenantID)) } - -// V mocks base method. -func (m *MockRoleAssignmentScope) V(level int) logr.Logger { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "V", level) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// V indicates an expected call of V. -func (mr *MockRoleAssignmentScopeMockRecorder) V(level interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "V", reflect.TypeOf((*MockRoleAssignmentScope)(nil).V), level) -} - -// WithName mocks base method. -func (m *MockRoleAssignmentScope) WithName(name string) logr.Logger { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WithName", name) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// WithName indicates an expected call of WithName. -func (mr *MockRoleAssignmentScopeMockRecorder) WithName(name interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithName", reflect.TypeOf((*MockRoleAssignmentScope)(nil).WithName), name) -} - -// WithValues mocks base method. -func (m *MockRoleAssignmentScope) WithValues(keysAndValues ...interface{}) logr.Logger { - m.ctrl.T.Helper() - varargs := []interface{}{} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "WithValues", varargs...) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// WithValues indicates an expected call of WithValues. -func (mr *MockRoleAssignmentScopeMockRecorder) WithValues(keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithValues", reflect.TypeOf((*MockRoleAssignmentScope)(nil).WithValues), keysAndValues...) 
-} diff --git a/azure/services/roleassignments/roleassignments.go b/azure/services/roleassignments/roleassignments.go index e0a51c5718d..5514c70b298 100644 --- a/azure/services/roleassignments/roleassignments.go +++ b/azure/services/roleassignments/roleassignments.go @@ -22,7 +22,6 @@ import ( "github.com/Azure/azure-sdk-for-go/profiles/2019-03-01/authorization/mgmt/authorization" "github.com/Azure/go-autorest/autorest/to" - "github.com/go-logr/logr" "github.com/pkg/errors" "sigs.k8s.io/cluster-api-provider-azure/azure" @@ -35,7 +34,6 @@ const azureBuiltInContributorID = "b24988ac-6180-42a0-ab88-20f7382dd24c" // RoleAssignmentScope defines the scope interface for a role assignment service. type RoleAssignmentScope interface { - logr.Logger azure.ClusterDescriber RoleAssignmentSpecs() []azure.RoleAssignmentSpec } @@ -78,7 +76,7 @@ func (s *Service) Reconcile(ctx context.Context) error { } func (s *Service) reconcileVM(ctx context.Context, roleSpec azure.RoleAssignmentSpec) error { - ctx, _, done := tele.StartSpanWithLogger(ctx, "roleassignments.Service.reconcileVM") + ctx, log, done := tele.StartSpanWithLogger(ctx, "roleassignments.Service.reconcileVM") defer done() resultVM, err := s.virtualMachinesClient.Get(ctx, s.Scope.ResourceGroup(), roleSpec.MachineName) @@ -91,13 +89,13 @@ func (s *Service) reconcileVM(ctx context.Context, roleSpec azure.RoleAssignment return errors.Wrap(err, "cannot assign role to VM system assigned identity") } - s.Scope.V(2).Info("successfully created role assignment for generated Identity for VM", "virtual machine", roleSpec.MachineName) + log.V(2).Info("successfully created role assignment for generated Identity for VM", "virtual machine", roleSpec.MachineName) return nil } func (s *Service) reconcileVMSS(ctx context.Context, roleSpec azure.RoleAssignmentSpec) error { - ctx, _, done := tele.StartSpanWithLogger(ctx, "roleassignments.Service.reconcileVMSS") + ctx, log, done := tele.StartSpanWithLogger(ctx, 
"roleassignments.Service.reconcileVMSS") defer done() resultVMSS, err := s.virtualMachineScaleSetClient.Get(ctx, s.Scope.ResourceGroup(), roleSpec.MachineName) @@ -110,7 +108,7 @@ func (s *Service) reconcileVMSS(ctx context.Context, roleSpec azure.RoleAssignme return errors.Wrap(err, "cannot assign role to VMSS system assigned identity") } - s.Scope.V(2).Info("successfully created role assignment for generated Identity for VMSS", "virtual machine scale set", roleSpec.MachineName) + log.V(2).Info("successfully created role assignment for generated Identity for VMSS", "virtual machine scale set", roleSpec.MachineName) return nil } diff --git a/azure/services/roleassignments/roleassignments_test.go b/azure/services/roleassignments/roleassignments_test.go index 56c8487be27..82e14f6b246 100644 --- a/azure/services/roleassignments/roleassignments_test.go +++ b/azure/services/roleassignments/roleassignments_test.go @@ -27,9 +27,6 @@ import ( "github.com/Azure/go-autorest/autorest/to" "github.com/golang/mock/gomock" . 
"github.com/onsi/gomega" - - "k8s.io/klog/v2/klogr" - "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/azure/services/roleassignments/mock_roleassignments" "sigs.k8s.io/cluster-api-provider-azure/azure/services/scalesets/mock_scalesets" @@ -47,7 +44,6 @@ func TestReconcileRoleAssignmentsVM(t *testing.T) { name: "create a role assignment", expectedError: "", expect: func(s *mock_roleassignments.MockRoleAssignmentScopeMockRecorder, m *mock_roleassignments.MockclientMockRecorder, v *mock_virtualmachines.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.SubscriptionID().AnyTimes().Return("12345") s.ResourceGroup().Return("my-rg") s.RoleAssignmentSpecs().Return([]azure.RoleAssignmentSpec{ @@ -73,7 +69,6 @@ func TestReconcileRoleAssignmentsVM(t *testing.T) { name: "error getting VM", expectedError: "cannot get VM to assign role to system assigned identity: #: Internal Server Error: StatusCode=500", expect: func(s *mock_roleassignments.MockRoleAssignmentScopeMockRecorder, m *mock_roleassignments.MockclientMockRecorder, v *mock_virtualmachines.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.SubscriptionID().AnyTimes().Return("12345") s.ResourceGroup().Return("my-rg") s.RoleAssignmentSpecs().Return([]azure.RoleAssignmentSpec{ @@ -89,7 +84,6 @@ func TestReconcileRoleAssignmentsVM(t *testing.T) { name: "return error when creating a role assignment", expectedError: "cannot assign role to VM system assigned identity: #: Internal Server Error: StatusCode=500", expect: func(s *mock_roleassignments.MockRoleAssignmentScopeMockRecorder, m *mock_roleassignments.MockclientMockRecorder, v *mock_virtualmachines.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.SubscriptionID().AnyTimes().Return("12345") s.ResourceGroup().Return("my-rg") s.RoleAssignmentSpecs().Return([]azure.RoleAssignmentSpec{ @@ -147,7 +141,6 @@ 
func TestReconcileRoleAssignmentsVMSS(t *testing.T) { name: "create a role assignment", expectedError: "", expect: func(s *mock_roleassignments.MockRoleAssignmentScopeMockRecorder, m *mock_roleassignments.MockclientMockRecorder, v *mock_scalesets.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.SubscriptionID().AnyTimes().Return("12345") s.ResourceGroup().Return("my-rg") s.RoleAssignmentSpecs().Return([]azure.RoleAssignmentSpec{ @@ -173,7 +166,6 @@ func TestReconcileRoleAssignmentsVMSS(t *testing.T) { name: "error getting VMSS", expectedError: "cannot get VMSS to assign role to system assigned identity: #: Internal Server Error: StatusCode=500", expect: func(s *mock_roleassignments.MockRoleAssignmentScopeMockRecorder, m *mock_roleassignments.MockclientMockRecorder, v *mock_scalesets.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.SubscriptionID().AnyTimes().Return("12345") s.ResourceGroup().Return("my-rg") s.RoleAssignmentSpecs().Return([]azure.RoleAssignmentSpec{ @@ -189,7 +181,6 @@ func TestReconcileRoleAssignmentsVMSS(t *testing.T) { name: "return error when creating a role assignment", expectedError: "cannot assign role to VMSS system assigned identity: #: Internal Server Error: StatusCode=500", expect: func(s *mock_roleassignments.MockRoleAssignmentScopeMockRecorder, m *mock_roleassignments.MockclientMockRecorder, v *mock_scalesets.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.SubscriptionID().AnyTimes().Return("12345") s.ResourceGroup().Return("my-rg") s.RoleAssignmentSpecs().Return([]azure.RoleAssignmentSpec{ diff --git a/azure/services/routetables/mock_routetables/routetables_mock.go b/azure/services/routetables/mock_routetables/routetables_mock.go index 5bbd2ccaabe..c6649694910 100644 --- a/azure/services/routetables/mock_routetables/routetables_mock.go +++ 
b/azure/services/routetables/mock_routetables/routetables_mock.go @@ -24,7 +24,6 @@ import ( reflect "reflect" autorest "github.com/Azure/go-autorest/autorest" - logr "github.com/go-logr/logr" gomock "github.com/golang/mock/gomock" v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" azure "sigs.k8s.io/cluster-api-provider-azure/azure" @@ -235,37 +234,6 @@ func (mr *MockRouteTableScopeMockRecorder) ControlPlaneSubnet() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ControlPlaneSubnet", reflect.TypeOf((*MockRouteTableScope)(nil).ControlPlaneSubnet)) } -// Enabled mocks base method. -func (m *MockRouteTableScope) Enabled() bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Enabled") - ret0, _ := ret[0].(bool) - return ret0 -} - -// Enabled indicates an expected call of Enabled. -func (mr *MockRouteTableScopeMockRecorder) Enabled() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Enabled", reflect.TypeOf((*MockRouteTableScope)(nil).Enabled)) -} - -// Error mocks base method. -func (m *MockRouteTableScope) Error(err error, msg string, keysAndValues ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{err, msg} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Error", varargs...) -} - -// Error indicates an expected call of Error. -func (mr *MockRouteTableScopeMockRecorder) Error(err, msg interface{}, keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{err, msg}, keysAndValues...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Error", reflect.TypeOf((*MockRouteTableScope)(nil).Error), varargs...) -} - // FailureDomains mocks base method. 
func (m *MockRouteTableScope) FailureDomains() []string { m.ctrl.T.Helper() @@ -308,23 +276,6 @@ func (mr *MockRouteTableScopeMockRecorder) HashKey() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HashKey", reflect.TypeOf((*MockRouteTableScope)(nil).HashKey)) } -// Info mocks base method. -func (m *MockRouteTableScope) Info(msg string, keysAndValues ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{msg} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Info", varargs...) -} - -// Info indicates an expected call of Info. -func (mr *MockRouteTableScopeMockRecorder) Info(msg interface{}, keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{msg}, keysAndValues...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Info", reflect.TypeOf((*MockRouteTableScope)(nil).Info), varargs...) -} - // IsAPIServerPrivate mocks base method. func (m *MockRouteTableScope) IsAPIServerPrivate() bool { m.ctrl.T.Helper() @@ -519,20 +470,6 @@ func (mr *MockRouteTableScopeMockRecorder) TenantID() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TenantID", reflect.TypeOf((*MockRouteTableScope)(nil).TenantID)) } -// V mocks base method. -func (m *MockRouteTableScope) V(level int) logr.Logger { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "V", level) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// V indicates an expected call of V. -func (mr *MockRouteTableScopeMockRecorder) V(level interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "V", reflect.TypeOf((*MockRouteTableScope)(nil).V), level) -} - // Vnet mocks base method. 
func (m *MockRouteTableScope) Vnet() *v1beta1.VnetSpec { m.ctrl.T.Helper() @@ -546,35 +483,3 @@ func (mr *MockRouteTableScopeMockRecorder) Vnet() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Vnet", reflect.TypeOf((*MockRouteTableScope)(nil).Vnet)) } - -// WithName mocks base method. -func (m *MockRouteTableScope) WithName(name string) logr.Logger { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WithName", name) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// WithName indicates an expected call of WithName. -func (mr *MockRouteTableScopeMockRecorder) WithName(name interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithName", reflect.TypeOf((*MockRouteTableScope)(nil).WithName), name) -} - -// WithValues mocks base method. -func (m *MockRouteTableScope) WithValues(keysAndValues ...interface{}) logr.Logger { - m.ctrl.T.Helper() - varargs := []interface{}{} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "WithValues", varargs...) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// WithValues indicates an expected call of WithValues. -func (mr *MockRouteTableScopeMockRecorder) WithValues(keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithValues", reflect.TypeOf((*MockRouteTableScope)(nil).WithValues), keysAndValues...) 
-} diff --git a/azure/services/routetables/routetables.go b/azure/services/routetables/routetables.go index 6842542fcad..426bcf3ab4d 100644 --- a/azure/services/routetables/routetables.go +++ b/azure/services/routetables/routetables.go @@ -21,7 +21,6 @@ import ( "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network" "github.com/Azure/go-autorest/autorest/to" - "github.com/go-logr/logr" "github.com/pkg/errors" "sigs.k8s.io/cluster-api-provider-azure/azure" @@ -31,7 +30,6 @@ import ( // RouteTableScope defines the scope interface for route table service. type RouteTableScope interface { - logr.Logger azure.ClusterDescriber azure.NetworkDescriber RouteTableSpecs() []azure.RouteTableSpec @@ -53,11 +51,11 @@ func New(scope *scope.ClusterScope) *Service { // Reconcile gets/creates/updates a route table. func (s *Service) Reconcile(ctx context.Context) error { - ctx, _, done := tele.StartSpanWithLogger(ctx, "routetables.Service.Reconcile") + ctx, log, done := tele.StartSpanWithLogger(ctx, "routetables.Service.Reconcile") defer done() if !s.Scope.Vnet().IsManaged(s.Scope.ClusterName()) { - s.Scope.V(4).Info("Skipping route tables reconcile in custom vnet mode") + log.V(4).Info("Skipping route tables reconcile in custom vnet mode") return nil } @@ -77,7 +75,7 @@ func (s *Service) Reconcile(ctx context.Context) error { continue } - s.Scope.V(2).Info("creating Route Table", "route table", routeTableSpec.Name) + log.V(2).Info("creating Route Table", "route table", routeTableSpec.Name) err = s.client.CreateOrUpdate( ctx, s.Scope.ResourceGroup(), @@ -90,22 +88,22 @@ func (s *Service) Reconcile(ctx context.Context) error { if err != nil { return errors.Wrapf(err, "failed to create route table %s in resource group %s", routeTableSpec.Name, s.Scope.ResourceGroup()) } - s.Scope.V(2).Info("successfully created route table", "route table", routeTableSpec.Name) + log.V(2).Info("successfully created route table", "route table", routeTableSpec.Name) } return nil } 
// Delete deletes the route table with the provided name. func (s *Service) Delete(ctx context.Context) error { - ctx, _, done := tele.StartSpanWithLogger(ctx, "routetables.Service.Delete") + ctx, log, done := tele.StartSpanWithLogger(ctx, "routetables.Service.Delete") defer done() if !s.Scope.Vnet().IsManaged(s.Scope.ClusterName()) { - s.Scope.V(4).Info("Skipping route table deletion in custom vnet mode") + log.V(4).Info("Skipping route table deletion in custom vnet mode") return nil } for _, routeTableSpec := range s.Scope.RouteTableSpecs() { - s.Scope.V(2).Info("deleting route table", "route table", routeTableSpec.Name) + log.V(2).Info("deleting route table", "route table", routeTableSpec.Name) err := s.client.Delete(ctx, s.Scope.ResourceGroup(), routeTableSpec.Name) if err != nil && azure.ResourceNotFound(err) { // already deleted @@ -115,7 +113,7 @@ func (s *Service) Delete(ctx context.Context) error { return errors.Wrapf(err, "failed to delete route table %s in resource group %s", routeTableSpec.Name, s.Scope.ResourceGroup()) } - s.Scope.V(2).Info("successfully deleted route table", "route table", routeTableSpec.Name) + log.V(2).Info("successfully deleted route table", "route table", routeTableSpec.Name) } return nil } diff --git a/azure/services/routetables/routetables_test.go b/azure/services/routetables/routetables_test.go index dedad125796..32a41ae8b43 100644 --- a/azure/services/routetables/routetables_test.go +++ b/azure/services/routetables/routetables_test.go @@ -27,13 +27,11 @@ import ( "github.com/golang/mock/gomock" . 
"github.com/onsi/gomega" "k8s.io/client-go/kubernetes/scheme" - "k8s.io/klog/v2/klogr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/azure/services/routetables/mock_routetables" gomockinternal "sigs.k8s.io/cluster-api-provider-azure/internal/test/matchers/gomock" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) func init() { @@ -60,7 +58,6 @@ func TestReconcileRouteTables(t *testing.T) { ID: "1234", Name: "my-vnet", }) - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.ClusterName() }, }, @@ -76,7 +73,6 @@ func TestReconcileRouteTables(t *testing.T) { s.Vnet().Return(&infrav1.VnetSpec{ Name: "my-vnet", }) - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.ClusterName() s.RouteTableSpecs().Return([]azure.RouteTableSpec{ { @@ -116,7 +112,6 @@ func TestReconcileRouteTables(t *testing.T) { s.Vnet().Return(&infrav1.VnetSpec{ Name: "my-vnet", }) - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.ClusterName() s.RouteTableSpecs().AnyTimes().Return([]azure.RouteTableSpec{ { @@ -176,7 +171,6 @@ func TestReconcileRouteTables(t *testing.T) { s.Vnet().Return(&infrav1.VnetSpec{ Name: "my-vnet", }) - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.ClusterName() s.RouteTableSpecs().Return([]azure.RouteTableSpec{{ Name: "my-cp-routetable", @@ -204,7 +198,6 @@ func TestReconcileRouteTables(t *testing.T) { s.Vnet().Return(&infrav1.VnetSpec{ Name: "my-vnet", }) - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.ClusterName() s.RouteTableSpecs().Return([]azure.RouteTableSpec{{ Name: "my-cp-routetable", @@ -271,7 +264,6 @@ func TestDeleteRouteTable(t *testing.T) { ID: "1234", Name: "my-vnet", }) - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.ClusterName() }, }, @@ -287,7 +279,6 @@ func TestDeleteRouteTable(t *testing.T) 
{ s.Vnet().Return(&infrav1.VnetSpec{ Name: "my-vnet", }) - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.ClusterName() s.RouteTableSpecs().Return([]azure.RouteTableSpec{ { @@ -324,7 +315,6 @@ func TestDeleteRouteTable(t *testing.T) { s.Vnet().Return(&infrav1.VnetSpec{ Name: "my-vnet", }) - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.ClusterName() s.RouteTableSpecs().Return([]azure.RouteTableSpec{ { @@ -361,7 +351,6 @@ func TestDeleteRouteTable(t *testing.T) { s.Vnet().Return(&infrav1.VnetSpec{ Name: "my-vnet", }) - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.ClusterName() s.RouteTableSpecs().Return([]azure.RouteTableSpec{{ Name: "my-cp-routetable", diff --git a/azure/services/scalesets/mock_scalesets/scalesets_mock.go b/azure/services/scalesets/mock_scalesets/scalesets_mock.go index 492d7c29dad..623a616200f 100644 --- a/azure/services/scalesets/mock_scalesets/scalesets_mock.go +++ b/azure/services/scalesets/mock_scalesets/scalesets_mock.go @@ -25,7 +25,6 @@ import ( reflect "reflect" autorest "github.com/Azure/go-autorest/autorest" - logr "github.com/go-logr/logr" gomock "github.com/golang/mock/gomock" v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" azure "sigs.k8s.io/cluster-api-provider-azure/azure" @@ -193,37 +192,6 @@ func (mr *MockScaleSetScopeMockRecorder) DeleteLongRunningOperationState(arg0, a return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteLongRunningOperationState", reflect.TypeOf((*MockScaleSetScope)(nil).DeleteLongRunningOperationState), arg0, arg1) } -// Enabled mocks base method. -func (m *MockScaleSetScope) Enabled() bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Enabled") - ret0, _ := ret[0].(bool) - return ret0 -} - -// Enabled indicates an expected call of Enabled. 
-func (mr *MockScaleSetScopeMockRecorder) Enabled() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Enabled", reflect.TypeOf((*MockScaleSetScope)(nil).Enabled)) -} - -// Error mocks base method. -func (m *MockScaleSetScope) Error(err error, msg string, keysAndValues ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{err, msg} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Error", varargs...) -} - -// Error indicates an expected call of Error. -func (mr *MockScaleSetScopeMockRecorder) Error(err, msg interface{}, keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{err, msg}, keysAndValues...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Error", reflect.TypeOf((*MockScaleSetScope)(nil).Error), varargs...) -} - // FailureDomains mocks base method. func (m *MockScaleSetScope) FailureDomains() []string { m.ctrl.T.Helper() @@ -268,18 +236,18 @@ func (mr *MockScaleSetScopeMockRecorder) GetLongRunningOperationState(arg0, arg1 } // GetVMImage mocks base method. -func (m *MockScaleSetScope) GetVMImage() (*v1beta1.Image, error) { +func (m *MockScaleSetScope) GetVMImage(arg0 context.Context) (*v1beta1.Image, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetVMImage") + ret := m.ctrl.Call(m, "GetVMImage", arg0) ret0, _ := ret[0].(*v1beta1.Image) ret1, _ := ret[1].(error) return ret0, ret1 } // GetVMImage indicates an expected call of GetVMImage. -func (mr *MockScaleSetScopeMockRecorder) GetVMImage() *gomock.Call { +func (mr *MockScaleSetScopeMockRecorder) GetVMImage(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVMImage", reflect.TypeOf((*MockScaleSetScope)(nil).GetVMImage)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVMImage", reflect.TypeOf((*MockScaleSetScope)(nil).GetVMImage), arg0) } // HashKey mocks base method. 
@@ -296,23 +264,6 @@ func (mr *MockScaleSetScopeMockRecorder) HashKey() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HashKey", reflect.TypeOf((*MockScaleSetScope)(nil).HashKey)) } -// Info mocks base method. -func (m *MockScaleSetScope) Info(msg string, keysAndValues ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{msg} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Info", varargs...) -} - -// Info indicates an expected call of Info. -func (mr *MockScaleSetScopeMockRecorder) Info(msg interface{}, keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{msg}, keysAndValues...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Info", reflect.TypeOf((*MockScaleSetScope)(nil).Info), varargs...) -} - // Location mocks base method. func (m *MockScaleSetScope) Location() string { m.ctrl.T.Helper() @@ -494,20 +445,6 @@ func (mr *MockScaleSetScopeMockRecorder) UpdatePutStatus(arg0, arg1, arg2 interf return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdatePutStatus", reflect.TypeOf((*MockScaleSetScope)(nil).UpdatePutStatus), arg0, arg1, arg2) } -// V mocks base method. -func (m *MockScaleSetScope) V(level int) logr.Logger { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "V", level) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// V indicates an expected call of V. -func (mr *MockScaleSetScopeMockRecorder) V(level interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "V", reflect.TypeOf((*MockScaleSetScope)(nil).V), level) -} - // VMSSExtensionSpecs mocks base method. 
func (m *MockScaleSetScope) VMSSExtensionSpecs() []azure.ExtensionSpec { m.ctrl.T.Helper() @@ -521,35 +458,3 @@ func (mr *MockScaleSetScopeMockRecorder) VMSSExtensionSpecs() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VMSSExtensionSpecs", reflect.TypeOf((*MockScaleSetScope)(nil).VMSSExtensionSpecs)) } - -// WithName mocks base method. -func (m *MockScaleSetScope) WithName(name string) logr.Logger { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WithName", name) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// WithName indicates an expected call of WithName. -func (mr *MockScaleSetScopeMockRecorder) WithName(name interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithName", reflect.TypeOf((*MockScaleSetScope)(nil).WithName), name) -} - -// WithValues mocks base method. -func (m *MockScaleSetScope) WithValues(keysAndValues ...interface{}) logr.Logger { - m.ctrl.T.Helper() - varargs := []interface{}{} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "WithValues", varargs...) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// WithValues indicates an expected call of WithValues. -func (mr *MockScaleSetScopeMockRecorder) WithValues(keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithValues", reflect.TypeOf((*MockScaleSetScope)(nil).WithValues), keysAndValues...) 
-} diff --git a/azure/services/scalesets/scalesets.go b/azure/services/scalesets/scalesets.go index 869d8f07da9..4bf536fbf61 100644 --- a/azure/services/scalesets/scalesets.go +++ b/azure/services/scalesets/scalesets.go @@ -24,28 +24,24 @@ import ( "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-04-01/compute" "github.com/Azure/go-autorest/autorest/to" - "github.com/go-logr/logr" "github.com/pkg/errors" - - "sigs.k8s.io/cluster-api-provider-azure/util/generators" - "sigs.k8s.io/cluster-api-provider-azure/util/slice" - infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/azure/converters" "sigs.k8s.io/cluster-api-provider-azure/azure/scope" "sigs.k8s.io/cluster-api-provider-azure/azure/services/resourceskus" + "sigs.k8s.io/cluster-api-provider-azure/util/generators" + "sigs.k8s.io/cluster-api-provider-azure/util/slice" "sigs.k8s.io/cluster-api-provider-azure/util/tele" ) type ( // ScaleSetScope defines the scope interface for a scale sets service. ScaleSetScope interface { - logr.Logger azure.ClusterDescriber azure.AsyncStatusUpdater GetBootstrapData(context.Context) (string, error) - GetVMImage() (*infrav1.Image, error) + GetVMImage(context.Context) (*infrav1.Image, error) SaveVMImageToStatus(*infrav1.Image) MaxSurge() (int, error) ScaleSetSpec() azure.ScaleSetSpec @@ -74,7 +70,7 @@ func NewService(scope ScaleSetScope, skuCache *resourceskus.Cache) *Service { // Reconcile idempotently gets, creates, and updates a scale set. 
func (s *Service) Reconcile(ctx context.Context) (retErr error) { - ctx, _, done := tele.StartSpanWithLogger(ctx, "scalesets.Service.Reconcile") + ctx, log, done := tele.StartSpanWithLogger(ctx, "scalesets.Service.Reconcile") defer done() if err := s.validateSpec(ctx); err != nil { @@ -97,7 +93,7 @@ func (s *Service) Reconcile(ctx context.Context) (retErr error) { if fetchedVMSS == nil { fetchedVMSS, err = s.getVirtualMachineScaleSet(ctx, scaleSetSpec.Name) if err != nil && !azure.ResourceNotFound(err) { - s.Scope.Error(err, "failed to get vmss in deferred update") + log.Error(err, "failed to get vmss in deferred update") } } @@ -150,7 +146,7 @@ func (s *Service) Reconcile(ctx context.Context) (retErr error) { // Delete deletes a scale set asynchronously. Delete sends a DELETE request to Azure and if accepted without error, // the VMSS will be considered deleted. The actual delete in Azure may take longer, but should eventually complete. func (s *Service) Delete(ctx context.Context) error { - ctx, _, done := tele.StartSpanWithLogger(ctx, "scalesets.Service.Delete") + ctx, log, done := tele.StartSpanWithLogger(ctx, "scalesets.Service.Delete") defer done() var err error @@ -161,7 +157,7 @@ func (s *Service) Delete(ctx context.Context) error { // save the updated state of the VMSS for the MachinePoolScope to use for updating K8s state fetchedVMSS, err := s.getVirtualMachineScaleSet(ctx, vmssSpec.Name) if err != nil && !azure.ResourceNotFound(err) { - s.Scope.Error(err, "failed to get vmss in deferred update") + log.Error(err, "failed to get vmss in deferred update") } if fetchedVMSS != nil { @@ -184,7 +180,7 @@ func (s *Service) Delete(ctx context.Context) error { } // no long running delete operation is active, so delete the ScaleSet - s.Scope.V(2).Info("deleting VMSS", "scale set", vmssSpec.Name) + log.V(2).Info("deleting VMSS", "scale set", vmssSpec.Name) future, err = s.Client.DeleteAsync(ctx, s.Scope.ResourceGroup(), vmssSpec.Name) if err != nil { if 
azure.ResourceNotFound(err) { @@ -208,7 +204,7 @@ func (s *Service) Delete(ctx context.Context) error { } func (s *Service) createVMSS(ctx context.Context) (*infrav1.Future, error) { - ctx, _, done := tele.StartSpanWithLogger(ctx, "scalesets.Service.createVMSS") + ctx, log, done := tele.StartSpanWithLogger(ctx, "scalesets.Service.createVMSS") defer done() spec := s.Scope.ScaleSetSpec() @@ -223,13 +219,13 @@ func (s *Service) createVMSS(ctx context.Context) (*infrav1.Future, error) { return nil, errors.Wrap(err, "cannot create VMSS") } - s.Scope.V(2).Info("starting to create VMSS", "scale set", spec.Name) + log.V(2).Info("starting to create VMSS", "scale set", spec.Name) s.Scope.SetLongRunningOperationState(future) return future, err } func (s *Service) patchVMSSIfNeeded(ctx context.Context, infraVMSS *azure.VMSS) (*infrav1.Future, error) { - ctx, _, done := tele.StartSpanWithLogger(ctx, "scalesets.Service.patchVMSSIfNeeded") + ctx, log, done := tele.StartSpanWithLogger(ctx, "scalesets.Service.patchVMSSIfNeeded") defer done() spec := s.Scope.ScaleSetSpec() @@ -253,18 +249,18 @@ func (s *Service) patchVMSSIfNeeded(ctx context.Context, infraVMSS *azure.VMSS) if maxSurge > 0 && (hasModelChanges || !infraVMSS.HasEnoughLatestModelOrNotMixedModel()) { // surge capacity with the intention of lowering during instance reconciliation surge := spec.Capacity + int64(maxSurge) - s.Scope.V(4).Info("surging...", "surge", surge) + log.V(4).Info("surging...", "surge", surge) patch.Sku.Capacity = to.Int64Ptr(surge) } // If there are no model changes and no increase in the replica count, do not update the VMSS. 
// Decreases in replica count is handled by deleting AzureMachinePoolMachine instances in the MachinePoolScope if *patch.Sku.Capacity <= infraVMSS.Capacity && !hasModelChanges { - s.Scope.V(4).Info("nothing to update on vmss", "scale set", spec.Name, "newReplicas", *patch.Sku.Capacity, "oldReplicas", infraVMSS.Capacity, "hasChanges", hasModelChanges) + log.V(4).Info("nothing to update on vmss", "scale set", spec.Name, "newReplicas", *patch.Sku.Capacity, "oldReplicas", infraVMSS.Capacity, "hasChanges", hasModelChanges) return nil, nil } - s.Scope.V(4).Info("patching vmss", "scale set", spec.Name, "patch", patch) + log.V(4).Info("patching vmss", "scale set", spec.Name, "patch", patch) future, err := s.UpdateAsync(ctx, s.Scope.ResourceGroup(), spec.Name, patch) if err != nil { if azure.ResourceConflict(err) { @@ -274,7 +270,7 @@ func (s *Service) patchVMSSIfNeeded(ctx context.Context, infraVMSS *azure.VMSS) } s.Scope.SetLongRunningOperationState(future) - s.Scope.V(2).Info("successfully started to update vmss", "scale set", spec.Name) + log.V(2).Info("successfully started to update vmss", "scale set", spec.Name) return future, err } @@ -370,7 +366,7 @@ func (s *Service) buildVMSSFromSpec(ctx context.Context, vmssSpec azure.ScaleSet extensions := s.generateExtensions() - storageProfile, err := s.generateStorageProfile(vmssSpec, sku) + storageProfile, err := s.generateStorageProfile(ctx, vmssSpec, sku) if err != nil { return compute.VirtualMachineScaleSet{}, err } @@ -409,7 +405,7 @@ func (s *Service) buildVMSSFromSpec(ctx context.Context, vmssSpec azure.ScaleSet Capacity: to.Int64Ptr(vmssSpec.Capacity), }, Zones: to.StringSlicePtr(vmssSpec.FailureDomains), - Plan: s.generateImagePlan(), + Plan: s.generateImagePlan(ctx), VirtualMachineScaleSetProperties: &compute.VirtualMachineScaleSetProperties{ SinglePlacementGroup: to.BoolPtr(false), UpgradePolicy: &compute.UpgradePolicy{ @@ -560,7 +556,10 @@ func (s *Service) generateExtensions() 
[]compute.VirtualMachineScaleSetExtension } // generateStorageProfile generates a pointer to a compute.VirtualMachineScaleSetStorageProfile which can utilized for VM creation. -func (s *Service) generateStorageProfile(vmssSpec azure.ScaleSetSpec, sku resourceskus.SKU) (*compute.VirtualMachineScaleSetStorageProfile, error) { +func (s *Service) generateStorageProfile(ctx context.Context, vmssSpec azure.ScaleSetSpec, sku resourceskus.SKU) (*compute.VirtualMachineScaleSetStorageProfile, error) { + ctx, _, done := tele.StartSpanWithLogger(ctx, "scalesets.Service.generateStorageProfile") + defer done() + storageProfile := &compute.VirtualMachineScaleSetStorageProfile{ OsDisk: &compute.VirtualMachineScaleSetOSDisk{ OsType: compute.OperatingSystemTypes(vmssSpec.OSDisk.OSType), @@ -611,7 +610,7 @@ func (s *Service) generateStorageProfile(vmssSpec azure.ScaleSetSpec, sku resour } storageProfile.DataDisks = &dataDisks - image, err := s.Scope.GetVMImage() + image, err := s.Scope.GetVMImage(ctx) if err != nil { return nil, errors.Wrap(err, "failed to get VM image") } @@ -674,10 +673,13 @@ func (s *Service) generateOSProfile(ctx context.Context, vmssSpec azure.ScaleSet return osProfile, nil } -func (s *Service) generateImagePlan() *compute.Plan { - image, err := s.Scope.GetVMImage() +func (s *Service) generateImagePlan(ctx context.Context) *compute.Plan { + ctx, log, done := tele.StartSpanWithLogger(ctx, "scalesets.Service.generateImagePlan") + defer done() + + image, err := s.Scope.GetVMImage(ctx) if err != nil { - s.Scope.Error(err, "failed to get vm image, disabling Plan") + log.Error(err, "failed to get vm image, disabling Plan") return nil } diff --git a/azure/services/scalesets/scalesets_test.go b/azure/services/scalesets/scalesets_test.go index c96900c61a5..247bfa9adf7 100644 --- a/azure/services/scalesets/scalesets_test.go +++ b/azure/services/scalesets/scalesets_test.go @@ -21,8 +21,6 @@ import ( "net/http" "testing" - "k8s.io/utils/pointer" - 
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-04-01/compute" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/to" @@ -32,11 +30,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes/scheme" - "k8s.io/klog/v2/klogr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1beta1" - "sigs.k8s.io/controller-runtime/pkg/client/fake" - + "k8s.io/utils/pointer" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/azure/scope" @@ -44,6 +38,9 @@ import ( "sigs.k8s.io/cluster-api-provider-azure/azure/services/scalesets/mock_scalesets" infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1" gomockinternal "sigs.k8s.io/cluster-api-provider-azure/internal/test/matchers/gomock" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/client/fake" ) const ( @@ -88,7 +85,6 @@ func TestNewService(t *testing.T) { mps, err := scope.NewMachinePoolScope(scope.MachinePoolScopeParams{ Client: client, - Logger: s.Logger, MachinePool: new(clusterv1exp.MachinePool), AzureMachinePool: new(infrav1exp.AzureMachinePool), ClusterScope: s, @@ -113,7 +109,6 @@ func TestGetExistingVMSS(t *testing.T) { expectedError: "failed to get existing vmss: #: Not found: StatusCode=404", expect: func(s *mock_scalesets.MockScaleSetScopeMockRecorder, m *mock_scalesets.MockClientMockRecorder) { s.ResourceGroup().AnyTimes().Return("my-rg") - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) m.Get(gomockinternal.AContext(), "my-rg", "my-vmss").Return(compute.VirtualMachineScaleSet{}, autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 404}, "Not found")) }, }, @@ -141,7 +136,6 @@ func TestGetExistingVMSS(t *testing.T) { expectedError: "", 
expect: func(s *mock_scalesets.MockScaleSetScopeMockRecorder, m *mock_scalesets.MockClientMockRecorder) { s.ResourceGroup().AnyTimes().Return("my-rg") - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) m.Get(gomockinternal.AContext(), "my-rg", "my-vmss").Return(compute.VirtualMachineScaleSet{ ID: to.StringPtr("my-id"), Name: to.StringPtr("my-vmss"), @@ -177,7 +171,6 @@ func TestGetExistingVMSS(t *testing.T) { expectedError: "failed to list instances: #: Not found: StatusCode=404", expect: func(s *mock_scalesets.MockScaleSetScopeMockRecorder, m *mock_scalesets.MockClientMockRecorder) { s.ResourceGroup().AnyTimes().Return("my-rg") - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) m.Get(gomockinternal.AContext(), "my-rg", "my-vmss").Return(compute.VirtualMachineScaleSet{ ID: to.StringPtr("my-id"), Name: to.StringPtr("my-vmss"), @@ -622,7 +615,6 @@ func TestDeleteVMSS(t *testing.T) { Capacity: 3, }).AnyTimes() s.ResourceGroup().AnyTimes().Return("my-existing-rg") - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) future := &infrav1.Future{} s.GetLongRunningOperationState("my-existing-vmss", scope.ScalesetsServiceName).Return(future) m.GetResultIfDone(gomockinternal.AContext(), future).Return(compute.VirtualMachineScaleSet{}, nil) @@ -641,7 +633,6 @@ func TestDeleteVMSS(t *testing.T) { Capacity: 3, }).AnyTimes() s.ResourceGroup().AnyTimes().Return(resourceGroup) - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.GetLongRunningOperationState(name, scope.ScalesetsServiceName).Return(nil) m.DeleteAsync(gomockinternal.AContext(), resourceGroup, name). 
Return(nil, autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 404}, "Not found")) @@ -659,7 +650,6 @@ func TestDeleteVMSS(t *testing.T) { Capacity: 3, }).AnyTimes() s.ResourceGroup().AnyTimes().Return(resourceGroup) - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.GetLongRunningOperationState(name, scope.ScalesetsServiceName).Return(nil) m.DeleteAsync(gomockinternal.AContext(), resourceGroup, name). Return(nil, autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 500}, "Internal Server Error")) @@ -1249,7 +1239,7 @@ func setupDefaultVMSSExpectations(s *mock_scalesets.MockScaleSetScopeMockRecorde Version: "1.0", }, } - s.GetVMImage().Return(image, nil).AnyTimes() + s.GetVMImage(gomockinternal.AContext()).Return(image, nil).AnyTimes() s.SaveVMImageToStatus(image) } @@ -1263,14 +1253,13 @@ func setupUpdateVMSSExpectations(s *mock_scalesets.MockScaleSetScopeMockRecorder Version: "2.0", }, } - s.GetVMImage().Return(image, nil).AnyTimes() + s.GetVMImage(gomockinternal.AContext()).Return(image, nil).AnyTimes() s.SaveVMImageToStatus(image) } func setupVMSSExpectationsWithoutVMImage(s *mock_scalesets.MockScaleSetScopeMockRecorder) { s.SubscriptionID().AnyTimes().Return(defaultSubscriptionID) s.ResourceGroup().AnyTimes().Return(defaultResourceGroup) - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.AdditionalTags() s.Location().AnyTimes().Return("test-location") s.ClusterName().Return("my-cluster") diff --git a/azure/services/scalesetvms/mock_scalesetvms/scalesetvms_mock.go b/azure/services/scalesetvms/mock_scalesetvms/scalesetvms_mock.go index 198f2b68e69..b87598c1426 100644 --- a/azure/services/scalesetvms/mock_scalesetvms/scalesetvms_mock.go +++ b/azure/services/scalesetvms/mock_scalesetvms/scalesetvms_mock.go @@ -24,7 +24,6 @@ import ( reflect "reflect" autorest "github.com/Azure/go-autorest/autorest" - logr "github.com/go-logr/logr" gomock "github.com/golang/mock/gomock" v1beta1 
"sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" azure "sigs.k8s.io/cluster-api-provider-azure/azure" @@ -192,37 +191,6 @@ func (mr *MockScaleSetVMScopeMockRecorder) DeleteLongRunningOperationState(arg0, return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteLongRunningOperationState", reflect.TypeOf((*MockScaleSetVMScope)(nil).DeleteLongRunningOperationState), arg0, arg1) } -// Enabled mocks base method. -func (m *MockScaleSetVMScope) Enabled() bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Enabled") - ret0, _ := ret[0].(bool) - return ret0 -} - -// Enabled indicates an expected call of Enabled. -func (mr *MockScaleSetVMScopeMockRecorder) Enabled() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Enabled", reflect.TypeOf((*MockScaleSetVMScope)(nil).Enabled)) -} - -// Error mocks base method. -func (m *MockScaleSetVMScope) Error(err error, msg string, keysAndValues ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{err, msg} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Error", varargs...) -} - -// Error indicates an expected call of Error. -func (mr *MockScaleSetVMScopeMockRecorder) Error(err, msg interface{}, keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{err, msg}, keysAndValues...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Error", reflect.TypeOf((*MockScaleSetVMScope)(nil).Error), varargs...) -} - // FailureDomains mocks base method. func (m *MockScaleSetVMScope) FailureDomains() []string { m.ctrl.T.Helper() @@ -265,23 +233,6 @@ func (mr *MockScaleSetVMScopeMockRecorder) HashKey() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HashKey", reflect.TypeOf((*MockScaleSetVMScope)(nil).HashKey)) } -// Info mocks base method. 
-func (m *MockScaleSetVMScope) Info(msg string, keysAndValues ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{msg} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Info", varargs...) -} - -// Info indicates an expected call of Info. -func (mr *MockScaleSetVMScopeMockRecorder) Info(msg interface{}, keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{msg}, keysAndValues...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Info", reflect.TypeOf((*MockScaleSetVMScope)(nil).Info), varargs...) -} - // InstanceID mocks base method. func (m *MockScaleSetVMScope) InstanceID() string { m.ctrl.T.Helper() @@ -425,49 +376,3 @@ func (mr *MockScaleSetVMScopeMockRecorder) UpdatePutStatus(arg0, arg1, arg2 inte mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdatePutStatus", reflect.TypeOf((*MockScaleSetVMScope)(nil).UpdatePutStatus), arg0, arg1, arg2) } - -// V mocks base method. -func (m *MockScaleSetVMScope) V(level int) logr.Logger { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "V", level) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// V indicates an expected call of V. -func (mr *MockScaleSetVMScopeMockRecorder) V(level interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "V", reflect.TypeOf((*MockScaleSetVMScope)(nil).V), level) -} - -// WithName mocks base method. -func (m *MockScaleSetVMScope) WithName(name string) logr.Logger { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WithName", name) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// WithName indicates an expected call of WithName. 
-func (mr *MockScaleSetVMScopeMockRecorder) WithName(name interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithName", reflect.TypeOf((*MockScaleSetVMScope)(nil).WithName), name) -} - -// WithValues mocks base method. -func (m *MockScaleSetVMScope) WithValues(keysAndValues ...interface{}) logr.Logger { - m.ctrl.T.Helper() - varargs := []interface{}{} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "WithValues", varargs...) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// WithValues indicates an expected call of WithValues. -func (mr *MockScaleSetVMScopeMockRecorder) WithValues(keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithValues", reflect.TypeOf((*MockScaleSetVMScope)(nil).WithValues), keysAndValues...) -} diff --git a/azure/services/scalesetvms/scalesetvms.go b/azure/services/scalesetvms/scalesetvms.go index d5278c161df..d348a665be3 100644 --- a/azure/services/scalesetvms/scalesetvms.go +++ b/azure/services/scalesetvms/scalesetvms.go @@ -20,7 +20,6 @@ import ( "context" "time" - "github.com/go-logr/logr" "github.com/pkg/errors" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" @@ -34,7 +33,6 @@ const serviceName = "scalesetvms" type ( // ScaleSetVMScope defines the scope interface for a scale sets service. ScaleSetVMScope interface { - logr.Logger azure.ClusterDescriber azure.AsyncStatusUpdater InstanceID() string @@ -83,16 +81,20 @@ func (s *Service) Reconcile(ctx context.Context) error { // Delete deletes a scaleset instance asynchronously returning a future which encapsulates the long-running operation. 
func (s *Service) Delete(ctx context.Context) error { - ctx, _, done := tele.StartSpanWithLogger(ctx, "scalesetvms.Service.Delete") - defer done() - var ( resourceGroup = s.Scope.ResourceGroup() vmssName = s.Scope.ScaleSetName() instanceID = s.Scope.InstanceID() ) - log := s.Scope.WithValues("resourceGroup", resourceGroup, "scaleset", vmssName, "instanceID", instanceID) + ctx, log, done := tele.StartSpanWithLogger( + ctx, + "scalesetvms.Service.Delete", + tele.KVP("resourceGroup", resourceGroup), + tele.KVP("scaleset", vmssName), + tele.KVP("instanceID", instanceID), + ) + defer done() defer func() { if instance, err := s.Client.Get(ctx, resourceGroup, vmssName, instanceID); err == nil && instance.VirtualMachineScaleSetVMProperties != nil { diff --git a/azure/services/scalesetvms/scalesetvms_test.go b/azure/services/scalesetvms/scalesetvms_test.go index d68e4adc86c..aa851a324b4 100644 --- a/azure/services/scalesetvms/scalesetvms_test.go +++ b/azure/services/scalesetvms/scalesetvms_test.go @@ -78,7 +78,6 @@ func TestNewService(t *testing.T) { mpms, err := scope.NewMachinePoolMachineScope(scope.MachinePoolMachineScopeParams{ Client: client, - Logger: s.Logger, MachinePool: new(clusterv1exp.MachinePool), AzureMachinePool: new(infrav1exp.AzureMachinePool), AzureMachinePoolMachine: new(infrav1exp.AzureMachinePoolMachine), @@ -258,9 +257,6 @@ func TestService_Delete(t *testing.T) { scopeMock.EXPECT().SubscriptionID().Return("subID") scopeMock.EXPECT().BaseURI().Return("https://localhost/") scopeMock.EXPECT().Authorizer().Return(nil) - scopeMock.EXPECT().WithValues(gomock.Any()).Return(scopeMock) - scopeMock.EXPECT().V(gomock.Any()).Return(scopeMock).AnyTimes() - scopeMock.EXPECT().Info(gomock.Any(), gomock.Any()).AnyTimes() service := NewService(scopeMock) service.Client = clientMock diff --git a/azure/services/securitygroups/mock_securitygroups/securitygroups_mock.go b/azure/services/securitygroups/mock_securitygroups/securitygroups_mock.go index 
cf7ba2a6cb2..795a5f9bb61 100644 --- a/azure/services/securitygroups/mock_securitygroups/securitygroups_mock.go +++ b/azure/services/securitygroups/mock_securitygroups/securitygroups_mock.go @@ -24,7 +24,6 @@ import ( reflect "reflect" autorest "github.com/Azure/go-autorest/autorest" - logr "github.com/go-logr/logr" gomock "github.com/golang/mock/gomock" v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" azure "sigs.k8s.io/cluster-api-provider-azure/azure" @@ -235,37 +234,6 @@ func (mr *MockNSGScopeMockRecorder) ControlPlaneSubnet() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ControlPlaneSubnet", reflect.TypeOf((*MockNSGScope)(nil).ControlPlaneSubnet)) } -// Enabled mocks base method. -func (m *MockNSGScope) Enabled() bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Enabled") - ret0, _ := ret[0].(bool) - return ret0 -} - -// Enabled indicates an expected call of Enabled. -func (mr *MockNSGScopeMockRecorder) Enabled() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Enabled", reflect.TypeOf((*MockNSGScope)(nil).Enabled)) -} - -// Error mocks base method. -func (m *MockNSGScope) Error(err error, msg string, keysAndValues ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{err, msg} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Error", varargs...) -} - -// Error indicates an expected call of Error. -func (mr *MockNSGScopeMockRecorder) Error(err, msg interface{}, keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{err, msg}, keysAndValues...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Error", reflect.TypeOf((*MockNSGScope)(nil).Error), varargs...) -} - // FailureDomains mocks base method. 
func (m *MockNSGScope) FailureDomains() []string { m.ctrl.T.Helper() @@ -308,23 +276,6 @@ func (mr *MockNSGScopeMockRecorder) HashKey() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HashKey", reflect.TypeOf((*MockNSGScope)(nil).HashKey)) } -// Info mocks base method. -func (m *MockNSGScope) Info(msg string, keysAndValues ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{msg} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Info", varargs...) -} - -// Info indicates an expected call of Info. -func (mr *MockNSGScopeMockRecorder) Info(msg interface{}, keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{msg}, keysAndValues...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Info", reflect.TypeOf((*MockNSGScope)(nil).Info), varargs...) -} - // IsAPIServerPrivate mocks base method. func (m *MockNSGScope) IsAPIServerPrivate() bool { m.ctrl.T.Helper() @@ -519,20 +470,6 @@ func (mr *MockNSGScopeMockRecorder) TenantID() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TenantID", reflect.TypeOf((*MockNSGScope)(nil).TenantID)) } -// V mocks base method. -func (m *MockNSGScope) V(level int) logr.Logger { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "V", level) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// V indicates an expected call of V. -func (mr *MockNSGScopeMockRecorder) V(level interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "V", reflect.TypeOf((*MockNSGScope)(nil).V), level) -} - // Vnet mocks base method. func (m *MockNSGScope) Vnet() *v1beta1.VnetSpec { m.ctrl.T.Helper() @@ -546,35 +483,3 @@ func (mr *MockNSGScopeMockRecorder) Vnet() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Vnet", reflect.TypeOf((*MockNSGScope)(nil).Vnet)) } - -// WithName mocks base method. 
-func (m *MockNSGScope) WithName(name string) logr.Logger { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WithName", name) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// WithName indicates an expected call of WithName. -func (mr *MockNSGScopeMockRecorder) WithName(name interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithName", reflect.TypeOf((*MockNSGScope)(nil).WithName), name) -} - -// WithValues mocks base method. -func (m *MockNSGScope) WithValues(keysAndValues ...interface{}) logr.Logger { - m.ctrl.T.Helper() - varargs := []interface{}{} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "WithValues", varargs...) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// WithValues indicates an expected call of WithValues. -func (mr *MockNSGScopeMockRecorder) WithValues(keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithValues", reflect.TypeOf((*MockNSGScope)(nil).WithValues), keysAndValues...) -} diff --git a/azure/services/securitygroups/securitygroups.go b/azure/services/securitygroups/securitygroups.go index 9e69eec3d5b..1ce0c0a1f8f 100644 --- a/azure/services/securitygroups/securitygroups.go +++ b/azure/services/securitygroups/securitygroups.go @@ -22,9 +22,7 @@ import ( "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network" "github.com/Azure/go-autorest/autorest/to" - "github.com/go-logr/logr" "github.com/pkg/errors" - "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/azure/converters" "sigs.k8s.io/cluster-api-provider-azure/util/tele" @@ -32,7 +30,6 @@ import ( // NSGScope defines the scope interface for a security groups service. 
type NSGScope interface { - logr.Logger azure.ClusterDescriber azure.NetworkDescriber NSGSpecs() []azure.NSGSpec @@ -54,11 +51,11 @@ func New(scope NSGScope) *Service { // Reconcile gets/creates/updates a network security group. func (s *Service) Reconcile(ctx context.Context) error { - ctx, _, done := tele.StartSpanWithLogger(ctx, "securitygroups.Service.Reconcile") + ctx, log, done := tele.StartSpanWithLogger(ctx, "securitygroups.Service.Reconcile") defer done() if !s.Scope.IsVnetManaged() { - s.Scope.V(4).Info("Skipping network security group reconcile in custom VNet mode") + log.V(4).Info("Skipping network security group reconcile in custom VNet mode") return nil } @@ -86,11 +83,11 @@ func (s *Service) Reconcile(ctx context.Context) error { } if !update { // Skip update for NSG as the required default rules are present - s.Scope.V(2).Info("security group exists and no default rules are missing, skipping update", "security group", nsgSpec.Name) + log.V(2).Info("security group exists and no default rules are missing, skipping update", "security group", nsgSpec.Name) continue } default: - s.Scope.V(2).Info("creating security group", "security group", nsgSpec.Name) + log.V(2).Info("creating security group", "security group", nsgSpec.Name) for _, rule := range nsgSpec.SecurityRules { securityRules = append(securityRules, converters.SecurityRuleToSDK(rule)) } @@ -107,7 +104,7 @@ func (s *Service) Reconcile(ctx context.Context) error { return errors.Wrapf(err, "failed to create or update security group %s in resource group %s", nsgSpec.Name, s.Scope.ResourceGroup()) } - s.Scope.V(2).Info("successfully created or updated security group", "security group", nsgSpec.Name) + log.V(2).Info("successfully created or updated security group", "security group", nsgSpec.Name) } return nil } @@ -137,16 +134,16 @@ func ruleExists(rules []network.SecurityRule, rule network.SecurityRule) bool { // Delete deletes the network security group with the provided name. 
func (s *Service) Delete(ctx context.Context) error { - ctx, _, done := tele.StartSpanWithLogger(ctx, "securitygroups.Service.Delete") + ctx, log, done := tele.StartSpanWithLogger(ctx, "securitygroups.Service.Delete") defer done() if !s.Scope.IsVnetManaged() { - s.Scope.V(4).Info("Skipping network security group delete in custom VNet mode") + log.V(4).Info("Skipping network security group delete in custom VNet mode") return nil } for _, nsgSpec := range s.Scope.NSGSpecs() { - s.Scope.V(2).Info("deleting security group", "security group", nsgSpec.Name) + log.V(2).Info("deleting security group", "security group", nsgSpec.Name) err := s.client.Delete(ctx, s.Scope.ResourceGroup(), nsgSpec.Name) if err != nil && azure.ResourceNotFound(err) { // already deleted @@ -156,7 +153,7 @@ func (s *Service) Delete(ctx context.Context) error { return errors.Wrapf(err, "failed to delete security group %s in resource group %s", nsgSpec.Name, s.Scope.ResourceGroup()) } - s.Scope.V(2).Info("successfully deleted security group", "security group", nsgSpec.Name) + log.V(2).Info("successfully deleted security group", "security group", nsgSpec.Name) } return nil } diff --git a/azure/services/securitygroups/securitygroups_test.go b/azure/services/securitygroups/securitygroups_test.go index 00e2e670300..7cfaa874fd8 100644 --- a/azure/services/securitygroups/securitygroups_test.go +++ b/azure/services/securitygroups/securitygroups_test.go @@ -26,8 +26,6 @@ import ( "github.com/Azure/go-autorest/autorest/to" "github.com/golang/mock/gomock" . 
"github.com/onsi/gomega" - "k8s.io/klog/v2/klogr" - infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/azure/services/securitygroups/mock_securitygroups" @@ -78,7 +76,6 @@ func TestReconcileSecurityGroups(t *testing.T) { s.IsVnetManaged().Return(true) s.ResourceGroup().AnyTimes().Return("my-rg") s.Location().AnyTimes().Return("test-location") - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) m.Get(gomockinternal.AContext(), "my-rg", "nsg-one").Return(network.SecurityGroup{}, autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 404}, "Not found")) m.CreateOrUpdate(gomockinternal.AContext(), "my-rg", "nsg-one", gomockinternal.DiffEq(network.SecurityGroup{ SecurityGroupPropertiesFormat: &network.SecurityGroupPropertiesFormat{ @@ -153,7 +150,6 @@ func TestReconcileSecurityGroups(t *testing.T) { s.IsVnetManaged().AnyTimes().Return(true) s.ResourceGroup().AnyTimes().Return("my-rg") s.Location().AnyTimes().Return("test-location") - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) m.Get(gomockinternal.AContext(), "my-rg", "nsg-one").Return(network.SecurityGroup{ Response: autorest.Response{}, SecurityGroupPropertiesFormat: &network.SecurityGroupPropertiesFormat{ @@ -228,7 +224,6 @@ func TestReconcileSecurityGroups(t *testing.T) { name: "skipping network security group reconcile in custom VNet mode", expect: func(s *mock_securitygroups.MockNSGScopeMockRecorder, m *mock_securitygroups.MockclientMockRecorder) { s.IsVnetManaged().Return(false) - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) }, }, } @@ -286,7 +281,6 @@ func TestDeleteSecurityGroups(t *testing.T) { }, }) s.ResourceGroup().AnyTimes().Return("my-rg") - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.IsVnetManaged().Return(true) m.Delete(gomockinternal.AContext(), "my-rg", "nsg-one") m.Delete(gomockinternal.AContext(), "my-rg", "nsg-two") 
@@ -306,7 +300,6 @@ func TestDeleteSecurityGroups(t *testing.T) { }, }) s.ResourceGroup().AnyTimes().Return("my-rg") - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.IsVnetManaged().Return(true) m.Delete(gomockinternal.AContext(), "my-rg", "nsg-one"). Return(autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 404}, "Not found")) @@ -317,7 +310,6 @@ func TestDeleteSecurityGroups(t *testing.T) { name: "skipping network security group delete in custom VNet mode", expect: func(s *mock_securitygroups.MockNSGScopeMockRecorder, m *mock_securitygroups.MockclientMockRecorder) { s.IsVnetManaged().Return(false) - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) }, }, } diff --git a/azure/services/subnets/mock_subnets/subnets_mock.go b/azure/services/subnets/mock_subnets/subnets_mock.go index 181e37b2e86..936dc885d34 100644 --- a/azure/services/subnets/mock_subnets/subnets_mock.go +++ b/azure/services/subnets/mock_subnets/subnets_mock.go @@ -24,7 +24,6 @@ import ( reflect "reflect" autorest "github.com/Azure/go-autorest/autorest" - logr "github.com/go-logr/logr" gomock "github.com/golang/mock/gomock" v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" azure "sigs.k8s.io/cluster-api-provider-azure/azure" @@ -235,37 +234,6 @@ func (mr *MockSubnetScopeMockRecorder) ControlPlaneSubnet() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ControlPlaneSubnet", reflect.TypeOf((*MockSubnetScope)(nil).ControlPlaneSubnet)) } -// Enabled mocks base method. -func (m *MockSubnetScope) Enabled() bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Enabled") - ret0, _ := ret[0].(bool) - return ret0 -} - -// Enabled indicates an expected call of Enabled. -func (mr *MockSubnetScopeMockRecorder) Enabled() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Enabled", reflect.TypeOf((*MockSubnetScope)(nil).Enabled)) -} - -// Error mocks base method. 
-func (m *MockSubnetScope) Error(err error, msg string, keysAndValues ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{err, msg} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Error", varargs...) -} - -// Error indicates an expected call of Error. -func (mr *MockSubnetScopeMockRecorder) Error(err, msg interface{}, keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{err, msg}, keysAndValues...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Error", reflect.TypeOf((*MockSubnetScope)(nil).Error), varargs...) -} - // FailureDomains mocks base method. func (m *MockSubnetScope) FailureDomains() []string { m.ctrl.T.Helper() @@ -308,23 +276,6 @@ func (mr *MockSubnetScopeMockRecorder) HashKey() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HashKey", reflect.TypeOf((*MockSubnetScope)(nil).HashKey)) } -// Info mocks base method. -func (m *MockSubnetScope) Info(msg string, keysAndValues ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{msg} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Info", varargs...) -} - -// Info indicates an expected call of Info. -func (mr *MockSubnetScopeMockRecorder) Info(msg interface{}, keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{msg}, keysAndValues...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Info", reflect.TypeOf((*MockSubnetScope)(nil).Info), varargs...) -} - // IsAPIServerPrivate mocks base method. func (m *MockSubnetScope) IsAPIServerPrivate() bool { m.ctrl.T.Helper() @@ -519,20 +470,6 @@ func (mr *MockSubnetScopeMockRecorder) TenantID() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TenantID", reflect.TypeOf((*MockSubnetScope)(nil).TenantID)) } -// V mocks base method. 
-func (m *MockSubnetScope) V(level int) logr.Logger { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "V", level) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// V indicates an expected call of V. -func (mr *MockSubnetScopeMockRecorder) V(level interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "V", reflect.TypeOf((*MockSubnetScope)(nil).V), level) -} - // Vnet mocks base method. func (m *MockSubnetScope) Vnet() *v1beta1.VnetSpec { m.ctrl.T.Helper() @@ -546,35 +483,3 @@ func (mr *MockSubnetScopeMockRecorder) Vnet() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Vnet", reflect.TypeOf((*MockSubnetScope)(nil).Vnet)) } - -// WithName mocks base method. -func (m *MockSubnetScope) WithName(name string) logr.Logger { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WithName", name) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// WithName indicates an expected call of WithName. -func (mr *MockSubnetScopeMockRecorder) WithName(name interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithName", reflect.TypeOf((*MockSubnetScope)(nil).WithName), name) -} - -// WithValues mocks base method. -func (m *MockSubnetScope) WithValues(keysAndValues ...interface{}) logr.Logger { - m.ctrl.T.Helper() - varargs := []interface{}{} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "WithValues", varargs...) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// WithValues indicates an expected call of WithValues. -func (mr *MockSubnetScopeMockRecorder) WithValues(keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithValues", reflect.TypeOf((*MockSubnetScope)(nil).WithValues), keysAndValues...) 
-} diff --git a/azure/services/subnets/subnets.go b/azure/services/subnets/subnets.go index 764fc794720..162f70b7f04 100644 --- a/azure/services/subnets/subnets.go +++ b/azure/services/subnets/subnets.go @@ -22,9 +22,7 @@ import ( "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network" "github.com/Azure/go-autorest/autorest/to" - "github.com/go-logr/logr" "github.com/pkg/errors" - infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/util/tele" @@ -32,7 +30,6 @@ import ( // SubnetScope defines the scope interface for a subnet service. type SubnetScope interface { - logr.Logger azure.ClusterScoper SubnetSpecs() []azure.SubnetSpec } @@ -53,7 +50,7 @@ func New(scope SubnetScope) *Service { // Reconcile gets/creates/updates a subnet. func (s *Service) Reconcile(ctx context.Context) error { - ctx, _, done := tele.StartSpanWithLogger(ctx, "subnets.Service.Reconcile") + ctx, log, done := tele.StartSpanWithLogger(ctx, "subnets.Service.Reconcile") defer done() for _, subnetSpec := range s.Scope.SubnetSpecs() { @@ -100,7 +97,7 @@ func (s *Service) Reconcile(ctx context.Context) error { } } - s.Scope.V(2).Info("creating subnet in vnet", "subnet", subnetSpec.Name, "vnet", subnetSpec.VNetName) + log.V(2).Info("creating subnet in vnet", "subnet", subnetSpec.Name, "vnet", subnetSpec.VNetName) err = s.Client.CreateOrUpdate( ctx, s.Scope.Vnet().ResourceGroup, @@ -114,7 +111,7 @@ func (s *Service) Reconcile(ctx context.Context) error { return errors.Wrapf(err, "failed to create subnet %s in resource group %s", subnetSpec.Name, s.Scope.Vnet().ResourceGroup) } - s.Scope.V(2).Info("successfully created subnet in vnet", "subnet", subnetSpec.Name, "vnet", subnetSpec.VNetName) + log.V(2).Info("successfully created subnet in vnet", "subnet", subnetSpec.Name, "vnet", subnetSpec.VNetName) } } return nil @@ -122,15 +119,15 @@ func (s *Service) Reconcile(ctx context.Context) error 
{ // Delete deletes the subnet with the provided name. func (s *Service) Delete(ctx context.Context) error { - ctx, _, done := tele.StartSpanWithLogger(ctx, "subnets.Service.Delete") + ctx, log, done := tele.StartSpanWithLogger(ctx, "subnets.Service.Delete") defer done() for _, subnetSpec := range s.Scope.SubnetSpecs() { if !s.Scope.Vnet().IsManaged(s.Scope.ClusterName()) { - s.Scope.V(4).Info("Skipping subnets deletion in custom vnet mode") + log.V(4).Info("Skipping subnets deletion in custom vnet mode") continue } - s.Scope.V(2).Info("deleting subnet in vnet", "subnet", subnetSpec.Name, "vnet", subnetSpec.VNetName) + log.V(2).Info("deleting subnet in vnet", "subnet", subnetSpec.Name, "vnet", subnetSpec.VNetName) err := s.Client.Delete(ctx, s.Scope.Vnet().ResourceGroup, subnetSpec.VNetName, subnetSpec.Name) if err != nil && azure.ResourceNotFound(err) { // already deleted @@ -140,7 +137,7 @@ func (s *Service) Delete(ctx context.Context) error { return errors.Wrapf(err, "failed to delete subnet %s in resource group %s", subnetSpec.Name, s.Scope.Vnet().ResourceGroup) } - s.Scope.V(2).Info("successfully deleted subnet in vnet", "subnet", subnetSpec.Name, "vnet", subnetSpec.VNetName) + log.V(2).Info("successfully deleted subnet in vnet", "subnet", subnetSpec.Name, "vnet", subnetSpec.VNetName) } return nil } diff --git a/azure/services/subnets/subnets_test.go b/azure/services/subnets/subnets_test.go index 427bda0c66b..381f7adae50 100644 --- a/azure/services/subnets/subnets_test.go +++ b/azure/services/subnets/subnets_test.go @@ -21,19 +21,15 @@ import ( "net/http" "testing" - gomockinternal "sigs.k8s.io/cluster-api-provider-azure/internal/test/matchers/gomock" - - . 
"github.com/onsi/gomega" - "k8s.io/klog/v2/klogr" - "sigs.k8s.io/cluster-api-provider-azure/azure" - "sigs.k8s.io/cluster-api-provider-azure/azure/services/subnets/mock_subnets" - - "github.com/golang/mock/gomock" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/to" + "github.com/golang/mock/gomock" + . "github.com/onsi/gomega" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" + "sigs.k8s.io/cluster-api-provider-azure/azure" + "sigs.k8s.io/cluster-api-provider-azure/azure/services/subnets/mock_subnets" + gomockinternal "sigs.k8s.io/cluster-api-provider-azure/internal/test/matchers/gomock" ) func TestReconcileSubnets(t *testing.T) { @@ -46,7 +42,6 @@ func TestReconcileSubnets(t *testing.T) { name: "subnet does not exist", expectedError: "", expect: func(s *mock_subnets.MockSubnetScopeMockRecorder, m *mock_subnets.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.SubnetSpecs().Return([]azure.SubnetSpec{ { Name: "my-subnet", @@ -78,7 +73,6 @@ func TestReconcileSubnets(t *testing.T) { name: "subnet ipv6 does not exist", expectedError: "", expect: func(s *mock_subnets.MockSubnetScopeMockRecorder, m *mock_subnets.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.SubnetSpecs().Return([]azure.SubnetSpec{ { Name: "my-ipv6-subnet", @@ -112,7 +106,6 @@ func TestReconcileSubnets(t *testing.T) { name: "fail to create subnet", expectedError: "failed to create subnet my-subnet in resource group : #: Internal Server Error: StatusCode=500", expect: func(s *mock_subnets.MockSubnetScopeMockRecorder, m *mock_subnets.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.SubnetSpecs().Return([]azure.SubnetSpec{ { Name: "my-subnet", @@ -138,7 +131,6 @@ func TestReconcileSubnets(t *testing.T) { name: "fail to get existing subnet", 
expectedError: "failed to get subnet my-subnet: failed to fetch subnet named my-vnet in vnet my-subnet: #: Internal Server Error: StatusCode=500", expect: func(s *mock_subnets.MockSubnetScopeMockRecorder, m *mock_subnets.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.SubnetSpecs().Return([]azure.SubnetSpec{ { Name: "my-subnet", @@ -161,7 +153,6 @@ func TestReconcileSubnets(t *testing.T) { name: "vnet was provided but subnet is missing", expectedError: "vnet was provided but subnet my-subnet is missing", expect: func(s *mock_subnets.MockSubnetScopeMockRecorder, m *mock_subnets.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.SubnetSpecs().Return([]azure.SubnetSpec{ { Name: "my-subnet", @@ -189,7 +180,6 @@ func TestReconcileSubnets(t *testing.T) { name: "vnet was provided and subnet exists", expectedError: "", expect: func(s *mock_subnets.MockSubnetScopeMockRecorder, m *mock_subnets.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.Subnet("my-subnet").AnyTimes().Return(infrav1.SubnetSpec{ ID: "subnet-id", Name: "my-subnet", @@ -274,7 +264,6 @@ func TestReconcileSubnets(t *testing.T) { name: "vnet for ipv6 is provided", expectedError: "", expect: func(s *mock_subnets.MockSubnetScopeMockRecorder, m *mock_subnets.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.Subnet("my-ipv6-subnet").AnyTimes().Return(infrav1.SubnetSpec{ ID: "subnet-id", Name: "my-ipv6-subnet", @@ -366,7 +355,6 @@ func TestReconcileSubnets(t *testing.T) { name: "doesn't overwrite existing NAT Gateway", expectedError: "", expect: func(s *mock_subnets.MockSubnetScopeMockRecorder, m *mock_subnets.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.Subnet("my-subnet").AnyTimes().Return(infrav1.SubnetSpec{ ID: "subnet-id", Name: "my-subnet", @@ -432,7 +420,6 @@ func 
TestReconcileSubnets(t *testing.T) { name: "spec has empty CIDR and ID data but GET from Azure has the values", expectedError: "", expect: func(s *mock_subnets.MockSubnetScopeMockRecorder, m *mock_subnets.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.Subnet("my-subnet").AnyTimes().Return(infrav1.SubnetSpec{ ID: "", Name: "my-subnet", @@ -518,7 +505,6 @@ func TestDeleteSubnets(t *testing.T) { name: "subnet deleted successfully", expectedError: "", expect: func(s *mock_subnets.MockSubnetScopeMockRecorder, m *mock_subnets.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.SubnetSpecs().Return([]azure.SubnetSpec{ { Name: "my-subnet", @@ -548,7 +534,6 @@ func TestDeleteSubnets(t *testing.T) { name: "subnet already deleted", expectedError: "", expect: func(s *mock_subnets.MockSubnetScopeMockRecorder, m *mock_subnets.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.SubnetSpecs().Return([]azure.SubnetSpec{ { Name: "my-subnet", @@ -570,7 +555,6 @@ func TestDeleteSubnets(t *testing.T) { name: "node subnet already deleted and controlplane subnet deleted successfully", expectedError: "", expect: func(s *mock_subnets.MockSubnetScopeMockRecorder, m *mock_subnets.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.SubnetSpecs().Return([]azure.SubnetSpec{ { Name: "my-subnet", @@ -601,7 +585,6 @@ func TestDeleteSubnets(t *testing.T) { name: "skip delete if vnet is managed", expectedError: "", expect: func(s *mock_subnets.MockSubnetScopeMockRecorder, m *mock_subnets.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.SubnetSpecs().Return([]azure.SubnetSpec{ { Name: "my-subnet", @@ -621,7 +604,6 @@ func TestDeleteSubnets(t *testing.T) { name: "fail delete subnet", expectedError: "failed to delete subnet my-subnet in resource group my-rg: #: Internal Server Error: 
StatusCode=500", expect: func(s *mock_subnets.MockSubnetScopeMockRecorder, m *mock_subnets.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.SubnetSpecs().Return([]azure.SubnetSpec{ { Name: "my-subnet", diff --git a/azure/services/tags/mock_tags/tags_mock.go b/azure/services/tags/mock_tags/tags_mock.go index 15f7c9154f3..ecb3350a04a 100644 --- a/azure/services/tags/mock_tags/tags_mock.go +++ b/azure/services/tags/mock_tags/tags_mock.go @@ -24,7 +24,6 @@ import ( reflect "reflect" autorest "github.com/Azure/go-autorest/autorest" - logr "github.com/go-logr/logr" gomock "github.com/golang/mock/gomock" azure "sigs.k8s.io/cluster-api-provider-azure/azure" ) @@ -151,37 +150,6 @@ func (mr *MockTagScopeMockRecorder) ClusterName() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClusterName", reflect.TypeOf((*MockTagScope)(nil).ClusterName)) } -// Enabled mocks base method. -func (m *MockTagScope) Enabled() bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Enabled") - ret0, _ := ret[0].(bool) - return ret0 -} - -// Enabled indicates an expected call of Enabled. -func (mr *MockTagScopeMockRecorder) Enabled() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Enabled", reflect.TypeOf((*MockTagScope)(nil).Enabled)) -} - -// Error mocks base method. -func (m *MockTagScope) Error(err error, msg string, keysAndValues ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{err, msg} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Error", varargs...) -} - -// Error indicates an expected call of Error. -func (mr *MockTagScopeMockRecorder) Error(err, msg interface{}, keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{err, msg}, keysAndValues...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Error", reflect.TypeOf((*MockTagScope)(nil).Error), varargs...) 
-} - // HashKey mocks base method. func (m *MockTagScope) HashKey() string { m.ctrl.T.Helper() @@ -196,23 +164,6 @@ func (mr *MockTagScopeMockRecorder) HashKey() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HashKey", reflect.TypeOf((*MockTagScope)(nil).HashKey)) } -// Info mocks base method. -func (m *MockTagScope) Info(msg string, keysAndValues ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{msg} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Info", varargs...) -} - -// Info indicates an expected call of Info. -func (mr *MockTagScopeMockRecorder) Info(msg interface{}, keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{msg}, keysAndValues...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Info", reflect.TypeOf((*MockTagScope)(nil).Info), varargs...) -} - // SubscriptionID mocks base method. func (m *MockTagScope) SubscriptionID() string { m.ctrl.T.Helper() @@ -268,49 +219,3 @@ func (mr *MockTagScopeMockRecorder) UpdateAnnotationJSON(arg0, arg1 interface{}) mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAnnotationJSON", reflect.TypeOf((*MockTagScope)(nil).UpdateAnnotationJSON), arg0, arg1) } - -// V mocks base method. -func (m *MockTagScope) V(level int) logr.Logger { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "V", level) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// V indicates an expected call of V. -func (mr *MockTagScopeMockRecorder) V(level interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "V", reflect.TypeOf((*MockTagScope)(nil).V), level) -} - -// WithName mocks base method. -func (m *MockTagScope) WithName(name string) logr.Logger { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WithName", name) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// WithName indicates an expected call of WithName. 
-func (mr *MockTagScopeMockRecorder) WithName(name interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithName", reflect.TypeOf((*MockTagScope)(nil).WithName), name) -} - -// WithValues mocks base method. -func (m *MockTagScope) WithValues(keysAndValues ...interface{}) logr.Logger { - m.ctrl.T.Helper() - varargs := []interface{}{} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "WithValues", varargs...) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// WithValues indicates an expected call of WithValues. -func (mr *MockTagScopeMockRecorder) WithValues(keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithValues", reflect.TypeOf((*MockTagScope)(nil).WithValues), keysAndValues...) -} diff --git a/azure/services/tags/tags.go b/azure/services/tags/tags.go index 56e0193039c..e10998d508d 100644 --- a/azure/services/tags/tags.go +++ b/azure/services/tags/tags.go @@ -21,9 +21,7 @@ import ( "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-10-01/resources" "github.com/Azure/go-autorest/autorest/to" - "github.com/go-logr/logr" "github.com/pkg/errors" - "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/azure/converters" "sigs.k8s.io/cluster-api-provider-azure/util/tele" @@ -31,7 +29,6 @@ import ( // TagScope defines the scope interface for a tags service. type TagScope interface { - logr.Logger azure.Authorizer ClusterName() string TagsSpecs() []azure.TagsSpec @@ -55,7 +52,7 @@ func New(scope TagScope) *Service { // Reconcile ensures tags are correct. 
func (s *Service) Reconcile(ctx context.Context) error { - ctx, _, done := tele.StartSpanWithLogger(ctx, "tags.Service.Reconcile") + ctx, log, done := tele.StartSpanWithLogger(ctx, "tags.Service.Reconcile") defer done() for _, tagsSpec := range s.Scope.TagsSpecs() { @@ -69,7 +66,7 @@ func (s *Service) Reconcile(ctx context.Context) error { } if !s.isResourceManaged(tags) { - s.Scope.V(4).Info("Skipping tags reconcile for not managed resource") + log.V(4).Info("Skipping tags reconcile for not managed resource") continue } @@ -79,7 +76,7 @@ func (s *Service) Reconcile(ctx context.Context) error { } changed, createdOrUpdated, deleted, newAnnotation := tagsChanged(lastAppliedTags, tagsSpec.Tags, tags) if changed { - s.Scope.V(2).Info("Updating tags") + log.V(2).Info("Updating tags") if len(createdOrUpdated) > 0 { createdOrUpdatedTags := make(map[string]*string) for k, v := range createdOrUpdated { @@ -106,7 +103,7 @@ func (s *Service) Reconcile(ctx context.Context) error { if err = s.Scope.UpdateAnnotationJSON(tagsSpec.Annotation, newAnnotation); err != nil { return err } - s.Scope.V(2).Info("successfully updated tags") + log.V(2).Info("successfully updated tags") } } return nil diff --git a/azure/services/tags/tags_test.go b/azure/services/tags/tags_test.go index 32dd7dcd207..80b2c4f4776 100644 --- a/azure/services/tags/tags_test.go +++ b/azure/services/tags/tags_test.go @@ -26,8 +26,6 @@ import ( "github.com/Azure/go-autorest/autorest/to" "github.com/golang/mock/gomock" . 
"github.com/onsi/gomega" - "k8s.io/klog/v2/klogr" - "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/azure/services/tags/mock_tags" gomockinternal "sigs.k8s.io/cluster-api-provider-azure/internal/test/matchers/gomock" @@ -44,7 +42,6 @@ func TestReconcileTags(t *testing.T) { expectedError: "", expect: func(s *mock_tags.MockTagScopeMockRecorder, m *mock_tags.MockclientMockRecorder) { s.ClusterName().AnyTimes().Return("test-cluster") - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) gomock.InOrder( s.TagsSpecs().Return([]azure.TagsSpec{ { @@ -104,7 +101,6 @@ func TestReconcileTags(t *testing.T) { expectedError: "", expect: func(s *mock_tags.MockTagScopeMockRecorder, m *mock_tags.MockclientMockRecorder) { s.ClusterName().AnyTimes().Return("test-cluster") - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.TagsSpecs().Return([]azure.TagsSpec{ { Scope: "/sub/123/fake/scope", @@ -123,7 +119,6 @@ func TestReconcileTags(t *testing.T) { expectedError: "", expect: func(s *mock_tags.MockTagScopeMockRecorder, m *mock_tags.MockclientMockRecorder) { s.ClusterName().AnyTimes().Return("test-cluster") - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) gomock.InOrder( s.TagsSpecs().Return([]azure.TagsSpec{ { @@ -159,7 +154,6 @@ func TestReconcileTags(t *testing.T) { expectedError: "failed to get existing tags: #: Internal Server Error: StatusCode=500", expect: func(s *mock_tags.MockTagScopeMockRecorder, m *mock_tags.MockclientMockRecorder) { s.ClusterName().AnyTimes().Return("test-cluster") - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.TagsSpecs().Return([]azure.TagsSpec{ { Scope: "/sub/123/fake/scope", @@ -178,7 +172,6 @@ func TestReconcileTags(t *testing.T) { expectedError: "cannot update tags: #: Internal Server Error: StatusCode=500", expect: func(s *mock_tags.MockTagScopeMockRecorder, m *mock_tags.MockclientMockRecorder) { 
s.ClusterName().AnyTimes().Return("test-cluster") - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.TagsSpecs().Return([]azure.TagsSpec{ { Scope: "/sub/123/fake/scope", @@ -209,7 +202,6 @@ func TestReconcileTags(t *testing.T) { expectedError: "", expect: func(s *mock_tags.MockTagScopeMockRecorder, m *mock_tags.MockclientMockRecorder) { s.ClusterName().AnyTimes().Return("test-cluster") - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.TagsSpecs().Return([]azure.TagsSpec{ { Scope: "/sub/123/fake/scope", diff --git a/azure/services/virtualmachines/mock_virtualmachines/virtualmachines_mock.go b/azure/services/virtualmachines/mock_virtualmachines/virtualmachines_mock.go index ab9b890266b..200aca50596 100644 --- a/azure/services/virtualmachines/mock_virtualmachines/virtualmachines_mock.go +++ b/azure/services/virtualmachines/mock_virtualmachines/virtualmachines_mock.go @@ -24,7 +24,6 @@ import ( reflect "reflect" autorest "github.com/Azure/go-autorest/autorest" - logr "github.com/go-logr/logr" gomock "github.com/golang/mock/gomock" v1 "k8s.io/api/core/v1" v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" @@ -137,37 +136,6 @@ func (mr *MockVMScopeMockRecorder) DeleteLongRunningOperationState(arg0, arg1 in return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteLongRunningOperationState", reflect.TypeOf((*MockVMScope)(nil).DeleteLongRunningOperationState), arg0, arg1) } -// Enabled mocks base method. -func (m *MockVMScope) Enabled() bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Enabled") - ret0, _ := ret[0].(bool) - return ret0 -} - -// Enabled indicates an expected call of Enabled. -func (mr *MockVMScopeMockRecorder) Enabled() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Enabled", reflect.TypeOf((*MockVMScope)(nil).Enabled)) -} - -// Error mocks base method. 
-func (m *MockVMScope) Error(err error, msg string, keysAndValues ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{err, msg} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Error", varargs...) -} - -// Error indicates an expected call of Error. -func (mr *MockVMScopeMockRecorder) Error(err, msg interface{}, keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{err, msg}, keysAndValues...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Error", reflect.TypeOf((*MockVMScope)(nil).Error), varargs...) -} - // GetLongRunningOperationState mocks base method. func (m *MockVMScope) GetLongRunningOperationState(arg0, arg1 string) *v1beta1.Future { m.ctrl.T.Helper() @@ -196,23 +164,6 @@ func (mr *MockVMScopeMockRecorder) HashKey() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HashKey", reflect.TypeOf((*MockVMScope)(nil).HashKey)) } -// Info mocks base method. -func (m *MockVMScope) Info(msg string, keysAndValues ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{msg} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Info", varargs...) -} - -// Info indicates an expected call of Info. -func (mr *MockVMScopeMockRecorder) Info(msg interface{}, keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{msg}, keysAndValues...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Info", reflect.TypeOf((*MockVMScope)(nil).Info), varargs...) -} - // SetAddresses mocks base method. func (m *MockVMScope) SetAddresses(arg0 []v1.NodeAddress) { m.ctrl.T.Helper() @@ -337,20 +288,6 @@ func (mr *MockVMScopeMockRecorder) UpdatePutStatus(arg0, arg1, arg2 interface{}) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdatePutStatus", reflect.TypeOf((*MockVMScope)(nil).UpdatePutStatus), arg0, arg1, arg2) } -// V mocks base method. 
-func (m *MockVMScope) V(level int) logr.Logger { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "V", level) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// V indicates an expected call of V. -func (mr *MockVMScopeMockRecorder) V(level interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "V", reflect.TypeOf((*MockVMScope)(nil).V), level) -} - // VMSpec mocks base method. func (m *MockVMScope) VMSpec() azure.ResourceSpecGetter { m.ctrl.T.Helper() @@ -364,35 +301,3 @@ func (mr *MockVMScopeMockRecorder) VMSpec() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VMSpec", reflect.TypeOf((*MockVMScope)(nil).VMSpec)) } - -// WithName mocks base method. -func (m *MockVMScope) WithName(name string) logr.Logger { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WithName", name) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// WithName indicates an expected call of WithName. -func (mr *MockVMScopeMockRecorder) WithName(name interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithName", reflect.TypeOf((*MockVMScope)(nil).WithName), name) -} - -// WithValues mocks base method. -func (m *MockVMScope) WithValues(keysAndValues ...interface{}) logr.Logger { - m.ctrl.T.Helper() - varargs := []interface{}{} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "WithValues", varargs...) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// WithValues indicates an expected call of WithValues. -func (mr *MockVMScopeMockRecorder) WithValues(keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithValues", reflect.TypeOf((*MockVMScope)(nil).WithValues), keysAndValues...) 
-} diff --git a/azure/services/virtualmachines/virtualmachines.go b/azure/services/virtualmachines/virtualmachines.go index 9a8bf78cfae..4b6452a4825 100644 --- a/azure/services/virtualmachines/virtualmachines.go +++ b/azure/services/virtualmachines/virtualmachines.go @@ -22,7 +22,6 @@ import ( "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-04-01/compute" "github.com/Azure/go-autorest/autorest/to" - "github.com/go-logr/logr" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" @@ -41,7 +40,6 @@ const serviceName = "virtualmachine" // VMScope defines the scope interface for a virtual machines service. type VMScope interface { - logr.Logger azure.Authorizer azure.AsyncStatusUpdater VMSpec() azure.ResourceSpecGetter @@ -80,7 +78,6 @@ func (s *Service) Reconcile(ctx context.Context) error { defer cancel() vmSpec := s.Scope.VMSpec() - result, err := async.CreateResource(ctx, s.Scope, s.Client, vmSpec, serviceName) s.Scope.UpdatePutStatus(infrav1.VMRunningCondition, serviceName, err) // Set the DiskReady condition here since the disk gets created with the VM. @@ -117,7 +114,6 @@ func (s *Service) Delete(ctx context.Context) error { defer cancel() vmSpec := s.Scope.VMSpec() - err := async.DeleteResource(ctx, s.Scope, s.Client, vmSpec, serviceName) if err != nil { s.Scope.SetVMState(infrav1.Deleting) @@ -132,8 +128,7 @@ func (s *Service) getAddresses(ctx context.Context, vm compute.VirtualMachine, r ctx, _, done := tele.StartSpanWithLogger(ctx, "virtualmachines.Service.getAddresses") defer done() - addresses := []corev1.NodeAddress{} - + var addresses []corev1.NodeAddress if vm.NetworkProfile.NetworkInterfaces == nil { return addresses, nil } diff --git a/azure/services/virtualmachines/virtualmachines_test.go b/azure/services/virtualmachines/virtualmachines_test.go index 8a437e2a4b8..2dc1f38e849 100644 --- a/azure/services/virtualmachines/virtualmachines_test.go +++ b/azure/services/virtualmachines/virtualmachines_test.go @@ -31,8 +31,6 @@ import ( . 
"github.com/onsi/gomega" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" - "k8s.io/klog/v2/klogr" - infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure/services/availabilitysets/mock_availabilitysets" "sigs.k8s.io/cluster-api-provider-azure/azure/services/networkinterfaces/mock_networkinterfaces" @@ -79,7 +77,6 @@ func TestReconcileVM(t *testing.T) { name: "create vm succeeds", expectedError: "", expect: func(s *mock_virtualmachines.MockVMScopeMockRecorder, m *mock_virtualmachines.MockClientMockRecorder, mnic *mock_networkinterfaces.MockClientMockRecorder, mpip *mock_publicips.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.VMSpec().Return(&fakeVMSpec) s.GetLongRunningOperationState("test-vm", serviceName) m.CreateOrUpdateAsync(gomockinternal.AContext(), &fakeVMSpec).Return(compute.VirtualMachine{ @@ -135,7 +132,6 @@ func TestReconcileVM(t *testing.T) { name: "create vm fails", expectedError: "failed to create resource test-group/test-vm (service: virtualmachine): #: Internal Server Error: StatusCode=500", expect: func(s *mock_virtualmachines.MockVMScopeMockRecorder, m *mock_virtualmachines.MockClientMockRecorder, mnic *mock_networkinterfaces.MockClientMockRecorder, mpip *mock_publicips.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.VMSpec().Return(&fakeVMSpec) s.GetLongRunningOperationState("test-vm", serviceName) m.CreateOrUpdateAsync(gomockinternal.AContext(), &fakeVMSpec).Return(nil, nil, internalError) @@ -190,7 +186,6 @@ func TestDeleteVM(t *testing.T) { name: "long running delete operation is done", expectedError: "", expect: func(s *mock_virtualmachines.MockVMScopeMockRecorder, m *mock_virtualmachines.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.VMSpec().AnyTimes().Return(&fakeVMSpec) s.GetLongRunningOperationState("test-vm", 
serviceName).Times(2).Return(&fakeFuture) m.IsDone(gomockinternal.AContext(), gomock.AssignableToTypeOf(&azureautorest.Future{})).Return(true, nil) @@ -204,7 +199,6 @@ func TestDeleteVM(t *testing.T) { name: "long running delete operation is not done", expectedError: "operation type DELETE on Azure resource test-group/test-vm is not done. Object will be requeued after 15s", expect: func(s *mock_virtualmachines.MockVMScopeMockRecorder, m *mock_virtualmachines.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.VMSpec().AnyTimes().Return(&fakeVMSpec) s.GetLongRunningOperationState("test-vm", serviceName).Times(2).Return(&fakeFuture) m.IsDone(gomockinternal.AContext(), gomock.AssignableToTypeOf(&azureautorest.Future{})).Return(false, nil) @@ -216,7 +210,6 @@ func TestDeleteVM(t *testing.T) { name: "vm doesn't exist", expectedError: "", expect: func(s *mock_virtualmachines.MockVMScopeMockRecorder, m *mock_virtualmachines.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.VMSpec().AnyTimes().Return(&fakeVMSpec) s.GetLongRunningOperationState("test-vm", serviceName) m.DeleteAsync(gomockinternal.AContext(), &fakeVMSpec).Return(nil, notFoundError) @@ -228,7 +221,6 @@ func TestDeleteVM(t *testing.T) { name: "error occurs when deleting vm", expectedError: "failed to delete resource test-group/test-vm (service: virtualmachine): #: Internal Server Error: StatusCode=500", expect: func(s *mock_virtualmachines.MockVMScopeMockRecorder, m *mock_virtualmachines.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.VMSpec().AnyTimes().Return(&fakeVMSpec) s.GetLongRunningOperationState("test-vm", serviceName).Return(nil) m.DeleteAsync(gomockinternal.AContext(), &fakeVMSpec).Return(nil, internalError) @@ -240,7 +232,6 @@ func TestDeleteVM(t *testing.T) { name: "context deadline exceeded while deleting vm", expectedError: "operation type DELETE on Azure 
resource test-group/test-vm is not done. Object will be requeued after 15s", expect: func(s *mock_virtualmachines.MockVMScopeMockRecorder, m *mock_virtualmachines.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.VMSpec().AnyTimes().Return(&fakeVMSpec) s.GetLongRunningOperationState("test-vm", serviceName).Return(nil) m.DeleteAsync(gomockinternal.AContext(), &fakeVMSpec).Return(&azureautorest.Future{}, errCtxExceeded) @@ -253,7 +244,6 @@ func TestDeleteVM(t *testing.T) { name: "delete the vm successfully", expectedError: "", expect: func(s *mock_virtualmachines.MockVMScopeMockRecorder, m *mock_virtualmachines.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.VMSpec().AnyTimes().Return(&fakeVMSpec) s.GetLongRunningOperationState("test-vm", serviceName).Return(nil) m.DeleteAsync(gomockinternal.AContext(), &fakeVMSpec).Return(nil, nil) diff --git a/azure/services/virtualnetworks/mock_virtualnetworks/virtualnetworks_mock.go b/azure/services/virtualnetworks/mock_virtualnetworks/virtualnetworks_mock.go index e6be4998ed3..2fbbf35b9b5 100644 --- a/azure/services/virtualnetworks/mock_virtualnetworks/virtualnetworks_mock.go +++ b/azure/services/virtualnetworks/mock_virtualnetworks/virtualnetworks_mock.go @@ -24,7 +24,6 @@ import ( reflect "reflect" autorest "github.com/Azure/go-autorest/autorest" - logr "github.com/go-logr/logr" gomock "github.com/golang/mock/gomock" v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" azure "sigs.k8s.io/cluster-api-provider-azure/azure" @@ -179,37 +178,6 @@ func (mr *MockVNetScopeMockRecorder) ClusterName() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClusterName", reflect.TypeOf((*MockVNetScope)(nil).ClusterName)) } -// Enabled mocks base method. 
-func (m *MockVNetScope) Enabled() bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Enabled") - ret0, _ := ret[0].(bool) - return ret0 -} - -// Enabled indicates an expected call of Enabled. -func (mr *MockVNetScopeMockRecorder) Enabled() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Enabled", reflect.TypeOf((*MockVNetScope)(nil).Enabled)) -} - -// Error mocks base method. -func (m *MockVNetScope) Error(err error, msg string, keysAndValues ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{err, msg} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Error", varargs...) -} - -// Error indicates an expected call of Error. -func (mr *MockVNetScopeMockRecorder) Error(err, msg interface{}, keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{err, msg}, keysAndValues...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Error", reflect.TypeOf((*MockVNetScope)(nil).Error), varargs...) -} - // FailureDomains mocks base method. func (m *MockVNetScope) FailureDomains() []string { m.ctrl.T.Helper() @@ -238,23 +206,6 @@ func (mr *MockVNetScopeMockRecorder) HashKey() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HashKey", reflect.TypeOf((*MockVNetScope)(nil).HashKey)) } -// Info mocks base method. -func (m *MockVNetScope) Info(msg string, keysAndValues ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{msg} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Info", varargs...) -} - -// Info indicates an expected call of Info. -func (mr *MockVNetScopeMockRecorder) Info(msg interface{}, keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{msg}, keysAndValues...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Info", reflect.TypeOf((*MockVNetScope)(nil).Info), varargs...) 
-} - // Location mocks base method. func (m *MockVNetScope) Location() string { m.ctrl.T.Helper() @@ -311,20 +262,6 @@ func (mr *MockVNetScopeMockRecorder) TenantID() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TenantID", reflect.TypeOf((*MockVNetScope)(nil).TenantID)) } -// V mocks base method. -func (m *MockVNetScope) V(level int) logr.Logger { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "V", level) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// V indicates an expected call of V. -func (mr *MockVNetScopeMockRecorder) V(level interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "V", reflect.TypeOf((*MockVNetScope)(nil).V), level) -} - // VNetSpec mocks base method. func (m *MockVNetScope) VNetSpec() azure.VNetSpec { m.ctrl.T.Helper() @@ -352,35 +289,3 @@ func (mr *MockVNetScopeMockRecorder) Vnet() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Vnet", reflect.TypeOf((*MockVNetScope)(nil).Vnet)) } - -// WithName mocks base method. -func (m *MockVNetScope) WithName(name string) logr.Logger { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WithName", name) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// WithName indicates an expected call of WithName. -func (mr *MockVNetScopeMockRecorder) WithName(name interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithName", reflect.TypeOf((*MockVNetScope)(nil).WithName), name) -} - -// WithValues mocks base method. -func (m *MockVNetScope) WithValues(keysAndValues ...interface{}) logr.Logger { - m.ctrl.T.Helper() - varargs := []interface{}{} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "WithValues", varargs...) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// WithValues indicates an expected call of WithValues. 
-func (mr *MockVNetScopeMockRecorder) WithValues(keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithValues", reflect.TypeOf((*MockVNetScope)(nil).WithValues), keysAndValues...) -} diff --git a/azure/services/virtualnetworks/virtualnetworks.go b/azure/services/virtualnetworks/virtualnetworks.go index 7150ff4c69c..9a6f42090d4 100644 --- a/azure/services/virtualnetworks/virtualnetworks.go +++ b/azure/services/virtualnetworks/virtualnetworks.go @@ -22,9 +22,7 @@ import ( "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network" "github.com/Azure/go-autorest/autorest/to" - "github.com/go-logr/logr" "github.com/pkg/errors" - infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/azure/converters" @@ -33,7 +31,6 @@ import ( // VNetScope defines the scope interface for a virtual network service. type VNetScope interface { - logr.Logger azure.ClusterDescriber Vnet() *infrav1.VnetSpec VNetSpec() azure.VNetSpec @@ -55,7 +52,7 @@ func New(scope VNetScope) *Service { // Reconcile gets/creates/updates a virtual network. 
func (s *Service) Reconcile(ctx context.Context) error { - ctx, _, done := tele.StartSpanWithLogger(ctx, "virtualnetworks.Service.Reconcile") + ctx, log, done := tele.StartSpanWithLogger(ctx, "virtualnetworks.Service.Reconcile") defer done() // Following should be created upstream and provided as an input to NewService @@ -76,12 +73,12 @@ func (s *Service) Reconcile(ctx context.Context) error { case err == nil: // vnet already exists, cannot update since it's immutable if !existingVnet.IsManaged(s.Scope.ClusterName()) { - s.Scope.V(2).Info("Working on custom VNet", "vnet-id", existingVnet.ID) + log.V(2).Info("Working on custom VNet", "vnet-id", existingVnet.ID) } existingVnet.DeepCopyInto(s.Scope.Vnet()) default: - s.Scope.V(2).Info("creating VNet", "VNet", vnetSpec.Name) + log.V(2).Info("creating VNet", "VNet", vnetSpec.Name) vnetProperties := network.VirtualNetwork{ Tags: converters.TagsToMap(infrav1.Build(infrav1.BuildParams{ @@ -104,7 +101,7 @@ func (s *Service) Reconcile(ctx context.Context) error { if err != nil { return errors.Wrapf(err, "failed to create virtual network %s", vnetSpec.Name) } - s.Scope.V(2).Info("successfully created VNet", "VNet", vnetSpec.Name) + log.V(2).Info("successfully created VNet", "VNet", vnetSpec.Name) } return nil @@ -112,7 +109,7 @@ func (s *Service) Reconcile(ctx context.Context) error { // Delete deletes the virtual network with the provided name. 
func (s *Service) Delete(ctx context.Context) error { - ctx, _, done := tele.StartSpanWithLogger(ctx, "virtualnetworks.Service.Delete") + ctx, log, done := tele.StartSpanWithLogger(ctx, "virtualnetworks.Service.Delete") defer done() vnetSpec := s.Scope.VNetSpec() @@ -123,11 +120,11 @@ func (s *Service) Delete(ctx context.Context) error { } if !existingVnet.IsManaged(s.Scope.ClusterName()) { - s.Scope.V(4).Info("Skipping VNet deletion in custom vnet mode") + log.V(4).Info("Skipping VNet deletion in custom vnet mode") return nil } - s.Scope.V(2).Info("deleting VNet", "VNet", vnetSpec.Name) + log.V(2).Info("deleting VNet", "VNet", vnetSpec.Name) err = s.Client.Delete(ctx, vnetSpec.ResourceGroup, vnetSpec.Name) if err != nil { if azure.ResourceGroupNotFound(err) || azure.ResourceNotFound(err) { @@ -138,19 +135,19 @@ func (s *Service) Delete(ctx context.Context) error { return errors.Wrapf(err, "failed to delete VNet %s in resource group %s", vnetSpec.Name, vnetSpec.ResourceGroup) } - s.Scope.V(2).Info("successfully deleted VNet", "VNet", vnetSpec.Name) + log.V(2).Info("successfully deleted VNet", "VNet", vnetSpec.Name) return nil } // getExisting provides information about an existing virtual network. 
func (s *Service) getExisting(ctx context.Context, spec azure.VNetSpec) (*infrav1.VnetSpec, error) { - ctx, _, done := tele.StartSpanWithLogger(ctx, "virtualnetworks.Service.getExisting") + ctx, log, done := tele.StartSpanWithLogger(ctx, "virtualnetworks.Service.getExisting") defer done() vnet, err := s.Client.Get(ctx, spec.ResourceGroup, spec.Name) if err != nil { if azure.ResourceNotFound(err) { - s.Scope.V(2).Info(fmt.Sprintf("Resource not found for VNet %q from resource group %q", spec.Name, spec.ResourceGroup)) + log.V(2).Info(fmt.Sprintf("Resource not found for VNet %q from resource group %q", spec.Name, spec.ResourceGroup)) return nil, err } return nil, errors.Wrapf(err, "failed to get VNet %s", spec.Name) diff --git a/azure/services/virtualnetworks/virtualnetworks_test.go b/azure/services/virtualnetworks/virtualnetworks_test.go index c4952ae1f9c..55bc78f2c54 100644 --- a/azure/services/virtualnetworks/virtualnetworks_test.go +++ b/azure/services/virtualnetworks/virtualnetworks_test.go @@ -26,8 +26,6 @@ import ( "github.com/Azure/go-autorest/autorest/to" "github.com/golang/mock/gomock" . 
"github.com/onsi/gomega" - "k8s.io/klog/v2/klogr" - infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/azure/services/virtualnetworks/mock_virtualnetworks" @@ -44,7 +42,6 @@ func TestReconcileVnet(t *testing.T) { name: "managed vnet exists", expectedError: "", expect: func(s *mock_virtualnetworks.MockVNetScopeMockRecorder, m *mock_virtualnetworks.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.ClusterName().AnyTimes().Return("fake-cluster") s.Vnet().AnyTimes().Return(&infrav1.VnetSpec{Name: "vnet-exists"}) s.VNetSpec().Return(azure.VNetSpec{ @@ -73,7 +70,6 @@ func TestReconcileVnet(t *testing.T) { name: "managed ipv6 vnet exists", expectedError: "", expect: func(s *mock_virtualnetworks.MockVNetScopeMockRecorder, m *mock_virtualnetworks.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.ClusterName().AnyTimes().Return("fake-cluster") s.Vnet().AnyTimes().Return(&infrav1.VnetSpec{Name: "vnet-exists"}) s.VNetSpec().Return(azure.VNetSpec{ @@ -105,7 +101,6 @@ func TestReconcileVnet(t *testing.T) { name: "vnet created successufuly", expectedError: "", expect: func(s *mock_virtualnetworks.MockVNetScopeMockRecorder, m *mock_virtualnetworks.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.ClusterName().AnyTimes().Return("fake-cluster") s.Location().AnyTimes().Return("fake-location") s.AdditionalTags().AnyTimes().Return(infrav1.Tags{}) @@ -125,7 +120,6 @@ func TestReconcileVnet(t *testing.T) { name: "ipv6 vnet created successufuly", expectedError: "", expect: func(s *mock_virtualnetworks.MockVNetScopeMockRecorder, m *mock_virtualnetworks.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.ClusterName().AnyTimes().Return("fake-cluster") s.Location().AnyTimes().Return("fake-location") 
s.AdditionalTags().AnyTimes().Return(infrav1.Tags{}) @@ -160,7 +154,6 @@ func TestReconcileVnet(t *testing.T) { name: "unmanaged vnet exists", expectedError: "", expect: func(s *mock_virtualnetworks.MockVNetScopeMockRecorder, m *mock_virtualnetworks.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.ClusterName().AnyTimes().Return("fake-cluster") s.Location().AnyTimes().Return("fake-location") s.Vnet().AnyTimes().Return(&infrav1.VnetSpec{Name: "custom-vnet"}) @@ -188,7 +181,6 @@ func TestReconcileVnet(t *testing.T) { name: "custom vnet not found", expectedError: "", expect: func(s *mock_virtualnetworks.MockVNetScopeMockRecorder, m *mock_virtualnetworks.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.ClusterName().AnyTimes().Return("fake-cluster") s.Location().AnyTimes().Return("fake-location") s.AdditionalTags().AnyTimes().Return(infrav1.Tags{}) @@ -208,7 +200,6 @@ func TestReconcileVnet(t *testing.T) { name: "failed to fetch vnet", expectedError: "failed to get VNet custom-vnet: failed to get VNet custom-vnet: #: Internal Server Error: StatusCode=500", expect: func(s *mock_virtualnetworks.MockVNetScopeMockRecorder, m *mock_virtualnetworks.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.ClusterName().AnyTimes().Return("fake-cluster") s.VNetSpec().Return(azure.VNetSpec{ ResourceGroup: "custom-vnet-rg", @@ -223,7 +214,6 @@ func TestReconcileVnet(t *testing.T) { name: "fail to create vnet", expectedError: "failed to create virtual network custom-vnet: #: Internal Server Honk: StatusCode=500", expect: func(s *mock_virtualnetworks.MockVNetScopeMockRecorder, m *mock_virtualnetworks.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.ClusterName().AnyTimes().Return("fake-cluster") s.Location().AnyTimes().Return("fake-location") s.AdditionalTags().AnyTimes().Return(infrav1.Tags{}) @@ -280,7 
+270,6 @@ func TestDeleteVnet(t *testing.T) { name: "managed vnet exists", expectedError: "", expect: func(s *mock_virtualnetworks.MockVNetScopeMockRecorder, m *mock_virtualnetworks.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.ClusterName().AnyTimes().Return("fake-cluster") s.Location().AnyTimes().Return("fake-location") s.AdditionalTags().AnyTimes().Return(infrav1.Tags{ @@ -316,7 +305,6 @@ func TestDeleteVnet(t *testing.T) { name: "managed vnet already deleted", expectedError: "", expect: func(s *mock_virtualnetworks.MockVNetScopeMockRecorder, m *mock_virtualnetworks.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.ClusterName().AnyTimes().Return("fake-cluster") s.Location().AnyTimes().Return("fake-location") s.AdditionalTags().AnyTimes().Return(infrav1.Tags{ @@ -338,7 +326,6 @@ func TestDeleteVnet(t *testing.T) { name: "unmanaged vnet", expectedError: "", expect: func(s *mock_virtualnetworks.MockVNetScopeMockRecorder, m *mock_virtualnetworks.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.ClusterName().AnyTimes().Return("fake-cluster") s.Location().AnyTimes().Return("fake-location") s.AdditionalTags().AnyTimes().Return(infrav1.Tags{}) @@ -367,7 +354,6 @@ func TestDeleteVnet(t *testing.T) { name: "fail to delete vnet", expectedError: "failed to delete VNet vnet-exists in resource group my-rg: #: Internal Honk Server: StatusCode=500", expect: func(s *mock_virtualnetworks.MockVNetScopeMockRecorder, m *mock_virtualnetworks.MockClientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.ClusterName().AnyTimes().Return("fake-cluster") s.Location().AnyTimes().Return("fake-location") s.AdditionalTags().AnyTimes().Return(infrav1.Tags{ diff --git a/azure/services/vmextensions/mock_vmextensions/vmextensions_mock.go b/azure/services/vmextensions/mock_vmextensions/vmextensions_mock.go index 
ce7542d0e2a..298108959a8 100644 --- a/azure/services/vmextensions/mock_vmextensions/vmextensions_mock.go +++ b/azure/services/vmextensions/mock_vmextensions/vmextensions_mock.go @@ -21,10 +21,10 @@ limitations under the License. package mock_vmextensions import ( + context "context" reflect "reflect" autorest "github.com/Azure/go-autorest/autorest" - logr "github.com/go-logr/logr" gomock "github.com/golang/mock/gomock" v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" azure "sigs.k8s.io/cluster-api-provider-azure/azure" @@ -179,37 +179,6 @@ func (mr *MockVMExtensionScopeMockRecorder) ClusterName() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClusterName", reflect.TypeOf((*MockVMExtensionScope)(nil).ClusterName)) } -// Enabled mocks base method. -func (m *MockVMExtensionScope) Enabled() bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Enabled") - ret0, _ := ret[0].(bool) - return ret0 -} - -// Enabled indicates an expected call of Enabled. -func (mr *MockVMExtensionScopeMockRecorder) Enabled() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Enabled", reflect.TypeOf((*MockVMExtensionScope)(nil).Enabled)) -} - -// Error mocks base method. -func (m *MockVMExtensionScope) Error(err error, msg string, keysAndValues ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{err, msg} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Error", varargs...) -} - -// Error indicates an expected call of Error. -func (mr *MockVMExtensionScopeMockRecorder) Error(err, msg interface{}, keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{err, msg}, keysAndValues...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Error", reflect.TypeOf((*MockVMExtensionScope)(nil).Error), varargs...) -} - // FailureDomains mocks base method. 
func (m *MockVMExtensionScope) FailureDomains() []string { m.ctrl.T.Helper() @@ -238,23 +207,6 @@ func (mr *MockVMExtensionScopeMockRecorder) HashKey() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HashKey", reflect.TypeOf((*MockVMExtensionScope)(nil).HashKey)) } -// Info mocks base method. -func (m *MockVMExtensionScope) Info(msg string, keysAndValues ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{msg} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Info", varargs...) -} - -// Info indicates an expected call of Info. -func (mr *MockVMExtensionScopeMockRecorder) Info(msg interface{}, keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{msg}, keysAndValues...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Info", reflect.TypeOf((*MockVMExtensionScope)(nil).Info), varargs...) -} - // Location mocks base method. func (m *MockVMExtensionScope) Location() string { m.ctrl.T.Helper() @@ -284,17 +236,17 @@ func (mr *MockVMExtensionScopeMockRecorder) ResourceGroup() *gomock.Call { } // SetBootstrapConditions mocks base method. -func (m *MockVMExtensionScope) SetBootstrapConditions(arg0, arg1 string) error { +func (m *MockVMExtensionScope) SetBootstrapConditions(arg0 context.Context, arg1, arg2 string) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetBootstrapConditions", arg0, arg1) + ret := m.ctrl.Call(m, "SetBootstrapConditions", arg0, arg1, arg2) ret0, _ := ret[0].(error) return ret0 } // SetBootstrapConditions indicates an expected call of SetBootstrapConditions. 
-func (mr *MockVMExtensionScopeMockRecorder) SetBootstrapConditions(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockVMExtensionScopeMockRecorder) SetBootstrapConditions(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetBootstrapConditions", reflect.TypeOf((*MockVMExtensionScope)(nil).SetBootstrapConditions), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetBootstrapConditions", reflect.TypeOf((*MockVMExtensionScope)(nil).SetBootstrapConditions), arg0, arg1, arg2) } // SubscriptionID mocks base method. @@ -325,20 +277,6 @@ func (mr *MockVMExtensionScopeMockRecorder) TenantID() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TenantID", reflect.TypeOf((*MockVMExtensionScope)(nil).TenantID)) } -// V mocks base method. -func (m *MockVMExtensionScope) V(level int) logr.Logger { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "V", level) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// V indicates an expected call of V. -func (mr *MockVMExtensionScopeMockRecorder) V(level interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "V", reflect.TypeOf((*MockVMExtensionScope)(nil).V), level) -} - // VMExtensionSpecs mocks base method. func (m *MockVMExtensionScope) VMExtensionSpecs() []azure.ExtensionSpec { m.ctrl.T.Helper() @@ -352,35 +290,3 @@ func (mr *MockVMExtensionScopeMockRecorder) VMExtensionSpecs() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VMExtensionSpecs", reflect.TypeOf((*MockVMExtensionScope)(nil).VMExtensionSpecs)) } - -// WithName mocks base method. -func (m *MockVMExtensionScope) WithName(name string) logr.Logger { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WithName", name) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// WithName indicates an expected call of WithName. 
-func (mr *MockVMExtensionScopeMockRecorder) WithName(name interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithName", reflect.TypeOf((*MockVMExtensionScope)(nil).WithName), name) -} - -// WithValues mocks base method. -func (m *MockVMExtensionScope) WithValues(keysAndValues ...interface{}) logr.Logger { - m.ctrl.T.Helper() - varargs := []interface{}{} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "WithValues", varargs...) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// WithValues indicates an expected call of WithValues. -func (mr *MockVMExtensionScopeMockRecorder) WithValues(keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithValues", reflect.TypeOf((*MockVMExtensionScope)(nil).WithValues), keysAndValues...) -} diff --git a/azure/services/vmextensions/vmextensions.go b/azure/services/vmextensions/vmextensions.go index 6dbbb36186d..63b15e01642 100644 --- a/azure/services/vmextensions/vmextensions.go +++ b/azure/services/vmextensions/vmextensions.go @@ -21,7 +21,6 @@ import ( "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-04-01/compute" "github.com/Azure/go-autorest/autorest/to" - "github.com/go-logr/logr" "github.com/pkg/errors" "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/util/tele" @@ -29,10 +28,9 @@ import ( // VMExtensionScope defines the scope interface for a vm extension service. type VMExtensionScope interface { - logr.Logger azure.ClusterDescriber VMExtensionSpecs() []azure.ExtensionSpec - SetBootstrapConditions(string, string) error + SetBootstrapConditions(context.Context, string, string) error } // Service provides operations on Azure resources. @@ -51,13 +49,13 @@ func New(scope VMExtensionScope) *Service { // Reconcile creates or updates the VM extension. 
func (s *Service) Reconcile(ctx context.Context) error { - _, _, done := tele.StartSpanWithLogger(ctx, "vmextensions.Service.Reconcile") + ctx, log, done := tele.StartSpanWithLogger(ctx, "vmextensions.Service.Reconcile") defer done() for _, extensionSpec := range s.Scope.VMExtensionSpecs() { if existing, err := s.client.Get(ctx, s.Scope.ResourceGroup(), extensionSpec.VMName, extensionSpec.Name); err == nil { // check the extension status and set the associated conditions. - if retErr := s.Scope.SetBootstrapConditions(to.String(existing.ProvisioningState), extensionSpec.Name); retErr != nil { + if retErr := s.Scope.SetBootstrapConditions(ctx, to.String(existing.ProvisioningState), extensionSpec.Name); retErr != nil { return retErr } // if the extension already exists, do not update it. @@ -66,7 +64,7 @@ func (s *Service) Reconcile(ctx context.Context) error { return errors.Wrapf(err, "failed to get vm extension %s on vm %s", extensionSpec.Name, extensionSpec.VMName) } - s.Scope.V(2).Info("creating VM extension", "vm extension", extensionSpec.Name) + log.V(2).Info("creating VM extension", "vm extension", extensionSpec.Name) err := s.client.CreateOrUpdateAsync( ctx, s.Scope.ResourceGroup(), @@ -86,7 +84,7 @@ func (s *Service) Reconcile(ctx context.Context) error { if err != nil { return errors.Wrapf(err, "failed to create VM extension %s on VM %s in resource group %s", extensionSpec.Name, extensionSpec.VMName, s.Scope.ResourceGroup()) } - s.Scope.V(2).Info("successfully created VM extension", "vm extension", extensionSpec.Name) + log.V(2).Info("successfully created VM extension", "vm extension", extensionSpec.Name) } return nil } diff --git a/azure/services/vmextensions/vmextensions_test.go b/azure/services/vmextensions/vmextensions_test.go index 3f1e954b039..281838e928d 100644 --- a/azure/services/vmextensions/vmextensions_test.go +++ b/azure/services/vmextensions/vmextensions_test.go @@ -21,16 +21,13 @@ import ( "net/http" "testing" - 
"github.com/Azure/go-autorest/autorest/to" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-04-01/compute" "github.com/Azure/go-autorest/autorest" - "sigs.k8s.io/cluster-api-provider-azure/azure/services/vmextensions/mock_vmextensions" - + "github.com/Azure/go-autorest/autorest/to" "github.com/golang/mock/gomock" . "github.com/onsi/gomega" - "k8s.io/klog/v2/klogr" "sigs.k8s.io/cluster-api-provider-azure/azure" + "sigs.k8s.io/cluster-api-provider-azure/azure/services/vmextensions/mock_vmextensions" gomockinternal "sigs.k8s.io/cluster-api-provider-azure/internal/test/matchers/gomock" ) @@ -44,7 +41,6 @@ func TestReconcileVMExtension(t *testing.T) { name: "extension is in succeeded state", expectedError: "", expect: func(s *mock_vmextensions.MockVMExtensionScopeMockRecorder, m *mock_vmextensions.MockclientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.VMExtensionSpecs().Return([]azure.ExtensionSpec{ { Name: "my-extension-1", @@ -64,14 +60,13 @@ func TestReconcileVMExtension(t *testing.T) { ID: to.StringPtr("fake/id"), Name: to.StringPtr("my-extension-1"), }, nil) - s.SetBootstrapConditions(string(compute.ProvisioningStateSucceeded), "my-extension-1") + s.SetBootstrapConditions(gomockinternal.AContext(), string(compute.ProvisioningStateSucceeded), "my-extension-1") }, }, { name: "extension is in failed state", expectedError: "", expect: func(s *mock_vmextensions.MockVMExtensionScopeMockRecorder, m *mock_vmextensions.MockclientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.VMExtensionSpecs().Return([]azure.ExtensionSpec{ { Name: "my-extension-1", @@ -91,14 +86,13 @@ func TestReconcileVMExtension(t *testing.T) { ID: to.StringPtr("fake/id"), Name: to.StringPtr("my-extension-1"), }, nil) - s.SetBootstrapConditions(string(compute.ProvisioningStateFailed), "my-extension-1") + s.SetBootstrapConditions(gomockinternal.AContext(), string(compute.ProvisioningStateFailed), "my-extension-1") 
}, }, { name: "extension is still creating", expectedError: "", expect: func(s *mock_vmextensions.MockVMExtensionScopeMockRecorder, m *mock_vmextensions.MockclientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.VMExtensionSpecs().Return([]azure.ExtensionSpec{ { Name: "my-extension-1", @@ -118,14 +112,13 @@ func TestReconcileVMExtension(t *testing.T) { ID: to.StringPtr("fake/id"), Name: to.StringPtr("my-extension-1"), }, nil) - s.SetBootstrapConditions(string(compute.ProvisioningStateCreating), "my-extension-1") + s.SetBootstrapConditions(gomockinternal.AContext(), string(compute.ProvisioningStateCreating), "my-extension-1") }, }, { name: "reconcile multiple extensions", expectedError: "", expect: func(s *mock_vmextensions.MockVMExtensionScopeMockRecorder, m *mock_vmextensions.MockclientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.VMExtensionSpecs().Return([]azure.ExtensionSpec{ { Name: "my-extension-1", @@ -154,7 +147,6 @@ func TestReconcileVMExtension(t *testing.T) { name: "error getting the extension", expectedError: "failed to get vm extension my-extension-1 on vm my-vm: #: Internal Server Error: StatusCode=500", expect: func(s *mock_vmextensions.MockVMExtensionScopeMockRecorder, m *mock_vmextensions.MockclientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.VMExtensionSpecs().Return([]azure.ExtensionSpec{ { Name: "my-extension-1", @@ -179,7 +171,6 @@ func TestReconcileVMExtension(t *testing.T) { name: "error creating the extension", expectedError: "failed to create VM extension my-extension-1 on VM my-vm in resource group my-rg: #: Internal Server Error: StatusCode=500", expect: func(s *mock_vmextensions.MockVMExtensionScopeMockRecorder, m *mock_vmextensions.MockclientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.VMExtensionSpecs().Return([]azure.ExtensionSpec{ { Name: "my-extension-1", diff --git 
a/azure/services/vmssextensions/mock_vmssextensions/vmssextensions_mock.go b/azure/services/vmssextensions/mock_vmssextensions/vmssextensions_mock.go index f36d6d2267d..40b2cb626e9 100644 --- a/azure/services/vmssextensions/mock_vmssextensions/vmssextensions_mock.go +++ b/azure/services/vmssextensions/mock_vmssextensions/vmssextensions_mock.go @@ -21,10 +21,10 @@ limitations under the License. package mock_vmssextensions import ( + context "context" reflect "reflect" autorest "github.com/Azure/go-autorest/autorest" - logr "github.com/go-logr/logr" gomock "github.com/golang/mock/gomock" v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" azure "sigs.k8s.io/cluster-api-provider-azure/azure" @@ -179,37 +179,6 @@ func (mr *MockVMSSExtensionScopeMockRecorder) ClusterName() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClusterName", reflect.TypeOf((*MockVMSSExtensionScope)(nil).ClusterName)) } -// Enabled mocks base method. -func (m *MockVMSSExtensionScope) Enabled() bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Enabled") - ret0, _ := ret[0].(bool) - return ret0 -} - -// Enabled indicates an expected call of Enabled. -func (mr *MockVMSSExtensionScopeMockRecorder) Enabled() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Enabled", reflect.TypeOf((*MockVMSSExtensionScope)(nil).Enabled)) -} - -// Error mocks base method. -func (m *MockVMSSExtensionScope) Error(err error, msg string, keysAndValues ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{err, msg} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Error", varargs...) -} - -// Error indicates an expected call of Error. -func (mr *MockVMSSExtensionScopeMockRecorder) Error(err, msg interface{}, keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{err, msg}, keysAndValues...) 
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Error", reflect.TypeOf((*MockVMSSExtensionScope)(nil).Error), varargs...) -} - // FailureDomains mocks base method. func (m *MockVMSSExtensionScope) FailureDomains() []string { m.ctrl.T.Helper() @@ -238,23 +207,6 @@ func (mr *MockVMSSExtensionScopeMockRecorder) HashKey() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HashKey", reflect.TypeOf((*MockVMSSExtensionScope)(nil).HashKey)) } -// Info mocks base method. -func (m *MockVMSSExtensionScope) Info(msg string, keysAndValues ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{msg} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Info", varargs...) -} - -// Info indicates an expected call of Info. -func (mr *MockVMSSExtensionScopeMockRecorder) Info(msg interface{}, keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{msg}, keysAndValues...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Info", reflect.TypeOf((*MockVMSSExtensionScope)(nil).Info), varargs...) -} - // Location mocks base method. func (m *MockVMSSExtensionScope) Location() string { m.ctrl.T.Helper() @@ -284,17 +236,17 @@ func (mr *MockVMSSExtensionScopeMockRecorder) ResourceGroup() *gomock.Call { } // SetBootstrapConditions mocks base method. -func (m *MockVMSSExtensionScope) SetBootstrapConditions(arg0, arg1 string) error { +func (m *MockVMSSExtensionScope) SetBootstrapConditions(arg0 context.Context, arg1, arg2 string) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetBootstrapConditions", arg0, arg1) + ret := m.ctrl.Call(m, "SetBootstrapConditions", arg0, arg1, arg2) ret0, _ := ret[0].(error) return ret0 } // SetBootstrapConditions indicates an expected call of SetBootstrapConditions. 
-func (mr *MockVMSSExtensionScopeMockRecorder) SetBootstrapConditions(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockVMSSExtensionScopeMockRecorder) SetBootstrapConditions(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetBootstrapConditions", reflect.TypeOf((*MockVMSSExtensionScope)(nil).SetBootstrapConditions), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetBootstrapConditions", reflect.TypeOf((*MockVMSSExtensionScope)(nil).SetBootstrapConditions), arg0, arg1, arg2) } // SubscriptionID mocks base method. @@ -325,20 +277,6 @@ func (mr *MockVMSSExtensionScopeMockRecorder) TenantID() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TenantID", reflect.TypeOf((*MockVMSSExtensionScope)(nil).TenantID)) } -// V mocks base method. -func (m *MockVMSSExtensionScope) V(level int) logr.Logger { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "V", level) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// V indicates an expected call of V. -func (mr *MockVMSSExtensionScopeMockRecorder) V(level interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "V", reflect.TypeOf((*MockVMSSExtensionScope)(nil).V), level) -} - // VMSSExtensionSpecs mocks base method. func (m *MockVMSSExtensionScope) VMSSExtensionSpecs() []azure.ExtensionSpec { m.ctrl.T.Helper() @@ -352,35 +290,3 @@ func (mr *MockVMSSExtensionScopeMockRecorder) VMSSExtensionSpecs() *gomock.Call mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VMSSExtensionSpecs", reflect.TypeOf((*MockVMSSExtensionScope)(nil).VMSSExtensionSpecs)) } - -// WithName mocks base method. -func (m *MockVMSSExtensionScope) WithName(name string) logr.Logger { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WithName", name) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// WithName indicates an expected call of WithName. 
-func (mr *MockVMSSExtensionScopeMockRecorder) WithName(name interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithName", reflect.TypeOf((*MockVMSSExtensionScope)(nil).WithName), name) -} - -// WithValues mocks base method. -func (m *MockVMSSExtensionScope) WithValues(keysAndValues ...interface{}) logr.Logger { - m.ctrl.T.Helper() - varargs := []interface{}{} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "WithValues", varargs...) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// WithValues indicates an expected call of WithValues. -func (mr *MockVMSSExtensionScopeMockRecorder) WithValues(keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithValues", reflect.TypeOf((*MockVMSSExtensionScope)(nil).WithValues), keysAndValues...) -} diff --git a/azure/services/vmssextensions/vmssextensions.go b/azure/services/vmssextensions/vmssextensions.go index 4b8c82ec715..0c0c87c0454 100644 --- a/azure/services/vmssextensions/vmssextensions.go +++ b/azure/services/vmssextensions/vmssextensions.go @@ -20,7 +20,6 @@ import ( "context" "github.com/Azure/go-autorest/autorest/to" - "github.com/go-logr/logr" "github.com/pkg/errors" "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/util/tele" @@ -28,10 +27,9 @@ import ( // VMSSExtensionScope defines the scope interface for a vmss extension service. type VMSSExtensionScope interface { - logr.Logger azure.ClusterDescriber VMSSExtensionSpecs() []azure.ExtensionSpec - SetBootstrapConditions(string, string) error + SetBootstrapConditions(context.Context, string, string) error } // Service provides operations on Azure resources. 
@@ -56,7 +54,7 @@ func (s *Service) Reconcile(ctx context.Context) error { for _, extensionSpec := range s.Scope.VMSSExtensionSpecs() { if existing, err := s.client.Get(ctx, s.Scope.ResourceGroup(), extensionSpec.VMName, extensionSpec.Name); err == nil { // check the extension status and set the associated conditions. - if retErr := s.Scope.SetBootstrapConditions(to.String(existing.ProvisioningState), extensionSpec.Name); retErr != nil { + if retErr := s.Scope.SetBootstrapConditions(ctx, to.String(existing.ProvisioningState), extensionSpec.Name); retErr != nil { return retErr } } else if !azure.ResourceNotFound(err) { diff --git a/azure/services/vmssextensions/vmssextensions_test.go b/azure/services/vmssextensions/vmssextensions_test.go index 5733dbe3d26..57c270e33da 100644 --- a/azure/services/vmssextensions/vmssextensions_test.go +++ b/azure/services/vmssextensions/vmssextensions_test.go @@ -21,16 +21,13 @@ import ( "net/http" "testing" - "github.com/Azure/go-autorest/autorest/to" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-04-01/compute" "github.com/Azure/go-autorest/autorest" - "sigs.k8s.io/cluster-api-provider-azure/azure/services/vmssextensions/mock_vmssextensions" - + "github.com/Azure/go-autorest/autorest/to" "github.com/golang/mock/gomock" . 
"github.com/onsi/gomega" - "k8s.io/klog/v2/klogr" "sigs.k8s.io/cluster-api-provider-azure/azure" + "sigs.k8s.io/cluster-api-provider-azure/azure/services/vmssextensions/mock_vmssextensions" gomockinternal "sigs.k8s.io/cluster-api-provider-azure/internal/test/matchers/gomock" ) @@ -44,7 +41,6 @@ func TestReconcileVMSSExtension(t *testing.T) { name: "extension already exists", expectedError: "", expect: func(s *mock_vmssextensions.MockVMSSExtensionScopeMockRecorder, m *mock_vmssextensions.MockclientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.VMSSExtensionSpecs().Return([]azure.ExtensionSpec{ { Name: "my-extension-1", @@ -64,14 +60,13 @@ func TestReconcileVMSSExtension(t *testing.T) { }, ID: to.StringPtr("some/fake/id"), }, nil) - s.SetBootstrapConditions(string(compute.ProvisioningStateSucceeded), "my-extension-1") + s.SetBootstrapConditions(gomockinternal.AContext(), string(compute.ProvisioningStateSucceeded), "my-extension-1") }, }, { name: "extension does not exist", expectedError: "", expect: func(s *mock_vmssextensions.MockVMSSExtensionScopeMockRecorder, m *mock_vmssextensions.MockclientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.VMSSExtensionSpecs().Return([]azure.ExtensionSpec{ { Name: "my-extension-1", @@ -98,7 +93,6 @@ func TestReconcileVMSSExtension(t *testing.T) { name: "error getting the extension", expectedError: "failed to get vm extension my-extension-1 on scale set my-vmss: #: Internal Server Error: StatusCode=500", expect: func(s *mock_vmssextensions.MockVMSSExtensionScopeMockRecorder, m *mock_vmssextensions.MockclientMockRecorder) { - s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) s.VMSSExtensionSpecs().Return([]azure.ExtensionSpec{ { Name: "my-extension-1", diff --git a/azure/services/vnetpeerings/mock_vnetpeerings/vnetpeerings_mock.go b/azure/services/vnetpeerings/mock_vnetpeerings/vnetpeerings_mock.go index c43ead8632e..47c94726a5f 100644 --- 
a/azure/services/vnetpeerings/mock_vnetpeerings/vnetpeerings_mock.go +++ b/azure/services/vnetpeerings/mock_vnetpeerings/vnetpeerings_mock.go @@ -24,7 +24,6 @@ import ( reflect "reflect" autorest "github.com/Azure/go-autorest/autorest" - logr "github.com/go-logr/logr" gomock "github.com/golang/mock/gomock" v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" azure "sigs.k8s.io/cluster-api-provider-azure/azure" @@ -136,37 +135,6 @@ func (mr *MockVnetPeeringScopeMockRecorder) DeleteLongRunningOperationState(arg0 return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteLongRunningOperationState", reflect.TypeOf((*MockVnetPeeringScope)(nil).DeleteLongRunningOperationState), arg0, arg1) } -// Enabled mocks base method. -func (m *MockVnetPeeringScope) Enabled() bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Enabled") - ret0, _ := ret[0].(bool) - return ret0 -} - -// Enabled indicates an expected call of Enabled. -func (mr *MockVnetPeeringScopeMockRecorder) Enabled() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Enabled", reflect.TypeOf((*MockVnetPeeringScope)(nil).Enabled)) -} - -// Error mocks base method. -func (m *MockVnetPeeringScope) Error(err error, msg string, keysAndValues ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{err, msg} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Error", varargs...) -} - -// Error indicates an expected call of Error. -func (mr *MockVnetPeeringScopeMockRecorder) Error(err, msg interface{}, keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{err, msg}, keysAndValues...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Error", reflect.TypeOf((*MockVnetPeeringScope)(nil).Error), varargs...) -} - // GetLongRunningOperationState mocks base method. 
func (m *MockVnetPeeringScope) GetLongRunningOperationState(arg0, arg1 string) *v1beta1.Future { m.ctrl.T.Helper() @@ -195,23 +163,6 @@ func (mr *MockVnetPeeringScopeMockRecorder) HashKey() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HashKey", reflect.TypeOf((*MockVnetPeeringScope)(nil).HashKey)) } -// Info mocks base method. -func (m *MockVnetPeeringScope) Info(msg string, keysAndValues ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{msg} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Info", varargs...) -} - -// Info indicates an expected call of Info. -func (mr *MockVnetPeeringScopeMockRecorder) Info(msg interface{}, keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{msg}, keysAndValues...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Info", reflect.TypeOf((*MockVnetPeeringScope)(nil).Info), varargs...) -} - // SetLongRunningOperationState mocks base method. func (m *MockVnetPeeringScope) SetLongRunningOperationState(arg0 *v1beta1.Future) { m.ctrl.T.Helper() @@ -288,20 +239,6 @@ func (mr *MockVnetPeeringScopeMockRecorder) UpdatePutStatus(arg0, arg1, arg2 int return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdatePutStatus", reflect.TypeOf((*MockVnetPeeringScope)(nil).UpdatePutStatus), arg0, arg1, arg2) } -// V mocks base method. -func (m *MockVnetPeeringScope) V(level int) logr.Logger { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "V", level) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// V indicates an expected call of V. -func (mr *MockVnetPeeringScopeMockRecorder) V(level interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "V", reflect.TypeOf((*MockVnetPeeringScope)(nil).V), level) -} - // VnetPeeringSpecs mocks base method. 
func (m *MockVnetPeeringScope) VnetPeeringSpecs() []azure.ResourceSpecGetter { m.ctrl.T.Helper() @@ -315,35 +252,3 @@ func (mr *MockVnetPeeringScopeMockRecorder) VnetPeeringSpecs() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VnetPeeringSpecs", reflect.TypeOf((*MockVnetPeeringScope)(nil).VnetPeeringSpecs)) } - -// WithName mocks base method. -func (m *MockVnetPeeringScope) WithName(name string) logr.Logger { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WithName", name) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// WithName indicates an expected call of WithName. -func (mr *MockVnetPeeringScopeMockRecorder) WithName(name interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithName", reflect.TypeOf((*MockVnetPeeringScope)(nil).WithName), name) -} - -// WithValues mocks base method. -func (m *MockVnetPeeringScope) WithValues(keysAndValues ...interface{}) logr.Logger { - m.ctrl.T.Helper() - varargs := []interface{}{} - for _, a := range keysAndValues { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "WithValues", varargs...) - ret0, _ := ret[0].(logr.Logger) - return ret0 -} - -// WithValues indicates an expected call of WithValues. -func (mr *MockVnetPeeringScopeMockRecorder) WithValues(keysAndValues ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithValues", reflect.TypeOf((*MockVnetPeeringScope)(nil).WithValues), keysAndValues...) 
-} diff --git a/azure/services/vnetpeerings/vnetpeerings.go b/azure/services/vnetpeerings/vnetpeerings.go index 42674a1184d..4c9dda94343 100644 --- a/azure/services/vnetpeerings/vnetpeerings.go +++ b/azure/services/vnetpeerings/vnetpeerings.go @@ -19,8 +19,6 @@ package vnetpeerings import ( "context" - "github.com/go-logr/logr" - infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/azure/services/async" @@ -32,7 +30,6 @@ const serviceName = "vnetpeerings" // VnetPeeringScope defines the scope interface for a subnet service. type VnetPeeringScope interface { - logr.Logger azure.Authorizer azure.AsyncStatusUpdater VnetPeeringSpecs() []azure.ResourceSpecGetter @@ -54,8 +51,8 @@ func New(scope VnetPeeringScope) *Service { // Reconcile gets/creates/updates a peering. func (s *Service) Reconcile(ctx context.Context) error { - ctx, span := tele.Tracer().Start(ctx, "vnetpeerings.Service.Reconcile") - defer span.End() + ctx, _, done := tele.StartSpanWithLogger(ctx, "vnetpeerings.Service.Reconcile") + defer done() ctx, cancel := context.WithTimeout(ctx, reconciler.DefaultAzureServiceReconcileTimeout) defer cancel() @@ -78,8 +75,8 @@ func (s *Service) Reconcile(ctx context.Context) error { // Delete deletes the peering with the provided name. 
func (s *Service) Delete(ctx context.Context) error { - ctx, span := tele.Tracer().Start(ctx, "vnetpeerings.Service.Delete") - defer span.End() + ctx, _, done := tele.StartSpanWithLogger(ctx, "vnetpeerings.Service.Delete") + defer done() ctx, cancel := context.WithTimeout(ctx, reconciler.DefaultAzureServiceReconcileTimeout) defer cancel() diff --git a/azure/services/vnetpeerings/vnetpeerings_test.go b/azure/services/vnetpeerings/vnetpeerings_test.go index c9ff8d4c3e3..c210f7afcb9 100644 --- a/azure/services/vnetpeerings/vnetpeerings_test.go +++ b/azure/services/vnetpeerings/vnetpeerings_test.go @@ -26,7 +26,6 @@ import ( azureautorest "github.com/Azure/go-autorest/autorest/azure" "github.com/golang/mock/gomock" . "github.com/onsi/gomega" - "k8s.io/klog/v2/klogr" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/azure/services/vnetpeerings/mock_vnetpeerings" @@ -97,11 +96,9 @@ func TestReconcileVnetPeerings(t *testing.T) { name: "create one peering", expectedError: "", expect: func(p *mock_vnetpeerings.MockVnetPeeringScopeMockRecorder, m *mock_vnetpeerings.MockClientMockRecorder) { - p.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) p.VnetPeeringSpecs().Return(fakePeeringSpecs[:1]) p.GetLongRunningOperationState("vnet1-to-vnet2", serviceName).Return(nil) m.CreateOrUpdateAsync(gomockinternal.AContext(), &fakePeering1To2).Return(nil, nil, nil) - p.UpdatePutStatus(infrav1.VnetPeeringReadyCondition, serviceName, nil) }, }, @@ -109,7 +106,6 @@ func TestReconcileVnetPeerings(t *testing.T) { name: "create no peerings", expectedError: "", expect: func(p *mock_vnetpeerings.MockVnetPeeringScopeMockRecorder, m *mock_vnetpeerings.MockClientMockRecorder) { - p.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) p.VnetPeeringSpecs().Return(fakePeeringSpecs[:0]) p.UpdatePutStatus(infrav1.VnetPeeringReadyCondition, serviceName, nil) }, @@ -118,14 +114,11 @@ 
func TestReconcileVnetPeerings(t *testing.T) { name: "create even number of peerings", expectedError: "", expect: func(p *mock_vnetpeerings.MockVnetPeeringScopeMockRecorder, m *mock_vnetpeerings.MockClientMockRecorder) { - p.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) p.VnetPeeringSpecs().Return(fakePeeringSpecs[:2]) p.GetLongRunningOperationState("vnet1-to-vnet2", serviceName).Return(nil) m.CreateOrUpdateAsync(gomockinternal.AContext(), &fakePeering1To2).Return(nil, nil, nil) - p.GetLongRunningOperationState("vnet2-to-vnet1", serviceName).Return(nil) m.CreateOrUpdateAsync(gomockinternal.AContext(), &fakePeering2To1).Return(nil, nil, nil) - p.UpdatePutStatus(infrav1.VnetPeeringReadyCondition, serviceName, nil) }, }, @@ -133,7 +126,6 @@ func TestReconcileVnetPeerings(t *testing.T) { name: "create odd number of peerings", expectedError: "", expect: func(p *mock_vnetpeerings.MockVnetPeeringScopeMockRecorder, m *mock_vnetpeerings.MockClientMockRecorder) { - p.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) p.VnetPeeringSpecs().Return(fakePeeringExtraSpecs) p.GetLongRunningOperationState("vnet1-to-vnet2", serviceName).Return(nil) m.CreateOrUpdateAsync(gomockinternal.AContext(), &fakePeering1To2).Return(nil, nil, nil) @@ -151,7 +143,6 @@ func TestReconcileVnetPeerings(t *testing.T) { name: "create multiple peerings on one vnet", expectedError: "", expect: func(p *mock_vnetpeerings.MockVnetPeeringScopeMockRecorder, m *mock_vnetpeerings.MockClientMockRecorder) { - p.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) p.VnetPeeringSpecs().Return(fakePeeringSpecs) p.GetLongRunningOperationState("vnet1-to-vnet2", serviceName).Return(nil) m.CreateOrUpdateAsync(gomockinternal.AContext(), &fakePeering1To2).Return(nil, nil, nil) @@ -172,7 +163,6 @@ func TestReconcileVnetPeerings(t *testing.T) { name: "error in creating peering", expectedError: "failed to create resource group1/vnet1-to-vnet3 (service: vnetpeerings): this is an error", 
expect: func(p *mock_vnetpeerings.MockVnetPeeringScopeMockRecorder, m *mock_vnetpeerings.MockClientMockRecorder) { - p.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) p.VnetPeeringSpecs().Return(fakePeeringSpecs) p.GetLongRunningOperationState("vnet1-to-vnet2", serviceName).Return(nil) m.CreateOrUpdateAsync(gomockinternal.AContext(), &fakePeering1To2).Return(nil, nil, nil) @@ -193,7 +183,6 @@ func TestReconcileVnetPeerings(t *testing.T) { name: "error in creating peering which is not done", expectedError: "failed to create resource group1/vnet1-to-vnet3 (service: vnetpeerings): this is an error", expect: func(p *mock_vnetpeerings.MockVnetPeeringScopeMockRecorder, m *mock_vnetpeerings.MockClientMockRecorder) { - p.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) p.VnetPeeringSpecs().Return(fakePeeringSpecs) p.GetLongRunningOperationState("vnet1-to-vnet2", serviceName).Return(nil) m.CreateOrUpdateAsync(gomockinternal.AContext(), &fakePeering1To2).Return(nil, nil, nil) @@ -252,7 +241,6 @@ func TestDeleteVnetPeerings(t *testing.T) { name: "delete one peering", expectedError: "", expect: func(p *mock_vnetpeerings.MockVnetPeeringScopeMockRecorder, m *mock_vnetpeerings.MockClientMockRecorder) { - p.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) p.VnetPeeringSpecs().Return(fakePeeringSpecs[:1]) p.GetLongRunningOperationState("vnet1-to-vnet2", serviceName).Return(nil) m.DeleteAsync(gomockinternal.AContext(), &fakePeering1To2).Return(nil, nil) @@ -264,7 +252,6 @@ func TestDeleteVnetPeerings(t *testing.T) { name: "delete no peerings", expectedError: "", expect: func(p *mock_vnetpeerings.MockVnetPeeringScopeMockRecorder, m *mock_vnetpeerings.MockClientMockRecorder) { - p.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) p.VnetPeeringSpecs().Return(fakePeeringSpecs[:0]) p.UpdateDeleteStatus(infrav1.VnetPeeringReadyCondition, serviceName, nil) }, @@ -273,7 +260,6 @@ func TestDeleteVnetPeerings(t *testing.T) { name: 
"delete even number of peerings", expectedError: "", expect: func(p *mock_vnetpeerings.MockVnetPeeringScopeMockRecorder, m *mock_vnetpeerings.MockClientMockRecorder) { - p.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) p.VnetPeeringSpecs().Return(fakePeeringSpecs[:2]) p.GetLongRunningOperationState("vnet1-to-vnet2", serviceName).Return(nil) m.DeleteAsync(gomockinternal.AContext(), &fakePeering1To2).Return(nil, nil) @@ -288,7 +274,6 @@ func TestDeleteVnetPeerings(t *testing.T) { name: "delete odd number of peerings", expectedError: "", expect: func(p *mock_vnetpeerings.MockVnetPeeringScopeMockRecorder, m *mock_vnetpeerings.MockClientMockRecorder) { - p.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) p.VnetPeeringSpecs().Return(fakePeeringExtraSpecs) p.GetLongRunningOperationState("vnet1-to-vnet2", serviceName).Return(nil) m.DeleteAsync(gomockinternal.AContext(), &fakePeering1To2).Return(nil, nil) @@ -306,7 +291,6 @@ func TestDeleteVnetPeerings(t *testing.T) { name: "delete multiple peerings on one vnet", expectedError: "", expect: func(p *mock_vnetpeerings.MockVnetPeeringScopeMockRecorder, m *mock_vnetpeerings.MockClientMockRecorder) { - p.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) p.VnetPeeringSpecs().Return(fakePeeringSpecs) p.GetLongRunningOperationState("vnet1-to-vnet2", serviceName).Return(nil) m.DeleteAsync(gomockinternal.AContext(), &fakePeering1To2).Return(nil, nil) @@ -327,7 +311,6 @@ func TestDeleteVnetPeerings(t *testing.T) { name: "error in deleting peering", expectedError: "failed to delete resource group1/vnet1-to-vnet3 (service: vnetpeerings): this is an error", expect: func(p *mock_vnetpeerings.MockVnetPeeringScopeMockRecorder, m *mock_vnetpeerings.MockClientMockRecorder) { - p.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) p.VnetPeeringSpecs().Return(fakePeeringSpecs) p.GetLongRunningOperationState("vnet1-to-vnet2", serviceName).Return(nil) m.DeleteAsync(gomockinternal.AContext(), 
&fakePeering1To2).Return(nil, nil) @@ -348,7 +331,6 @@ func TestDeleteVnetPeerings(t *testing.T) { name: "error in deleting peering which is not done", expectedError: "failed to delete resource group1/vnet1-to-vnet3 (service: vnetpeerings): this is an error", expect: func(p *mock_vnetpeerings.MockVnetPeeringScopeMockRecorder, m *mock_vnetpeerings.MockClientMockRecorder) { - p.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) p.VnetPeeringSpecs().Return(fakePeeringSpecs) p.GetLongRunningOperationState("vnet1-to-vnet2", serviceName).Return(nil) m.DeleteAsync(gomockinternal.AContext(), &fakePeering1To2).Return(nil, nil) diff --git a/controllers/azurecluster_controller.go b/controllers/azurecluster_controller.go index a5b25345338..63ea3f64c51 100644 --- a/controllers/azurecluster_controller.go +++ b/controllers/azurecluster_controller.go @@ -23,7 +23,6 @@ import ( corev1 "k8s.io/api/core/v1" - "github.com/go-logr/logr" "github.com/pkg/errors" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/client-go/tools/record" @@ -50,7 +49,6 @@ import ( // AzureClusterReconciler reconciles an AzureCluster object. type AzureClusterReconciler struct { client.Client - Log logr.Logger Recorder record.EventRecorder ReconcileTimeout time.Duration WatchFilterValue string @@ -60,10 +58,9 @@ type AzureClusterReconciler struct { type azureClusterServiceCreator func(clusterScope *scope.ClusterScope) (*azureClusterService, error) // NewAzureClusterReconciler returns a new AzureClusterReconciler instance. 
-func NewAzureClusterReconciler(client client.Client, log logr.Logger, recorder record.EventRecorder, reconcileTimeout time.Duration, watchFilterValue string) *AzureClusterReconciler { +func NewAzureClusterReconciler(client client.Client, recorder record.EventRecorder, reconcileTimeout time.Duration, watchFilterValue string) *AzureClusterReconciler { acr := &AzureClusterReconciler{ Client: client, - Log: log, Recorder: recorder, ReconcileTimeout: reconcileTimeout, WatchFilterValue: watchFilterValue, @@ -76,10 +73,12 @@ func NewAzureClusterReconciler(client client.Client, log logr.Logger, recorder r // SetupWithManager initializes this controller with a manager. func (acr *AzureClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options Options) error { - ctx, _, done := tele.StartSpanWithLogger(ctx, "controllers.AzureClusterReconciler.SetupWithManager") + _, log, done := tele.StartSpanWithLogger(ctx, + "controllers.AzureClusterReconciler.SetupWithManager", + tele.KVP("controller", "AzureCluster"), + ) defer done() - log := acr.Log.WithValues("controller", "AzureCluster") var r reconcile.Reconciler = acr if options.Cache != nil { r = coalescing.NewReconciler(acr, options.Cache, log) @@ -88,8 +87,8 @@ func (acr *AzureClusterReconciler) SetupWithManager(ctx context.Context, mgr ctr c, err := ctrl.NewControllerManagedBy(mgr). WithOptions(options.Options). For(&infrav1.AzureCluster{}). - WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), acr.WatchFilterValue)). - WithEventFilter(predicates.ResourceIsNotExternallyManaged(ctrl.LoggerFrom(ctx))). + WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(log, acr.WatchFilterValue)). + WithEventFilter(predicates.ResourceIsNotExternallyManaged(log)). 
Build(r) if err != nil { return errors.Wrap(err, "error creating controller") @@ -100,7 +99,7 @@ func (acr *AzureClusterReconciler) SetupWithManager(ctx context.Context, mgr ctr &source.Kind{Type: &clusterv1.Cluster{}}, handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(infrav1.GroupVersion.WithKind("AzureCluster"))), predicates.ClusterUnpaused(log), - predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), acr.WatchFilterValue), + predicates.ResourceNotPausedAndHasFilterLabel(log, acr.WatchFilterValue), ); err != nil { return errors.Wrap(err, "failed adding a watch for ready clusters") } @@ -118,9 +117,8 @@ func (acr *AzureClusterReconciler) SetupWithManager(ctx context.Context, mgr ctr func (acr *AzureClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { ctx, cancel := context.WithTimeout(ctx, reconciler.DefaultedLoopTimeout(acr.ReconcileTimeout)) defer cancel() - log := acr.Log.WithValues("namespace", req.Namespace, "azureCluster", req.Name) - ctx, _, done := tele.StartSpanWithLogger( + ctx, log, done := tele.StartSpanWithLogger( ctx, "controllers.AzureClusterReconciler.Reconcile", tele.KVP("namespace", req.Namespace), @@ -178,7 +176,6 @@ func (acr *AzureClusterReconciler) Reconcile(ctx context.Context, req ctrl.Reque // Create the scope. 
clusterScope, err := scope.NewClusterScope(ctx, scope.ClusterScopeParams{ Client: acr.Client, - Logger: log, Cluster: cluster, AzureCluster: azureCluster, }) @@ -205,10 +202,10 @@ func (acr *AzureClusterReconciler) Reconcile(ctx context.Context, req ctrl.Reque } func (acr *AzureClusterReconciler) reconcileNormal(ctx context.Context, clusterScope *scope.ClusterScope) (reconcile.Result, error) { - ctx, _, done := tele.StartSpanWithLogger(ctx, "controllers.AzureClusterReconciler.reconcileNormal") + ctx, log, done := tele.StartSpanWithLogger(ctx, "controllers.AzureClusterReconciler.reconcileNormal") defer done() - clusterScope.Info("Reconciling AzureCluster") + log.Info("Reconciling AzureCluster") azureCluster := clusterScope.AzureCluster // If the AzureCluster doesn't have our finalizer, add it. @@ -229,9 +226,9 @@ func (acr *AzureClusterReconciler) reconcileNormal(ctx context.Context, clusterS if errors.As(err, &reconcileError) { if reconcileError.IsTransient() { if azure.IsOperationNotDoneError(reconcileError) { - clusterScope.V(2).Info(fmt.Sprintf("AzureCluster reconcile not done: %s", reconcileError.Error())) + log.V(2).Info(fmt.Sprintf("AzureCluster reconcile not done: %s", reconcileError.Error())) } else { - clusterScope.V(2).Info("transient failure to reconcile AzureCluster, retrying") + log.V(2).Info("transient failure to reconcile AzureCluster, retrying") } return reconcile.Result{RequeueAfter: reconcileError.RequeueAfter()}, nil } @@ -256,10 +253,10 @@ func (acr *AzureClusterReconciler) reconcileNormal(ctx context.Context, clusterS } func (acr *AzureClusterReconciler) reconcileDelete(ctx context.Context, clusterScope *scope.ClusterScope) (reconcile.Result, error) { - ctx, _, done := tele.StartSpanWithLogger(ctx, "controllers.AzureClusterReconciler.reconcileDelete") + ctx, log, done := tele.StartSpanWithLogger(ctx, "controllers.AzureClusterReconciler.reconcileDelete") defer done() - clusterScope.Info("Reconciling AzureCluster delete") + log.Info("Reconciling 
AzureCluster delete") azureCluster := clusterScope.AzureCluster conditions.MarkFalse(azureCluster, infrav1.NetworkInfrastructureReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") @@ -278,9 +275,9 @@ func (acr *AzureClusterReconciler) reconcileDelete(ctx context.Context, clusterS if errors.As(err, &reconcileError) { if reconcileError.IsTransient() { if azure.IsOperationNotDoneError(reconcileError) { - clusterScope.V(2).Info(fmt.Sprintf("AzureCluster delete not done: %s", reconcileError.Error())) + log.V(2).Info(fmt.Sprintf("AzureCluster delete not done: %s", reconcileError.Error())) } else { - clusterScope.V(2).Info("transient failure to delete AzureCluster, retrying") + log.V(2).Info("transient failure to delete AzureCluster, retrying") } return reconcile.Result{RequeueAfter: reconcileError.RequeueAfter()}, nil } diff --git a/controllers/azurecluster_controller_test.go b/controllers/azurecluster_controller_test.go index 1eff089a3f1..d1368078cb4 100644 --- a/controllers/azurecluster_controller_test.go +++ b/controllers/azurecluster_controller_test.go @@ -18,10 +18,7 @@ package controllers import ( "context" - "time" - "github.com/go-logr/logr" - "github.com/golang/mock/gomock" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -30,8 +27,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/internal/test" - "sigs.k8s.io/cluster-api-provider-azure/internal/test/mock_log" - "sigs.k8s.io/cluster-api-provider-azure/internal/test/record" + "sigs.k8s.io/cluster-api-provider-azure/util/reconciler" ) var _ = Describe("AzureClusterReconciler", func() { @@ -39,59 +35,20 @@ var _ = Describe("AzureClusterReconciler", func() { AfterEach(func() {}) Context("Reconcile an AzureCluster", func() { - It("should reconcile and exit early due to the cluster not having an OwnerRef", func() { - ctx := context.TODO() - logListener := record.NewListener(testEnv.LogRecorder) - del := logListener.Listen() - defer del() - - randName := test.RandomName("foo", 10) - instance := &infrav1.AzureCluster{ObjectMeta: metav1.ObjectMeta{Name: randName, Namespace: "default"}} - Expect(testEnv.Create(ctx, instance)).To(Succeed()) - defer func() { - err := testEnv.Delete(ctx, instance) - Expect(err).NotTo(HaveOccurred()) - }() - - // Make sure the Cluster exists. - Eventually(logListener.GetEntries, 10*time.Second). 
- Should(ContainElement(record.LogEntry{ - LogFunc: "Info", - Values: []interface{}{ - "namespace", - instance.Namespace, - "azureCluster", - randName, - "msg", - "Cluster Controller has not yet set OwnerRef", - }, - })) - }) - - It("should fail with context timeout error if context expires", func() { - ctx := context.TODO() - mockCtrl := gomock.NewController(GinkgoT()) - defer mockCtrl.Finish() - - log := mock_log.NewMockLogger(mockCtrl) - log.EXPECT().WithValues(gomock.Any()).DoAndReturn(func(args ...interface{}) logr.Logger { - time.Sleep(3 * time.Second) - return log - }) - - c, err := client.New(testEnv.Config, client.Options{Scheme: testEnv.GetScheme()}) - Expect(err).NotTo(HaveOccurred()) - reconciler := NewAzureClusterReconciler(c, log, testEnv.GetEventRecorderFor("azurecluster-reconciler"), 1*time.Second, "") - - instance := &infrav1.AzureCluster{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}} - _, err = reconciler.Reconcile(ctx, ctrl.Request{ + It("should not error with minimal set up", func() { + reconciler := NewAzureClusterReconciler(testEnv, testEnv.GetEventRecorderFor("azurecluster-reconciler"), reconciler.DefaultLoopTimeout, "") + By("Calling reconcile") + name := test.RandomName("foo", 10) + instance := &infrav1.AzureCluster{ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: "default"}} + result, err := reconciler.Reconcile(context.Background(), ctrl.Request{ NamespacedName: client.ObjectKey{ Namespace: instance.Namespace, Name: instance.Name, }, }) - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(Or(Equal("context deadline exceeded"), Equal("rate: Wait(n=1) would exceed context deadline"))) + + Expect(err).To(BeNil()) + Expect(result.RequeueAfter).To(BeZero()) }) }) }) diff --git a/controllers/azurecluster_reconciler.go b/controllers/azurecluster_reconciler.go index c4445db3a44..7f4c2a7e77e 100644 --- a/controllers/azurecluster_reconciler.go +++ b/controllers/azurecluster_reconciler.go @@ -20,8 +20,6 @@ import ( 
"context" "github.com/pkg/errors" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/azure/scope" "sigs.k8s.io/cluster-api-provider-azure/azure/services/bastionhosts" @@ -38,6 +36,7 @@ import ( "sigs.k8s.io/cluster-api-provider-azure/azure/services/virtualnetworks" "sigs.k8s.io/cluster-api-provider-azure/azure/services/vnetpeerings" "sigs.k8s.io/cluster-api-provider-azure/util/tele" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) // azureClusterService is the reconciler called by the AzureCluster controller. diff --git a/controllers/azurecluster_reconciler_test.go b/controllers/azurecluster_reconciler_test.go index a469ff84003..58e6a42857a 100644 --- a/controllers/azurecluster_reconciler_test.go +++ b/controllers/azurecluster_reconciler_test.go @@ -24,7 +24,6 @@ import ( "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-04-01/compute" "github.com/golang/mock/gomock" . "github.com/onsi/gomega" - infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/azure/mock_azure" diff --git a/controllers/azureidentity_controller.go b/controllers/azureidentity_controller.go index 4f7ae8af4c6..27229cb0416 100644 --- a/controllers/azureidentity_controller.go +++ b/controllers/azureidentity_controller.go @@ -21,19 +21,11 @@ import ( "fmt" "time" - infraexpv1 "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1" - "sigs.k8s.io/cluster-api-provider-azure/feature" - aadpodv1 "github.com/Azure/aad-pod-identity/pkg/apis/aadpodidentity/v1" - "github.com/go-logr/logr" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/client-go/tools/record" - infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" - "sigs.k8s.io/cluster-api-provider-azure/util/reconciler" - "sigs.k8s.io/cluster-api-provider-azure/util/system" - 
"sigs.k8s.io/cluster-api-provider-azure/util/tele" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/predicates" @@ -43,12 +35,18 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" + + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" + infraexpv1 "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1" + "sigs.k8s.io/cluster-api-provider-azure/feature" + "sigs.k8s.io/cluster-api-provider-azure/util/reconciler" + "sigs.k8s.io/cluster-api-provider-azure/util/system" + "sigs.k8s.io/cluster-api-provider-azure/util/tele" ) // AzureIdentityReconciler reconciles Azure identity objects. type AzureIdentityReconciler struct { client.Client - Log logr.Logger Recorder record.EventRecorder ReconcileTimeout time.Duration WatchFilterValue string @@ -56,11 +54,16 @@ type AzureIdentityReconciler struct { // SetupWithManager initializes this controller with a manager. func (r *AzureIdentityReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { - log := r.Log.WithValues("controller", "AzureIdentity") + _, log, done := tele.StartSpanWithLogger(ctx, + "controllers.AzureIdentityReconciler.SetupWithManager", + tele.KVP("controller", "AzureIdentity"), + ) + defer done() + c, err := ctrl.NewControllerManagedBy(mgr). WithOptions(options). For(&infrav1.AzureCluster{}). - WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)). + WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(log, r.WatchFilterValue)). 
Build(r) if err != nil { return errors.Wrap(err, "error creating controller") @@ -71,7 +74,7 @@ func (r *AzureIdentityReconciler) SetupWithManager(ctx context.Context, mgr ctrl if err = c.Watch( &source.Kind{Type: &infraexpv1.AzureManagedControlPlane{}}, &handler.EnqueueRequestForObject{}, - predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue), + predicates.ResourceNotPausedAndHasFilterLabel(log, r.WatchFilterValue), ); err != nil { return errors.Wrap(err, "failed adding a watch for ready clusters") } @@ -82,7 +85,7 @@ func (r *AzureIdentityReconciler) SetupWithManager(ctx context.Context, mgr ctrl &source.Kind{Type: &clusterv1.Cluster{}}, handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(infrav1.GroupVersion.WithKind("AzureCluster"))), predicates.ClusterUnpaused(log), - predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue), + predicates.ResourceNotPausedAndHasFilterLabel(log, r.WatchFilterValue), ); err != nil { return errors.Wrap(err, "failed adding a watch for ready clusters") } @@ -99,9 +102,8 @@ func (r *AzureIdentityReconciler) SetupWithManager(ctx context.Context, mgr ctrl func (r *AzureIdentityReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { ctx, cancel := context.WithTimeout(ctx, reconciler.DefaultedLoopTimeout(r.ReconcileTimeout)) defer cancel() - log := r.Log.WithValues("namespace", req.Namespace, "identityOwner", req.Name) - ctx, _, done := tele.StartSpanWithLogger(ctx, "controllers.AzureIdentityReconciler.Reconcile", + ctx, log, done := tele.StartSpanWithLogger(ctx, "controllers.AzureIdentityReconciler.Reconcile", tele.KVP("namespace", req.Namespace), tele.KVP("name", req.Name), tele.KVP("kind", "AzureCluster"), @@ -146,7 +148,7 @@ func (r *AzureIdentityReconciler) Reconcile(ctx context.Context, req ctrl.Reques return ctrl.Result{}, err } - bindingsToDelete := []aadpodv1.AzureIdentityBinding{} + var bindingsToDelete 
[]aadpodv1.AzureIdentityBinding for _, b := range bindings.Items { log = log.WithValues("azureidentitybinding", b.Name) diff --git a/controllers/azurejson_machine_controller.go b/controllers/azurejson_machine_controller.go index 06ce835430d..f85af1abc85 100644 --- a/controllers/azurejson_machine_controller.go +++ b/controllers/azurejson_machine_controller.go @@ -48,7 +48,6 @@ import ( // AzureJSONMachineReconciler reconciles Azure json secrets for AzureMachine objects. type AzureJSONMachineReconciler struct { client.Client - Log logr.Logger Recorder record.EventRecorder ReconcileTimeout time.Duration WatchFilterValue string @@ -56,10 +55,16 @@ type AzureJSONMachineReconciler struct { // SetupWithManager initializes this controller with a manager. func (r *AzureJSONMachineReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { + _, log, done := tele.StartSpanWithLogger(ctx, + "controllers.AzureJSONMachineReconciler.SetupWithManager", + ) + defer done() + return ctrl.NewControllerManagedBy(mgr). + WithOptions(options). For(&infrav1.AzureMachine{}). - WithEventFilter(filterUnclonedMachinesPredicate{log: r.Log}). - WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)). + WithEventFilter(filterUnclonedMachinesPredicate{log: log}). + WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(log, r.WatchFilterValue)). Owns(&corev1.Secret{}). Complete(r) } @@ -110,8 +115,6 @@ func (r *AzureJSONMachineReconciler) Reconcile(ctx context.Context, req ctrl.Req ) defer done() - log = log.WithValues("namespace", req.Namespace, "azureMachine", req.Name) - // Fetch the AzureMachine instance azureMachine := &infrav1.AzureMachine{} err := r.Get(ctx, req.NamespacedName, azureMachine) @@ -164,7 +167,6 @@ func (r *AzureJSONMachineReconciler) Reconcile(ctx context.Context, req ctrl.Req // Create the scope. 
clusterScope, err := scope.NewClusterScope(ctx, scope.ClusterScopeParams{ Client: r.Client, - Logger: log, Cluster: cluster, AzureCluster: azureCluster, }) @@ -204,7 +206,7 @@ func (r *AzureJSONMachineReconciler) Reconcile(ctx context.Context, req ctrl.Req return ctrl.Result{}, errors.Wrap(err, "failed to create cloud provider config") } - if err := reconcileAzureSecret(ctx, log, r.Client, owner, newSecret, clusterScope.ClusterName()); err != nil { + if err := reconcileAzureSecret(ctx, r.Client, owner, newSecret, clusterScope.ClusterName()); err != nil { r.Recorder.Eventf(azureMachine, corev1.EventTypeWarning, "Error reconciling cloud provider secret for AzureMachine", err.Error()) return ctrl.Result{}, errors.Wrap(err, "failed to reconcile azure secret") } diff --git a/controllers/azurejson_machine_controller_test.go b/controllers/azurejson_machine_controller_test.go index f58e207c2c6..5abef91843e 100644 --- a/controllers/azurejson_machine_controller_test.go +++ b/controllers/azurejson_machine_controller_test.go @@ -22,7 +22,6 @@ import ( "testing" "github.com/Azure/go-autorest/autorest/azure/auth" - "github.com/google/go-cmp/cmp" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -30,15 +29,13 @@ import ( "k8s.io/apimachinery/pkg/types" clientgoscheme "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/record" - "k8s.io/klog/v2/klogr" + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" + infraexpv1 "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" clusterexpv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/event" - - infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" - infraexpv1 "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1" ) func TestUnclonedMachinesPredicate(t *testing.T) { @@ -180,7 +177,6 @@ func 
TestAzureJSONMachineReconciler(t *testing.T) { reconciler := &AzureJSONMachineReconciler{ Client: client, - Log: klogr.New(), Recorder: record.NewFakeRecorder(128), } diff --git a/controllers/azurejson_machinepool_controller.go b/controllers/azurejson_machinepool_controller.go index 552a0a115a7..a91e909941a 100644 --- a/controllers/azurejson_machinepool_controller.go +++ b/controllers/azurejson_machinepool_controller.go @@ -21,7 +21,6 @@ import ( "fmt" "time" - "github.com/go-logr/logr" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -45,7 +44,6 @@ import ( // AzureJSONMachinePoolReconciler reconciles Azure json secrets for AzureMachinePool objects. type AzureJSONMachinePoolReconciler struct { client.Client - Log logr.Logger Recorder record.EventRecorder ReconcileTimeout time.Duration WatchFilterValue string @@ -53,9 +51,15 @@ type AzureJSONMachinePoolReconciler struct { // SetupWithManager initializes this controller with a manager. func (r *AzureJSONMachinePoolReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { + _, log, done := tele.StartSpanWithLogger(ctx, + "controllers.AzureJSONMachinePoolReconciler.SetupWithManager", + ) + defer done() + return ctrl.NewControllerManagedBy(mgr). + WithOptions(options). For(&expv1.AzureMachinePool{}). - WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)). + WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(log, r.WatchFilterValue)). Owns(&corev1.Secret{}). Complete(r) } @@ -137,7 +141,6 @@ func (r *AzureJSONMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl // Create the scope. 
clusterScope, err := scope.NewClusterScope(ctx, scope.ClusterScopeParams{ Client: r.Client, - Logger: log, Cluster: cluster, AzureCluster: azureCluster, }) @@ -171,7 +174,7 @@ func (r *AzureJSONMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl return ctrl.Result{}, errors.Wrap(err, "failed to create cloud provider config") } - if err := reconcileAzureSecret(ctx, log, r.Client, owner, newSecret, clusterScope.ClusterName()); err != nil { + if err := reconcileAzureSecret(ctx, r.Client, owner, newSecret, clusterScope.ClusterName()); err != nil { r.Recorder.Eventf(azureMachinePool, corev1.EventTypeWarning, "Error reconciling cloud provider secret for AzureMachinePool", err.Error()) return ctrl.Result{}, errors.Wrap(err, "failed to reconcile azure secret") } diff --git a/controllers/azurejson_machinepool_controller_test.go b/controllers/azurejson_machinepool_controller_test.go index c6886a7ef28..2d6b1f2a5f1 100644 --- a/controllers/azurejson_machinepool_controller_test.go +++ b/controllers/azurejson_machinepool_controller_test.go @@ -22,21 +22,18 @@ import ( "testing" "github.com/Azure/go-autorest/autorest/azure/auth" - "github.com/google/go-cmp/cmp" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" - "k8s.io/klog/v2/klogr" + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" + infraexpv1 "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" clusterexpv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client/fake" - - infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" - infraexpv1 "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1" ) func TestAzureJSONPoolReconciler(t *testing.T) { @@ -150,7 +147,6 @@ func TestAzureJSONPoolReconciler(t *testing.T) { reconciler := &AzureJSONMachinePoolReconciler{ 
Client: client, - Log: klogr.New(), Recorder: record.NewFakeRecorder(128), } diff --git a/controllers/azurejson_machinetemplate_controller.go b/controllers/azurejson_machinetemplate_controller.go index a091490ded2..0110400a140 100644 --- a/controllers/azurejson_machinetemplate_controller.go +++ b/controllers/azurejson_machinetemplate_controller.go @@ -21,7 +21,6 @@ import ( "fmt" "time" - "github.com/go-logr/logr" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -45,7 +44,6 @@ import ( // AzureJSONTemplateReconciler reconciles Azure json secrets for AzureMachineTemplate objects. type AzureJSONTemplateReconciler struct { client.Client - Log logr.Logger Recorder record.EventRecorder ReconcileTimeout time.Duration WatchFilterValue string @@ -53,10 +51,15 @@ type AzureJSONTemplateReconciler struct { // SetupWithManager initializes this controller with a manager. func (r *AzureJSONTemplateReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { + _, log, done := tele.StartSpanWithLogger(ctx, + "controllers.AzureJSONTemplateReconciler.SetupWithManager", + ) + defer done() + return ctrl.NewControllerManagedBy(mgr). WithOptions(options). For(&infrav1.AzureMachineTemplate{}). - WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)). + WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(log, r.WatchFilterValue)). Owns(&corev1.Secret{}). 
Complete(r) } @@ -73,8 +76,6 @@ func (r *AzureJSONTemplateReconciler) Reconcile(ctx context.Context, req ctrl.Re ) defer done() - log = log.WithValues("namespace", req.Namespace, "azureMachineTemplate", req.Name) - // Fetch the AzureMachineTemplate instance azureMachineTemplate := &infrav1.AzureMachineTemplate{} err := r.Get(ctx, req.NamespacedName, azureMachineTemplate) @@ -125,7 +126,6 @@ func (r *AzureJSONTemplateReconciler) Reconcile(ctx context.Context, req ctrl.Re // Create the scope. clusterScope, err := scope.NewClusterScope(ctx, scope.ClusterScopeParams{ Client: r.Client, - Logger: log, Cluster: cluster, AzureCluster: azureCluster, }) @@ -165,7 +165,7 @@ func (r *AzureJSONTemplateReconciler) Reconcile(ctx context.Context, req ctrl.Re return ctrl.Result{}, errors.Wrap(err, "failed to create cloud provider config") } - if err := reconcileAzureSecret(ctx, log, r.Client, owner, newSecret, clusterScope.ClusterName()); err != nil { + if err := reconcileAzureSecret(ctx, r.Client, owner, newSecret, clusterScope.ClusterName()); err != nil { r.Recorder.Eventf(azureMachineTemplate, corev1.EventTypeWarning, "Error reconciling cloud provider secret for AzureMachineTemplate", err.Error()) return ctrl.Result{}, errors.Wrap(err, "failed to reconcile azure secret") } diff --git a/controllers/azurejson_machinetemplate_controller_test.go b/controllers/azurejson_machinetemplate_controller_test.go index ae41bdd8282..00bd4f1bf9a 100644 --- a/controllers/azurejson_machinetemplate_controller_test.go +++ b/controllers/azurejson_machinetemplate_controller_test.go @@ -22,15 +22,12 @@ import ( "testing" "github.com/Azure/go-autorest/autorest/azure/auth" - "github.com/google/go-cmp/cmp" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" - "k8s.io/klog/v2/klogr" - infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" clusterv1 
"sigs.k8s.io/cluster-api/api/v1beta1" ctrl "sigs.k8s.io/controller-runtime" @@ -117,7 +114,6 @@ func TestAzureJSONTemplateReconciler(t *testing.T) { reconciler := &AzureJSONTemplateReconciler{ Client: client, - Log: klogr.New(), Recorder: record.NewFakeRecorder(128), } diff --git a/controllers/azuremachine_controller.go b/controllers/azuremachine_controller.go index 61159f81097..58c1fbab5a8 100644 --- a/controllers/azuremachine_controller.go +++ b/controllers/azuremachine_controller.go @@ -21,11 +21,16 @@ import ( "fmt" "time" - "github.com/go-logr/logr" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/client-go/tools/record" + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" + "sigs.k8s.io/cluster-api-provider-azure/azure" + "sigs.k8s.io/cluster-api-provider-azure/azure/scope" + "sigs.k8s.io/cluster-api-provider-azure/pkg/coalescing" + "sigs.k8s.io/cluster-api-provider-azure/util/reconciler" + "sigs.k8s.io/cluster-api-provider-azure/util/tele" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" capierrors "sigs.k8s.io/cluster-api/errors" "sigs.k8s.io/cluster-api/util" @@ -38,19 +43,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" - - infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" - "sigs.k8s.io/cluster-api-provider-azure/azure" - "sigs.k8s.io/cluster-api-provider-azure/azure/scope" - "sigs.k8s.io/cluster-api-provider-azure/pkg/coalescing" - "sigs.k8s.io/cluster-api-provider-azure/util/reconciler" - "sigs.k8s.io/cluster-api-provider-azure/util/tele" ) // AzureMachineReconciler reconciles an AzureMachine object. 
type AzureMachineReconciler struct { client.Client - Log logr.Logger Recorder record.EventRecorder ReconcileTimeout time.Duration WatchFilterValue string @@ -60,10 +57,9 @@ type AzureMachineReconciler struct { type azureMachineServiceCreator func(machineScope *scope.MachineScope) (*azureMachineService, error) // NewAzureMachineReconciler returns a new AzureMachineReconciler instance. -func NewAzureMachineReconciler(client client.Client, log logr.Logger, recorder record.EventRecorder, reconcileTimeout time.Duration, watchFilterValue string) *AzureMachineReconciler { +func NewAzureMachineReconciler(client client.Client, recorder record.EventRecorder, reconcileTimeout time.Duration, watchFilterValue string) *AzureMachineReconciler { amr := &AzureMachineReconciler{ Client: client, - Log: log, Recorder: recorder, ReconcileTimeout: reconcileTimeout, WatchFilterValue: watchFilterValue, @@ -76,10 +72,12 @@ func NewAzureMachineReconciler(client client.Client, log logr.Logger, recorder r // SetupWithManager initializes this controller with a manager. func (amr *AzureMachineReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options Options) error { - ctx, _, done := tele.StartSpanWithLogger(ctx, "controllers.AzureMachineReconciler.SetupWithManager") + ctx, log, done := tele.StartSpanWithLogger(ctx, + "controllers.AzureMachineReconciler.SetupWithManager", + tele.KVP("controller", "AzureMachine"), + ) defer done() - log := amr.Log.WithValues("controller", "AzureMachine") var r reconcile.Reconciler = amr if options.Cache != nil { r = coalescing.NewReconciler(amr, options.Cache, log) @@ -94,7 +92,7 @@ func (amr *AzureMachineReconciler) SetupWithManager(ctx context.Context, mgr ctr c, err := ctrl.NewControllerManagedBy(mgr). WithOptions(options.Options). For(&infrav1.AzureMachine{}). - WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), amr.WatchFilterValue)). 
+ WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(log, amr.WatchFilterValue)). // watch for changes in CAPI Machine resources Watches( &source.Kind{Type: &clusterv1.Machine{}}, @@ -120,7 +118,7 @@ func (amr *AzureMachineReconciler) SetupWithManager(ctx context.Context, mgr ctr &source.Kind{Type: &clusterv1.Cluster{}}, handler.EnqueueRequestsFromMapFunc(azureMachineMapper), predicates.ClusterUnpausedAndInfrastructureReady(log), - predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), amr.WatchFilterValue), + predicates.ResourceNotPausedAndHasFilterLabel(log, amr.WatchFilterValue), ); err != nil { return errors.Wrap(err, "failed adding a watch for ready clusters") } @@ -139,7 +137,7 @@ func (amr *AzureMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reque ctx, cancel := context.WithTimeout(ctx, reconciler.DefaultedLoopTimeout(amr.ReconcileTimeout)) defer cancel() - ctx, logger, done := tele.StartSpanWithLogger( + ctx, log, done := tele.StartSpanWithLogger( ctx, "controllers.AzureMachineReconciler.Reconcile", tele.KVP("namespace", req.Namespace), @@ -148,8 +146,6 @@ func (amr *AzureMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reque ) defer done() - logger = logger.WithValues("namespace", req.Namespace, "azureMachine", req.Name) - // Fetch the AzureMachine VM. azureMachine := &infrav1.AzureMachine{} err := amr.Get(ctx, req.NamespacedName, azureMachine) @@ -167,28 +163,29 @@ func (amr *AzureMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reque } if machine == nil { amr.Recorder.Eventf(azureMachine, corev1.EventTypeNormal, "Machine controller dependency not yet met", "Machine Controller has not yet set OwnerRef") - logger.Info("Machine Controller has not yet set OwnerRef") + log.Info("Machine Controller has not yet set OwnerRef") return reconcile.Result{}, nil } - logger = logger.WithValues("machine", machine.Name) + log = log.WithValues("machine", machine.Name) // Fetch the Cluster. 
cluster, err := util.GetClusterFromMetadata(ctx, amr.Client, machine.ObjectMeta) if err != nil { amr.Recorder.Eventf(azureMachine, corev1.EventTypeNormal, "Unable to get cluster from metadata", "Machine is missing cluster label or cluster does not exist") - logger.Info("Machine is missing cluster label or cluster does not exist") + log.Info("Machine is missing cluster label or cluster does not exist") return reconcile.Result{}, nil } - logger = logger.WithValues("cluster", cluster.Name) + log = log.WithValues("cluster", cluster.Name) // Return early if the object or Cluster is paused. if annotations.IsPaused(cluster, azureMachine) { - logger.Info("AzureMachine or linked Cluster is marked as paused. Won't reconcile") + log.Info("AzureMachine or linked Cluster is marked as paused. Won't reconcile") return ctrl.Result{}, nil } + log = log.WithValues("AzureCluster", cluster.Spec.InfrastructureRef.Name) azureClusterName := client.ObjectKey{ Namespace: azureMachine.Namespace, Name: cluster.Spec.InfrastructureRef.Name, @@ -196,16 +193,13 @@ func (amr *AzureMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reque azureCluster := &infrav1.AzureCluster{} if err := amr.Client.Get(ctx, azureClusterName, azureCluster); err != nil { amr.Recorder.Eventf(azureMachine, corev1.EventTypeNormal, "AzureCluster unavailable", "AzureCluster is not available yet") - logger.Info("AzureCluster is not available yet") + log.Info("AzureCluster is not available yet") return reconcile.Result{}, nil } - logger = logger.WithValues("AzureCluster", azureCluster.Name) - // Create the cluster scope clusterScope, err := scope.NewClusterScope(ctx, scope.ClusterScopeParams{ Client: amr.Client, - Logger: logger, Cluster: cluster, AzureCluster: azureCluster, }) @@ -216,7 +210,6 @@ func (amr *AzureMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reque // Create the machine scope machineScope, err := scope.NewMachineScope(scope.MachineScopeParams{ - Logger: logger, Client: amr.Client, 
Machine: machine, AzureMachine: azureMachine, @@ -244,13 +237,13 @@ func (amr *AzureMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reque } func (amr *AzureMachineReconciler) reconcileNormal(ctx context.Context, machineScope *scope.MachineScope, clusterScope *scope.ClusterScope) (reconcile.Result, error) { - ctx, _, done := tele.StartSpanWithLogger(ctx, "controllers.AzureMachineReconciler.reconcileNormal") + ctx, log, done := tele.StartSpanWithLogger(ctx, "controllers.AzureMachineReconciler.reconcileNormal") defer done() - machineScope.Info("Reconciling AzureMachine") + log.Info("Reconciling AzureMachine") // If the AzureMachine is in an error state, return early. if machineScope.AzureMachine.Status.FailureReason != nil || machineScope.AzureMachine.Status.FailureMessage != nil { - machineScope.Info("Error state detected, skipping reconciliation") + log.Info("Error state detected, skipping reconciliation") return reconcile.Result{}, nil } @@ -263,14 +256,14 @@ func (amr *AzureMachineReconciler) reconcileNormal(ctx context.Context, machineS // Make sure the Cluster Infrastructure is ready. if !clusterScope.Cluster.Status.InfrastructureReady { - machineScope.Info("Cluster infrastructure is not ready yet") + log.Info("Cluster infrastructure is not ready yet") conditions.MarkFalse(machineScope.AzureMachine, infrav1.VMRunningCondition, infrav1.WaitingForClusterInfrastructureReason, clusterv1.ConditionSeverityInfo, "") return reconcile.Result{}, nil } // Make sure bootstrap data is available and populated. 
if machineScope.Machine.Spec.Bootstrap.DataSecretName == nil { - machineScope.Info("Bootstrap data secret reference is not yet available") + log.Info("Bootstrap data secret reference is not yet available") conditions.MarkFalse(machineScope.AzureMachine, infrav1.VMRunningCondition, infrav1.WaitingForBootstrapDataReason, clusterv1.ConditionSeverityInfo, "") return reconcile.Result{}, nil } @@ -303,7 +296,7 @@ func (amr *AzureMachineReconciler) reconcileNormal(ctx context.Context, machineS if errors.As(err, &reconcileError) { if reconcileError.IsTerminal() { amr.Recorder.Eventf(machineScope.AzureMachine, corev1.EventTypeWarning, "ReconcileError", errors.Wrapf(err, "failed to reconcile AzureMachine").Error()) - machineScope.Error(err, "failed to reconcile AzureMachine", "name", machineScope.Name()) + log.Error(err, "failed to reconcile AzureMachine", "name", machineScope.Name()) machineScope.SetFailureReason(capierrors.CreateMachineError) machineScope.SetFailureMessage(err) machineScope.SetNotReady() @@ -313,9 +306,9 @@ func (amr *AzureMachineReconciler) reconcileNormal(ctx context.Context, machineS if reconcileError.IsTransient() { if azure.IsOperationNotDoneError(reconcileError) { - machineScope.V(2).Info(fmt.Sprintf("AzureMachine reconcile not done: %s", reconcileError.Error())) + log.V(2).Info(fmt.Sprintf("AzureMachine reconcile not done: %s", reconcileError.Error())) } else { - machineScope.V(2).Info("transient failure to reconcile AzureMachine, retrying") + log.V(2).Info("transient failure to reconcile AzureMachine, retrying") } return reconcile.Result{RequeueAfter: reconcileError.RequeueAfter()}, nil } @@ -330,17 +323,17 @@ func (amr *AzureMachineReconciler) reconcileNormal(ctx context.Context, machineS } func (amr *AzureMachineReconciler) reconcileDelete(ctx context.Context, machineScope *scope.MachineScope, clusterScope *scope.ClusterScope) (reconcile.Result, error) { - ctx, _, done := tele.StartSpanWithLogger(ctx, 
"controllers.AzureMachineReconciler.reconcileDelete") + ctx, log, done := tele.StartSpanWithLogger(ctx, "controllers.AzureMachineReconciler.reconcileDelete") defer done() - machineScope.Info("Handling deleted AzureMachine") - + log.Info("Handling deleted AzureMachine") + conditions.MarkFalse(machineScope.AzureMachine, infrav1.VMRunningCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") if err := machineScope.PatchObject(ctx); err != nil { return reconcile.Result{}, err } if ShouldDeleteIndividualResources(ctx, clusterScope) { - machineScope.Info("Deleting AzureMachine") + log.Info("Deleting AzureMachine") ams, err := amr.createAzureMachineService(machineScope) if err != nil { return reconcile.Result{}, errors.Wrap(err, "failed to create azure machine service") @@ -352,9 +345,9 @@ func (amr *AzureMachineReconciler) reconcileDelete(ctx context.Context, machineS if errors.As(err, &reconcileError) { if reconcileError.IsTransient() { if azure.IsOperationNotDoneError(reconcileError) { - machineScope.V(2).Info(fmt.Sprintf("AzureMachine delete not done: %s", reconcileError.Error())) + log.V(2).Info(fmt.Sprintf("AzureMachine delete not done: %s", reconcileError.Error())) } else { - machineScope.V(2).Info("transient failure to delete AzureMachine, retrying") + log.V(2).Info("transient failure to delete AzureMachine, retrying") } return reconcile.Result{RequeueAfter: reconcileError.RequeueAfter()}, nil } @@ -364,11 +357,11 @@ func (amr *AzureMachineReconciler) reconcileDelete(ctx context.Context, machineS return reconcile.Result{}, errors.Wrapf(err, "error deleting AzureMachine %s/%s", machineScope.Namespace(), machineScope.Name()) } } else { - machineScope.Info("Skipping AzureMachine Deletion; will delete whole resource group.") + log.Info("Skipping AzureMachine Deletion; will delete whole resource group.") } // we're done deleting this AzureMachine so remove the finalizer. 
- machineScope.Info("Removing finalizer from AzureMachine") + log.Info("Removing finalizer from AzureMachine") controllerutil.RemoveFinalizer(machineScope.AzureMachine, infrav1.MachineFinalizer) return reconcile.Result{}, nil diff --git a/controllers/azuremachine_controller_test.go b/controllers/azuremachine_controller_test.go index 8b7aa5178f2..73ec8e05120 100644 --- a/controllers/azuremachine_controller_test.go +++ b/controllers/azuremachine_controller_test.go @@ -27,16 +27,14 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/tools/record" - "k8s.io/klog/v2/klogr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/fake" - infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure/scope" "sigs.k8s.io/cluster-api-provider-azure/internal/test" "sigs.k8s.io/cluster-api-provider-azure/util/reconciler" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" ) var _ = Describe("AzureMachineReconciler", func() { @@ -45,7 +43,7 @@ var _ = Describe("AzureMachineReconciler", func() { Context("Reconcile an AzureMachine", func() { It("should not error with minimal set up", func() { - reconciler := NewAzureMachineReconciler(testEnv, testEnv.Log, testEnv.GetEventRecorderFor("azuremachine-reconciler"), reconciler.DefaultLoopTimeout, "") + reconciler := NewAzureMachineReconciler(testEnv, testEnv.GetEventRecorderFor("azuremachine-reconciler"), reconciler.DefaultLoopTimeout, "") By("Calling reconcile") name := test.RandomName("foo", 10) @@ -161,7 +159,7 @@ func TestConditions(t *testing.T) { client := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(initObjects...).Build() recorder := 
record.NewFakeRecorder(10) - reconciler := NewAzureMachineReconciler(client, klogr.New(), recorder, reconciler.DefaultLoopTimeout, "") + reconciler := NewAzureMachineReconciler(client, recorder, reconciler.DefaultLoopTimeout, "") clusterScope, err := scope.NewClusterScope(context.TODO(), scope.ClusterScopeParams{ AzureClients: scope.AzureClients{ diff --git a/controllers/common_controller_test.go b/controllers/common_controller_test.go deleted file mode 100644 index 65e65095d2e..00000000000 --- a/controllers/common_controller_test.go +++ /dev/null @@ -1,109 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controllers - -import ( - "context" - "time" - - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - - infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" - "sigs.k8s.io/cluster-api-provider-azure/internal/test" - "sigs.k8s.io/cluster-api-provider-azure/internal/test/logentries" - "sigs.k8s.io/cluster-api-provider-azure/internal/test/record" -) - -var ( - clusterControllers = []string{ - "AzureCluster", - } - - infraControllers = []string{ - "AzureMachine", - } -) - -var _ = Describe("CommonReconcilerBehaviors", func() { - BeforeEach(func() {}) - AfterEach(func() {}) - - It("should trigger reconciliation if cluster is unpaused", func() { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - logListener := record.NewListener(testEnv.LogRecorder) - del := logListener.Listen() - defer del() - - clusterName := test.RandomName("foo", 10) - azClusterName := test.RandomName("foo", 10) - cluster := &clusterv1.Cluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: clusterName, - Namespace: "default", - }, - Spec: clusterv1.ClusterSpec{ - InfrastructureRef: &corev1.ObjectReference{ - Name: azClusterName, - Namespace: "default", - Kind: "AzureCluster", - APIVersion: infrav1.GroupVersion.Identifier(), - }, - }, - } - - Expect(testEnv.Create(ctx, cluster)).To(Succeed()) - defer func() { - err := testEnv.Delete(ctx, cluster) - Expect(err).NotTo(HaveOccurred()) - }() - - cluster.Status.InfrastructureReady = true - Expect(testEnv.Status().Update(ctx, cluster)).To(Succeed()) - ec := logentries.EntryCriteria{ - ClusterName: cluster.Name, - ClusterNamespace: cluster.Namespace, - InfraControllers: infraControllers, - ClusterControllers: clusterControllers, - } - - logNotPausedEntries := logentries.GenerateCreateNotPausedLogEntries(ec) - // check to make sure the cluster has reconciled and is not in paused state - Eventually(logListener.GetEntries, test.DefaultEventualTimeout, 
1*time.Second).Should(ContainElements(logNotPausedEntries)) - - // we have tried to reconcile, and cluster was not paused - // now, we will pause the cluster and we should trigger a watch event - cluster.Spec.Paused = true - Expect(testEnv.Update(ctx, cluster)).To(Succeed()) - logPausedEntry := logentries.GenerateUpdatePausedClusterLogEntries(ec) - // check to make sure the cluster has reconciled and is paused - Eventually(logListener.GetEntries, test.DefaultEventualTimeout, 1*time.Second).Should(ContainElements(logPausedEntry)) - - // cluster was paused with an update - // now, we will unpause the cluster and we should trigger an unpause watch event for all controllers - cluster.Spec.Paused = false - Expect(testEnv.Update(ctx, cluster)).To(Succeed()) - logUpdatePausedEntries := logentries.GenerateUpdatePausedClusterLogEntries(ec) - Eventually(logListener.GetEntries, test.DefaultEventualTimeout, 1*time.Second).Should(ContainElements(logUpdatePausedEntries)) - }) - -}) diff --git a/controllers/helpers.go b/controllers/helpers.go index c7a2003f2a3..b9f1b396a07 100644 --- a/controllers/helpers.go +++ b/controllers/helpers.go @@ -432,8 +432,8 @@ func toCloudProviderBackOffConfig(source infrav1.BackOffConfig) BackOffConfig { return backOffConfig } -func reconcileAzureSecret(ctx context.Context, log logr.Logger, kubeclient client.Client, owner metav1.OwnerReference, new *corev1.Secret, clusterName string) error { - ctx, _, done := tele.StartSpanWithLogger(ctx, "controllers.reconcileAzureSecret") +func reconcileAzureSecret(ctx context.Context, kubeclient client.Client, owner metav1.OwnerReference, new *corev1.Secret, clusterName string) error { + ctx, log, done := tele.StartSpanWithLogger(ctx, "controllers.reconcileAzureSecret") defer done() // Fetch previous secret, if it exists diff --git a/controllers/helpers_test.go b/controllers/helpers_test.go index 4494c02e490..05ec1e7d352 100644 --- a/controllers/helpers_test.go +++ b/controllers/helpers_test.go @@ -22,27 +22,22 
@@ import ( "os" "testing" - "k8s.io/apimachinery/pkg/api/resource" - "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure/auth" + "github.com/golang/mock/gomock" "github.com/google/go-cmp/cmp" . "github.com/onsi/gomega" - - "github.com/golang/mock/gomock" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" clientgoscheme "k8s.io/client-go/kubernetes/scheme" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client/fake" - "sigs.k8s.io/controller-runtime/pkg/log/zap" - infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure/scope" "sigs.k8s.io/cluster-api-provider-azure/internal/test/mock_log" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/client/fake" ) func TestAzureClusterToAzureMachinesMapper(t *testing.T) { @@ -209,9 +204,6 @@ func TestReconcileAzureSecret(t *testing.T) { }, } - ctrl.SetLogger(zap.New(zap.UseDevMode(true))) - testLog := ctrl.Log.WithName("reconcileAzureSecret") - cluster := newCluster("foo") azureCluster := newAzureCluster("foo", "bar") @@ -242,7 +234,7 @@ func TestReconcileAzureSecret(t *testing.T) { g.Expect(err).NotTo(HaveOccurred()) g.Expect(cloudConfig.Data).NotTo(BeNil()) - if err := reconcileAzureSecret(context.Background(), testLog, kubeclient, owner, cloudConfig, azureCluster.ClusterName); err != nil { + if err := reconcileAzureSecret(context.Background(), kubeclient, owner, cloudConfig, azureCluster.ClusterName); err != nil { t.Error(err) } diff --git a/controllers/suite_test.go b/controllers/suite_test.go index 60c7b0d6c37..5b5b7ded99e 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -48,11 +48,10 @@ func TestAPIs(t *testing.T) { var _ = BeforeSuite(func(done Done) { By("bootstrapping test environment") testEnv = 
env.NewTestEnvironment() - - Expect(NewAzureClusterReconciler(testEnv, testEnv.Log, testEnv.GetEventRecorderFor("azurecluster-reconciler"), reconciler.DefaultLoopTimeout, ""). + Expect(NewAzureClusterReconciler(testEnv, testEnv.GetEventRecorderFor("azurecluster-reconciler"), reconciler.DefaultLoopTimeout, ""). SetupWithManager(context.Background(), testEnv.Manager, Options{Options: controller.Options{MaxConcurrentReconciles: 1}})).To(Succeed()) - Expect(NewAzureMachineReconciler(testEnv, testEnv.Log, testEnv.GetEventRecorderFor("azuremachine-reconciler"), reconciler.DefaultLoopTimeout, ""). + Expect(NewAzureMachineReconciler(testEnv, testEnv.GetEventRecorderFor("azuremachine-reconciler"), reconciler.DefaultLoopTimeout, ""). SetupWithManager(context.Background(), testEnv.Manager, Options{Options: controller.Options{MaxConcurrentReconciles: 1}})).To(Succeed()) // +kubebuilder:scaffold:scheme diff --git a/exp/api/v1beta1/azuremachinepool_webhook.go b/exp/api/v1beta1/azuremachinepool_webhook.go index ab2f5f75e58..64672cd34ea 100644 --- a/exp/api/v1beta1/azuremachinepool_webhook.go +++ b/exp/api/v1beta1/azuremachinepool_webhook.go @@ -25,16 +25,11 @@ import ( kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/validation/field" + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" ctrl "sigs.k8s.io/controller-runtime" - logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/webhook" - - infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" ) -// log is for logging in this package. -var azuremachinepoollog = logf.Log.WithName("azuremachinepool-resource") - // SetupWebhookWithManager sets up and registers the webhook with the manager. func (amp *AzureMachinePool) SetupWebhookWithManager(mgr ctrl.Manager) error { return ctrl.NewWebhookManagedBy(mgr). 
@@ -48,11 +43,9 @@ var _ webhook.Defaulter = &AzureMachinePool{} // Default implements webhook.Defaulter so a webhook will be registered for the type. func (amp *AzureMachinePool) Default() { - azuremachinepoollog.Info("default", "name", amp.Name) - err := amp.SetDefaultSSHPublicKey() if err != nil { - azuremachinepoollog.Error(err, "SetDefaultSshPublicKey failed") + ctrl.Log.WithName("AzureMachinePoolLogger").Error(err, "SetDefaultSshPublicKey failed") } amp.SetIdentityDefaults() } @@ -63,19 +56,16 @@ var _ webhook.Validator = &AzureMachinePool{} // ValidateCreate implements webhook.Validator so a webhook will be registered for the type. func (amp *AzureMachinePool) ValidateCreate() error { - azuremachinepoollog.Info("validate create", "name", amp.Name) return amp.Validate(nil) } // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type. func (amp *AzureMachinePool) ValidateUpdate(old runtime.Object) error { - azuremachinepoollog.Info("validate update", "name", amp.Name) return amp.Validate(old) } // ValidateDelete implements webhook.Validator so a webhook will be registered for the type. 
func (amp *AzureMachinePool) ValidateDelete() error { - azuremachinepoollog.Info("validate delete", "name", amp.Name) return nil } @@ -106,7 +96,6 @@ func (amp *AzureMachinePool) ValidateImage() error { image := amp.Spec.Template.Image if errs := infrav1.ValidateImage(image, field.NewPath("image")); len(errs) > 0 { agg := kerrors.NewAggregate(errs.ToAggregate().Errors()) - azuremachinepoollog.Info("Invalid image: %s", agg.Error()) return agg } } @@ -136,7 +125,6 @@ func (amp *AzureMachinePool) ValidateSSHKey() error { sshKey := amp.Spec.Template.SSHPublicKey if errs := infrav1.ValidateSSHKey(sshKey, field.NewPath("sshKey")); len(errs) > 0 { agg := kerrors.NewAggregate(errs.ToAggregate().Errors()) - azuremachinepoollog.Info("Invalid sshKey: %s", agg.Error()) return agg } } diff --git a/exp/api/v1beta1/azuremachinepoolmachine_webhook.go b/exp/api/v1beta1/azuremachinepoolmachine_webhook.go index c872edf6452..bb77cf595e9 100644 --- a/exp/api/v1beta1/azuremachinepoolmachine_webhook.go +++ b/exp/api/v1beta1/azuremachinepoolmachine_webhook.go @@ -20,13 +20,9 @@ import ( "github.com/pkg/errors" "k8s.io/apimachinery/pkg/runtime" ctrl "sigs.k8s.io/controller-runtime" - logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/webhook" ) -// log is for logging in this package. -var azuremachinepoolmachinelog = logf.Log.WithName("azuremachinepoolmachine-resource") - // SetupWebhookWithManager sets up and registers the webhook with the manager. func (ampm *AzureMachinePoolMachine) SetupWebhookWithManager(mgr ctrl.Manager) error { return ctrl.NewWebhookManagedBy(mgr). @@ -40,13 +36,11 @@ var _ webhook.Validator = &AzureMachinePoolMachine{} // ValidateCreate implements webhook.Validator so a webhook will be registered for the type. 
func (ampm *AzureMachinePoolMachine) ValidateCreate() error { - azuremachinepoolmachinelog.Info("validate create", "name", ampm.Name) return nil } // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type. func (ampm *AzureMachinePoolMachine) ValidateUpdate(old runtime.Object) error { - azuremachinepoolmachinelog.Info("validate update", "name", ampm.Name) oldMachine, ok := old.(*AzureMachinePoolMachine) if !ok { return errors.New("expected and AzureMachinePoolMachine") @@ -61,6 +55,5 @@ func (ampm *AzureMachinePoolMachine) ValidateUpdate(old runtime.Object) error { // ValidateDelete implements webhook.Validator so a webhook will be registered for the type. func (ampm *AzureMachinePoolMachine) ValidateDelete() error { - azuremachinepoolmachinelog.Info("validate delete", "name", ampm.Name) return nil } diff --git a/exp/api/v1beta1/azuremanagedcontrolplane_webhook.go b/exp/api/v1beta1/azuremanagedcontrolplane_webhook.go index 2f78ab6e118..328eaf16d68 100644 --- a/exp/api/v1beta1/azuremanagedcontrolplane_webhook.go +++ b/exp/api/v1beta1/azuremanagedcontrolplane_webhook.go @@ -27,16 +27,11 @@ import ( "k8s.io/apimachinery/pkg/runtime" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/validation/field" + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" ctrl "sigs.k8s.io/controller-runtime" - logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/webhook" - - infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" ) -// log is for logging in this package. -var azuremanagedcontrolplanelog = logf.Log.WithName("azuremanagedcontrolplane-resource") - var kubeSemver = regexp.MustCompile(`^v(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)([-0-9a-zA-Z_\.+]*)?$`) // SetupWebhookWithManager sets up and registers the webhook with the manager. 
@@ -52,8 +47,6 @@ var _ webhook.Defaulter = &AzureManagedControlPlane{} // Default implements webhook.Defaulter so a webhook will be registered for the type. func (r *AzureManagedControlPlane) Default() { - azuremanagedcontrolplanelog.Info("default", "name", r.Name) - if r.Spec.NetworkPlugin == nil { networkPlugin := "azure" r.Spec.NetworkPlugin = &networkPlugin @@ -74,7 +67,7 @@ func (r *AzureManagedControlPlane) Default() { err := r.setDefaultSSHPublicKey() if err != nil { - azuremanagedcontrolplanelog.Error(err, "SetDefaultSshPublicKey failed") + ctrl.Log.WithName("AzureManagedControlPlaneWebHookLogger").Error(err, "SetDefaultSshPublicKey failed") } r.setDefaultNodeResourceGroupName() @@ -89,14 +82,11 @@ var _ webhook.Validator = &AzureManagedControlPlane{} // ValidateCreate implements webhook.Validator so a webhook will be registered for the type. func (r *AzureManagedControlPlane) ValidateCreate() error { - azuremanagedcontrolplanelog.Info("validate create", "name", r.Name) - return r.Validate() } // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type. func (r *AzureManagedControlPlane) ValidateUpdate(oldRaw runtime.Object) error { - azuremanagedcontrolplanelog.Info("validate update", "name", r.Name) var allErrs field.ErrorList old := oldRaw.(*AzureManagedControlPlane) @@ -257,8 +247,6 @@ func (r *AzureManagedControlPlane) ValidateUpdate(oldRaw runtime.Object) error { // ValidateDelete implements webhook.Validator so a webhook will be registered for the type. 
func (r *AzureManagedControlPlane) ValidateDelete() error { - azuremanagedcontrolplanelog.Info("validate delete", "name", r.Name) - return nil } @@ -306,9 +294,7 @@ func (r *AzureManagedControlPlane) validateSSHKey() error { if r.Spec.SSHPublicKey != "" { sshKey := r.Spec.SSHPublicKey if errs := infrav1.ValidateSSHKey(sshKey, field.NewPath("sshKey")); len(errs) > 0 { - agg := kerrors.NewAggregate(errs.ToAggregate().Errors()) - azuremachinepoollog.Info("Invalid sshKey: %s", agg.Error()) - return agg + return kerrors.NewAggregate(errs.ToAggregate().Errors()) } } @@ -355,7 +341,6 @@ func (r *AzureManagedControlPlane) validateLoadBalancerProfile() error { if len(allErrs) > 0 { agg := kerrors.NewAggregate(allErrs.ToAggregate().Errors()) - azuremanagedcontrolplanelog.Info("Invalid loadBalancerProfile: %s", agg.Error()) errs = append(errs, agg) } @@ -375,9 +360,7 @@ func (r *AzureManagedControlPlane) validateAPIServerAccessProfile() error { } } if len(allErrs) > 0 { - agg := kerrors.NewAggregate(allErrs.ToAggregate().Errors()) - azuremanagedcontrolplanelog.Info("Invalid apiServerAccessProfile: %s", agg.Error()) - return agg + return kerrors.NewAggregate(allErrs.ToAggregate().Errors()) } } return nil diff --git a/exp/api/v1beta1/azuremanagedmachinepool_webhook.go b/exp/api/v1beta1/azuremanagedmachinepool_webhook.go index 25506f0f188..c8fb087a7cc 100644 --- a/exp/api/v1beta1/azuremanagedmachinepool_webhook.go +++ b/exp/api/v1beta1/azuremanagedmachinepool_webhook.go @@ -24,22 +24,14 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/validation/field" "sigs.k8s.io/cluster-api-provider-azure/azure" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/controller-runtime/pkg/client" - logf "sigs.k8s.io/controller-runtime/pkg/log" ) -// log is for logging in this package. 
-var azuremanagedmachinepoollog = logf.Log.WithName("azuremanagedmachinepool-resource") - //+kubebuilder:webhook:path=/mutate-infrastructure-cluster-x-k8s-io-v1beta1-azuremanagedmachinepool,mutating=true,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=azuremanagedmachinepools,verbs=create;update,versions=v1beta1,name=default.azuremanagedmachinepools.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 // Default implements webhook.Defaulter so a webhook will be registered for the type. func (r *AzureManagedMachinePool) Default(client client.Client) { - azuremanagedmachinepoollog.Info("default", "name", r.Name) - if r.Labels == nil { r.Labels = make(map[string]string) } @@ -54,7 +46,6 @@ func (r *AzureManagedMachinePool) Default(client client.Client) { // ValidateCreate implements webhook.Validator so a webhook will be registered for the type. func (r *AzureManagedMachinePool) ValidateCreate(client client.Client) error { - azuremanagedmachinepoollog.Info("validate create", "name", r.Name) return nil } @@ -117,8 +108,6 @@ func (r *AzureManagedMachinePool) ValidateUpdate(oldRaw runtime.Object, client c // ValidateDelete implements webhook.Validator so a webhook will be registered for the type. 
func (r *AzureManagedMachinePool) ValidateDelete(client client.Client) error { - azuremanagedmachinepoollog.Info("validate delete", "name", r.Name) - if r.Spec.Mode != string(NodePoolModeSystem) { return nil } diff --git a/exp/controllers/azuremachinepool_controller.go b/exp/controllers/azuremachinepool_controller.go index f35811ec1d7..c32e4e6ed1d 100644 --- a/exp/controllers/azuremachinepool_controller.go +++ b/exp/controllers/azuremachinepool_controller.go @@ -20,13 +20,19 @@ import ( "context" "time" - "github.com/go-logr/logr" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/tools/record" + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" + "sigs.k8s.io/cluster-api-provider-azure/azure" + "sigs.k8s.io/cluster-api-provider-azure/azure/scope" + infracontroller "sigs.k8s.io/cluster-api-provider-azure/controllers" + infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/pkg/coalescing" + "sigs.k8s.io/cluster-api-provider-azure/util/reconciler" + "sigs.k8s.io/cluster-api-provider-azure/util/tele" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" capiv1exp "sigs.k8s.io/cluster-api/exp/api/v1beta1" "sigs.k8s.io/cluster-api/util" @@ -38,21 +44,12 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" - - infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" - "sigs.k8s.io/cluster-api-provider-azure/azure" - "sigs.k8s.io/cluster-api-provider-azure/azure/scope" - infracontroller "sigs.k8s.io/cluster-api-provider-azure/controllers" - infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1" - "sigs.k8s.io/cluster-api-provider-azure/util/reconciler" - "sigs.k8s.io/cluster-api-provider-azure/util/tele" ) type ( // AzureMachinePoolReconciler reconciles an AzureMachinePool object. 
AzureMachinePoolReconciler struct { client.Client - Log logr.Logger Scheme *runtime.Scheme Recorder record.EventRecorder ReconcileTimeout time.Duration @@ -70,10 +67,9 @@ type ( type azureMachinePoolServiceCreator func(machinePoolScope *scope.MachinePoolScope) (*azureMachinePoolService, error) // NewAzureMachinePoolReconciler returns a new AzureMachinePoolReconciler instance. -func NewAzureMachinePoolReconciler(client client.Client, log logr.Logger, recorder record.EventRecorder, reconcileTimeout time.Duration, watchFilterValue string) *AzureMachinePoolReconciler { +func NewAzureMachinePoolReconciler(client client.Client, recorder record.EventRecorder, reconcileTimeout time.Duration, watchFilterValue string) *AzureMachinePoolReconciler { ampr := &AzureMachinePoolReconciler{ Client: client, - Log: log, Recorder: recorder, ReconcileTimeout: reconcileTimeout, WatchFilterValue: watchFilterValue, @@ -86,10 +82,12 @@ func NewAzureMachinePoolReconciler(client client.Client, log logr.Logger, record // SetupWithManager initializes this controller with a manager. func (ampr *AzureMachinePoolReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options infracontroller.Options) error { - ctx, _, done := tele.StartSpanWithLogger(ctx, "controllers.AzureMachinePoolReconciler.SetupWithManager") + ctx, log, done := tele.StartSpanWithLogger(ctx, + "controllers.AzureMachinePoolReconciler.SetupWithManager", + tele.KVP("controller", "AzureMachinePool"), + ) defer done() - log := ampr.Log.WithValues("controller", "AzureMachinePool") var r reconcile.Reconciler = ampr if options.Cache != nil { r = coalescing.NewReconciler(ampr, options.Cache, log) @@ -104,7 +102,7 @@ func (ampr *AzureMachinePoolReconciler) SetupWithManager(ctx context.Context, mg c, err := ctrl.NewControllerManagedBy(mgr). WithOptions(options.Options). For(&infrav1exp.AzureMachinePool{}). - WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), ampr.WatchFilterValue)). 
+ WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(log, ampr.WatchFilterValue)). // watch for changes in CAPI MachinePool resources Watches( &source.Kind{Type: &capiv1exp.MachinePool{}}, @@ -124,7 +122,7 @@ func (ampr *AzureMachinePoolReconciler) SetupWithManager(ctx context.Context, mg &source.Kind{Type: &infrav1exp.AzureMachinePoolMachine{}}, handler.EnqueueRequestsFromMapFunc(AzureMachinePoolMachineMapper(mgr.GetScheme(), log)), MachinePoolMachineHasStateOrVersionChange(log), - predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), ampr.WatchFilterValue), + predicates.ResourceNotPausedAndHasFilterLabel(log, ampr.WatchFilterValue), ); err != nil { return errors.Wrap(err, "failed adding a watch for AzureMachinePoolMachine") } @@ -139,7 +137,7 @@ func (ampr *AzureMachinePoolReconciler) SetupWithManager(ctx context.Context, mg &source.Kind{Type: &clusterv1.Cluster{}}, handler.EnqueueRequestsFromMapFunc(azureMachinePoolMapper), predicates.ClusterUnpausedAndInfrastructureReady(log), - predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), ampr.WatchFilterValue), + predicates.ResourceNotPausedAndHasFilterLabel(log, ampr.WatchFilterValue), ); err != nil { return errors.Wrap(err, "failed adding a watch for ready clusters") } @@ -207,6 +205,7 @@ func (ampr *AzureMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl. return ctrl.Result{}, nil } + logger = logger.WithValues("AzureCluster", cluster.Spec.InfrastructureRef.Name) azureClusterName := client.ObjectKey{ Namespace: azMachinePool.Namespace, Name: cluster.Spec.InfrastructureRef.Name, @@ -217,12 +216,9 @@ func (ampr *AzureMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl. 
return reconcile.Result{}, nil } - logger = logger.WithValues("AzureCluster", azureCluster.Name) - // Create the cluster scope clusterScope, err := scope.NewClusterScope(ctx, scope.ClusterScopeParams{ Client: ampr.Client, - Logger: logger, Cluster: cluster, AzureCluster: azureCluster, }) @@ -232,7 +228,6 @@ func (ampr *AzureMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl. // Create the machine pool scope machinePoolScope, err := scope.NewMachinePoolScope(scope.MachinePoolScopeParams{ - Logger: logger, Client: ampr.Client, MachinePool: machinePool, AzureMachinePool: azMachinePool, @@ -259,13 +254,13 @@ func (ampr *AzureMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl. } func (ampr *AzureMachinePoolReconciler) reconcileNormal(ctx context.Context, machinePoolScope *scope.MachinePoolScope, clusterScope *scope.ClusterScope) (_ reconcile.Result, reterr error) { - ctx, _, done := tele.StartSpanWithLogger(ctx, "controllers.AzureMachinePoolReconciler.reconcileNormal") + ctx, log, done := tele.StartSpanWithLogger(ctx, "controllers.AzureMachinePoolReconciler.reconcileNormal") defer done() - machinePoolScope.Info("Reconciling AzureMachinePool") + log.Info("Reconciling AzureMachinePool") // If the AzureMachine is in an error state, return early. if machinePoolScope.AzureMachinePool.Status.FailureReason != nil || machinePoolScope.AzureMachinePool.Status.FailureMessage != nil { - machinePoolScope.Info("Error state detected, skipping reconciliation") + log.Info("Error state detected, skipping reconciliation") return reconcile.Result{}, nil } @@ -277,13 +272,13 @@ func (ampr *AzureMachinePoolReconciler) reconcileNormal(ctx context.Context, mac } if !clusterScope.Cluster.Status.InfrastructureReady { - machinePoolScope.Info("Cluster infrastructure is not ready yet") + log.Info("Cluster infrastructure is not ready yet") return reconcile.Result{}, nil } // Make sure bootstrap data is available and populated. 
if machinePoolScope.MachinePool.Spec.Template.Spec.Bootstrap.DataSecretName == nil { - machinePoolScope.Info("Bootstrap data secret reference is not yet available") + log.Info("Bootstrap data secret reference is not yet available") return reconcile.Result{}, nil } @@ -297,12 +292,12 @@ func (ampr *AzureMachinePoolReconciler) reconcileNormal(ctx context.Context, mac var reconcileError azure.ReconcileError if errors.As(err, &reconcileError) { if reconcileError.IsTerminal() { - machinePoolScope.Error(err, "failed to reconcile AzureMachinePool", "name", machinePoolScope.Name()) + log.Error(err, "failed to reconcile AzureMachinePool", "name", machinePoolScope.Name()) return reconcile.Result{}, nil } if reconcileError.IsTransient() { - machinePoolScope.Error(err, "failed to reconcile AzureMachinePool", "name", machinePoolScope.Name()) + log.Error(err, "failed to reconcile AzureMachinePool", "name", machinePoolScope.Name()) return reconcile.Result{RequeueAfter: reconcileError.RequeueAfter()}, nil } @@ -312,12 +307,12 @@ func (ampr *AzureMachinePoolReconciler) reconcileNormal(ctx context.Context, mac return reconcile.Result{}, err } - machinePoolScope.V(2).Info("Scale Set reconciled", "id", + log.V(2).Info("Scale Set reconciled", "id", machinePoolScope.ProviderID(), "state", machinePoolScope.ProvisioningState()) switch machinePoolScope.ProvisioningState() { case infrav1.Deleting: - machinePoolScope.Info("Unexpected scale set deletion", "id", machinePoolScope.ProviderID()) + log.Info("Unexpected scale set deletion", "id", machinePoolScope.ProviderID()) ampr.Recorder.Eventf(machinePoolScope.AzureMachinePool, corev1.EventTypeWarning, "UnexpectedVMDeletion", "Unexpected Azure scale set deletion") case infrav1.Failed: err := ams.Delete(ctx) @@ -337,10 +332,10 @@ func (ampr *AzureMachinePoolReconciler) reconcileNormal(ctx context.Context, mac } func (ampr *AzureMachinePoolReconciler) reconcileDelete(ctx context.Context, machinePoolScope *scope.MachinePoolScope, clusterScope 
*scope.ClusterScope) (reconcile.Result, error) { - ctx, _, done := tele.StartSpanWithLogger(ctx, "controllers.AzureMachinePoolReconciler.reconcileDelete") + ctx, log, done := tele.StartSpanWithLogger(ctx, "controllers.AzureMachinePoolReconciler.reconcileDelete") defer done() - machinePoolScope.V(2).Info("handling deleted AzureMachinePool") + log.V(2).Info("handling deleted AzureMachinePool") if infracontroller.ShouldDeleteIndividualResources(ctx, clusterScope) { amps, err := ampr.createAzureMachinePoolService(machinePoolScope) @@ -348,14 +343,14 @@ func (ampr *AzureMachinePoolReconciler) reconcileDelete(ctx context.Context, mac return reconcile.Result{}, errors.Wrap(err, "failed creating a new AzureMachinePoolService") } - machinePoolScope.V(4).Info("deleting AzureMachinePool resource individually") + log.V(4).Info("deleting AzureMachinePool resource individually") if err := amps.Delete(ctx); err != nil { return reconcile.Result{}, errors.Wrapf(err, "error deleting AzureMachinePool %s/%s", clusterScope.Namespace(), machinePoolScope.Name()) } } // Delete succeeded, remove finalizer - machinePoolScope.V(4).Info("removing finalizer for AzureMachinePool") + log.V(4).Info("removing finalizer for AzureMachinePool") controllerutil.RemoveFinalizer(machinePoolScope.AzureMachinePool, capiv1exp.MachinePoolFinalizer) return reconcile.Result{}, nil } diff --git a/exp/controllers/azuremachinepool_controller_test.go b/exp/controllers/azuremachinepool_controller_test.go index 686a6fdd8eb..62ba604ae47 100644 --- a/exp/controllers/azuremachinepool_controller_test.go +++ b/exp/controllers/azuremachinepool_controller_test.go @@ -22,12 +22,10 @@ import ( . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/util/reconciler" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/log" - - infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1" ) var _ = Describe("AzureMachinePoolReconciler", func() { @@ -36,7 +34,7 @@ var _ = Describe("AzureMachinePoolReconciler", func() { Context("Reconcile an AzureMachinePool", func() { It("should not error with minimal set up", func() { - reconciler := NewAzureMachinePoolReconciler(testEnv, log.Log, testEnv.GetEventRecorderFor("azuremachinepool-reconciler"), + reconciler := NewAzureMachinePoolReconciler(testEnv, testEnv.GetEventRecorderFor("azuremachinepool-reconciler"), reconciler.DefaultLoopTimeout, "") By("Calling reconcile") instance := &infrav1exp.AzureMachinePool{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}} diff --git a/exp/controllers/azuremachinepoolmachine_controller.go b/exp/controllers/azuremachinepoolmachine_controller.go index b8f04f19b19..4656b80ba67 100644 --- a/exp/controllers/azuremachinepoolmachine_controller.go +++ b/exp/controllers/azuremachinepoolmachine_controller.go @@ -21,13 +21,20 @@ import ( "fmt" "time" - "github.com/go-logr/logr" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/tools/record" + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" + "sigs.k8s.io/cluster-api-provider-azure/azure" + "sigs.k8s.io/cluster-api-provider-azure/azure/scope" + "sigs.k8s.io/cluster-api-provider-azure/azure/services/scalesetvms" + infracontroller "sigs.k8s.io/cluster-api-provider-azure/controllers" + infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/pkg/coalescing" + 
"sigs.k8s.io/cluster-api-provider-azure/util/reconciler" + "sigs.k8s.io/cluster-api-provider-azure/util/tele" capierrors "sigs.k8s.io/cluster-api/errors" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" @@ -38,15 +45,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" - - infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" - "sigs.k8s.io/cluster-api-provider-azure/azure" - "sigs.k8s.io/cluster-api-provider-azure/azure/scope" - "sigs.k8s.io/cluster-api-provider-azure/azure/services/scalesetvms" - infracontroller "sigs.k8s.io/cluster-api-provider-azure/controllers" - infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1" - "sigs.k8s.io/cluster-api-provider-azure/util/reconciler" - "sigs.k8s.io/cluster-api-provider-azure/util/tele" ) type ( @@ -55,7 +53,6 @@ type ( // AzureMachinePoolMachineController handles Kubernetes change events for AzureMachinePoolMachine resources. AzureMachinePoolMachineController struct { client.Client - Log logr.Logger Scheme *runtime.Scheme Recorder record.EventRecorder ReconcileTimeout time.Duration @@ -70,10 +67,9 @@ type ( ) // NewAzureMachinePoolMachineController creates a new AzureMachinePoolMachineController to handle updates to Azure Machine Pool Machines. 
-func NewAzureMachinePoolMachineController(c client.Client, log logr.Logger, recorder record.EventRecorder, reconcileTimeout time.Duration, watchFilterValue string) *AzureMachinePoolMachineController { +func NewAzureMachinePoolMachineController(c client.Client, recorder record.EventRecorder, reconcileTimeout time.Duration, watchFilterValue string) *AzureMachinePoolMachineController { return &AzureMachinePoolMachineController{ Client: c, - Log: log, Recorder: recorder, ReconcileTimeout: reconcileTimeout, WatchFilterValue: watchFilterValue, @@ -83,11 +79,12 @@ func NewAzureMachinePoolMachineController(c client.Client, log logr.Logger, reco // SetupWithManager initializes this controller with a manager. func (ampmr *AzureMachinePoolMachineController) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options infracontroller.Options) error { - ctx, _, done := tele.StartSpanWithLogger(ctx, "controllers.AzureMachinePoolMachineController.SetupWithManager") + ctx, log, done := tele.StartSpanWithLogger(ctx, + "controllers.AzureMachinePoolMachineController.SetupWithManager", + tele.KVP("controller", "AzureMachinePoolMachine"), + ) defer done() - log := ampmr.Log.WithValues("controller", "AzureMachinePoolMachine") - var r reconcile.Reconciler = ampmr if options.Cache != nil { r = coalescing.NewReconciler(ampmr, options.Cache, log) @@ -96,7 +93,7 @@ func (ampmr *AzureMachinePoolMachineController) SetupWithManager(ctx context.Con c, err := ctrl.NewControllerManagedBy(mgr). WithOptions(options.Options). For(&infrav1exp.AzureMachinePoolMachine{}). - WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), ampmr.WatchFilterValue)). + WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(log, ampmr.WatchFilterValue)). 
Build(r) if err != nil { return errors.Wrapf(err, "error creating controller") @@ -107,7 +104,7 @@ func (ampmr *AzureMachinePoolMachineController) SetupWithManager(ctx context.Con &source.Kind{Type: &infrav1exp.AzureMachinePool{}}, handler.EnqueueRequestsFromMapFunc(AzureMachinePoolToAzureMachinePoolMachines(ctx, mgr.GetClient(), log)), MachinePoolModelHasChanged(log), - predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), ampmr.WatchFilterValue), + predicates.ResourceNotPausedAndHasFilterLabel(log, ampmr.WatchFilterValue), ); err != nil { return errors.Wrapf(err, "failed adding a watch for AzureMachinePool model changes") } @@ -204,7 +201,6 @@ func (ampmr *AzureMachinePoolMachineController) Reconcile(ctx context.Context, r // Create the cluster scope clusterScope, err := scope.NewClusterScope(ctx, scope.ClusterScopeParams{ Client: ampmr.Client, - Logger: logger, Cluster: cluster, AzureCluster: azureCluster, }) @@ -214,7 +210,6 @@ func (ampmr *AzureMachinePoolMachineController) Reconcile(ctx context.Context, r // Create the machine pool scope machineScope, err := scope.NewMachinePoolMachineScope(scope.MachinePoolMachineScopeParams{ - Logger: logger, Client: ampmr.Client, MachinePool: machinePool, AzureMachinePool: azureMachinePool, @@ -238,7 +233,7 @@ func (ampmr *AzureMachinePoolMachineController) Reconcile(ctx context.Context, r } if !clusterScope.Cluster.Status.InfrastructureReady { - machineScope.Info("Cluster infrastructure is not ready yet") + logger.Info("Cluster infrastructure is not ready yet") return reconcile.Result{}, nil } @@ -247,13 +242,13 @@ func (ampmr *AzureMachinePoolMachineController) Reconcile(ctx context.Context, r } func (ampmr *AzureMachinePoolMachineController) reconcileNormal(ctx context.Context, machineScope *scope.MachinePoolMachineScope) (_ reconcile.Result, reterr error) { - ctx, _, done := tele.StartSpanWithLogger(ctx, "controllers.AzureMachinePoolMachineController.reconcileNormal") + ctx, log, done := 
tele.StartSpanWithLogger(ctx, "controllers.AzureMachinePoolMachineController.reconcileNormal") defer done() - machineScope.Info("Reconciling AzureMachinePoolMachine") + log.Info("Reconciling AzureMachinePoolMachine") // If the AzureMachine is in an error state, return early. if machineScope.AzureMachinePool.Status.FailureReason != nil || machineScope.AzureMachinePool.Status.FailureMessage != nil { - machineScope.Info("Error state detected, skipping reconciliation") + log.Info("Error state detected, skipping reconciliation") return reconcile.Result{}, nil } @@ -263,12 +258,12 @@ func (ampmr *AzureMachinePoolMachineController) reconcileNormal(ctx context.Cont var reconcileError azure.ReconcileError if errors.As(err, &reconcileError) { if reconcileError.IsTerminal() { - machineScope.Error(err, "failed to reconcile AzureMachinePool", "name", machineScope.Name()) + log.Error(err, "failed to reconcile AzureMachinePool", "name", machineScope.Name()) return reconcile.Result{}, nil } if reconcileError.IsTransient() { - machineScope.V(4).Info("failed to reconcile AzureMachinePoolMachine", "name", machineScope.Name(), "transient_error", err) + log.V(4).Info("failed to reconcile AzureMachinePoolMachine", "name", machineScope.Name(), "transient_error", err) return reconcile.Result{RequeueAfter: reconcileError.RequeueAfter()}, nil } @@ -290,10 +285,10 @@ func (ampmr *AzureMachinePoolMachineController) reconcileNormal(ctx context.Cont } } - machineScope.V(2).Info(fmt.Sprintf("Scale Set VM is %s", state), "id", machineScope.ProviderID()) + log.V(2).Info(fmt.Sprintf("Scale Set VM is %s", state), "id", machineScope.ProviderID()) if !infrav1.IsTerminalProvisioningState(state) || !machineScope.IsReady() { - machineScope.V(2).Info("Requeuing", "state", state, "ready", machineScope.IsReady()) + log.V(2).Info("Requeuing", "state", state, "ready", machineScope.IsReady()) // we are in a non-terminal state, retry in a bit return reconcile.Result{ RequeueAfter: 30 * time.Second, @@ -304,10 
+299,10 @@ func (ampmr *AzureMachinePoolMachineController) reconcileNormal(ctx context.Cont } func (ampmr *AzureMachinePoolMachineController) reconcileDelete(ctx context.Context, machineScope *scope.MachinePoolMachineScope) (_ reconcile.Result, reterr error) { - ctx, _, done := tele.StartSpanWithLogger(ctx, "controllers.AzureMachinePoolMachineController.reconcileDelete") + ctx, log, done := tele.StartSpanWithLogger(ctx, "controllers.AzureMachinePoolMachineController.reconcileDelete") defer done() - machineScope.Info("Handling deleted AzureMachinePoolMachine") + log.Info("Handling deleted AzureMachinePoolMachine") if machineScope.AzureMachinePool == nil || !machineScope.AzureMachinePool.ObjectMeta.DeletionTimestamp.IsZero() { // deleting the entire VMSS, so just remove finalizer and VMSS delete remove the underlying infrastructure. @@ -326,12 +321,12 @@ func (ampmr *AzureMachinePoolMachineController) reconcileDelete(ctx context.Cont var reconcileError azure.ReconcileError if errors.As(err, &reconcileError) { if reconcileError.IsTerminal() { - machineScope.Error(err, "failed to delete AzureMachinePoolMachine", "name", machineScope.Name()) + log.Error(err, "failed to delete AzureMachinePoolMachine", "name", machineScope.Name()) return reconcile.Result{}, nil } if reconcileError.IsTransient() { - machineScope.V(4).Info("failed to delete AzureMachinePoolMachine", "name", machineScope.Name(), "transient_error", err) + log.V(4).Info("failed to delete AzureMachinePoolMachine", "name", machineScope.Name(), "transient_error", err) return reconcile.Result{RequeueAfter: reconcileError.RequeueAfter()}, nil } @@ -369,12 +364,12 @@ func (r *azureMachinePoolMachineReconciler) Reconcile(ctx context.Context) error // Delete will attempt to drain and delete the Azure VMSS VM. 
func (r *azureMachinePoolMachineReconciler) Delete(ctx context.Context) error { - ctx, _, done := tele.StartSpanWithLogger(ctx, "controllers.azureMachinePoolMachineReconciler.Delete") + ctx, log, done := tele.StartSpanWithLogger(ctx, "controllers.azureMachinePoolMachineReconciler.Delete") defer done() defer func() { if err := r.Scope.UpdateStatus(ctx); err != nil { - r.Scope.V(4).Info("failed tup update vmss vm status during delete") + log.V(4).Info("failed to update vmss vm status during delete") } }() diff --git a/exp/controllers/azuremachinepoolmachine_controller_test.go b/exp/controllers/azuremachinepoolmachine_controller_test.go index cf1d99496d3..737469b515b 100644 --- a/exp/controllers/azuremachinepoolmachine_controller_test.go +++ b/exp/controllers/azuremachinepoolmachine_controller_test.go @@ -30,7 +30,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - "k8s.io/klog/v2/klogr" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/azure/mock_azure" @@ -81,6 +80,7 @@ func TestAzureMachinePoolMachineReconciler_Reconcile(t *testing.T) { os.Setenv(auth.TenantID, "fooTenant") for _, c := range cases { + c := c t.Run(c.Name, func(t *testing.T) { var ( g = NewWithT(t) @@ -104,7 +104,7 @@ func TestAzureMachinePoolMachineReconciler_Reconcile(t *testing.T) { defer mockCtrl.Finish() c.Setup(cb, reconciler.EXPECT()) - controller := NewAzureMachinePoolMachineController(cb.Build(), klogr.New(), nil, 30*time.Second, "foo") + controller := NewAzureMachinePoolMachineController(cb.Build(), nil, 30*time.Second, "foo") controller.reconcilerFactory = func(_ *scope.MachinePoolMachineScope) azure.Reconciler { return reconciler } diff --git a/exp/controllers/azuremanagedcluster_controller.go b/exp/controllers/azuremanagedcluster_controller.go index c90e2bd9f96..d5cd1f32a86 100644 ---
a/exp/controllers/azuremanagedcluster_controller.go +++ b/exp/controllers/azuremanagedcluster_controller.go @@ -20,11 +20,15 @@ import ( "context" "time" - "github.com/go-logr/logr" "github.com/pkg/errors" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" + infracontroller "sigs.k8s.io/cluster-api-provider-azure/controllers" + infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1" + "sigs.k8s.io/cluster-api-provider-azure/pkg/coalescing" + "sigs.k8s.io/cluster-api-provider-azure/util/reconciler" + "sigs.k8s.io/cluster-api-provider-azure/util/tele" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" @@ -35,18 +39,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" - - infracontroller "sigs.k8s.io/cluster-api-provider-azure/controllers" - infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1" - "sigs.k8s.io/cluster-api-provider-azure/pkg/coalescing" - "sigs.k8s.io/cluster-api-provider-azure/util/reconciler" - "sigs.k8s.io/cluster-api-provider-azure/util/tele" ) // AzureManagedClusterReconciler reconciles an AzureManagedCluster object. type AzureManagedClusterReconciler struct { client.Client - Log logr.Logger Recorder record.EventRecorder ReconcileTimeout time.Duration WatchFilterValue string @@ -54,10 +51,12 @@ type AzureManagedClusterReconciler struct { // SetupWithManager initializes this controller with a manager. 
func (amcr *AzureManagedClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options infracontroller.Options) error { - ctx, _, done := tele.StartSpanWithLogger(ctx, "controllers.AzureManagedClusterReconciler.SetupWithManager") + ctx, log, done := tele.StartSpanWithLogger(ctx, + "controllers.AzureManagedClusterReconciler.SetupWithManager", + tele.KVP("controller", "AzureManagedCluster"), + ) defer done() - log := amcr.Log.WithValues("controller", "AzureManagedCluster") var r reconcile.Reconciler = amcr if options.Cache != nil { r = coalescing.NewReconciler(amcr, options.Cache, log) @@ -73,7 +72,7 @@ func (amcr *AzureManagedClusterReconciler) SetupWithManager(ctx context.Context, c, err := ctrl.NewControllerManagedBy(mgr). WithOptions(options.Options). For(azManagedCluster). - WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), amcr.WatchFilterValue)). + WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(log, amcr.WatchFilterValue)). 
// watch AzureManagedControlPlane resources Watches( &source.Kind{Type: &infrav1exp.AzureManagedControlPlane{}}, @@ -89,7 +88,7 @@ func (amcr *AzureManagedClusterReconciler) SetupWithManager(ctx context.Context, &source.Kind{Type: &clusterv1.Cluster{}}, handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(infrav1exp.GroupVersion.WithKind("AzureManagedCluster"))), predicates.ClusterUnpaused(log), - predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), amcr.WatchFilterValue), + predicates.ResourceNotPausedAndHasFilterLabel(log, amcr.WatchFilterValue), ); err != nil { return errors.Wrap(err, "failed adding a watch for ready clusters") } @@ -105,9 +104,8 @@ func (amcr *AzureManagedClusterReconciler) SetupWithManager(ctx context.Context, func (amcr *AzureManagedClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { ctx, cancel := context.WithTimeout(ctx, reconciler.DefaultedLoopTimeout(amcr.ReconcileTimeout)) defer cancel() - log := amcr.Log.WithValues("namespace", req.Namespace, "azureManagedCluster", req.Name) - ctx, _, done := tele.StartSpanWithLogger( + ctx, log, done := tele.StartSpanWithLogger( ctx, "controllers.AzureManagedClusterReconciler.Reconcile", tele.KVP("namespace", req.Namespace), diff --git a/exp/controllers/azuremanagedcontrolplane_controller.go b/exp/controllers/azuremanagedcontrolplane_controller.go index eb0d93cf7cf..989480324f5 100644 --- a/exp/controllers/azuremanagedcontrolplane_controller.go +++ b/exp/controllers/azuremanagedcontrolplane_controller.go @@ -21,19 +21,10 @@ import ( "fmt" "time" - "github.com/go-logr/logr" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/client-go/tools/record" - infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" - "sigs.k8s.io/cluster-api-provider-azure/azure" - "sigs.k8s.io/cluster-api-provider-azure/azure/scope" - infracontroller 
"sigs.k8s.io/cluster-api-provider-azure/controllers" - infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1" - "sigs.k8s.io/cluster-api-provider-azure/pkg/coalescing" - "sigs.k8s.io/cluster-api-provider-azure/util/reconciler" - "sigs.k8s.io/cluster-api-provider-azure/util/tele" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1beta1" "sigs.k8s.io/cluster-api/util" @@ -45,12 +36,20 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" + + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" + "sigs.k8s.io/cluster-api-provider-azure/azure" + "sigs.k8s.io/cluster-api-provider-azure/azure/scope" + infracontroller "sigs.k8s.io/cluster-api-provider-azure/controllers" + infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1" + "sigs.k8s.io/cluster-api-provider-azure/pkg/coalescing" + "sigs.k8s.io/cluster-api-provider-azure/util/reconciler" + "sigs.k8s.io/cluster-api-provider-azure/util/tele" ) // AzureManagedControlPlaneReconciler reconciles an AzureManagedControlPlane object. type AzureManagedControlPlaneReconciler struct { client.Client - Log logr.Logger Recorder record.EventRecorder ReconcileTimeout time.Duration WatchFilterValue string @@ -58,10 +57,12 @@ type AzureManagedControlPlaneReconciler struct { // SetupWithManager initializes this controller with a manager. 
func (amcpr *AzureManagedControlPlaneReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options infracontroller.Options) error { - ctx, _, done := tele.StartSpanWithLogger(ctx, "controllers.AzureManagedControlPlaneReconciler.SetupWithManager") + ctx, log, done := tele.StartSpanWithLogger(ctx, + "controllers.AzureManagedControlPlaneReconciler.SetupWithManager", + tele.KVP("controller", "AzureManagedControlPlane"), + ) defer done() - log := amcpr.Log.WithValues("controller", "AzureManagedControlPlane") var r reconcile.Reconciler = amcpr if options.Cache != nil { r = coalescing.NewReconciler(amcpr, options.Cache, log) @@ -80,7 +81,7 @@ func (amcpr *AzureManagedControlPlaneReconciler) SetupWithManager(ctx context.Co c, err := ctrl.NewControllerManagedBy(mgr). WithOptions(options.Options). For(azManagedControlPlane). - WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), amcpr.WatchFilterValue)). + WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(log, amcpr.WatchFilterValue)). 
// watch AzureManagedCluster resources Watches( &source.Kind{Type: &infrav1exp.AzureManagedCluster{}}, @@ -101,7 +102,7 @@ func (amcpr *AzureManagedControlPlaneReconciler) SetupWithManager(ctx context.Co &source.Kind{Type: &clusterv1.Cluster{}}, handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(infrav1exp.GroupVersion.WithKind("AzureManagedControlPlane"))), predicates.ClusterUnpausedAndInfrastructureReady(log), - predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), amcpr.WatchFilterValue), + predicates.ResourceNotPausedAndHasFilterLabel(log, amcpr.WatchFilterValue), ); err != nil { return errors.Wrap(err, "failed adding a watch for ready clusters") } @@ -125,8 +126,6 @@ func (amcpr *AzureManagedControlPlaneReconciler) Reconcile(ctx context.Context, ) defer done() - log = log.WithValues("namespace", req.Namespace, "azureManagedControlPlane", req.Name) - // Fetch the AzureManagedControlPlane instance azureControlPlane := &infrav1exp.AzureManagedControlPlane{} err := amcpr.Get(ctx, req.NamespacedName, azureControlPlane) @@ -175,7 +174,6 @@ func (amcpr *AzureManagedControlPlaneReconciler) Reconcile(ctx context.Context, // Create the scope. 
mcpScope, err := scope.NewManagedControlPlaneScope(ctx, scope.ManagedControlPlaneScopeParams{ Client: amcpr.Client, - Logger: log, Cluster: cluster, ControlPlane: azureControlPlane, PatchTarget: azureControlPlane, @@ -200,10 +198,10 @@ func (amcpr *AzureManagedControlPlaneReconciler) Reconcile(ctx context.Context, } func (amcpr *AzureManagedControlPlaneReconciler) reconcileNormal(ctx context.Context, scope *scope.ManagedControlPlaneScope) (reconcile.Result, error) { - ctx, _, done := tele.StartSpanWithLogger(ctx, "controllers.AzureManagedControlPlaneReconciler.reconcileNormal") + ctx, log, done := tele.StartSpanWithLogger(ctx, "controllers.AzureManagedControlPlaneReconciler.reconcileNormal") defer done() - scope.Logger.Info("Reconciling AzureManagedControlPlane") + log.Info("Reconciling AzureManagedControlPlane") // If the AzureManagedControlPlane doesn't have our finalizer, add it. controllerutil.AddFinalizer(scope.ControlPlane, infrav1.ClusterFinalizer) @@ -215,7 +213,7 @@ func (amcpr *AzureManagedControlPlaneReconciler) reconcileNormal(ctx context.Con if err := newAzureManagedControlPlaneReconciler(scope).Reconcile(ctx); err != nil { // Handle transient and terminal errors - log := scope.WithValues("name", scope.ControlPlane.Name, "namespace", scope.ControlPlane.Namespace) + log := log.WithValues("name", scope.ControlPlane.Name, "namespace", scope.ControlPlane.Namespace) var reconcileError azure.ReconcileError if errors.As(err, &reconcileError) { if reconcileError.IsTerminal() { @@ -242,10 +240,10 @@ func (amcpr *AzureManagedControlPlaneReconciler) reconcileNormal(ctx context.Con } func (amcpr *AzureManagedControlPlaneReconciler) reconcileDelete(ctx context.Context, scope *scope.ManagedControlPlaneScope) (reconcile.Result, error) { - ctx, _, done := tele.StartSpanWithLogger(ctx, "controllers.AzureManagedControlPlaneReconciler.reconcileDelete") + ctx, log, done := tele.StartSpanWithLogger(ctx, "controllers.AzureManagedControlPlaneReconciler.reconcileDelete") 
defer done() - scope.Logger.Info("Reconciling AzureManagedControlPlane delete") + log.Info("Reconciling AzureManagedControlPlane delete") if err := newAzureManagedControlPlaneReconciler(scope).Delete(ctx); err != nil { return reconcile.Result{}, errors.Wrapf(err, "error deleting AzureManagedControlPlane %s/%s", scope.ControlPlane.Namespace, scope.ControlPlane.Name) diff --git a/exp/controllers/azuremanagedmachinepool_controller.go b/exp/controllers/azuremanagedmachinepool_controller.go index 5e6cb1c6d40..a37cbbfadb4 100644 --- a/exp/controllers/azuremanagedmachinepool_controller.go +++ b/exp/controllers/azuremanagedmachinepool_controller.go @@ -20,11 +20,18 @@ import ( "context" "time" - "github.com/go-logr/logr" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/client-go/tools/record" + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" + "sigs.k8s.io/cluster-api-provider-azure/azure" + "sigs.k8s.io/cluster-api-provider-azure/azure/scope" + infracontroller "sigs.k8s.io/cluster-api-provider-azure/controllers" + infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1" + "sigs.k8s.io/cluster-api-provider-azure/pkg/coalescing" + "sigs.k8s.io/cluster-api-provider-azure/util/reconciler" + "sigs.k8s.io/cluster-api-provider-azure/util/tele" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1beta1" "sigs.k8s.io/cluster-api/util" @@ -36,21 +43,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" - - infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" - "sigs.k8s.io/cluster-api-provider-azure/azure" - "sigs.k8s.io/cluster-api-provider-azure/azure/scope" - infracontroller "sigs.k8s.io/cluster-api-provider-azure/controllers" - infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1" - "sigs.k8s.io/cluster-api-provider-azure/pkg/coalescing" 
- "sigs.k8s.io/cluster-api-provider-azure/util/reconciler" - "sigs.k8s.io/cluster-api-provider-azure/util/tele" ) // AzureManagedMachinePoolReconciler reconciles an AzureManagedMachinePool object. type AzureManagedMachinePoolReconciler struct { client.Client - Log logr.Logger Recorder record.EventRecorder ReconcileTimeout time.Duration WatchFilterValue string @@ -60,10 +57,9 @@ type AzureManagedMachinePoolReconciler struct { type azureManagedMachinePoolServiceCreator func(managedControlPlaneScope *scope.ManagedControlPlaneScope) (*azureManagedMachinePoolService, error) // NewAzureManagedMachinePoolReconciler returns a new AzureManagedMachinePoolReconciler instance. -func NewAzureManagedMachinePoolReconciler(client client.Client, log logr.Logger, recorder record.EventRecorder, reconcileTimeout time.Duration, watchFilterValue string) *AzureManagedMachinePoolReconciler { +func NewAzureManagedMachinePoolReconciler(client client.Client, recorder record.EventRecorder, reconcileTimeout time.Duration, watchFilterValue string) *AzureManagedMachinePoolReconciler { ampr := &AzureManagedMachinePoolReconciler{ Client: client, - Log: log, Recorder: recorder, ReconcileTimeout: reconcileTimeout, WatchFilterValue: watchFilterValue, @@ -76,10 +72,12 @@ func NewAzureManagedMachinePoolReconciler(client client.Client, log logr.Logger, // SetupWithManager initializes this controller with a manager. 
func (ammpr *AzureManagedMachinePoolReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options infracontroller.Options) error { - ctx, _, done := tele.StartSpanWithLogger(ctx, "controllers.AzureManagedMachinePoolReconciler.SetupWithManager") + ctx, log, done := tele.StartSpanWithLogger(ctx, + "controllers.AzureManagedMachinePoolReconciler.SetupWithManager", + tele.KVP("controller", "AzureManagedMachinePool"), + ) defer done() - log := ammpr.Log.WithValues("controller", "AzureManagedMachinePool") var r reconcile.Reconciler = ammpr if options.Cache != nil { r = coalescing.NewReconciler(ammpr, options.Cache, log) @@ -95,11 +93,11 @@ func (ammpr *AzureManagedMachinePoolReconciler) SetupWithManager(ctx context.Con c, err := ctrl.NewControllerManagedBy(mgr). WithOptions(options.Options). For(azManagedMachinePool). - WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), ammpr.WatchFilterValue)). + WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(log, ammpr.WatchFilterValue)). // watch for changes in CAPI MachinePool resources Watches( &source.Kind{Type: &clusterv1exp.MachinePool{}}, - handler.EnqueueRequestsFromMapFunc(MachinePoolToInfrastructureMapFunc(clusterv1exp.GroupVersion.WithKind("AzureManagedMachinePool"), ctrl.LoggerFrom(ctx))), + handler.EnqueueRequestsFromMapFunc(MachinePoolToInfrastructureMapFunc(clusterv1exp.GroupVersion.WithKind("AzureManagedMachinePool"), log)), ). 
// watch for changes in AzureManagedControlPlanes Watches( @@ -116,7 +114,7 @@ func (ammpr *AzureManagedMachinePoolReconciler) SetupWithManager(ctx context.Con &source.Kind{Type: &clusterv1.Cluster{}}, handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(infrav1exp.GroupVersion.WithKind("AzureManagedMachinePool"))), predicates.ClusterUnpausedAndInfrastructureReady(log), - predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), ammpr.WatchFilterValue), + predicates.ResourceNotPausedAndHasFilterLabel(log, ammpr.WatchFilterValue), ); err != nil { return errors.Wrap(err, "failed adding a watch for ready clusters") } @@ -133,9 +131,8 @@ func (ammpr *AzureManagedMachinePoolReconciler) SetupWithManager(ctx context.Con func (ammpr *AzureManagedMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { ctx, cancel := context.WithTimeout(ctx, reconciler.DefaultedLoopTimeout(ammpr.ReconcileTimeout)) defer cancel() - log := ammpr.Log.WithValues("namespace", req.Namespace, "azureManagedMachinePool", req.Name) - ctx, _, done := tele.StartSpanWithLogger(ctx, "controllers.AzureManagedMachinePoolReconciler.Reconcile", + ctx, log, done := tele.StartSpanWithLogger(ctx, "controllers.AzureManagedMachinePoolReconciler.Reconcile", tele.KVP("namespace", req.Namespace), tele.KVP("name", req.Name), tele.KVP("kind", "AzureManagedMachinePool"), @@ -201,7 +198,6 @@ func (ammpr *AzureManagedMachinePoolReconciler) Reconcile(ctx context.Context, r // Create the scope. 
mcpScope, err := scope.NewManagedControlPlaneScope(ctx, scope.ManagedControlPlaneScopeParams{ Client: ammpr.Client, - Logger: log, ControlPlane: controlPlane, Cluster: ownerCluster, MachinePool: ownerPool, @@ -229,10 +225,10 @@ func (ammpr *AzureManagedMachinePoolReconciler) Reconcile(ctx context.Context, r } func (ammpr *AzureManagedMachinePoolReconciler) reconcileNormal(ctx context.Context, scope *scope.ManagedControlPlaneScope) (reconcile.Result, error) { - ctx, _, done := tele.StartSpanWithLogger(ctx, "controllers.AzureManagedMachinePoolReconciler.reconcileNormal") + ctx, log, done := tele.StartSpanWithLogger(ctx, "controllers.AzureManagedMachinePoolReconciler.reconcileNormal") defer done() - scope.Logger.Info("Reconciling AzureManagedMachinePool") + log.Info("Reconciling AzureManagedMachinePool") // If the AzureManagedMachinePool doesn't have our finalizer, add it. controllerutil.AddFinalizer(scope.InfraMachinePool, infrav1.ClusterFinalizer) @@ -248,7 +244,7 @@ func (ammpr *AzureManagedMachinePoolReconciler) reconcileNormal(ctx context.Cont if err := svc.Reconcile(ctx); err != nil { // Handle transient and terminal errors - log := scope.WithValues("name", scope.InfraMachinePool.Name, "namespace", scope.InfraMachinePool.Namespace) + log := log.WithValues("name", scope.InfraMachinePool.Name, "namespace", scope.InfraMachinePool.Namespace) var reconcileError azure.ReconcileError if errors.As(err, &reconcileError) { if reconcileError.IsTerminal() { @@ -274,10 +270,10 @@ func (ammpr *AzureManagedMachinePoolReconciler) reconcileNormal(ctx context.Cont } func (ammpr *AzureManagedMachinePoolReconciler) reconcileDelete(ctx context.Context, scope *scope.ManagedControlPlaneScope) (reconcile.Result, error) { - ctx, _, done := tele.StartSpanWithLogger(ctx, "controllers.AzureManagedMachinePoolReconciler.reconcileDelete") + ctx, log, done := tele.StartSpanWithLogger(ctx, "controllers.AzureManagedMachinePoolReconciler.reconcileDelete") defer done() - 
scope.Logger.Info("Reconciling AzureManagedMachinePool delete") + log.Info("Reconciling AzureManagedMachinePool delete") if !scope.Cluster.DeletionTimestamp.IsZero() { // Cluster was deleted, skip machine pool deletion and let AKS delete the whole cluster. diff --git a/exp/controllers/azuremanagedmachinepool_reconciler.go b/exp/controllers/azuremanagedmachinepool_reconciler.go index 75f91c2fc50..048f337fe85 100644 --- a/exp/controllers/azuremanagedmachinepool_reconciler.go +++ b/exp/controllers/azuremanagedmachinepool_reconciler.go @@ -93,10 +93,10 @@ func newAzureManagedMachinePoolService(scope *scope.ManagedControlPlaneScope) (* // Reconcile reconciles all the services in a predetermined order. func (s *azureManagedMachinePoolService) Reconcile(ctx context.Context) error { - ctx, _, done := tele.StartSpanWithLogger(ctx, "controllers.azureManagedMachinePoolService.Reconcile") + ctx, log, done := tele.StartSpanWithLogger(ctx, "controllers.azureManagedMachinePoolService.Reconcile") defer done() - s.scope.Info("reconciling machine pool") + log.Info("reconciling managed machine pool") agentPoolName := s.scope.AgentPoolSpec().Name if err := s.agentPoolsSvc.Reconcile(ctx); err != nil { @@ -136,7 +136,7 @@ func (s *azureManagedMachinePoolService) Reconcile(ctx context.Context) error { s.scope.SetAgentPoolReplicas(int32(len(providerIDs))) s.scope.SetAgentPoolReady(true) - s.scope.Info("reconciled machine pool successfully") + log.Info("reconciled managed machine pool successfully") return nil } diff --git a/exp/controllers/common_controller_test.go b/exp/controllers/common_controller_test.go deleted file mode 100644 index 661d77d65bc..00000000000 --- a/exp/controllers/common_controller_test.go +++ /dev/null @@ -1,109 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controllers - -import ( - "context" - "time" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - - infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1" - "sigs.k8s.io/cluster-api-provider-azure/internal/test" - "sigs.k8s.io/cluster-api-provider-azure/internal/test/logentries" - "sigs.k8s.io/cluster-api-provider-azure/internal/test/record" -) - -var ( - clusterControllers = []string{ - "AzureManagedCluster", - } - - infraControllers = []string{ - "AzureMachinePool", - "AzureManagedControlPlane", - "AzureManagedMachinePool", - } -) - -var _ = Describe("CommonReconcilerBehaviors", func() { - BeforeEach(func() {}) - AfterEach(func() {}) - - It("should trigger reconciliation if cluster is unpaused", func() { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - logListener := record.NewListener(testEnv.LogRecorder) - del := logListener.Listen() - defer del() - - clusterName := test.RandomName("foo", 10) - azManagedClusterName := test.RandomName("foo", 10) - cluster := &clusterv1.Cluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: clusterName, - Namespace: "default", - }, - Spec: clusterv1.ClusterSpec{ - InfrastructureRef: &corev1.ObjectReference{ - Name: azManagedClusterName, - Namespace: "default", - Kind: "AzureManagedCluster", - APIVersion: infrav1exp.GroupVersion.Identifier(), - }, - }, - } - Expect(testEnv.Create(ctx, cluster)).To(Succeed()) - defer func() { - err := 
testEnv.Delete(ctx, cluster) - Expect(err).NotTo(HaveOccurred()) - }() - - cluster.Status.InfrastructureReady = true - Expect(testEnv.Status().Update(ctx, cluster)).To(Succeed()) - ec := logentries.EntryCriteria{ - ClusterName: cluster.Name, - ClusterNamespace: cluster.Namespace, - InfraControllers: infraControllers, - ClusterControllers: clusterControllers, - } - logNotPausedEntries := logentries.GenerateCreateNotPausedLogEntries(ec) - // check to make sure the cluster has reconciled and is not in paused state - Eventually(logListener.GetEntries, test.DefaultEventualTimeout, 1*time.Second).Should(ContainElements(logNotPausedEntries)) - - // we have tried to reconcile, and cluster was not paused - // now, we will pause the cluster and we should trigger a watch event - cluster.Spec.Paused = true - Expect(testEnv.Update(ctx, cluster)).To(Succeed()) - logPausedEntries := logentries.GenerateUpdatePausedClusterLogEntries(ec) - // check to make sure the cluster has reconciled and is paused - Eventually(logListener.GetEntries, test.DefaultEventualTimeout, 1*time.Second).Should(ContainElements(logPausedEntries)) - - // cluster was paused with an update - // now, we will unpause the cluster and we should trigger an unpause watch event for all controllers - cluster.Spec.Paused = false - Expect(testEnv.Update(ctx, cluster)).To(Succeed()) - logUnpausedEntries := logentries.GenerateUpdateUnpausedClusterLogEntries(ec) - Eventually(logListener.GetEntries, test.DefaultEventualTimeout, 1*time.Second).Should(ContainElements(logUnpausedEntries)) - }) - -}) diff --git a/exp/controllers/suite_test.go b/exp/controllers/suite_test.go index 48d0aa0100d..d253c4ecedd 100644 --- a/exp/controllers/suite_test.go +++ b/exp/controllers/suite_test.go @@ -27,6 +27,7 @@ import ( "sigs.k8s.io/cluster-api-provider-azure/util/reconciler" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/envtest/printer" + "sigs.k8s.io/controller-runtime/pkg/log" 
"sigs.k8s.io/cluster-api-provider-azure/internal/test/env" ) @@ -50,26 +51,29 @@ var _ = BeforeSuite(func(done Done) { By("bootstrapping test environment") testEnv = env.NewTestEnvironment() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ctx = log.IntoContext(ctx, testEnv.Log) + Expect((&AzureManagedClusterReconciler{ Client: testEnv, - Log: testEnv.Log, Recorder: testEnv.GetEventRecorderFor("azuremanagedcluster-reconciler"), - }).SetupWithManager(context.Background(), testEnv.Manager, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: 1}})).To(Succeed()) + }).SetupWithManager(ctx, testEnv.Manager, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: 1}})).To(Succeed()) Expect((&AzureManagedControlPlaneReconciler{ Client: testEnv, - Log: testEnv.Log, Recorder: testEnv.GetEventRecorderFor("azuremanagedcontrolplane-reconciler"), - }).SetupWithManager(context.Background(), testEnv.Manager, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: 1}})).To(Succeed()) + }).SetupWithManager(ctx, testEnv.Manager, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: 1}})).To(Succeed()) - Expect(NewAzureManagedMachinePoolReconciler(testEnv, testEnv.Log, testEnv.GetEventRecorderFor("azuremanagedmachinepool-reconciler"), - reconciler.DefaultLoopTimeout, "").SetupWithManager(context.Background(), testEnv.Manager, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: 1}})).To(Succeed()) + Expect(NewAzureManagedMachinePoolReconciler(testEnv, testEnv.GetEventRecorderFor("azuremanagedmachinepool-reconciler"), + reconciler.DefaultLoopTimeout, "").SetupWithManager(ctx, testEnv.Manager, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: 1}})).To(Succeed()) - Expect(NewAzureMachinePoolReconciler(testEnv, testEnv.Log, testEnv.GetEventRecorderFor("azuremachinepool-reconciler"), - reconciler.DefaultLoopTimeout, 
"").SetupWithManager(context.Background(), testEnv.Manager, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: 1}})).To(Succeed()) + Expect(NewAzureMachinePoolReconciler(testEnv, testEnv.GetEventRecorderFor("azuremachinepool-reconciler"), + reconciler.DefaultLoopTimeout, "").SetupWithManager(ctx, testEnv.Manager, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: 1}})).To(Succeed()) - Expect(NewAzureMachinePoolMachineController(testEnv, testEnv.Log, testEnv.GetEventRecorderFor("azuremachinepoolmachine-reconciler"), - reconciler.DefaultLoopTimeout, "").SetupWithManager(context.Background(), testEnv.Manager, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: 1}})).To(Succeed()) + Expect(NewAzureMachinePoolMachineController(testEnv, testEnv.GetEventRecorderFor("azuremachinepoolmachine-reconciler"), + reconciler.DefaultLoopTimeout, "").SetupWithManager(ctx, testEnv.Manager, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: 1}})).To(Succeed()) // +kubebuilder:scaffold:scheme @@ -81,7 +85,7 @@ var _ = BeforeSuite(func(done Done) { Eventually(func() bool { nodes := &corev1.NodeList{} - if err := testEnv.Client.List(context.Background(), nodes); err != nil { + if err := testEnv.Client.List(ctx, nodes); err != nil { return false } return true diff --git a/internal/test/logentries/pause_events.go b/internal/test/logentries/pause_events.go deleted file mode 100644 index 7bfad4d8053..00000000000 --- a/internal/test/logentries/pause_events.go +++ /dev/null @@ -1,165 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package logentries - -import ( - "sigs.k8s.io/cluster-api-provider-azure/internal/test/matchers/gomega" -) - -type ( - EntryCriteria struct { - ClusterNamespace string - ClusterName string - InfraControllers []string - ClusterControllers []string - } -) - -func GenerateCreateNotPausedLogEntries(ec EntryCriteria) []gomega.LogMatcher { - infraEntries := make([]gomega.LogMatcher, len(ec.InfraControllers)) - for i, c := range ec.InfraControllers { - c := c - infraEntries[i] = gomega.LogContains( - "controller", - c, - "predicate", - "ClusterUnpausedAndInfrastructureReady", - "predicate", - "ClusterCreateNotPaused", - "eventType", - "create", - "namespace", - ec.ClusterNamespace, - "cluster", - ec.ClusterName, - "msg", - "Cluster is not paused, allowing further processing", - ).WithLevel(6).WithLogFunc("Info") - } - - clusterEntries := make([]gomega.LogMatcher, len(ec.ClusterControllers)) - for i, c := range ec.ClusterControllers { - c := c - clusterEntries[i] = gomega.LogContains( - "controller", - c, - "predicate", - "ClusterUnpaused", - "predicate", - "ClusterCreateNotPaused", - "eventType", - "create", - "namespace", - ec.ClusterNamespace, - "cluster", - ec.ClusterName, - "msg", - "Cluster is not paused, allowing further processing", - ).WithLevel(6).WithLogFunc("Info") - } - return append(clusterEntries, infraEntries...) 
-} - -func GenerateUpdatePausedClusterLogEntries(ec EntryCriteria) []gomega.LogMatcher { - infraEntries := make([]gomega.LogMatcher, len(ec.InfraControllers)) - for i, c := range ec.InfraControllers { - c := c - infraEntries[i] = gomega.LogContains( - "controller", - c, - "predicate", - "ClusterUnpausedAndInfrastructureReady", - "predicate", - "ClusterUpdateUnpaused", - "eventType", - "update", - "namespace", - ec.ClusterNamespace, - "cluster", - ec.ClusterName, - "msg", - "Cluster was not unpaused, blocking further processing", - ).WithLevel(6).WithLogFunc("Info") - } - - clusterEntries := make([]gomega.LogMatcher, len(ec.ClusterControllers)) - for i, c := range ec.ClusterControllers { - c := c - clusterEntries[i] = gomega.LogContains( - "controller", - c, - "predicate", - "ClusterUnpaused", - "predicate", - "ClusterUpdateUnpaused", - "eventType", - "update", - "namespace", - ec.ClusterNamespace, - "cluster", - ec.ClusterName, - "msg", - "Cluster was not unpaused, blocking further processing", - ).WithLevel(6).WithLogFunc("Info") - } - return append(clusterEntries, infraEntries...) 
-} - -func GenerateUpdateUnpausedClusterLogEntries(ec EntryCriteria) []gomega.LogMatcher { - infraEntries := make([]gomega.LogMatcher, len(ec.InfraControllers)) - for i, c := range ec.InfraControllers { - c := c - infraEntries[i] = gomega.LogContains( - "controller", - c, - "predicate", - "ClusterUnpausedAndInfrastructureReady", - "predicate", - "ClusterUpdateUnpaused", - "eventType", - "update", - "namespace", - ec.ClusterNamespace, - "cluster", - ec.ClusterName, - "msg", - "Cluster was unpaused, allowing further processing", - ).WithLevel(4).WithLogFunc("Info") - } - - clusterEntries := make([]gomega.LogMatcher, len(ec.ClusterControllers)) - for i, c := range ec.ClusterControllers { - c := c - clusterEntries[i] = gomega.LogContains( - "controller", - c, - "predicate", - "ClusterUnpaused", - "predicate", - "ClusterUpdateUnpaused", - "eventType", - "update", - "namespace", - ec.ClusterNamespace, - "cluster", - ec.ClusterName, - "msg", - "Cluster was unpaused, allowing further processing", - ).WithLevel(4).WithLogFunc("Info") - } - return append(clusterEntries, infraEntries...) -} diff --git a/internal/test/test.go b/internal/test/test.go index 0e518da910d..e32a80828aa 100644 --- a/internal/test/test.go +++ b/internal/test/test.go @@ -18,15 +18,10 @@ package test import ( "fmt" - "time" "k8s.io/apimachinery/pkg/util/rand" ) -const ( - DefaultEventualTimeout = 20 * time.Second -) - // RandomName will generate a random name "{prefix}-{rand(len)}". 
func RandomName(prefix string, len int) string { return fmt.Sprintf("%s-%s", prefix, rand.String(len)) diff --git a/main.go b/main.go index 6fefa5331fb..2559a4ddd7d 100644 --- a/main.go +++ b/main.go @@ -27,6 +27,7 @@ import ( // +kubebuilder:scaffold:imports + aadpodv1 "github.com/Azure/aad-pod-identity/pkg/apis/aadpodidentity/v1" "github.com/spf13/pflag" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -36,7 +37,6 @@ import ( cgrecord "k8s.io/client-go/tools/record" "k8s.io/klog/v2" "k8s.io/klog/v2/klogr" - "sigs.k8s.io/cluster-api-provider-azure/pkg/coalescing" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" clusterv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1alpha4" @@ -47,8 +47,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/manager" - aadpodv1 "github.com/Azure/aad-pod-identity/pkg/apis/aadpodidentity/v1" - infrav1alpha3 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha3" infrav1alpha4 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha4" infrav1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" @@ -58,6 +56,7 @@ import ( infrav1beta1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1" infrav1controllersexp "sigs.k8s.io/cluster-api-provider-azure/exp/controllers" "sigs.k8s.io/cluster-api-provider-azure/feature" + "sigs.k8s.io/cluster-api-provider-azure/pkg/coalescing" "sigs.k8s.io/cluster-api-provider-azure/pkg/ot" "sigs.k8s.io/cluster-api-provider-azure/util/reconciler" "sigs.k8s.io/cluster-api-provider-azure/util/webhook" @@ -252,6 +251,8 @@ func main() { pflag.CommandLine.AddGoFlagSet(flag.CommandLine) pflag.Parse() + ctrl.SetLogger(klogr.New()) + if watchNamespace != "" { setupLog.Info("Watching cluster-api objects only in namespace for reconciliation", "namespace", watchNamespace) } @@ -263,8 +264,6 @@ func main() { }() } - ctrl.SetLogger(klogr.New()) - // Machine and cluster operations can create enough 
events to trigger the event recorder spam filter // Setting the burst size higher ensures all events will be recorded and submitted to the API broadcaster := cgrecord.NewBroadcasterWithCorrelatorOptions(cgrecord.CorrelatorOptions{ @@ -329,7 +328,7 @@ func registerControllers(ctx context.Context, mgr manager.Manager) { if err != nil { setupLog.Error(err, "failed to build machineCache ReconcileCache") } - if err := controllers.NewAzureMachineReconciler(mgr.GetClient(), ctrl.Log.WithName("controllers").WithName("AzureMachine"), + if err := controllers.NewAzureMachineReconciler(mgr.GetClient(), mgr.GetEventRecorderFor("azuremachine-reconciler"), reconcileTimeout, watchFilterValue, @@ -344,7 +343,6 @@ func registerControllers(ctx context.Context, mgr manager.Manager) { } if err := controllers.NewAzureClusterReconciler( mgr.GetClient(), - ctrl.Log.WithName("controllers").WithName("AzureCluster"), mgr.GetEventRecorderFor("azurecluster-reconciler"), reconcileTimeout, watchFilterValue, @@ -355,7 +353,6 @@ func registerControllers(ctx context.Context, mgr manager.Manager) { if err := (&controllers.AzureJSONTemplateReconciler{ Client: mgr.GetClient(), - Log: ctrl.Log.WithName("controllers").WithName("AzureJSONTemplate"), Recorder: mgr.GetEventRecorderFor("azurejsontemplate-reconciler"), ReconcileTimeout: reconcileTimeout, WatchFilterValue: watchFilterValue, @@ -366,7 +363,6 @@ func registerControllers(ctx context.Context, mgr manager.Manager) { if err := (&controllers.AzureJSONMachineReconciler{ Client: mgr.GetClient(), - Log: ctrl.Log.WithName("controllers").WithName("AzureJSONMachine"), Recorder: mgr.GetEventRecorderFor("azurejsonmachine-reconciler"), ReconcileTimeout: reconcileTimeout, WatchFilterValue: watchFilterValue, @@ -377,7 +373,6 @@ func registerControllers(ctx context.Context, mgr manager.Manager) { if err := (&controllers.AzureIdentityReconciler{ Client: mgr.GetClient(), - Log: ctrl.Log.WithName("controllers").WithName("AzureIdentity"), Recorder: 
mgr.GetEventRecorderFor("azureidentity-reconciler"), ReconcileTimeout: reconcileTimeout, WatchFilterValue: watchFilterValue, @@ -396,7 +391,6 @@ func registerControllers(ctx context.Context, mgr manager.Manager) { if err := infrav1controllersexp.NewAzureMachinePoolReconciler( mgr.GetClient(), - ctrl.Log.WithName("controllers").WithName("AzureMachinePool"), mgr.GetEventRecorderFor("azuremachinepool-reconciler"), reconcileTimeout, watchFilterValue, @@ -412,7 +406,6 @@ func registerControllers(ctx context.Context, mgr manager.Manager) { if err := infrav1controllersexp.NewAzureMachinePoolMachineController( mgr.GetClient(), - ctrl.Log.WithName("controllers").WithName("AzureMachinePoolMachine"), mgr.GetEventRecorderFor("azuremachinepoolmachine-reconciler"), reconcileTimeout, watchFilterValue, @@ -423,7 +416,6 @@ func registerControllers(ctx context.Context, mgr manager.Manager) { if err := (&controllers.AzureJSONMachinePoolReconciler{ Client: mgr.GetClient(), - Log: ctrl.Log.WithName("controllers").WithName("AzureJSONMachinePool"), Recorder: mgr.GetEventRecorderFor("azurejsonmachinepool-reconciler"), ReconcileTimeout: reconcileTimeout, WatchFilterValue: watchFilterValue, @@ -440,7 +432,6 @@ func registerControllers(ctx context.Context, mgr manager.Manager) { if err := infrav1controllersexp.NewAzureManagedMachinePoolReconciler( mgr.GetClient(), - ctrl.Log.WithName("controllers").WithName("AzureManagedMachinePool"), mgr.GetEventRecorderFor("azuremanagedmachinepoolmachine-reconciler"), reconcileTimeout, watchFilterValue, @@ -456,7 +447,6 @@ func registerControllers(ctx context.Context, mgr manager.Manager) { if err := (&infrav1controllersexp.AzureManagedClusterReconciler{ Client: mgr.GetClient(), - Log: ctrl.Log.WithName("controllers").WithName("AzureManagedCluster"), Recorder: mgr.GetEventRecorderFor("azuremanagedcluster-reconciler"), ReconcileTimeout: reconcileTimeout, WatchFilterValue: watchFilterValue, @@ -472,7 +462,6 @@ func registerControllers(ctx context.Context, 
mgr manager.Manager) { if err := (&infrav1controllersexp.AzureManagedControlPlaneReconciler{ Client: mgr.GetClient(), - Log: ctrl.Log.WithName("controllers").WithName("AzureManagedControlPlane"), Recorder: mgr.GetEventRecorderFor("azuremanagedcontrolplane-reconciler"), ReconcileTimeout: reconcileTimeout, WatchFilterValue: watchFilterValue, diff --git a/util/tele/composite_logger.go b/util/tele/composite_logger.go index 07bd885f4ea..b7cb5667632 100644 --- a/util/tele/composite_logger.go +++ b/util/tele/composite_logger.go @@ -35,6 +35,10 @@ func (c *compositeLogger) Enabled() bool { func (c *compositeLogger) iter(fn func(l logr.Logger)) { for _, l := range c.loggers { + // the callDepthLogger interface allows us to change the depth of the stack so that we can get the real + // line where the log statement was called. We need to do this because the composite logger adds to the + // call stack due to wrapping the internal logger. + l = logr.WithCallDepth(l, 3) fn(l) } } @@ -52,19 +56,34 @@ func (c *compositeLogger) Error(err error, msg string, keysAndValues ...interfac } func (c *compositeLogger) V(level int) logr.Logger { - return c + var loggers = make([]logr.Logger, len(c.loggers)) + for i, l := range c.loggers { + loggers[i] = l.V(level) + } + + return &compositeLogger{ + loggers: loggers, + } } func (c *compositeLogger) WithValues(keysAndValues ...interface{}) logr.Logger { + var loggers = make([]logr.Logger, len(c.loggers)) for i, l := range c.loggers { - c.loggers[i] = l.WithValues(keysAndValues...) + loggers[i] = l.WithValues(keysAndValues...) 
+ } + + return &compositeLogger{ + loggers: loggers, } - return c } func (c *compositeLogger) WithName(name string) logr.Logger { + var loggers = make([]logr.Logger, len(c.loggers)) for i, l := range c.loggers { - c.loggers[i] = l.WithName(name) + loggers[i] = l.WithName(name) + } + + return &compositeLogger{ + loggers: loggers, } - return c } diff --git a/util/tele/span_logger.go b/util/tele/span_logger.go index 18d6ef13916..ed843923521 100644 --- a/util/tele/span_logger.go +++ b/util/tele/span_logger.go @@ -44,7 +44,7 @@ func (s *spanLogger) Enabled() bool { } func (s *spanLogger) kvsToAttrs(keysAndValues ...interface{}) []attribute.KeyValue { - ret := []attribute.KeyValue{} + var ret []attribute.KeyValue for i := 0; i < len(keysAndValues); i += 2 { kv1 := fmt.Sprintf("%s", keysAndValues[i]) kv2 := fmt.Sprintf("%s", keysAndValues[i+1]) @@ -158,7 +158,13 @@ func StartSpanWithLogger( endFn := func() { span.End() } - lggr := log.FromContext(ctx).WithName(spanName) + + kvs := make([]interface{}, 0, 2*len(cfg.KVPs)) + for k, v := range cfg.KVPs { + kvs = append(kvs, k, v) + } + + lggr := log.FromContext(ctx, kvs...).WithName(spanName) return ctx, &compositeLogger{ loggers: []logr.Logger{ corrIDLogger(ctx, lggr),