diff --git a/cloud/scope/cluster.go b/cloud/scope/cluster.go index 7fbe688..312f41c 100644 --- a/cloud/scope/cluster.go +++ b/cloud/scope/cluster.go @@ -30,11 +30,11 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/klog/v2/klogr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log/zap" ) // ClusterScopeParams defines the input parameters used to create a new Scope. @@ -58,7 +58,7 @@ func NewClusterScope(params ClusterScopeParams) (*ClusterScope, error) { } if params.Logger == nil { - log := klogr.New() + log := zap.New(zap.UseDevMode(true)) params.Logger = &log } @@ -84,7 +84,7 @@ func NewClusterScope(params ClusterScopeParams) (*ClusterScope, error) { Context: scopeContext, } - authorizer, err := azhciauth.ReconcileAzureStackHCIAccess(scope.Context, scope.Client, agentFqdn) + authorizer, err := azhciauth.ReconcileAzureStackHCIAccess(*params.Logger, scope.Context, scope.Client, agentFqdn) if err != nil { return nil, errors.Wrap(err, "error creating azurestackhci services. can not authenticate to azurestackhci") } diff --git a/cloud/scope/loadbalancer.go b/cloud/scope/loadbalancer.go index 14a7ac9..934c27b 100644 --- a/cloud/scope/loadbalancer.go +++ b/cloud/scope/loadbalancer.go @@ -24,12 +24,12 @@ import ( infrav1 "github.com/microsoft/cluster-api-provider-azurestackhci/api/v1beta1" "github.com/microsoft/moc/pkg/diagnostics" "github.com/pkg/errors" - "k8s.io/klog/v2/klogr" "k8s.io/utils/pointer" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" capierrors "sigs.k8s.io/cluster-api/errors" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log/zap" ) // LoadBalancerScopeParams defines the input parameters used to create a new LoadBalancerScope. 
@@ -53,7 +53,7 @@ func NewLoadBalancerScope(params LoadBalancerScopeParams) (*LoadBalancerScope, e } if params.Logger == nil { - log := klogr.New() + log := zap.New(zap.UseDevMode(true)) params.Logger = &log } diff --git a/cloud/scope/machine.go b/cloud/scope/machine.go index eb43716..a468a70 100644 --- a/cloud/scope/machine.go +++ b/cloud/scope/machine.go @@ -26,7 +26,6 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/klog/v2/klogr" "k8s.io/utils/pointer" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/controllers/noderefutil" @@ -35,6 +34,7 @@ import ( "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log/zap" ) // MachineScopeParams defines the input parameters used to create a new MachineScope. @@ -68,7 +68,7 @@ func NewMachineScope(params MachineScopeParams) (*MachineScope, error) { } if params.Logger == nil { - log := klogr.New() + log := zap.New(zap.UseDevMode(true)) params.Logger = &log } diff --git a/cloud/scope/virtualmachine.go b/cloud/scope/virtualmachine.go index 6d5bffe..6ff8456 100644 --- a/cloud/scope/virtualmachine.go +++ b/cloud/scope/virtualmachine.go @@ -28,13 +28,13 @@ import ( "github.com/microsoft/moc/pkg/auth" "github.com/microsoft/moc/pkg/diagnostics" "github.com/pkg/errors" - "k8s.io/klog/v2/klogr" "k8s.io/utils/pointer" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" capierrors "sigs.k8s.io/cluster-api/errors" "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log/zap" ) // MachineScopeParams defines the input parameters used to create a new VirtualMachineScope. 
@@ -57,7 +57,7 @@ func NewVirtualMachineScope(params VirtualMachineScopeParams) (*VirtualMachineSc } if params.Logger == nil { - log := klogr.New() + log := zap.New(zap.UseDevMode(true)) params.Logger = &log } @@ -67,7 +67,8 @@ func NewVirtualMachineScope(params VirtualMachineScopeParams) (*VirtualMachineSc } params.AzureStackHCIClients.CloudAgentFqdn = agentFqdn - authorizer, err := azhciauth.ReconcileAzureStackHCIAccess(context.Background(), params.Client, agentFqdn) + scopeContext := diagnostics.NewContextWithCorrelationId(context.Background(), params.AzureStackHCIVirtualMachine.GetAnnotations()[infrav1.AzureCorrelationIDAnnotationKey]) + authorizer, err := azhciauth.ReconcileAzureStackHCIAccess(*params.Logger, scopeContext, params.Client, agentFqdn) if err != nil { return nil, errors.Wrap(err, "failed to create azurestackhci session") } @@ -77,7 +78,6 @@ func NewVirtualMachineScope(params VirtualMachineScopeParams) (*VirtualMachineSc if err != nil { return nil, errors.Wrap(err, "failed to init patch helper") } - scopeContext := diagnostics.NewContextWithCorrelationId(context.Background(), params.AzureStackHCIVirtualMachine.GetAnnotations()[infrav1.AzureCorrelationIDAnnotationKey]) return &VirtualMachineScope{ client: params.Client, AzureStackHCIVirtualMachine: params.AzureStackHCIVirtualMachine, diff --git a/cloud/telemetry/logutils.go b/cloud/telemetry/logutils.go index a174791..73cc2fc 100644 --- a/cloud/telemetry/logutils.go +++ b/cloud/telemetry/logutils.go @@ -12,6 +12,7 @@ import ( "github.com/microsoft/cluster-api-provider-azurestackhci/cloud/services/health" "github.com/microsoft/cluster-api-provider-azurestackhci/cloud/services/versions" mocerrors "github.com/microsoft/moc/pkg/errors" + ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" ) type MocResourceType string @@ -28,12 +29,21 @@ const ( Group MocResourceType = "Group" ) -type MocOperation string +type ResourceType string const ( - CreateOrUpdate MocOperation = 
"CreateOrUpdate" - Delete MocOperation = "Delete" - Get MocOperation = "Get" + CRD ResourceType = "CRD" + Status ResourceType = "Status" +) + +type Operation string + +const ( + Create Operation = "Create" + CreateOrUpdate Operation = "CreateOrUpdate" + Update Operation = "Update" + Delete Operation = "Delete" + Get Operation = "Get" ) type OperationLog struct { @@ -47,7 +57,7 @@ type OperationLog struct { Message string `json:"msg"` } -func WriteMocOperationLog(logger logr.Logger, operation MocOperation, crResourceName string, mocResourceType MocResourceType, mocResourceName string, params interface{}, err error) { +func WriteMocOperationLog(logger logr.Logger, operation Operation, crResourceName string, mocResourceType MocResourceType, mocResourceName string, params interface{}, err error) { errcode := "0" message := "" if err != nil { @@ -66,9 +76,42 @@ func WriteMocOperationLog(logger logr.Logger, operation MocOperation, crResource Message: message, } - jsonData, err := json.Marshal(oplog) + jsonData, serializeError := json.Marshal(oplog) + if serializeError != nil { + logger.Error(serializeError, "Unable to serialize operation log object.", "resourceName", crResourceName) + } else { + logger.Info(string(jsonData)) + } +} + +// RecordHybridAKSCRDChange need to be called when CRD changed. 
+func RecordHybridAKSCRDChange(logger logr.Logger, parentResource string, resource string, action Operation, resourceType ResourceType, params interface{}, err error) { + errMessage := "" + errCode := "0" if err != nil { - logger.Error(err, "Unable to serialize operation log object.", "resourceName", crResourceName) + errCode = "-1" + errMessage = err.Error() + } + + oplog := OperationLog{ + Timestamp: time.Now().Format(time.RFC3339), + ParentResource: parentResource, + Resource: resource, + FilterKeyword: "RESOURCE_ACTION", + Action: fmt.Sprintf("%s %s", action, resourceType), + Params: params, + ErrorCode: errCode, + Message: errMessage, + } + + jsonData, serializeError := json.Marshal(oplog) + if serializeError != nil { + logger.Error(serializeError, "Unable to serialize operation log object", + "timestamp", time.Now().Format(time.RFC3339), + "parent_resource", parentResource, + "resource", resource, + "filter_keyword", "RESOURCE_ACTION", + "action", action) } else { logger.Info(string(jsonData)) } @@ -132,3 +175,27 @@ func getVersionsService(scope scope.ScopeInterface) *versions.Service { versionsService = versions.NewService(scope) return versionsService } + +func IsCRDUpdate(operationResult ctrlutil.OperationResult) bool { + if operationResult == ctrlutil.OperationResultCreated || operationResult == ctrlutil.OperationResultUpdated || operationResult == ctrlutil.OperationResultUpdatedStatus || + operationResult == ctrlutil.OperationResultUpdatedStatusOnly { + return true + } + + return false +} + +func ConvertOperationResult(operationResult ctrlutil.OperationResult) (Operation, ResourceType) { + switch operationResult { + case ctrlutil.OperationResultCreated: + return Create, CRD + case ctrlutil.OperationResultUpdated: + return Update, CRD + case ctrlutil.OperationResultUpdatedStatus: + fallthrough + case ctrlutil.OperationResultUpdatedStatusOnly: + return Update, Status + default: + return "", "" + } +} diff --git a/controllers/azurestackhcicluster_controller.go b/controllers/azurestackhcicluster_controller.go
index bd360a9..92c44a7 100644 --- a/controllers/azurestackhcicluster_controller.go +++ b/controllers/azurestackhcicluster_controller.go @@ -19,12 +19,14 @@ package controllers import ( "context" + "fmt" "time" "github.com/go-logr/logr" infrav1 "github.com/microsoft/cluster-api-provider-azurestackhci/api/v1beta1" azurestackhci "github.com/microsoft/cluster-api-provider-azurestackhci/cloud" "github.com/microsoft/cluster-api-provider-azurestackhci/cloud/scope" + "github.com/microsoft/cluster-api-provider-azurestackhci/cloud/telemetry" infrav1util "github.com/microsoft/cluster-api-provider-azurestackhci/pkg/util" mocerrors "github.com/microsoft/moc/pkg/errors" "github.com/pkg/errors" @@ -248,7 +250,16 @@ func (r *AzureStackHCIClusterReconciler) deleteOrphanedMachines(clusterScope *sc } } clusterScope.Info("Deleting Orphaned Machine", "Name", azhciMachine.Name, "AzureStackHCICluster", clusterScope.AzureStackHCICluster.Name) - if err := r.Client.Delete(clusterScope.Context, azhciMachine); err != nil { + err := r.Client.Delete(clusterScope.Context, azhciMachine) + telemetry.RecordHybridAKSCRDChange( + clusterScope.GetLogger(), + clusterScope.GetCustomResourceTypeWithName(), + fmt.Sprintf("%s/%s/%s", azhciMachine.TypeMeta.Kind, azhciMachine.ObjectMeta.Namespace, azhciMachine.ObjectMeta.Name), + telemetry.Delete, + telemetry.CRD, + nil, + err) + if err != nil { if !apierrors.IsNotFound(err) { return errors.Wrapf(err, "Failed to delete AzureStackHCIMachine %s", azhciMachine) } @@ -291,7 +302,19 @@ func (r *AzureStackHCIClusterReconciler) reconcileAzureStackHCILoadBalancer(clus return nil } - if _, err := controllerutil.CreateOrUpdate(clusterScope.Context, r.Client, azureStackHCILoadBalancer, mutateFn); err != nil { + operationResult, err := controllerutil.CreateOrUpdate(clusterScope.Context, r.Client, azureStackHCILoadBalancer, mutateFn) + if telemetry.IsCRDUpdate(operationResult) { + operation, resourceType := telemetry.ConvertOperationResult(operationResult) + 
telemetry.RecordHybridAKSCRDChange( + clusterScope.GetLogger(), + clusterScope.GetCustomResourceTypeWithName(), + fmt.Sprintf("%s/%s/%s", azureStackHCILoadBalancer.TypeMeta.Kind, azureStackHCILoadBalancer.ObjectMeta.Namespace, azureStackHCILoadBalancer.ObjectMeta.Name), + operation, + resourceType, + nil, + err) + } + if err != nil { if !apierrors.IsAlreadyExists(err) { conditions.MarkFalse(clusterScope.AzureStackHCICluster, infrav1.NetworkInfrastructureReadyCondition, infrav1.LoadBalancerProvisioningReason, clusterv1.ConditionSeverityWarning, err.Error()) return false, err @@ -344,7 +367,16 @@ func (r *AzureStackHCIClusterReconciler) reconcileDeleteAzureStackHCILoadBalance return errors.Wrapf(err, "Failed to update AzureStackHCILoadBalancer %s", azureStackHCILoadBalancerName) } } - if err := r.Client.Delete(clusterScope.Context, azureStackHCILoadBalancer); err != nil { + err := r.Client.Delete(clusterScope.Context, azureStackHCILoadBalancer) + telemetry.RecordHybridAKSCRDChange( + clusterScope.GetLogger(), + clusterScope.GetCustomResourceTypeWithName(), + fmt.Sprintf("%s/%s/%s", azureStackHCILoadBalancer.TypeMeta.Kind, azureStackHCILoadBalancer.ObjectMeta.Namespace, azureStackHCILoadBalancer.ObjectMeta.Name), + telemetry.Delete, + telemetry.CRD, + nil, + err) + if err != nil { if !apierrors.IsNotFound(err) { conditions.MarkFalse(clusterScope.AzureStackHCICluster, infrav1.NetworkInfrastructureReadyCondition, clusterv1.DeletionFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) return errors.Wrapf(err, "Failed to delete AzureStackHCILoadBalancer %s", azureStackHCILoadBalancerName) diff --git a/controllers/azurestackhciloadbalancer_virtualmachine.go b/controllers/azurestackhciloadbalancer_virtualmachine.go index 8c3a19a..a946c41 100644 --- a/controllers/azurestackhciloadbalancer_virtualmachine.go +++ b/controllers/azurestackhciloadbalancer_virtualmachine.go @@ -26,6 +26,7 @@ import ( infrav1 
"github.com/microsoft/cluster-api-provider-azurestackhci/api/v1beta1" azurestackhci "github.com/microsoft/cluster-api-provider-azurestackhci/cloud" "github.com/microsoft/cluster-api-provider-azurestackhci/cloud/scope" + "github.com/microsoft/cluster-api-provider-azurestackhci/cloud/telemetry" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -129,7 +130,16 @@ func (r *AzureStackHCILoadBalancerReconciler) reconcileDeleteVirtualMachines(loa for _, vm := range vmList { if vm.GetDeletionTimestamp().IsZero() { - if err := r.Client.Delete(clusterScope.Context, vm); err != nil { + err := r.Client.Delete(clusterScope.Context, vm) + telemetry.RecordHybridAKSCRDChange( + clusterScope.GetLogger(), + clusterScope.GetCustomResourceTypeWithName(), + fmt.Sprintf("%s/%s/%s", vm.TypeMeta.Kind, vm.ObjectMeta.Namespace, vm.ObjectMeta.Name), + telemetry.Delete, + telemetry.CRD, + nil, + err) + if err != nil { if !apierrors.IsNotFound(err) { return errors.Wrapf(err, "failed to delete AzureStackHCIVirtualMachine %s", vm.Name) } @@ -191,8 +201,19 @@ func (r *AzureStackHCILoadBalancerReconciler) createOrUpdateVirtualMachine(loadB return nil } - - if _, err := controllerutil.CreateOrUpdate(clusterScope.Context, r.Client, vm, mutateFn); err != nil { + operationResult, err := controllerutil.CreateOrUpdate(clusterScope.Context, r.Client, vm, mutateFn) + if telemetry.IsCRDUpdate(operationResult) { + operation, resourceType := telemetry.ConvertOperationResult(operationResult) + telemetry.RecordHybridAKSCRDChange( + loadBalancerScope.GetLogger(), + clusterScope.GetCustomResourceTypeWithName(), + fmt.Sprintf("%s/%s/%s", vm.TypeMeta.Kind, vm.ObjectMeta.Namespace, vm.ObjectMeta.Name), + operation, + resourceType, + nil, + err) + } + if err != nil { return nil, err } @@ -202,7 +223,16 @@ func (r *AzureStackHCILoadBalancerReconciler) createOrUpdateVirtualMachine(loadB // deleteVirtualMachine deletes a virtual machine func (r 
*AzureStackHCILoadBalancerReconciler) deleteVirtualMachine(clusterScope *scope.ClusterScope, vm *infrav1.AzureStackHCIVirtualMachine) error { if vm.GetDeletionTimestamp().IsZero() { - if err := r.Client.Delete(clusterScope.Context, vm); err != nil { + err := r.Client.Delete(clusterScope.Context, vm) + telemetry.RecordHybridAKSCRDChange( + clusterScope.GetLogger(), + clusterScope.GetCustomResourceTypeWithName(), + fmt.Sprintf("%s/%s/%s", vm.TypeMeta.Kind, vm.ObjectMeta.Namespace, vm.ObjectMeta.Name), + telemetry.Delete, + telemetry.CRD, + nil, + err) + if err != nil { if !apierrors.IsNotFound(err) { return errors.Wrapf(err, "failed to delete AzureStackHCIVirtualMachine %s", vm.Name) } diff --git a/controllers/azurestackhcimachine_controller.go b/controllers/azurestackhcimachine_controller.go index f3cb71b..0a02b0d 100644 --- a/controllers/azurestackhcimachine_controller.go +++ b/controllers/azurestackhcimachine_controller.go @@ -28,6 +28,7 @@ import ( infrav1 "github.com/microsoft/cluster-api-provider-azurestackhci/api/v1beta1" azurestackhci "github.com/microsoft/cluster-api-provider-azurestackhci/cloud" "github.com/microsoft/cluster-api-provider-azurestackhci/cloud/scope" + "github.com/microsoft/cluster-api-provider-azurestackhci/cloud/telemetry" infrav1util "github.com/microsoft/cluster-api-provider-azurestackhci/pkg/util" "github.com/pkg/errors" @@ -312,7 +313,19 @@ func (r *AzureStackHCIMachineReconciler) reconcileVirtualMachineNormal(machineSc return nil } - if _, err := controllerutil.CreateOrUpdate(clusterScope.Context, r.Client, vm, mutateFn); err != nil { + operationResult, err := controllerutil.CreateOrUpdate(clusterScope.Context, r.Client, vm, mutateFn) + if telemetry.IsCRDUpdate(operationResult) { + operation, resourceType := telemetry.ConvertOperationResult(operationResult) + telemetry.RecordHybridAKSCRDChange( + machineScope.GetLogger(), + clusterScope.GetCustomResourceTypeWithName(), + fmt.Sprintf("%s/%s/%s", vm.TypeMeta.Kind, vm.ObjectMeta.Namespace, 
vm.ObjectMeta.Name), + operation, + resourceType, + nil, + err) + } + if err != nil { // If CreateOrUpdate throws AlreadyExists, we know that we have encountered an edge case where // Get with the cached client returned NotFound and then Create returned AlreadyExists. // @@ -339,7 +352,7 @@ func (r *AzureStackHCIMachineReconciler) reconcileVirtualMachineNormal(machineSc Name: machineScope.Name(), } - err := r.Client.Get(clusterScope.Context, key, azureStackHCIVirtualMachine) + err = r.Client.Get(clusterScope.Context, key, azureStackHCIVirtualMachine) if err != nil { return nil, err } @@ -383,7 +396,16 @@ func (r *AzureStackHCIMachineReconciler) reconcileVirtualMachineDelete(machineSc } } // is this a synchronous call? - if err := r.Client.Delete(clusterScope.Context, vm); err != nil { + err := r.Client.Delete(clusterScope.Context, vm) + telemetry.RecordHybridAKSCRDChange( + clusterScope.GetLogger(), + clusterScope.GetCustomResourceTypeWithName(), + fmt.Sprintf("%s/%s/%s", vm.TypeMeta.Kind, vm.ObjectMeta.Namespace, vm.ObjectMeta.Name), + telemetry.Delete, + telemetry.CRD, + nil, + err) + if err != nil { if !apierrors.IsNotFound(err) { return errors.Wrapf(err, "failed to delete AzureStackHCIVirtualMachine %s", vmName) } diff --git a/pkg/auth/auth.go b/pkg/auth/auth.go index b5a75fa..dfdc59f 100644 --- a/pkg/auth/auth.go +++ b/pkg/auth/auth.go @@ -24,6 +24,7 @@ import ( "sync" "time" + "github.com/go-logr/logr" azurestackhci "github.com/microsoft/cluster-api-provider-azurestackhci/cloud" "github.com/microsoft/moc-sdk-for-go/services/security/authentication" "github.com/microsoft/moc/pkg/auth" @@ -34,9 +35,9 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" - "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log/zap" ) var ( @@ -58,15 +59,16 @@ func GetAuthorizerFromKubernetesCluster(ctx context.Context, cloudFqdn string) ( } config.Timeout = 10 * 
time.Second + logger := zap.New(zap.UseDevMode(true)) c, err := client.New(config, client.Options{Scheme: Scheme}) if err != nil { return nil, errors.Wrap(err, "failed to create a client") } - return ReconcileAzureStackHCIAccess(ctx, c, cloudFqdn) + return ReconcileAzureStackHCIAccess(logger, ctx, c, cloudFqdn) } -func ReconcileAzureStackHCIAccess(ctx context.Context, cli client.Client, cloudFqdn string) (auth.Authorizer, error) { +func ReconcileAzureStackHCIAccess(logger logr.Logger, ctx context.Context, cli client.Client, cloudFqdn string) (auth.Authorizer, error) { wssdconfigpath := os.Getenv("WSSD_CONFIG_PATH") if wssdconfigpath == "" { @@ -76,9 +78,9 @@ func ReconcileAzureStackHCIAccess(ctx context.Context, cli client.Client, cloudF if strings.ToLower(os.Getenv("WSSD_DEBUG_MODE")) != "on" { _, err := os.Stat(wssdconfigpath) if err != nil { - return login(ctx, cli, cloudFqdn) + return login(logger, ctx, cli, cloudFqdn) } - go UpdateLoginConfig(ctx, cli) + go UpdateLoginConfig(logger, ctx, cli) } authorizer, err := auth.NewAuthorizerFromEnvironment(cloudFqdn) if err != nil { @@ -87,28 +89,28 @@ func ReconcileAzureStackHCIAccess(ctx context.Context, cli client.Client, cloudF return nil, errors.Wrap(err, "error: new authorizer failed") } // Login if certificate expired - return login(ctx, cli, cloudFqdn) + return login(logger, ctx, cli, cloudFqdn) } return authorizer, nil } -func UpdateLoginConfig(ctx context.Context, cli client.Client) { +func UpdateLoginConfig(logger logr.Logger, ctx context.Context, cli client.Client) { secret, err := GetSecret(ctx, cli, AzHCIAccessCreds) if err != nil { - klog.Errorf("error: failed to create wssd session, missing login credentials secret %v", err) + logger.Error(err, "error: failed to create wssd session, missing login credentials secret") return } data, ok := secret.Data[AzHCIAccessTokenFieldName] if !ok { - klog.Errorf("error: could not parse kubernetes secret") + logger.Error(nil, "could not parse kubernetes secret") return 
} loginconfig := auth.LoginConfig{} err = config.LoadYAMLConfig(string(data), &loginconfig) if err != nil { - klog.Errorf("error: failed to create wssd session: parse yaml login config failed") + logger.Error(err, "failed to create wssd session: parse yaml login config failed") return } @@ -117,7 +119,7 @@ func UpdateLoginConfig(ctx context.Context, cli client.Client) { } -func login(ctx context.Context, cli client.Client, cloudFqdn string) (auth.Authorizer, error) { +func login(logger logr.Logger, ctx context.Context, cli client.Client, cloudFqdn string) (auth.Authorizer, error) { wssdconfigpath := os.Getenv("WSSD_CONFIG_PATH") if wssdconfigpath == "" { return nil, errors.New("ReconcileAzureStackHCIAccess: Environment variable WSSD_CONFIG_PATH is not set") @@ -130,7 +132,7 @@ func login(ctx context.Context, cli client.Client, cloudFqdn string) (auth.Autho return authorizer, nil } } - klog.Infof("AzureStackHCI: Login attempt") + logger.Info("AzureStackHCI: Login attempt") secret, err := GetSecret(ctx, cli, AzHCIAccessCreds) if err != nil { return nil, errors.Wrap(err, "failed to create wssd session, missing login credentials secret") @@ -159,7 +161,7 @@ func login(ctx context.Context, cli client.Client, cloudFqdn string) (auth.Autho if _, err := os.Stat(wssdconfigpath); err != nil { return nil, errors.Wrapf(err, "Missing wssdconfig %s after login", wssdconfigpath) } - klog.Infof("AzureStackHCI: Login successful") + logger.Info("AzureStackHCI: Login successful") return auth.NewAuthorizerFromEnvironment(cloudFqdn) }