Commit
🌱 Drop internal log package & improve logs and errors (kubernetes-sigs#11025)

* Drop internal log package

Signed-off-by: Stefan Büringer [email protected]

* fix review findings

---------

Signed-off-by: Stefan Büringer [email protected]
sbueringer authored Sep 4, 2024
1 parent 50a0714 commit b35bd70
Showing 18 changed files with 284 additions and 474 deletions.
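The rewrite is mechanical across all 18 files: drop the internal tlog package in favor of ctrl.LoggerFrom, klog.KObj, and plain fmt.Sprintf. A minimal sketch of the resulting idioms (the helper and its discover callback are hypothetical; the other identifiers appear in the hunks below):

```go
package example

import (
	"context"
	"fmt"

	"github.com/pkg/errors"
	"k8s.io/klog/v2"
	ctrl "sigs.k8s.io/controller-runtime"

	runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1"
)

// reconcileExample is a hypothetical helper showing the post-change idioms.
func reconcileExample(ctx context.Context, extensionConfig *runtimev1.ExtensionConfig, discover func() error) error {
	// ctrl.LoggerFrom replaces the dropped tlog.LoggerFrom; both return a logr.Logger.
	log := ctrl.LoggerFrom(ctx)

	if err := discover(); err != nil {
		// klog.KObj(obj) prints "namespace/name", replacing the internal
		// tlog.KObj{Obj: obj} struct; the kind is now spelled out in the message.
		return errors.Wrapf(err, "failed to discover ExtensionConfig %s", klog.KObj(extensionConfig))
	}

	// logr has no Infof, so formatted messages go through fmt.Sprintf.
	log.Info(fmt.Sprintf("ExtensionConfig %s discovered", klog.KObj(extensionConfig)))
	return nil
}
```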
@@ -27,6 +27,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
kerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -37,7 +38,6 @@ import (

clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1"
tlog "sigs.k8s.io/cluster-api/internal/log"
runtimeclient "sigs.k8s.io/cluster-api/internal/runtime/client"
"sigs.k8s.io/cluster-api/util/annotations"
"sigs.k8s.io/cluster-api/util/conditions"
@@ -185,7 +185,7 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, extensionConfig *runti
log := ctrl.LoggerFrom(ctx)
log.Info("Unregistering ExtensionConfig information from registry")
if err := r.RuntimeClient.Unregister(extensionConfig); err != nil {
- return ctrl.Result{}, errors.Wrapf(err, "failed to unregister %s", tlog.KObj{Obj: extensionConfig})
+ return ctrl.Result{}, errors.Wrapf(err, "failed to unregister ExtensionConfig %s", klog.KObj(extensionConfig))
}
return ctrl.Result{}, nil
}
@@ -221,7 +221,7 @@ func discoverExtensionConfig(ctx context.Context, runtimeClient runtimeclient.Cl
if err != nil {
modifiedExtensionConfig := extensionConfig.DeepCopy()
conditions.MarkFalse(modifiedExtensionConfig, runtimev1.RuntimeExtensionDiscoveredCondition, runtimev1.DiscoveryFailedReason, clusterv1.ConditionSeverityError, "error in discovery: %v", err)
- return modifiedExtensionConfig, errors.Wrapf(err, "failed to discover %s", tlog.KObj{Obj: extensionConfig})
+ return modifiedExtensionConfig, errors.Wrapf(err, "failed to discover ExtensionConfig %s", klog.KObj(extensionConfig))
}

conditions.MarkTrue(discoveredExtension, runtimev1.RuntimeExtensionDiscoveredCondition)
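Note on the message change above: klog.KObj renders only namespace/name (it implements fmt.Stringer), so the kind has to be spelled out in the wrapped error, hence the added "ExtensionConfig" prefix. A minimal standalone illustration, using a core Pod object for brevity:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog/v2"
)

func main() {
	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "web-0"}}
	// Prints "default/web-0": namespace and name only, no kind.
	fmt.Println(klog.KObj(pod))
}
```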
17 changes: 9 additions & 8 deletions exp/topology/desiredstate/desired_state.go
@@ -26,7 +26,9 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/klog/v2"
"k8s.io/utils/ptr"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"

clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
@@ -40,7 +42,6 @@ import (
"sigs.k8s.io/cluster-api/internal/contract"
"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/patches"
"sigs.k8s.io/cluster-api/internal/hooks"
tlog "sigs.k8s.io/cluster-api/internal/log"
runtimeclient "sigs.k8s.io/cluster-api/internal/runtime/client"
"sigs.k8s.io/cluster-api/internal/topology/clustershim"
topologynames "sigs.k8s.io/cluster-api/internal/topology/names"
@@ -399,7 +400,7 @@ func (g *generator) computeControlPlane(ctx context.Context, s *scope.Scope, inf
// The version is calculated using the state of the current machine deployments, the current control plane
// and the version defined in the topology.
func (g *generator) computeControlPlaneVersion(ctx context.Context, s *scope.Scope) (string, error) {
- log := tlog.LoggerFrom(ctx)
+ log := ctrl.LoggerFrom(ctx)
desiredVersion := s.Blueprint.Topology.Version
// If we are creating the control plane object (current control plane is nil), use version from topology.
if s.Current.ControlPlane == nil || s.Current.ControlPlane.Object == nil {
@@ -477,7 +478,7 @@ func (g *generator) computeControlPlaneVersion(ctx context.Context, s *scope.Sco
// change the UpgradeTracker accordingly, otherwise the hook call is completed and we
// can remove this hook from the list of pending-hooks.
if hookResponse.RetryAfterSeconds != 0 {
log.Infof("MachineDeployments/MachinePools upgrade to version %q are blocked by %q hook", desiredVersion, runtimecatalog.HookName(runtimehooksv1.AfterControlPlaneUpgrade))
log.Info(fmt.Sprintf("MachineDeployments/MachinePools upgrade to version %q are blocked by %q hook", desiredVersion, runtimecatalog.HookName(runtimehooksv1.AfterControlPlaneUpgrade)))
} else {
if err := hooks.MarkAsDone(ctx, g.Client, s.Current.Cluster, runtimehooksv1.AfterControlPlaneUpgrade); err != nil {
return "", err
@@ -527,7 +528,7 @@ func (g *generator) computeControlPlaneVersion(ctx context.Context, s *scope.Sco
s.HookResponseTracker.Add(runtimehooksv1.BeforeClusterUpgrade, hookResponse)
if hookResponse.RetryAfterSeconds != 0 {
// Cannot pickup the new version right now. Need to try again later.
log.Infof("Cluster upgrade to version %q is blocked by %q hook", desiredVersion, runtimecatalog.HookName(runtimehooksv1.BeforeClusterUpgrade))
log.Info(fmt.Sprintf("Cluster upgrade to version %q is blocked by %q hook", desiredVersion, runtimecatalog.HookName(runtimehooksv1.BeforeClusterUpgrade)))
return *currentVersion, nil
}

@@ -627,7 +628,7 @@ func (g *generator) computeMachineDeployment(ctx context.Context, s *scope.Scope
className := machineDeploymentTopology.Class
machineDeploymentBlueprint, ok := s.Blueprint.MachineDeployments[className]
if !ok {
return nil, errors.Errorf("MachineDeployment class %s not found in %s", className, tlog.KObj{Obj: s.Blueprint.ClusterClass})
return nil, errors.Errorf("MachineDeployment class %s not found in ClusterClass %s", className, klog.KObj(s.Blueprint.ClusterClass))
}

var machineDeploymentClass *clusterv1.MachineDeploymentClass
@@ -638,7 +639,7 @@ }
}
}
if machineDeploymentClass == nil {
return nil, errors.Errorf("MachineDeployment class %s not found in %s", className, tlog.KObj{Obj: s.Blueprint.ClusterClass})
return nil, errors.Errorf("MachineDeployment class %s not found in ClusterClass %s", className, klog.KObj(s.Blueprint.ClusterClass))
}

// Compute the bootstrap template.
@@ -952,7 +953,7 @@ func (g *generator) computeMachinePool(_ context.Context, s *scope.Scope, machin
className := machinePoolTopology.Class
machinePoolBlueprint, ok := s.Blueprint.MachinePools[className]
if !ok {
return nil, errors.Errorf("MachinePool class %s not found in %s", className, tlog.KObj{Obj: s.Blueprint.ClusterClass})
return nil, errors.Errorf("MachinePool class %s not found in ClusterClass %s", className, klog.KObj(s.Blueprint.ClusterClass))
}

var machinePoolClass *clusterv1.MachinePoolClass
@@ -963,7 +964,7 @@ }
}
}
if machinePoolClass == nil {
return nil, errors.Errorf("MachinePool class %s not found in %s", className, tlog.KObj{Obj: s.Blueprint.ClusterClass})
return nil, errors.Errorf("MachinePool class %s not found in ClusterClass %s", className, klog.KObj(s.Blueprint.ClusterClass))
}

// Compute the bootstrap config.
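The log.Infof to log.Info(fmt.Sprintf(...)) changes in this file follow from logr's API: it is structured-first and has no printf-style methods. The commit preserves the exact message text via fmt.Sprintf; a purely structured alternative (shown only for contrast, not what this commit does) would pass key/value pairs:

```go
package example

import "github.com/go-logr/logr"

// logBlockedUpgrade sketches the structured alternative: a constant message
// plus key/value pairs. The commit keeps fmt.Sprintf instead, preserving the
// exact wording of the existing log lines.
func logBlockedUpgrade(log logr.Logger, desiredVersion, hookName string) {
	log.Info("Cluster upgrade is blocked by hook",
		"version", desiredVersion,
		"hook", hookName)
}
```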
4 changes: 2 additions & 2 deletions internal/controllers/clusterclass/clusterclass_controller.go
@@ -32,6 +32,7 @@ import (
kerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
@@ -44,7 +45,6 @@ import (
runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1"
runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
"sigs.k8s.io/cluster-api/feature"
tlog "sigs.k8s.io/cluster-api/internal/log"
runtimeclient "sigs.k8s.io/cluster-api/internal/runtime/client"
"sigs.k8s.io/cluster-api/internal/topology/variables"
"sigs.k8s.io/cluster-api/util/annotations"
@@ -391,7 +391,7 @@ func (r *Reconciler) reconcileExternal(ctx context.Context, clusterClass *cluste

// Set external object ControllerReference to the ClusterClass.
if err := controllerutil.SetOwnerReference(clusterClass, obj, r.Client.Scheme()); err != nil {
- return errors.Wrapf(err, "failed to set ClusterClass owner reference for %s", tlog.KObj{Obj: obj})
+ return errors.Wrapf(err, "failed to set ClusterClass owner reference for %s %s", obj.GetKind(), klog.KObj(obj))
}

// Patch the external object.
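In reconcileExternal the external object exposes GetKind(), i.e. it is handled as an unstructured object, which is what lets the new message name both kind and namespace/name. A sketch of the resulting error shape (the wrapper function itself is hypothetical):

```go
package example

import (
	"github.com/pkg/errors"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/klog/v2"
)

// wrapOwnerRefErr is a hypothetical helper mirroring the new message shape:
// the error now names both the kind and namespace/name of the external object.
func wrapOwnerRefErr(err error, obj *unstructured.Unstructured) error {
	return errors.Wrapf(err, "failed to set ClusterClass owner reference for %s %s", obj.GetKind(), klog.KObj(obj))
}
```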
@@ -32,6 +32,7 @@ import (
"k8s.io/apimachinery/pkg/util/version"
utilversion "k8s.io/apiserver/pkg/util/version"
utilfeature "k8s.io/component-base/featuregate/testing"
"k8s.io/klog/v2"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
@@ -42,7 +43,6 @@ import (
runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog"
runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
"sigs.k8s.io/cluster-api/feature"
tlog "sigs.k8s.io/cluster-api/internal/log"
fakeruntimeclient "sigs.k8s.io/cluster-api/internal/runtime/client/fake"
"sigs.k8s.io/cluster-api/internal/test/builder"
)
@@ -384,7 +384,9 @@ func assertHasOwnerReference(obj client.Object, ownerRef metav1.OwnerReference)
}
}
if !found {
return fmt.Errorf("object %s does not have OwnerReference %s", tlog.KObj{Obj: obj}, &ownerRef)
// Note: Kind might be empty here as it's usually only set on Unstructured objects.
// But as this is just test code we don't care too much about it.
return fmt.Errorf("%s %s does not have OwnerReference %s", obj.GetObjectKind().GroupVersionKind().Kind, klog.KObj(obj), &ownerRef)
}
return nil
}
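The comment added above flags a real caveat: typed Go objects usually carry an empty Kind, since TypeMeta is generally only populated on Unstructured objects. A quick standalone illustration:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func main() {
	// A typed object constructed in Go code has no TypeMeta unless set explicitly.
	typed := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "web-0"}}
	fmt.Printf("typed Kind: %q\n", typed.GetObjectKind().GroupVersionKind().Kind) // ""

	// An Unstructured object carries its kind as data.
	u := &unstructured.Unstructured{}
	u.SetKind("Pod")
	fmt.Printf("unstructured Kind: %q\n", u.GetObjectKind().GroupVersionKind().Kind) // "Pod"
}
```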
16 changes: 8 additions & 8 deletions internal/controllers/topology/cluster/blueprint.go
@@ -20,10 +20,10 @@ import (
"context"

"github.com/pkg/errors"
"k8s.io/klog/v2"

clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/exp/topology/scope"
tlog "sigs.k8s.io/cluster-api/internal/log"
)

// getBlueprint gets a ClusterBlueprint with the ClusterClass and the referenced templates to be used for a managed Cluster topology.
@@ -41,21 +41,21 @@ func (r *Reconciler) getBlueprint(ctx context.Context, cluster *clusterv1.Cluste
// Get ClusterClass.spec.infrastructure.
blueprint.InfrastructureClusterTemplate, err = r.getReference(ctx, blueprint.ClusterClass.Spec.Infrastructure.Ref)
if err != nil {
return nil, errors.Wrapf(err, "failed to get infrastructure cluster template for %s", tlog.KObj{Obj: blueprint.ClusterClass})
return nil, errors.Wrapf(err, "failed to get infrastructure cluster template for ClusterClass %s", klog.KObj(blueprint.ClusterClass))
}

// Get ClusterClass.spec.controlPlane.
blueprint.ControlPlane = &scope.ControlPlaneBlueprint{}
blueprint.ControlPlane.Template, err = r.getReference(ctx, blueprint.ClusterClass.Spec.ControlPlane.Ref)
if err != nil {
return nil, errors.Wrapf(err, "failed to get control plane template for %s", tlog.KObj{Obj: blueprint.ClusterClass})
return nil, errors.Wrapf(err, "failed to get control plane template for ClusterClass %s", klog.KObj(blueprint.ClusterClass))
}

// If the clusterClass mandates the controlPlane has infrastructureMachines, read it.
if blueprint.HasControlPlaneInfrastructureMachine() {
blueprint.ControlPlane.InfrastructureMachineTemplate, err = r.getReference(ctx, blueprint.ClusterClass.Spec.ControlPlane.MachineInfrastructure.Ref)
if err != nil {
return nil, errors.Wrapf(err, "failed to get control plane's machine template for %s", tlog.KObj{Obj: blueprint.ClusterClass})
return nil, errors.Wrapf(err, "failed to get control plane's machine template for ClusterClass %s", klog.KObj(blueprint.ClusterClass))
}
}

Expand All @@ -77,13 +77,13 @@ func (r *Reconciler) getBlueprint(ctx context.Context, cluster *clusterv1.Cluste
// Get the infrastructure machine template.
machineDeploymentBlueprint.InfrastructureMachineTemplate, err = r.getReference(ctx, machineDeploymentClass.Template.Infrastructure.Ref)
if err != nil {
return nil, errors.Wrapf(err, "failed to get infrastructure machine template for %s, MachineDeployment class %q", tlog.KObj{Obj: blueprint.ClusterClass}, machineDeploymentClass.Class)
return nil, errors.Wrapf(err, "failed to get infrastructure machine template for ClusterClass %s, MachineDeployment class %q", klog.KObj(blueprint.ClusterClass), machineDeploymentClass.Class)
}

// Get the bootstrap config template.
machineDeploymentBlueprint.BootstrapTemplate, err = r.getReference(ctx, machineDeploymentClass.Template.Bootstrap.Ref)
if err != nil {
return nil, errors.Wrapf(err, "failed to get bootstrap config template for %s, MachineDeployment class %q", tlog.KObj{Obj: blueprint.ClusterClass}, machineDeploymentClass.Class)
return nil, errors.Wrapf(err, "failed to get bootstrap config template for ClusterClass %s, MachineDeployment class %q", klog.KObj(blueprint.ClusterClass), machineDeploymentClass.Class)
}

// If the machineDeploymentClass defines a MachineHealthCheck add it to the blueprint.
@@ -106,13 +106,13 @@ func (r *Reconciler) getBlueprint(ctx context.Context, cluster *clusterv1.Cluste
// Get the InfrastructureMachinePoolTemplate.
machinePoolBlueprint.InfrastructureMachinePoolTemplate, err = r.getReference(ctx, machinePoolClass.Template.Infrastructure.Ref)
if err != nil {
return nil, errors.Wrapf(err, "failed to get InfrastructureMachinePoolTemplate for %s, MachinePool class %q", tlog.KObj{Obj: blueprint.ClusterClass}, machinePoolClass.Class)
return nil, errors.Wrapf(err, "failed to get InfrastructureMachinePoolTemplate for ClusterClass %s, MachinePool class %q", klog.KObj(blueprint.ClusterClass), machinePoolClass.Class)
}

// Get the bootstrap config template.
machinePoolBlueprint.BootstrapTemplate, err = r.getReference(ctx, machinePoolClass.Template.Bootstrap.Ref)
if err != nil {
return nil, errors.Wrapf(err, "failed to get bootstrap config for %s, MachinePool class %q", tlog.KObj{Obj: blueprint.ClusterClass}, machinePoolClass.Class)
return nil, errors.Wrapf(err, "failed to get bootstrap config for ClusterClass %s, MachinePool class %q", klog.KObj(blueprint.ClusterClass), machinePoolClass.Class)
}

blueprint.MachinePools[machinePoolClass.Class] = machinePoolBlueprint
9 changes: 4 additions & 5 deletions internal/controllers/topology/cluster/cluster_controller.go
@@ -45,7 +45,6 @@ import (
"sigs.k8s.io/cluster-api/feature"
"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/structuredmerge"
"sigs.k8s.io/cluster-api/internal/hooks"
tlog "sigs.k8s.io/cluster-api/internal/log"
runtimeclient "sigs.k8s.io/cluster-api/internal/runtime/client"
"sigs.k8s.io/cluster-api/internal/util/ssa"
"sigs.k8s.io/cluster-api/internal/webhooks"
@@ -316,7 +315,7 @@ func (r *Reconciler) setupDynamicWatches(ctx context.Context, s *scope.Scope) er
func (r *Reconciler) callBeforeClusterCreateHook(ctx context.Context, s *scope.Scope) (reconcile.Result, error) {
// If the cluster objects (InfraCluster, ControlPlane, etc) are not yet created we are in the creation phase.
// Call the BeforeClusterCreate hook before proceeding.
- log := tlog.LoggerFrom(ctx)
+ log := ctrl.LoggerFrom(ctx)
if s.Current.Cluster.Spec.InfrastructureRef == nil && s.Current.Cluster.Spec.ControlPlaneRef == nil {
hookRequest := &runtimehooksv1.BeforeClusterCreateRequest{
Cluster: *s.Current.Cluster,
@@ -327,7 +326,7 @@ func (r *Reconciler) callBeforeClusterCreateHook(ctx context.Context, s *scope.S
}
s.HookResponseTracker.Add(runtimehooksv1.BeforeClusterCreate, hookResponse)
if hookResponse.RetryAfterSeconds != 0 {
log.Infof("Creation of Cluster topology is blocked by %s hook", runtimecatalog.HookName(runtimehooksv1.BeforeClusterCreate))
log.Info(fmt.Sprintf("Creation of Cluster topology is blocked by %s hook", runtimecatalog.HookName(runtimehooksv1.BeforeClusterCreate)))
return ctrl.Result{RequeueAfter: time.Duration(hookResponse.RetryAfterSeconds) * time.Second}, nil
}
}
@@ -402,7 +401,7 @@ func (r *Reconciler) machinePoolToCluster(_ context.Context, o client.Object) []
func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Cluster) (ctrl.Result, error) {
// Call the BeforeClusterDelete hook if the 'ok-to-delete' annotation is not set
// and add the annotation to the cluster after receiving a successful non-blocking response.
- log := tlog.LoggerFrom(ctx)
+ log := ctrl.LoggerFrom(ctx)
if feature.Gates.Enabled(feature.RuntimeSDK) {
if !hooks.IsOkToDelete(cluster) {
hookRequest := &runtimehooksv1.BeforeClusterDeleteRequest{
@@ -413,7 +412,7 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Clu
return ctrl.Result{}, err
}
if hookResponse.RetryAfterSeconds != 0 {
log.Infof("Cluster deletion is blocked by %q hook", runtimecatalog.HookName(runtimehooksv1.BeforeClusterDelete))
log.Info(fmt.Sprintf("Cluster deletion is blocked by %q hook", runtimecatalog.HookName(runtimehooksv1.BeforeClusterDelete)))
return ctrl.Result{RequeueAfter: time.Duration(hookResponse.RetryAfterSeconds) * time.Second}, nil
}
// The BeforeClusterDelete hook returned a non-blocking response. Now the cluster is ready to be deleted.
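For context, the blocking-hook pattern surrounding these log changes: a nonzero RetryAfterSeconds in a hook response is turned into a delayed requeue rather than an error. A minimal sketch of that control flow (the helper function is hypothetical):

```go
package example

import (
	"time"

	ctrl "sigs.k8s.io/controller-runtime"
)

// requeueFor sketches the pattern visible in reconcileDelete and
// callBeforeClusterCreateHook: a nonzero RetryAfterSeconds from a runtime
// hook response becomes a delayed requeue instead of an error.
func requeueFor(retryAfterSeconds int32) (ctrl.Result, error) {
	if retryAfterSeconds != 0 {
		return ctrl.Result{RequeueAfter: time.Duration(retryAfterSeconds) * time.Second}, nil
	}
	return ctrl.Result{}, nil
}
```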