Merge pull request #791 from gianlucam76/optimize-slices
Where possible, set slice capacity
gianlucam76 authored Nov 4, 2024
2 parents 53d8fd7 + 5affdba commit 4e1277b
Showing 7 changed files with 26 additions and 21 deletions.
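Every hunk below applies the same idea: when the number of elements a loop will produce is known, or at least bounded, before the loop starts, passing that number as the capacity argument of make lets one up-front allocation serve every subsequent append, instead of letting append grow and copy the backing array repeatedly. A minimal sketch of the before/after shape, using illustrative names that are not from this repository:

package main

import "fmt"

type item struct{ Name string }

// build shows the pattern: with a bare make([]item, 0), append may
// reallocate and copy the backing array several times as the slice
// grows; the capacity hint below reserves room for all len(src)
// elements in a single allocation, so no append here reallocates.
func build(src []string) []item {
    out := make([]item, 0, len(src))
    for i := range src {
        out = append(out, item{Name: src[i]})
    }
    return out
}

func main() {
    fmt.Println(build([]string{"a", "b", "c"}))
}

In this variant len starts at 0 and only cap changes, so the existing append-based loops keep working unchanged.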
16 changes: 8 additions & 8 deletions controllers/handlers_helm.go
@@ -516,8 +516,8 @@ func walkChartsAndDeploy(ctx context.Context, c client.Client, clusterSummary *c
     }

     conflictErrorMessage := ""
-    releaseReports := make([]configv1beta1.ReleaseReport, 0)
-    chartDeployed := make([]configv1beta1.Chart, 0)
+    releaseReports := make([]configv1beta1.ReleaseReport, 0, len(clusterSummary.Spec.ClusterProfileSpec.HelmCharts))
+    chartDeployed := make([]configv1beta1.Chart, 0, len(clusterSummary.Spec.ClusterProfileSpec.HelmCharts))
     for i := range clusterSummary.Spec.ClusterProfileSpec.HelmCharts {
         currentChart := &clusterSummary.Spec.ClusterProfileSpec.HelmCharts[i]
         // Eventual conflicts are already resolved before this method is called (in updateStatusForeferencedHelmReleases)
@@ -1703,7 +1703,7 @@ func updateStatusForNonReferencedHelmReleases(ctx context.Context, c client.Clie
         currentlyReferenced[helmInfo(currentChart.ReleaseNamespace, currentChart.ReleaseName)] = true
     }

-    helmReleaseSummaries := make([]configv1beta1.HelmChartSummary, 0)
+    helmReleaseSummaries := make([]configv1beta1.HelmChartSummary, 0, len(clusterSummary.Status.HelmReleaseSummaries))
     for i := range clusterSummary.Status.HelmReleaseSummaries {
         summary := &clusterSummary.Status.HelmReleaseSummaries[i]
         if _, ok := currentlyReferenced[helmInfo(summary.ReleaseNamespace, summary.ReleaseName)]; ok {
@@ -1866,7 +1866,7 @@ func collectResourcesFromManagedHelmChartsForDriftDetection(ctx context.Context,
         return nil, err
     }

-    helmResources := make([]libsveltosv1beta1.HelmResources, 0)
+    helmResources := make([]libsveltosv1beta1.HelmResources, 0, len(clusterSummary.Spec.ClusterProfileSpec.HelmCharts))

     for i := range clusterSummary.Spec.ClusterProfileSpec.HelmCharts {
         currentChart := &clusterSummary.Spec.ClusterProfileSpec.HelmCharts[i]
@@ -1927,13 +1927,13 @@ func collectResourcesFromManagedHelmChartsForDriftDetection(ctx context.Context,
 }

 func collectHelmContent(manifest string, logger logr.Logger) ([]*unstructured.Unstructured, error) {
-    resources := make([]*unstructured.Unstructured, 0)
-
     elements, err := customSplit(manifest)
     if err != nil {
         return nil, err
     }

+    resources := make([]*unstructured.Unstructured, 0, len(elements))
+
     for i := range elements {
         policy, err := utils.GetUnstructured([]byte(elements[i]))
         if err != nil {
@@ -1948,7 +1948,7 @@ func collectHelmContent(manifest string, logger logr.Logger) ([]*unstructured.Un
 }

 func unstructuredToSveltosResources(policies []*unstructured.Unstructured) []libsveltosv1beta1.Resource {
-    resources := make([]libsveltosv1beta1.Resource, 0)
+    resources := make([]libsveltosv1beta1.Resource, len(policies))

     for i := range policies {
         r := libsveltosv1beta1.Resource{
@@ -1960,7 +1960,7 @@
             IgnoreForConfigurationDrift: hasIgnoreConfigurationDriftAnnotation(policies[i]),
         }

-        resources = append(resources, r)
+        resources[i] = r
     }

     return resources
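The last hunk above is the stricter variant: unstructuredToSveltosResources yields exactly one Resource per input policy, so the slice is allocated at full length and filled by index, skipping append entirely. A sketch of that shape, with hypothetical types:

package sketch

type policy struct{ Name string }

// make with a length (not just a capacity) plus index assignment:
// one allocation and no append bookkeeping, but only correct when
// outputs map 1:1 to inputs; combining this form with append would
// leave zero-value elements at the front of the slice.
func toNames(policies []policy) []string {
    names := make([]string, len(policies))
    for i := range policies {
        names[i] = policies[i].Name
    }
    return names
}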
9 changes: 7 additions & 2 deletions controllers/handlers_kustomize.go
@@ -135,9 +135,9 @@ func deployKustomizeRefs(ctx context.Context, c client.Client,
     }

     // If we are here there are no conflicts (and error would have been returned by deployKustomizeRef)
-    remoteDeployed := make([]configv1beta1.Resource, 0)
+    remoteDeployed := make([]configv1beta1.Resource, len(remoteResourceReports))
     for i := range remoteResourceReports {
-        remoteDeployed = append(remoteDeployed, remoteResourceReports[i].Resource)
+        remoteDeployed[i] = remoteResourceReports[i].Resource
     }

     // TODO: track resource deployed in the management cluster
@@ -639,6 +639,8 @@ func getKustomizedResources(ctx context.Context, c client.Client, clusterSummary
     }

     resources := resMap.Resources()
+    objectsToDeployLocally = make([]*unstructured.Unstructured, 0, len(resources))
+    objectsToDeployRemotely = make([]*unstructured.Unstructured, 0, len(resources))
     for i := range resources {
         resource := resources[i]
         yaml, err := resource.AsYAML()
@@ -821,6 +823,9 @@ func deployEachKustomizeRefs(ctx context.Context, c client.Client, remoteRestCon
     clusterSummary *configv1beta1.ClusterSummary, logger logr.Logger,
 ) (localResourceReports, remoteResourceReports []configv1beta1.ResourceReport, err error) {

+    capacity := len(clusterSummary.Spec.ClusterProfileSpec.KustomizationRefs)
+    localResourceReports = make([]configv1beta1.ResourceReport, 0, capacity)
+    remoteResourceReports = make([]configv1beta1.ResourceReport, 0, capacity)
     for i := range clusterSummary.Spec.ClusterProfileSpec.KustomizationRefs {
         kustomizationRef := &clusterSummary.Spec.ClusterProfileSpec.KustomizationRefs[i]
         var tmpLocal []configv1beta1.ResourceReport
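In deployEachKustomizeRefs the estimate is only a lower bound, since each KustomizationRef can produce several ResourceReports. That is still safe: capacity is a hint rather than a limit, and append grows the slice past it when needed. A small sketch under assumed shapes, not the repository's types:

package sketch

type report struct{ ID int }

// An under-estimated capacity still removes the early growth steps;
// appends beyond cap simply reallocate, exactly as they would have
// done without the hint.
func gather(refs []int) []report {
    reports := make([]report, 0, len(refs))
    for _, n := range refs {
        for j := 0; j < n; j++ { // each ref may emit several reports
            reports = append(reports, report{ID: j})
        }
    }
    return reports
}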
4 changes: 2 additions & 2 deletions controllers/handlers_resources.go
@@ -89,9 +89,9 @@ func deployResources(ctx context.Context, c client.Client,
         return err
     }

-    remoteDeployed := make([]configv1beta1.Resource, 0)
+    remoteDeployed := make([]configv1beta1.Resource, len(remoteResourceReports))
     for i := range remoteResourceReports {
-        remoteDeployed = append(remoteDeployed, remoteResourceReports[i].Resource)
+        remoteDeployed[i] = remoteResourceReports[i].Resource
     }

     // TODO: track resource deployed in the management cluster
10 changes: 5 additions & 5 deletions controllers/handlers_utils.go
@@ -745,7 +745,7 @@ func collectContent(ctx context.Context, clusterSummary *configv1beta1.ClusterSu
     instantiateTemplate bool, logger logr.Logger,
 ) ([]*unstructured.Unstructured, error) {

-    policies := make([]*unstructured.Unstructured, 0)
+    policies := make([]*unstructured.Unstructured, 0, len(data))

     for k := range data {
         section := data[k]
@@ -789,12 +789,11 @@
 }

 func getUnstructured(section []byte, logger logr.Logger) ([]*unstructured.Unstructured, error) {
-    policies := make([]*unstructured.Unstructured, 0)
     elements, err := customSplit(string(section))
     if err != nil {
         return nil, err
     }
-
+    policies := make([]*unstructured.Unstructured, 0, len(elements))
     for i := range elements {
         policy, err := utils.GetUnstructured([]byte(elements[i]))
         if err != nil {
@@ -896,8 +895,8 @@ func collectReferencedObjects(ctx context.Context, controlClusterClient client.C
     clusterSummary *configv1beta1.ClusterSummary, references []configv1beta1.PolicyRef,
     logger logr.Logger) (local, remote []client.Object, err error) {

-    local = make([]client.Object, 0)
-    remote = make([]client.Object, 0)
+    local = make([]client.Object, 0, len(references))
+    remote = make([]client.Object, 0, len(references))
     for i := range references {
         var object client.Object
         reference := &references[i]
@@ -999,6 +998,7 @@ func deployObjects(ctx context.Context, deployingToMgmtCluster bool, destClient
     mgmtResources map[string]*unstructured.Unstructured, logger logr.Logger,
 ) (reports []configv1beta1.ResourceReport, err error) {

+    reports = make([]configv1beta1.ResourceReport, 0, len(referencedObjects))
     for i := range referencedObjects {
         var tmpResourceReports []configv1beta1.ResourceReport
         if referencedObjects[i].GetObjectKind().GroupVersionKind().Kind == string(libsveltosv1beta1.ConfigMapReferencedResourceKind) {
4 changes: 2 additions & 2 deletions controllers/profile_utils.go
@@ -884,7 +884,7 @@ func reviseUpdatedAndUpdatingClusters(profileScope *scope.ProfileScope) {
     }

     updatedClusters := &libsveltosset.Set{}
-    currentUpdatedClusters := make([]corev1.ObjectReference, 0)
+    currentUpdatedClusters := make([]corev1.ObjectReference, 0, len(profileScope.GetStatus().UpdatedClusters.Clusters))
     for i := range profileScope.GetStatus().UpdatedClusters.Clusters {
         cluster := &profileScope.GetStatus().UpdatedClusters.Clusters[i]
         if matchingCluster.Has(cluster) {
@@ -896,7 +896,7 @@
     profileScope.GetStatus().UpdatedClusters.Clusters = currentUpdatedClusters

     updatingClusters := &libsveltosset.Set{}
-    currentUpdatingClusters := make([]corev1.ObjectReference, 0)
+    currentUpdatingClusters := make([]corev1.ObjectReference, 0, len(profileScope.GetStatus().UpdatingClusters.Clusters))
     for i := range profileScope.GetStatus().UpdatingClusters.Clusters {
         cluster := &profileScope.GetStatus().UpdatingClusters.Clusters[i]
         if matchingCluster.Has(cluster) {
2 changes: 1 addition & 1 deletion controllers/reloader_utils.go
@@ -90,7 +90,7 @@ func deployReloaderInstance(ctx context.Context, remoteClient client.Client,
     clusterProfileName string, feature configv1beta1.FeatureID, resources []corev1.ObjectReference,
     logger logr.Logger) error {

-    reloaderInfo := make([]libsveltosv1beta1.ReloaderInfo, 0)
+    reloaderInfo := make([]libsveltosv1beta1.ReloaderInfo, 0, len(resources))
     for i := range resources {
         resource := &resources[i]
         if watchForRollingUpgrade(resource) {
2 changes: 1 addition & 1 deletion controllers/set_utils.go
@@ -53,7 +53,7 @@ func selectClusters(ctx context.Context, c client.Client, setScope *scope.SetSco
         currentMatchingHealthyCluster[healthyMatchingClusters[i]] = true
     }

-    currentSelectedClusters := make([]corev1.ObjectReference, 0)
+    currentSelectedClusters := make([]corev1.ObjectReference, 0, len(status.SelectedClusterRefs))
     for i := range status.SelectedClusterRefs {
         cluster := &status.SelectedClusterRefs[i]
         if _, ok := currentMatchingHealthyCluster[*cluster]; ok {
