From a8629ac72ad2a2f8a2401197380c470c8999eaa9 Mon Sep 17 00:00:00 2001
From: chaosi-zju
Date: Wed, 22 Nov 2023 09:50:09 +0800
Subject: [PATCH] Expose assignReplicas and selectClusters functions in scheduler

Signed-off-by: chaosi-zju
---
 pkg/scheduler/core/common.go            | 75 ++++++++++++++++++++++++++
 pkg/scheduler/core/generic_scheduler.go | 43 ++--------------------
 2 files changed, 79 insertions(+), 39 deletions(-)
 create mode 100644 pkg/scheduler/core/common.go

diff --git a/pkg/scheduler/core/common.go b/pkg/scheduler/core/common.go
new file mode 100644
index 000000000000..093cf747a7a2
--- /dev/null
+++ b/pkg/scheduler/core/common.go
@@ -0,0 +1,75 @@
+/*
+Copyright 2021 The Karmada Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package core
+
+import (
+	"fmt"
+	"time"
+
+	clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
+	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
+	workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
+	"github.com/karmada-io/karmada/pkg/scheduler/core/spreadconstraint"
+	"github.com/karmada-io/karmada/pkg/scheduler/framework"
+	"github.com/karmada-io/karmada/pkg/scheduler/metrics"
+)
+
+// SelectClusters selects the target clusters based on the placement and resource binding spec.
+func SelectClusters(clustersScore framework.ClusterScoreList,
+	placement *policyv1alpha1.Placement, spec *workv1alpha2.ResourceBindingSpec) ([]*clusterv1alpha1.Cluster, error) {
+	startTime := time.Now()
+	defer metrics.ScheduleStep(metrics.ScheduleStepSelect, startTime)
+
+	groupClustersInfo := spreadconstraint.GroupClustersWithScore(clustersScore, placement, spec, calAvailableReplicas)
+	return spreadconstraint.SelectBestClusters(placement, groupClustersInfo, spec.Replicas)
+}
+
+// AssignReplicas assigns replicas to the selected clusters based on the placement and resource binding spec.
+func AssignReplicas(
+	clusters []*clusterv1alpha1.Cluster,
+	placement *policyv1alpha1.Placement,
+	object *workv1alpha2.ResourceBindingSpec,
+) ([]workv1alpha2.TargetCluster, error) {
+	startTime := time.Now()
+	defer metrics.ScheduleStep(metrics.ScheduleStepAssignReplicas, startTime)
+
+	if len(clusters) == 0 {
+		return nil, fmt.Errorf("no clusters available to schedule")
+	}
+
+	if object.Replicas > 0 {
+		state := newAssignState(clusters, placement, object)
+		assignFunc, ok := assignFuncMap[state.strategyType]
+		if !ok {
+			// should never happen at present
+			return nil, fmt.Errorf("unsupported replica scheduling strategy, replicaSchedulingType: %s, replicaDivisionPreference: %s, "+
+				"please try another scheduling strategy", placement.ReplicaSchedulingType(), placement.ReplicaScheduling.ReplicaDivisionPreference)
+		}
+		assignResults, err := assignFunc(state)
+		if err != nil {
+			return nil, err
+		}
+		return removeZeroReplicasCluster(assignResults), nil
+	}
+
+	// If not workload, assign all clusters without considering replicas.
+	targetClusters := make([]workv1alpha2.TargetCluster, len(clusters))
+	for i, cluster := range clusters {
+		targetClusters[i] = workv1alpha2.TargetCluster{Name: cluster.Name}
+	}
+	return targetClusters, nil
+}
diff --git a/pkg/scheduler/core/generic_scheduler.go b/pkg/scheduler/core/generic_scheduler.go
index 321f91448a2d..22bcd0339c84 100644
--- a/pkg/scheduler/core/generic_scheduler.go
+++ b/pkg/scheduler/core/generic_scheduler.go
@@ -27,7 +27,6 @@ import (
 	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
 	workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
 	"github.com/karmada-io/karmada/pkg/scheduler/cache"
-	"github.com/karmada-io/karmada/pkg/scheduler/core/spreadconstraint"
 	"github.com/karmada-io/karmada/pkg/scheduler/framework"
 	"github.com/karmada-io/karmada/pkg/scheduler/framework/runtime"
 	"github.com/karmada-io/karmada/pkg/scheduler/metrics"
@@ -175,44 +174,10 @@ func (g *genericScheduler) prioritizeClusters(
 
 func (g *genericScheduler) selectClusters(clustersScore framework.ClusterScoreList,
 	placement *policyv1alpha1.Placement, spec *workv1alpha2.ResourceBindingSpec) ([]*clusterv1alpha1.Cluster, error) {
-	startTime := time.Now()
-	defer metrics.ScheduleStep(metrics.ScheduleStepSelect, startTime)
-
-	groupClustersInfo := spreadconstraint.GroupClustersWithScore(clustersScore, placement, spec, calAvailableReplicas)
-	return spreadconstraint.SelectBestClusters(placement, groupClustersInfo, spec.Replicas)
+	return SelectClusters(clustersScore, placement, spec)
 }
 
-func (g *genericScheduler) assignReplicas(
-	clusters []*clusterv1alpha1.Cluster,
-	placement *policyv1alpha1.Placement,
-	object *workv1alpha2.ResourceBindingSpec,
-) ([]workv1alpha2.TargetCluster, error) {
-	startTime := time.Now()
-	defer metrics.ScheduleStep(metrics.ScheduleStepAssignReplicas, startTime)
-
-	if len(clusters) == 0 {
-		return nil, fmt.Errorf("no clusters available to schedule")
-	}
-
-	if object.Replicas > 0 {
-		state := newAssignState(clusters, placement, object)
-		assignFunc, ok := assignFuncMap[state.strategyType]
-		if !ok {
-			// should never happen at present
-			return nil, fmt.Errorf("unsupported replica scheduling strategy, replicaSchedulingType: %s, replicaDivisionPreference: %s, "+
-				"please try another scheduling strategy", placement.ReplicaSchedulingType(), placement.ReplicaScheduling.ReplicaDivisionPreference)
-		}
-		assignResults, err := assignFunc(state)
-		if err != nil {
-			return nil, err
-		}
-		return removeZeroReplicasCluster(assignResults), nil
-	}
-
-	// If not workload, assign all clusters without considering replicas.
-	targetClusters := make([]workv1alpha2.TargetCluster, len(clusters))
-	for i, cluster := range clusters {
-		targetClusters[i] = workv1alpha2.TargetCluster{Name: cluster.Name}
-	}
-	return targetClusters, nil
+func (g *genericScheduler) assignReplicas(clusters []*clusterv1alpha1.Cluster, placement *policyv1alpha1.Placement,
+	object *workv1alpha2.ResourceBindingSpec) ([]workv1alpha2.TargetCluster, error) {
+	return AssignReplicas(clusters, placement, object)
 }
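Usage note: with SelectClusters and AssignReplicas exported from pkg/scheduler/core, callers outside genericScheduler can reuse the two scheduling steps directly. The sketch below is illustrative only and is not part of this patch: the consumer package name and the Schedule helper are assumptions, while the calls to core.SelectClusters and core.AssignReplicas use the signatures introduced above.

// Package myscheduler is a hypothetical out-of-tree consumer of the helpers
// exported by this patch; nothing in it ships with Karmada.
package myscheduler

import (
	"fmt"

	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
	workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
	"github.com/karmada-io/karmada/pkg/scheduler/core"
	"github.com/karmada-io/karmada/pkg/scheduler/framework"
)

// Schedule chains the two exported steps: select target clusters from the
// scored candidates, then divide the desired replicas among them.
func Schedule(scores framework.ClusterScoreList, placement *policyv1alpha1.Placement,
	spec *workv1alpha2.ResourceBindingSpec) ([]workv1alpha2.TargetCluster, error) {
	clusters, err := core.SelectClusters(scores, placement, spec)
	if err != nil {
		return nil, fmt.Errorf("failed to select clusters: %w", err)
	}
	return core.AssignReplicas(clusters, placement, spec)
}

As in genericScheduler, the caller is still responsible for producing the scored cluster list (framework.ClusterScoreList) from its own filter/score plugins before handing it to SelectClusters.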