diff --git a/cluster-autoscaler/cloudprovider/alicloud/alicloud_auto_scaling_group.go b/cluster-autoscaler/cloudprovider/alicloud/alicloud_auto_scaling_group.go index dd0c9a7eb478..4ecb64c49b56 100644 --- a/cluster-autoscaler/cloudprovider/alicloud/alicloud_auto_scaling_group.go +++ b/cluster-autoscaler/cloudprovider/alicloud/alicloud_auto_scaling_group.go @@ -22,8 +22,8 @@ import ( apiv1 "k8s.io/api/core/v1" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/config" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" klog "k8s.io/klog/v2" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) // Asg implements NodeGroup interface. @@ -179,7 +179,7 @@ func (asg *Asg) Nodes() ([]cloudprovider.Instance, error) { } // TemplateNodeInfo returns a node template for this node group. -func (asg *Asg) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) { +func (asg *Asg) TemplateNodeInfo() (*framework.NodeInfo, error) { template, err := asg.manager.getAsgTemplate(asg.id) if err != nil { return nil, err @@ -191,8 +191,7 @@ func (asg *Asg) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) { return nil, err } - nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(asg.id)) - nodeInfo.SetNode(node) + nodeInfo := framework.NewNodeInfo(node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(asg.id)}) return nodeInfo, nil } diff --git a/cluster-autoscaler/cloudprovider/aws/aws_cloud_provider.go b/cluster-autoscaler/cloudprovider/aws/aws_cloud_provider.go index f4044aeb5554..118f5e91e88d 100644 --- a/cluster-autoscaler/cloudprovider/aws/aws_cloud_provider.go +++ b/cluster-autoscaler/cloudprovider/aws/aws_cloud_provider.go @@ -27,10 +27,10 @@ import ( "k8s.io/apimachinery/pkg/api/resource" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/config" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/utils/errors" "k8s.io/autoscaler/cluster-autoscaler/utils/gpu" klog "k8s.io/klog/v2" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) const ( @@ -392,7 +392,7 @@ func (ng *AwsNodeGroup) Nodes() ([]cloudprovider.Instance, error) { } // TemplateNodeInfo returns a node template for this node group. 
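Across every provider touched below, the change follows one pattern: the scheduler package's two-step construction (NewNodeInfo with the static pods, then SetNode) is replaced by the autoscaler simulator framework's single constructor that takes the node first. A minimal sketch of that pattern, assuming the imports the diff already adds (cloudprovider, framework) and a hypothetical exampleNodeGroup with a template-building helper; the second NewNodeInfo argument is assumed to carry DRA ResourceSlices and is nil where templates do not model them:

func (ng *exampleNodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
	node, err := ng.buildNodeFromTemplate() // hypothetical helper, stands in for each provider's template logic
	if err != nil {
		return nil, err
	}
	// Old pattern (scheduler framework):
	//   nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(ng.Id()))
	//   nodeInfo.SetNode(node)
	// New pattern: node first, then (assumed) DRA ResourceSlices, then the static
	// pods expected on a fresh node, each wrapped in a framework.PodInfo.
	nodeInfo := framework.NewNodeInfo(
		node,
		nil, // assumed: DRA ResourceSlices, not modeled by these templates
		&framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(ng.Id())},
	)
	return nodeInfo, nil
}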
-func (ng *AwsNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) { +func (ng *AwsNodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) { template, err := ng.awsManager.getAsgTemplate(ng.asg) if err != nil { return nil, err @@ -403,8 +403,7 @@ func (ng *AwsNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) return nil, err } - nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(ng.asg.Name)) - nodeInfo.SetNode(node) + nodeInfo := framework.NewNodeInfo(node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(ng.asg.Name)}) return nodeInfo, nil } diff --git a/cluster-autoscaler/cloudprovider/azure/azure_agent_pool.go b/cluster-autoscaler/cloudprovider/azure/azure_agent_pool.go index 7fcc60e286a6..c69ff8f07083 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_agent_pool.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_agent_pool.go @@ -34,8 +34,8 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/config" "k8s.io/autoscaler/cluster-autoscaler/config/dynamic" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" klog "k8s.io/klog/v2" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) const ( @@ -477,7 +477,7 @@ func (as *AgentPool) Debug() string { } // TemplateNodeInfo returns a node template for this agent pool. -func (as *AgentPool) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) { +func (as *AgentPool) TemplateNodeInfo() (*framework.NodeInfo, error) { return nil, cloudprovider.ErrNotImplemented } diff --git a/cluster-autoscaler/cloudprovider/azure/azure_scale_set.go b/cluster-autoscaler/cloudprovider/azure/azure_scale_set.go index 68a64c672035..ad109425a178 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_scale_set.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_scale_set.go @@ -27,8 +27,8 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/config" "k8s.io/autoscaler/cluster-autoscaler/config/dynamic" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" klog "k8s.io/klog/v2" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" "sigs.k8s.io/cloud-provider-azure/pkg/retry" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" @@ -627,7 +627,7 @@ func (scaleSet *ScaleSet) Debug() string { } // TemplateNodeInfo returns a node template for this scale set. 
-func (scaleSet *ScaleSet) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) { +func (scaleSet *ScaleSet) TemplateNodeInfo() (*framework.NodeInfo, error) { template, err := scaleSet.getVMSSFromCache() if err != nil { return nil, err @@ -641,8 +641,7 @@ func (scaleSet *ScaleSet) TemplateNodeInfo() (*schedulerframework.NodeInfo, erro return nil, err } - nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(scaleSet.Name)) - nodeInfo.SetNode(node) + nodeInfo := framework.NewNodeInfo(node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(scaleSet.Name)}) return nodeInfo, nil } diff --git a/cluster-autoscaler/cloudprovider/azure/azure_scale_set_test.go b/cluster-autoscaler/cloudprovider/azure/azure_scale_set_test.go index b3bd5fca5b5c..78c38fbbb78a 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_scale_set_test.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_scale_set_test.go @@ -1120,7 +1120,7 @@ func TestTemplateNodeInfo(t *testing.T) { nodeInfo, err := asg.TemplateNodeInfo() assert.NoError(t, err) assert.NotNil(t, nodeInfo) - assert.NotEmpty(t, nodeInfo.Pods) + assert.NotEmpty(t, nodeInfo.Pods()) }) // Properly testing dynamic SKU list through skewer is not possible, @@ -1143,7 +1143,7 @@ func TestTemplateNodeInfo(t *testing.T) { assert.Equal(t, *nodeInfo.Node().Status.Capacity.Memory(), *resource.NewQuantity(3*1024*1024, resource.DecimalSI)) assert.NoError(t, err) assert.NotNil(t, nodeInfo) - assert.NotEmpty(t, nodeInfo.Pods) + assert.NotEmpty(t, nodeInfo.Pods()) }) t.Run("Checking static workflow if dynamic fails", func(t *testing.T) { @@ -1164,7 +1164,7 @@ func TestTemplateNodeInfo(t *testing.T) { assert.Equal(t, *nodeInfo.Node().Status.Capacity.Memory(), *resource.NewQuantity(3*1024*1024, resource.DecimalSI)) assert.NoError(t, err) assert.NotNil(t, nodeInfo) - assert.NotEmpty(t, nodeInfo.Pods) + assert.NotEmpty(t, nodeInfo.Pods()) }) t.Run("Fails to find vmss instance information using static and dynamic workflow, instance not supported", func(t *testing.T) { @@ -1198,7 +1198,7 @@ func TestTemplateNodeInfo(t *testing.T) { assert.Equal(t, *nodeInfo.Node().Status.Capacity.Memory(), *resource.NewQuantity(3*1024*1024, resource.DecimalSI)) assert.NoError(t, err) assert.NotNil(t, nodeInfo) - assert.NotEmpty(t, nodeInfo.Pods) + assert.NotEmpty(t, nodeInfo.Pods()) }) t.Run("Checking static-only workflow with built-in SKU list", func(t *testing.T) { @@ -1207,7 +1207,7 @@ func TestTemplateNodeInfo(t *testing.T) { nodeInfo, err := asg.TemplateNodeInfo() assert.NoError(t, err) assert.NotNil(t, nodeInfo) - assert.NotEmpty(t, nodeInfo.Pods) + assert.NotEmpty(t, nodeInfo.Pods()) }) } diff --git a/cluster-autoscaler/cloudprovider/azure/azure_vms_pool.go b/cluster-autoscaler/cloudprovider/azure/azure_vms_pool.go index c86eff66ce6e..b9387d4aebda 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_vms_pool.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_vms_pool.go @@ -24,7 +24,7 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/config" "k8s.io/autoscaler/cluster-autoscaler/config/dynamic" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" ) // VMsPool is single instance VM pool @@ -169,7 +169,7 @@ func (agentPool *VMsPool) Nodes() ([]cloudprovider.Instance, error) { } // TemplateNodeInfo is not implemented. 
-func (agentPool *VMsPool) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) { +func (agentPool *VMsPool) TemplateNodeInfo() (*framework.NodeInfo, error) { return nil, cloudprovider.ErrNotImplemented } diff --git a/cluster-autoscaler/cloudprovider/baiducloud/baiducloud_cloud_provider.go b/cluster-autoscaler/cloudprovider/baiducloud/baiducloud_cloud_provider.go index a38d2b86d46a..dfbd9c2095ec 100644 --- a/cluster-autoscaler/cloudprovider/baiducloud/baiducloud_cloud_provider.go +++ b/cluster-autoscaler/cloudprovider/baiducloud/baiducloud_cloud_provider.go @@ -27,10 +27,10 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/config" "k8s.io/autoscaler/cluster-autoscaler/config/dynamic" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/utils/errors" "k8s.io/autoscaler/cluster-autoscaler/utils/gpu" klog "k8s.io/klog/v2" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) const ( @@ -365,13 +365,13 @@ func (asg *Asg) Nodes() ([]cloudprovider.Instance, error) { return instances, nil } -// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty +// TemplateNodeInfo returns a framework.NodeInfo structure of an empty // (as if just started) node. This will be used in scale-up simulations to // predict what would a new node look like if a node group was expanded. The returned // NodeInfo is expected to have a fully populated Node object, with all of the labels, // capacity and allocatable information as well as all pods that are started on // the node by default, using manifest (most likely only kube-proxy). Implementation optional. -func (asg *Asg) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) { +func (asg *Asg) TemplateNodeInfo() (*framework.NodeInfo, error) { template, err := asg.baiducloudManager.getAsgTemplate(asg.Name) if err != nil { return nil, err @@ -380,8 +380,7 @@ func (asg *Asg) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) { if err != nil { return nil, err } - nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(asg.Name)) - nodeInfo.SetNode(node) + nodeInfo := framework.NewNodeInfo(node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(asg.Name)}) return nodeInfo, nil } diff --git a/cluster-autoscaler/cloudprovider/bizflycloud/bizflycloud_node_group.go b/cluster-autoscaler/cloudprovider/bizflycloud/bizflycloud_node_group.go index 7ca070fffbc0..5b4cea3b869a 100644 --- a/cluster-autoscaler/cloudprovider/bizflycloud/bizflycloud_node_group.go +++ b/cluster-autoscaler/cloudprovider/bizflycloud/bizflycloud_node_group.go @@ -26,7 +26,7 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/config" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" ) const ( @@ -183,14 +183,14 @@ func (n *NodeGroup) Nodes() ([]cloudprovider.Instance, error) { return toInstances(n.nodePool.Nodes), nil } -// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty +// TemplateNodeInfo returns a framework.NodeInfo structure of an empty // (as if just started) node. This will be used in scale-up simulations to // predict what would a new node look like if a node group was expanded. 
The // returned NodeInfo is expected to have a fully populated Node object, with // all of the labels, capacity and allocatable information as well as all pods // that are started on the node by default, using manifest (most likely only // kube-proxy). Implementation optional. -func (n *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) { +func (n *NodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) { return nil, cloudprovider.ErrNotImplemented } diff --git a/cluster-autoscaler/cloudprovider/brightbox/brightbox_node_group.go b/cluster-autoscaler/cloudprovider/brightbox/brightbox_node_group.go index 4b97f8779320..db184f7fd04b 100644 --- a/cluster-autoscaler/cloudprovider/brightbox/brightbox_node_group.go +++ b/cluster-autoscaler/cloudprovider/brightbox/brightbox_node_group.go @@ -31,6 +31,7 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/status" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/k8ssdk" "k8s.io/autoscaler/cluster-autoscaler/config" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" klog "k8s.io/klog/v2" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" @@ -239,13 +240,13 @@ func (ng *brightboxNodeGroup) Exist() bool { return err == nil } -// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty +// TemplateNodeInfo returns a framework.NodeInfo structure of an empty // (as if just started) node. This will be used in scale-up simulations to // predict what would a new node look like if a node group was expanded. The returned // NodeInfo is expected to have a fully populated Node object, with all of the labels, // capacity and allocatable information as well as all pods that are started on // the node by default, using manifest (most likely only kube-proxy). Implementation optional. 
-func (ng *brightboxNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) { +func (ng *brightboxNodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) { klog.V(4).Info("TemplateNodeInfo") klog.V(4).Infof("Looking for server type %q", ng.serverOptions.ServerType) serverType, err := ng.findServerType() @@ -268,8 +269,7 @@ func (ng *brightboxNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, Conditions: cloudprovider.BuildReadyConditions(), }, } - nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(ng.Id())) - nodeInfo.SetNode(&node) + nodeInfo := framework.NewNodeInfo(&node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(ng.Id())}) return nodeInfo, nil } diff --git a/cluster-autoscaler/cloudprovider/brightbox/brightbox_node_group_test.go b/cluster-autoscaler/cloudprovider/brightbox/brightbox_node_group_test.go index 4cd6309d18ce..7a4e33cd17b5 100644 --- a/cluster-autoscaler/cloudprovider/brightbox/brightbox_node_group_test.go +++ b/cluster-autoscaler/cloudprovider/brightbox/brightbox_node_group_test.go @@ -316,7 +316,7 @@ func TestTemplateNodeInfo(t *testing.T) { Return(fakeServerTypezx45f(), nil) obj, err := makeFakeNodeGroup(t, testclient).TemplateNodeInfo() require.NoError(t, err) - assert.Equal(t, fakeResource(), obj.Allocatable) + assert.Equal(t, fakeResource(), obj.ToScheduler().Allocatable) } func TestNodeGroupErrors(t *testing.T) { diff --git a/cluster-autoscaler/cloudprovider/cherryservers/cherry_manager.go b/cluster-autoscaler/cloudprovider/cherryservers/cherry_manager.go index 1cfd02cfa5d9..fb74b650c039 100644 --- a/cluster-autoscaler/cloudprovider/cherryservers/cherry_manager.go +++ b/cluster-autoscaler/cloudprovider/cherryservers/cherry_manager.go @@ -23,7 +23,7 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/config" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" ) const ( @@ -45,7 +45,7 @@ type cherryManager interface { getNodes(nodegroup string) ([]string, error) getNodeNames(nodegroup string) ([]string, error) deleteNodes(nodegroup string, nodes []NodeRef, updatedNodeCount int) error - templateNodeInfo(nodegroup string) (*schedulerframework.NodeInfo, error) + templateNodeInfo(nodegroup string) (*framework.NodeInfo, error) NodeGroupForNode(labels map[string]string, nodeId string) (string, error) } diff --git a/cluster-autoscaler/cloudprovider/cherryservers/cherry_manager_rest.go b/cluster-autoscaler/cloudprovider/cherryservers/cherry_manager_rest.go index 0b07c1c07f38..3f5f9a04bd63 100644 --- a/cluster-autoscaler/cloudprovider/cherryservers/cherry_manager_rest.go +++ b/cluster-autoscaler/cloudprovider/cherryservers/cherry_manager_rest.go @@ -42,10 +42,10 @@ import ( utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/config" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/utils/gpu" "k8s.io/autoscaler/cluster-autoscaler/version" klog "k8s.io/klog/v2" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) const ( @@ -618,7 +618,7 @@ func BuildGenericLabels(nodegroup string, plan *Plan) map[string]string { // templateNodeInfo returns a NodeInfo with a node template based on the Cherry Servers plan // that is used to create nodes in a given node group. 
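The brightbox test hunk above (and the rancher test hunk later in this diff) stops reading Allocatable directly off the returned value and goes through ToScheduler() instead. A short test-side sketch of that pattern, assuming ToScheduler() exposes the wrapped scheduler NodeInfo and a testify-based test like the surrounding files; expectedResources is a placeholder:

nodeInfo, err := nodeGroup.TemplateNodeInfo()
require.NoError(t, err)
// Resource summaries such as Allocatable now live on the wrapped scheduler
// object, reached via ToScheduler() rather than as fields on the wrapper.
alloc := nodeInfo.ToScheduler().Allocatable
assert.Equal(t, expectedResources.Cpu().MilliValue(), alloc.MilliCPU)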
-func (mgr *cherryManagerRest) templateNodeInfo(nodegroup string) (*schedulerframework.NodeInfo, error) { +func (mgr *cherryManagerRest) templateNodeInfo(nodegroup string) (*framework.NodeInfo, error) { node := apiv1.Node{} nodeName := fmt.Sprintf("%s-asg-%d", nodegroup, rand.Int63()) node.ObjectMeta = metav1.ObjectMeta{ @@ -664,8 +664,7 @@ func (mgr *cherryManagerRest) templateNodeInfo(nodegroup string) (*schedulerfram // GenericLabels node.Labels = cloudprovider.JoinStringMaps(node.Labels, BuildGenericLabels(nodegroup, cherryPlan)) - nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(nodegroup)) - nodeInfo.SetNode(&node) + nodeInfo := framework.NewNodeInfo(&node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(nodegroup)}) return nodeInfo, nil } diff --git a/cluster-autoscaler/cloudprovider/cherryservers/cherry_node_group.go b/cluster-autoscaler/cloudprovider/cherryservers/cherry_node_group.go index 4add7c862d7e..33dfd8bd181a 100644 --- a/cluster-autoscaler/cloudprovider/cherryservers/cherry_node_group.go +++ b/cluster-autoscaler/cloudprovider/cherryservers/cherry_node_group.go @@ -25,8 +25,8 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/config" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" klog "k8s.io/klog/v2" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) const ( @@ -269,7 +269,7 @@ func (ng *cherryNodeGroup) Nodes() ([]cloudprovider.Instance, error) { } // TemplateNodeInfo returns a node template for this node group. -func (ng *cherryNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) { +func (ng *cherryNodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) { return ng.cherryManager.templateNodeInfo(ng.id) } diff --git a/cluster-autoscaler/cloudprovider/civo/civo_node_group.go b/cluster-autoscaler/cloudprovider/civo/civo_node_group.go index d20a8c4d5ebe..ae3ddf34f430 100644 --- a/cluster-autoscaler/cloudprovider/civo/civo_node_group.go +++ b/cluster-autoscaler/cloudprovider/civo/civo_node_group.go @@ -28,9 +28,9 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" autoscaler "k8s.io/autoscaler/cluster-autoscaler/config" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/utils/gpu" "k8s.io/klog/v2" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) // NodeGroup implements cloudprovider.NodeGroup interface. NodeGroup contains @@ -208,15 +208,13 @@ func (n *NodeGroup) Nodes() ([]cloudprovider.Instance, error) { // all of the labels, capacity and allocatable information as well as all pods // that are started on the node by default, using manifest (most likely only // kube-proxy). Implementation optional. 
-func (n *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) { +func (n *NodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) { node, err := n.buildNodeFromTemplate(n.Id(), n.nodeTemplate) if err != nil { return nil, fmt.Errorf("failed to build node from template") } - nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(n.Id())) - nodeInfo.SetNode(node) - + nodeInfo := framework.NewNodeInfo(node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(n.Id())}) return nodeInfo, nil } diff --git a/cluster-autoscaler/cloudprovider/civo/civo_node_group_test.go b/cluster-autoscaler/cloudprovider/civo/civo_node_group_test.go index e6101cadc4f8..36be379345a4 100644 --- a/cluster-autoscaler/cloudprovider/civo/civo_node_group_test.go +++ b/cluster-autoscaler/cloudprovider/civo/civo_node_group_test.go @@ -540,7 +540,7 @@ func TestNodeGroup_TemplateNodeInfo(t *testing.T) { nodeInfo, err := ng.TemplateNodeInfo() assert.NoError(t, err) - assert.Equal(t, len(nodeInfo.Pods), 1, "should have one template pod") + assert.Equal(t, len(nodeInfo.Pods()), 1, "should have one template pod") assert.Equal(t, nodeInfo.Node().Status.Capacity.Cpu().ToDec().Value(), int64(1000), "should match cpu capacity ") assert.Equal(t, nodeInfo.Node().Status.Capacity.Memory().ToDec().Value(), int64(1073741824), "should match memory capacity") assert.Equal(t, nodeInfo.Node().Status.Capacity.StorageEphemeral().ToDec().Value(), int64(21474836480), "should match epheral storage capacity") diff --git a/cluster-autoscaler/cloudprovider/cloud_provider.go b/cluster-autoscaler/cloudprovider/cloud_provider.go index 33719ac63f31..171a89cfab35 100644 --- a/cluster-autoscaler/cloudprovider/cloud_provider.go +++ b/cluster-autoscaler/cloudprovider/cloud_provider.go @@ -23,8 +23,8 @@ import ( apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/autoscaler/cluster-autoscaler/config" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/utils/errors" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) const ( @@ -214,13 +214,13 @@ type NodeGroup interface { // This list should include also instances that might have not become a kubernetes node yet. Nodes() ([]Instance, error) - // TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty + // TemplateNodeInfo returns a framework.NodeInfo structure of an empty // (as if just started) node. This will be used in scale-up simulations to // predict what would a new node look like if a node group was expanded. The returned // NodeInfo is expected to have a fully populated Node object, with all of the labels, // capacity and allocatable information as well as all pods that are started on // the node by default, using manifest (most likely only kube-proxy). Implementation optional. - TemplateNodeInfo() (*schedulerframework.NodeInfo, error) + TemplateNodeInfo() (*framework.NodeInfo, error) // Exist checks if the node group really exists on the cloud provider side. Allows to tell the // theoretical node group from the real one. Implementation required. 
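With the NodeGroup interface in cloud_provider.go now returning *framework.NodeInfo, callers read the template's pods through the Pods() accessor (a slice of *framework.PodInfo) rather than the scheduler type's exported Pods field, which is what the civo, azure, and kwok test hunks exercise. A small consumer-side sketch under those assumptions; templatePodNames is a hypothetical helper, not part of the diff:

func templatePodNames(ng cloudprovider.NodeGroup) ([]string, error) {
	nodeInfo, err := ng.TemplateNodeInfo()
	if err != nil {
		return nil, err
	}
	// Pods() returns the static pods attached at construction time (typically kube-proxy).
	names := make([]string, 0, len(nodeInfo.Pods()))
	for _, podInfo := range nodeInfo.Pods() {
		names = append(names, podInfo.Pod.Name)
	}
	return names, nil
}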
diff --git a/cluster-autoscaler/cloudprovider/cloudstack/cloudstack_node_group.go b/cluster-autoscaler/cloudprovider/cloudstack/cloudstack_node_group.go index 9181b5cd82b9..383587f30eb7 100644 --- a/cluster-autoscaler/cloudprovider/cloudstack/cloudstack_node_group.go +++ b/cluster-autoscaler/cloudprovider/cloudstack/cloudstack_node_group.go @@ -26,8 +26,8 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/utils/errors" apiv1 "k8s.io/api/core/v1" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" klog "k8s.io/klog/v2" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) // asg implements NodeGroup interface. @@ -168,7 +168,7 @@ func (asg *asg) Delete() error { } // TemplateNodeInfo returns a node template for this node group. -func (asg *asg) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) { +func (asg *asg) TemplateNodeInfo() (*framework.NodeInfo, error) { return nil, cloudprovider.ErrNotImplemented } diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup.go index 3c0c87487f12..481039024726 100644 --- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup.go +++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup.go @@ -29,7 +29,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/config" @@ -250,7 +250,7 @@ func (ng *nodegroup) Nodes() ([]cloudprovider.Instance, error) { // allocatable information as well as all pods that are started on the // node by default, using manifest (most likely only kube-proxy). // Implementation optional. -func (ng *nodegroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) { +func (ng *nodegroup) TemplateNodeInfo() (*framework.NodeInfo, error) { if !ng.scalableResource.CanScaleFromZero() { return nil, cloudprovider.ErrNotImplemented } @@ -278,9 +278,7 @@ func (ng *nodegroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) { return nil, err } - nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(ng.scalableResource.Name())) - nodeInfo.SetNode(&node) - + nodeInfo := framework.NewNodeInfo(&node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(ng.scalableResource.Name())}) return nodeInfo, nil } diff --git a/cluster-autoscaler/cloudprovider/digitalocean/digitalocean_node_group.go b/cluster-autoscaler/cloudprovider/digitalocean/digitalocean_node_group.go index 421cdea1451c..3f27d0e67180 100644 --- a/cluster-autoscaler/cloudprovider/digitalocean/digitalocean_node_group.go +++ b/cluster-autoscaler/cloudprovider/digitalocean/digitalocean_node_group.go @@ -26,7 +26,7 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/config" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" ) const ( @@ -200,14 +200,14 @@ func (n *NodeGroup) Nodes() ([]cloudprovider.Instance, error) { return toInstances(n.nodePool.Nodes), nil } -// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty +// TemplateNodeInfo returns a framework.NodeInfo structure of an empty // (as if just started) node. 
This will be used in scale-up simulations to // predict what would a new node look like if a node group was expanded. The // returned NodeInfo is expected to have a fully populated Node object, with // all of the labels, capacity and allocatable information as well as all pods // that are started on the node by default, using manifest (most likely only // kube-proxy). Implementation optional. -func (n *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) { +func (n *NodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) { return nil, cloudprovider.ErrNotImplemented } diff --git a/cluster-autoscaler/cloudprovider/equinixmetal/manager.go b/cluster-autoscaler/cloudprovider/equinixmetal/manager.go index f403b04b8907..ceb673da1208 100644 --- a/cluster-autoscaler/cloudprovider/equinixmetal/manager.go +++ b/cluster-autoscaler/cloudprovider/equinixmetal/manager.go @@ -23,7 +23,7 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/config" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" ) const ( @@ -45,7 +45,7 @@ type equinixMetalManager interface { getNodes(nodegroup string) ([]string, error) getNodeNames(nodegroup string) ([]string, error) deleteNodes(nodegroup string, nodes []NodeRef, updatedNodeCount int) error - templateNodeInfo(nodegroup string) (*schedulerframework.NodeInfo, error) + templateNodeInfo(nodegroup string) (*framework.NodeInfo, error) NodeGroupForNode(labels map[string]string, nodeId string) (string, error) } diff --git a/cluster-autoscaler/cloudprovider/equinixmetal/manager_rest.go b/cluster-autoscaler/cloudprovider/equinixmetal/manager_rest.go index 9935c3462fef..351f0be1d09d 100644 --- a/cluster-autoscaler/cloudprovider/equinixmetal/manager_rest.go +++ b/cluster-autoscaler/cloudprovider/equinixmetal/manager_rest.go @@ -38,10 +38,10 @@ import ( utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/config" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/utils/gpu" "k8s.io/autoscaler/cluster-autoscaler/version" klog "k8s.io/klog/v2" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) const ( @@ -689,7 +689,7 @@ func BuildGenericLabels(nodegroup string, instanceType string) map[string]string // templateNodeInfo returns a NodeInfo with a node template based on the equinix metal plan // that is used to create nodes in a given node group. 
-func (mgr *equinixMetalManagerRest) templateNodeInfo(nodegroup string) (*schedulerframework.NodeInfo, error) { +func (mgr *equinixMetalManagerRest) templateNodeInfo(nodegroup string) (*framework.NodeInfo, error) { node := apiv1.Node{} nodeName := fmt.Sprintf("%s-asg-%d", nodegroup, rand.Int63()) node.ObjectMeta = metav1.ObjectMeta{ @@ -716,8 +716,7 @@ func (mgr *equinixMetalManagerRest) templateNodeInfo(nodegroup string) (*schedul // GenericLabels node.Labels = cloudprovider.JoinStringMaps(node.Labels, BuildGenericLabels(nodegroup, mgr.getNodePoolDefinition(nodegroup).plan)) - nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(nodegroup)) - nodeInfo.SetNode(&node) + nodeInfo := framework.NewNodeInfo(&node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(nodegroup)}) return nodeInfo, nil } diff --git a/cluster-autoscaler/cloudprovider/equinixmetal/node_group.go b/cluster-autoscaler/cloudprovider/equinixmetal/node_group.go index b260f9102523..4be962eecbb7 100644 --- a/cluster-autoscaler/cloudprovider/equinixmetal/node_group.go +++ b/cluster-autoscaler/cloudprovider/equinixmetal/node_group.go @@ -24,8 +24,8 @@ import ( apiv1 "k8s.io/api/core/v1" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/config" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" klog "k8s.io/klog/v2" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) // equinixMetalNodeGroup implements NodeGroup interface from cluster-autoscaler/cloudprovider. @@ -260,7 +260,7 @@ func (ng *equinixMetalNodeGroup) Nodes() ([]cloudprovider.Instance, error) { } // TemplateNodeInfo returns a node template for this node group. -func (ng *equinixMetalNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) { +func (ng *equinixMetalNodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) { return ng.equinixMetalManager.templateNodeInfo(ng.id) } diff --git a/cluster-autoscaler/cloudprovider/exoscale/exoscale_node_group_instance_pool.go b/cluster-autoscaler/cloudprovider/exoscale/exoscale_node_group_instance_pool.go index 5a001ad35c39..bc82deb903f8 100644 --- a/cluster-autoscaler/cloudprovider/exoscale/exoscale_node_group_instance_pool.go +++ b/cluster-autoscaler/cloudprovider/exoscale/exoscale_node_group_instance_pool.go @@ -26,7 +26,7 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" egoscale "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/exoscale/internal/github.com/exoscale/egoscale/v2" "k8s.io/autoscaler/cluster-autoscaler/config" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" ) // instancePoolNodeGroup implements cloudprovider.NodeGroup interface for Exoscale Instance Pools. @@ -170,13 +170,13 @@ func (n *instancePoolNodeGroup) Nodes() ([]cloudprovider.Instance, error) { return nodes, nil } -// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty +// TemplateNodeInfo returns a framework.NodeInfo structure of an empty // (as if just started) node. This will be used in scale-up simulations to // predict what would a new node look like if a node group was expanded. The returned // NodeInfo is expected to have a fully populated Node object, with all of the labels, // capacity and allocatable information as well as all pods that are started on // the node by default, using manifest (most likely only kube-proxy). Implementation optional. 
-func (n *instancePoolNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) { +func (n *instancePoolNodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) { return nil, cloudprovider.ErrNotImplemented } diff --git a/cluster-autoscaler/cloudprovider/exoscale/exoscale_node_group_sks_nodepool.go b/cluster-autoscaler/cloudprovider/exoscale/exoscale_node_group_sks_nodepool.go index a78f6b8e25a5..9c0d3131438d 100644 --- a/cluster-autoscaler/cloudprovider/exoscale/exoscale_node_group_sks_nodepool.go +++ b/cluster-autoscaler/cloudprovider/exoscale/exoscale_node_group_sks_nodepool.go @@ -25,7 +25,7 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" egoscale "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/exoscale/internal/github.com/exoscale/egoscale/v2" "k8s.io/autoscaler/cluster-autoscaler/config" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" ) const ( @@ -187,13 +187,13 @@ func (n *sksNodepoolNodeGroup) Nodes() ([]cloudprovider.Instance, error) { return nodes, nil } -// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty +// TemplateNodeInfo returns a framework.NodeInfo structure of an empty // (as if just started) node. This will be used in scale-up simulations to // predict what would a new node look like if a node group was expanded. The returned // NodeInfo is expected to have a fully populated Node object, with all of the labels, // capacity and allocatable information as well as all pods that are started on // the node by default, using manifest (most likely only kube-proxy). Implementation optional. -func (n *sksNodepoolNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) { +func (n *sksNodepoolNodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) { return nil, cloudprovider.ErrNotImplemented } diff --git a/cluster-autoscaler/cloudprovider/externalgrpc/externalgrpc_node_group.go b/cluster-autoscaler/cloudprovider/externalgrpc/externalgrpc_node_group.go index c6629666d6e0..52a467fd24d4 100644 --- a/cluster-autoscaler/cloudprovider/externalgrpc/externalgrpc_node_group.go +++ b/cluster-autoscaler/cloudprovider/externalgrpc/externalgrpc_node_group.go @@ -28,8 +28,8 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/externalgrpc/protos" "k8s.io/autoscaler/cluster-autoscaler/config" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" klog "k8s.io/klog/v2" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) // NodeGroup implements cloudprovider.NodeGroup interface. NodeGroup contains @@ -44,7 +44,7 @@ type NodeGroup struct { grpcTimeout time.Duration mutex sync.Mutex - nodeInfo **schedulerframework.NodeInfo // used to cache NodeGroupTemplateNodeInfo() grpc calls + nodeInfo **framework.NodeInfo // used to cache NodeGroupTemplateNodeInfo() grpc calls } // MaxSize returns maximum size of the node group. @@ -188,7 +188,7 @@ func (n *NodeGroup) Nodes() ([]cloudprovider.Instance, error) { return instances, nil } -// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty +// TemplateNodeInfo returns a framework.NodeInfo structure of an empty // (as if just started) node. This will be used in scale-up simulations to // predict what would a new node look like if a node group was expanded. 
The // returned NodeInfo is expected to have a fully populated Node object, with @@ -200,7 +200,7 @@ func (n *NodeGroup) Nodes() ([]cloudprovider.Instance, error) { // complex approach and does not cover all the scenarios. For the sake of simplicity, // the `nodeInfo` is defined as a Kubernetes `k8s.io.api.core.v1.Node` type // where the system could still extract certain info about the node. -func (n *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) { +func (n *NodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) { n.mutex.Lock() defer n.mutex.Unlock() @@ -224,11 +224,10 @@ func (n *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) { } pbNodeInfo := res.GetNodeInfo() if pbNodeInfo == nil { - n.nodeInfo = new(*schedulerframework.NodeInfo) + n.nodeInfo = new(*framework.NodeInfo) return nil, nil } - nodeInfo := schedulerframework.NewNodeInfo() - nodeInfo.SetNode(pbNodeInfo) + nodeInfo := framework.NewNodeInfo(pbNodeInfo, nil) n.nodeInfo = &nodeInfo return nodeInfo, nil } diff --git a/cluster-autoscaler/cloudprovider/gce/gce_cloud_provider.go b/cluster-autoscaler/cloudprovider/gce/gce_cloud_provider.go index 7a7b5b42e308..d9cb61695405 100644 --- a/cluster-autoscaler/cloudprovider/gce/gce_cloud_provider.go +++ b/cluster-autoscaler/cloudprovider/gce/gce_cloud_provider.go @@ -26,10 +26,10 @@ import ( "k8s.io/apimachinery/pkg/api/resource" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/config" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/utils/errors" "k8s.io/autoscaler/cluster-autoscaler/utils/gpu" klog "k8s.io/klog/v2" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) const ( @@ -361,13 +361,12 @@ func (mig *gceMig) GetOptions(defaults config.NodeGroupAutoscalingOptions) (*con } // TemplateNodeInfo returns a node template for this node group. -func (mig *gceMig) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) { +func (mig *gceMig) TemplateNodeInfo() (*framework.NodeInfo, error) { node, err := mig.gceManager.GetMigTemplateNode(mig) if err != nil { return nil, err } - nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(mig.Id())) - nodeInfo.SetNode(node) + nodeInfo := framework.NewNodeInfo(node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(mig.Id())}) return nodeInfo, nil } diff --git a/cluster-autoscaler/cloudprovider/hetzner/hetzner_node_group.go b/cluster-autoscaler/cloudprovider/hetzner/hetzner_node_group.go index 88cac1bc7696..6dffa1d972d9 100644 --- a/cluster-autoscaler/cloudprovider/hetzner/hetzner_node_group.go +++ b/cluster-autoscaler/cloudprovider/hetzner/hetzner_node_group.go @@ -31,8 +31,8 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/hetzner/hcloud-go/hcloud" "k8s.io/autoscaler/cluster-autoscaler/config" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/klog/v2" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) // hetznerNodeGroup implements cloudprovider.NodeGroup interface. hetznerNodeGroup contains @@ -251,14 +251,14 @@ func (n *hetznerNodeGroup) Nodes() ([]cloudprovider.Instance, error) { return instances, nil } -// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty +// TemplateNodeInfo returns a framework.NodeInfo structure of an empty // (as if just started) node. 
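The externalgrpc provider keeps caching the template in a double pointer (**framework.NodeInfo) so that "no gRPC call made yet" (outer pointer nil) stays distinguishable from "call made, provider returned no template" (outer pointer set, inner value nil). A hypothetical reading of the cached path, with locking elided; the actual check sits outside the hunks shown here:

// Sketch only: assumes n.nodeInfo is the **framework.NodeInfo field from the hunk above.
if n.nodeInfo != nil {
	// A NodeGroupTemplateNodeInfo call already happened; *n.nodeInfo may legitimately
	// be nil when the remote provider declined to return a template.
	return *n.nodeInfo, nil
}
// Otherwise issue the gRPC call, then cache either the built framework.NodeInfo
// or an explicit empty entry via new(*framework.NodeInfo), as in the hunk above.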
This will be used in scale-up simulations to // predict what would a new node look like if a node group was expanded. The // returned NodeInfo is expected to have a fully populated Node object, with // all of the labels, capacity and allocatable information as well as all pods // that are started on the node by default, using manifest (most likely only // kube-proxy). Implementation optional. -func (n *hetznerNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) { +func (n *hetznerNodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) { resourceList, err := getMachineTypeResourceList(n.manager, n.instanceType) if err != nil { return nil, fmt.Errorf("failed to create resource list for node group %s error: %v", n.id, err) @@ -297,9 +297,7 @@ func (n *hetznerNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, err } } - nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(n.id)) - nodeInfo.SetNode(&node) - + nodeInfo := framework.NewNodeInfo(&node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(n.id)}) return nodeInfo, nil } diff --git a/cluster-autoscaler/cloudprovider/huaweicloud/huaweicloud_auto_scaling_group.go b/cluster-autoscaler/cloudprovider/huaweicloud/huaweicloud_auto_scaling_group.go index 83554edc4168..9440345ceab7 100644 --- a/cluster-autoscaler/cloudprovider/huaweicloud/huaweicloud_auto_scaling_group.go +++ b/cluster-autoscaler/cloudprovider/huaweicloud/huaweicloud_auto_scaling_group.go @@ -26,10 +26,10 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" huaweicloudsdkasmodel "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/huaweicloud/huaweicloud-sdk-go-v3/services/as/v1/model" "k8s.io/autoscaler/cluster-autoscaler/config" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/clientcmd" "k8s.io/klog/v2" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) // AutoScalingGroup represents a HuaweiCloud's 'Auto Scaling Group' which also can be treated as a node group. @@ -180,13 +180,13 @@ func (asg *AutoScalingGroup) Nodes() ([]cloudprovider.Instance, error) { return instances, nil } -// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty +// TemplateNodeInfo returns a framework.NodeInfo structure of an empty // (as if just started) node. This will be used in scale-up simulations to // predict what would a new node look like if a node group was expanded. The returned // NodeInfo is expected to have a fully populated Node object, with all of the labels, // capacity and allocatable information as well as all pods that are started on // the node by default, using manifest (most likely only kube-proxy). Implementation optional. 
-func (asg *AutoScalingGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) { +func (asg *AutoScalingGroup) TemplateNodeInfo() (*framework.NodeInfo, error) { template, err := asg.cloudServiceManager.getAsgTemplate(asg.groupID) if err != nil { return nil, err @@ -195,8 +195,7 @@ func (asg *AutoScalingGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, e if err != nil { return nil, err } - nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(asg.groupName)) - nodeInfo.SetNode(node) + nodeInfo := framework.NewNodeInfo(node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(asg.groupName)}) return nodeInfo, nil } diff --git a/cluster-autoscaler/cloudprovider/ionoscloud/ionoscloud_cloud_provider.go b/cluster-autoscaler/cloudprovider/ionoscloud/ionoscloud_cloud_provider.go index 0795879e3a94..b6f50abae4f1 100644 --- a/cluster-autoscaler/cloudprovider/ionoscloud/ionoscloud_cloud_provider.go +++ b/cluster-autoscaler/cloudprovider/ionoscloud/ionoscloud_cloud_provider.go @@ -24,10 +24,10 @@ import ( "k8s.io/apimachinery/pkg/api/resource" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/config" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" caerrors "k8s.io/autoscaler/cluster-autoscaler/utils/errors" "k8s.io/autoscaler/cluster-autoscaler/utils/gpu" "k8s.io/klog/v2" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) const ( @@ -144,14 +144,14 @@ func (n *nodePool) Nodes() ([]cloudprovider.Instance, error) { return n.manager.GetInstancesForNodeGroup(n) } -// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty +// TemplateNodeInfo returns a framework.NodeInfo structure of an empty // (as if just started) node. This will be used in scale-up simulations to // predict what would a new node look like if a node group was expanded. The // returned NodeInfo is expected to have a fully populated Node object, with // all of the labels, capacity and allocatable information as well as all pods // that are started on the node by default, using manifest (most likely only // kube-proxy). Implementation optional. -func (n *nodePool) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) { +func (n *nodePool) TemplateNodeInfo() (*framework.NodeInfo, error) { return nil, cloudprovider.ErrNotImplemented } diff --git a/cluster-autoscaler/cloudprovider/kamatera/kamatera_node_group.go b/cluster-autoscaler/cloudprovider/kamatera/kamatera_node_group.go index 208767879182..130a3c6d7764 100644 --- a/cluster-autoscaler/cloudprovider/kamatera/kamatera_node_group.go +++ b/cluster-autoscaler/cloudprovider/kamatera/kamatera_node_group.go @@ -19,16 +19,17 @@ package kamatera import ( "context" "fmt" + "strconv" + "strings" + apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/config" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/client-go/kubernetes" "k8s.io/klog/v2" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" - "strconv" - "strings" ) // NodeGroup implements cloudprovider.NodeGroup interface. NodeGroup contains @@ -147,13 +148,13 @@ func (n *NodeGroup) Nodes() ([]cloudprovider.Instance, error) { return instances, nil } -// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty +// TemplateNodeInfo returns a framework.NodeInfo structure of an empty // (as if just started) node. 
This will be used in scale-up simulations to // predict what would a new node look like if a node group was expanded. The returned // NodeInfo is expected to have a fully populated Node object, with all of the labels, // capacity and allocatable information as well as all pods that are started on // the node by default, using manifest (most likely only kube-proxy). Implementation optional. -func (n *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) { +func (n *NodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) { resourceList, err := n.getResourceList() if err != nil { return nil, fmt.Errorf("failed to create resource list for node group %s error: %v", n.id, err) @@ -171,9 +172,7 @@ func (n *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) { node.Status.Allocatable = node.Status.Capacity node.Status.Conditions = cloudprovider.BuildReadyConditions() - nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(n.id)) - nodeInfo.SetNode(&node) - + nodeInfo := framework.NewNodeInfo(&node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(n.id)}) return nodeInfo, nil } diff --git a/cluster-autoscaler/cloudprovider/kubemark/kubemark_linux.go b/cluster-autoscaler/cloudprovider/kubemark/kubemark_linux.go index 2d6ffe6af7f9..f12b7f589da4 100644 --- a/cluster-autoscaler/cloudprovider/kubemark/kubemark_linux.go +++ b/cluster-autoscaler/cloudprovider/kubemark/kubemark_linux.go @@ -32,6 +32,7 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/config" "k8s.io/autoscaler/cluster-autoscaler/config/dynamic" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/utils/errors" "k8s.io/autoscaler/cluster-autoscaler/utils/gpu" "k8s.io/client-go/informers" @@ -39,7 +40,6 @@ import ( "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "k8s.io/kubernetes/pkg/kubemark" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" klog "k8s.io/klog/v2" ) @@ -290,7 +290,7 @@ func (nodeGroup *NodeGroup) DecreaseTargetSize(delta int) error { } // TemplateNodeInfo returns a node template for this node group. -func (nodeGroup *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) { +func (nodeGroup *NodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) { return nil, cloudprovider.ErrNotImplemented } diff --git a/cluster-autoscaler/cloudprovider/kwok/kwok_nodegroups.go b/cluster-autoscaler/cloudprovider/kwok/kwok_nodegroups.go index cd94a291b797..91933140720c 100644 --- a/cluster-autoscaler/cloudprovider/kwok/kwok_nodegroups.go +++ b/cluster-autoscaler/cloudprovider/kwok/kwok_nodegroups.go @@ -25,8 +25,8 @@ import ( "k8s.io/apimachinery/pkg/util/rand" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/config" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" klog "k8s.io/klog/v2" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) var ( @@ -186,10 +186,8 @@ func (nodeGroup *NodeGroup) Nodes() ([]cloudprovider.Instance, error) { } // TemplateNodeInfo returns a node template for this node group. 
-func (nodeGroup *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) { - nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(nodeGroup.Id())) - nodeInfo.SetNode(nodeGroup.nodeTemplate) - +func (nodeGroup *NodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) { + nodeInfo := framework.NewNodeInfo(nodeGroup.nodeTemplate, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(nodeGroup.Id())}) return nodeInfo, nil } diff --git a/cluster-autoscaler/cloudprovider/kwok/kwok_nodegroups_test.go b/cluster-autoscaler/cloudprovider/kwok/kwok_nodegroups_test.go index 5604f1bac6a7..23fc50902409 100644 --- a/cluster-autoscaler/cloudprovider/kwok/kwok_nodegroups_test.go +++ b/cluster-autoscaler/cloudprovider/kwok/kwok_nodegroups_test.go @@ -305,8 +305,8 @@ func TestTemplateNodeInfo(t *testing.T) { ti, err := ng.TemplateNodeInfo() assert.Nil(t, err) assert.NotNil(t, ti) - assert.Len(t, ti.Pods, 1) - assert.Contains(t, ti.Pods[0].Pod.Name, fmt.Sprintf("kube-proxy-%s", ng.name)) + assert.Len(t, ti.Pods(), 1) + assert.Contains(t, ti.Pods()[0].Pod.Name, fmt.Sprintf("kube-proxy-%s", ng.name)) assert.Equal(t, ng.nodeTemplate, ti.Node()) } diff --git a/cluster-autoscaler/cloudprovider/linode/linode_node_group.go b/cluster-autoscaler/cloudprovider/linode/linode_node_group.go index 644ce8bbd989..d2f660fd3b06 100644 --- a/cluster-autoscaler/cloudprovider/linode/linode_node_group.go +++ b/cluster-autoscaler/cloudprovider/linode/linode_node_group.go @@ -26,8 +26,8 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/linode/linodego" "k8s.io/autoscaler/cluster-autoscaler/config" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" klog "k8s.io/klog/v2" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) const ( @@ -186,14 +186,14 @@ func (n *NodeGroup) Nodes() ([]cloudprovider.Instance, error) { return nodes, nil } -// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty +// TemplateNodeInfo returns a framework.NodeInfo structure of an empty // (as if just started) node. This will be used in scale-up simulations to // predict what would a new node look like if a node group was expanded. The // returned NodeInfo is expected to have a fully populated Node object, with // all of the labels, capacity and allocatable information as well as all pods // that are started on the node by default, using manifest (most likely only // kube-proxy). Implementation optional. -func (n *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) { +func (n *NodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) { return nil, cloudprovider.ErrNotImplemented } diff --git a/cluster-autoscaler/cloudprovider/magnum/magnum_nodegroup.go b/cluster-autoscaler/cloudprovider/magnum/magnum_nodegroup.go index 8c72a67acb94..1d7863f00945 100644 --- a/cluster-autoscaler/cloudprovider/magnum/magnum_nodegroup.go +++ b/cluster-autoscaler/cloudprovider/magnum/magnum_nodegroup.go @@ -24,8 +24,8 @@ import ( apiv1 "k8s.io/api/core/v1" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/config" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" klog "k8s.io/klog/v2" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) // How long to sleep after deleting nodes, to ensure that multiple requests arrive in order. 
@@ -206,7 +206,7 @@ func (ng *magnumNodeGroup) Nodes() ([]cloudprovider.Instance, error) { } // TemplateNodeInfo returns a node template for this node group. -func (ng *magnumNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) { +func (ng *magnumNodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) { return nil, cloudprovider.ErrNotImplemented } diff --git a/cluster-autoscaler/cloudprovider/mocks/NodeGroup.go b/cluster-autoscaler/cloudprovider/mocks/NodeGroup.go index 65a4b3847516..04a66a2e006e 100644 --- a/cluster-autoscaler/cloudprovider/mocks/NodeGroup.go +++ b/cluster-autoscaler/cloudprovider/mocks/NodeGroup.go @@ -20,7 +20,7 @@ import ( cloudprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" config "k8s.io/autoscaler/cluster-autoscaler/config" - framework "k8s.io/kubernetes/pkg/scheduler/framework" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" mock "github.com/stretchr/testify/mock" diff --git a/cluster-autoscaler/cloudprovider/oci/instancepools/oci_instance_pool.go b/cluster-autoscaler/cloudprovider/oci/instancepools/oci_instance_pool.go index 6b2649b6d03c..8d402fbe94ea 100644 --- a/cluster-autoscaler/cloudprovider/oci/instancepools/oci_instance_pool.go +++ b/cluster-autoscaler/cloudprovider/oci/instancepools/oci_instance_pool.go @@ -6,15 +6,16 @@ package instancepools import ( "fmt" + "github.com/pkg/errors" apiv1 "k8s.io/api/core/v1" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/oci/common" ocicommon "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/oci/common" "k8s.io/autoscaler/cluster-autoscaler/config" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/client-go/kubernetes" "k8s.io/klog/v2" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) // InstancePoolNodeGroup implements the NodeGroup interface using OCI instance pools. @@ -172,23 +173,23 @@ func (ip *InstancePoolNodeGroup) Nodes() ([]cloudprovider.Instance, error) { return ip.manager.GetInstancePoolNodes(*ip) } -// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty +// TemplateNodeInfo returns a framework.NodeInfo structure of an empty // (as if just started) node. This will be used in scale-up simulations to // predict what would a new node look like if a instance-pool was expanded. The returned // NodeInfo is expected to have a fully populated Node object, with all of the labels, // capacity and allocatable information as well as all pods that are started on // the node by default, using manifest (most likely only kube-proxy). Implementation optional. 
-func (ip *InstancePoolNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) { +func (ip *InstancePoolNodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) { node, err := ip.manager.GetInstancePoolTemplateNode(*ip) if err != nil { return nil, errors.Wrap(err, "unable to build node info template") } - nodeInfo := schedulerframework.NewNodeInfo( - cloudprovider.BuildKubeProxy(ip.id), - ocicommon.BuildCSINodePod(), + nodeInfo := framework.NewNodeInfo( + node, nil, + &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(ip.id)}, + &framework.PodInfo{Pod: ocicommon.BuildCSINodePod()}, ) - nodeInfo.SetNode(node) return nodeInfo, nil } diff --git a/cluster-autoscaler/cloudprovider/oci/nodepools/oci_node_pool.go b/cluster-autoscaler/cloudprovider/oci/nodepools/oci_node_pool.go index 43d656e0f76c..914c16ef666f 100644 --- a/cluster-autoscaler/cloudprovider/oci/nodepools/oci_node_pool.go +++ b/cluster-autoscaler/cloudprovider/oci/nodepools/oci_node_pool.go @@ -18,9 +18,9 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/config" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/client-go/kubernetes" klog "k8s.io/klog/v2" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ocicommon "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/oci/common" ) @@ -273,24 +273,24 @@ func (np *nodePool) Nodes() ([]cloudprovider.Instance, error) { return np.manager.GetNodePoolNodes(np) } -// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty +// TemplateNodeInfo returns a framework.NodeInfo structure of an empty // (as if just started) node. This will be used in scale-up simulations to // predict what would a new node look like if a node group was expanded. The returned // NodeInfo is expected to have a fully populated Node object, with all of the labels, // capacity and allocatable information as well as all pods that are started on // the node by default, using manifest (most likely only kube-proxy). Implementation optional. 
-func (np *nodePool) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) { +func (np *nodePool) TemplateNodeInfo() (*framework.NodeInfo, error) { node, err := np.manager.GetNodePoolTemplateNode(np) if err != nil { return nil, errors.Wrap(err, "unable to build node pool template") } - nodeInfo := schedulerframework.NewNodeInfo( - cloudprovider.BuildKubeProxy(np.id), - ocicommon.BuildFlannelPod(), - ocicommon.BuildProxymuxClientPod(), + nodeInfo := framework.NewNodeInfo( + node, nil, + &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(np.id)}, + &framework.PodInfo{Pod: ocicommon.BuildFlannelPod()}, + &framework.PodInfo{Pod: ocicommon.BuildProxymuxClientPod()}, ) - nodeInfo.SetNode(node) return nodeInfo, nil } diff --git a/cluster-autoscaler/cloudprovider/ovhcloud/ovh_cloud_node_group.go b/cluster-autoscaler/cloudprovider/ovhcloud/ovh_cloud_node_group.go index 42b94f3d56ad..185adde536ab 100644 --- a/cluster-autoscaler/cloudprovider/ovhcloud/ovh_cloud_node_group.go +++ b/cluster-autoscaler/cloudprovider/ovhcloud/ovh_cloud_node_group.go @@ -28,8 +28,8 @@ import ( apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/klog/v2" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/ovhcloud/sdk" @@ -215,7 +215,7 @@ func (ng *NodeGroup) Nodes() ([]cloudprovider.Instance, error) { } // TemplateNodeInfo returns a node template for this node group. -func (ng *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) { +func (ng *NodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) { // Forge node template in a node group node := &apiv1.Node{ ObjectMeta: metav1.ObjectMeta{ @@ -252,9 +252,7 @@ func (ng *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) { node.Status.Allocatable = node.Status.Capacity // Setup node info template - nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(ng.Id())) - nodeInfo.SetNode(node) - + nodeInfo := framework.NewNodeInfo(node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(ng.Id())}) return nodeInfo, nil } diff --git a/cluster-autoscaler/cloudprovider/rancher/rancher_nodegroup.go b/cluster-autoscaler/cloudprovider/rancher/rancher_nodegroup.go index c83871a05654..045f78802920 100644 --- a/cluster-autoscaler/cloudprovider/rancher/rancher_nodegroup.go +++ b/cluster-autoscaler/cloudprovider/rancher/rancher_nodegroup.go @@ -31,8 +31,8 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" provisioningv1 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/rancher/provisioning.cattle.io/v1" "k8s.io/autoscaler/cluster-autoscaler/config" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" klog "k8s.io/klog/v2" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" "k8s.io/utils/pointer" ) @@ -196,7 +196,7 @@ func (ng *nodeGroup) DecreaseTargetSize(delta int) error { } // TemplateNodeInfo returns a node template for this node group. 
-func (ng *nodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) { +func (ng *nodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) { node := &corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-%s-%d", ng.provider.config.ClusterName, ng.Id(), rand.Int63()), @@ -216,9 +216,7 @@ func (ng *nodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) { node.Status.Allocatable = node.Status.Capacity // Setup node info template - nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(ng.Id())) - nodeInfo.SetNode(node) - + nodeInfo := framework.NewNodeInfo(node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(ng.Id())}) return nodeInfo, nil } diff --git a/cluster-autoscaler/cloudprovider/rancher/rancher_nodegroup_test.go b/cluster-autoscaler/cloudprovider/rancher/rancher_nodegroup_test.go index ac734bb8ab95..84bce1536898 100644 --- a/cluster-autoscaler/cloudprovider/rancher/rancher_nodegroup_test.go +++ b/cluster-autoscaler/cloudprovider/rancher/rancher_nodegroup_test.go @@ -396,19 +396,19 @@ func TestTemplateNodeInfo(t *testing.T) { t.Fatal(err) } - if nodeInfo.Allocatable.MilliCPU != ng.resources.Cpu().MilliValue() { + if nodeInfo.ToScheduler().Allocatable.MilliCPU != ng.resources.Cpu().MilliValue() { t.Fatalf("expected nodeInfo to have %v MilliCPU, got %v", - ng.resources.Cpu().MilliValue(), nodeInfo.Allocatable.MilliCPU) + ng.resources.Cpu().MilliValue(), nodeInfo.ToScheduler().Allocatable.MilliCPU) } - if nodeInfo.Allocatable.Memory != ng.resources.Memory().Value() { + if nodeInfo.ToScheduler().Allocatable.Memory != ng.resources.Memory().Value() { t.Fatalf("expected nodeInfo to have %v Memory, got %v", - ng.resources.Memory().Value(), nodeInfo.Allocatable.Memory) + ng.resources.Memory().Value(), nodeInfo.ToScheduler().Allocatable.Memory) } - if nodeInfo.Allocatable.EphemeralStorage != ng.resources.StorageEphemeral().Value() { + if nodeInfo.ToScheduler().Allocatable.EphemeralStorage != ng.resources.StorageEphemeral().Value() { t.Fatalf("expected nodeInfo to have %v ephemeral storage, got %v", - ng.resources.StorageEphemeral().Value(), nodeInfo.Allocatable.EphemeralStorage) + ng.resources.StorageEphemeral().Value(), nodeInfo.ToScheduler().Allocatable.EphemeralStorage) } } diff --git a/cluster-autoscaler/cloudprovider/scaleway/scaleway_node_group.go b/cluster-autoscaler/cloudprovider/scaleway/scaleway_node_group.go index 96191c3b88c0..bd264e5c7b92 100644 --- a/cluster-autoscaler/cloudprovider/scaleway/scaleway_node_group.go +++ b/cluster-autoscaler/cloudprovider/scaleway/scaleway_node_group.go @@ -20,16 +20,17 @@ import ( "context" "errors" "fmt" + "strings" + apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/scaleway/scalewaygo" "k8s.io/autoscaler/cluster-autoscaler/config" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/utils/gpu" "k8s.io/klog/v2" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" - "strings" ) // NodeGroup implements cloudprovider.NodeGroup interface. @@ -198,13 +199,13 @@ func (ng *NodeGroup) Nodes() ([]cloudprovider.Instance, error) { return nodes, nil } -// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty +// TemplateNodeInfo returns a framework.NodeInfo structure of an empty // (as if just started) node. 
This will be used in scale-up simulations to // predict what would a new node look like if a node group was expanded. The returned // NodeInfo is expected to have a fully populated Node object, with all of the labels, // capacity and allocatable information as well as all pods that are started on // the node by default, using manifest (most likely only kube-proxy). -func (ng *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) { +func (ng *NodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) { klog.V(4).Infof("TemplateNodeInfo,PoolID=%s", ng.p.ID) node := apiv1.Node{ ObjectMeta: metav1.ObjectMeta{ @@ -235,8 +236,7 @@ func (ng *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) { node.Status.Conditions = cloudprovider.BuildReadyConditions() node.Spec.Taints = parseTaints(ng.specs.Taints) - nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(ng.p.Name)) - nodeInfo.SetNode(&node) + nodeInfo := framework.NewNodeInfo(&node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(ng.p.Name)}) return nodeInfo, nil } diff --git a/cluster-autoscaler/cloudprovider/tencentcloud/tencentcloud_auto_scaling_group.go b/cluster-autoscaler/cloudprovider/tencentcloud/tencentcloud_auto_scaling_group.go index a0d54f0319e9..88972c01d73b 100644 --- a/cluster-autoscaler/cloudprovider/tencentcloud/tencentcloud_auto_scaling_group.go +++ b/cluster-autoscaler/cloudprovider/tencentcloud/tencentcloud_auto_scaling_group.go @@ -26,7 +26,7 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/config" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" ) // TcRef contains a reference to some entity in Tencentcloud/TKE world. @@ -247,15 +247,14 @@ func (asg *tcAsg) Nodes() ([]cloudprovider.Instance, error) { } // TemplateNodeInfo returns a node template for this node group. -func (asg *tcAsg) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) { +func (asg *tcAsg) TemplateNodeInfo() (*framework.NodeInfo, error) { node, err := asg.tencentcloudManager.GetAsgTemplateNode(asg) if err != nil { return nil, err } klog.V(4).Infof("Generate tencentcloud template: labels=%v taints=%v allocatable=%v", node.Labels, node.Spec.Taints, node.Status.Allocatable) - nodeInfo := schedulerframework.NewNodeInfo() - nodeInfo.SetNode(node) + nodeInfo := framework.NewNodeInfo(node, nil) return nodeInfo, nil } diff --git a/cluster-autoscaler/cloudprovider/test/test_cloud_provider.go b/cluster-autoscaler/cloudprovider/test/test_cloud_provider.go index ec2319dbbfa1..414e8cae5d1c 100644 --- a/cluster-autoscaler/cloudprovider/test/test_cloud_provider.go +++ b/cluster-autoscaler/cloudprovider/test/test_cloud_provider.go @@ -24,9 +24,9 @@ import ( "k8s.io/apimachinery/pkg/api/resource" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/config" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/utils/errors" "k8s.io/autoscaler/cluster-autoscaler/utils/gpu" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) // OnScaleUpFunc is a function called on node group increase in TestCloudProvider. 
@@ -56,7 +56,7 @@ type TestCloudProvider struct { onNodeGroupDelete func(string) error hasInstance func(string) (bool, error) machineTypes []string - machineTemplates map[string]*schedulerframework.NodeInfo + machineTemplates map[string]*framework.NodeInfo priceModel cloudprovider.PricingModel resourceLimiter *cloudprovider.ResourceLimiter } @@ -75,7 +75,7 @@ func NewTestCloudProvider(onScaleUp OnScaleUpFunc, onScaleDown OnScaleDownFunc) // NewTestAutoprovisioningCloudProvider builds new TestCloudProvider with autoprovisioning support func NewTestAutoprovisioningCloudProvider(onScaleUp OnScaleUpFunc, onScaleDown OnScaleDownFunc, onNodeGroupCreate OnNodeGroupCreateFunc, onNodeGroupDelete OnNodeGroupDeleteFunc, - machineTypes []string, machineTemplates map[string]*schedulerframework.NodeInfo) *TestCloudProvider { + machineTypes []string, machineTemplates map[string]*framework.NodeInfo) *TestCloudProvider { return &TestCloudProvider{ nodes: make(map[string]string), groups: make(map[string]cloudprovider.NodeGroup), @@ -494,7 +494,7 @@ func (tng *TestNodeGroup) Autoprovisioned() bool { } // TemplateNodeInfo returns a node template for this node group. -func (tng *TestNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) { +func (tng *TestNodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) { if tng.cloudProvider.machineTemplates == nil { return nil, cloudprovider.ErrNotImplemented } diff --git a/cluster-autoscaler/cloudprovider/volcengine/volcengine_auto_scaling_group.go b/cluster-autoscaler/cloudprovider/volcengine/volcengine_auto_scaling_group.go index 4ed9bbc8cd1f..413d4670f015 100644 --- a/cluster-autoscaler/cloudprovider/volcengine/volcengine_auto_scaling_group.go +++ b/cluster-autoscaler/cloudprovider/volcengine/volcengine_auto_scaling_group.go @@ -22,8 +22,8 @@ import ( apiv1 "k8s.io/api/core/v1" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/config" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/klog/v2" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) // AutoScalingGroup represents a Volcengine 'Auto Scaling Group' which also can be treated as a node group. @@ -169,13 +169,13 @@ func (asg *AutoScalingGroup) Nodes() ([]cloudprovider.Instance, error) { return nodes, nil } -// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty +// TemplateNodeInfo returns a framework.NodeInfo structure of an empty // (as if just started) node. This will be used in scale-up simulations to // predict what would a new node look like if a node group was expanded. The returned // NodeInfo is expected to have a fully populated Node object, with all of the labels, // capacity and allocatable information as well as all pods that are started on // the node by default, using manifest (most likely only kube-proxy). Implementation optional. 
-func (asg *AutoScalingGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) { +func (asg *AutoScalingGroup) TemplateNodeInfo() (*framework.NodeInfo, error) { template, err := asg.manager.getAsgTemplate(asg.asgId) if err != nil { return nil, err @@ -184,8 +184,7 @@ func (asg *AutoScalingGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, e if err != nil { return nil, err } - nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(asg.asgId)) - nodeInfo.SetNode(node) + nodeInfo := framework.NewNodeInfo(node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(asg.asgId)}) return nodeInfo, nil } diff --git a/cluster-autoscaler/cloudprovider/vultr/vultr_node_group.go b/cluster-autoscaler/cloudprovider/vultr/vultr_node_group.go index c7a5bd113ba5..afa711fc34f3 100644 --- a/cluster-autoscaler/cloudprovider/vultr/vultr_node_group.go +++ b/cluster-autoscaler/cloudprovider/vultr/vultr_node_group.go @@ -25,7 +25,7 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/vultr/govultr" "k8s.io/autoscaler/cluster-autoscaler/config" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" ) const ( @@ -193,14 +193,14 @@ func (n *NodeGroup) Nodes() ([]cloudprovider.Instance, error) { } -// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty +// TemplateNodeInfo returns a framework.NodeInfo structure of an empty // (as if just started) node. This will be used in scale-up simulations to // predict what would a new node look like if a node group was expanded. The // returned NodeInfo is expected to have a fully populated Node object, with // all of the labels, capacity and allocatable information as well as all pods // that are started on the node by default, using manifest (most likely only // kube-proxy). Implementation optional. 
-func (n *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) { +func (n *NodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) { return nil, cloudprovider.ErrNotImplemented } diff --git a/cluster-autoscaler/clusterstate/clusterstate.go b/cluster-autoscaler/clusterstate/clusterstate.go index 210d0ec5827b..dfcdc3687081 100644 --- a/cluster-autoscaler/clusterstate/clusterstate.go +++ b/cluster-autoscaler/clusterstate/clusterstate.go @@ -30,6 +30,7 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/metrics" "k8s.io/autoscaler/cluster-autoscaler/processors/nodegroupconfig" "k8s.io/autoscaler/cluster-autoscaler/processors/nodegroups/asyncnodegroups" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/utils/backoff" "k8s.io/autoscaler/cluster-autoscaler/utils/gpu" kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes" @@ -38,7 +39,6 @@ import ( apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" klog "k8s.io/klog/v2" ) @@ -124,7 +124,7 @@ type ClusterStateRegistry struct { scaleUpRequests map[string]*ScaleUpRequest // nodeGroupName -> ScaleUpRequest scaleDownRequests []*ScaleDownRequest nodes []*apiv1.Node - nodeInfosForGroups map[string]*schedulerframework.NodeInfo + nodeInfosForGroups map[string]*framework.NodeInfo cloudProvider cloudprovider.CloudProvider perNodeGroupReadiness map[string]Readiness totalReadiness Readiness @@ -338,7 +338,7 @@ func (csr *ClusterStateRegistry) registerFailedScaleUpNoLock(nodeGroup cloudprov } // UpdateNodes updates the state of the nodes in the ClusterStateRegistry and recalculates the stats -func (csr *ClusterStateRegistry) UpdateNodes(nodes []*apiv1.Node, nodeInfosForGroups map[string]*schedulerframework.NodeInfo, currentTime time.Time) error { +func (csr *ClusterStateRegistry) UpdateNodes(nodes []*apiv1.Node, nodeInfosForGroups map[string]*framework.NodeInfo, currentTime time.Time) error { csr.updateNodeGroupMetrics() targetSizes, err := getTargetSizes(csr.cloudProvider) if err != nil { diff --git a/cluster-autoscaler/core/podlistprocessor/currently_drained_nodes.go b/cluster-autoscaler/core/podlistprocessor/currently_drained_nodes.go index 58459da50f17..1d2ae582f218 100644 --- a/cluster-autoscaler/core/podlistprocessor/currently_drained_nodes.go +++ b/cluster-autoscaler/core/podlistprocessor/currently_drained_nodes.go @@ -45,12 +45,12 @@ func currentlyDrainedPods(context *context.AutoscalingContext) []*apiv1.Pod { var pods []*apiv1.Pod _, nodeNames := context.ScaleDownActuator.CheckStatus().DeletionsInProgress() for _, nodeName := range nodeNames { - nodeInfo, err := context.ClusterSnapshot.NodeInfos().Get(nodeName) + nodeInfo, err := context.ClusterSnapshot.GetNodeInfo(nodeName) if err != nil { klog.Warningf("Couldn't get node %v info, assuming the node got deleted already: %v", nodeName, err) continue } - for _, podInfo := range nodeInfo.Pods { + for _, podInfo := range nodeInfo.Pods() { // Filter out pods that has deletion timestamp set if podInfo.Pod.DeletionTimestamp != nil { klog.Infof("Pod %v has deletion timestamp set, skipping injection to unschedulable pods list", podInfo.Pod.Name) diff --git a/cluster-autoscaler/core/podlistprocessor/filter_out_expendable_test.go b/cluster-autoscaler/core/podlistprocessor/filter_out_expendable_test.go index 5a286b4276de..458f633c7152 100644 --- a/cluster-autoscaler/core/podlistprocessor/filter_out_expendable_test.go 
+++ b/cluster-autoscaler/core/podlistprocessor/filter_out_expendable_test.go @@ -125,13 +125,12 @@ func TestFilterOutExpendable(t *testing.T) { assert.ElementsMatch(t, tc.wantPods, pods) var podsInSnapshot []*apiv1.Pod - nodeInfoLister := snapshot.NodeInfos() // Get pods in snapshot for _, n := range tc.nodes { - nodeInfo, err := nodeInfoLister.Get(n.Name) + nodeInfo, err := snapshot.GetNodeInfo(n.Name) assert.NoError(t, err) - assert.NotEqual(t, nodeInfo.Pods, nil) - for _, podInfo := range nodeInfo.Pods { + assert.NotEqual(t, nodeInfo.Pods(), nil) + for _, podInfo := range nodeInfo.Pods() { podsInSnapshot = append(podsInSnapshot, podInfo.Pod) } } diff --git a/cluster-autoscaler/core/podlistprocessor/filter_out_schedulable.go b/cluster-autoscaler/core/podlistprocessor/filter_out_schedulable.go index 29088334b048..f56fb19d98c0 100644 --- a/cluster-autoscaler/core/podlistprocessor/filter_out_schedulable.go +++ b/cluster-autoscaler/core/podlistprocessor/filter_out_schedulable.go @@ -25,20 +25,20 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/context" "k8s.io/autoscaler/cluster-autoscaler/metrics" "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker" "k8s.io/autoscaler/cluster-autoscaler/simulator/scheduling" corev1helpers "k8s.io/component-helpers/scheduling/corev1" klog "k8s.io/klog/v2" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) type filterOutSchedulablePodListProcessor struct { schedulingSimulator *scheduling.HintingSimulator - nodeFilter func(*schedulerframework.NodeInfo) bool + nodeFilter func(*framework.NodeInfo) bool } // NewFilterOutSchedulablePodListProcessor creates a PodListProcessor filtering out schedulable pods -func NewFilterOutSchedulablePodListProcessor(predicateChecker predicatechecker.PredicateChecker, nodeFilter func(*schedulerframework.NodeInfo) bool) *filterOutSchedulablePodListProcessor { +func NewFilterOutSchedulablePodListProcessor(predicateChecker predicatechecker.PredicateChecker, nodeFilter func(*framework.NodeInfo) bool) *filterOutSchedulablePodListProcessor { return &filterOutSchedulablePodListProcessor{ schedulingSimulator: scheduling.NewHintingSimulator(predicateChecker), nodeFilter: nodeFilter, diff --git a/cluster-autoscaler/core/podlistprocessor/filter_out_schedulable_test.go b/cluster-autoscaler/core/podlistprocessor/filter_out_schedulable_test.go index 0c63ba56945a..e02e0b9c0bb6 100644 --- a/cluster-autoscaler/core/podlistprocessor/filter_out_schedulable_test.go +++ b/cluster-autoscaler/core/podlistprocessor/filter_out_schedulable_test.go @@ -24,10 +24,10 @@ import ( "github.com/stretchr/testify/assert" apiv1 "k8s.io/api/core/v1" "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker" "k8s.io/autoscaler/cluster-autoscaler/simulator/scheduling" . 
"k8s.io/autoscaler/cluster-autoscaler/utils/test" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" schedulermetrics "k8s.io/kubernetes/pkg/scheduler/metrics" ) @@ -35,15 +35,15 @@ func TestFilterOutSchedulable(t *testing.T) { schedulermetrics.Register() node := buildReadyTestNode("node", 2000, 100) - matchesAllNodes := func(*schedulerframework.NodeInfo) bool { return true } - matchesNoNodes := func(*schedulerframework.NodeInfo) bool { return false } + matchesAllNodes := func(*framework.NodeInfo) bool { return true } + matchesNoNodes := func(*framework.NodeInfo) bool { return false } testCases := map[string]struct { nodesWithPods map[*apiv1.Node][]*apiv1.Pod unschedulableCandidates []*apiv1.Pod expectedScheduledPods []*apiv1.Pod expectedUnscheduledPods []*apiv1.Pod - nodeFilter func(*schedulerframework.NodeInfo) bool + nodeFilter func(*framework.NodeInfo) bool }{ "single empty node, no pods": { nodesWithPods: map[*apiv1.Node][]*apiv1.Pod{node: {}}, @@ -203,11 +203,11 @@ func TestFilterOutSchedulable(t *testing.T) { assert.NoError(t, err) assert.ElementsMatch(t, unschedulablePods, tc.expectedUnscheduledPods, "unschedulable pods differ") - nodeInfos, err := clusterSnapshot.NodeInfos().List() + nodeInfos, err := clusterSnapshot.ListNodeInfos() assert.NoError(t, err) var scheduledPods []*apiv1.Pod for _, nodeInfo := range nodeInfos { - for _, podInfo := range nodeInfo.Pods { + for _, podInfo := range nodeInfo.Pods() { scheduledPods = append(scheduledPods, podInfo.Pod) } } diff --git a/cluster-autoscaler/core/podlistprocessor/pod_list_processor.go b/cluster-autoscaler/core/podlistprocessor/pod_list_processor.go index 3e9327fde12f..9557b134c2cc 100644 --- a/cluster-autoscaler/core/podlistprocessor/pod_list_processor.go +++ b/cluster-autoscaler/core/podlistprocessor/pod_list_processor.go @@ -18,13 +18,13 @@ package podlistprocessor import ( "k8s.io/autoscaler/cluster-autoscaler/processors/pods" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) // NewDefaultPodListProcessor returns a default implementation of the pod list // processor, which wraps and sequentially runs other sub-processors. 
-func NewDefaultPodListProcessor(predicateChecker predicatechecker.PredicateChecker, nodeFilter func(*schedulerframework.NodeInfo) bool) *pods.CombinedPodListProcessor { +func NewDefaultPodListProcessor(predicateChecker predicatechecker.PredicateChecker, nodeFilter func(*framework.NodeInfo) bool) *pods.CombinedPodListProcessor { return pods.NewCombinedPodListProcessor([]pods.PodListProcessor{ NewClearTPURequestsPodListProcessor(), NewFilterOutExpendablePodListProcessor(), diff --git a/cluster-autoscaler/core/scaledown/actuation/actuator.go b/cluster-autoscaler/core/scaledown/actuation/actuator.go index eccbe4c0884b..b02e2016aaab 100644 --- a/cluster-autoscaler/core/scaledown/actuation/actuator.go +++ b/cluster-autoscaler/core/scaledown/actuation/actuator.go @@ -285,7 +285,7 @@ func (a *Actuator) deleteNodesAsync(nodes []*apiv1.Node, nodeGroup cloudprovider } for _, node := range nodes { - nodeInfo, err := clusterSnapshot.NodeInfos().Get(node.Name) + nodeInfo, err := clusterSnapshot.GetNodeInfo(node.Name) if err != nil { klog.Errorf("Scale-down: can't retrieve node %q from snapshot, err: %v", node.Name, err) nodeDeleteResult := status.NodeDeleteResult{ResultType: status.NodeDeleteErrorInternal, Err: errors.NewAutoscalerError(errors.InternalError, "nodeInfos.Get for %q returned error: %v", node.Name, err)} @@ -317,7 +317,7 @@ func (a *Actuator) scaleDownNodeToReport(node *apiv1.Node, drain bool) (*status. if err != nil { return nil, err } - nodeInfo, err := a.ctx.ClusterSnapshot.NodeInfos().Get(node.Name) + nodeInfo, err := a.ctx.ClusterSnapshot.GetNodeInfo(node.Name) if err != nil { return nil, err } diff --git a/cluster-autoscaler/core/scaledown/actuation/drain.go b/cluster-autoscaler/core/scaledown/actuation/drain.go index c89ad46017f2..3cc941af6d9d 100644 --- a/cluster-autoscaler/core/scaledown/actuation/drain.go +++ b/cluster-autoscaler/core/scaledown/actuation/drain.go @@ -27,6 +27,7 @@ import ( kube_errors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/autoscaler/cluster-autoscaler/metrics" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/klog/v2" kubelet_config "k8s.io/kubernetes/pkg/kubelet/apis/config" @@ -35,7 +36,6 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/utils/daemonset" "k8s.io/autoscaler/cluster-autoscaler/utils/errors" pod_util "k8s.io/autoscaler/cluster-autoscaler/utils/pod" - "k8s.io/kubernetes/pkg/scheduler/framework" ) const ( @@ -251,7 +251,7 @@ func (e Evictor) evictPod(ctx *acontext.AutoscalingContext, podToEvict *apiv1.Po } func podsToEvict(nodeInfo *framework.NodeInfo, evictDsByDefault bool) (dsPods, nonDsPods []*apiv1.Pod) { - for _, podInfo := range nodeInfo.Pods { + for _, podInfo := range nodeInfo.Pods() { if pod_util.IsMirrorPod(podInfo.Pod) { continue } else if pod_util.IsDaemonSetPod(podInfo.Pod) { diff --git a/cluster-autoscaler/core/scaledown/actuation/drain_test.go b/cluster-autoscaler/core/scaledown/actuation/drain_test.go index 82a4613866a7..7205afe4a975 100644 --- a/cluster-autoscaler/core/scaledown/actuation/drain_test.go +++ b/cluster-autoscaler/core/scaledown/actuation/drain_test.go @@ -146,7 +146,7 @@ func TestDaemonSetEvictionForEmptyNodes(t *testing.T) { EvictionRetryTime: waitBetweenRetries, shutdownGracePeriodByPodPriority: drainConfig, } - nodeInfo, err := context.ClusterSnapshot.NodeInfos().Get(n1.Name) + nodeInfo, err := context.ClusterSnapshot.GetNodeInfo(n1.Name) assert.NoError(t, err) _, err = evictor.EvictDaemonSetPods(&context, nodeInfo) if scenario.err != nil { @@ 
-213,7 +213,7 @@ func TestDrainNodeWithPods(t *testing.T) { shutdownGracePeriodByPodPriority: legacyFlagDrainConfig, } clustersnapshot.InitializeClusterSnapshotOrDie(t, ctx.ClusterSnapshot, []*apiv1.Node{n1}, []*apiv1.Pod{p1, p2, d1}) - nodeInfo, err := ctx.ClusterSnapshot.NodeInfos().Get(n1.Name) + nodeInfo, err := ctx.ClusterSnapshot.GetNodeInfo(n1.Name) assert.NoError(t, err) _, err = evictor.DrainNode(&ctx, nodeInfo) assert.NoError(t, err) @@ -277,7 +277,7 @@ func TestDrainNodeWithPodsWithRescheduled(t *testing.T) { shutdownGracePeriodByPodPriority: legacyFlagDrainConfig, } clustersnapshot.InitializeClusterSnapshotOrDie(t, ctx.ClusterSnapshot, []*apiv1.Node{n1}, []*apiv1.Pod{p1, p2}) - nodeInfo, err := ctx.ClusterSnapshot.NodeInfos().Get(n1.Name) + nodeInfo, err := ctx.ClusterSnapshot.GetNodeInfo(n1.Name) assert.NoError(t, err) _, err = evictor.DrainNode(&ctx, nodeInfo) assert.NoError(t, err) @@ -346,7 +346,7 @@ func TestDrainNodeWithPodsWithRetries(t *testing.T) { shutdownGracePeriodByPodPriority: legacyFlagDrainConfig, } clustersnapshot.InitializeClusterSnapshotOrDie(t, ctx.ClusterSnapshot, []*apiv1.Node{n1}, []*apiv1.Pod{p1, p2, p3, d1}) - nodeInfo, err := ctx.ClusterSnapshot.NodeInfos().Get(n1.Name) + nodeInfo, err := ctx.ClusterSnapshot.GetNodeInfo(n1.Name) assert.NoError(t, err) _, err = evictor.DrainNode(&ctx, nodeInfo) assert.NoError(t, err) @@ -409,7 +409,7 @@ func TestDrainNodeWithPodsDaemonSetEvictionFailure(t *testing.T) { shutdownGracePeriodByPodPriority: legacyFlagDrainConfig, } clustersnapshot.InitializeClusterSnapshotOrDie(t, ctx.ClusterSnapshot, []*apiv1.Node{n1}, []*apiv1.Pod{p1, p2, d1, d2}) - nodeInfo, err := ctx.ClusterSnapshot.NodeInfos().Get(n1.Name) + nodeInfo, err := ctx.ClusterSnapshot.GetNodeInfo(n1.Name) assert.NoError(t, err) evictionResults, err := evictor.DrainNode(&ctx, nodeInfo) assert.NoError(t, err) @@ -470,7 +470,7 @@ func TestDrainNodeWithPodsEvictionFailure(t *testing.T) { shutdownGracePeriodByPodPriority: legacyFlagDrainConfig, } clustersnapshot.InitializeClusterSnapshotOrDie(t, ctx.ClusterSnapshot, []*apiv1.Node{n1}, []*apiv1.Pod{p1, p2, p3, p4}) - nodeInfo, err := ctx.ClusterSnapshot.NodeInfos().Get(n1.Name) + nodeInfo, err := ctx.ClusterSnapshot.GetNodeInfo(n1.Name) assert.NoError(t, err) evictionResults, err := evictor.DrainNode(&ctx, nodeInfo) assert.Error(t, err) @@ -536,7 +536,7 @@ func TestDrainWithPodsNodeDisappearanceFailure(t *testing.T) { shutdownGracePeriodByPodPriority: legacyFlagDrainConfig, } clustersnapshot.InitializeClusterSnapshotOrDie(t, ctx.ClusterSnapshot, []*apiv1.Node{n1}, []*apiv1.Pod{p1, p2, p3, p4}) - nodeInfo, err := ctx.ClusterSnapshot.NodeInfos().Get(n1.Name) + nodeInfo, err := ctx.ClusterSnapshot.GetNodeInfo(n1.Name) assert.NoError(t, err) evictionResults, err := evictor.DrainNode(&ctx, nodeInfo) assert.Error(t, err) @@ -626,9 +626,9 @@ func TestPodsToEvict(t *testing.T) { if tc.nodeNameOverwrite != "" { nodeName = tc.nodeNameOverwrite } - nodeInfo, err := snapshot.NodeInfos().Get(nodeName) + nodeInfo, err := snapshot.GetNodeInfo(nodeName) if err != nil { - t.Fatalf("NodeInfos().Get() unexpected error: %v", err) + t.Fatalf("GetNodeInfo() unexpected error: %v", err) } gotDsPods, gotNonDsPods := podsToEvict(nodeInfo, ctx.DaemonSetEvictionForOccupiedNodes) if diff := cmp.Diff(tc.wantDsPods, gotDsPods, cmpopts.EquateEmpty()); diff != "" { diff --git a/cluster-autoscaler/core/scaledown/actuation/group_deletion_scheduler.go b/cluster-autoscaler/core/scaledown/actuation/group_deletion_scheduler.go index 
b7b583381567..094a6ef1cb1f 100644 --- a/cluster-autoscaler/core/scaledown/actuation/group_deletion_scheduler.go +++ b/cluster-autoscaler/core/scaledown/actuation/group_deletion_scheduler.go @@ -20,8 +20,8 @@ import ( "sync" apiv1 "k8s.io/api/core/v1" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/scheduler/framework" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/config" diff --git a/cluster-autoscaler/core/scaledown/actuation/group_deletion_scheduler_test.go b/cluster-autoscaler/core/scaledown/actuation/group_deletion_scheduler_test.go index cadf6ec99400..d2c49625355c 100644 --- a/cluster-autoscaler/core/scaledown/actuation/group_deletion_scheduler_test.go +++ b/cluster-autoscaler/core/scaledown/actuation/group_deletion_scheduler_test.go @@ -33,10 +33,9 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/core/scaledown/deletiontracker" "k8s.io/autoscaler/cluster-autoscaler/core/scaledown/status" . "k8s.io/autoscaler/cluster-autoscaler/core/test" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes" "k8s.io/client-go/kubernetes/fake" - "k8s.io/kubernetes/pkg/scheduler/framework" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) type testIteration struct { @@ -215,18 +214,12 @@ func scheduleAll(toSchedule []*budgets.NodeGroupView, scheduler *GroupDeletionSc return fmt.Errorf("failed to get target size for node group %q: %s", bucket.Group.Id(), err) } for _, node := range bucket.Nodes { - scheduler.ScheduleDeletion(infoForNode(node), bucket.Group, bucketSize, false) + scheduler.ScheduleDeletion(framework.NewTestNodeInfo(node), bucket.Group, bucketSize, false) } } return nil } -func infoForNode(n *apiv1.Node) *framework.NodeInfo { - info := schedulerframework.NewNodeInfo() - info.SetNode(n) - return info -} - func mergeLists(lists ...[]*budgets.NodeGroupView) []*budgets.NodeGroupView { merged := []*budgets.NodeGroupView{} for _, l := range lists { diff --git a/cluster-autoscaler/core/scaledown/actuation/priority_test.go b/cluster-autoscaler/core/scaledown/actuation/priority_test.go index 4b50be682bf5..de743584560c 100644 --- a/cluster-autoscaler/core/scaledown/actuation/priority_test.go +++ b/cluster-autoscaler/core/scaledown/actuation/priority_test.go @@ -96,7 +96,7 @@ func TestPriorityEvictor(t *testing.T) { fullDsEviction: true, } clustersnapshot.InitializeClusterSnapshotOrDie(t, ctx.ClusterSnapshot, []*apiv1.Node{n1}, []*apiv1.Pod{p1, p2, p3}) - nodeInfo, err := ctx.ClusterSnapshot.NodeInfos().Get(n1.Name) + nodeInfo, err := ctx.ClusterSnapshot.GetNodeInfo(n1.Name) assert.NoError(t, err) _, err = evictor.DrainNode(&ctx, nodeInfo) assert.NoError(t, err) diff --git a/cluster-autoscaler/core/scaledown/eligibility/eligibility.go b/cluster-autoscaler/core/scaledown/eligibility/eligibility.go index f6cf9a9e55c9..faa2b6d6d3ef 100644 --- a/cluster-autoscaler/core/scaledown/eligibility/eligibility.go +++ b/cluster-autoscaler/core/scaledown/eligibility/eligibility.go @@ -25,13 +25,13 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/core/scaledown/actuation" "k8s.io/autoscaler/cluster-autoscaler/core/scaledown/unremovable" "k8s.io/autoscaler/cluster-autoscaler/simulator" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/simulator/utilization" "k8s.io/autoscaler/cluster-autoscaler/utils/klogx" apiv1 "k8s.io/api/core/v1" kube_util 
"k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes" klog "k8s.io/klog/v2" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) const ( @@ -73,7 +73,7 @@ func (c *Checker) FilterOutUnremovable(context *context.AutoscalingContext, scal utilLogsQuota := klogx.NewLoggingQuota(20) for _, node := range scaleDownCandidates { - nodeInfo, err := context.ClusterSnapshot.NodeInfos().Get(node.Name) + nodeInfo, err := context.ClusterSnapshot.GetNodeInfo(node.Name) if err != nil { klog.Errorf("Can't retrieve scale-down candidate %s from snapshot, err: %v", node.Name, err) ineligible = append(ineligible, &simulator.UnremovableNode{Node: node, Reason: simulator.UnexpectedError}) @@ -106,7 +106,7 @@ func (c *Checker) FilterOutUnremovable(context *context.AutoscalingContext, scal return currentlyUnneededNodeNames, utilizationMap, ineligible } -func (c *Checker) unremovableReasonAndNodeUtilization(context *context.AutoscalingContext, timestamp time.Time, nodeInfo *schedulerframework.NodeInfo, utilLogsQuota *klogx.Quota) (simulator.UnremovableReason, *utilization.Info) { +func (c *Checker) unremovableReasonAndNodeUtilization(context *context.AutoscalingContext, timestamp time.Time, nodeInfo *framework.NodeInfo, utilLogsQuota *klogx.Quota) (simulator.UnremovableReason, *utilization.Info) { node := nodeInfo.Node() if actuation.IsNodeBeingDeleted(node, timestamp) { diff --git a/cluster-autoscaler/core/scaledown/planner/planner.go b/cluster-autoscaler/core/scaledown/planner/planner.go index 9550516eacf9..2898e240cb05 100644 --- a/cluster-autoscaler/core/scaledown/planner/planner.go +++ b/cluster-autoscaler/core/scaledown/planner/planner.go @@ -176,7 +176,7 @@ func (p *Planner) addUnremovableNodes(unremovableNodes []simulator.UnremovableNo } func allNodes(s clustersnapshot.ClusterSnapshot) ([]*apiv1.Node, error) { - nodeInfos, err := s.NodeInfos().List() + nodeInfos, err := s.ListNodeInfos() if err != nil { // This should never happen, List() returns err only because scheduler interface requires it. return nil, err @@ -264,7 +264,7 @@ func (p *Planner) categorizeNodes(podDestinations map[string]bool, scaleDownCand unremovableCount := 0 var removableList []simulator.NodeToBeRemoved atomicScaleDownNodesCount := 0 - p.unremovableNodes.Update(p.context.ClusterSnapshot.NodeInfos(), p.latestUpdate) + p.unremovableNodes.Update(p.context.ClusterSnapshot, p.latestUpdate) currentlyUnneededNodeNames, utilizationMap, ineligible := p.eligibilityChecker.FilterOutUnremovable(p.context, scaleDownCandidates, p.latestUpdate, p.unremovableNodes) for _, n := range ineligible { p.unremovableNodes.Add(n) diff --git a/cluster-autoscaler/core/scaledown/unremovable/nodes.go b/cluster-autoscaler/core/scaledown/unremovable/nodes.go index c452aef01554..c900faa55372 100644 --- a/cluster-autoscaler/core/scaledown/unremovable/nodes.go +++ b/cluster-autoscaler/core/scaledown/unremovable/nodes.go @@ -20,10 +20,10 @@ import ( "time" "k8s.io/autoscaler/cluster-autoscaler/simulator" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" apiv1 "k8s.io/api/core/v1" klog "k8s.io/klog/v2" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) // Nodes tracks the state of cluster nodes that cannot be removed. @@ -40,21 +40,21 @@ func NewNodes() *Nodes { } } -// NodeInfoGetter is anything that can return NodeInfo object by name. -type NodeInfoGetter interface { - Get(name string) (*schedulerframework.NodeInfo, error) +// nodeInfoGetter is anything that can return NodeInfo object by name. 
+type nodeInfoGetter interface { + GetNodeInfo(name string) (*framework.NodeInfo, error) } // Update updates the internal structure according to current state of the // cluster. Removes the nodes that are no longer in the nodes list. -func (n *Nodes) Update(nodeInfos NodeInfoGetter, timestamp time.Time) { +func (n *Nodes) Update(nodeInfos nodeInfoGetter, timestamp time.Time) { n.reasons = make(map[string]*simulator.UnremovableNode) if len(n.ttls) <= 0 { return } newTTLs := make(map[string]time.Time, len(n.ttls)) for name, ttl := range n.ttls { - if _, err := nodeInfos.Get(name); err != nil { + if _, err := nodeInfos.GetNodeInfo(name); err != nil { // Not logging on error level as most likely cause is that node is no longer in the cluster. klog.Infof("Can't retrieve node %s from snapshot, removing from unremovable nodes, err: %v", name, err) continue diff --git a/cluster-autoscaler/core/scaledown/unremovable/nodes_test.go b/cluster-autoscaler/core/scaledown/unremovable/nodes_test.go index c190b1e148fe..43d82548fc0f 100644 --- a/cluster-autoscaler/core/scaledown/unremovable/nodes_test.go +++ b/cluster-autoscaler/core/scaledown/unremovable/nodes_test.go @@ -22,11 +22,11 @@ import ( "time" "k8s.io/autoscaler/cluster-autoscaler/simulator" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" . "k8s.io/autoscaler/cluster-autoscaler/utils/test" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) var ( @@ -107,7 +107,7 @@ type fakeNodeInfoGetter struct { names map[string]bool } -func (f *fakeNodeInfoGetter) Get(name string) (*schedulerframework.NodeInfo, error) { +func (f *fakeNodeInfoGetter) GetNodeInfo(name string) (*framework.NodeInfo, error) { // We don't actually care about the node info object itself, just its presence. _, found := f.names[name] if found { diff --git a/cluster-autoscaler/core/scaleup/orchestrator/async_initializer.go b/cluster-autoscaler/core/scaleup/orchestrator/async_initializer.go index 57d39cc9d23b..de2dabf600bc 100644 --- a/cluster-autoscaler/core/scaleup/orchestrator/async_initializer.go +++ b/cluster-autoscaler/core/scaleup/orchestrator/async_initializer.go @@ -22,7 +22,6 @@ import ( appsv1 "k8s.io/api/apps/v1" "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/scheduler/framework" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/context" @@ -30,6 +29,7 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/processors/nodegroups" "k8s.io/autoscaler/cluster-autoscaler/processors/nodegroupset" "k8s.io/autoscaler/cluster-autoscaler/processors/status" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/utils/errors" "k8s.io/autoscaler/cluster-autoscaler/utils/taints" ) diff --git a/cluster-autoscaler/core/scaleup/orchestrator/executor.go b/cluster-autoscaler/core/scaleup/orchestrator/executor.go index ec064dde5dfb..c00eae64797a 100644 --- a/cluster-autoscaler/core/scaleup/orchestrator/executor.go +++ b/cluster-autoscaler/core/scaleup/orchestrator/executor.go @@ -24,8 +24,8 @@ import ( "time" apiv1 "k8s.io/api/core/v1" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/klog/v2" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/context" @@ -63,7 +63,7 @@ func newScaleUpExecutor( // If there were multiple concurrent errors one combined error is returned. 
func (e *scaleUpExecutor) ExecuteScaleUps( scaleUpInfos []nodegroupset.ScaleUpInfo, - nodeInfos map[string]*schedulerframework.NodeInfo, + nodeInfos map[string]*framework.NodeInfo, now time.Time, atomic bool, ) (errors.AutoscalerError, []cloudprovider.NodeGroup) { @@ -76,7 +76,7 @@ func (e *scaleUpExecutor) ExecuteScaleUps( func (e *scaleUpExecutor) executeScaleUpsSync( scaleUpInfos []nodegroupset.ScaleUpInfo, - nodeInfos map[string]*schedulerframework.NodeInfo, + nodeInfos map[string]*framework.NodeInfo, now time.Time, atomic bool, ) (errors.AutoscalerError, []cloudprovider.NodeGroup) { @@ -96,7 +96,7 @@ func (e *scaleUpExecutor) executeScaleUpsSync( func (e *scaleUpExecutor) executeScaleUpsParallel( scaleUpInfos []nodegroupset.ScaleUpInfo, - nodeInfos map[string]*schedulerframework.NodeInfo, + nodeInfos map[string]*framework.NodeInfo, now time.Time, atomic bool, ) (errors.AutoscalerError, []cloudprovider.NodeGroup) { @@ -156,7 +156,7 @@ func (e *scaleUpExecutor) increaseSize(nodeGroup cloudprovider.NodeGroup, increa func (e *scaleUpExecutor) executeScaleUp( info nodegroupset.ScaleUpInfo, - nodeInfo *schedulerframework.NodeInfo, + nodeInfo *framework.NodeInfo, availableGPUTypes map[string]struct{}, now time.Time, atomic bool, diff --git a/cluster-autoscaler/core/scaleup/orchestrator/orchestrator.go b/cluster-autoscaler/core/scaleup/orchestrator/orchestrator.go index 6c6b229bcc10..dd4b53a241ac 100644 --- a/cluster-autoscaler/core/scaleup/orchestrator/orchestrator.go +++ b/cluster-autoscaler/core/scaleup/orchestrator/orchestrator.go @@ -22,25 +22,24 @@ import ( appsv1 "k8s.io/api/apps/v1" apiv1 "k8s.io/api/core/v1" - "k8s.io/autoscaler/cluster-autoscaler/estimator" - "k8s.io/klog/v2" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" - "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/clusterstate" "k8s.io/autoscaler/cluster-autoscaler/context" "k8s.io/autoscaler/cluster-autoscaler/core/scaleup/equivalence" "k8s.io/autoscaler/cluster-autoscaler/core/scaleup/resource" "k8s.io/autoscaler/cluster-autoscaler/core/utils" + "k8s.io/autoscaler/cluster-autoscaler/estimator" "k8s.io/autoscaler/cluster-autoscaler/expander" "k8s.io/autoscaler/cluster-autoscaler/metrics" ca_processors "k8s.io/autoscaler/cluster-autoscaler/processors" "k8s.io/autoscaler/cluster-autoscaler/processors/nodegroups" "k8s.io/autoscaler/cluster-autoscaler/processors/nodegroupset" "k8s.io/autoscaler/cluster-autoscaler/processors/status" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/utils/errors" "k8s.io/autoscaler/cluster-autoscaler/utils/klogx" "k8s.io/autoscaler/cluster-autoscaler/utils/taints" + "k8s.io/klog/v2" ) // ScaleUpOrchestrator implements scaleup.Orchestrator interface. @@ -87,7 +86,7 @@ func (o *ScaleUpOrchestrator) ScaleUp( unschedulablePods []*apiv1.Pod, nodes []*apiv1.Node, daemonSets []*appsv1.DaemonSet, - nodeInfos map[string]*schedulerframework.NodeInfo, + nodeInfos map[string]*framework.NodeInfo, allOrNothing bool, // Either request enough capacity for all unschedulablePods, or don't request it at all. 
) (*status.ScaleUpStatus, errors.AutoscalerError) { if !o.initialized { @@ -277,7 +276,7 @@ func (o *ScaleUpOrchestrator) ScaleUp( }, nil } -func (o *ScaleUpOrchestrator) applyLimits(newNodes int, resourcesLeft resource.Limits, nodeGroup cloudprovider.NodeGroup, nodeInfos map[string]*schedulerframework.NodeInfo) (int, errors.AutoscalerError) { +func (o *ScaleUpOrchestrator) applyLimits(newNodes int, resourcesLeft resource.Limits, nodeGroup cloudprovider.NodeGroup, nodeInfos map[string]*framework.NodeInfo) (int, errors.AutoscalerError) { nodeInfo, found := nodeInfos[nodeGroup.Id()] if !found { // This should never happen, as we already should have retrieved nodeInfo for any considered nodegroup. @@ -293,7 +292,7 @@ func (o *ScaleUpOrchestrator) applyLimits(newNodes int, resourcesLeft resource.L // appropriate status or error if an unexpected error occurred. func (o *ScaleUpOrchestrator) ScaleUpToNodeGroupMinSize( nodes []*apiv1.Node, - nodeInfos map[string]*schedulerframework.NodeInfo, + nodeInfos map[string]*framework.NodeInfo, ) (*status.ScaleUpStatus, errors.AutoscalerError) { if !o.initialized { return status.UpdateScaleUpError(&status.ScaleUpStatus{}, errors.NewAutoscalerError(errors.InternalError, "ScaleUpOrchestrator is not initialized")) @@ -390,7 +389,7 @@ func (o *ScaleUpOrchestrator) ScaleUpToNodeGroupMinSize( // filterValidScaleUpNodeGroups filters the node groups that are valid for scale-up func (o *ScaleUpOrchestrator) filterValidScaleUpNodeGroups( nodeGroups []cloudprovider.NodeGroup, - nodeInfos map[string]*schedulerframework.NodeInfo, + nodeInfos map[string]*framework.NodeInfo, resourcesLeft resource.Limits, currentNodeCount int, now time.Time, @@ -449,7 +448,7 @@ func (o *ScaleUpOrchestrator) filterValidScaleUpNodeGroups( func (o *ScaleUpOrchestrator) ComputeExpansionOption( nodeGroup cloudprovider.NodeGroup, schedulablePodGroups map[string][]estimator.PodEquivalenceGroup, - nodeInfos map[string]*schedulerframework.NodeInfo, + nodeInfos map[string]*framework.NodeInfo, currentNodeCount int, now time.Time, allOrNothing bool, @@ -499,7 +498,7 @@ func (o *ScaleUpOrchestrator) ComputeExpansionOption( // CreateNodeGroup will try to create a new node group based on the initialOption. func (o *ScaleUpOrchestrator) CreateNodeGroup( initialOption *expander.Option, - nodeInfos map[string]*schedulerframework.NodeInfo, + nodeInfos map[string]*framework.NodeInfo, schedulablePodGroups map[string][]estimator.PodEquivalenceGroup, podEquivalenceGroups []*equivalence.PodGroup, daemonSets []*appsv1.DaemonSet, @@ -564,14 +563,14 @@ func (o *ScaleUpOrchestrator) CreateNodeGroup( func (o *ScaleUpOrchestrator) SchedulablePodGroups( podEquivalenceGroups []*equivalence.PodGroup, nodeGroup cloudprovider.NodeGroup, - nodeInfo *schedulerframework.NodeInfo, + nodeInfo *framework.NodeInfo, ) []estimator.PodEquivalenceGroup { o.autoscalingContext.ClusterSnapshot.Fork() defer o.autoscalingContext.ClusterSnapshot.Revert() // Add test node to snapshot. var allPods []*apiv1.Pod - for _, podInfo := range nodeInfo.Pods { + for _, podInfo := range nodeInfo.Pods() { allPods = append(allPods, podInfo.Pod) } if err := o.autoscalingContext.ClusterSnapshot.AddNodeWithPods(nodeInfo.Node(), allPods); err != nil { @@ -603,9 +602,9 @@ func (o *ScaleUpOrchestrator) SchedulablePodGroups( } // UpcomingNodes returns a list of nodes that are not ready but should be. 
-func (o *ScaleUpOrchestrator) UpcomingNodes(nodeInfos map[string]*schedulerframework.NodeInfo) ([]*schedulerframework.NodeInfo, errors.AutoscalerError) { +func (o *ScaleUpOrchestrator) UpcomingNodes(nodeInfos map[string]*framework.NodeInfo) ([]*framework.NodeInfo, errors.AutoscalerError) { upcomingCounts, _ := o.clusterStateRegistry.GetUpcomingNodes() - upcomingNodes := make([]*schedulerframework.NodeInfo, 0) + upcomingNodes := make([]*framework.NodeInfo, 0) for nodeGroup, numberOfNodes := range upcomingCounts { nodeTemplate, found := nodeInfos[nodeGroup] if !found { @@ -636,7 +635,7 @@ func (o *ScaleUpOrchestrator) IsNodeGroupReadyToScaleUp(nodeGroup cloudprovider. } // IsNodeGroupResourceExceeded returns nil if node group resource limits are not exceeded, otherwise a reason is provided. -func (o *ScaleUpOrchestrator) IsNodeGroupResourceExceeded(resourcesLeft resource.Limits, nodeGroup cloudprovider.NodeGroup, nodeInfo *schedulerframework.NodeInfo, numNodes int) status.Reasons { +func (o *ScaleUpOrchestrator) IsNodeGroupResourceExceeded(resourcesLeft resource.Limits, nodeGroup cloudprovider.NodeGroup, nodeInfo *framework.NodeInfo, numNodes int) status.Reasons { resourcesDelta, err := o.resourceManager.DeltaForNode(o.autoscalingContext, nodeInfo, nodeGroup) if err != nil { klog.Errorf("Skipping node group %s; error getting node group resources: %v", nodeGroup.Id(), err) @@ -682,7 +681,7 @@ func (o *ScaleUpOrchestrator) balanceScaleUps( now time.Time, nodeGroup cloudprovider.NodeGroup, newNodes int, - nodeInfos map[string]*schedulerframework.NodeInfo, + nodeInfos map[string]*framework.NodeInfo, schedulablePodGroups map[string][]estimator.PodEquivalenceGroup, ) ([]nodegroupset.ScaleUpInfo, errors.AutoscalerError) { // Recompute similar node groups in case they need to be updated @@ -718,7 +717,7 @@ func (o *ScaleUpOrchestrator) balanceScaleUps( // set of pods as the main node group. 
func (o *ScaleUpOrchestrator) ComputeSimilarNodeGroups( nodeGroup cloudprovider.NodeGroup, - nodeInfos map[string]*schedulerframework.NodeInfo, + nodeInfos map[string]*framework.NodeInfo, schedulablePodGroups map[string][]estimator.PodEquivalenceGroup, now time.Time, ) []cloudprovider.NodeGroup { diff --git a/cluster-autoscaler/core/scaleup/orchestrator/orchestrator_test.go b/cluster-autoscaler/core/scaleup/orchestrator/orchestrator_test.go index eba980f48c9e..a1cdd15eba91 100644 --- a/cluster-autoscaler/core/scaleup/orchestrator/orchestrator_test.go +++ b/cluster-autoscaler/core/scaleup/orchestrator/orchestrator_test.go @@ -28,6 +28,7 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/processors/nodegroupconfig" "k8s.io/autoscaler/cluster-autoscaler/processors/nodegroups/asyncnodegroups" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" kube_record "k8s.io/client-go/tools/record" "k8s.io/component-base/metrics/legacyregistry" schedulermetrics "k8s.io/kubernetes/pkg/scheduler/metrics" @@ -57,7 +58,6 @@ import ( apiv1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" "k8s.io/client-go/kubernetes/fake" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" "github.com/stretchr/testify/assert" ) @@ -146,8 +146,7 @@ func TestZeroOrMaxNodeScaling(t *testing.T) { n := BuildTestNode("n", 1000, 1000) SetNodeReadyState(n, true, time.Time{}) - nodeInfo := schedulerframework.NewNodeInfo() - nodeInfo.SetNode(n) + nodeInfo := framework.NewTestNodeInfo(n) cases := map[string]struct { testConfig *ScaleUpTestConfig @@ -835,8 +834,7 @@ func TestNoCreateNodeGroupMaxCoresLimitHit(t *testing.T) { largeNode := BuildTestNode("n", 8000, 8000) SetNodeReadyState(largeNode, true, time.Time{}) - largeNodeInfo := schedulerframework.NewNodeInfo() - largeNodeInfo.SetNode(largeNode) + largeNodeInfo := framework.NewTestNodeInfo(largeNode) config := &ScaleUpTestConfig{ EnableAutoprovisioning: true, @@ -1004,7 +1002,7 @@ func runSimpleScaleUpTest(t *testing.T, config *ScaleUpTestConfig) *ScaleUpTestR } if len(config.NodeTemplateConfigs) > 0 { machineTypes := []string{} - machineTemplates := map[string]*schedulerframework.NodeInfo{} + machineTemplates := map[string]*framework.NodeInfo{} for _, ntc := range config.NodeTemplateConfigs { machineTypes = append(machineTypes, ntc.MachineType) machineTemplates[ntc.NodeGroupName] = ntc.NodeInfo @@ -1285,7 +1283,7 @@ type constNodeGroupSetProcessor struct { similarNodeGroups []cloudprovider.NodeGroup } -func (p *constNodeGroupSetProcessor) FindSimilarNodeGroups(_ *context.AutoscalingContext, _ cloudprovider.NodeGroup, _ map[string]*schedulerframework.NodeInfo) ([]cloudprovider.NodeGroup, errors.AutoscalerError) { +func (p *constNodeGroupSetProcessor) FindSimilarNodeGroups(_ *context.AutoscalingContext, _ cloudprovider.NodeGroup, _ map[string]*framework.NodeInfo) ([]cloudprovider.NodeGroup, errors.AutoscalerError) { return p.similarNodeGroups, nil } @@ -1516,8 +1514,7 @@ func TestScaleUpAutoprovisionedNodeGroup(t *testing.T) { t1 := BuildTestNode("t1", 4000, 1000000) SetNodeReadyState(t1, true, time.Time{}) - ti1 := schedulerframework.NewNodeInfo() - ti1.SetNode(t1) + ti1 := framework.NewTestNodeInfo(t1) provider := testprovider.NewTestAutoprovisioningCloudProvider( func(nodeGroup string, increase int) error { @@ -1526,7 +1523,7 @@ func TestScaleUpAutoprovisionedNodeGroup(t *testing.T) { }, nil, func(nodeGroup string) error { createdGroups <- nodeGroup return nil - }, nil, []string{"T1"}, map[string]*schedulerframework.NodeInfo{"T1": ti1}) + }, nil, []string{"T1"}, 
map[string]*framework.NodeInfo{"T1": ti1}) options := config.AutoscalingOptions{ EstimatorName: estimator.BinpackingEstimatorName, @@ -1570,8 +1567,7 @@ func TestScaleUpBalanceAutoprovisionedNodeGroups(t *testing.T) { t1 := BuildTestNode("t1", 100, 1000000) SetNodeReadyState(t1, true, time.Time{}) - ti1 := schedulerframework.NewNodeInfo() - ti1.SetNode(t1) + ti1 := framework.NewTestNodeInfo(t1) provider := testprovider.NewTestAutoprovisioningCloudProvider( func(nodeGroup string, increase int) error { @@ -1580,7 +1576,7 @@ func TestScaleUpBalanceAutoprovisionedNodeGroups(t *testing.T) { }, nil, func(nodeGroup string) error { createdGroups <- nodeGroup return nil - }, nil, []string{"T1"}, map[string]*schedulerframework.NodeInfo{"T1": ti1}) + }, nil, []string{"T1"}, map[string]*framework.NodeInfo{"T1": ti1}) options := config.AutoscalingOptions{ BalanceSimilarNodeGroups: true, @@ -1672,20 +1668,18 @@ func TestScaleUpToMeetNodeGroupMinSize(t *testing.T) { func TestScaleupAsyncNodeGroupsEnabled(t *testing.T) { t1 := BuildTestNode("t1", 100, 0) SetNodeReadyState(t1, true, time.Time{}) - ti1 := schedulerframework.NewNodeInfo() - ti1.SetNode(t1) + ti1 := framework.NewTestNodeInfo(t1) t2 := BuildTestNode("t2", 0, 100) SetNodeReadyState(t2, true, time.Time{}) - ti2 := schedulerframework.NewNodeInfo() - ti2.SetNode(t2) + ti2 := framework.NewTestNodeInfo(t2) testCases := []struct { upcomingNodeGroupsNames []string podsToAdd []*v1.Pod isUpcomingMockMap map[string]bool machineTypes []string - machineTemplates map[string]*schedulerframework.NodeInfo + machineTemplates map[string]*framework.NodeInfo expectedCreatedGroups map[string]bool expectedExpandedGroups map[string]int }{ @@ -1694,7 +1688,7 @@ func TestScaleupAsyncNodeGroupsEnabled(t *testing.T) { podsToAdd: []*v1.Pod{BuildTestPod("p1", 80, 0), BuildTestPod("p2", 80, 0)}, isUpcomingMockMap: map[string]bool{"autoprovisioned-T1": true}, machineTypes: []string{"T1"}, - machineTemplates: map[string]*schedulerframework.NodeInfo{"T1": ti1}, + machineTemplates: map[string]*framework.NodeInfo{"T1": ti1}, expectedCreatedGroups: map[string]bool{}, expectedExpandedGroups: map[string]int{"autoprovisioned-T1": 2}, }, @@ -1703,7 +1697,7 @@ func TestScaleupAsyncNodeGroupsEnabled(t *testing.T) { podsToAdd: []*v1.Pod{BuildTestPod("p1", 80, 0)}, isUpcomingMockMap: map[string]bool{}, machineTypes: []string{"T1"}, - machineTemplates: map[string]*schedulerframework.NodeInfo{"T1": ti1}, + machineTemplates: map[string]*framework.NodeInfo{"T1": ti1}, expectedCreatedGroups: map[string]bool{"autoprovisioned-T1": true}, expectedExpandedGroups: map[string]int{"autoprovisioned-T1": 1}, }, @@ -1712,7 +1706,7 @@ func TestScaleupAsyncNodeGroupsEnabled(t *testing.T) { podsToAdd: []*v1.Pod{BuildTestPod("p3", 0, 100), BuildTestPod("p2", 0, 100)}, isUpcomingMockMap: map[string]bool{"autoprovisioned-T1": true}, machineTypes: []string{"T1", "T2"}, - machineTemplates: map[string]*schedulerframework.NodeInfo{"T1": ti1, "T2": ti2}, + machineTemplates: map[string]*framework.NodeInfo{"T1": ti1, "T2": ti2}, expectedCreatedGroups: map[string]bool{"autoprovisioned-T2": true}, expectedExpandedGroups: map[string]int{"autoprovisioned-T2": 2}, }, diff --git a/cluster-autoscaler/core/scaleup/resource/manager.go b/cluster-autoscaler/core/scaleup/resource/manager.go index 326701924403..a4f8bd2c071d 100644 --- a/cluster-autoscaler/core/scaleup/resource/manager.go +++ b/cluster-autoscaler/core/scaleup/resource/manager.go @@ -26,9 +26,9 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/context" 
"k8s.io/autoscaler/cluster-autoscaler/core/utils" "k8s.io/autoscaler/cluster-autoscaler/processors/customresources" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/utils/errors" "k8s.io/klog/v2" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) // LimitUnknown is used as a value in ResourcesLimits if actual limit could not be obtained due to errors talking to cloud provider. @@ -59,7 +59,7 @@ func NewManager(crp customresources.CustomResourcesProcessor) *Manager { } // DeltaForNode calculates the amount of resources that will be used from the cluster when creating a node. -func (m *Manager) DeltaForNode(ctx *context.AutoscalingContext, nodeInfo *schedulerframework.NodeInfo, nodeGroup cloudprovider.NodeGroup) (Delta, errors.AutoscalerError) { +func (m *Manager) DeltaForNode(ctx *context.AutoscalingContext, nodeInfo *framework.NodeInfo, nodeGroup cloudprovider.NodeGroup) (Delta, errors.AutoscalerError) { resultScaleUpDelta := make(Delta) nodeCPU, nodeMemory := utils.GetNodeCoresAndMemory(nodeInfo.Node()) resultScaleUpDelta[cloudprovider.ResourceNameCores] = nodeCPU @@ -85,7 +85,7 @@ func (m *Manager) DeltaForNode(ctx *context.AutoscalingContext, nodeInfo *schedu } // ResourcesLeft calculates the amount of resources left in the cluster. -func (m *Manager) ResourcesLeft(ctx *context.AutoscalingContext, nodeInfos map[string]*schedulerframework.NodeInfo, nodes []*corev1.Node) (Limits, errors.AutoscalerError) { +func (m *Manager) ResourcesLeft(ctx *context.AutoscalingContext, nodeInfos map[string]*framework.NodeInfo, nodes []*corev1.Node) (Limits, errors.AutoscalerError) { nodesFromNotAutoscaledGroups, err := utils.FilterOutNodesFromNotAutoscaledGroups(nodes, ctx.CloudProvider) if err != nil { return nil, err.AddPrefix("failed to filter out nodes which are from not autoscaled groups: ") @@ -143,7 +143,7 @@ func (m *Manager) ResourcesLeft(ctx *context.AutoscalingContext, nodeInfos map[s } // ApplyLimits calculates the new node count by applying the left resource limits of the cluster. 
-func (m *Manager) ApplyLimits(ctx *context.AutoscalingContext, newCount int, resourceLeft Limits, nodeInfo *schedulerframework.NodeInfo, nodeGroup cloudprovider.NodeGroup) (int, errors.AutoscalerError) { +func (m *Manager) ApplyLimits(ctx *context.AutoscalingContext, newCount int, resourceLeft Limits, nodeInfo *framework.NodeInfo, nodeGroup cloudprovider.NodeGroup) (int, errors.AutoscalerError) { delta, err := m.DeltaForNode(ctx, nodeInfo, nodeGroup) if err != nil { return 0, err @@ -203,7 +203,7 @@ func LimitsNotExceeded() LimitsCheckResult { return LimitsCheckResult{false, []string{}} } -func (m *Manager) coresMemoryTotal(ctx *context.AutoscalingContext, nodeInfos map[string]*schedulerframework.NodeInfo, nodesFromNotAutoscaledGroups []*corev1.Node) (int64, int64, errors.AutoscalerError) { +func (m *Manager) coresMemoryTotal(ctx *context.AutoscalingContext, nodeInfos map[string]*framework.NodeInfo, nodesFromNotAutoscaledGroups []*corev1.Node) (int64, int64, errors.AutoscalerError) { var coresTotal int64 var memoryTotal int64 for _, nodeGroup := range ctx.CloudProvider.NodeGroups() { @@ -233,7 +233,7 @@ func (m *Manager) coresMemoryTotal(ctx *context.AutoscalingContext, nodeInfos ma return coresTotal, memoryTotal, nil } -func (m *Manager) customResourcesTotal(ctx *context.AutoscalingContext, nodeInfos map[string]*schedulerframework.NodeInfo, nodesFromNotAutoscaledGroups []*corev1.Node) (map[string]int64, errors.AutoscalerError) { +func (m *Manager) customResourcesTotal(ctx *context.AutoscalingContext, nodeInfos map[string]*framework.NodeInfo, nodesFromNotAutoscaledGroups []*corev1.Node) (map[string]int64, errors.AutoscalerError) { result := make(map[string]int64) for _, nodeGroup := range ctx.CloudProvider.NodeGroups() { currentSize, err := nodeGroup.TargetSize() diff --git a/cluster-autoscaler/core/scaleup/scaleup.go b/cluster-autoscaler/core/scaleup/scaleup.go index 0da619134ea2..21a0ef6b8f13 100644 --- a/cluster-autoscaler/core/scaleup/scaleup.go +++ b/cluster-autoscaler/core/scaleup/scaleup.go @@ -24,9 +24,9 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/estimator" ca_processors "k8s.io/autoscaler/cluster-autoscaler/processors" "k8s.io/autoscaler/cluster-autoscaler/processors/status" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/utils/errors" "k8s.io/autoscaler/cluster-autoscaler/utils/taints" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) // Orchestrator is a component that picks the node group to resize and triggers @@ -47,7 +47,7 @@ type Orchestrator interface { unschedulablePods []*apiv1.Pod, nodes []*apiv1.Node, daemonSets []*appsv1.DaemonSet, - nodeInfos map[string]*schedulerframework.NodeInfo, + nodeInfos map[string]*framework.NodeInfo, allOrNothing bool, ) (*status.ScaleUpStatus, errors.AutoscalerError) // ScaleUpToNodeGroupMinSize tries to scale up node groups that have less nodes @@ -56,6 +56,6 @@ type Orchestrator interface { // appropriate status or error if an unexpected error occurred. 
ScaleUpToNodeGroupMinSize( nodes []*apiv1.Node, - nodeInfos map[string]*schedulerframework.NodeInfo, + nodeInfos map[string]*framework.NodeInfo, ) (*status.ScaleUpStatus, errors.AutoscalerError) } diff --git a/cluster-autoscaler/core/static_autoscaler.go b/cluster-autoscaler/core/static_autoscaler.go index 071da13f2a9a..216a684ee304 100644 --- a/cluster-autoscaler/core/static_autoscaler.go +++ b/cluster-autoscaler/core/static_autoscaler.go @@ -46,6 +46,7 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/simulator" "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" "k8s.io/autoscaler/cluster-autoscaler/simulator/drainability/rules" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/simulator/options" "k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker" "k8s.io/autoscaler/cluster-autoscaler/utils/backoff" @@ -58,7 +59,6 @@ import ( apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" klog "k8s.io/klog/v2" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) const ( @@ -496,8 +496,7 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) caerrors.AutoscalerErr } } } - - l, err := a.ClusterSnapshot.NodeInfos().List() + l, err := a.ClusterSnapshot.ListNodeInfos() if err != nil { klog.Errorf("Unable to fetch ClusterNode List for Debugging Snapshot, %v", err) } else { @@ -679,7 +678,7 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) caerrors.AutoscalerErr return nil } -func (a *StaticAutoscaler) addUpcomingNodesToClusterSnapshot(upcomingCounts map[string]int, nodeInfosForGroups map[string]*schedulerframework.NodeInfo) error { +func (a *StaticAutoscaler) addUpcomingNodesToClusterSnapshot(upcomingCounts map[string]int, nodeInfosForGroups map[string]*framework.NodeInfo) error { nodeGroups := a.nodeGroupsById() upcomingNodeGroups := make(map[string]int) upcomingNodesFromUpcomingNodeGroups := 0 @@ -691,7 +690,7 @@ func (a *StaticAutoscaler) addUpcomingNodesToClusterSnapshot(upcomingCounts map[ isUpcomingNodeGroup := a.processors.AsyncNodeGroupStateChecker.IsUpcoming(nodeGroup) for _, upcomingNode := range upcomingNodes { var pods []*apiv1.Pod - for _, podInfo := range upcomingNode.Pods { + for _, podInfo := range upcomingNode.Pods() { pods = append(pods, podInfo.Pod) } err := a.ClusterSnapshot.AddNodeWithPods(upcomingNode.Node(), pods) @@ -989,7 +988,7 @@ func filterNodesFromSelectedGroups(cp cloudprovider.CloudProvider, nodes ...*api return filtered } -func (a *StaticAutoscaler) updateClusterState(allNodes []*apiv1.Node, nodeInfosForGroups map[string]*schedulerframework.NodeInfo, currentTime time.Time) caerrors.AutoscalerError { +func (a *StaticAutoscaler) updateClusterState(allNodes []*apiv1.Node, nodeInfosForGroups map[string]*framework.NodeInfo, currentTime time.Time) caerrors.AutoscalerError { err := a.clusterStateRegistry.UpdateNodes(allNodes, nodeInfosForGroups, currentTime) if err != nil { klog.Errorf("Failed to update node registry: %v", err) @@ -1016,8 +1015,8 @@ func allPodsAreNew(pods []*apiv1.Pod, currentTime time.Time) bool { return found && oldest.Add(unschedulablePodWithGpuTimeBuffer).After(currentTime) } -func getUpcomingNodeInfos(upcomingCounts map[string]int, nodeInfos map[string]*schedulerframework.NodeInfo) map[string][]*schedulerframework.NodeInfo { - upcomingNodes := make(map[string][]*schedulerframework.NodeInfo) +func getUpcomingNodeInfos(upcomingCounts map[string]int, nodeInfos map[string]*framework.NodeInfo) map[string][]*framework.NodeInfo { + upcomingNodes := 
make(map[string][]*framework.NodeInfo) for nodeGroup, numberOfNodes := range upcomingCounts { nodeTemplate, found := nodeInfos[nodeGroup] if !found { @@ -1030,7 +1029,7 @@ func getUpcomingNodeInfos(upcomingCounts map[string]int, nodeInfos map[string]*s } nodeTemplate.Node().Annotations[NodeUpcomingAnnotation] = "true" - var nodes []*schedulerframework.NodeInfo + var nodes []*framework.NodeInfo for i := 0; i < numberOfNodes; i++ { // Ensure new nodes have different names because nodeName // will be used as a map key. Also deep copy pods (daemonsets & diff --git a/cluster-autoscaler/core/static_autoscaler_test.go b/cluster-autoscaler/core/static_autoscaler_test.go index 3433bcb5ef12..fcff16331eef 100644 --- a/cluster-autoscaler/core/static_autoscaler_test.go +++ b/cluster-autoscaler/core/static_autoscaler_test.go @@ -53,6 +53,7 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/simulator" "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" "k8s.io/autoscaler/cluster-autoscaler/simulator/drainability/rules" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/simulator/options" "k8s.io/autoscaler/cluster-autoscaler/simulator/utilization" "k8s.io/autoscaler/cluster-autoscaler/utils/drain" @@ -72,7 +73,6 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/kubernetes/fake" v1appslister "k8s.io/client-go/listers/apps/v1" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" @@ -336,8 +336,7 @@ func TestStaticAutoscalerRunOnce(t *testing.T) { p2 := BuildTestPod("p2", 600, 100, MarkUnschedulable()) tn := BuildTestNode("tn", 1000, 1000) - tni := schedulerframework.NewNodeInfo() - tni.SetNode(tn) + tni := framework.NewTestNodeInfo(tn) provider := testprovider.NewTestAutoprovisioningCloudProvider( func(id string, delta int) error { @@ -348,7 +347,7 @@ func TestStaticAutoscalerRunOnce(t *testing.T) { return ret }, nil, nil, - nil, map[string]*schedulerframework.NodeInfo{"ng1": tni, "ng2": tni, "ng3": tni}) + nil, map[string]*framework.NodeInfo{"ng1": tni, "ng2": tni, "ng3": tni}) provider.AddNodeGroup("ng1", 1, 10, 1) provider.AddNode("ng1", n1) ng1 := reflect.ValueOf(provider.GetNodeGroup("ng1")).Interface().(*testprovider.TestNodeGroup) @@ -514,8 +513,7 @@ func TestStaticAutoscalerRunOnceWithScaleDownDelayPerNG(t *testing.T) { SetNodeReadyState(n2, true, time.Now()) tn := BuildTestNode("tn", 1000, 1000) - tni := schedulerframework.NewNodeInfo() - tni.SetNode(tn) + tni := framework.NewTestNodeInfo(tn) provider := testprovider.NewTestAutoprovisioningCloudProvider( func(id string, delta int) error { @@ -526,7 +524,7 @@ func TestStaticAutoscalerRunOnceWithScaleDownDelayPerNG(t *testing.T) { return ret }, nil, nil, - nil, map[string]*schedulerframework.NodeInfo{"ng1": tni, "ng2": tni}) + nil, map[string]*framework.NodeInfo{"ng1": tni, "ng2": tni}) assert.NotNil(t, provider) provider.AddNodeGroup("ng1", 0, 10, 1) @@ -744,16 +742,13 @@ func TestStaticAutoscalerRunOnceWithAutoprovisionedEnabled(t *testing.T) { tn1 := BuildTestNode("tn1", 100, 1000) SetNodeReadyState(tn1, true, time.Now()) - tni1 := schedulerframework.NewNodeInfo() - tni1.SetNode(tn1) + tni1 := framework.NewTestNodeInfo(tn1) tn2 := BuildTestNode("tn2", 1000, 1000) SetNodeReadyState(tn2, true, time.Now()) - tni2 := schedulerframework.NewNodeInfo() - tni2.SetNode(tn2) + tni2 := framework.NewTestNodeInfo(tn2) tn3 := BuildTestNode("tn3", 100, 1000) SetNodeReadyState(tn2, true, time.Now()) - tni3 := 
schedulerframework.NewNodeInfo() - tni3.SetNode(tn3) + tni3 := framework.NewTestNodeInfo(tn3) provider := testprovider.NewTestAutoprovisioningCloudProvider( func(id string, delta int) error { @@ -767,7 +762,7 @@ func TestStaticAutoscalerRunOnceWithAutoprovisionedEnabled(t *testing.T) { }, func(id string) error { return onNodeGroupDeleteMock.Delete(id) }, - []string{"TN1", "TN2"}, map[string]*schedulerframework.NodeInfo{"TN1": tni1, "TN2": tni2, "ng1": tni3}) + []string{"TN1", "TN2"}, map[string]*framework.NodeInfo{"TN1": tni1, "TN2": tni2, "ng1": tni3}) provider.AddNodeGroup("ng1", 1, 10, 1) provider.AddAutoprovisionedNodeGroup("autoprovisioned-TN1", 0, 10, 0, "TN1") autoprovisionedTN1 := reflect.ValueOf(provider.GetNodeGroup("autoprovisioned-TN1")).Interface().(*testprovider.TestNodeGroup) @@ -2005,13 +2000,13 @@ func (f *candidateTrackingFakePlanner) NodeUtilizationMap() map[string]utilizati } func assertSnapshotNodeCount(t *testing.T, snapshot clustersnapshot.ClusterSnapshot, wantCount int) { - nodeInfos, err := snapshot.NodeInfos().List() + nodeInfos, err := snapshot.ListNodeInfos() assert.NoError(t, err) assert.Len(t, nodeInfos, wantCount) } func assertNodesNotInSnapshot(t *testing.T, snapshot clustersnapshot.ClusterSnapshot, nodeNames map[string]bool) { - nodeInfos, err := snapshot.NodeInfos().List() + nodeInfos, err := snapshot.ListNodeInfos() assert.NoError(t, err) for _, nodeInfo := range nodeInfos { assert.NotContains(t, nodeNames, nodeInfo.Node().Name) @@ -2019,7 +2014,7 @@ func assertNodesNotInSnapshot(t *testing.T, snapshot clustersnapshot.ClusterSnap } func assertNodesInSnapshot(t *testing.T, snapshot clustersnapshot.ClusterSnapshot, nodeNames map[string]bool) { - nodeInfos, err := snapshot.NodeInfos().List() + nodeInfos, err := snapshot.ListNodeInfos() assert.NoError(t, err) snapshotNodeNames := map[string]bool{} for _, nodeInfo := range nodeInfos { diff --git a/cluster-autoscaler/core/test/common.go b/cluster-autoscaler/core/test/common.go index 49a44476a5c5..7dfc57b765ae 100644 --- a/cluster-autoscaler/core/test/common.go +++ b/cluster-autoscaler/core/test/common.go @@ -37,6 +37,7 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/processors/nodegroups" "k8s.io/autoscaler/cluster-autoscaler/processors/status" "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker" "k8s.io/autoscaler/cluster-autoscaler/utils/backoff" "k8s.io/autoscaler/cluster-autoscaler/utils/errors" @@ -48,7 +49,6 @@ import ( "k8s.io/apimachinery/pkg/api/resource" kube_client "k8s.io/client-go/kubernetes" kube_record "k8s.io/client-go/tools/record" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) // NodeConfig is a node config used in tests @@ -100,7 +100,7 @@ type NodeGroupConfig struct { // NodeTemplateConfig is a structure to provide node info in tests type NodeTemplateConfig struct { MachineType string - NodeInfo *schedulerframework.NodeInfo + NodeInfo *framework.NodeInfo NodeGroupName string } @@ -284,9 +284,9 @@ type MockAutoprovisioningNodeGroupListProcessor struct { } // Process extends the list of node groups -func (p *MockAutoprovisioningNodeGroupListProcessor) Process(context *context.AutoscalingContext, nodeGroups []cloudprovider.NodeGroup, nodeInfos map[string]*schedulerframework.NodeInfo, +func (p *MockAutoprovisioningNodeGroupListProcessor) Process(context *context.AutoscalingContext, nodeGroups []cloudprovider.NodeGroup, nodeInfos 
map[string]*framework.NodeInfo, unschedulablePods []*apiv1.Pod, -) ([]cloudprovider.NodeGroup, map[string]*schedulerframework.NodeInfo, error) { +) ([]cloudprovider.NodeGroup, map[string]*framework.NodeInfo, error) { machines, err := context.CloudProvider.GetAvailableMachineTypes() assert.NoError(p.T, err) @@ -368,7 +368,7 @@ func (r *MockReportingStrategy) LastInputOptions() []GroupSizeChange { // BestOption satisfies the Strategy interface. Picks the best option from those passed as an argument. // When parameter optionToChoose is defined, it's picked as the best one. // Otherwise, random option is used. -func (r *MockReportingStrategy) BestOption(options []expander.Option, nodeInfo map[string]*schedulerframework.NodeInfo) *expander.Option { +func (r *MockReportingStrategy) BestOption(options []expander.Option, nodeInfo map[string]*framework.NodeInfo) *expander.Option { r.results.inputOptions = expanderOptionsToGroupSizeChanges(options) if r.optionToChoose == nil { return r.defaultStrategy.BestOption(options, nodeInfo) diff --git a/cluster-autoscaler/core/utils/utils.go b/cluster-autoscaler/core/utils/utils.go index 79592f472b36..c25db2ef8453 100644 --- a/cluster-autoscaler/core/utils/utils.go +++ b/cluster-autoscaler/core/utils/utils.go @@ -27,16 +27,16 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/clusterstate" "k8s.io/autoscaler/cluster-autoscaler/metrics" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/utils/daemonset" "k8s.io/autoscaler/cluster-autoscaler/utils/errors" "k8s.io/autoscaler/cluster-autoscaler/utils/gpu" "k8s.io/autoscaler/cluster-autoscaler/utils/labels" "k8s.io/autoscaler/cluster-autoscaler/utils/taints" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) // GetNodeInfoFromTemplate returns NodeInfo object built base on TemplateNodeInfo returned by NodeGroup.TemplateNodeInfo(). -func GetNodeInfoFromTemplate(nodeGroup cloudprovider.NodeGroup, daemonsets []*appsv1.DaemonSet, taintConfig taints.TaintConfig) (*schedulerframework.NodeInfo, errors.AutoscalerError) { +func GetNodeInfoFromTemplate(nodeGroup cloudprovider.NodeGroup, daemonsets []*appsv1.DaemonSet, taintConfig taints.TaintConfig) (*framework.NodeInfo, errors.AutoscalerError) { id := nodeGroup.Id() baseNodeInfo, err := nodeGroup.TemplateNodeInfo() if err != nil { @@ -55,12 +55,11 @@ func GetNodeInfoFromTemplate(nodeGroup cloudprovider.NodeGroup, daemonsets []*ap if err != nil { return nil, errors.ToAutoscalerError(errors.InternalError, err) } - for _, podInfo := range baseNodeInfo.Pods { - pods = append(pods, podInfo.Pod) + for _, podInfo := range baseNodeInfo.Pods() { + pods = append(pods, &framework.PodInfo{Pod: podInfo.Pod}) } - sanitizedNodeInfo := schedulerframework.NewNodeInfo(SanitizePods(pods, sanitizedNode)...) - sanitizedNodeInfo.SetNode(sanitizedNode) + sanitizedNodeInfo := framework.NewNodeInfo(sanitizedNode, nil, SanitizePods(pods, sanitizedNode)...) 
return sanitizedNodeInfo, nil } @@ -91,15 +90,14 @@ func FilterOutNodesFromNotAutoscaledGroups(nodes []*apiv1.Node, cloudProvider cl } // DeepCopyNodeInfo clones the provided nodeInfo -func DeepCopyNodeInfo(nodeInfo *schedulerframework.NodeInfo) *schedulerframework.NodeInfo { - newPods := make([]*apiv1.Pod, 0) - for _, podInfo := range nodeInfo.Pods { - newPods = append(newPods, podInfo.Pod.DeepCopy()) +func DeepCopyNodeInfo(nodeInfo *framework.NodeInfo) *framework.NodeInfo { + newPods := make([]*framework.PodInfo, 0) + for _, podInfo := range nodeInfo.Pods() { + newPods = append(newPods, &framework.PodInfo{Pod: podInfo.Pod.DeepCopy()}) } // Build a new node info. - newNodeInfo := schedulerframework.NewNodeInfo(newPods...) - newNodeInfo.SetNode(nodeInfo.Node().DeepCopy()) + newNodeInfo := framework.NewNodeInfo(nodeInfo.Node().DeepCopy(), nil, newPods...) return newNodeInfo } @@ -121,13 +119,13 @@ func SanitizeNode(node *apiv1.Node, nodeGroup string, taintConfig taints.TaintCo } // SanitizePods cleans up pods used for node group templates -func SanitizePods(pods []*apiv1.Pod, sanitizedNode *apiv1.Node) []*apiv1.Pod { +func SanitizePods(pods []*framework.PodInfo, sanitizedNode *apiv1.Node) []*framework.PodInfo { // Update node name in pods. - sanitizedPods := make([]*apiv1.Pod, 0) + sanitizedPods := make([]*framework.PodInfo, 0) for _, pod := range pods { - sanitizedPod := pod.DeepCopy() + sanitizedPod := pod.Pod.DeepCopy() sanitizedPod.Spec.NodeName = sanitizedNode.Name - sanitizedPods = append(sanitizedPods, sanitizedPod) + sanitizedPods = append(sanitizedPods, &framework.PodInfo{Pod: sanitizedPod}) } return sanitizedPods diff --git a/cluster-autoscaler/core/utils/utils_test.go b/cluster-autoscaler/core/utils/utils_test.go index a4bf95b9e154..b63badbcc834 100644 --- a/cluster-autoscaler/core/utils/utils_test.go +++ b/cluster-autoscaler/core/utils/utils_test.go @@ -20,6 +20,7 @@ import ( "testing" "time" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/utils/taints" . "k8s.io/autoscaler/cluster-autoscaler/utils/test" @@ -31,7 +32,7 @@ import ( func TestSanitizePods(t *testing.T) { pod := BuildTestPod("p1", 80, 0) pod.Spec.NodeName = "n1" - pods := []*apiv1.Pod{pod} + pods := []*framework.PodInfo{{Pod: pod}} node := BuildTestNode("node", 1000, 1000) diff --git a/cluster-autoscaler/debuggingsnapshot/debugging_snapshot.go b/cluster-autoscaler/debuggingsnapshot/debugging_snapshot.go index 0292c709927c..2891bce3e133 100644 --- a/cluster-autoscaler/debuggingsnapshot/debugging_snapshot.go +++ b/cluster-autoscaler/debuggingsnapshot/debugging_snapshot.go @@ -21,8 +21,8 @@ import ( "time" v1 "k8s.io/api/core/v1" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/scheduler/framework" ) // ClusterNode captures a single entity of nodeInfo. i.e. Node specs and all the pods on that node. 
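A minimal sketch of the NodeInfo construction pattern the core/utils changes above adopt: the node and its pods go into framework.NewNodeInfo in a single call, each pod wrapped in a framework.PodInfo, and pods are read back through the Pods() accessor rather than the old Pods field. The nil second argument mirrors its use throughout this diff; the helper names copyNodeInfo and wrap are illustrative only, not part of the change.

package example

import (
	apiv1 "k8s.io/api/core/v1"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

// copyNodeInfo mirrors the DeepCopyNodeInfo change above: read pods via
// Pods(), re-wrap the deep copies in framework.PodInfo, and rebuild the
// NodeInfo in a single constructor call.
func copyNodeInfo(src *framework.NodeInfo) *framework.NodeInfo {
	pods := make([]*framework.PodInfo, 0, len(src.Pods()))
	for _, podInfo := range src.Pods() {
		pods = append(pods, &framework.PodInfo{Pod: podInfo.Pod.DeepCopy()})
	}
	// The nil second argument mirrors its use throughout this diff.
	return framework.NewNodeInfo(src.Node().DeepCopy(), nil, pods...)
}

// wrap shows the single-pod form used by the cloud providers' TemplateNodeInfo.
func wrap(node *apiv1.Node, pod *apiv1.Pod) *framework.NodeInfo {
	return framework.NewNodeInfo(node, nil, &framework.PodInfo{Pod: pod})
}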
@@ -98,7 +98,7 @@ func GetClusterNodeCopy(template *framework.NodeInfo) *ClusterNode { cNode := &ClusterNode{} cNode.Node = template.Node().DeepCopy() var pods []*v1.Pod - for _, p := range template.Pods { + for _, p := range template.Pods() { pods = append(pods, p.Pod.DeepCopy()) } cNode.Pods = pods diff --git a/cluster-autoscaler/debuggingsnapshot/debugging_snapshot_test.go b/cluster-autoscaler/debuggingsnapshot/debugging_snapshot_test.go index 09924ac58001..611e3a2a3a44 100644 --- a/cluster-autoscaler/debuggingsnapshot/debugging_snapshot_test.go +++ b/cluster-autoscaler/debuggingsnapshot/debugging_snapshot_test.go @@ -24,21 +24,17 @@ import ( "github.com/stretchr/testify/assert" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/pkg/scheduler/framework" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" ) func TestBasicSetterWorkflow(t *testing.T) { snapshot := &DebuggingSnapshotImpl{} - pod := []*framework.PodInfo{ - { - Pod: &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "Pod1", - }, - Spec: v1.PodSpec{ - NodeName: "testNode", - }, - }, + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "Pod1", + }, + Spec: v1.PodSpec{ + NodeName: "testNode", }, } node := &v1.Node{ @@ -46,18 +42,10 @@ func TestBasicSetterWorkflow(t *testing.T) { Name: "testNode", }, } - - nodeInfo := &framework.NodeInfo{ - Pods: pod, - Requested: &framework.Resource{}, - NonZeroRequested: &framework.Resource{}, - Allocatable: &framework.Resource{}, - Generation: 0, - } + nodeInfo := framework.NewTestNodeInfo(node, pod) var nodeGroups []*framework.NodeInfo nodeGroups = append(nodeGroups, nodeInfo) - nodeGroups[0].SetNode(node) timestamp := time.Now().In(time.UTC) snapshot.SetClusterNodes(nodeGroups) snapshot.SetEndTimestamp(timestamp) diff --git a/cluster-autoscaler/debuggingsnapshot/debugging_snapshotter.go b/cluster-autoscaler/debuggingsnapshot/debugging_snapshotter.go index 318d895a92db..851202da3951 100644 --- a/cluster-autoscaler/debuggingsnapshot/debugging_snapshotter.go +++ b/cluster-autoscaler/debuggingsnapshot/debugging_snapshotter.go @@ -23,8 +23,8 @@ import ( "time" v1 "k8s.io/api/core/v1" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/scheduler/framework" ) // DebuggingSnapshotterState is the type for the debugging snapshot State machine diff --git a/cluster-autoscaler/debuggingsnapshot/debugging_snapshotter_test.go b/cluster-autoscaler/debuggingsnapshot/debugging_snapshotter_test.go index 8d8d79eff1de..776bde9fe21c 100644 --- a/cluster-autoscaler/debuggingsnapshot/debugging_snapshotter_test.go +++ b/cluster-autoscaler/debuggingsnapshot/debugging_snapshotter_test.go @@ -25,7 +25,7 @@ import ( "github.com/stretchr/testify/assert" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/pkg/scheduler/framework" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" ) func TestBasicSnapshotRequest(t *testing.T) { @@ -33,16 +33,12 @@ func TestBasicSnapshotRequest(t *testing.T) { wg.Add(1) snapshotter := NewDebuggingSnapshotter(true) - pod := []*framework.PodInfo{ - { - Pod: &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "Pod1", - }, - Spec: v1.PodSpec{ - NodeName: "testNode", - }, - }, + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "Pod1", + }, + Spec: v1.PodSpec{ + NodeName: "testNode", }, } node := &v1.Node{ @@ -50,18 +46,10 @@ func TestBasicSnapshotRequest(t *testing.T) { Name: "testNode", }, } - - nodeInfo := &framework.NodeInfo{ - Pods: 
pod, - Requested: &framework.Resource{}, - NonZeroRequested: &framework.Resource{}, - Allocatable: &framework.Resource{}, - Generation: 0, - } + nodeInfo := framework.NewTestNodeInfo(node, pod) var nodeGroups []*framework.NodeInfo nodeGroups = append(nodeGroups, nodeInfo) - nodeGroups[0].SetNode(node) req := httptest.NewRequest(http.MethodGet, "/", nil) w := httptest.NewRecorder() diff --git a/cluster-autoscaler/estimator/binpacking_estimator.go b/cluster-autoscaler/estimator/binpacking_estimator.go index 229a2e26420f..6ffad3800df6 100644 --- a/cluster-autoscaler/estimator/binpacking_estimator.go +++ b/cluster-autoscaler/estimator/binpacking_estimator.go @@ -22,10 +22,10 @@ import ( apiv1 "k8s.io/api/core/v1" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker" "k8s.io/autoscaler/cluster-autoscaler/utils/scheduler" klog "k8s.io/klog/v2" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) // BinpackingNodeEstimator estimates the number of needed nodes to handle the given amount of pods. @@ -89,7 +89,7 @@ func newEstimationState() *estimationState { // Returns the number of nodes needed to accommodate all pods from the list. func (e *BinpackingNodeEstimator) Estimate( podsEquivalenceGroups []PodEquivalenceGroup, - nodeTemplate *schedulerframework.NodeInfo, + nodeTemplate *framework.NodeInfo, nodeGroup cloudprovider.NodeGroup, ) (int, []*apiv1.Pod) { @@ -136,7 +136,7 @@ func (e *BinpackingNodeEstimator) tryToScheduleOnExistingNodes( pod := pods[index] // Check schedulability on all nodes created during simulation - nodeName, err := e.predicateChecker.FitsAnyNodeMatching(e.clusterSnapshot, pod, func(nodeInfo *schedulerframework.NodeInfo) bool { + nodeName, err := e.predicateChecker.FitsAnyNodeMatching(e.clusterSnapshot, pod, func(nodeInfo *framework.NodeInfo) bool { return estimationState.newNodeNames[nodeInfo.Node().Name] }) if err != nil { @@ -152,7 +152,7 @@ func (e *BinpackingNodeEstimator) tryToScheduleOnExistingNodes( func (e *BinpackingNodeEstimator) tryToScheduleOnNewNodes( estimationState *estimationState, - nodeTemplate *schedulerframework.NodeInfo, + nodeTemplate *framework.NodeInfo, pods []*apiv1.Pod, ) error { for _, pod := range pods { @@ -208,11 +208,11 @@ func (e *BinpackingNodeEstimator) tryToScheduleOnNewNodes( func (e *BinpackingNodeEstimator) addNewNodeToSnapshot( estimationState *estimationState, - template *schedulerframework.NodeInfo, + template *framework.NodeInfo, ) error { newNodeInfo := scheduler.DeepCopyTemplateNode(template, fmt.Sprintf("e-%d", estimationState.newNodeNameIndex)) var pods []*apiv1.Pod - for _, podInfo := range newNodeInfo.Pods { + for _, podInfo := range newNodeInfo.Pods() { pods = append(pods, podInfo.Pod) } if err := e.clusterSnapshot.AddNodeWithPods(newNodeInfo.Node(), pods); err != nil { diff --git a/cluster-autoscaler/estimator/binpacking_estimator_test.go b/cluster-autoscaler/estimator/binpacking_estimator_test.go index e80276de3675..e0fa48aeda10 100644 --- a/cluster-autoscaler/estimator/binpacking_estimator_test.go +++ b/cluster-autoscaler/estimator/binpacking_estimator_test.go @@ -24,10 +24,10 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" 
"k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker" . "k8s.io/autoscaler/cluster-autoscaler/utils/test" "k8s.io/autoscaler/cluster-autoscaler/utils/units" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" schedulermetrics "k8s.io/kubernetes/pkg/scheduler/metrics" "github.com/stretchr/testify/assert" @@ -222,8 +222,7 @@ func TestBinpackingEstimate(t *testing.T) { processor := NewDecreasingPodOrderer() estimator := NewBinpackingNodeEstimator(predicateChecker, clusterSnapshot, limiter, processor, nil /* EstimationContext */, nil /* EstimationAnalyserFunc */) node := makeNode(tc.millicores, tc.memory, 10, "template", "zone-mars") - nodeInfo := schedulerframework.NewNodeInfo() - nodeInfo.SetNode(node) + nodeInfo := framework.NewTestNodeInfo(node) estimatedNodes, estimatedPods := estimator.Estimate(tc.podsEquivalenceGroup, nodeInfo, nil) assert.Equal(t, tc.expectNodeCount, estimatedNodes) @@ -277,8 +276,7 @@ func BenchmarkBinpackingEstimate(b *testing.B) { processor := NewDecreasingPodOrderer() estimator := NewBinpackingNodeEstimator(predicateChecker, clusterSnapshot, limiter, processor, nil /* EstimationContext */, nil /* EstimationAnalyserFunc */) node := makeNode(millicores, memory, podsPerNode, "template", "zone-mars") - nodeInfo := schedulerframework.NewNodeInfo() - nodeInfo.SetNode(node) + nodeInfo := framework.NewTestNodeInfo(node) estimatedNodes, estimatedPods := estimator.Estimate(podsEquivalenceGroup, nodeInfo, nil) assert.Equal(b, expectNodeCount, estimatedNodes) diff --git a/cluster-autoscaler/estimator/decreasing_pod_orderer.go b/cluster-autoscaler/estimator/decreasing_pod_orderer.go index 8a2be95ff3c8..d4ac467e904a 100644 --- a/cluster-autoscaler/estimator/decreasing_pod_orderer.go +++ b/cluster-autoscaler/estimator/decreasing_pod_orderer.go @@ -22,7 +22,7 @@ import ( apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" - "k8s.io/kubernetes/pkg/scheduler/framework" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" ) // podScoreInfo contains Pod and score that corresponds to how important it is to handle the pod first. 
diff --git a/cluster-autoscaler/estimator/decreasing_pod_orderer_test.go b/cluster-autoscaler/estimator/decreasing_pod_orderer_test.go index 4720805eee6a..c64b3cca7fee 100644 --- a/cluster-autoscaler/estimator/decreasing_pod_orderer_test.go +++ b/cluster-autoscaler/estimator/decreasing_pod_orderer_test.go @@ -21,8 +21,8 @@ import ( "github.com/stretchr/testify/assert" v1 "k8s.io/api/core/v1" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/utils/test" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) func TestPodPriorityProcessor(t *testing.T) { @@ -57,8 +57,7 @@ func TestPodPriorityProcessor(t *testing.T) { tc := tc t.Parallel() processor := NewDecreasingPodOrderer() - nodeInfo := schedulerframework.NewNodeInfo() - nodeInfo.SetNode(node) + nodeInfo := framework.NewTestNodeInfo(node) actual := processor.Order(tc.inputPodsEquivalentGroup, nodeInfo, nil) assert.Equal(t, tc.expectedPodsEquivalentGroup, actual) }) diff --git a/cluster-autoscaler/estimator/estimator.go b/cluster-autoscaler/estimator/estimator.go index 7323a168e324..b8e3db070349 100644 --- a/cluster-autoscaler/estimator/estimator.go +++ b/cluster-autoscaler/estimator/estimator.go @@ -22,9 +22,8 @@ import ( apiv1 "k8s.io/api/core/v1" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker" - "k8s.io/kubernetes/pkg/scheduler/framework" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) const ( @@ -54,7 +53,7 @@ func (p *PodEquivalenceGroup) Exemplar() *apiv1.Pod { // to schedule on those nodes. type Estimator interface { // Estimate estimates how many nodes are needed to provision pods coming from the given equivalence groups. - Estimate([]PodEquivalenceGroup, *schedulerframework.NodeInfo, cloudprovider.NodeGroup) (int, []*apiv1.Pod) + Estimate([]PodEquivalenceGroup, *framework.NodeInfo, cloudprovider.NodeGroup) (int, []*apiv1.Pod) } // EstimatorBuilder creates a new estimator object. 
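The Estimator interface now takes the autoscaler's own *framework.NodeInfo. The toy estimator below only demonstrates the updated signature; its one-node-per-equivalence-group logic is illustrative and not the real binpacking behaviour.

package example

import (
	apiv1 "k8s.io/api/core/v1"
	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
	"k8s.io/autoscaler/cluster-autoscaler/estimator"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

type onePerGroupEstimator struct{}

var _ estimator.Estimator = &onePerGroupEstimator{}

// Estimate returns one node per pod equivalence group and reports the
// exemplar pods as schedulable; it exists only to show the new signature.
func (e *onePerGroupEstimator) Estimate(groups []estimator.PodEquivalenceGroup, template *framework.NodeInfo, nodeGroup cloudprovider.NodeGroup) (int, []*apiv1.Pod) {
	var pods []*apiv1.Pod
	for _, g := range groups {
		pods = append(pods, g.Exemplar())
	}
	return len(groups), pods
}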
diff --git a/cluster-autoscaler/expander/expander.go b/cluster-autoscaler/expander/expander.go index 01700a3b943c..e138da48276d 100644 --- a/cluster-autoscaler/expander/expander.go +++ b/cluster-autoscaler/expander/expander.go @@ -19,7 +19,7 @@ package expander import ( apiv1 "k8s.io/api/core/v1" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" ) var ( @@ -53,10 +53,10 @@ type Option struct { // Strategy describes an interface for selecting the best option when scaling up type Strategy interface { - BestOption(options []Option, nodeInfo map[string]*schedulerframework.NodeInfo) *Option + BestOption(options []Option, nodeInfo map[string]*framework.NodeInfo) *Option } // Filter describes an interface for filtering to equally good options according to some criteria type Filter interface { - BestOptions(options []Option, nodeInfo map[string]*schedulerframework.NodeInfo) []Option + BestOptions(options []Option, nodeInfo map[string]*framework.NodeInfo) []Option } diff --git a/cluster-autoscaler/expander/factory/chain.go b/cluster-autoscaler/expander/factory/chain.go index eec2ec91a311..d19dd7f1d54f 100644 --- a/cluster-autoscaler/expander/factory/chain.go +++ b/cluster-autoscaler/expander/factory/chain.go @@ -18,8 +18,7 @@ package factory import ( "k8s.io/autoscaler/cluster-autoscaler/expander" - - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" ) type chainStrategy struct { @@ -34,7 +33,7 @@ func newChainStrategy(filters []expander.Filter, fallback expander.Strategy) exp } } -func (c *chainStrategy) BestOption(options []expander.Option, nodeInfo map[string]*schedulerframework.NodeInfo) *expander.Option { +func (c *chainStrategy) BestOption(options []expander.Option, nodeInfo map[string]*framework.NodeInfo) *expander.Option { filteredOptions := options for _, filter := range c.filters { filteredOptions = filter.BestOptions(filteredOptions, nodeInfo) diff --git a/cluster-autoscaler/expander/factory/chain_test.go b/cluster-autoscaler/expander/factory/chain_test.go index b6039269752a..68990cee52d9 100644 --- a/cluster-autoscaler/expander/factory/chain_test.go +++ b/cluster-autoscaler/expander/factory/chain_test.go @@ -17,12 +17,12 @@ limitations under the License. 
package factory import ( - "k8s.io/autoscaler/cluster-autoscaler/expander" "strings" "testing" "github.com/stretchr/testify/assert" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" + "k8s.io/autoscaler/cluster-autoscaler/expander" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" ) type substringTestFilterStrategy struct { @@ -35,7 +35,7 @@ func newSubstringTestFilterStrategy(substring string) *substringTestFilterStrate } } -func (s *substringTestFilterStrategy) BestOptions(expansionOptions []expander.Option, nodeInfo map[string]*schedulerframework.NodeInfo) []expander.Option { +func (s *substringTestFilterStrategy) BestOptions(expansionOptions []expander.Option, nodeInfo map[string]*framework.NodeInfo) []expander.Option { var ret []expander.Option for _, option := range expansionOptions { if strings.Contains(option.Debug, s.substring) { @@ -46,7 +46,7 @@ func (s *substringTestFilterStrategy) BestOptions(expansionOptions []expander.Op } -func (s *substringTestFilterStrategy) BestOption(expansionOptions []expander.Option, nodeInfo map[string]*schedulerframework.NodeInfo) *expander.Option { +func (s *substringTestFilterStrategy) BestOption(expansionOptions []expander.Option, nodeInfo map[string]*framework.NodeInfo) *expander.Option { ret := s.BestOptions(expansionOptions, nodeInfo) if len(ret) == 0 { return nil diff --git a/cluster-autoscaler/expander/grpcplugin/grpc_client.go b/cluster-autoscaler/expander/grpcplugin/grpc_client.go index 7bcab0c8158d..ca1f2a716960 100644 --- a/cluster-autoscaler/expander/grpcplugin/grpc_client.go +++ b/cluster-autoscaler/expander/grpcplugin/grpc_client.go @@ -24,8 +24,8 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/autoscaler/cluster-autoscaler/expander" "k8s.io/autoscaler/cluster-autoscaler/expander/grpcplugin/protos" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/klog/v2" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" "google.golang.org/grpc" "google.golang.org/grpc/credentials" @@ -72,7 +72,7 @@ func createGRPCClient(expanderCert string, expanderUrl string) protos.ExpanderCl return protos.NewExpanderClient(conn) } -func (g *grpcclientstrategy) BestOptions(expansionOptions []expander.Option, nodeInfo map[string]*schedulerframework.NodeInfo) []expander.Option { +func (g *grpcclientstrategy) BestOptions(expansionOptions []expander.Option, nodeInfo map[string]*framework.NodeInfo) []expander.Option { if g.grpcClient == nil { klog.Errorf("Incorrect gRPC client config, filtering no options") return expansionOptions @@ -117,7 +117,7 @@ func populateOptionsForGRPC(expansionOptions []expander.Option) ([]*protos.Optio } // populateNodeInfoForGRPC looks at the corresponding v1.Node object per NodeInfo object, and populates the grpcNodeInfoMap with these to pass over grpc -func populateNodeInfoForGRPC(nodeInfos map[string]*schedulerframework.NodeInfo) map[string]*v1.Node { +func populateNodeInfoForGRPC(nodeInfos map[string]*framework.NodeInfo) map[string]*v1.Node { grpcNodeInfoMap := make(map[string]*v1.Node) for nodeId, nodeInfo := range nodeInfos { grpcNodeInfoMap[nodeId] = nodeInfo.Node() diff --git a/cluster-autoscaler/expander/grpcplugin/grpc_client_test.go b/cluster-autoscaler/expander/grpcplugin/grpc_client_test.go index cb6882b79fc6..d04670bebd74 100644 --- a/cluster-autoscaler/expander/grpcplugin/grpc_client_test.go +++ b/cluster-autoscaler/expander/grpcplugin/grpc_client_test.go @@ -25,8 +25,8 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/autoscaler/cluster-autoscaler/expander/grpcplugin/protos" 
"k8s.io/autoscaler/cluster-autoscaler/expander/mocks" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" . "k8s.io/autoscaler/cluster-autoscaler/utils/test" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test" "k8s.io/autoscaler/cluster-autoscaler/expander" @@ -124,11 +124,10 @@ func TestPopulateOptionsForGrpc(t *testing.T) { } } -func makeFakeNodeInfos() map[string]*schedulerframework.NodeInfo { - nodeInfos := make(map[string]*schedulerframework.NodeInfo) +func makeFakeNodeInfos() map[string]*framework.NodeInfo { + nodeInfos := make(map[string]*framework.NodeInfo) for i, opt := range options { - nodeInfo := schedulerframework.NewNodeInfo() - nodeInfo.SetNode(nodes[i]) + nodeInfo := framework.NewTestNodeInfo(nodes[i]) nodeInfos[opt.NodeGroup.Id()] = nodeInfo } return nodeInfos @@ -251,7 +250,7 @@ func TestBestOptionsErrors(t *testing.T) { testCases := []struct { desc string client grpcclientstrategy - nodeInfo map[string]*schedulerframework.NodeInfo + nodeInfo map[string]*framework.NodeInfo mockResponse protos.BestOptionsResponse errResponse error }{ diff --git a/cluster-autoscaler/expander/leastnodes/leastnodes.go b/cluster-autoscaler/expander/leastnodes/leastnodes.go index 80c8c782a60a..efd34ccc38eb 100644 --- a/cluster-autoscaler/expander/leastnodes/leastnodes.go +++ b/cluster-autoscaler/expander/leastnodes/leastnodes.go @@ -20,7 +20,7 @@ import ( "math" "k8s.io/autoscaler/cluster-autoscaler/expander" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" ) type leastnodes struct { @@ -32,7 +32,7 @@ func NewFilter() expander.Filter { } // BestOptions selects the expansion option that uses the least number of nodes -func (m *leastnodes) BestOptions(expansionOptions []expander.Option, nodeInfo map[string]*schedulerframework.NodeInfo) []expander.Option { +func (m *leastnodes) BestOptions(expansionOptions []expander.Option, nodeInfo map[string]*framework.NodeInfo) []expander.Option { leastNodes := math.MaxInt var leastOptions []expander.Option diff --git a/cluster-autoscaler/expander/mostpods/mostpods.go b/cluster-autoscaler/expander/mostpods/mostpods.go index 77f1662ca716..6c6d6f73f6f0 100644 --- a/cluster-autoscaler/expander/mostpods/mostpods.go +++ b/cluster-autoscaler/expander/mostpods/mostpods.go @@ -18,7 +18,7 @@ package mostpods import ( "k8s.io/autoscaler/cluster-autoscaler/expander" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" ) type mostpods struct { @@ -30,7 +30,7 @@ func NewFilter() expander.Filter { } // BestOptions selects the expansion option that schedules the most pods -func (m *mostpods) BestOptions(expansionOptions []expander.Option, nodeInfo map[string]*schedulerframework.NodeInfo) []expander.Option { +func (m *mostpods) BestOptions(expansionOptions []expander.Option, nodeInfo map[string]*framework.NodeInfo) []expander.Option { var maxPods int var maxOptions []expander.Option diff --git a/cluster-autoscaler/expander/price/price.go b/cluster-autoscaler/expander/price/price.go index 8a1297cc2c05..22f99320a651 100644 --- a/cluster-autoscaler/expander/price/price.go +++ b/cluster-autoscaler/expander/price/price.go @@ -26,9 +26,9 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/expander" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" 
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu" "k8s.io/autoscaler/cluster-autoscaler/utils/units" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" klog "k8s.io/klog/v2" ) @@ -87,7 +87,7 @@ func NewFilter(cloudProvider cloudprovider.CloudProvider, } // BestOption selects option based on cost and preferred node type. -func (p *priceBased) BestOptions(expansionOptions []expander.Option, nodeInfos map[string]*schedulerframework.NodeInfo) []expander.Option { +func (p *priceBased) BestOptions(expansionOptions []expander.Option, nodeInfos map[string]*framework.NodeInfo) []expander.Option { var bestOptions []expander.Option bestOptionScore := 0.0 now := time.Now() diff --git a/cluster-autoscaler/expander/price/price_test.go b/cluster-autoscaler/expander/price/price_test.go index 90d2d9982938..c536a93878d8 100644 --- a/cluster-autoscaler/expander/price/price_test.go +++ b/cluster-autoscaler/expander/price/price_test.go @@ -28,8 +28,8 @@ import ( apiv1 "k8s.io/api/core/v1" cloudprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" . "k8s.io/autoscaler/cluster-autoscaler/utils/test" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" "github.com/stretchr/testify/assert" ) @@ -90,13 +90,10 @@ func TestPriceExpander(t *testing.T) { ng2, _ := provider.NodeGroupForNode(n2) ng3, _ := provider.NewNodeGroup("MT1", nil, nil, nil, nil) - ni1 := schedulerframework.NewNodeInfo() - ni1.SetNode(n1) - ni2 := schedulerframework.NewNodeInfo() - ni2.SetNode(n2) - ni3 := schedulerframework.NewNodeInfo() - ni3.SetNode(n3) - nodeInfosForGroups := map[string]*schedulerframework.NodeInfo{ + ni1 := framework.NewTestNodeInfo(n1) + ni2 := framework.NewTestNodeInfo(n2) + ni3 := framework.NewTestNodeInfo(n3) + nodeInfosForGroups := map[string]*framework.NodeInfo{ "ng1": ni1, "ng2": ni2, } var pricingModel cloudprovider.PricingModel diff --git a/cluster-autoscaler/expander/priority/priority.go b/cluster-autoscaler/expander/priority/priority.go index dab38c986a98..4dc4ed9582ae 100644 --- a/cluster-autoscaler/expander/priority/priority.go +++ b/cluster-autoscaler/expander/priority/priority.go @@ -26,10 +26,10 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/expander" apiv1 "k8s.io/api/core/v1" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" v1lister "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/record" klog "k8s.io/klog/v2" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) const ( @@ -116,7 +116,7 @@ func (p *priority) parsePrioritiesYAMLString(prioritiesYAML string) (priorities, return newPriorities, nil } -func (p *priority) BestOptions(expansionOptions []expander.Option, nodeInfo map[string]*schedulerframework.NodeInfo) []expander.Option { +func (p *priority) BestOptions(expansionOptions []expander.Option, nodeInfo map[string]*framework.NodeInfo) []expander.Option { if len(expansionOptions) <= 0 { return nil } diff --git a/cluster-autoscaler/expander/random/random.go b/cluster-autoscaler/expander/random/random.go index a789a01b9260..12a3dfe84ece 100644 --- a/cluster-autoscaler/expander/random/random.go +++ b/cluster-autoscaler/expander/random/random.go @@ -20,7 +20,7 @@ import ( "math/rand" "k8s.io/autoscaler/cluster-autoscaler/expander" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" ) type random struct { @@ -37,7 +37,7 @@ func 
NewStrategy() expander.Strategy { } // BestOptions selects from the expansion options at random -func (r *random) BestOptions(expansionOptions []expander.Option, nodeInfo map[string]*schedulerframework.NodeInfo) []expander.Option { +func (r *random) BestOptions(expansionOptions []expander.Option, nodeInfo map[string]*framework.NodeInfo) []expander.Option { best := r.BestOption(expansionOptions, nodeInfo) if best == nil { return nil @@ -46,7 +46,7 @@ func (r *random) BestOptions(expansionOptions []expander.Option, nodeInfo map[st } // BestOption selects from the expansion options at random -func (r *random) BestOption(expansionOptions []expander.Option, nodeInfo map[string]*schedulerframework.NodeInfo) *expander.Option { +func (r *random) BestOption(expansionOptions []expander.Option, nodeInfo map[string]*framework.NodeInfo) *expander.Option { if len(expansionOptions) <= 0 { return nil } diff --git a/cluster-autoscaler/expander/waste/waste.go b/cluster-autoscaler/expander/waste/waste.go index a4a7768b8835..d05de2516cbb 100644 --- a/cluster-autoscaler/expander/waste/waste.go +++ b/cluster-autoscaler/expander/waste/waste.go @@ -20,8 +20,8 @@ import ( apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/autoscaler/cluster-autoscaler/expander" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" klog "k8s.io/klog/v2" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) type leastwaste struct { @@ -33,7 +33,7 @@ func NewFilter() expander.Filter { } // BestOption Finds the option that wastes the least fraction of CPU and Memory -func (l *leastwaste) BestOptions(expansionOptions []expander.Option, nodeInfo map[string]*schedulerframework.NodeInfo) []expander.Option { +func (l *leastwaste) BestOptions(expansionOptions []expander.Option, nodeInfo map[string]*framework.NodeInfo) []expander.Option { var leastWastedScore float64 var leastWastedOptions []expander.Option diff --git a/cluster-autoscaler/expander/waste/waste_test.go b/cluster-autoscaler/expander/waste/waste_test.go index d743c36a7fc8..c552fb3645df 100644 --- a/cluster-autoscaler/expander/waste/waste_test.go +++ b/cluster-autoscaler/expander/waste/waste_test.go @@ -28,7 +28,7 @@ import ( apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/autoscaler/cluster-autoscaler/expander" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" ) type FakeNodeGroup struct { @@ -47,7 +47,7 @@ func (f *FakeNodeGroup) Debug() string { return f.id } func (f *FakeNodeGroup) Nodes() ([]cloudprovider.Instance, error) { return []cloudprovider.Instance{}, nil } -func (f *FakeNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) { +func (f *FakeNodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) { return nil, cloudprovider.ErrNotImplemented } func (f *FakeNodeGroup) Exist() bool { return true } @@ -60,7 +60,7 @@ func (f *FakeNodeGroup) GetOptions(defaults config.NodeGroupAutoscalingOptions) return nil, cloudprovider.ErrNotImplemented } -func makeNodeInfo(cpu int64, memory int64, pods int64) *schedulerframework.NodeInfo { +func makeNodeInfo(cpu int64, memory int64, pods int64) *framework.NodeInfo { node := &apiv1.Node{ Status: apiv1.NodeStatus{ Capacity: apiv1.ResourceList{ @@ -73,8 +73,7 @@ func makeNodeInfo(cpu int64, memory int64, pods int64) *schedulerframework.NodeI node.Status.Allocatable = node.Status.Capacity SetNodeReadyState(node, true, time.Time{}) - nodeInfo := schedulerframework.NewNodeInfo() - 
nodeInfo.SetNode(node) + nodeInfo := framework.NewTestNodeInfo(node) return nodeInfo } @@ -84,7 +83,7 @@ func TestLeastWaste(t *testing.T) { memoryPerPod := int64(1000 * 1024 * 1024) e := NewFilter() balancedNodeInfo := makeNodeInfo(16*cpuPerPod, 16*memoryPerPod, 100) - nodeMap := map[string]*schedulerframework.NodeInfo{"balanced": balancedNodeInfo} + nodeMap := map[string]*framework.NodeInfo{"balanced": balancedNodeInfo} balancedOption := expander.Option{NodeGroup: &FakeNodeGroup{"balanced"}, NodeCount: 1} // Test without any pods, one node info diff --git a/cluster-autoscaler/processors/nodegroups/nodegroup_list_processor.go b/cluster-autoscaler/processors/nodegroups/nodegroup_list_processor.go index 03a75ba0aadb..288849538cca 100644 --- a/cluster-autoscaler/processors/nodegroups/nodegroup_list_processor.go +++ b/cluster-autoscaler/processors/nodegroups/nodegroup_list_processor.go @@ -20,14 +20,14 @@ import ( apiv1 "k8s.io/api/core/v1" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/context" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" ) // NodeGroupListProcessor processes lists of NodeGroups considered in scale-up. type NodeGroupListProcessor interface { Process(context *context.AutoscalingContext, nodeGroups []cloudprovider.NodeGroup, - nodeInfos map[string]*schedulerframework.NodeInfo, - unschedulablePods []*apiv1.Pod) ([]cloudprovider.NodeGroup, map[string]*schedulerframework.NodeInfo, error) + nodeInfos map[string]*framework.NodeInfo, + unschedulablePods []*apiv1.Pod) ([]cloudprovider.NodeGroup, map[string]*framework.NodeInfo, error) CleanUp() } @@ -41,8 +41,8 @@ func NewDefaultNodeGroupListProcessor() NodeGroupListProcessor { } // Process processes lists of unschedulable and scheduled pods before scaling of the cluster. 
-func (p *NoOpNodeGroupListProcessor) Process(context *context.AutoscalingContext, nodeGroups []cloudprovider.NodeGroup, nodeInfos map[string]*schedulerframework.NodeInfo, - unschedulablePods []*apiv1.Pod) ([]cloudprovider.NodeGroup, map[string]*schedulerframework.NodeInfo, error) { +func (p *NoOpNodeGroupListProcessor) Process(context *context.AutoscalingContext, nodeGroups []cloudprovider.NodeGroup, nodeInfos map[string]*framework.NodeInfo, + unschedulablePods []*apiv1.Pod) ([]cloudprovider.NodeGroup, map[string]*framework.NodeInfo, error) { return nodeGroups, nodeInfos, nil } diff --git a/cluster-autoscaler/processors/nodegroupset/aws_nodegroups.go b/cluster-autoscaler/processors/nodegroupset/aws_nodegroups.go index 2c22f8280522..cf1416564f91 100644 --- a/cluster-autoscaler/processors/nodegroupset/aws_nodegroups.go +++ b/cluster-autoscaler/processors/nodegroupset/aws_nodegroups.go @@ -18,7 +18,7 @@ package nodegroupset import ( "k8s.io/autoscaler/cluster-autoscaler/config" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" ) // CreateAwsNodeInfoComparator returns a comparator that checks if two nodes should be considered @@ -42,7 +42,7 @@ func CreateAwsNodeInfoComparator(extraIgnoredLabels []string, ratioOpts config.N awsIgnoredLabels[k] = true } - return func(n1, n2 *schedulerframework.NodeInfo) bool { + return func(n1, n2 *framework.NodeInfo) bool { return IsCloudProviderNodeInfoSimilar(n1, n2, awsIgnoredLabels, ratioOpts) } } diff --git a/cluster-autoscaler/processors/nodegroupset/azure_nodegroups.go b/cluster-autoscaler/processors/nodegroupset/azure_nodegroups.go index 3b615a620675..d48eb9eef57c 100644 --- a/cluster-autoscaler/processors/nodegroupset/azure_nodegroups.go +++ b/cluster-autoscaler/processors/nodegroupset/azure_nodegroups.go @@ -18,7 +18,7 @@ package nodegroupset import ( "k8s.io/autoscaler/cluster-autoscaler/config" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" ) // AzureNodepoolLegacyLabel is a label specifying which Azure node pool a particular node belongs to. 
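The expander Strategy and Filter interfaces changed a few files above now receive node templates as map[string]*framework.NodeInfo keyed by node group id. A sketch of a custom Filter against that signature; the "largest allocatable CPU wins" rule is purely illustrative.

package example

import (
	"k8s.io/autoscaler/cluster-autoscaler/expander"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

type biggestTemplateFilter struct{}

var _ expander.Filter = &biggestTemplateFilter{}

// BestOptions keeps the options whose node template advertises the most
// allocatable CPU, looked up by node group id in the new map type.
func (f *biggestTemplateFilter) BestOptions(options []expander.Option, nodeInfo map[string]*framework.NodeInfo) []expander.Option {
	var best []expander.Option
	bestCPU := int64(-1)
	for _, o := range options {
		info, ok := nodeInfo[o.NodeGroup.Id()]
		if !ok {
			continue
		}
		cpu := info.Node().Status.Allocatable.Cpu().MilliValue()
		switch {
		case cpu > bestCPU:
			bestCPU, best = cpu, []expander.Option{o}
		case cpu == bestCPU:
			best = append(best, o)
		}
	}
	return best
}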
@@ -40,13 +40,13 @@ const aksConsolidatedAdditionalProperties = "kubernetes.azure.com/consolidated-a // AKS node image version const aksNodeImageVersion = "kubernetes.azure.com/node-image-version" -func nodesFromSameAzureNodePool(n1, n2 *schedulerframework.NodeInfo) bool { +func nodesFromSameAzureNodePool(n1, n2 *framework.NodeInfo) bool { n1AzureNodePool := n1.Node().Labels[AzureNodepoolLabel] n2AzureNodePool := n2.Node().Labels[AzureNodepoolLabel] return (n1AzureNodePool != "" && n1AzureNodePool == n2AzureNodePool) || nodesFromSameAzureNodePoolLegacy(n1, n2) } -func nodesFromSameAzureNodePoolLegacy(n1, n2 *schedulerframework.NodeInfo) bool { +func nodesFromSameAzureNodePoolLegacy(n1, n2 *framework.NodeInfo) bool { n1AzureNodePool := n1.Node().Labels[AzureNodepoolLegacyLabel] n2AzureNodePool := n2.Node().Labels[AzureNodepoolLegacyLabel] return n1AzureNodePool != "" && n1AzureNodePool == n2AzureNodePool @@ -74,7 +74,7 @@ func CreateAzureNodeInfoComparator(extraIgnoredLabels []string, ratioOpts config azureIgnoredLabels[k] = true } - return func(n1, n2 *schedulerframework.NodeInfo) bool { + return func(n1, n2 *framework.NodeInfo) bool { if nodesFromSameAzureNodePool(n1, n2) { return true } diff --git a/cluster-autoscaler/processors/nodegroupset/azure_nodegroups_test.go b/cluster-autoscaler/processors/nodegroupset/azure_nodegroups_test.go index 8b73f36111a1..430c8a29f808 100644 --- a/cluster-autoscaler/processors/nodegroupset/azure_nodegroups_test.go +++ b/cluster-autoscaler/processors/nodegroupset/azure_nodegroups_test.go @@ -23,8 +23,8 @@ import ( testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test" "k8s.io/autoscaler/cluster-autoscaler/config" "k8s.io/autoscaler/cluster-autoscaler/context" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" . 
"k8s.io/autoscaler/cluster-autoscaler/utils/test" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" "github.com/stretchr/testify/assert" ) @@ -110,12 +110,10 @@ func TestFindSimilarNodeGroupsAzureByLabel(t *testing.T) { provider.AddNode("ng1", n1) provider.AddNode("ng2", n2) - ni1 := schedulerframework.NewNodeInfo() - ni1.SetNode(n1) - ni2 := schedulerframework.NewNodeInfo() - ni2.SetNode(n2) + ni1 := framework.NewTestNodeInfo(n1) + ni2 := framework.NewTestNodeInfo(n2) - nodeInfosForGroups := map[string]*schedulerframework.NodeInfo{ + nodeInfosForGroups := map[string]*framework.NodeInfo{ "ng1": ni1, "ng2": ni2, } @@ -141,8 +139,7 @@ func TestFindSimilarNodeGroupsAzureByLabel(t *testing.T) { n3 := BuildTestNode("n1", 1000, 1000) provider.AddNodeGroup("ng3", 1, 10, 1) provider.AddNode("ng3", n3) - ni3 := schedulerframework.NewNodeInfo() - ni3.SetNode(n3) + ni3 := framework.NewTestNodeInfo(n3) nodeInfosForGroups["ng3"] = ni3 ng3, _ := provider.NodeGroupForNode(n3) diff --git a/cluster-autoscaler/processors/nodegroupset/balancing_processor.go b/cluster-autoscaler/processors/nodegroupset/balancing_processor.go index c65cb10ea214..749566b531bd 100644 --- a/cluster-autoscaler/processors/nodegroupset/balancing_processor.go +++ b/cluster-autoscaler/processors/nodegroupset/balancing_processor.go @@ -21,8 +21,8 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/context" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/utils/errors" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" klog "k8s.io/klog/v2" ) @@ -35,7 +35,7 @@ type BalancingNodeGroupSetProcessor struct { // FindSimilarNodeGroups returns a list of NodeGroups similar to the given one using the // BalancingNodeGroupSetProcessor's comparator function. func (b *BalancingNodeGroupSetProcessor) FindSimilarNodeGroups(context *context.AutoscalingContext, nodeGroup cloudprovider.NodeGroup, - nodeInfosForGroups map[string]*schedulerframework.NodeInfo) ([]cloudprovider.NodeGroup, errors.AutoscalerError) { + nodeInfosForGroups map[string]*framework.NodeInfo) ([]cloudprovider.NodeGroup, errors.AutoscalerError) { result := []cloudprovider.NodeGroup{} nodeGroupId := nodeGroup.Id() diff --git a/cluster-autoscaler/processors/nodegroupset/balancing_processor_test.go b/cluster-autoscaler/processors/nodegroupset/balancing_processor_test.go index 9645cdb3997b..6fd86d6fb423 100644 --- a/cluster-autoscaler/processors/nodegroupset/balancing_processor_test.go +++ b/cluster-autoscaler/processors/nodegroupset/balancing_processor_test.go @@ -19,18 +19,17 @@ package nodegroupset import ( "testing" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" - "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test" "k8s.io/autoscaler/cluster-autoscaler/config" "k8s.io/autoscaler/cluster-autoscaler/context" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" . 
"k8s.io/autoscaler/cluster-autoscaler/utils/test" "github.com/stretchr/testify/assert" ) -func buildBasicNodeGroups(context *context.AutoscalingContext) (*schedulerframework.NodeInfo, *schedulerframework.NodeInfo, *schedulerframework.NodeInfo) { +func buildBasicNodeGroups(context *context.AutoscalingContext) (*framework.NodeInfo, *framework.NodeInfo, *framework.NodeInfo) { n1 := BuildTestNode("n1", 1000, 1000) n2 := BuildTestNode("n2", 1000, 1000) n3 := BuildTestNode("n3", 2000, 2000) @@ -42,12 +41,9 @@ func buildBasicNodeGroups(context *context.AutoscalingContext) (*schedulerframew provider.AddNode("ng2", n2) provider.AddNode("ng3", n3) - ni1 := schedulerframework.NewNodeInfo() - ni1.SetNode(n1) - ni2 := schedulerframework.NewNodeInfo() - ni2.SetNode(n2) - ni3 := schedulerframework.NewNodeInfo() - ni3.SetNode(n3) + ni1 := framework.NewTestNodeInfo(n1) + ni2 := framework.NewTestNodeInfo(n2) + ni3 := framework.NewTestNodeInfo(n3) context.CloudProvider = provider return ni1, ni2, ni3 @@ -57,11 +53,11 @@ func basicSimilarNodeGroupsTest( t *testing.T, context *context.AutoscalingContext, processor NodeGroupSetProcessor, - ni1 *schedulerframework.NodeInfo, - ni2 *schedulerframework.NodeInfo, - ni3 *schedulerframework.NodeInfo, + ni1 *framework.NodeInfo, + ni2 *framework.NodeInfo, + ni3 *framework.NodeInfo, ) { - nodeInfosForGroups := map[string]*schedulerframework.NodeInfo{ + nodeInfosForGroups := map[string]*framework.NodeInfo{ "ng1": ni1, "ng2": ni2, "ng3": ni3, } @@ -104,7 +100,7 @@ func TestFindSimilarNodeGroupsCustomComparator(t *testing.T) { ni1, ni2, ni3 := buildBasicNodeGroups(context) processor := &BalancingNodeGroupSetProcessor{ - Comparator: func(n1, n2 *schedulerframework.NodeInfo) bool { + Comparator: func(n1, n2 *framework.NodeInfo) bool { return (n1.Node().Name == "n1" && n2.Node().Name == "n2") || (n1.Node().Name == "n2" && n2.Node().Name == "n1") }, diff --git a/cluster-autoscaler/processors/nodegroupset/compare_nodegroups.go b/cluster-autoscaler/processors/nodegroupset/compare_nodegroups.go index 42dbb9ef8651..b26d47ae7e0f 100644 --- a/cluster-autoscaler/processors/nodegroupset/compare_nodegroups.go +++ b/cluster-autoscaler/processors/nodegroupset/compare_nodegroups.go @@ -22,8 +22,8 @@ import ( apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/autoscaler/cluster-autoscaler/config" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/utils/scheduler" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) // BasicIgnoredLabels define a set of basic labels that should be ignored when comparing the similarity @@ -41,7 +41,7 @@ var BasicIgnoredLabels = map[string]bool{ // NodeInfoComparator is a function that tells if two nodes are from NodeGroups // similar enough to be considered a part of a single NodeGroupSet. 
-type NodeInfoComparator func(n1, n2 *schedulerframework.NodeInfo) bool +type NodeInfoComparator func(n1, n2 *framework.NodeInfo) bool func resourceMapsWithinTolerance(resources map[apiv1.ResourceName][]resource.Quantity, maxDifferenceRatio float64) bool { @@ -62,7 +62,7 @@ func resourceListWithinTolerance(qtyList []resource.Quantity, maxDifferenceRatio return larger-smaller <= larger*maxDifferenceRatio } -func compareLabels(nodes []*schedulerframework.NodeInfo, ignoredLabels map[string]bool) bool { +func compareLabels(nodes []*framework.NodeInfo, ignoredLabels map[string]bool) bool { labels := make(map[string][]string) for _, node := range nodes { for label, value := range node.Node().ObjectMeta.Labels { @@ -90,7 +90,7 @@ func CreateGenericNodeInfoComparator(extraIgnoredLabels []string, ratioOpts conf genericIgnoredLabels[k] = true } - return func(n1, n2 *schedulerframework.NodeInfo) bool { + return func(n1, n2 *framework.NodeInfo) bool { return IsCloudProviderNodeInfoSimilar(n1, n2, genericIgnoredLabels, ratioOpts) } } @@ -101,11 +101,11 @@ func CreateGenericNodeInfoComparator(extraIgnoredLabels []string, ratioOpts conf // are similar enough to likely be the same type of machine and if the set of labels // is the same (except for a set of labels passed in to be ignored like hostname or zone). func IsCloudProviderNodeInfoSimilar( - n1, n2 *schedulerframework.NodeInfo, ignoredLabels map[string]bool, ratioOpts config.NodeGroupDifferenceRatios) bool { + n1, n2 *framework.NodeInfo, ignoredLabels map[string]bool, ratioOpts config.NodeGroupDifferenceRatios) bool { capacity := make(map[apiv1.ResourceName][]resource.Quantity) allocatable := make(map[apiv1.ResourceName][]resource.Quantity) free := make(map[apiv1.ResourceName][]resource.Quantity) - nodes := []*schedulerframework.NodeInfo{n1, n2} + nodes := []*framework.NodeInfo{n1, n2} for _, node := range nodes { for res, quantity := range node.Node().Status.Capacity { capacity[res] = append(capacity[res], quantity) @@ -113,7 +113,7 @@ func IsCloudProviderNodeInfoSimilar( for res, quantity := range node.Node().Status.Allocatable { allocatable[res] = append(allocatable[res], quantity) } - for res, quantity := range scheduler.ResourceToResourceList(node.Requested) { + for res, quantity := range scheduler.ResourceToResourceList(node.ToScheduler().Requested) { freeRes := node.Node().Status.Allocatable[res].DeepCopy() freeRes.Sub(quantity) free[res] = append(free[res], freeRes) diff --git a/cluster-autoscaler/processors/nodegroupset/compare_nodegroups_test.go b/cluster-autoscaler/processors/nodegroupset/compare_nodegroups_test.go index eae8a67090e0..0b7a4f224d21 100644 --- a/cluster-autoscaler/processors/nodegroupset/compare_nodegroups_test.go +++ b/cluster-autoscaler/processors/nodegroupset/compare_nodegroups_test.go @@ -19,14 +19,13 @@ package nodegroupset import ( "testing" + "github.com/stretchr/testify/assert" apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/autoscaler/cluster-autoscaler/config" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/utils/gpu" . 
"k8s.io/autoscaler/cluster-autoscaler/utils/test" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" - - "github.com/stretchr/testify/assert" ) func checkNodesSimilar(t *testing.T, n1, n2 *apiv1.Node, comparator NodeInfoComparator, shouldEqual bool) { @@ -34,10 +33,8 @@ func checkNodesSimilar(t *testing.T, n1, n2 *apiv1.Node, comparator NodeInfoComp } func checkNodesSimilarWithPods(t *testing.T, n1, n2 *apiv1.Node, pods1, pods2 []*apiv1.Pod, comparator NodeInfoComparator, shouldEqual bool) { - ni1 := schedulerframework.NewNodeInfo(pods1...) - ni1.SetNode(n1) - ni2 := schedulerframework.NewNodeInfo(pods2...) - ni2.SetNode(n2) + ni1 := framework.NewTestNodeInfo(n1, pods1...) + ni2 := framework.NewTestNodeInfo(n2, pods2...) assert.Equal(t, shouldEqual, comparator(ni1, ni2)) } diff --git a/cluster-autoscaler/processors/nodegroupset/gce_nodegroups.go b/cluster-autoscaler/processors/nodegroupset/gce_nodegroups.go index 483f6f04f395..a0b1a8277648 100644 --- a/cluster-autoscaler/processors/nodegroupset/gce_nodegroups.go +++ b/cluster-autoscaler/processors/nodegroupset/gce_nodegroups.go @@ -18,7 +18,7 @@ package nodegroupset import ( "k8s.io/autoscaler/cluster-autoscaler/config" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" ) // CreateGceNodeInfoComparator returns a comparator that checks if two nodes should be considered @@ -37,7 +37,7 @@ func CreateGceNodeInfoComparator(extraIgnoredLabels []string, ratioOpts config.N gceIgnoredLabels[k] = true } - return func(n1, n2 *schedulerframework.NodeInfo) bool { + return func(n1, n2 *framework.NodeInfo) bool { return IsCloudProviderNodeInfoSimilar(n1, n2, gceIgnoredLabels, ratioOpts) } } diff --git a/cluster-autoscaler/processors/nodegroupset/label_nodegroups.go b/cluster-autoscaler/processors/nodegroupset/label_nodegroups.go index df25e6d237c6..af3f5657e063 100644 --- a/cluster-autoscaler/processors/nodegroupset/label_nodegroups.go +++ b/cluster-autoscaler/processors/nodegroupset/label_nodegroups.go @@ -17,18 +17,18 @@ limitations under the License. package nodegroupset import ( + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" klog "k8s.io/klog/v2" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) // CreateLabelNodeInfoComparator returns a comparator that checks for node group similarity using the given labels. 
func CreateLabelNodeInfoComparator(labels []string) NodeInfoComparator { - return func(n1, n2 *schedulerframework.NodeInfo) bool { + return func(n1, n2 *framework.NodeInfo) bool { return areLabelsSame(n1, n2, labels) } } -func areLabelsSame(n1, n2 *schedulerframework.NodeInfo, labels []string) bool { +func areLabelsSame(n1, n2 *framework.NodeInfo, labels []string) bool { for _, label := range labels { val1, exists := n1.Node().ObjectMeta.Labels[label] if !exists { diff --git a/cluster-autoscaler/processors/nodegroupset/nodegroup_set_processor.go b/cluster-autoscaler/processors/nodegroupset/nodegroup_set_processor.go index 712b6ed7e20c..31c5d7629b94 100644 --- a/cluster-autoscaler/processors/nodegroupset/nodegroup_set_processor.go +++ b/cluster-autoscaler/processors/nodegroupset/nodegroup_set_processor.go @@ -22,8 +22,8 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/config" "k8s.io/autoscaler/cluster-autoscaler/context" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/utils/errors" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) // ScaleUpInfo contains information about planned scale-up of a single NodeGroup @@ -46,7 +46,7 @@ func (s ScaleUpInfo) String() string { // NodeGroupSetProcessor finds nodegroups that are similar and allows balancing scale-up between them. type NodeGroupSetProcessor interface { FindSimilarNodeGroups(context *context.AutoscalingContext, nodeGroup cloudprovider.NodeGroup, - nodeInfosForGroups map[string]*schedulerframework.NodeInfo) ([]cloudprovider.NodeGroup, errors.AutoscalerError) + nodeInfosForGroups map[string]*framework.NodeInfo) ([]cloudprovider.NodeGroup, errors.AutoscalerError) BalanceScaleUpBetweenGroups(context *context.AutoscalingContext, groups []cloudprovider.NodeGroup, newNodes int) ([]ScaleUpInfo, errors.AutoscalerError) CleanUp() @@ -58,7 +58,7 @@ type NoOpNodeGroupSetProcessor struct { // FindSimilarNodeGroups returns a list of NodeGroups similar to the one provided in parameter. func (n *NoOpNodeGroupSetProcessor) FindSimilarNodeGroups(context *context.AutoscalingContext, nodeGroup cloudprovider.NodeGroup, - nodeInfosForGroups map[string]*schedulerframework.NodeInfo) ([]cloudprovider.NodeGroup, errors.AutoscalerError) { + nodeInfosForGroups map[string]*framework.NodeInfo) ([]cloudprovider.NodeGroup, errors.AutoscalerError) { return []cloudprovider.NodeGroup{}, nil } diff --git a/cluster-autoscaler/processors/nodeinfosprovider/annotation_node_info_provider.go b/cluster-autoscaler/processors/nodeinfosprovider/annotation_node_info_provider.go index 8497ac5064e9..dd295cd53f77 100644 --- a/cluster-autoscaler/processors/nodeinfosprovider/annotation_node_info_provider.go +++ b/cluster-autoscaler/processors/nodeinfosprovider/annotation_node_info_provider.go @@ -22,9 +22,9 @@ import ( appsv1 "k8s.io/api/apps/v1" apiv1 "k8s.io/api/core/v1" "k8s.io/autoscaler/cluster-autoscaler/context" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/utils/errors" "k8s.io/autoscaler/cluster-autoscaler/utils/taints" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) // AnnotationNodeInfoProvider is a wrapper for MixedTemplateNodeInfoProvider. @@ -47,7 +47,7 @@ func NewCustomAnnotationNodeInfoProvider(templateNodeInfoProvider TemplateNodeIn } // Process returns the nodeInfos set for this cluster. 
-func (p *AnnotationNodeInfoProvider) Process(ctx *context.AutoscalingContext, nodes []*apiv1.Node, daemonsets []*appsv1.DaemonSet, taintConfig taints.TaintConfig, currentTime time.Time) (map[string]*schedulerframework.NodeInfo, errors.AutoscalerError) { +func (p *AnnotationNodeInfoProvider) Process(ctx *context.AutoscalingContext, nodes []*apiv1.Node, daemonsets []*appsv1.DaemonSet, taintConfig taints.TaintConfig, currentTime time.Time) (map[string]*framework.NodeInfo, errors.AutoscalerError) { nodeInfos, err := p.templateNodeInfoProvider.Process(ctx, nodes, daemonsets, taintConfig, currentTime) if err != nil { return nil, err diff --git a/cluster-autoscaler/processors/nodeinfosprovider/asg_tag_resource_node_info_provider.go b/cluster-autoscaler/processors/nodeinfosprovider/asg_tag_resource_node_info_provider.go index 120b29ffc664..1a9d64e3dc8d 100644 --- a/cluster-autoscaler/processors/nodeinfosprovider/asg_tag_resource_node_info_provider.go +++ b/cluster-autoscaler/processors/nodeinfosprovider/asg_tag_resource_node_info_provider.go @@ -22,9 +22,9 @@ import ( appsv1 "k8s.io/api/apps/v1" apiv1 "k8s.io/api/core/v1" "k8s.io/autoscaler/cluster-autoscaler/context" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/utils/errors" "k8s.io/autoscaler/cluster-autoscaler/utils/taints" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) // AsgTagResourceNodeInfoProvider is a wrapper for MixedTemplateNodeInfoProvider. @@ -40,7 +40,7 @@ func NewAsgTagResourceNodeInfoProvider(t *time.Duration, forceDaemonSets bool) * } // Process returns the nodeInfos set for this cluster. -func (p *AsgTagResourceNodeInfoProvider) Process(ctx *context.AutoscalingContext, nodes []*apiv1.Node, daemonsets []*appsv1.DaemonSet, taintConfig taints.TaintConfig, currentTime time.Time) (map[string]*schedulerframework.NodeInfo, errors.AutoscalerError) { +func (p *AsgTagResourceNodeInfoProvider) Process(ctx *context.AutoscalingContext, nodes []*apiv1.Node, daemonsets []*appsv1.DaemonSet, taintConfig taints.TaintConfig, currentTime time.Time) (map[string]*framework.NodeInfo, errors.AutoscalerError) { nodeInfos, err := p.mixedTemplateNodeInfoProvider.Process(ctx, nodes, daemonsets, taintConfig, currentTime) if err != nil { return nil, err diff --git a/cluster-autoscaler/processors/nodeinfosprovider/mixed_nodeinfos_processor.go b/cluster-autoscaler/processors/nodeinfosprovider/mixed_nodeinfos_processor.go index b1f310e47625..8b0ebd58571a 100644 --- a/cluster-autoscaler/processors/nodeinfosprovider/mixed_nodeinfos_processor.go +++ b/cluster-autoscaler/processors/nodeinfosprovider/mixed_nodeinfos_processor.go @@ -26,10 +26,10 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/context" "k8s.io/autoscaler/cluster-autoscaler/core/utils" "k8s.io/autoscaler/cluster-autoscaler/simulator" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/utils/errors" kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes" "k8s.io/autoscaler/cluster-autoscaler/utils/taints" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" klog "k8s.io/klog/v2" ) @@ -38,7 +38,7 @@ const stabilizationDelay = 1 * time.Minute const maxCacheExpireTime = 87660 * time.Hour type cacheItem struct { - *schedulerframework.NodeInfo + *framework.NodeInfo added time.Time } @@ -72,15 +72,15 @@ func (p *MixedTemplateNodeInfoProvider) CleanUp() { } // Process returns the nodeInfos set for this cluster -func (p *MixedTemplateNodeInfoProvider) Process(ctx 
*context.AutoscalingContext, nodes []*apiv1.Node, daemonsets []*appsv1.DaemonSet, taintConfig taints.TaintConfig, now time.Time) (map[string]*schedulerframework.NodeInfo, errors.AutoscalerError) { +func (p *MixedTemplateNodeInfoProvider) Process(ctx *context.AutoscalingContext, nodes []*apiv1.Node, daemonsets []*appsv1.DaemonSet, taintConfig taints.TaintConfig, now time.Time) (map[string]*framework.NodeInfo, errors.AutoscalerError) { // TODO(mwielgus): This returns map keyed by url, while most code (including scheduler) uses node.Name for a key. // TODO(mwielgus): Review error policy - sometimes we may continue with partial errors. - result := make(map[string]*schedulerframework.NodeInfo) + result := make(map[string]*framework.NodeInfo) seenGroups := make(map[string]bool) podsForNodes, err := getPodsForNodes(ctx.ListerRegistry) if err != nil { - return map[string]*schedulerframework.NodeInfo{}, err + return map[string]*framework.NodeInfo{}, err } // processNode returns information whether the nodeTemplate was generated and if there was an error. @@ -105,12 +105,10 @@ func (p *MixedTemplateNodeInfoProvider) Process(ctx *context.AutoscalingContext, } var pods []*apiv1.Pod - for _, podInfo := range nodeInfo.Pods { + for _, podInfo := range nodeInfo.Pods() { pods = append(pods, podInfo.Pod) } - - sanitizedNodeInfo := schedulerframework.NewNodeInfo(utils.SanitizePods(pods, sanitizedNode)...) - sanitizedNodeInfo.SetNode(sanitizedNode) + sanitizedNodeInfo := framework.NewNodeInfo(sanitizedNode, nil, utils.SanitizePods(nodeInfo.Pods(), sanitizedNode)...) result[id] = sanitizedNodeInfo return true, id, nil } @@ -124,7 +122,7 @@ func (p *MixedTemplateNodeInfoProvider) Process(ctx *context.AutoscalingContext, } added, id, typedErr := processNode(node) if typedErr != nil { - return map[string]*schedulerframework.NodeInfo{}, typedErr + return map[string]*framework.NodeInfo{}, typedErr } if added && p.nodeInfoCache != nil { nodeInfoCopy := utils.DeepCopyNodeInfo(result[id]) @@ -158,7 +156,7 @@ func (p *MixedTemplateNodeInfoProvider) Process(ctx *context.AutoscalingContext, continue } else { klog.Errorf("Unable to build proper template node for %s: %v", id, err) - return map[string]*schedulerframework.NodeInfo{}, errors.ToAutoscalerError(errors.CloudProviderError, err) + return map[string]*framework.NodeInfo{}, errors.ToAutoscalerError(errors.CloudProviderError, err) } } result[id] = nodeInfo @@ -179,11 +177,11 @@ func (p *MixedTemplateNodeInfoProvider) Process(ctx *context.AutoscalingContext, } added, _, typedErr := processNode(node) if typedErr != nil { - return map[string]*schedulerframework.NodeInfo{}, typedErr + return map[string]*framework.NodeInfo{}, typedErr } nodeGroup, err := ctx.CloudProvider.NodeGroupForNode(node) if err != nil { - return map[string]*schedulerframework.NodeInfo{}, errors.ToAutoscalerError( + return map[string]*framework.NodeInfo{}, errors.ToAutoscalerError( errors.CloudProviderError, err) } if added { diff --git a/cluster-autoscaler/processors/nodeinfosprovider/mixed_nodeinfos_processor_test.go b/cluster-autoscaler/processors/nodeinfosprovider/mixed_nodeinfos_processor_test.go index f59aaac7a33e..68e04752a8dc 100644 --- a/cluster-autoscaler/processors/nodeinfosprovider/mixed_nodeinfos_processor_test.go +++ b/cluster-autoscaler/processors/nodeinfosprovider/mixed_nodeinfos_processor_test.go @@ -22,6 +22,7 @@ import ( testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test" "k8s.io/autoscaler/cluster-autoscaler/context" + 
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker" kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes" "k8s.io/autoscaler/cluster-autoscaler/utils/taints" @@ -31,7 +32,6 @@ import ( "github.com/stretchr/testify/assert" appsv1 "k8s.io/api/apps/v1" apiv1 "k8s.io/api/core/v1" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) var ( @@ -54,13 +54,12 @@ func TestGetNodeInfosForGroups(t *testing.T) { SetNodeReadyState(justReady5, true, now) tn := BuildTestNode("tn", 5000, 5000) - tni := schedulerframework.NewNodeInfo() - tni.SetNode(tn) + tni := framework.NewTestNodeInfo(tn) // Cloud provider with TemplateNodeInfo implemented. provider1 := testprovider.NewTestAutoprovisioningCloudProvider( nil, nil, nil, nil, nil, - map[string]*schedulerframework.NodeInfo{"ng3": tni, "ng4": tni, "ng5": tni}) + map[string]*framework.NodeInfo{"ng3": tni, "ng4": tni, "ng5": tni}) provider1.AddNodeGroup("ng1", 1, 10, 1) // Nodegroup with ready node. provider1.AddNode("ng1", ready1) provider1.AddNodeGroup("ng2", 1, 10, 1) // Nodegroup with ready and unready node. @@ -137,8 +136,7 @@ func TestGetNodeInfosForGroupsCache(t *testing.T) { SetNodeReadyState(ready6, true, now.Add(-2*time.Minute)) tn := BuildTestNode("tn", 10000, 10000) - tni := schedulerframework.NewNodeInfo() - tni.SetNode(tn) + tni := framework.NewTestNodeInfo(tn) lastDeletedGroup := "" onDeleteGroup := func(id string) error { @@ -149,7 +147,7 @@ func TestGetNodeInfosForGroupsCache(t *testing.T) { // Cloud provider with TemplateNodeInfo implemented. provider1 := testprovider.NewTestAutoprovisioningCloudProvider( nil, nil, nil, onDeleteGroup, nil, - map[string]*schedulerframework.NodeInfo{"ng3": tni, "ng4": tni}) + map[string]*framework.NodeInfo{"ng3": tni, "ng4": tni}) provider1.AddNodeGroup("ng1", 1, 10, 1) // Nodegroup with ready node. provider1.AddNode("ng1", ready1) provider1.AddNodeGroup("ng2", 1, 10, 1) // Nodegroup with ready and unready node. @@ -229,8 +227,7 @@ func TestGetNodeInfosForGroupsCache(t *testing.T) { assert.False(t, found) // Fill cache manually - infoNg4Node6 := schedulerframework.NewNodeInfo() - infoNg4Node6.SetNode(ready6.DeepCopy()) + infoNg4Node6 := framework.NewTestNodeInfo(ready6.DeepCopy()) niProcessor.nodeInfoCache = map[string]cacheItem{"ng4": {NodeInfo: infoNg4Node6, added: now}} res, err = niProcessor.Process(&ctx, []*apiv1.Node{unready4, unready3, ready2, ready1}, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now) // Check if cache was used @@ -264,8 +261,7 @@ func TestGetNodeInfosCacheExpired(t *testing.T) { }, } tn := BuildTestNode("tn", 5000, 5000) - tni := schedulerframework.NewNodeInfo() - tni.SetNode(tn) + tni := framework.NewTestNodeInfo(tn) // Cache expire time is set. 
niProcessor1 := NewMixedTemplateNodeInfoProvider(&cacheTtl, false) niProcessor1.nodeInfoCache = map[string]cacheItem{ diff --git a/cluster-autoscaler/processors/nodeinfosprovider/node_info_provider_processor.go b/cluster-autoscaler/processors/nodeinfosprovider/node_info_provider_processor.go index 437f7f9180ec..2e36a3c539f6 100644 --- a/cluster-autoscaler/processors/nodeinfosprovider/node_info_provider_processor.go +++ b/cluster-autoscaler/processors/nodeinfosprovider/node_info_provider_processor.go @@ -23,15 +23,15 @@ import ( apiv1 "k8s.io/api/core/v1" "k8s.io/autoscaler/cluster-autoscaler/context" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/utils/errors" "k8s.io/autoscaler/cluster-autoscaler/utils/taints" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) // TemplateNodeInfoProvider is provides the initial nodeInfos set. type TemplateNodeInfoProvider interface { // Process returns a map of nodeInfos for node groups. - Process(ctx *context.AutoscalingContext, nodes []*apiv1.Node, daemonsets []*appsv1.DaemonSet, taintConfig taints.TaintConfig, currentTime time.Time) (map[string]*schedulerframework.NodeInfo, errors.AutoscalerError) + Process(ctx *context.AutoscalingContext, nodes []*apiv1.Node, daemonsets []*appsv1.DaemonSet, taintConfig taints.TaintConfig, currentTime time.Time) (map[string]*framework.NodeInfo, errors.AutoscalerError) // CleanUp cleans up processor's internal structures. CleanUp() } diff --git a/cluster-autoscaler/processors/podinjection/pod_injection_processor.go b/cluster-autoscaler/processors/podinjection/pod_injection_processor.go index 74fb4b6d9a20..08645fa4d1d3 100644 --- a/cluster-autoscaler/processors/podinjection/pod_injection_processor.go +++ b/cluster-autoscaler/processors/podinjection/pod_injection_processor.go @@ -24,8 +24,8 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/autoscaler/cluster-autoscaler/context" podinjectionbackoff "k8s.io/autoscaler/cluster-autoscaler/processors/podinjection/backoff" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/scheduler/framework" ) const ( @@ -60,7 +60,7 @@ func (p *PodInjectionPodListProcessor) Process(ctx *context.AutoscalingContext, controllers := listControllers(ctx) controllers = p.skipBackedoffControllers(controllers) - nodeInfos, err := ctx.ClusterSnapshot.NodeInfos().List() + nodeInfos, err := ctx.ClusterSnapshot.ListNodeInfos() if err != nil { klog.Errorf("Failed to list nodeInfos from cluster snapshot: %v", err) return unschedulablePods, fmt.Errorf("failed to list nodeInfos from cluster snapshot: %v", err) @@ -125,7 +125,7 @@ func (p *podGroup) fakePodCount() int { func podsFromNodeInfos(nodeInfos []*framework.NodeInfo) []*apiv1.Pod { var pods []*apiv1.Pod for _, nodeInfo := range nodeInfos { - for _, podInfo := range nodeInfo.Pods { + for _, podInfo := range nodeInfo.Pods() { pods = append(pods, podInfo.Pod) } } diff --git a/cluster-autoscaler/processors/provreq/processor.go b/cluster-autoscaler/processors/provreq/processor.go index 810112e94eb8..56f52257547c 100644 --- a/cluster-autoscaler/processors/provreq/processor.go +++ b/cluster-autoscaler/processors/provreq/processor.go @@ -31,11 +31,11 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/provreqclient" "k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/provreqwrapper" "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" 
"k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker" "k8s.io/autoscaler/cluster-autoscaler/simulator/scheduling" "k8s.io/autoscaler/cluster-autoscaler/utils/klogx" "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/scheduler/framework" ) const ( diff --git a/cluster-autoscaler/processors/provreq/processor_test.go b/cluster-autoscaler/processors/provreq/processor_test.go index 68dd47c9cdc5..20eaa0e1ffba 100644 --- a/cluster-autoscaler/processors/provreq/processor_test.go +++ b/cluster-autoscaler/processors/provreq/processor_test.go @@ -24,7 +24,7 @@ import ( "github.com/stretchr/testify/assert" apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/pkg/scheduler/framework" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" schedulermetrics "k8s.io/kubernetes/pkg/scheduler/metrics" v1 "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1" diff --git a/cluster-autoscaler/processors/scaledowncandidates/emptycandidates/empty_candidates_sorting.go b/cluster-autoscaler/processors/scaledowncandidates/emptycandidates/empty_candidates_sorting.go index 8ad745648c2a..6358c106473f 100644 --- a/cluster-autoscaler/processors/scaledowncandidates/emptycandidates/empty_candidates_sorting.go +++ b/cluster-autoscaler/processors/scaledowncandidates/emptycandidates/empty_candidates_sorting.go @@ -23,20 +23,20 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/simulator" "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" "k8s.io/autoscaler/cluster-autoscaler/simulator/drainability/rules" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/simulator/options" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) type nodeInfoGetter interface { - GetNodeInfo(nodeName string) (*schedulerframework.NodeInfo, error) + GetNodeInfo(nodeName string) (*framework.NodeInfo, error) } type nodeInfoGetterImpl struct { c clustersnapshot.ClusterSnapshot } -func (n *nodeInfoGetterImpl) GetNodeInfo(nodeName string) (*schedulerframework.NodeInfo, error) { - return n.c.NodeInfos().Get(nodeName) +func (n *nodeInfoGetterImpl) GetNodeInfo(nodeName string) (*framework.NodeInfo, error) { + return n.c.GetNodeInfo(nodeName) } // NewNodeInfoGetter limits ClusterSnapshot interface to NodeInfoGet() method. diff --git a/cluster-autoscaler/processors/scaledowncandidates/emptycandidates/empty_candidates_sorting_test.go b/cluster-autoscaler/processors/scaledowncandidates/emptycandidates/empty_candidates_sorting_test.go index 469ddcd81382..d02ae8400a47 100644 --- a/cluster-autoscaler/processors/scaledowncandidates/emptycandidates/empty_candidates_sorting_test.go +++ b/cluster-autoscaler/processors/scaledowncandidates/emptycandidates/empty_candidates_sorting_test.go @@ -21,18 +21,18 @@ import ( "testing" v1 "k8s.io/api/core/v1" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/simulator/options" . 
"k8s.io/autoscaler/cluster-autoscaler/utils/test" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) var err = fmt.Errorf("error") type testNodeInfoGetter struct { - m map[string]*schedulerframework.NodeInfo + m map[string]*framework.NodeInfo } -func (t *testNodeInfoGetter) GetNodeInfo(nodeName string) (*schedulerframework.NodeInfo, error) { +func (t *testNodeInfoGetter) GetNodeInfo(nodeName string) (*framework.NodeInfo, error) { if nodeInfo, ok := t.m[nodeName]; ok { return nodeInfo, nil } @@ -40,27 +40,22 @@ func (t *testNodeInfoGetter) GetNodeInfo(nodeName string) (*schedulerframework.N } func TestScaleDownEarlierThan(t *testing.T) { - niEmpty := schedulerframework.NewNodeInfo() nodeEmptyName := "nodeEmpty" nodeEmpty := BuildTestNode(nodeEmptyName, 0, 100) - niEmpty.SetNode(nodeEmpty) + niEmpty := framework.NewTestNodeInfo(nodeEmpty) - niEmpty2 := schedulerframework.NewNodeInfo() nodeEmptyName2 := "nodeEmpty2" nodeEmpty2 := BuildTestNode(nodeEmptyName2, 0, 100) - niEmpty.SetNode(nodeEmpty2) + niEmpty2 := framework.NewTestNodeInfo(nodeEmpty2) - niNonEmpty := schedulerframework.NewNodeInfo() nodeNonEmptyName := "nodeNonEmpty" nodeNonEmpty := BuildTestNode(nodeNonEmptyName, 0, 100) - niNonEmpty.SetNode(nodeNonEmpty) pod := BuildTestPod("p1", 0, 100) - pi, _ := schedulerframework.NewPodInfo(pod) - niNonEmpty.AddPodInfo(pi) + niNonEmpty := framework.NewTestNodeInfo(nodeNonEmpty, pod) noNodeInfoNode := BuildTestNode("n1", 0, 100) - niGetter := testNodeInfoGetter{map[string]*schedulerframework.NodeInfo{nodeEmptyName: niEmpty, nodeNonEmptyName: niNonEmpty, nodeEmptyName2: niEmpty2}} + niGetter := testNodeInfoGetter{map[string]*framework.NodeInfo{nodeEmptyName: niEmpty, nodeNonEmptyName: niNonEmpty, nodeEmptyName2: niEmpty2}} deleteOptions := options.NodeDeleteOptions{ SkipNodesWithSystemPods: true, diff --git a/cluster-autoscaler/provisioningrequest/besteffortatomic/provisioning_class.go b/cluster-autoscaler/provisioningrequest/besteffortatomic/provisioning_class.go index 122312b622cc..4f7c1ad351f1 100644 --- a/cluster-autoscaler/provisioningrequest/besteffortatomic/provisioning_class.go +++ b/cluster-autoscaler/provisioningrequest/besteffortatomic/provisioning_class.go @@ -21,6 +21,7 @@ import ( apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/klog/v2" "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1" @@ -37,7 +38,6 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/utils/taints" ca_processors "k8s.io/autoscaler/cluster-autoscaler/processors" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) // Best effort atomic provisionig class requests scale-up only if it's possible @@ -76,7 +76,7 @@ func (o *bestEffortAtomicProvClass) Provision( unschedulablePods []*apiv1.Pod, nodes []*apiv1.Node, daemonSets []*appsv1.DaemonSet, - nodeInfos map[string]*schedulerframework.NodeInfo, + nodeInfos map[string]*framework.NodeInfo, ) (*status.ScaleUpStatus, errors.AutoscalerError) { if len(unschedulablePods) == 0 { return &status.ScaleUpStatus{Result: status.ScaleUpNotTried}, nil diff --git a/cluster-autoscaler/provisioningrequest/checkcapacity/provisioningclass.go b/cluster-autoscaler/provisioningrequest/checkcapacity/provisioningclass.go index 1a3b45cbbaf5..35cde63f4338 100644 --- a/cluster-autoscaler/provisioningrequest/checkcapacity/provisioningclass.go +++ 
b/cluster-autoscaler/provisioningrequest/checkcapacity/provisioningclass.go @@ -35,13 +35,13 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/provreqclient" "k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/provreqwrapper" "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/simulator/scheduling" "k8s.io/autoscaler/cluster-autoscaler/utils/errors" "k8s.io/autoscaler/cluster-autoscaler/utils/taints" "k8s.io/klog/v2" ca_processors "k8s.io/autoscaler/cluster-autoscaler/processors" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) type checkCapacityProvClass struct { @@ -84,7 +84,7 @@ func (o *checkCapacityProvClass) Provision( unschedulablePods []*apiv1.Pod, nodes []*apiv1.Node, daemonSets []*appsv1.DaemonSet, - nodeInfos map[string]*schedulerframework.NodeInfo, + nodeInfos map[string]*framework.NodeInfo, ) (*status.ScaleUpStatus, errors.AutoscalerError) { combinedStatus := NewCombinedStatusSet() provisioningRequestsProcessed := make(map[string]bool) diff --git a/cluster-autoscaler/provisioningrequest/orchestrator/orchestrator.go b/cluster-autoscaler/provisioningrequest/orchestrator/orchestrator.go index d59ad2fa9186..4008b5992246 100644 --- a/cluster-autoscaler/provisioningrequest/orchestrator/orchestrator.go +++ b/cluster-autoscaler/provisioningrequest/orchestrator/orchestrator.go @@ -26,18 +26,18 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/estimator" "k8s.io/autoscaler/cluster-autoscaler/processors/status" "k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/provreqclient" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/simulator/scheduling" ca_errors "k8s.io/autoscaler/cluster-autoscaler/utils/errors" "k8s.io/autoscaler/cluster-autoscaler/utils/taints" ca_processors "k8s.io/autoscaler/cluster-autoscaler/processors" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) // ProvisioningClass is an interface for ProvisioningRequests. type ProvisioningClass interface { Provision([]*apiv1.Pod, []*apiv1.Node, []*appsv1.DaemonSet, - map[string]*schedulerframework.NodeInfo) (*status.ScaleUpStatus, ca_errors.AutoscalerError) + map[string]*framework.NodeInfo) (*status.ScaleUpStatus, ca_errors.AutoscalerError) Initialize(*context.AutoscalingContext, *ca_processors.AutoscalingProcessors, *clusterstate.ClusterStateRegistry, estimator.EstimatorBuilder, taints.TaintConfig, *scheduling.HintingSimulator) } @@ -82,7 +82,7 @@ func (o *provReqOrchestrator) ScaleUp( unschedulablePods []*apiv1.Pod, nodes []*apiv1.Node, daemonSets []*appsv1.DaemonSet, - nodeInfos map[string]*schedulerframework.NodeInfo, + nodeInfos map[string]*framework.NodeInfo, _ bool, // Provision() doesn't use this parameter. ) (*status.ScaleUpStatus, ca_errors.AutoscalerError) { if !o.initialized { @@ -105,7 +105,7 @@ func (o *provReqOrchestrator) ScaleUp( // ScaleUpToNodeGroupMinSize doesn't have implementation for ProvisioningRequest Orchestrator. 
func (o *provReqOrchestrator) ScaleUpToNodeGroupMinSize( nodes []*apiv1.Node, - nodeInfos map[string]*schedulerframework.NodeInfo, + nodeInfos map[string]*framework.NodeInfo, ) (*status.ScaleUpStatus, ca_errors.AutoscalerError) { return nil, nil } diff --git a/cluster-autoscaler/provisioningrequest/orchestrator/orchestrator_test.go b/cluster-autoscaler/provisioningrequest/orchestrator/orchestrator_test.go index 0b527625f589..ba6a8e684a17 100644 --- a/cluster-autoscaler/provisioningrequest/orchestrator/orchestrator_test.go +++ b/cluster-autoscaler/provisioningrequest/orchestrator/orchestrator_test.go @@ -43,13 +43,13 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/provreqclient" "k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/provreqwrapper" "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes" "k8s.io/autoscaler/cluster-autoscaler/utils/taints" . "k8s.io/autoscaler/cluster-autoscaler/utils/test" "k8s.io/client-go/kubernetes/fake" clocktesting "k8s.io/utils/clock/testing" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" schedulermetrics "k8s.io/kubernetes/pkg/scheduler/metrics" ) @@ -451,7 +451,7 @@ func TestScaleUp(t *testing.T) { } } -func setupTest(t *testing.T, client *provreqclient.ProvisioningRequestClient, nodes []*apiv1.Node, onScaleUpFunc func(string, int) error, autoprovisioning bool, batchProcessing bool, maxBatchSize int, batchTimebox time.Duration) (*provReqOrchestrator, map[string]*schedulerframework.NodeInfo) { +func setupTest(t *testing.T, client *provreqclient.ProvisioningRequestClient, nodes []*apiv1.Node, onScaleUpFunc func(string, int) error, autoprovisioning bool, batchProcessing bool, maxBatchSize int, batchTimebox time.Duration) (*provReqOrchestrator, map[string]*framework.NodeInfo) { provider := testprovider.NewTestCloudProvider(onScaleUpFunc, nil) clock := clocktesting.NewFakePassiveClock(time.Now()) now := clock.Now() @@ -459,9 +459,8 @@ func setupTest(t *testing.T, client *provreqclient.ProvisioningRequestClient, no machineTypes := []string{"large-machine"} template := BuildTestNode("large-node-template", 100, 100) SetNodeReadyState(template, true, now) - nodeInfoTemplate := schedulerframework.NewNodeInfo() - nodeInfoTemplate.SetNode(template) - machineTemplates := map[string]*schedulerframework.NodeInfo{ + nodeInfoTemplate := framework.NewTestNodeInfo(template) + machineTemplates := map[string]*framework.NodeInfo{ "large-machine": nodeInfoTemplate, } onNodeGroupCreateFunc := func(name string) error { return nil } diff --git a/cluster-autoscaler/provisioningrequest/orchestrator/wrapper_orchestrator.go b/cluster-autoscaler/provisioningrequest/orchestrator/wrapper_orchestrator.go index 07a902b2e54b..7e8799a2e970 100644 --- a/cluster-autoscaler/provisioningrequest/orchestrator/wrapper_orchestrator.go +++ b/cluster-autoscaler/provisioningrequest/orchestrator/wrapper_orchestrator.go @@ -27,9 +27,9 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/estimator" ca_processors "k8s.io/autoscaler/cluster-autoscaler/processors" "k8s.io/autoscaler/cluster-autoscaler/processors/status" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/utils/errors" "k8s.io/autoscaler/cluster-autoscaler/utils/taints" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) // WrapperOrchestrator is an orchestrator which wraps Scale Up for 
ProvisioningRequests and regular pods. @@ -67,7 +67,7 @@ func (o *WrapperOrchestrator) ScaleUp( unschedulablePods []*apiv1.Pod, nodes []*apiv1.Node, daemonSets []*appsv1.DaemonSet, - nodeInfos map[string]*schedulerframework.NodeInfo, + nodeInfos map[string]*framework.NodeInfo, allOrNothing bool, ) (*status.ScaleUpStatus, errors.AutoscalerError) { defer func() { o.scaleUpRegularPods = !o.scaleUpRegularPods }() @@ -102,7 +102,7 @@ func splitOut(unschedulablePods []*apiv1.Pod) (provReqPods, regularPods []*apiv1 // appropriate status or error if an unexpected error occurred. func (o *WrapperOrchestrator) ScaleUpToNodeGroupMinSize( nodes []*apiv1.Node, - nodeInfos map[string]*schedulerframework.NodeInfo, + nodeInfos map[string]*framework.NodeInfo, ) (*status.ScaleUpStatus, errors.AutoscalerError) { return o.podsOrchestrator.ScaleUpToNodeGroupMinSize(nodes, nodeInfos) } diff --git a/cluster-autoscaler/provisioningrequest/orchestrator/wrapper_orchestrator_test.go b/cluster-autoscaler/provisioningrequest/orchestrator/wrapper_orchestrator_test.go index 64644ee8d3f3..02d32d295890 100644 --- a/cluster-autoscaler/provisioningrequest/orchestrator/wrapper_orchestrator_test.go +++ b/cluster-autoscaler/provisioningrequest/orchestrator/wrapper_orchestrator_test.go @@ -28,10 +28,10 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/estimator" ca_processors "k8s.io/autoscaler/cluster-autoscaler/processors" "k8s.io/autoscaler/cluster-autoscaler/processors/status" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/utils/errors" "k8s.io/autoscaler/cluster-autoscaler/utils/taints" . "k8s.io/autoscaler/cluster-autoscaler/utils/test" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) const ( @@ -70,7 +70,7 @@ func (f *fakeScaleUp) ScaleUp( unschedulablePods []*apiv1.Pod, nodes []*apiv1.Node, daemonSets []*appsv1.DaemonSet, - nodeInfos map[string]*schedulerframework.NodeInfo, + nodeInfos map[string]*framework.NodeInfo, allOrNothing bool, ) (*status.ScaleUpStatus, errors.AutoscalerError) { return nil, errors.NewAutoscalerError(errors.InternalError, f.errorMsg) @@ -87,7 +87,7 @@ func (f *fakeScaleUp) Initialize( func (f *fakeScaleUp) ScaleUpToNodeGroupMinSize( nodes []*apiv1.Node, - nodeInfos map[string]*schedulerframework.NodeInfo, + nodeInfos map[string]*framework.NodeInfo, ) (*status.ScaleUpStatus, errors.AutoscalerError) { return nil, nil } diff --git a/cluster-autoscaler/simulator/cluster.go b/cluster-autoscaler/simulator/cluster.go index 34288e013039..6855ae5efb95 100644 --- a/cluster-autoscaler/simulator/cluster.go +++ b/cluster-autoscaler/simulator/cluster.go @@ -20,18 +20,17 @@ import ( "fmt" "time" + apiv1 "k8s.io/api/core/v1" "k8s.io/autoscaler/cluster-autoscaler/core/scaledown/pdb" "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" "k8s.io/autoscaler/cluster-autoscaler/simulator/drainability/rules" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/simulator/options" "k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker" "k8s.io/autoscaler/cluster-autoscaler/simulator/scheduling" "k8s.io/autoscaler/cluster-autoscaler/utils/drain" kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes" "k8s.io/autoscaler/cluster-autoscaler/utils/tpu" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" - - apiv1 "k8s.io/api/core/v1" klog "k8s.io/klog/v2" ) @@ -151,7 +150,7 @@ func (r *RemovalSimulator) SimulateNodeRemoval( timestamp time.Time, remainingPdbTracker 
pdb.RemainingPdbTracker, ) (*NodeToBeRemoved, *UnremovableNode) { - nodeInfo, err := r.clusterSnapshot.NodeInfos().Get(nodeName) + nodeInfo, err := r.clusterSnapshot.GetNodeInfo(nodeName) if err != nil { klog.Errorf("Can't retrieve node %s from snapshot, err: %v", nodeName, err) } @@ -185,7 +184,7 @@ func (r *RemovalSimulator) SimulateNodeRemoval( func (r *RemovalSimulator) FindEmptyNodesToRemove(candidates []string, timestamp time.Time) []string { result := make([]string, 0) for _, node := range candidates { - nodeInfo, err := r.clusterSnapshot.NodeInfos().Get(node) + nodeInfo, err := r.clusterSnapshot.GetNodeInfo(node) if err != nil { klog.Errorf("Can't retrieve node %s from snapshot, err: %v", node, err) continue @@ -216,7 +215,7 @@ func (r *RemovalSimulator) withForkedSnapshot(f func() error) (err error) { } func (r *RemovalSimulator) findPlaceFor(removedNode string, pods []*apiv1.Pod, nodes map[string]bool, timestamp time.Time) error { - isCandidateNode := func(nodeInfo *schedulerframework.NodeInfo) bool { + isCandidateNode := func(nodeInfo *framework.NodeInfo) bool { return nodeInfo.Node().Name != removedNode && nodes[nodeInfo.Node().Name] } diff --git a/cluster-autoscaler/simulator/cluster_test.go b/cluster-autoscaler/simulator/cluster_test.go index 6b9eaa4e96e8..e493a6672b02 100644 --- a/cluster-autoscaler/simulator/cluster_test.go +++ b/cluster-autoscaler/simulator/cluster_test.go @@ -22,6 +22,7 @@ import ( "time" "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/simulator/options" "k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker" "k8s.io/autoscaler/cluster-autoscaler/utils/drain" @@ -34,7 +35,6 @@ import ( apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/kubelet/types" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) func TestFindEmptyNodes(t *testing.T) { @@ -78,23 +78,18 @@ func TestFindNodesToRemove(t *testing.T) { schedulermetrics.Register() emptyNode := BuildTestNode("n1", 1000, 2000000) - emptyNodeInfo := schedulerframework.NewNodeInfo() - emptyNodeInfo.SetNode(emptyNode) // two small pods backed by ReplicaSet drainableNode := BuildTestNode("n2", 1000, 2000000) - drainableNodeInfo := schedulerframework.NewNodeInfo() - drainableNodeInfo.SetNode(drainableNode) + drainableNodeInfo := framework.NewTestNodeInfo(drainableNode) // one small pod, not backed by anything nonDrainableNode := BuildTestNode("n3", 1000, 2000000) - nonDrainableNodeInfo := schedulerframework.NewNodeInfo() - nonDrainableNodeInfo.SetNode(nonDrainableNode) + nonDrainableNodeInfo := framework.NewTestNodeInfo(nonDrainableNode) // one very large pod fullNode := BuildTestNode("n4", 1000, 2000000) - fullNodeInfo := schedulerframework.NewNodeInfo() - fullNodeInfo.SetNode(fullNode) + fullNodeInfo := framework.NewTestNodeInfo(fullNode) SetNodeReadyState(emptyNode, true, time.Time{}) SetNodeReadyState(drainableNode, true, time.Time{}) @@ -123,20 +118,20 @@ func TestFindNodesToRemove(t *testing.T) { pod1 := BuildTestPod("p1", 100, 100000) pod1.OwnerReferences = ownerRefs pod1.Spec.NodeName = "n2" - drainableNodeInfo.AddPod(pod1) + drainableNodeInfo.AddPod(&framework.PodInfo{Pod: pod1}) pod2 := BuildTestPod("p2", 100, 100000) pod2.OwnerReferences = ownerRefs pod2.Spec.NodeName = "n2" - drainableNodeInfo.AddPod(pod2) + drainableNodeInfo.AddPod(&framework.PodInfo{Pod: pod2}) pod3 := BuildTestPod("p3", 100, 100000) 
pod3.Spec.NodeName = "n3" - nonDrainableNodeInfo.AddPod(pod3) + nonDrainableNodeInfo.AddPod(&framework.PodInfo{Pod: pod3}) pod4 := BuildTestPod("p4", 1000, 100000) pod4.Spec.NodeName = "n4" - fullNodeInfo.AddPod(pod4) + fullNodeInfo.AddPod(&framework.PodInfo{Pod: pod4}) emptyNodeToRemove := NodeToBeRemoved{ Node: emptyNode, diff --git a/cluster-autoscaler/simulator/clustersnapshot/clustersnapshot_test.go b/cluster-autoscaler/simulator/clustersnapshot/clustersnapshot_test.go index b3c0b2b8a426..f9ce65162580 100644 --- a/cluster-autoscaler/simulator/clustersnapshot/clustersnapshot_test.go +++ b/cluster-autoscaler/simulator/clustersnapshot/clustersnapshot_test.go @@ -23,6 +23,7 @@ import ( "time" apiv1 "k8s.io/api/core/v1" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" . "k8s.io/autoscaler/cluster-autoscaler/utils/test" schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" @@ -42,7 +43,7 @@ func nodeNames(nodes []*apiv1.Node) []string { return names } -func extractNodes(nodeInfos []*schedulerframework.NodeInfo) []*apiv1.Node { +func extractNodes(nodeInfos []*framework.NodeInfo) []*apiv1.Node { nodes := []*apiv1.Node{} for _, ni := range nodeInfos { nodes = append(nodes, ni.Node()) @@ -61,11 +62,11 @@ func compareStates(t *testing.T, a, b snapshotState) { } func getSnapshotState(t *testing.T, snapshot ClusterSnapshot) snapshotState { - nodes, err := snapshot.NodeInfos().List() + nodes, err := snapshot.ListNodeInfos() assert.NoError(t, err) var pods []*apiv1.Pod for _, nodeInfo := range nodes { - for _, podInfo := range nodeInfo.Pods { + for _, podInfo := range nodeInfo.Pods() { pods = append(pods, podInfo.Pod) } } diff --git a/cluster-autoscaler/simulator/drain.go b/cluster-autoscaler/simulator/drain.go index 1e23a7aaf420..aa026023eb90 100644 --- a/cluster-autoscaler/simulator/drain.go +++ b/cluster-autoscaler/simulator/drain.go @@ -23,11 +23,11 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/core/scaledown/pdb" "k8s.io/autoscaler/cluster-autoscaler/simulator/drainability" "k8s.io/autoscaler/cluster-autoscaler/simulator/drainability/rules" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/simulator/options" "k8s.io/autoscaler/cluster-autoscaler/utils/drain" kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes" pod_util "k8s.io/autoscaler/cluster-autoscaler/utils/pod" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) // GetPodsToMove returns a list of pods that should be moved elsewhere and a @@ -38,7 +38,7 @@ import ( // with dangling created-by annotation). // If listers is not nil it checks whether RC, DS, Jobs and RS that created // these pods still exist. -func GetPodsToMove(nodeInfo *schedulerframework.NodeInfo, deleteOptions options.NodeDeleteOptions, drainabilityRules rules.Rules, listers kube_util.ListerRegistry, remainingPdbTracker pdb.RemainingPdbTracker, timestamp time.Time) (pods []*apiv1.Pod, daemonSetPods []*apiv1.Pod, blockingPod *drain.BlockingPod, err error) { +func GetPodsToMove(nodeInfo *framework.NodeInfo, deleteOptions options.NodeDeleteOptions, drainabilityRules rules.Rules, listers kube_util.ListerRegistry, remainingPdbTracker pdb.RemainingPdbTracker, timestamp time.Time) (pods []*apiv1.Pod, daemonSetPods []*apiv1.Pod, blockingPod *drain.BlockingPod, err error) { if drainabilityRules == nil { drainabilityRules = rules.Default(deleteOptions) } @@ -50,7 +50,7 @@ func GetPodsToMove(nodeInfo *schedulerframework.NodeInfo, deleteOptions options. 
Listers: listers, Timestamp: timestamp, } - for _, podInfo := range nodeInfo.Pods { + for _, podInfo := range nodeInfo.Pods() { pod := podInfo.Pod status := drainabilityRules.Drainable(drainCtx, pod, nodeInfo) switch status.Outcome { diff --git a/cluster-autoscaler/simulator/drain_test.go b/cluster-autoscaler/simulator/drain_test.go index 74fef8d04fcc..bb7de7307643 100644 --- a/cluster-autoscaler/simulator/drain_test.go +++ b/cluster-autoscaler/simulator/drain_test.go @@ -30,13 +30,13 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/core/scaledown/pdb" "k8s.io/autoscaler/cluster-autoscaler/simulator/drainability" "k8s.io/autoscaler/cluster-autoscaler/simulator/drainability/rules" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/simulator/options" "k8s.io/autoscaler/cluster-autoscaler/utils/drain" "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes" kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes" . "k8s.io/autoscaler/cluster-autoscaler/utils/test" "k8s.io/kubernetes/pkg/kubelet/types" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" "github.com/stretchr/testify/assert" ) @@ -775,7 +775,8 @@ func TestGetPodsToMove(t *testing.T) { rules := append(tc.rules, rules.Default(deleteOptions)...) tracker := pdb.NewBasicRemainingPdbTracker() tracker.SetPdbs(tc.pdbs) - p, d, b, err := GetPodsToMove(schedulerframework.NewNodeInfo(tc.pods...), deleteOptions, rules, registry, tracker, testTime) + ni := framework.NewTestNodeInfo(nil, tc.pods...) + p, d, b, err := GetPodsToMove(ni, deleteOptions, rules, registry, tracker, testTime) if tc.wantErr { assert.Error(t, err) } else { @@ -794,7 +795,7 @@ func (a alwaysDrain) Name() string { return "AlwaysDrain" } -func (a alwaysDrain) Drainable(*drainability.DrainContext, *apiv1.Pod, *schedulerframework.NodeInfo) drainability.Status { +func (a alwaysDrain) Drainable(*drainability.DrainContext, *apiv1.Pod, *framework.NodeInfo) drainability.Status { return drainability.NewDrainableStatus() } @@ -804,7 +805,7 @@ func (n neverDrain) Name() string { return "NeverDrain" } -func (n neverDrain) Drainable(*drainability.DrainContext, *apiv1.Pod, *schedulerframework.NodeInfo) drainability.Status { +func (n neverDrain) Drainable(*drainability.DrainContext, *apiv1.Pod, *framework.NodeInfo) drainability.Status { return drainability.NewBlockedStatus(drain.UnexpectedError, fmt.Errorf("nope")) } @@ -814,6 +815,6 @@ func (c cantDecide) Name() string { return "CantDecide" } -func (c cantDecide) Drainable(*drainability.DrainContext, *apiv1.Pod, *schedulerframework.NodeInfo) drainability.Status { +func (c cantDecide) Drainable(*drainability.DrainContext, *apiv1.Pod, *framework.NodeInfo) drainability.Status { return drainability.NewUndefinedStatus() } diff --git a/cluster-autoscaler/simulator/drainability/rules/daemonset/rule.go b/cluster-autoscaler/simulator/drainability/rules/daemonset/rule.go index e41cecdc359a..81fe7911d4b3 100644 --- a/cluster-autoscaler/simulator/drainability/rules/daemonset/rule.go +++ b/cluster-autoscaler/simulator/drainability/rules/daemonset/rule.go @@ -19,8 +19,8 @@ package daemonset import ( apiv1 "k8s.io/api/core/v1" "k8s.io/autoscaler/cluster-autoscaler/simulator/drainability" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" pod_util "k8s.io/autoscaler/cluster-autoscaler/utils/pod" - "k8s.io/kubernetes/pkg/scheduler/framework" ) // Rule is a drainability rule on how to handle daemon set pods. 
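For downstream implementers of custom drainability rules, the only change this diff asks for is the package of the NodeInfo parameter: it now comes from the autoscaler's own simulator/framework instead of the scheduler framework. A minimal sketch modeled on the test rules in drain_test.go above; the annotation key and the choice of BlockingPodReason are illustrative assumptions, not part of this change:

package customrule

import (
	"fmt"

	apiv1 "k8s.io/api/core/v1"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
	"k8s.io/autoscaler/cluster-autoscaler/utils/drain"
)

// optOutRule blocks draining of pods that carry an opt-out annotation.
// The annotation key below is a placeholder for this sketch.
type optOutRule struct{}

func (r optOutRule) Name() string { return "OptOutAnnotation" }

func (r optOutRule) Drainable(_ *drainability.DrainContext, pod *apiv1.Pod, _ *framework.NodeInfo) drainability.Status {
	if pod.Annotations["example.com/do-not-drain"] == "true" {
		// A real rule would likely pick a more specific BlockingPodReason;
		// drain.UnexpectedError is used only because it appears in this diff.
		return drainability.NewBlockedStatus(drain.UnexpectedError, fmt.Errorf("pod %s/%s opted out of draining", pod.Namespace, pod.Name))
	}
	// Defer the decision to the remaining rules in the chain.
	return drainability.NewUndefinedStatus()
}

Such a rule can be appended to rules.Default(deleteOptions) the same way the test rules above are, and GetPodsToMove will consult it per pod.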
diff --git a/cluster-autoscaler/simulator/drainability/rules/localstorage/rule.go b/cluster-autoscaler/simulator/drainability/rules/localstorage/rule.go index 4d365caa1995..3fe89da73421 100644 --- a/cluster-autoscaler/simulator/drainability/rules/localstorage/rule.go +++ b/cluster-autoscaler/simulator/drainability/rules/localstorage/rule.go @@ -21,8 +21,8 @@ import ( apiv1 "k8s.io/api/core/v1" "k8s.io/autoscaler/cluster-autoscaler/simulator/drainability" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/utils/drain" - "k8s.io/kubernetes/pkg/scheduler/framework" ) // Rule is a drainability rule on how to handle local storage pods. diff --git a/cluster-autoscaler/simulator/drainability/rules/longterminating/rule.go b/cluster-autoscaler/simulator/drainability/rules/longterminating/rule.go index c543fbacff2a..8bb3f1c51542 100644 --- a/cluster-autoscaler/simulator/drainability/rules/longterminating/rule.go +++ b/cluster-autoscaler/simulator/drainability/rules/longterminating/rule.go @@ -19,8 +19,8 @@ package longterminating import ( apiv1 "k8s.io/api/core/v1" "k8s.io/autoscaler/cluster-autoscaler/simulator/drainability" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/utils/drain" - "k8s.io/kubernetes/pkg/scheduler/framework" ) // Rule is a drainability rule on how to handle long terminating pods. diff --git a/cluster-autoscaler/simulator/drainability/rules/mirror/rule.go b/cluster-autoscaler/simulator/drainability/rules/mirror/rule.go index 18ce48186884..39fe1a8f9c83 100644 --- a/cluster-autoscaler/simulator/drainability/rules/mirror/rule.go +++ b/cluster-autoscaler/simulator/drainability/rules/mirror/rule.go @@ -19,8 +19,8 @@ package mirror import ( apiv1 "k8s.io/api/core/v1" "k8s.io/autoscaler/cluster-autoscaler/simulator/drainability" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" pod_util "k8s.io/autoscaler/cluster-autoscaler/utils/pod" - "k8s.io/kubernetes/pkg/scheduler/framework" ) // Rule is a drainability rule on how to handle mirror pods. diff --git a/cluster-autoscaler/simulator/drainability/rules/notsafetoevict/rule.go b/cluster-autoscaler/simulator/drainability/rules/notsafetoevict/rule.go index 76eb5ba14de5..5b4947fdc17b 100644 --- a/cluster-autoscaler/simulator/drainability/rules/notsafetoevict/rule.go +++ b/cluster-autoscaler/simulator/drainability/rules/notsafetoevict/rule.go @@ -21,8 +21,8 @@ import ( apiv1 "k8s.io/api/core/v1" "k8s.io/autoscaler/cluster-autoscaler/simulator/drainability" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/utils/drain" - "k8s.io/kubernetes/pkg/scheduler/framework" ) // Rule is a drainability rule on how to handle not safe to evict pods. diff --git a/cluster-autoscaler/simulator/drainability/rules/pdb/rule.go b/cluster-autoscaler/simulator/drainability/rules/pdb/rule.go index 9b903112998c..63d92b20970c 100644 --- a/cluster-autoscaler/simulator/drainability/rules/pdb/rule.go +++ b/cluster-autoscaler/simulator/drainability/rules/pdb/rule.go @@ -21,8 +21,8 @@ import ( apiv1 "k8s.io/api/core/v1" "k8s.io/autoscaler/cluster-autoscaler/simulator/drainability" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/utils/drain" - "k8s.io/kubernetes/pkg/scheduler/framework" ) // Rule is a drainability rule on how to handle pods with pdbs. 
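The cluster_test.go hunks above and the nodes.go hunk later in this diff repeat two mechanical conversions: NodeInfo construction and pod access. A small consolidated sketch of the new patterns; the function names are illustrative, and the second NewNodeInfo argument is simply passed as nil to match every call site touched here:

package example

import (
	apiv1 "k8s.io/api/core/v1"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

// buildNodeInfo shows the constructor pattern this PR converges on: the node
// and its pods are handed to NewNodeInfo in a single call instead of the old
// NewNodeInfo(pods...) + SetNode(node) pair.
func buildNodeInfo(node *apiv1.Node, pods ...*apiv1.Pod) *framework.NodeInfo {
	var podInfos []*framework.PodInfo
	for _, p := range pods {
		podInfos = append(podInfos, &framework.PodInfo{Pod: p})
	}
	return framework.NewNodeInfo(node, nil, podInfos...)
}

// listPodNames shows the accessor change: Pods is now a method returning
// []*framework.PodInfo rather than a struct field, and pods are added with
// AddPod(&framework.PodInfo{Pod: p}) rather than AddPod(p).
func listPodNames(ni *framework.NodeInfo, extra *apiv1.Pod) []string {
	ni.AddPod(&framework.PodInfo{Pod: extra})
	var names []string
	for _, pi := range ni.Pods() {
		names = append(names, pi.Pod.Name)
	}
	return names
}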
diff --git a/cluster-autoscaler/simulator/drainability/rules/replicacount/rule.go b/cluster-autoscaler/simulator/drainability/rules/replicacount/rule.go index c256c53b56a7..69f4040785f7 100644 --- a/cluster-autoscaler/simulator/drainability/rules/replicacount/rule.go +++ b/cluster-autoscaler/simulator/drainability/rules/replicacount/rule.go @@ -23,9 +23,9 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/autoscaler/cluster-autoscaler/simulator/drainability" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/utils/drain" pod_util "k8s.io/autoscaler/cluster-autoscaler/utils/pod" - "k8s.io/kubernetes/pkg/scheduler/framework" ) // Rule is a drainability rule on how to handle replicated pods. diff --git a/cluster-autoscaler/simulator/drainability/rules/replicated/rule.go b/cluster-autoscaler/simulator/drainability/rules/replicated/rule.go index 53bd19e1fc1a..4fed15ca5d51 100644 --- a/cluster-autoscaler/simulator/drainability/rules/replicated/rule.go +++ b/cluster-autoscaler/simulator/drainability/rules/replicated/rule.go @@ -21,8 +21,8 @@ import ( apiv1 "k8s.io/api/core/v1" "k8s.io/autoscaler/cluster-autoscaler/simulator/drainability" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/utils/drain" - "k8s.io/kubernetes/pkg/scheduler/framework" ) // Rule is a drainability rule on how to handle replicated pods. diff --git a/cluster-autoscaler/simulator/drainability/rules/rules.go b/cluster-autoscaler/simulator/drainability/rules/rules.go index c778f7db8b09..be8d00cb415b 100644 --- a/cluster-autoscaler/simulator/drainability/rules/rules.go +++ b/cluster-autoscaler/simulator/drainability/rules/rules.go @@ -31,9 +31,9 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/simulator/drainability/rules/safetoevict" "k8s.io/autoscaler/cluster-autoscaler/simulator/drainability/rules/system" "k8s.io/autoscaler/cluster-autoscaler/simulator/drainability/rules/terminal" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/simulator/options" "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/scheduler/framework" ) // Rule determines whether a given pod can be drained or not. 
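Across the tests in this diff (drain_test.go above, the nodegroupset and nodeinfosprovider tests earlier), the three-step NewNodeInfo()/SetNode()/AddPodInfo() setup collapses into framework.NewTestNodeInfo(node, pods...). A minimal self-contained sketch of the new test setup, using the BuildTestNode/BuildTestPod helpers these tests already dot-import; the test name is illustrative:

package example_test

import (
	"testing"

	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
	. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
)

func TestNewTestNodeInfoHelper(t *testing.T) {
	node := BuildTestNode("n1", 1000, 2000000)
	pod := BuildTestPod("p1", 100, 100000)
	pod.Spec.NodeName = "n1"

	// Previously: ni := schedulerframework.NewNodeInfo(pod); ni.SetNode(node).
	// Now a single helper attaches both the node and its pods.
	ni := framework.NewTestNodeInfo(node, pod)

	if ni.Node().Name != "n1" {
		t.Errorf("unexpected node name %q", ni.Node().Name)
	}
	if got := len(ni.Pods()); got != 1 {
		t.Errorf("expected 1 pod, got %d", got)
	}
}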
diff --git a/cluster-autoscaler/simulator/drainability/rules/rules_test.go b/cluster-autoscaler/simulator/drainability/rules/rules_test.go index f7f2bc076fce..5418e55cc1e9 100644 --- a/cluster-autoscaler/simulator/drainability/rules/rules_test.go +++ b/cluster-autoscaler/simulator/drainability/rules/rules_test.go @@ -22,8 +22,8 @@ import ( "github.com/google/go-cmp/cmp" apiv1 "k8s.io/api/core/v1" "k8s.io/autoscaler/cluster-autoscaler/simulator/drainability" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/utils/drain" - "k8s.io/kubernetes/pkg/scheduler/framework" ) func TestDrainable(t *testing.T) { diff --git a/cluster-autoscaler/simulator/drainability/rules/safetoevict/rule.go b/cluster-autoscaler/simulator/drainability/rules/safetoevict/rule.go index 4473913cb1eb..902b5883632f 100644 --- a/cluster-autoscaler/simulator/drainability/rules/safetoevict/rule.go +++ b/cluster-autoscaler/simulator/drainability/rules/safetoevict/rule.go @@ -19,8 +19,8 @@ package safetoevict import ( apiv1 "k8s.io/api/core/v1" "k8s.io/autoscaler/cluster-autoscaler/simulator/drainability" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/utils/drain" - "k8s.io/kubernetes/pkg/scheduler/framework" ) // Rule is a drainability rule on how to handle safe to evict pods. diff --git a/cluster-autoscaler/simulator/drainability/rules/system/rule.go b/cluster-autoscaler/simulator/drainability/rules/system/rule.go index 24fea5d5f612..fae89fb128a2 100644 --- a/cluster-autoscaler/simulator/drainability/rules/system/rule.go +++ b/cluster-autoscaler/simulator/drainability/rules/system/rule.go @@ -21,8 +21,8 @@ import ( apiv1 "k8s.io/api/core/v1" "k8s.io/autoscaler/cluster-autoscaler/simulator/drainability" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/utils/drain" - "k8s.io/kubernetes/pkg/scheduler/framework" ) // Rule is a drainability rule on how to handle system pods. diff --git a/cluster-autoscaler/simulator/drainability/rules/terminal/rule.go b/cluster-autoscaler/simulator/drainability/rules/terminal/rule.go index 5609f9eb563b..0b01c49950ba 100644 --- a/cluster-autoscaler/simulator/drainability/rules/terminal/rule.go +++ b/cluster-autoscaler/simulator/drainability/rules/terminal/rule.go @@ -19,8 +19,8 @@ package terminal import ( apiv1 "k8s.io/api/core/v1" "k8s.io/autoscaler/cluster-autoscaler/simulator/drainability" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/utils/drain" - "k8s.io/kubernetes/pkg/scheduler/framework" ) // Rule is a drainability rule on how to handle terminal pods. diff --git a/cluster-autoscaler/simulator/nodes.go b/cluster-autoscaler/simulator/nodes.go index cb4841fbbef3..c80fe0cbe092 100644 --- a/cluster-autoscaler/simulator/nodes.go +++ b/cluster-autoscaler/simulator/nodes.go @@ -21,21 +21,20 @@ import ( apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/utils/daemonset" "k8s.io/autoscaler/cluster-autoscaler/utils/errors" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" pod_util "k8s.io/autoscaler/cluster-autoscaler/utils/pod" ) // BuildNodeInfoForNode build a NodeInfo structure for the given node as if the node was just created. 
-func BuildNodeInfoForNode(node *apiv1.Node, scheduledPods []*apiv1.Pod, daemonsets []*appsv1.DaemonSet, forceDaemonSets bool) (*schedulerframework.NodeInfo, errors.AutoscalerError) { - nodeInfo := schedulerframework.NewNodeInfo() - nodeInfo.SetNode(node) +func BuildNodeInfoForNode(node *apiv1.Node, scheduledPods []*apiv1.Pod, daemonsets []*appsv1.DaemonSet, forceDaemonSets bool) (*framework.NodeInfo, errors.AutoscalerError) { + nodeInfo := framework.NewNodeInfo(node, nil) return addExpectedPods(nodeInfo, scheduledPods, daemonsets, forceDaemonSets) } -func addExpectedPods(nodeInfo *schedulerframework.NodeInfo, scheduledPods []*apiv1.Pod, daemonsets []*appsv1.DaemonSet, forceDaemonSets bool) (*schedulerframework.NodeInfo, errors.AutoscalerError) { +func addExpectedPods(nodeInfo *framework.NodeInfo, scheduledPods []*apiv1.Pod, daemonsets []*appsv1.DaemonSet, forceDaemonSets bool) (*framework.NodeInfo, errors.AutoscalerError) { runningDS := make(map[types.UID]bool) for _, pod := range scheduledPods { // Ignore scheduled pods in deletion phase @@ -44,7 +43,7 @@ func addExpectedPods(nodeInfo *schedulerframework.NodeInfo, scheduledPods []*api } // Add scheduled mirror and DS pods if pod_util.IsMirrorPod(pod) || pod_util.IsDaemonSetPod(pod) { - nodeInfo.AddPod(pod) + nodeInfo.AddPod(&framework.PodInfo{Pod: pod}) } // Mark DS pods as running controllerRef := metav1.GetControllerOf(pod) diff --git a/cluster-autoscaler/simulator/nodes_test.go b/cluster-autoscaler/simulator/nodes_test.go index 4a2be55b13df..b931979de6cb 100644 --- a/cluster-autoscaler/simulator/nodes_test.go +++ b/cluster-autoscaler/simulator/nodes_test.go @@ -207,7 +207,7 @@ func TestBuildNodeInfoForNode(t *testing.T) { for _, pod := range tc.wantPods { wantPods = append(wantPods, cleanPodMetadata(pod)) } - for _, podInfo := range nodeInfo.Pods { + for _, podInfo := range nodeInfo.Pods() { pods = append(pods, cleanPodMetadata(podInfo.Pod)) } assert.ElementsMatch(t, tc.wantPods, pods) diff --git a/cluster-autoscaler/simulator/predicatechecker/interface.go b/cluster-autoscaler/simulator/predicatechecker/interface.go index 2537df5b57b5..2d35b779172c 100644 --- a/cluster-autoscaler/simulator/predicatechecker/interface.go +++ b/cluster-autoscaler/simulator/predicatechecker/interface.go @@ -18,14 +18,14 @@ package predicatechecker import ( "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" apiv1 "k8s.io/api/core/v1" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) // PredicateChecker checks whether all required predicates pass for given Pod and Node. 
type PredicateChecker interface { FitsAnyNode(clusterSnapshot clustersnapshot.ClusterSnapshot, pod *apiv1.Pod) (string, error) - FitsAnyNodeMatching(clusterSnapshot clustersnapshot.ClusterSnapshot, pod *apiv1.Pod, nodeMatches func(*schedulerframework.NodeInfo) bool) (string, error) + FitsAnyNodeMatching(clusterSnapshot clustersnapshot.ClusterSnapshot, pod *apiv1.Pod, nodeMatches func(*framework.NodeInfo) bool) (string, error) CheckPredicates(clusterSnapshot clustersnapshot.ClusterSnapshot, pod *apiv1.Pod, nodeName string) *PredicateError } diff --git a/cluster-autoscaler/simulator/predicatechecker/schedulerbased.go b/cluster-autoscaler/simulator/predicatechecker/schedulerbased.go index d276e010fa26..4e37e97528a2 100644 --- a/cluster-autoscaler/simulator/predicatechecker/schedulerbased.go +++ b/cluster-autoscaler/simulator/predicatechecker/schedulerbased.go @@ -21,6 +21,7 @@ import ( "fmt" "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" apiv1 "k8s.io/api/core/v1" "k8s.io/client-go/informers" @@ -80,18 +81,18 @@ func NewSchedulerBasedPredicateChecker(informerFactory informers.SharedInformerF // FitsAnyNode checks if the given pod can be placed on any of the given nodes. func (p *SchedulerBasedPredicateChecker) FitsAnyNode(clusterSnapshot clustersnapshot.ClusterSnapshot, pod *apiv1.Pod) (string, error) { - return p.FitsAnyNodeMatching(clusterSnapshot, pod, func(*schedulerframework.NodeInfo) bool { + return p.FitsAnyNodeMatching(clusterSnapshot, pod, func(*framework.NodeInfo) bool { return true }) } // FitsAnyNodeMatching checks if the given pod can be placed on any of the given nodes matching the provided function. -func (p *SchedulerBasedPredicateChecker) FitsAnyNodeMatching(clusterSnapshot clustersnapshot.ClusterSnapshot, pod *apiv1.Pod, nodeMatches func(*schedulerframework.NodeInfo) bool) (string, error) { +func (p *SchedulerBasedPredicateChecker) FitsAnyNodeMatching(clusterSnapshot clustersnapshot.ClusterSnapshot, pod *apiv1.Pod, nodeMatches func(*framework.NodeInfo) bool) (string, error) { if clusterSnapshot == nil { return "", fmt.Errorf("ClusterSnapshot not provided") } - nodeInfosList, err := clusterSnapshot.NodeInfos().List() + nodeInfosList, err := clusterSnapshot.ListNodeInfos() if err != nil { // This should never happen. 
// @@ -125,7 +126,7 @@ func (p *SchedulerBasedPredicateChecker) FitsAnyNodeMatching(clusterSnapshot clu continue } - filterStatus := p.framework.RunFilterPlugins(context.TODO(), state, pod, nodeInfo) + filterStatus := p.framework.RunFilterPlugins(context.TODO(), state, pod, nodeInfo.ToScheduler()) if filterStatus.IsSuccess() { p.lastIndex = (p.lastIndex + i + 1) % len(nodeInfosList) return nodeInfo.Node().Name, nil @@ -139,7 +140,7 @@ func (p *SchedulerBasedPredicateChecker) CheckPredicates(clusterSnapshot cluster if clusterSnapshot == nil { return NewPredicateError(InternalPredicateError, "", "ClusterSnapshot not provided", nil, emptyString) } - nodeInfo, err := clusterSnapshot.NodeInfos().Get(nodeName) + nodeInfo, err := clusterSnapshot.GetNodeInfo(nodeName) if err != nil { errorMessage := fmt.Sprintf("Error obtaining NodeInfo for name %s; %v", nodeName, err) return NewPredicateError(InternalPredicateError, "", errorMessage, nil, emptyString) @@ -159,7 +160,7 @@ func (p *SchedulerBasedPredicateChecker) CheckPredicates(clusterSnapshot cluster emptyString) } - filterStatus := p.framework.RunFilterPlugins(context.TODO(), state, pod, nodeInfo) + filterStatus := p.framework.RunFilterPlugins(context.TODO(), state, pod, nodeInfo.ToScheduler()) if !filterStatus.IsSuccess() { filterName := filterStatus.Plugin() @@ -184,7 +185,7 @@ func (p *SchedulerBasedPredicateChecker) CheckPredicates(clusterSnapshot cluster return nil } -func (p *SchedulerBasedPredicateChecker) buildDebugInfo(filterName string, nodeInfo *schedulerframework.NodeInfo) func() string { +func (p *SchedulerBasedPredicateChecker) buildDebugInfo(filterName string, nodeInfo *framework.NodeInfo) func() string { switch filterName { case "TaintToleration": taints := nodeInfo.Node().Spec.Taints diff --git a/cluster-autoscaler/simulator/scheduling/hinting_simulator.go b/cluster-autoscaler/simulator/scheduling/hinting_simulator.go index 51e300d41b40..2287d28810e4 100644 --- a/cluster-autoscaler/simulator/scheduling/hinting_simulator.go +++ b/cluster-autoscaler/simulator/scheduling/hinting_simulator.go @@ -20,11 +20,11 @@ import ( "fmt" "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker" "k8s.io/autoscaler/cluster-autoscaler/utils/klogx" apiv1 "k8s.io/api/core/v1" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) // Status contains information about pods scheduled by the HintingSimulator @@ -55,7 +55,7 @@ func NewHintingSimulator(predicateChecker predicatechecker.PredicateChecker) *Hi // after the first scheduling attempt that fails. This is useful if all provided // pods need to be scheduled. // Note: this function does not fork clusterSnapshot: this has to be done by the caller. 
-func (s *HintingSimulator) TrySchedulePods(clusterSnapshot clustersnapshot.ClusterSnapshot, pods []*apiv1.Pod, isNodeAcceptable func(*schedulerframework.NodeInfo) bool, breakOnFailure bool) ([]Status, int, error) { +func (s *HintingSimulator) TrySchedulePods(clusterSnapshot clustersnapshot.ClusterSnapshot, pods []*apiv1.Pod, isNodeAcceptable func(*framework.NodeInfo) bool, breakOnFailure bool) ([]Status, int, error) { similarPods := NewSimilarPodsScheduling() var statuses []Status @@ -85,13 +85,13 @@ func (s *HintingSimulator) TrySchedulePods(clusterSnapshot clustersnapshot.Clust return statuses, similarPods.OverflowingControllerCount(), nil } -func (s *HintingSimulator) findNodeWithHints(clusterSnapshot clustersnapshot.ClusterSnapshot, pod *apiv1.Pod, isNodeAcceptable func(*schedulerframework.NodeInfo) bool) (string, error) { +func (s *HintingSimulator) findNodeWithHints(clusterSnapshot clustersnapshot.ClusterSnapshot, pod *apiv1.Pod, isNodeAcceptable func(*framework.NodeInfo) bool) (string, error) { hk := HintKeyFromPod(pod) if hintedNode, hasHint := s.hints.Get(hk); hasHint { if err := s.predicateChecker.CheckPredicates(clusterSnapshot, pod, hintedNode); err == nil { s.hints.Set(hk, hintedNode) - nodeInfo, err := clusterSnapshot.NodeInfos().Get(hintedNode) + nodeInfo, err := clusterSnapshot.GetNodeInfo(hintedNode) if err != nil { return "", err } @@ -104,7 +104,7 @@ func (s *HintingSimulator) findNodeWithHints(clusterSnapshot clustersnapshot.Clu return "", nil } -func (s *HintingSimulator) findNode(similarPods *SimilarPodsScheduling, clusterSnapshot clustersnapshot.ClusterSnapshot, pod *apiv1.Pod, loggingQuota *klogx.Quota, isNodeAcceptable func(*schedulerframework.NodeInfo) bool) string { +func (s *HintingSimulator) findNode(similarPods *SimilarPodsScheduling, clusterSnapshot clustersnapshot.ClusterSnapshot, pod *apiv1.Pod, loggingQuota *klogx.Quota, isNodeAcceptable func(*framework.NodeInfo) bool) string { if similarPods.IsSimilarUnschedulable(pod) { klogx.V(4).UpTo(loggingQuota).Infof("failed to find place for %s/%s based on similar pods scheduling", pod.Namespace, pod.Name) return "" @@ -127,6 +127,6 @@ func (s *HintingSimulator) DropOldHints() { } // ScheduleAnywhere can be passed to TrySchedulePods when there are no extra restrictions on nodes to consider. -func ScheduleAnywhere(_ *schedulerframework.NodeInfo) bool { +func ScheduleAnywhere(_ *framework.NodeInfo) bool { return true } diff --git a/cluster-autoscaler/simulator/scheduling/hinting_simulator_test.go b/cluster-autoscaler/simulator/scheduling/hinting_simulator_test.go index f0528661a3f3..7e3ec8cb3d11 100644 --- a/cluster-autoscaler/simulator/scheduling/hinting_simulator_test.go +++ b/cluster-autoscaler/simulator/scheduling/hinting_simulator_test.go @@ -21,9 +21,9 @@ import ( "time" "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker" . 
"k8s.io/autoscaler/cluster-autoscaler/utils/test" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" schedulermetrics "k8s.io/kubernetes/pkg/scheduler/metrics" "github.com/stretchr/testify/assert" @@ -38,7 +38,7 @@ func TestTrySchedulePods(t *testing.T) { nodes []*apiv1.Node pods []*apiv1.Pod newPods []*apiv1.Pod - acceptableNodes func(*schedulerframework.NodeInfo) bool + acceptableNodes func(*framework.NodeInfo) bool wantStatuses []Status wantErr bool }{ @@ -257,20 +257,20 @@ func buildScheduledPod(name string, cpu, mem int64, nodeName string) *apiv1.Pod func countPods(t *testing.T, clusterSnapshot clustersnapshot.ClusterSnapshot) int { t.Helper() count := 0 - nis, err := clusterSnapshot.NodeInfos().List() + nis, err := clusterSnapshot.ListNodeInfos() assert.NoError(t, err) for _, ni := range nis { - count += len(ni.Pods) + count += len(ni.Pods()) } return count } func nodeNameForPod(t *testing.T, clusterSnapshot clustersnapshot.ClusterSnapshot, pod string) string { t.Helper() - nis, err := clusterSnapshot.NodeInfos().List() + nis, err := clusterSnapshot.ListNodeInfos() assert.NoError(t, err) for _, ni := range nis { - for _, pi := range ni.Pods { + for _, pi := range ni.Pods() { if pi.Pod.Name == pod { return ni.Node().Name } @@ -279,8 +279,8 @@ func nodeNameForPod(t *testing.T, clusterSnapshot clustersnapshot.ClusterSnapsho return "" } -func singleNodeOk(nodeName string) func(*schedulerframework.NodeInfo) bool { - return func(nodeInfo *schedulerframework.NodeInfo) bool { +func singleNodeOk(nodeName string) func(*framework.NodeInfo) bool { + return func(nodeInfo *framework.NodeInfo) bool { return nodeName == nodeInfo.Node().Name } } diff --git a/cluster-autoscaler/simulator/utilization/info.go b/cluster-autoscaler/simulator/utilization/info.go index 54365bf84715..ec9ab6578d07 100644 --- a/cluster-autoscaler/simulator/utilization/info.go +++ b/cluster-autoscaler/simulator/utilization/info.go @@ -21,13 +21,13 @@ import ( "time" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/utils/drain" pod_util "k8s.io/autoscaler/cluster-autoscaler/utils/pod" apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" resourcehelper "k8s.io/kubernetes/pkg/api/v1/resource" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" klog "k8s.io/klog/v2" ) @@ -47,7 +47,7 @@ type Info struct { // memory) or gpu utilization based on if the node has GPU or not. Per resource // utilization is the sum of requests for it divided by allocatable. It also // returns the individual cpu, memory and gpu utilization. -func Calculate(nodeInfo *schedulerframework.NodeInfo, skipDaemonSetPods, skipMirrorPods bool, gpuConfig *cloudprovider.GpuConfig, currentTime time.Time) (utilInfo Info, err error) { +func Calculate(nodeInfo *framework.NodeInfo, skipDaemonSetPods, skipMirrorPods bool, gpuConfig *cloudprovider.GpuConfig, currentTime time.Time) (utilInfo Info, err error) { if gpuConfig != nil { gpuUtil, err := CalculateUtilizationOfResource(nodeInfo, gpuConfig.ResourceName, skipDaemonSetPods, skipMirrorPods, currentTime) if err != nil { @@ -82,7 +82,7 @@ func Calculate(nodeInfo *schedulerframework.NodeInfo, skipDaemonSetPods, skipMir } // CalculateUtilizationOfResource calculates utilization of a given resource for a node. 
-func CalculateUtilizationOfResource(nodeInfo *schedulerframework.NodeInfo, resourceName apiv1.ResourceName, skipDaemonSetPods, skipMirrorPods bool, currentTime time.Time) (float64, error) { +func CalculateUtilizationOfResource(nodeInfo *framework.NodeInfo, resourceName apiv1.ResourceName, skipDaemonSetPods, skipMirrorPods bool, currentTime time.Time) (float64, error) { nodeAllocatable, found := nodeInfo.Node().Status.Allocatable[resourceName] if !found { return 0, fmt.Errorf("failed to get %v from %s", resourceName, nodeInfo.Node().Name) @@ -98,7 +98,7 @@ func CalculateUtilizationOfResource(nodeInfo *schedulerframework.NodeInfo, resou // the same with the Mirror pod. podsRequest := resource.MustParse("0") daemonSetAndMirrorPodsUtilization := resource.MustParse("0") - for _, podInfo := range nodeInfo.Pods { + for _, podInfo := range nodeInfo.Pods() { requestedResourceList := resourcehelper.PodRequests(podInfo.Pod, opts) resourceValue := requestedResourceList[resourceName] diff --git a/cluster-autoscaler/simulator/utilization/info_test.go b/cluster-autoscaler/simulator/utilization/info_test.go index fab0e59b434d..be0228da8770 100644 --- a/cluster-autoscaler/simulator/utilization/info_test.go +++ b/cluster-autoscaler/simulator/utilization/info_test.go @@ -24,10 +24,10 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/utils/gpu" . "k8s.io/autoscaler/cluster-autoscaler/utils/test" "k8s.io/kubernetes/pkg/kubelet/types" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" "github.com/stretchr/testify/assert" ) @@ -76,7 +76,7 @@ func TestCalculate(t *testing.T) { } node := BuildTestNode("node1", 2000, 2000000) SetNodeReadyState(node, true, time.Time{}) - nodeInfo := newNodeInfo(node, pod, pod, pod2) + nodeInfo := framework.NewTestNodeInfo(node, pod, pod, pod2) gpuConfig := getGpuConfigFromNode(nodeInfo.Node()) utilInfo, err := Calculate(nodeInfo, false, false, gpuConfig, testTime) @@ -85,7 +85,7 @@ func TestCalculate(t *testing.T) { assert.Equal(t, 0.1, utilInfo.CpuUtil) node2 := BuildTestNode("node2", 2000, -1) - nodeInfo = newNodeInfo(node2, pod, pod, pod2) + nodeInfo = framework.NewTestNodeInfo(node2, pod, pod, pod2) gpuConfig = getGpuConfigFromNode(nodeInfo.Node()) _, err = Calculate(nodeInfo, false, false, gpuConfig, testTime) @@ -93,7 +93,7 @@ func TestCalculate(t *testing.T) { node3 := BuildTestNode("node3", 2000, 2000000) SetNodeReadyState(node3, true, time.Time{}) - nodeInfo = newNodeInfo(node3, pod, podWithInitContainers, podWithLargeNonRestartableInitContainers) + nodeInfo = framework.NewTestNodeInfo(node3, pod, podWithInitContainers, podWithLargeNonRestartableInitContainers) gpuConfig = getGpuConfigFromNode(nodeInfo.Node()) utilInfo, err = Calculate(nodeInfo, false, false, gpuConfig, testTime) @@ -108,13 +108,13 @@ func TestCalculate(t *testing.T) { daemonSetPod4.OwnerReferences = GenerateOwnerReferences("ds", "CustomDaemonSet", "crd/v1", "") daemonSetPod4.Annotations = map[string]string{"cluster-autoscaler.kubernetes.io/daemonset-pod": "true"} - nodeInfo = newNodeInfo(node, pod, pod, pod2, daemonSetPod3, daemonSetPod4) + nodeInfo = framework.NewTestNodeInfo(node, pod, pod, pod2, daemonSetPod3, daemonSetPod4) gpuConfig = getGpuConfigFromNode(nodeInfo.Node()) utilInfo, err = Calculate(nodeInfo, true, false, gpuConfig, testTime) assert.NoError(t, err) assert.InEpsilon(t, 2.5/10, 
utilInfo.Utilization, 0.01) - nodeInfo = newNodeInfo(node, pod, pod2, daemonSetPod3) + nodeInfo = framework.NewTestNodeInfo(node, pod, pod2, daemonSetPod3) gpuConfig = getGpuConfigFromNode(nodeInfo.Node()) utilInfo, err = Calculate(nodeInfo, false, false, gpuConfig, testTime) assert.NoError(t, err) @@ -122,7 +122,7 @@ func TestCalculate(t *testing.T) { terminatedPod := BuildTestPod("podTerminated", 100, 200000) terminatedPod.DeletionTimestamp = &metav1.Time{Time: testTime.Add(-10 * time.Minute)} - nodeInfo = newNodeInfo(node, pod, pod, pod2, terminatedPod) + nodeInfo = framework.NewTestNodeInfo(node, pod, pod, pod2, terminatedPod) gpuConfig = getGpuConfigFromNode(nodeInfo.Node()) utilInfo, err = Calculate(nodeInfo, false, false, gpuConfig, testTime) assert.NoError(t, err) @@ -133,19 +133,19 @@ func TestCalculate(t *testing.T) { types.ConfigMirrorAnnotationKey: "", } - nodeInfo = newNodeInfo(node, pod, pod, pod2, mirrorPod) + nodeInfo = framework.NewTestNodeInfo(node, pod, pod, pod2, mirrorPod) gpuConfig = getGpuConfigFromNode(nodeInfo.Node()) utilInfo, err = Calculate(nodeInfo, false, true, gpuConfig, testTime) assert.NoError(t, err) assert.InEpsilon(t, 2.0/9.0, utilInfo.Utilization, 0.01) - nodeInfo = newNodeInfo(node, pod, pod2, mirrorPod) + nodeInfo = framework.NewTestNodeInfo(node, pod, pod2, mirrorPod) gpuConfig = getGpuConfigFromNode(nodeInfo.Node()) utilInfo, err = Calculate(nodeInfo, false, false, gpuConfig, testTime) assert.NoError(t, err) assert.InEpsilon(t, 2.0/10, utilInfo.Utilization, 0.01) - nodeInfo = newNodeInfo(node, pod, mirrorPod, daemonSetPod3) + nodeInfo = framework.NewTestNodeInfo(node, pod, mirrorPod, daemonSetPod3) gpuConfig = getGpuConfigFromNode(nodeInfo.Node()) utilInfo, err = Calculate(nodeInfo, true, true, gpuConfig, testTime) assert.NoError(t, err) @@ -156,7 +156,7 @@ func TestCalculate(t *testing.T) { gpuPod := BuildTestPod("gpu_pod", 100, 200000) RequestGpuForPod(gpuPod, 1) TolerateGpuForPod(gpuPod) - nodeInfo = newNodeInfo(gpuNode, pod, pod, gpuPod) + nodeInfo = framework.NewTestNodeInfo(gpuNode, pod, pod, gpuPod) gpuConfig = getGpuConfigFromNode(nodeInfo.Node()) utilInfo, err = Calculate(nodeInfo, false, false, gpuConfig, testTime) assert.NoError(t, err) @@ -165,27 +165,13 @@ func TestCalculate(t *testing.T) { // Node with Unready GPU gpuNode = BuildTestNode("gpu_node", 2000, 2000000) AddGpuLabelToNode(gpuNode) - nodeInfo = newNodeInfo(gpuNode, pod, pod) + nodeInfo = framework.NewTestNodeInfo(gpuNode, pod, pod) gpuConfig = getGpuConfigFromNode(nodeInfo.Node()) utilInfo, err = Calculate(nodeInfo, false, false, gpuConfig, testTime) assert.NoError(t, err) assert.Zero(t, utilInfo.Utilization) } -func nodeInfos(nodes []*apiv1.Node) []*schedulerframework.NodeInfo { - result := make([]*schedulerframework.NodeInfo, len(nodes)) - for i, node := range nodes { - result[i] = newNodeInfo(node) - } - return result -} - -func newNodeInfo(node *apiv1.Node, pods ...*apiv1.Pod) *schedulerframework.NodeInfo { - ni := schedulerframework.NewNodeInfo(pods...) 
- ni.SetNode(node) - return ni -} - func getGpuConfigFromNode(node *apiv1.Node) *cloudprovider.GpuConfig { gpuLabel := "cloud.google.com/gke-accelerator" gpuType, hasGpuLabel := node.Labels[gpuLabel] diff --git a/cluster-autoscaler/utils/backoff/backoff.go b/cluster-autoscaler/utils/backoff/backoff.go index a4409d2f99ca..362cf2508145 100644 --- a/cluster-autoscaler/utils/backoff/backoff.go +++ b/cluster-autoscaler/utils/backoff/backoff.go @@ -20,7 +20,7 @@ import ( "time" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" ) // Status contains information about back off status. @@ -32,11 +32,11 @@ type Status struct { // Backoff allows time-based backing off of node groups considered in scale up algorithm type Backoff interface { // Backoff execution for the given node group. Returns time till execution is backed off. - Backoff(nodeGroup cloudprovider.NodeGroup, nodeInfo *schedulerframework.NodeInfo, errorInfo cloudprovider.InstanceErrorInfo, currentTime time.Time) time.Time + Backoff(nodeGroup cloudprovider.NodeGroup, nodeInfo *framework.NodeInfo, errorInfo cloudprovider.InstanceErrorInfo, currentTime time.Time) time.Time // BackoffStatus returns whether the execution is backed off for the given node group and error info when the node group is backed off. - BackoffStatus(nodeGroup cloudprovider.NodeGroup, nodeInfo *schedulerframework.NodeInfo, currentTime time.Time) Status + BackoffStatus(nodeGroup cloudprovider.NodeGroup, nodeInfo *framework.NodeInfo, currentTime time.Time) Status // RemoveBackoff removes backoff data for the given node group. - RemoveBackoff(nodeGroup cloudprovider.NodeGroup, nodeInfo *schedulerframework.NodeInfo) + RemoveBackoff(nodeGroup cloudprovider.NodeGroup, nodeInfo *framework.NodeInfo) // RemoveStaleBackoffData removes stale backoff data. RemoveStaleBackoffData(currentTime time.Time) } diff --git a/cluster-autoscaler/utils/backoff/exponential_backoff.go b/cluster-autoscaler/utils/backoff/exponential_backoff.go index a65b9c323dd1..860f26ef7c74 100644 --- a/cluster-autoscaler/utils/backoff/exponential_backoff.go +++ b/cluster-autoscaler/utils/backoff/exponential_backoff.go @@ -20,8 +20,7 @@ import ( "time" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" - - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" ) // Backoff handles backing off executions. @@ -67,7 +66,7 @@ func NewIdBasedExponentialBackoff(initialBackoffDuration time.Duration, maxBacko } // Backoff execution for the given node group. Returns time till execution is backed off. -func (b *exponentialBackoff) Backoff(nodeGroup cloudprovider.NodeGroup, nodeInfo *schedulerframework.NodeInfo, errorInfo cloudprovider.InstanceErrorInfo, currentTime time.Time) time.Time { +func (b *exponentialBackoff) Backoff(nodeGroup cloudprovider.NodeGroup, nodeInfo *framework.NodeInfo, errorInfo cloudprovider.InstanceErrorInfo, currentTime time.Time) time.Time { duration := b.initialBackoffDuration key := b.nodeGroupKey(nodeGroup) if backoffInfo, found := b.backoffInfo[key]; found { @@ -94,7 +93,7 @@ func (b *exponentialBackoff) Backoff(nodeGroup cloudprovider.NodeGroup, nodeInfo } // BackoffStatus returns whether the execution is backed off for the given node group and error info when the node group is backed off. 
-func (b *exponentialBackoff) BackoffStatus(nodeGroup cloudprovider.NodeGroup, nodeInfo *schedulerframework.NodeInfo, currentTime time.Time) Status { +func (b *exponentialBackoff) BackoffStatus(nodeGroup cloudprovider.NodeGroup, nodeInfo *framework.NodeInfo, currentTime time.Time) Status { backoffInfo, found := b.backoffInfo[b.nodeGroupKey(nodeGroup)] if !found || backoffInfo.backoffUntil.Before(currentTime) { return Status{IsBackedOff: false} @@ -106,7 +105,7 @@ func (b *exponentialBackoff) BackoffStatus(nodeGroup cloudprovider.NodeGroup, no } // RemoveBackoff removes backoff data for the given node group. -func (b *exponentialBackoff) RemoveBackoff(nodeGroup cloudprovider.NodeGroup, nodeInfo *schedulerframework.NodeInfo) { +func (b *exponentialBackoff) RemoveBackoff(nodeGroup cloudprovider.NodeGroup, nodeInfo *framework.NodeInfo) { delete(b.backoffInfo, b.nodeGroupKey(nodeGroup)) } diff --git a/cluster-autoscaler/utils/daemonset/daemonset.go b/cluster-autoscaler/utils/daemonset/daemonset.go index 193ea07f671a..06236ae2443c 100644 --- a/cluster-autoscaler/utils/daemonset/daemonset.go +++ b/cluster-autoscaler/utils/daemonset/daemonset.go @@ -22,8 +22,8 @@ import ( appsv1 "k8s.io/api/apps/v1" apiv1 "k8s.io/api/core/v1" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/kubernetes/pkg/controller/daemon" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) const ( @@ -33,14 +33,14 @@ const ( ) // GetDaemonSetPodsForNode returns daemonset nodes for the given pod. -func GetDaemonSetPodsForNode(nodeInfo *schedulerframework.NodeInfo, daemonsets []*appsv1.DaemonSet) ([]*apiv1.Pod, error) { - result := make([]*apiv1.Pod, 0) +func GetDaemonSetPodsForNode(nodeInfo *framework.NodeInfo, daemonsets []*appsv1.DaemonSet) ([]*framework.PodInfo, error) { + result := make([]*framework.PodInfo, 0) for _, ds := range daemonsets { shouldRun, _ := daemon.NodeShouldRunDaemonPod(nodeInfo.Node(), ds) if shouldRun { pod := daemon.NewPod(ds, nodeInfo.Node().Name) pod.Name = fmt.Sprintf("%s-pod-%d", ds.Name, rand.Int63()) - result = append(result, pod) + result = append(result, &framework.PodInfo{Pod: pod}) } } return result, nil diff --git a/cluster-autoscaler/utils/daemonset/daemonset_test.go b/cluster-autoscaler/utils/daemonset/daemonset_test.go index 18a27f04805e..b22318758ba3 100644 --- a/cluster-autoscaler/utils/daemonset/daemonset_test.go +++ b/cluster-autoscaler/utils/daemonset/daemonset_test.go @@ -21,13 +21,13 @@ import ( "testing" "time" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" . 
"k8s.io/autoscaler/cluster-autoscaler/utils/test" appsv1 "k8s.io/api/apps/v1" apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" "github.com/stretchr/testify/assert" ) @@ -35,8 +35,7 @@ import ( func TestGetDaemonSetPodsForNode(t *testing.T) { node := BuildTestNode("node", 1000, 1000) SetNodeReadyState(node, true, time.Now()) - nodeInfo := schedulerframework.NewNodeInfo() - nodeInfo.SetNode(node) + nodeInfo := framework.NewTestNodeInfo(node) ds1 := newDaemonSet("ds1", "0.1", "100M", nil) ds2 := newDaemonSet("ds2", "0.1", "100M", map[string]string{"foo": "bar"}) diff --git a/cluster-autoscaler/utils/scheduler/scheduler.go b/cluster-autoscaler/utils/scheduler/scheduler.go index cd981aa72fbf..04a6e99e7af7 100644 --- a/cluster-autoscaler/utils/scheduler/scheduler.go +++ b/cluster-autoscaler/utils/scheduler/scheduler.go @@ -24,6 +24,7 @@ import ( apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" scheduler_config "k8s.io/kubernetes/pkg/scheduler/apis/config" scheduler_scheme "k8s.io/kubernetes/pkg/scheduler/apis/config/scheme" scheduler_validation "k8s.io/kubernetes/pkg/scheduler/apis/config/validation" @@ -40,22 +41,22 @@ const ( // CreateNodeNameToInfoMap obtains a list of pods and pivots that list into a map where the keys are node names // and the values are the aggregated information for that node. Pods waiting lower priority pods preemption // (pod.Status.NominatedNodeName is set) are also added to list of pods for a node. -func CreateNodeNameToInfoMap(pods []*apiv1.Pod, nodes []*apiv1.Node) map[string]*schedulerframework.NodeInfo { - nodeNameToNodeInfo := make(map[string]*schedulerframework.NodeInfo) +func CreateNodeNameToInfoMap(pods []*apiv1.Pod, nodes []*apiv1.Node) map[string]*framework.NodeInfo { + nodeNameToNodeInfo := make(map[string]*framework.NodeInfo) for _, pod := range pods { nodeName := pod.Spec.NodeName if nodeName == "" { nodeName = pod.Status.NominatedNodeName } if _, ok := nodeNameToNodeInfo[nodeName]; !ok { - nodeNameToNodeInfo[nodeName] = schedulerframework.NewNodeInfo() + nodeNameToNodeInfo[nodeName] = framework.NewNodeInfo(nil, nil) } - nodeNameToNodeInfo[nodeName].AddPod(pod) + nodeNameToNodeInfo[nodeName].AddPod(&framework.PodInfo{Pod: pod}) } for _, node := range nodes { if _, ok := nodeNameToNodeInfo[node.Name]; !ok { - nodeNameToNodeInfo[node.Name] = schedulerframework.NewNodeInfo() + nodeNameToNodeInfo[node.Name] = framework.NewNodeInfo(nil, nil) } nodeNameToNodeInfo[node.Name].SetNode(node) } @@ -81,7 +82,7 @@ func isHugePageResourceName(name apiv1.ResourceName) bool { // DeepCopyTemplateNode copies NodeInfo object used as a template. It changes // names of UIDs of both node and pods running on it, so that copies can be used // to represent multiple nodes. 
-func DeepCopyTemplateNode(nodeTemplate *schedulerframework.NodeInfo, suffix string) *schedulerframework.NodeInfo { +func DeepCopyTemplateNode(nodeTemplate *framework.NodeInfo, suffix string) *framework.NodeInfo { node := nodeTemplate.Node().DeepCopy() node.Name = fmt.Sprintf("%s-%s", node.Name, suffix) node.UID = uuid.NewUUID() @@ -89,13 +90,12 @@ func DeepCopyTemplateNode(nodeTemplate *schedulerframework.NodeInfo, suffix stri node.Labels = make(map[string]string) } node.Labels["kubernetes.io/hostname"] = node.Name - nodeInfo := schedulerframework.NewNodeInfo() - nodeInfo.SetNode(node) - for _, podInfo := range nodeTemplate.Pods { + nodeInfo := framework.NewNodeInfo(node, nil) + for _, podInfo := range nodeTemplate.Pods() { pod := podInfo.Pod.DeepCopy() pod.Name = fmt.Sprintf("%s-%s", podInfo.Pod.Name, suffix) pod.UID = uuid.NewUUID() - nodeInfo.AddPod(pod) + nodeInfo.AddPod(&framework.PodInfo{Pod: pod}) } return nodeInfo } diff --git a/cluster-autoscaler/utils/scheduler/scheduler_test.go b/cluster-autoscaler/utils/scheduler/scheduler_test.go index 59f1aa52d92a..c92433704a6d 100644 --- a/cluster-autoscaler/utils/scheduler/scheduler_test.go +++ b/cluster-autoscaler/utils/scheduler/scheduler_test.go @@ -52,10 +52,10 @@ func TestCreateNodeNameToInfoMap(t *testing.T) { res := CreateNodeNameToInfoMap([]*apiv1.Pod{p1, p2, p3, podWaitingForPreemption}, []*apiv1.Node{n1, n2}) assert.Equal(t, 2, len(res)) - assert.Equal(t, p1, res["node1"].Pods[0].Pod) - assert.Equal(t, podWaitingForPreemption, res["node1"].Pods[1].Pod) + assert.Equal(t, p1, res["node1"].Pods()[0].Pod) + assert.Equal(t, podWaitingForPreemption, res["node1"].Pods()[1].Pod) assert.Equal(t, n1, res["node1"].Node()) - assert.Equal(t, p2, res["node2"].Pods[0].Pod) + assert.Equal(t, p2, res["node2"].Pods()[0].Pod) assert.Equal(t, n2, res["node2"].Node()) }
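Taken together, the call-site updates in this patch follow a single pattern: NodeInfo values are built and read through the autoscaler's simulator/framework wrapper instead of the scheduler package — the node is passed to the constructor, pods are wrapped in *framework.PodInfo, and Pods becomes an accessor method. Where the upstream scheduler type is still required (e.g. RunFilterPlugins), the wrapper is converted with nodeInfo.ToScheduler(), and snapshot lookups move from NodeInfos().List()/Get(name) to ListNodeInfos()/GetNodeInfo(name). Below is a minimal sketch of the new construction/read pattern, using only the calls visible in the hunks above; the package and helper names are illustrative, and the second NewNodeInfo argument is simply passed as nil, as at these call sites.

package example // illustrative package name, not part of the patch

import (
	apiv1 "k8s.io/api/core/v1"

	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

// buildNodeInfo mirrors the post-migration call sites: the node goes into the
// constructor and each pod is wrapped in a *framework.PodInfo before AddPod.
func buildNodeInfo(node *apiv1.Node, pods []*apiv1.Pod) *framework.NodeInfo {
	nodeInfo := framework.NewNodeInfo(node, nil)
	for _, pod := range pods {
		nodeInfo.AddPod(&framework.PodInfo{Pod: pod})
	}
	return nodeInfo
}

// podCount shows the accessor change: Pods is now a method on NodeInfo, not a
// struct field, so call sites switch from nodeInfo.Pods to nodeInfo.Pods().
func podCount(nodeInfo *framework.NodeInfo) int {
	return len(nodeInfo.Pods())
}

Tests build the same structure with framework.NewTestNodeInfo(node, pods...), which is why the local newNodeInfo helper in utilization/info_test.go is dropped in this patch.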