From ccac6c33f4680b4b9873bed2cd5519fa3c960a32 Mon Sep 17 00:00:00 2001
From: qiuming520
Date: Fri, 1 Mar 2024 16:52:58 +0800
Subject: [PATCH] fix: kosmos scheduler e2e cases

Signed-off-by: qiuming520
---
 docs/proposals/distributionpolicy/README.md   |  6 +++---
 ...eration.go => leafnode_tainttoleration.go} |  0
 .../framework/distribution_policy_sample.go   | 20 ++++----------------
 3 files changed, 7 insertions(+), 19 deletions(-)
 rename pkg/scheduler/lifted/plugins/leafnodetainttoleration/{knode_tainttoleration.go => leafnode_tainttoleration.go} (100%)

diff --git a/docs/proposals/distributionpolicy/README.md b/docs/proposals/distributionpolicy/README.md
index f121938bf..3d21ae262 100644
--- a/docs/proposals/distributionpolicy/README.md
+++ b/docs/proposals/distributionpolicy/README.md
@@ -28,7 +28,7 @@ The more the precise, the higher the priority. Defaults to zero which means sche
 
 **PolicyTerms**
 1. PolicyTerms is required
-2. The current node scheduling policy is divided into three nodeType (host, leaf, mix).
+2. The current node scheduling policy is divided into four nodeTypes (host, leaf, mix, adv).
 3. Advanced options will be supported in the future. Such as NodeSelector, Affinity and so on.
 
 ## Use cases
@@ -151,10 +151,10 @@ kosmos-member2-cluster-1 Ready agent 24m v1.21.5-
 kosmos-member2-cluster-2   Ready   agent   24m   v1.21.5-eki.0
 
 # Show the taint information on the virtual node
-kubectl describe node kosmos-member2-cluster-1 |grep Tai
+kubectl describe node kosmos-member2-cluster-1 |grep Taints
 Taints:             node.kubernetes.io/unreachable:NoExecute
 
-kubectl describe node kosmos-member2-cluster-2 |grep Tai
+kubectl describe node kosmos-member2-cluster-2 |grep Taints
 Taints:             node.kubernetes.io/unreachable:NoExecute
 
 # Scheduling by the kosmos-scheduler (hybrid scheduling)
diff --git a/pkg/scheduler/lifted/plugins/leafnodetainttoleration/knode_tainttoleration.go b/pkg/scheduler/lifted/plugins/leafnodetainttoleration/leafnode_tainttoleration.go
similarity index 100%
rename from pkg/scheduler/lifted/plugins/leafnodetainttoleration/knode_tainttoleration.go
rename to pkg/scheduler/lifted/plugins/leafnodetainttoleration/leafnode_tainttoleration.go
diff --git a/test/e2e/framework/distribution_policy_sample.go b/test/e2e/framework/distribution_policy_sample.go
index e69616209..d26afbc01 100644
--- a/test/e2e/framework/distribution_policy_sample.go
+++ b/test/e2e/framework/distribution_policy_sample.go
@@ -159,14 +159,8 @@ func NewClusterDistributionPolicy() *kosmosv1alpha1.ClusterDistributionPolicy {
 func CreateDistributionPolicy(client versioned.Interface, ns string, dp
 	*kosmosv1alpha1.DistributionPolicy) {
 	ginkgo.By("Creating DistributionPolicy", func() {
-		err := client.KosmosV1alpha1().DistributionPolicies(ns).Delete(context.TODO(), dp.Name, metav1.DeleteOptions{})
-		if err != nil {
-			klog.Errorf("delete old DistributionPolicy occur error :", err)
-			gomega.Expect(apierrors.IsNotFound(err)).Should(gomega.Equal(true))
-		}
-
-		_, err = client.KosmosV1alpha1().DistributionPolicies(ns).Create(context.TODO(), dp, metav1.CreateOptions{})
-		if err != nil {
+		_, err := client.KosmosV1alpha1().DistributionPolicies(ns).Create(context.TODO(), dp, metav1.CreateOptions{})
+		if err != nil && !apierrors.IsAlreadyExists(err) {
 			klog.Errorf("create DistributionPolicy occur error :", err)
 			gomega.Expect(err).Should(gomega.HaveOccurred())
 		}
@@ -175,14 +169,8 @@ func CreateClusterDistributionPolicy(client versioned.Interface, cdp
 	*kosmosv1alpha1.ClusterDistributionPolicy) {
 	ginkgo.By("Creating ClusterDistributionPolicy", func() {
-		err := client.KosmosV1alpha1().ClusterDistributionPolicies().Delete(context.TODO(), cdp.Name, metav1.DeleteOptions{})
-		if err != nil {
-			klog.Errorf("delete old ClusterDistributionPolicy occur error :", err)
-			gomega.Expect(apierrors.IsNotFound(err)).Should(gomega.Equal(true))
-		}
-
-		_, err = client.KosmosV1alpha1().ClusterDistributionPolicies().Create(context.TODO(), cdp, metav1.CreateOptions{})
-		if err != nil {
+		_, err := client.KosmosV1alpha1().ClusterDistributionPolicies().Create(context.TODO(), cdp, metav1.CreateOptions{})
+		if err != nil && !apierrors.IsAlreadyExists(err) {
 			klog.Errorf("create ClusterDistributionPolicy occur error :", err)
 			gomega.Expect(err).Should(gomega.HaveOccurred())
 		}
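
Note on the e2e change above: the old helpers deleted any existing policy (asserting the delete error, if any, was NotFound) before creating a fresh one; the patched helpers instead create the object directly and tolerate AlreadyExists, which keeps the setup idempotent across reruns. A minimal standalone sketch of that pattern follows. The helper name ensureDistributionPolicy and the exact import paths for the generated kosmos clientset are assumptions for illustration, not code from this patch; the client calls themselves mirror the diff, and klog's printf-style %v verb is used to format the error.

```go
package framework

import (
	"context"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog/v2"

	// Assumed repo-layout paths for the kosmos API types and clientset.
	kosmosv1alpha1 "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1"
	"github.com/kosmos.io/kosmos/pkg/generated/clientset/versioned"
)

// ensureDistributionPolicy (hypothetical helper) shows the idempotent-create
// pattern this patch adopts: create the policy and treat AlreadyExists as
// success, instead of deleting a possibly stale copy first.
func ensureDistributionPolicy(client versioned.Interface, ns string, dp *kosmosv1alpha1.DistributionPolicy) error {
	_, err := client.KosmosV1alpha1().DistributionPolicies(ns).Create(context.TODO(), dp, metav1.CreateOptions{})
	if apierrors.IsAlreadyExists(err) {
		// A leftover policy from an earlier run is acceptable for these cases.
		klog.Infof("DistributionPolicy %s/%s already exists, reusing it", ns, dp.Name)
		return nil
	}
	if err != nil {
		// klog.Errorf is printf-style, so the error is formatted with %v.
		klog.Errorf("create DistributionPolicy %s/%s failed: %v", ns, dp.Name, err)
	}
	return err
}
```

If a case needed this run's exact spec rather than whatever object already exists, an Update (or delete-and-retry) on AlreadyExists would be the stricter variant; the create-only form matches what the patched helpers do.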