diff --git a/Gopkg.lock b/Gopkg.lock
index 129164c603..af80a8ab18 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -438,7 +438,7 @@
 
 [[projects]]
   branch = "master"
-  digest = "1:5c3adf4959d01c28bbb8bc0827d2ee23eba7cf88aacfcb95f1e078b12064955c"
+  digest = "1:61745f35032f46da3e23c6b454209c5146c0166aff19d6cbc2477a384868252a"
   name = "github.com/openshift/cluster-api-actuator-pkg"
   packages = [
     "pkg/e2e",
@@ -451,7 +451,7 @@
     "pkg/types",
   ]
   pruneopts = ""
-  revision = "acbcbdc39cb8052eaafe69f8faa575e5d991aee4"
+  revision = "f9925b22c1414c7c65704030ec11463beee95138"
 
 [[projects]]
   branch = "master"
diff --git a/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/framework/framework.go b/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/framework/framework.go
index 26ecf23ed3..1c8667ed32 100644
--- a/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/framework/framework.go
+++ b/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/framework/framework.go
@@ -436,7 +436,7 @@ func IsNodeReady(node *corev1.Node) bool {
 }
 
 func WaitUntilAllNodesAreReady(client runtimeclient.Client) error {
-	return wait.PollImmediate(1*time.Second, time.Minute, func() (bool, error) {
+	return wait.PollImmediate(1*time.Second, PoolNodesReadyTimeout, func() (bool, error) {
 		nodeList := corev1.NodeList{}
 		if err := client.List(context.TODO(), &runtimeclient.ListOptions{}, &nodeList); err != nil {
 			glog.Errorf("error querying api for nodeList object: %v, retrying...", err)
diff --git a/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/machinehealthcheck/machinehealthcheck.go b/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/machinehealthcheck/machinehealthcheck.go
index e81ae4d747..d045dd6a93 100644
--- a/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/machinehealthcheck/machinehealthcheck.go
+++ b/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/machinehealthcheck/machinehealthcheck.go
@@ -57,15 +57,9 @@ var _ = Describe("[Feature:MachineHealthCheck] MachineHealthCheck controller", f
 		client, err = e2e.LoadClient()
 		Expect(err).ToNot(HaveOccurred())
 
-		isKubemarkProvider, err := e2e.IsKubemarkProvider(client)
-		Expect(err).ToNot(HaveOccurred())
-
-		// TODO: remove once we can create or update kubemark machines
-		// that will give use possibility to make this test work
-		if isKubemarkProvider {
-			glog.V(2).Info("Can not run this tests with the 'KubeMark' provider")
-			Skip("Can not run this tests with the 'KubeMark' provider")
-		}
+		// TODO: enable once https://github.com/openshift/cluster-api-actuator-pkg/pull/61 is fixed
+		glog.V(2).Info("Skipping machine health checking test")
+		Skip("Skipping machine health checking test")
 
 		workerNodes, err := e2e.GetWorkerNodes(client)
 		Expect(err).ToNot(HaveOccurred())
@@ -118,12 +112,9 @@ var _ = Describe("[Feature:MachineHealthCheck] MachineHealthCheck controller", f
 	})
 
 	AfterEach(func() {
-		isKubemarkProvider, err := e2e.IsKubemarkProvider(client)
-		Expect(err).ToNot(HaveOccurred())
-		if isKubemarkProvider {
-			glog.V(2).Info("Can not run this tests with the 'KubeMark' provider")
-			Skip("Can not run this tests with the 'KubeMark' provider")
-		}
+		// TODO: enable once https://github.com/openshift/cluster-api-actuator-pkg/pull/61 is fixed
+		glog.V(2).Info("Skipping machine health checking test")
+		Skip("Skipping machine health checking test")
 
 		waitForWorkersToGetReady(numberOfReadyWorkers)
 		deleteMachineHealthCheck(e2e.MachineHealthCheckName)