
Commit

implements clean-up logic (#64)
p0lyn0mial authored Feb 5, 2018
1 parent 0decd2e commit d16587d
Showing 1 changed file with 59 additions and 9 deletions.
68 changes: 59 additions & 9 deletions test/tools/verify/cmd/main.go
@@ -1,12 +1,12 @@
package main

import (
"errors"
"flag"
"fmt"
machineclientset "github.com/kubermatic/machine-controller/pkg/client/clientset/versioned"
machinev1alpha1 "github.com/kubermatic/machine-controller/pkg/machines/v1alpha1"
"io/ioutil"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/util/yaml"
@@ -22,6 +22,7 @@ const (
machineReadyCheckTimeout = 5 * time.Minute
)

// TODO(lukasz): put the binary under github.com/kubermatic/machine-controller/tree/master/cmd
func main() {
var manifestPath string
var parameters string
@@ -62,15 +63,15 @@ func main() {
// prepare the manifest
manifest, err := readAndModifyManifest(manifestPath, keyValuePairs)
if err != nil {
printAndDie(fmt.Sprintf("failed to prepare the manifest, due to = %v", err))
printAndDie(fmt.Sprintf("failed to prepare the manifest, due to: %v", err))
}

// act
err = verify(manifest, kubeClient, machineClient)
if err != nil {
printAndDie(fmt.Sprintf("failed to verify if a machine/node has been created, reason: \n%v", err))
printAndDie(fmt.Sprintf("failed to verify if a machine/node has been created/deleted, due to: \n%v", err))
}
fmt.Println("all good, successfully verified that a machine/node has been created within the cluster")
fmt.Println("all good, successfully verified that a machine/node has been created and then deleted")
}

func verify(manifest string, kubeClient kubernetes.Interface, machineClient machineclientset.Interface) error {
@@ -84,14 +85,22 @@ func verify(manifest string, kubeClient kubernetes.Interface, machineClient mach
}
}

err := createAndAssure(newMachine, machineClient, kubeClient)
if err != nil {
return err
}
return deleteAndAssure(newMachine, machineClient, kubeClient)
}

func createAndAssure(machine *machinev1alpha1.Machine, machineClient machineclientset.Interface, kubeClient kubernetes.Interface) error {
// we expect to find no nodes within the cluster
err := assureNodeCount(0, kubeClient)
if err != nil {
return fmt.Errorf("unable to perform the verification, incorrect cluster state detected: %v", err)
}

fmt.Printf("creating a new \"%s\" machine\n", newMachine.Name)
_, err = machineClient.MachineV1alpha1().Machines().Create(newMachine)
fmt.Printf("creating a new \"%s\" machine\n", machine.Name)
_, err = machineClient.MachineV1alpha1().Machines().Create(machine)
if err != nil {
return err
}
Expand All @@ -103,12 +112,53 @@ func verify(manifest string, kubeClient kubernetes.Interface, machineClient mach
return false, nil
})
if err != nil {
// TODO(lukasz): grab the machine's State and put it into the logs
return fmt.Errorf("failed to create the new machine, err = %v", err)
}

fmt.Printf("waiting for the node to reach the %s condition\n", v1.NodeReady)
err = wait.Poll(machineReadyCheckPeriod, machineReadyCheckTimeout, func() (bool, error) {
nodes, err := kubeClient.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
return false, nil
}
// assertion check - if true then something weird has happened
// or someone else is playing with the cluster
if len(nodes.Items) != 1 {
return false, fmt.Errorf("expected exactly one node but got %d", len(nodes.Items))
}
for _, condition := range nodes.Items[0].Status.Conditions {
if condition.Type == v1.NodeReady && condition.Status == v1.ConditionTrue {
return true, nil
}
}
return false, nil
})
if err != nil {
// TODO(lukasz): grab the machine's State and put it into the logs
return fmt.Errorf("failed waiting for the node to become ready, err = %v", err)
}
return nil
}

func deleteAndAssure(machine *machinev1alpha1.Machine, machineClient machineclientset.Interface, kubeClient kubernetes.Interface) error {
fmt.Printf("deleting the machine \"%s\"\n", machine.Name)
err := machineClient.MachineV1alpha1().Machines().Delete(machine.Name, nil)
if err != nil {
return fmt.Errorf("unable to remove machine %s, due to: %v", machine.Name, err)
}

// TODO(lukasz): implement clean-up logic
// TODO(lukasz): add dep
return errors.New("not fully implemented")
err = wait.Poll(machineReadyCheckPeriod, machineReadyCheckTimeout, func() (bool, error) {
err := assureNodeCount(0, kubeClient)
if err == nil {
return true, nil
}
return false, nil
})
if err != nil {
return fmt.Errorf("failed to delete the node, err = %v", err)
}
return nil
}

func assureNodeCount(expectedNodeCount int, kubeClient kubernetes.Interface) error {
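
The body of assureNodeCount is collapsed in the diff above. Based on its visible signature and the node-listing calls already used in this file, a minimal sketch of what it presumably does could look like the following; it reuses the fmt, metav1, and kubernetes imports from main.go, and the exact error wording is an assumption:

func assureNodeCount(expectedNodeCount int, kubeClient kubernetes.Interface) error {
	// List every node currently registered in the cluster.
	nodes, err := kubeClient.CoreV1().Nodes().List(metav1.ListOptions{})
	if err != nil {
		return err
	}
	// Treat any deviation from the expected count as an error so callers can fail fast.
	if len(nodes.Items) != expectedNodeCount {
		return fmt.Errorf("expected %d nodes but got %d", expectedNodeCount, len(nodes.Items))
	}
	return nil
}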

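Both readiness polls above carry a TODO about grabbing the machine's State and putting it into the logs when a timeout occurs. A hedged sketch of one way to do that, assuming only the clientset calls already visible in this diff plus the generated Get accessor with metav1.GetOptions; the helper name and log format are hypothetical and not part of the commit:

// logMachineState re-reads the machine and dumps it, so a timeout failure shows
// the object the controller left behind. Hypothetical helper, not in the commit.
func logMachineState(name string, machineClient machineclientset.Interface) {
	machine, err := machineClient.MachineV1alpha1().Machines().Get(name, metav1.GetOptions{})
	if err != nil {
		fmt.Printf("could not fetch machine %q for logging: %v\n", name, err)
		return
	}
	fmt.Printf("machine %q current object: %+v\n", name, machine)
}

It could be called from the two error branches right after the wait.Poll calls in createAndAssure.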