Added test that removes PV, PVC & Pod or dbserver. [ci VERBOSE=1] [ci LONG=1] [ci TESTOPTIONS="-test.run ^TestResiliencePVDBServer$"] #117

Merged 1 commit on Apr 6, 2018
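The [ci ...] tags in the title steer the CI run for this PR. To run only this test locally against a cluster, an invocation along these lines should work; the VERBOSE and LONG environment variables mirror the CI tags and are assumptions about the test harness, not taken from the diff:

  LONG=1 VERBOSE=1 go test -timeout 30m -run '^TestResiliencePVDBServer$' ./tests/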
manifests/templates/test/rbac.yaml: 3 additions & 0 deletions
@@ -9,6 +9,9 @@ rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["list"]
- apiGroups: [""]
resources: ["pods", "services", "persistentvolumes", "persistentvolumeclaims", "secrets"]
verbs: ["*"]
- apiGroups: ["apps"]
resources: ["daemonsets"]
verbs: ["*"]
tests/resilience_test.go: 108 additions & 3 deletions
@@ -6,6 +6,8 @@ import (
"testing"
"time"

"github.com/stretchr/testify/require"

"github.com/dchest/uniuri"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -26,7 +28,7 @@ func TestResiliencePod(t *testing.T) {
	//fmt.Printf("There are %d pods in the cluster\n", len(pods.Items))

	// Prepare deployment config
-	depl := newDeployment("test-pod-resilience" + uniuri.NewLen(4))
+	depl := newDeployment("test-pod-resilience-" + uniuri.NewLen(4))
	depl.Spec.Mode = api.NewMode(api.DeploymentModeCluster)
	depl.Spec.SetDefaults(depl.GetName()) // this must be last

@@ -106,7 +108,7 @@ func TestResiliencePVC(t *testing.T) {
	ns := getNamespace(t)

	// Prepare deployment config
-	depl := newDeployment("test-pvc-resilience" + uniuri.NewLen(4))
+	depl := newDeployment("test-pvc-resilience-" + uniuri.NewLen(4))
	depl.Spec.Mode = api.NewMode(api.DeploymentModeCluster)
	depl.Spec.SetDefaults(depl.GetName()) // this must be last

@@ -181,6 +183,109 @@ func TestResiliencePVC(t *testing.T) {
	removeDeployment(c, depl.GetName(), ns)
}

// TestResiliencePVDBServer
// Tests handling of entire PVs of dbservers being removed.
func TestResiliencePVDBServer(t *testing.T) {
	longOrSkip(t)
	c := client.MustNewInCluster()
	kubecli := mustNewKubeClient(t)
	ns := getNamespace(t)

	// Prepare deployment config
	depl := newDeployment("test-pv-prmr-resi-" + uniuri.NewLen(4))
	depl.Spec.Mode = api.NewMode(api.DeploymentModeCluster)
	depl.Spec.SetDefaults(depl.GetName()) // this must be last

	// Create deployment
	apiObject, err := c.DatabaseV1alpha().ArangoDeployments(ns).Create(depl)
	if err != nil {
		t.Fatalf("Create deployment failed: %v", err)
	}

	// Wait for deployment to be ready
	if _, err = waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady()); err != nil {
		t.Fatalf("Deployment not running in time: %v", err)
	}

	// Create a database client
	ctx := context.Background()
	client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t)

	// Wait for cluster to be completely ready
	if err := waitUntilClusterHealth(client, func(h driver.ClusterHealth) error {
		return clusterHealthEqualsSpec(h, apiObject.Spec)
	}); err != nil {
		t.Fatalf("Cluster not running in expected health in time: %v", err)
	}

	// Fetch latest status so we know all member details
	apiObject, err = c.DatabaseV1alpha().ArangoDeployments(ns).Get(depl.GetName(), metav1.GetOptions{})
	if err != nil {
		t.Fatalf("Failed to get deployment: %v", err)
	}

	// Delete one pv, pvc & pod after the other
	apiObject.ForeachServerGroup(func(group api.ServerGroup, spec api.ServerGroupSpec, status *api.MemberStatusList) error {
		if group != api.ServerGroupDBServers {
			// Agents cannot be replaced with a new ID
			// Coordinators, Sync masters/workers have no persistent storage
			return nil
		}
		for i, m := range *status {
			// Only test first 2
			if i >= 2 {
				continue
			}
			// Get current pvc so we can compare UID later
			originalPVC, err := kubecli.CoreV1().PersistentVolumeClaims(ns).Get(m.PersistentVolumeClaimName, metav1.GetOptions{})
			if err != nil {
				t.Fatalf("Failed to get pvc %s: %v", m.PersistentVolumeClaimName, err)
			}
			// Get current pv
			pvName := originalPVC.Spec.VolumeName
			require.NotEmpty(t, pvName, "VolumeName of %s must be non-empty", originalPVC.GetName())
			// Delete PV
			if err := kubecli.CoreV1().PersistentVolumes().Delete(pvName, &metav1.DeleteOptions{}); err != nil {
				t.Fatalf("Failed to delete pv %s: %v", pvName, err)
			}
			// Delete PVC
			if err := kubecli.CoreV1().PersistentVolumeClaims(ns).Delete(m.PersistentVolumeClaimName, &metav1.DeleteOptions{}); err != nil {
				t.Fatalf("Failed to delete pvc %s: %v", m.PersistentVolumeClaimName, err)
			}
			// Delete Pod
			if err := kubecli.CoreV1().Pods(ns).Delete(m.PodName, &metav1.DeleteOptions{}); err != nil {
				t.Fatalf("Failed to delete pod %s: %v", m.PodName, err)
			}
			// Wait for cluster to be healthy again with the same number of
			// dbservers, but the current dbserver being replaced.
			expectedDBServerCount := apiObject.Spec.DBServers.GetCount()
			unexpectedID := m.ID
			pred := func(depl *api.ArangoDeployment) error {
				if len(depl.Status.Members.DBServers) != expectedDBServerCount {
					return maskAny(fmt.Errorf("Expected %d dbservers, got %d", expectedDBServerCount, len(depl.Status.Members.DBServers)))
				}
				if depl.Status.Members.ContainsID(unexpectedID) {
					return maskAny(fmt.Errorf("Member %s should be gone", unexpectedID))
				}
				return nil
			}
			if _, err := waitUntilDeployment(c, apiObject.GetName(), ns, pred, time.Minute*5); err != nil {
				t.Fatalf("Deployment not ready in time: %v", err)
			}
			// Wait for cluster to be completely ready
			if err := waitUntilClusterHealth(client, func(h driver.ClusterHealth) error {
				return clusterHealthEqualsSpec(h, apiObject.Spec)
			}); err != nil {
				t.Fatalf("Cluster not running in expected health in time: %v", err)
			}
		}
		return nil
	}, &apiObject.Status)

	// Cleanup
	removeDeployment(c, depl.GetName(), ns)
}

// TestResilienceService
// Tests handling of individual service deletions
func TestResilienceService(t *testing.T) {
@@ -190,7 +295,7 @@ func TestResilienceService(t *testing.T) {
	ns := getNamespace(t)

	// Prepare deployment config
-	depl := newDeployment("test-service-resilience" + uniuri.NewLen(4))
+	depl := newDeployment("test-service-resilience-" + uniuri.NewLen(4))
	depl.Spec.Mode = api.NewMode(api.DeploymentModeCluster)
	depl.Spec.SetDefaults(depl.GetName()) // this must be last

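What the new test automates can also be walked through by hand against a running cluster deployment, roughly as follows; all resource names are illustrative placeholders, the real pod/PVC names depend on the deployment:

  # Find the PVC of one dbserver and the PV it is bound to
  kubectl get pvc -n <namespace>
  kubectl get pvc <dbserver-pvc> -n <namespace> -o jsonpath='{.spec.volumeName}'
  # Delete PV, PVC and pod, in that order, as the test does
  kubectl delete pv <bound-pv>
  kubectl delete pvc <dbserver-pvc> -n <namespace>
  kubectl delete pod <dbserver-pod> -n <namespace>
  # The operator is then expected to replace the dbserver with a new member ID and a fresh PVC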