Envtesting... (#491)
* First running envtest..

Read the target mgmt-api port from the container namedPorts (a sketch follows below)

Modify cooldownPeriod to be configurable, modify the tests to allow more namespaces, and use a blackbox fake StS controller (see the timer-override sketch below)

The /start command was not using the correct BuildPodHostFromPod function

The fake mgmt-api httptest server now sets the pod status when it receives the start call (see the fake-server sketch below)

Add multi rack and multi node tests

Modify the liveness and readiness check ports

Move pkg/internal/result to internal/result; allow overriding the requeue duration calculation

Fix tests that did not set the PodIP correctly; move duration timers to suite_test in controllers

Add deletion checks

Add scale-up functionality (still somewhat broken)

Remove unused FakeServerWithSuccess

Update the tests; the ProgressState check did not work because metav1.Time only has 1s precision in Kubernetes (see the metav1.Time sketch below)

* Modify after rebase

* Fix Dockerfile after result movement
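
The bullet about reading the mgmt-api port from the container namedPorts boils down to looking the port up by name on the pod spec instead of hard-coding it. A minimal sketch of that idea, assuming a port named "mgmt-api-http" and a hypothetical helper name (neither is taken from this diff):

// Hypothetical helper: resolve the management API port from the pod's container
// ports by name rather than using a hard-coded value. The port name is an assumption.
package example

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func mgmtApiPort(pod *corev1.Pod) (int32, error) {
	for _, container := range pod.Spec.Containers {
		for _, port := range container.Ports {
			if port.Name == "mgmt-api-http" {
				return port.ContainerPort, nil
			}
		}
	}
	return 0, fmt.Errorf("no mgmt-api port found on pod %s", pod.Name)
}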
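
Since cooldownPeriod and minimumRequeueTime become package-level variables in this commit (see the controller diff below), the envtest suite can shorten them so reconciles requeue quickly; the commit message notes the duration timers moved to suite_test. A sketch of such an override, with illustrative values only:

// Hypothetical fragment for suite_test.go (same controllers package): shrink the
// timers introduced in this commit so envtest specs do not sit out the production
// cooldown. The concrete values are assumptions.
package controllers

import "time"

func useFastTestTimers() {
	cooldownPeriod = 100 * time.Millisecond
	minimumRequeueTime = time.Millisecond
}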
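
The fake mgmt-api mentioned above is, per the commit message, an httptest server that flips the pod's status once the start call arrives, so the reconciler sees the node as started without a real management API. A rough sketch of that pattern; the endpoint path and the callback shape are assumptions, not code from this commit:

// Rough sketch of a fake management API: when the lifecycle start endpoint is hit,
// invoke a callback that updates the pod status in the test cluster. The path and
// the callback are assumptions.
package example

import (
	"net/http"
	"net/http/httptest"
)

func newFakeMgmtApi(markPodStarted func()) *httptest.Server {
	return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Method == http.MethodPost && r.URL.Path == "/api/v0/lifecycle/start" {
			markPodStarted()
			w.WriteHeader(http.StatusCreated)
			return
		}
		w.WriteHeader(http.StatusOK)
	}))
}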
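
The metav1.Time issue called out above stems from serialization: metav1.Time marshals at one-second (RFC3339) precision, so sub-second timestamps do not survive a round trip through the API server, which is why the ProgressState check in the tests was unreliable. A standalone illustration:

// Standalone illustration of metav1.Time's one-second precision: nanoseconds are
// dropped on marshalling, so a timestamp and its round-tripped copy can differ.
package main

import (
	"encoding/json"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	original := metav1.NewTime(time.Now())

	data, _ := json.Marshal(original) // e.g. "2023-08-31T12:34:56Z"

	var roundTripped metav1.Time
	_ = json.Unmarshal(data, &roundTripped)

	// Prints false whenever the original carried sub-second precision.
	fmt.Println(original.Equal(&roundTripped))
}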
burmanm authored Aug 31, 2023
1 parent 3cc1fd5 commit 9a5098c
Showing 25 changed files with 828 additions and 69 deletions.
4 changes: 2 additions & 2 deletions Dockerfile
@@ -12,10 +12,10 @@ COPY go.sum go.sum
RUN go mod download

# Copy the go source
-COPY cmd/main.go cmd/main.go
+COPY cmd/ cmd/
COPY apis/ apis/
COPY pkg/ pkg/
-COPY internal/controllers/ internal/controllers/
+COPY internal/ internal/

# Build
RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -o manager cmd/main.go
12 changes: 8 additions & 4 deletions internal/controllers/cassandra/cassandradatacenter_controller.go
@@ -47,6 +47,11 @@ import (
	configv1beta1 "github.com/k8ssandra/cass-operator/apis/config/v1beta1"
)

+var (
+	cooldownPeriod     = 20 * time.Second
+	minimumRequeueTime = 500 * time.Millisecond
+)

// datastax.com groups
//+kubebuilder:rbac:groups=cassandra.datastax.com,namespace=cass-operator,resources=cassandradatacenters,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=cassandra.datastax.com,namespace=cass-operator,resources=cassandradatacenters/status,verbs=get;update;patch
@@ -117,7 +122,7 @@ func (r *CassandraDatacenterReconciler) Reconcile(ctx context.Context, request c

		// Error reading the object
		logger.Error(err, "Failed to get CassandraDatacenter.")
-		return ctrl.Result{RequeueAfter: 10 * time.Second}, err
+		return ctrl.Result{}, err
	}

	if err := rc.IsValid(rc.Datacenter); err != nil {
@@ -127,7 +132,6 @@ func (r *CassandraDatacenterReconciler) Reconcile(ctx context.Context, request c
	}

	// TODO fold this into the quiet period
-	cooldownPeriod := time.Second * 20
	lastNodeStart := rc.Datacenter.Status.LastServerNodeStarted
	cooldownTime := time.Until(lastNodeStart.Add(cooldownPeriod))

@@ -152,8 +156,8 @@ func (r *CassandraDatacenterReconciler) Reconcile(ctx context.Context, request c

	// Prevent immediate requeue
	if res.Requeue {
-		if res.RequeueAfter.Milliseconds() < 500 {
-			res.RequeueAfter = time.Duration(500 * time.Millisecond)
+		if res.RequeueAfter < minimumRequeueTime {
+			res.RequeueAfter = minimumRequeueTime
		}
	}
	return res, err
254 changes: 254 additions & 0 deletions internal/controllers/cassandra/cassandradatacenter_controller_test.go
@@ -0,0 +1,254 @@
package controllers

import (
	"context"
	"fmt"
	"math/rand"
	"time"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/utils/pointer"
	"sigs.k8s.io/controller-runtime/pkg/client"

	cassdcapi "github.com/k8ssandra/cass-operator/apis/cassandra/v1beta1"
)

var (
	testNamespaceName string
)

func clusterName() string {
	// TODO Modify when multiple clusters are needed
	return "cluster1"
}

func createDatacenter(ctx context.Context, dcName string, nodeCount, rackCount int) cassdcapi.CassandraDatacenter {
	testDc := createStubCassDc(dcName, int32(nodeCount))

	testDc.Spec.Racks = make([]cassdcapi.Rack, rackCount)
	for i := 0; i < rackCount; i++ {
		testDc.Spec.Racks[i] = cassdcapi.Rack{
			Name: fmt.Sprintf("r%d", i),
		}
	}

	Expect(k8sClient.Create(ctx, &testDc)).Should(Succeed())
	return testDc
}

func deleteDatacenter(ctx context.Context, dcName string) {
	dc := cassdcapi.CassandraDatacenter{}
	dcKey := types.NamespacedName{Name: dcName, Namespace: testNamespaceName}
	Expect(k8sClient.Get(ctx, dcKey, &dc)).To(Succeed())
	Expect(k8sClient.Delete(ctx, &dc)).To(Succeed())
}

func waitForDatacenterProgress(ctx context.Context, dcName string, state cassdcapi.ProgressState) {
	Eventually(func(g Gomega) {
		dc := cassdcapi.CassandraDatacenter{}
		key := types.NamespacedName{Namespace: testNamespaceName, Name: dcName}

		g.Expect(k8sClient.Get(ctx, key, &dc)).To(Succeed())
		g.Expect(dc.Status.CassandraOperatorProgress).To(Equal(state))
	}).WithTimeout(20 * time.Second).WithPolling(200 * time.Millisecond).WithContext(ctx).Should(Succeed())
}

func waitForDatacenterReady(ctx context.Context, dcName string) {
	waitForDatacenterProgress(ctx, dcName, cassdcapi.ProgressReady)
}

var _ = Describe("CassandraDatacenter tests", func() {
	Describe("Creating a new datacenter", func() {
		Context("Single datacenter", func() {
			BeforeEach(func() {
				testNamespaceName = fmt.Sprintf("test-cassdc-%d", rand.Int31())
				testNamespace := &corev1.Namespace{
					ObjectMeta: metav1.ObjectMeta{
						Name: testNamespaceName,
					},
				}
				Expect(k8sClient.Create(context.Background(), testNamespace)).Should(Succeed())
			})

			AfterEach(func() {
				testNamespaceDel := &corev1.Namespace{
					ObjectMeta: metav1.ObjectMeta{
						Name: testNamespaceName,
					},
				}
				Expect(k8sClient.Delete(context.TODO(), testNamespaceDel)).To(Succeed())
			})
			When("There is a single rack and a single node", func() {
				It("should end up in a Ready state", func(ctx SpecContext) {
					dcName := "dc1"

					createDatacenter(ctx, dcName, 1, 1)
					waitForDatacenterReady(ctx, dcName)

					verifyStsCount(ctx, dcName, 1, 1)
					verifyPodCount(ctx, dcName, 1)

					deleteDatacenter(ctx, dcName)
					verifyDatacenterDeleted(ctx, dcName)
				})
				It("should be able to scale up", func(ctx SpecContext) {
					dcName := "dc11"

					dc := createDatacenter(ctx, dcName, 1, 1)
					waitForDatacenterReady(ctx, dcName)

					verifyStsCount(ctx, dcName, 1, 1)
					verifyPodCount(ctx, dcName, 1)

					key := types.NamespacedName{Namespace: testNamespaceName, Name: dcName}
					Expect(k8sClient.Get(ctx, key, &dc)).To(Succeed())

					By("Updating the size to 3")
					dc.Spec.Size = 3
					Expect(k8sClient.Update(ctx, &dc)).To(Succeed())

					Eventually(func(g Gomega) {
						verifyStsCount(ctx, dcName, 1, 3)
						verifyPodCount(ctx, dcName, 3)
					})

					waitForDatacenterReady(ctx, dcName)

					deleteDatacenter(ctx, dcName)
					verifyDatacenterDeleted(ctx, dcName)
				})
			})
			When("There are multiple nodes in a single rack", func() {
				It("should end up in a Ready state", func(ctx SpecContext) {
					dcName := "dc2"

					createDatacenter(ctx, dcName, 3, 1)

					waitForDatacenterReady(ctx, dcName)

					verifyStsCount(ctx, dcName, 1, 3)
					verifyPodCount(ctx, dcName, 3)

					deleteDatacenter(ctx, dcName)
					verifyDatacenterDeleted(ctx, dcName)
				})
			})
			When("There are multiple nodes in multiple racks", func() {
				It("should end up in a Ready state", func(ctx SpecContext) {
					dcName := "dc3"

					createDatacenter(ctx, dcName, 9, 3)
					waitForDatacenterReady(ctx, dcName)

					verifyStsCount(ctx, dcName, 3, 3)
					verifyPodCount(ctx, dcName, 9)

					deleteDatacenter(ctx, dcName)
					verifyDatacenterDeleted(ctx, dcName)
				})
			})
		})
	})
})

func verifyStsCount(ctx context.Context, dcName string, rackCount, podsPerSts int) {
	Eventually(func(g Gomega) {
		stsAll := &appsv1.StatefulSetList{}
		g.Expect(k8sClient.List(ctx, stsAll, client.MatchingLabels{cassdcapi.DatacenterLabel: dcName}, client.InNamespace(testNamespaceName))).To(Succeed())
		g.Expect(len(stsAll.Items)).To(Equal(rackCount))

		for _, sts := range stsAll.Items {
			rackName := sts.Labels[cassdcapi.RackLabel]

			podList := &corev1.PodList{}
			g.Expect(k8sClient.List(ctx, podList, client.MatchingLabels{cassdcapi.DatacenterLabel: dcName, cassdcapi.RackLabel: rackName}, client.InNamespace(testNamespaceName))).To(Succeed())
			g.Expect(len(podList.Items)).To(Equal(podsPerSts))
		}
	}).Should(Succeed())
}

func verifyPodCount(ctx context.Context, dcName string, podCount int) {
	Eventually(func(g Gomega) {
		podList := &corev1.PodList{}
		g.Expect(k8sClient.List(ctx, podList, client.MatchingLabels{cassdcapi.DatacenterLabel: dcName}, client.InNamespace(testNamespaceName))).To(Succeed())
		g.Expect(len(podList.Items)).To(Equal(podCount))
	}).Should(Succeed())
}

func verifyDatacenterDeleted(ctx context.Context, dcName string) {
	Eventually(func(g Gomega) {
		// Envtest has no garbage collection, so we can only compare that the ownerReferences are correct and they would be GCed (for items which we do not remove)

		// Check that DC no longer exists
		dc := &cassdcapi.CassandraDatacenter{}
		dcKey := types.NamespacedName{Name: dcName, Namespace: testNamespaceName}
		err := k8sClient.Get(ctx, dcKey, dc)
		g.Expect(errors.IsNotFound(err)).To(BeTrue())

		// Check that services would be autodeleted
		svcList := &corev1.ServiceList{}
		g.Expect(k8sClient.List(ctx, svcList, client.MatchingLabels{cassdcapi.DatacenterLabel: dcName}, client.InNamespace(testNamespaceName))).To(Succeed())
		for _, svc := range svcList.Items {
			g.Expect(len(svc.OwnerReferences)).To(Equal(1))
			verifyOwnerReference(g, svc.OwnerReferences[0], dcName)
		}

		// Check that all StS would be autoremoved
		stsAll := &appsv1.StatefulSetList{}
		g.Expect(k8sClient.List(ctx, stsAll, client.MatchingLabels{cassdcapi.DatacenterLabel: dcName}, client.InNamespace(testNamespaceName))).To(Succeed())
		for _, sts := range stsAll.Items {
			g.Expect(len(sts.OwnerReferences)).To(Equal(1))
			verifyOwnerReference(g, sts.OwnerReferences[0], dcName)
		}

		// Check that all PVCs were removed (we remove these)
		pvcList := &corev1.PersistentVolumeClaimList{}
		g.Expect(k8sClient.List(ctx, pvcList, client.MatchingLabels{cassdcapi.DatacenterLabel: dcName}, client.InNamespace(testNamespaceName))).To(Succeed())
		for _, pvc := range pvcList.Items {
			g.Expect(pvc.GetDeletionTimestamp()).ToNot(BeNil())
		}

	}).WithTimeout(10 * time.Second).WithPolling(100 * time.Millisecond).Should(Succeed())
}

func verifyOwnerReference(g Gomega, ownerRef metav1.OwnerReference, dcName string) {
	g.Expect(ownerRef.Kind).To(Equal("CassandraDatacenter"))
	g.Expect(ownerRef.Name).To(Equal(dcName))
	g.Expect(ownerRef.APIVersion).To(Equal("cassandra.datastax.com/v1beta1"))
}

func createStubCassDc(dcName string, nodeCount int32) cassdcapi.CassandraDatacenter {
	return cassdcapi.CassandraDatacenter{
		ObjectMeta: metav1.ObjectMeta{
			Name:        dcName,
			Namespace:   testNamespaceName,
			Annotations: map[string]string{},
		},
		Spec: cassdcapi.CassandraDatacenterSpec{
			ManagementApiAuth: cassdcapi.ManagementApiAuthConfig{
				Insecure: &cassdcapi.ManagementApiAuthInsecureConfig{},
			},
			ClusterName:   clusterName(),
			ServerType:    "cassandra",
			ServerVersion: "4.0.7",
			Size:          nodeCount,
			StorageConfig: cassdcapi.StorageConfig{
				CassandraDataVolumeClaimSpec: &corev1.PersistentVolumeClaimSpec{
					StorageClassName: pointer.String("default"),
					AccessModes:      []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"},
					Resources: corev1.ResourceRequirements{
						Requests: map[corev1.ResourceName]resource.Quantity{"storage": resource.MustParse("1Gi")},
					},
				},
			},
		},
		Status: cassdcapi.CassandraDatacenterStatus{},
	}
}