diff --git a/operator/pkg/reconciliation/construct_statefulset.go b/operator/pkg/reconciliation/construct_statefulset.go
index aecacc74..e83c6530 100644
--- a/operator/pkg/reconciliation/construct_statefulset.go
+++ b/operator/pkg/reconciliation/construct_statefulset.go
@@ -7,6 +7,7 @@ package reconciliation
 
 import (
 	"fmt"
 
+	api "github.com/k8ssandra/cass-operator/operator/pkg/apis/cassandra/v1beta1"
 	"github.com/k8ssandra/cass-operator/operator/pkg/httphelper"
 	"github.com/k8ssandra/cass-operator/operator/pkg/images"
@@ -49,25 +50,6 @@ func newNamespacedNameForStatefulSet(
 	}
 }
 
-// We have to account for the fact that they might use the old managed-by label value
-// (oplabels.ManagedByLabelDefunctValue) for CassandraDatacenters originally
-// created in version 1.1.0 or earlier.
-func newStatefulSetForCassandraDatacenterWithDefunctPvcManagedBy(
-	rackName string,
-	dc *api.CassandraDatacenter,
-	replicaCount int) (*appsv1.StatefulSet, error) {
-
-	return newStatefulSetForCassandraDatacenterHelper(rackName, dc, replicaCount, true)
-}
-
-func newStatefulSetForCassandraDatacenter(
-	rackName string,
-	dc *api.CassandraDatacenter,
-	replicaCount int) (*appsv1.StatefulSet, error) {
-
-	return newStatefulSetForCassandraDatacenterHelper(rackName, dc, replicaCount, false)
-}
-
 // Check if we need to define a SecurityContext.
 // If the user defines the DockerImageRunsAsCassandra field, we trust that.
 // Otherwise if ServerType is "dse", the answer is true.
@@ -93,12 +75,12 @@ func rackNodeAffinitylabels(dc *api.CassandraDatacenter, rackName string) (map[s
 			if rack.Zone != "" {
 				if _, found := nodeAffinityLabels[zoneLabel]; found {
 					log.Error(nil,
-						"Deprecated parameter Zone is used and also defined in NodeAffinityLabels. " +
-						"You should only define it in NodeAffinityLabels")
+						"Deprecated parameter Zone is used and also defined in NodeAffinityLabels. "+
+							"You should only define it in NodeAffinityLabels")
 				}
 				nodeAffinityLabels = utils.MergeMap(
 					emptyMapIfNil(nodeAffinityLabels), map[string]string{zoneLabel: rack.Zone},
-					)
+				)
 			}
 			break
 		}
@@ -107,7 +89,11 @@ func rackNodeAffinitylabels(dc *api.CassandraDatacenter, rackName string) (map[s
 }
 
 // Create a statefulset object for the Datacenter.
-func newStatefulSetForCassandraDatacenterHelper(
+// We have to account for the fact that they might use the old managed-by label value
+// (oplabels.ManagedByLabelDefunctValue) for CassandraDatacenters originally
+// created in version 1.1.0 or earlier. Set useDefunctManagedByForPvc to true to use the old value.
+func newStatefulSetForCassandraDatacenter(
+	sts *appsv1.StatefulSet,
 	rackName string,
 	dc *api.CassandraDatacenter,
 	replicaCount int,
@@ -195,6 +181,10 @@ func newStatefulSetForCassandraDatacenterHelper(
 	}
 	result.Annotations = map[string]string{}
 
+	if sts != nil && sts.Spec.ServiceName != result.Spec.ServiceName {
+		result.Spec.ServiceName = sts.Spec.ServiceName
+	}
+
 	if utils.IsPSPEnabled() {
 		result = psp.AddStatefulSetChanges(dc, result)
 	}
diff --git a/operator/pkg/reconciliation/construct_statefulset_test.go b/operator/pkg/reconciliation/construct_statefulset_test.go
index 3104475b..1a42f0e7 100644
--- a/operator/pkg/reconciliation/construct_statefulset_test.go
+++ b/operator/pkg/reconciliation/construct_statefulset_test.go
@@ -44,7 +44,7 @@ func Test_newStatefulSetForCassandraDatacenter(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Log(tt.name)
-		got, err := newStatefulSetForCassandraDatacenter(tt.args.rackName, tt.args.dc, tt.args.replicaCount)
+		got, err := newStatefulSetForCassandraDatacenter(nil, tt.args.rackName, tt.args.dc, tt.args.replicaCount, false)
 		assert.NoError(t, err, "newStatefulSetForCassandraDatacenter should not have errored")
 		assert.NotNil(t, got, "newStatefulSetForCassandraDatacenter should not have returned a nil statefulset")
 		assert.Equal(t, map[string]string{"dedicated": "cassandra"}, got.Spec.Template.Spec.NodeSelector)
@@ -54,15 +54,15 @@ func Test_newStatefulSetForCassandraDatacenter_rackNodeAffinitylabels(t *testing.T) {
 	dc := &api.CassandraDatacenter{
 		Spec: api.CassandraDatacenterSpec{
-			ClusterName:     "bob",
-			ServerType:      "cassandra",
-			ServerVersion:   "3.11.7",
-			PodTemplateSpec: &corev1.PodTemplateSpec{},
+			ClusterName:        "bob",
+			ServerType:         "cassandra",
+			ServerVersion:      "3.11.7",
+			PodTemplateSpec:    &corev1.PodTemplateSpec{},
 			NodeAffinityLabels: map[string]string{"dclabel1": "dcvalue1", "dclabel2": "dcvalue2"},
 			Racks: []api.Rack{
 				{
-					Name: "rack1",
-					Zone: "z1",
+					Name:               "rack1",
+					Zone:               "z1",
 					NodeAffinityLabels: map[string]string{"r1label1": "r1value1", "r1label2": "r1value2"},
 				},
 			},
@@ -76,7 +76,7 @@ func Test_newStatefulSetForCassandraDatacenter_rackNodeAffinitylabels(t *testing
 	assert.NoError(t, nodeAffinityLabelsConfigurationError,
 		"should not have gotten error when getting NodeAffinitylabels of rack rack1")
 
-	expected := map[string]string {
+	expected := map[string]string{
 		"dclabel1": "dcvalue1",
 		"dclabel2": "dcvalue2",
 		"r1label1": "r1value1",
@@ -108,16 +108,16 @@ func Test_newStatefulSetForCassandraDatacenterWithAdditionalVolumes(t *testing.T
 			replicaCount: 1,
 			dc: &api.CassandraDatacenter{
 				Spec: api.CassandraDatacenterSpec{
-					ClusterName: "c1",
+					ClusterName:     "c1",
 					PodTemplateSpec: &corev1.PodTemplateSpec{
-						Spec: corev1.PodSpec {
-							InitContainers: []corev1.Container {
+						Spec: corev1.PodSpec{
+							InitContainers: []corev1.Container{
 								{
-									Name: "initContainer1",
+									Name:  "initContainer1",
 									Image: "initImage1",
 									VolumeMounts: []corev1.VolumeMount{
 										{
-											Name: "server-logs",
+											Name:      "server-logs",
 											MountPath: "/var/log/cassandra",
 										},
 									},
@@ -130,16 +130,16 @@ func Test_newStatefulSetForCassandraDatacenterWithAdditionalVolumes(t *testing.T
 							StorageClassName: &customCassandraDataStorageClass,
 						},
 						AdditionalVolumes: api.AdditionalVolumesSlice{
-							api.AdditionalVolumes {
+							api.AdditionalVolumes{
 								MountPath: "/var/log/cassandra",
-								Name: "server-logs",
+								Name:      "server-logs",
 								PVCSpec: corev1.PersistentVolumeClaimSpec{
 									StorageClassName: &customCassandraServerLogsStorageClass,
 								},
 							},
 							api.AdditionalVolumes{
 								MountPath: "/var/lib/cassandra/commitlog",
-								Name: "cassandra-commitlogs",
+								Name:      "cassandra-commitlogs",
 								PVCSpec: corev1.PersistentVolumeClaimSpec{
 									StorageClassName: &customCassandraCommitLogsStorageClass,
 								},
@@ -155,7 +155,7 @@ func Test_newStatefulSetForCassandraDatacenterWithAdditionalVolumes(t *testing.T
 	}
 	for _, tt := range tests {
 		t.Log(tt.name)
-		got, err := newStatefulSetForCassandraDatacenter(tt.args.rackName, tt.args.dc, tt.args.replicaCount)
+		got, err := newStatefulSetForCassandraDatacenter(nil, tt.args.rackName, tt.args.dc, tt.args.replicaCount, false)
 		assert.NoError(t, err, "newStatefulSetForCassandraDatacenter should not have errored")
 		assert.NotNil(t, got, "newStatefulSetForCassandraDatacenter should not have returned a nil statefulset")
 
@@ -214,20 +214,20 @@ func Test_newStatefulSetForCassandraPodSecurityContext(t *testing.T) {
 		FSGroup:    int64Ptr(999),
 	}
 
-	tests := []struct{
-		name string
-		dc *api.CassandraDatacenter
+	tests := []struct {
+		name     string
+		dc       *api.CassandraDatacenter
 		expected *corev1.PodSecurityContext
-	} {
+	}{
 		{
 			name: "run cassandra as non-root user",
 			dc: &api.CassandraDatacenter{
 				Spec: api.CassandraDatacenterSpec{
-					ClusterName: clusterName,
-					ServerType: "cassandra",
-					ServerVersion: "3.11.10",
+					ClusterName:                clusterName,
+					ServerType:                 "cassandra",
+					ServerVersion:              "3.11.10",
 					DockerImageRunsAsCassandra: boolPtr(true),
-					StorageConfig: storageConfig,
+					StorageConfig:              storageConfig,
 				},
 			},
 			expected: defaultSecurityContext,
 		},
@@ -236,11 +236,11 @@ func Test_newStatefulSetForCassandraPodSecurityContext(t *testing.T) {
 			name: "run cassandra as root user",
 			dc: &api.CassandraDatacenter{
 				Spec: api.CassandraDatacenterSpec{
-					ClusterName: clusterName,
-					ServerType: "cassandra",
-					ServerVersion: "3.11.7",
+					ClusterName:                clusterName,
+					ServerType:                 "cassandra",
+					ServerVersion:              "3.11.7",
 					DockerImageRunsAsCassandra: boolPtr(false),
-					StorageConfig: storageConfig,
+					StorageConfig:              storageConfig,
 				},
 			},
 			expected: nil,
 		},
@@ -250,8 +250,8 @@ func Test_newStatefulSetForCassandraPodSecurityContext(t *testing.T) {
 			name: "run dse as non-root user",
 			dc: &api.CassandraDatacenter{
 				Spec: api.CassandraDatacenterSpec{
-					ClusterName: clusterName,
-					ServerType: "dse",
+					ClusterName:   clusterName,
+					ServerType:    "dse",
 					ServerVersion: "6.8.7",
 					StorageConfig: storageConfig,
 				},
@@ -262,58 +262,58 @@ func Test_newStatefulSetForCassandraPodSecurityContext(t *testing.T) {
 			name: "run cassandra with pod security context override",
 			dc: &api.CassandraDatacenter{
 				Spec: api.CassandraDatacenterSpec{
-					ClusterName: clusterName,
-					ServerType: "cassandra",
+					ClusterName:     clusterName,
+					ServerType:      "cassandra",
 					ServerVersion:   "3.11.10",
 					StorageConfig:   storageConfig,
 					PodTemplateSpec: &corev1.PodTemplateSpec{
 						Spec: corev1.PodSpec{
 							SecurityContext: &corev1.PodSecurityContext{
-								RunAsUser: int64Ptr(12345),
+								RunAsUser:  int64Ptr(12345),
 								RunAsGroup: int64Ptr(54321),
-								FSGroup: int64Ptr(11111),
+								FSGroup:    int64Ptr(11111),
 							},
 						},
 					},
 				},
 			},
 			expected: &corev1.PodSecurityContext{
-				RunAsUser: int64Ptr(12345),
+				RunAsUser:  int64Ptr(12345),
 				RunAsGroup: int64Ptr(54321),
-				FSGroup: int64Ptr(11111),
+				FSGroup:    int64Ptr(11111),
 			},
 		},
 		{
 			name: "run dse with pod security context override",
 			dc: &api.CassandraDatacenter{
 				Spec: api.CassandraDatacenterSpec{
-					ClusterName: clusterName,
-					ServerType: "dse",
+					ClusterName:     clusterName,
+					ServerType:      "dse",
 					ServerVersion:   "6.8.7",
 					StorageConfig:   storageConfig,
 					PodTemplateSpec: &corev1.PodTemplateSpec{
 						Spec: corev1.PodSpec{
 							SecurityContext: &corev1.PodSecurityContext{
-								RunAsUser: int64Ptr(12345),
+								RunAsUser:  int64Ptr(12345),
 								RunAsGroup: int64Ptr(54321),
-								FSGroup: int64Ptr(11111),
+								FSGroup:    int64Ptr(11111),
 							},
 						},
 					},
 				},
 			},
 			expected: &corev1.PodSecurityContext{
-				RunAsUser: int64Ptr(12345),
+				RunAsUser:  int64Ptr(12345),
 				RunAsGroup: int64Ptr(54321),
-				FSGroup: int64Ptr(11111),
+				FSGroup:    int64Ptr(11111),
 			},
 		},
 		{
 			name: "run cassandra with empty pod security context override",
 			dc: &api.CassandraDatacenter{
 				Spec: api.CassandraDatacenterSpec{
-					ClusterName: clusterName,
-					ServerType: "cassandra",
+					ClusterName:     clusterName,
+					ServerType:      "cassandra",
 					ServerVersion:   "3.11.10",
 					StorageConfig:   storageConfig,
 					PodTemplateSpec: &corev1.PodTemplateSpec{
@@ -329,8 +329,8 @@ func Test_newStatefulSetForCassandraPodSecurityContext(t *testing.T) {
 			name: "run dse with empty pod security context override",
 			dc: &api.CassandraDatacenter{
 				Spec: api.CassandraDatacenterSpec{
-					ClusterName: clusterName,
-					ServerType: "dse",
+					ClusterName:     clusterName,
+					ServerType:      "dse",
 					ServerVersion:   "6.8.7",
 					StorageConfig:   storageConfig,
 					PodTemplateSpec: &corev1.PodTemplateSpec{
@@ -345,7 +345,7 @@ func Test_newStatefulSetForCassandraPodSecurityContext(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Log(tt.name)
-		statefulSet, err := newStatefulSetForCassandraDatacenter(rack, tt.dc, replicas)
+		statefulSet, err := newStatefulSetForCassandraDatacenter(nil, rack, tt.dc, replicas, false)
 		assert.NoError(t, err, fmt.Sprintf("%s: failed to create new statefulset", tt.name))
 		assert.NotNil(t, statefulSet, fmt.Sprintf("%s: statefulset is nil", tt.name))
 
diff --git a/operator/pkg/reconciliation/reconcile_racks.go b/operator/pkg/reconciliation/reconcile_racks.go
index 82d6fead..b6409ad6 100644
--- a/operator/pkg/reconciliation/reconcile_racks.go
+++ b/operator/pkg/reconciliation/reconcile_racks.go
@@ -174,13 +174,7 @@ func (rc *ReconciliationContext) desiredStatefulSetForExistingStatefulSet(sts *a
 	// StatefulSet. Consequently, we must preserve the old labels in this case.
 	usesDefunct := usesDefunctPvcManagedByLabel(sts)
 
-	if usesDefunct {
-		desiredSts, err = newStatefulSetForCassandraDatacenterWithDefunctPvcManagedBy(rackName, dc, replicas)
-	} else {
-		desiredSts, err = newStatefulSetForCassandraDatacenter(rackName, dc, replicas)
-	}
-
-	return
+	return newStatefulSetForCassandraDatacenter(sts, rackName, dc, replicas, usesDefunct)
 }
 
 func (rc *ReconciliationContext) CheckRackPodTemplate() result.ReconcileResult {
@@ -342,7 +336,7 @@ func (rc *ReconciliationContext) CheckRackForceUpgrade() result.ReconcileResult
 
 		// have to use zero here, because each statefulset is created with no replicas
 		// in GetStatefulSetForRack()
-		desiredSts, err := newStatefulSetForCassandraDatacenter(rackName, dc, 0)
+		desiredSts, err := newStatefulSetForCassandraDatacenter(statefulSet, rackName, dc, 0, false)
 		if err != nil {
 			logger.Error(err, "error calling newStatefulSetForCassandraDatacenter")
 			return result.Error(err)
@@ -648,7 +642,7 @@ func hasPodPotentiallyBootstrapped(pod *corev1.Pod, nodeStatuses api.CassandraSt
 	// In effect, we want to know if 'nodetool status' would indicate the relevant cassandra node
 	// is part of the cluster
 
-	// Case 1: If we have a host ID for the pod, then we know it must be a member of the cluster 
+	// Case 1: If we have a host ID for the pod, then we know it must be a member of the cluster
 	// (even if the pod does not exist)
 	nodeStatus, ok := nodeStatuses[pod.Name]
 	if ok {
@@ -663,7 +657,7 @@ func hasPodPotentiallyBootstrapped(pod *corev1.Pod, nodeStatuses api.CassandraSt
 		state, ok := pod.Labels[api.CassNodeState]
 		if ok && state != stateReadyToStart {
 			return true
-		} 
+		}
 	}
 
 	return false
@@ -747,7 +741,7 @@ func allPodsBelongToSameNodeOrHaveNoNode(pods []*corev1.Pod) (string, bool) {
 		}
 	}
 
-	return nodeName, true 
+	return nodeName, true
 }
 
 // CheckRackScale loops over each statefulset and makes sure that it has the right
@@ -1370,9 +1364,11 @@ func (rc *ReconciliationContext) GetStatefulSetForRack(
 	}
 
 	desiredStatefulSet, err := newStatefulSetForCassandraDatacenter(
+		currentStatefulSet,
 		nextRack.RackName,
 		rc.Datacenter,
-		0)
+		0,
+		false)
 	if err != nil {
 		return nil, false, err
 	}
@@ -2322,7 +2318,6 @@ func (rc *ReconciliationContext) ReconcileAllRacks() (reconcile.Result, error) {
 		return recResult.Output()
 	}
 
-
 	if recResult := rc.CheckRollingRestart(); recResult.Completed() {
 		return recResult.Output()
 	}
diff --git a/operator/pkg/reconciliation/reconcile_racks_test.go b/operator/pkg/reconciliation/reconcile_racks_test.go
index 8c51a0e9..e4b42a53 100644
--- a/operator/pkg/reconciliation/reconcile_racks_test.go
+++ b/operator/pkg/reconciliation/reconcile_racks_test.go
@@ -152,9 +152,11 @@ func TestReconcileRacks_ReconcilePods(t *testing.T) {
 	)
 
 	desiredStatefulSet, err := newStatefulSetForCassandraDatacenter(
+		nil,
 		"default",
 		rc.Datacenter,
-		2)
+		2,
+		false)
 	assert.NoErrorf(t, err, "error occurred creating statefulset")
 
 	desiredStatefulSet.Spec.Replicas = &one
@@ -320,9 +322,11 @@ func TestReconcilePods(t *testing.T) {
 		Once()
 
 	statefulSet, err := newStatefulSetForCassandraDatacenter(
+		nil,
 		"default",
 		rc.Datacenter,
-		2)
+		2,
+		false)
 	assert.NoErrorf(t, err, "error occurred creating statefulset")
 
 	statefulSet.Status.Replicas = int32(1)
@@ -337,9 +341,11 @@ func TestReconcilePods_WithVolumes(t *testing.T) {
 	defer cleanupMockScr()
 
 	statefulSet, err := newStatefulSetForCassandraDatacenter(
+		nil,
 		"default",
 		rc.Datacenter,
-		2)
+		2,
+		false)
 	assert.NoErrorf(t, err, "error occurred creating statefulset")
 
 	statefulSet.Status.Replicas = int32(1)
@@ -394,9 +400,11 @@ func TestReconcileNextRack(t *testing.T) {
 	defer cleanupMockScr()
 
 	statefulSet, err := newStatefulSetForCassandraDatacenter(
+		nil,
 		"default",
 		rc.Datacenter,
-		2)
+		2,
+		false)
 	assert.NoErrorf(t, err, "error occurred creating statefulset")
 
 	err = rc.ReconcileNextRack(statefulSet)
@@ -417,9 +425,11 @@ func TestReconcileNextRack_CreateError(t *testing.T) {
 	defer cleanupMockScr()
 
 	statefulSet, err := newStatefulSetForCassandraDatacenter(
+		nil,
 		"default",
 		rc.Datacenter,
-		2)
+		2,
+		false)
 	assert.NoErrorf(t, err, "error occurred creating statefulset")
 
 	mockClient := &mocks.Client{}
@@ -493,9 +503,11 @@ func TestReconcileRacks(t *testing.T) {
 	defer cleanupMockScr()
 
 	desiredStatefulSet, err := newStatefulSetForCassandraDatacenter(
+		nil,
 		"default",
 		rc.Datacenter,
-		2)
+		2,
+		false)
 	assert.NoErrorf(t, err, "error occurred creating statefulset")
 
 	trackObjects := []runtime.Object{
@@ -565,9 +577,11 @@ func TestReconcileRacks_WaitingForReplicas(t *testing.T) {
 	defer cleanupMockScr()
 
 	desiredStatefulSet, err := newStatefulSetForCassandraDatacenter(
+		nil,
 		"default",
 		rc.Datacenter,
-		2)
+		2,
+		false)
 	assert.NoErrorf(t, err, "error occurred creating statefulset")
 
 	trackObjects := []runtime.Object{
@@ -606,9 +620,11 @@ func TestReconcileRacks_NeedMoreReplicas(t *testing.T) {
 	defer cleanupMockScr()
 
 	preExistingStatefulSet, err := newStatefulSetForCassandraDatacenter(
+		nil,
 		"default",
 		rc.Datacenter,
-		2)
+		2,
+		false)
 	assert.NoErrorf(t, err, "error occurred creating statefulset")
 
 	trackObjects := []runtime.Object{
@@ -640,9 +656,11 @@ func TestReconcileRacks_DoesntScaleDown(t *testing.T) {
 	defer cleanupMockScr()
 
 	preExistingStatefulSet, err := newStatefulSetForCassandraDatacenter(
+		nil,
 		"default",
 		rc.Datacenter,
-		2)
+		2,
+		false)
 	assert.NoErrorf(t, err, "error occurred creating statefulset")
 
 	trackObjects := []runtime.Object{
@@ -680,9 +698,11 @@ func TestReconcileRacks_NeedToPark(t *testing.T) {
 	defer cleanupMockScr()
 
 	preExistingStatefulSet, err := newStatefulSetForCassandraDatacenter(
+		nil,
 		"default",
 		rc.Datacenter,
-		3)
+		3,
+		false)
 	assert.NoErrorf(t, err, "error occurred creating statefulset")
 
 	trackObjects := []runtime.Object{
@@ -724,9 +744,11 @@ func TestReconcileRacks_AlreadyReconciled(t *testing.T) {
 	defer cleanupMockScr()
 
 	desiredStatefulSet, err := newStatefulSetForCassandraDatacenter(
+		nil,
 		"default",
 		rc.Datacenter,
-		2)
+		2,
+		false)
 	assert.NoErrorf(t, err, "error occurred creating statefulset")
 
 	desiredStatefulSet.Status.ReadyReplicas = 2
@@ -758,6 +780,33 @@ func TestReconcileRacks_AlreadyReconciled(t *testing.T) {
 	assert.Equal(t, reconcile.Result{}, result, "Should not requeue request")
 }
 
+func TestReconcileStatefulSet_ImmutableSpec(t *testing.T) {
+	assert := assert.New(t)
+	rc, _, cleanupMockScr := setupTest()
+	defer cleanupMockScr()
+
+	origStatefulSet, err := newStatefulSetForCassandraDatacenter(
+		nil,
+		"rack0",
+		rc.Datacenter,
+		2,
+		false)
+	assert.NoErrorf(err, "error occurred creating statefulset")
+
+	assert.NotEqual("immutable-service", origStatefulSet.Spec.ServiceName)
+	origStatefulSet.Spec.ServiceName = "immutable-service"
+
+	modifiedStatefulSet, err := newStatefulSetForCassandraDatacenter(
+		origStatefulSet,
+		"rack0",
+		rc.Datacenter,
+		2,
+		false)
+	assert.NoErrorf(err, "error occurred creating statefulset")
+
+	assert.Equal("immutable-service", modifiedStatefulSet.Spec.ServiceName)
+}
+
 func TestReconcileRacks_FirstRackAlreadyReconciled(t *testing.T) {
 	t.Skip("FIXME - Skipping this test")
 
@@ -765,17 +814,21 @@ func TestReconcileRacks_FirstRackAlreadyReconciled(t *testing.T) {
 	defer cleanupMockScr()
 
 	desiredStatefulSet, err := newStatefulSetForCassandraDatacenter(
+		nil,
 		"rack0",
 		rc.Datacenter,
-		2)
+		2,
+		false)
 	assert.NoErrorf(t, err, "error occurred creating statefulset")
 
 	desiredStatefulSet.Status.ReadyReplicas = 2
 
 	secondDesiredStatefulSet, err := newStatefulSetForCassandraDatacenter(
+		nil,
 		"rack1",
 		rc.Datacenter,
-		1)
+		1,
+		false)
 	assert.NoErrorf(t, err, "error occurred creating statefulset")
 
 	secondDesiredStatefulSet.Status.ReadyReplicas = 1
@@ -874,9 +927,11 @@ func TestReconcileRacks_UpdateConfig(t *testing.T) {
 	defer cleanupMockScr()
 
 	desiredStatefulSet, err := newStatefulSetForCassandraDatacenter(
+		nil,
 		"rack0",
 		rc.Datacenter,
-		2)
+		2,
+		false)
 	assert.NoErrorf(t, err, "error occurred creating statefulset")
 
 	desiredStatefulSet.Status.ReadyReplicas = 2
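
Reviewer note (not part of the patch itself): the two old constructors are consolidated into a single newStatefulSetForCassandraDatacenter that takes the StatefulSet currently deployed for the rack, or nil when none exists yet, plus a useDefunctManagedByForPvc flag in place of the removed newStatefulSetForCassandraDatacenterWithDefunctPvcManagedBy wrapper. Passing the current object lets the builder carry Spec.ServiceName over from it, since the Kubernetes API server rejects updates to that immutable field on an existing StatefulSet. A minimal sketch of the calling contract, with fetchCurrentStatefulSet as a hypothetical stand-in for however a caller looks up the existing object:

	// Sketch only; fetchCurrentStatefulSet is hypothetical, not part of this PR.
	current, err := fetchCurrentStatefulSet(rackName) // current may be nil if the rack is new
	if err != nil {
		return err
	}
	usesDefunct := false
	if current != nil {
		// Keep the pre-1.1.0 managed-by label value if the existing PVCs still use it.
		usesDefunct = usesDefunctPvcManagedByLabel(current)
	}
	desired, err := newStatefulSetForCassandraDatacenter(current, rackName, dc, replicas, usesDefunct)
	if err != nil {
		return err
	}
	// When current != nil, desired.Spec.ServiceName now matches the deployed value,
	// so a later Update call will not trip the StatefulSet immutability check.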