Skip to content

Commit

Permalink
Enable golangci-lint in the unit testing phase, fix all the errors (#266)
Browse files Browse the repository at this point in the history

* Enable golangci-lint in the unit testing phase, fix all the errors

* Increase golangci-lint timeout for GHA

* Check for WaitForOutputContains error in upgrade_operator test

* Fix a mistake in the fixture file (1 -> 3 nodes) and fix new missing error checks after rebase

* Add a new Makefile target, `lint`, that runs golangci-lint; also use golangci-lint-action v3
  • Loading branch information
burmanm authored Apr 22, 2022
1 parent de9f17f commit 484cd1d
Show file tree
Hide file tree
Showing 77 changed files with 649 additions and 514 deletions.
8 changes: 4 additions & 4 deletions .github/workflows/operatorBuildAndDeploy.yml
Original file line number Diff line number Diff line change
Expand Up @@ -39,13 +39,13 @@ jobs:
restore-keys: |
${{ runner.os }}-buildx-
- name: golangci-lint
uses: golangci/golangci-lint-action@v2
uses: golangci/golangci-lint-action@v3
with:
version: latest
# Temporary, do not fail this job even if we have issues (until we've fixed them)
args: --issues-exit-code=0 --timeout=10m
# GHA requires longer timeout
args: --timeout=10m
# Optional: show only new issues if it's a pull request. The default value is `false`.
# only-new-issues: true
only-new-issues: true
skip-go-installation: true
skip-pkg-cache: true
skip-build-cache: true
Expand Down
4 changes: 4 additions & 0 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -108,6 +108,10 @@ fmt: ## Run go fmt against code.
vet: ## Run go vet against code.
go vet ./...

.PHONY: lint
lint: ## Run golangci-lint against code.
golangci-lint run ./...

test: manifests generate fmt vet envtest ## Run tests.
# Old unit tests first - these use mocked client / fakeclient
go test ./pkg/... -coverprofile cover-pkg.out
Expand Down
2 changes: 0 additions & 2 deletions apis/cassandra/v1beta1/webhook_suite_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,6 @@ import (

//+kubebuilder:scaffold:imports
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/rest"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
Expand All @@ -42,7 +41,6 @@ import (
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.

var cfg *rest.Config
var k8sClient client.Client
var testEnv *envtest.Environment
var ctx context.Context
Expand Down
6 changes: 3 additions & 3 deletions controllers/cassandra/cassandradatacenter_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -123,9 +123,9 @@ func (r *CassandraDatacenterReconciler) Reconcile(ctx context.Context, request c
}

// TODO fold this into the quiet period
twentySecs := time.Second * 20
cooldownPeriod := time.Second * 20
lastNodeStart := rc.Datacenter.Status.LastServerNodeStarted
cooldownTime := time.Until(lastNodeStart.Add(twentySecs))
cooldownTime := time.Until(lastNodeStart.Add(cooldownPeriod))

if cooldownTime > 0 {
logger.Info("Ending reconciliation early because a server node was recently started")
Expand All @@ -135,7 +135,7 @@ func (r *CassandraDatacenterReconciler) Reconcile(ctx context.Context, request c

if rc.Datacenter.Status.QuietPeriod.After(time.Now()) {
logger.Info("Ending reconciliation early because the datacenter is in a quiet period")
cooldownTime = rc.Datacenter.Status.QuietPeriod.Sub(time.Now())
cooldownTime = time.Until(rc.Datacenter.Status.QuietPeriod.Time)
secs := time.Duration(1 + int(cooldownTime.Seconds()))
return ctrl.Result{RequeueAfter: secs * time.Second}, nil
}
Expand Down
2 changes: 0 additions & 2 deletions controllers/cassandra/suite_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,6 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
Expand All @@ -38,7 +37,6 @@ import (
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.

var cfg *rest.Config
var k8sClient client.Client
var testEnv *envtest.Environment
var ctx context.Context
Expand Down
6 changes: 5 additions & 1 deletion controllers/control/cassandratask_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -244,7 +244,11 @@ func (r *CassandraTaskReconciler) Reconcile(ctx context.Context, req ctrl.Reques
return res, err
}

r.cleanupJobAnnotations(ctx, &dc, taskId)
err = r.cleanupJobAnnotations(ctx, &dc, taskId)
if err != nil {
// Not the end of the world
logger.Error(err, "Failed to cleanup job annotations from pods")
}

cassTask.Status.Active = 0
cassTask.Status.CompletionTime = &timeNow
Expand Down
10 changes: 0 additions & 10 deletions controllers/control/cassandratask_controller_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -154,16 +154,6 @@ func waitForTaskCompletion(taskKey types.NamespacedName) *api.CassandraTask {
return emptyTask
}

func verifyPodsHaveAnnotations(testNamespaceName, taskId string) {
podList := corev1.PodList{}
Expect(k8sClient.List(context.TODO(), &podList, client.InNamespace(testNamespaceName))).To(Succeed())

jobAnnotationKey := getJobAnnotationKey(taskId)
for _, pod := range podList.Items {
Expect(pod.GetAnnotations()).To(HaveKey(jobAnnotationKey))
}
}

var _ = Describe("Execute jobs against all pods", func() {
jobRunningRequeue = time.Duration(1 * time.Millisecond)
Context("Async jobs", func() {
Expand Down
2 changes: 0 additions & 2 deletions controllers/control/suite_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,6 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
Expand All @@ -39,7 +38,6 @@ import (
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.

var cfg *rest.Config
var k8sClient client.Client
var testEnv *envtest.Environment
var ctx context.Context
Expand Down
6 changes: 3 additions & 3 deletions go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,9 @@ require (
github.com/pavel-v-chernykh/keystore-go v2.1.0+incompatible
github.com/pkg/errors v0.9.1
github.com/stretchr/testify v1.7.0
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 // indirect
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211
gopkg.in/yaml.v2 v2.4.0
k8s.io/api v0.23.4
k8s.io/apimachinery v0.23.4
Expand Down Expand Up @@ -58,9 +60,7 @@ require (
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.19.1 // indirect
golang.org/x/net v0.0.0-20211209124913-491a49abca63 // indirect
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect
golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8 // indirect
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
Expand Down
3 changes: 2 additions & 1 deletion go.sum
Original file line number Diff line number Diff line change
Expand Up @@ -855,8 +855,9 @@ golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8 h1:M69LAlWZCshgp0QSzyDcSsSIejIEeuaCVpmwcKwyLMk=
golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
Expand Down
21 changes: 13 additions & 8 deletions pkg/httphelper/server_test_utils.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,8 +21,6 @@ var jobDetailsCompleted = `{"submit_time":"1638545895255","end_time":"1638545895

var jobDetailsFailed = `{"submit_time":"1638545895255","end_time":"1638545895255","id":"%s","type":"Cleanup","status":"ERROR"}`

var noJobDetails = `{}`

func mgmtApiListener() (net.Listener, error) {
mgmtApiListener, err := net.Listen("tcp", "127.0.0.1:8080")
if err != nil {
Expand Down Expand Up @@ -61,20 +59,24 @@ func FakeExecutorServerWithDetails(callDetails *CallDetails) (*httptest.Server,

if r.Method == http.MethodGet && r.RequestURI == "/api/v0/metadata/versions/features" {
w.WriteHeader(http.StatusOK)
w.Write([]byte(featuresReply))
_, err = w.Write([]byte(featuresReply))
} else if r.Method == http.MethodGet && r.URL.Path == "/api/v0/ops/executor/job" {
w.WriteHeader(http.StatusOK)
jobId := query.Get("job_id")
w.Write([]byte(fmt.Sprintf(jobDetailsCompleted, jobId)))
_, err = w.Write([]byte(fmt.Sprintf(jobDetailsCompleted, jobId)))
} else if r.Method == http.MethodPost && (r.URL.Path == "/api/v1/ops/keyspace/cleanup" || r.URL.Path == "/api/v1/ops/node/rebuild") {
w.WriteHeader(http.StatusOK)
// Write jobId
jobId++
w.Write([]byte(strconv.Itoa(jobId)))
_, err = w.Write([]byte(strconv.Itoa(jobId)))
} else {
w.WriteHeader(http.StatusNotFound)
}

if err != nil {
w.WriteHeader(http.StatusInternalServerError)
}

}))
}

Expand All @@ -91,19 +93,22 @@ func FakeExecutorServerWithDetailsFails(callDetails *CallDetails) (*httptest.Ser

if r.Method == http.MethodGet && r.RequestURI == "/api/v0/metadata/versions/features" {
w.WriteHeader(http.StatusOK)
w.Write([]byte(featuresReply))
_, err = w.Write([]byte(featuresReply))
} else if r.Method == http.MethodGet && r.URL.Path == "/api/v0/ops/executor/job" {
w.WriteHeader(http.StatusOK)
jobId := query.Get("job_id")
w.Write([]byte(fmt.Sprintf(jobDetailsFailed, jobId)))
_, err = w.Write([]byte(fmt.Sprintf(jobDetailsFailed, jobId)))
} else if r.Method == http.MethodPost && (r.URL.Path == "/api/v1/ops/keyspace/cleanup" || r.URL.Path == "/api/v1/ops/node/rebuild") {
w.WriteHeader(http.StatusOK)
// Write jobId
jobId++
w.Write([]byte(strconv.Itoa(jobId)))
_, err = w.Write([]byte(strconv.Itoa(jobId)))
} else {
w.WriteHeader(http.StatusNotFound)
}
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
}

}))
}
Expand Down
21 changes: 9 additions & 12 deletions pkg/psp/emm.go
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ type EMMTaintValue string

const (
EvacuateAllData EMMTaintValue = "drain"
PlannedDowntime = "planned-downtime"
PlannedDowntime EMMTaintValue = "planned-downtime"
)

type VolumeHealth string
Expand Down Expand Up @@ -351,13 +351,10 @@ func (impl *EMMServiceImpl) removeNextPodFromEvacuateDataNode() (bool, error) {
return false, err
}

for name, _ := range nodeNameSet {
for name := range nodeNameSet {
for _, pod := range impl.getPodsForNodeName(name) {
err := impl.RemovePod(pod)
if err != nil {
return false, err
}
return true, nil
return err == nil, err
}
}

Expand All @@ -366,7 +363,7 @@ func (impl *EMMServiceImpl) removeNextPodFromEvacuateDataNode() (bool, error) {

func (impl *EMMServiceImpl) removeAllPodsFromOnePlannedDowntimeNode() (bool, error) {
nodeNameSet, err := impl.getPlannedDownTimeNodeNameSet()
for nodeName, _ := range nodeNameSet {
for nodeName := range nodeNameSet {
pods := impl.getPodsForNodeName(nodeName)
if len(pods) > 0 {
for _, pod := range pods {
Expand Down Expand Up @@ -421,7 +418,7 @@ func (impl *EMMServiceImpl) getRacksWithNotReadyPodsBootstrapped() []string {
pods := impl.GetNotReadyPodsBootstrappedInDC()
rackNameSet := getPodsRackNameSet(pods)
rackNames := []string{}
for rackName, _ := range rackNameSet {
for rackName := range rackNameSet {
rackNames = append(rackNames, rackName)
}
return rackNames
Expand Down Expand Up @@ -564,7 +561,7 @@ func checkNodeEMM(provider EMMService) result.ReconcileResult {
// as we are unlikely to be able to carry them out successfully.
if provider.IsStopped() {
didUpdate := false
for nodeName, _ := range evacuateDataNodeNameSet {
for nodeName := range evacuateDataNodeNameSet {
podsUpdated, err := provider.failEMM(nodeName, GenericFailure)
if err != nil {
return result.Error(err)
Expand Down Expand Up @@ -595,7 +592,7 @@ func checkNodeEMM(provider EMMService) result.ReconcileResult {
if len(racksWithDownPods) > 1 {
allTaintedNameSet := utils.UnionStringSet(plannedDownNodeNameSet, evacuateDataNodeNameSet)
didUpdate := false
for nodeName, _ := range allTaintedNameSet {
for nodeName := range allTaintedNameSet {
didUpdatePods, err := provider.failEMM(nodeName, TooManyExistingFailures)
if err != nil {
return result.Error(err)
Expand Down Expand Up @@ -629,7 +626,7 @@ func checkNodeEMM(provider EMMService) result.ReconcileResult {

if len(nodeNameSetForNoPodsInRack) > 0 {
didUpdate := false
for nodeName, _ := range nodeNameSetForNoPodsInRack {
for nodeName := range nodeNameSetForNoPodsInRack {
podsUpdated, err := provider.failEMM(nodeName, TooManyExistingFailures)
if err != nil {
return result.Error(err)
Expand Down Expand Up @@ -781,7 +778,7 @@ func checkPVCHealth(provider EMMService) result.ReconcileResult {
return result.Continue()
}

for podName, _ := range podNameSetWithInaccessible {
for podName := range podNameSetWithInaccessible {
err := provider.startNodeReplace(podName)
if err != nil {
logger.Error(err, "Failed to start node replacement for pod with inaccessible PVC", "pod", podName)
Expand Down
5 changes: 2 additions & 3 deletions pkg/psp/emm_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -514,7 +514,7 @@ func evacuateDataNode(name string) *corev1.Node {
node := &corev1.Node{}
node.Name = name
node.Spec.Taints = []corev1.Taint{
corev1.Taint{
{
Key: "node.vmware.com/drain",
Value: "drain",
Effect: corev1.TaintEffectNoSchedule,
Expand All @@ -525,9 +525,8 @@ func evacuateDataNode(name string) *corev1.Node {

func Test_removeAllNotReadyPodsOnEMMNodes(t *testing.T) {
var service *EMMServiceImpl
var testObj *MockEMMSPI

testObj = &MockEMMSPI{}
testObj := &MockEMMSPI{}
service = &EMMServiceImpl{
EMMSPI: testObj,
}
Expand Down
2 changes: 1 addition & 1 deletion pkg/psp/labels_annotations.go
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@ func GetExtensionID() string {
}

func AddStatefulSetChanges(dc *api.CassandraDatacenter, statefulSet *appsv1.StatefulSet) *appsv1.StatefulSet {
for i, _ := range statefulSet.Spec.VolumeClaimTemplates {
for i := range statefulSet.Spec.VolumeClaimTemplates {
cvt := &statefulSet.Spec.VolumeClaimTemplates[i]
addLabels(dc.Name, cvt)
}
Expand Down
14 changes: 9 additions & 5 deletions pkg/psp/psp_status.go
Original file line number Diff line number Diff line change
Expand Up @@ -144,15 +144,15 @@ func instanceHealthSchema() map[string]interface{} {
return map[string]interface{}{
"name": "instanceHealth",
"fields": []map[string]interface{}{
map[string]interface{}{
{
"name": "instance",
"type": "string",
},
map[string]interface{}{
{
"name": "namespace",
"type": "string",
},
map[string]interface{}{
{
"name": "health",
"type": "string",
},
Expand Down Expand Up @@ -210,7 +210,7 @@ func updateHealth(health *Health, dc api.CassandraDatacenter) {
health.Spec = buildSchema()
instanceHealths := health.Status.InstanceHealth
index := -1
for i, _ := range instanceHealths {
for i := range instanceHealths {
if instanceHealths[i].Instance == dc.Name && instanceHealths[i].Namespace == dc.Namespace {
index = i
break
Expand Down Expand Up @@ -239,7 +239,11 @@ func loadHealthFromConfigMap(configMap *corev1.ConfigMap) (*Health, error) {

func createHealthCheckConfigMap(health *Health, healthCheckNamespacedName types.NamespacedName) *corev1.ConfigMap {
configMap := newConfigMap(healthCheckNamespacedName)
saveHealthCheckToConfigMap(health, configMap)
err := saveHealthCheckToConfigMap(health, configMap)
if err != nil {
// No point continuing, we need to redo this
return configMap
}
utils.AddHashAnnotation(configMap)
configMap.SetLabels(
utils.MergeMap(
Expand Down
2 changes: 1 addition & 1 deletion pkg/reconciliation/check_nodes.go
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ func (rc *ReconciliationContext) getPodsPVCs(pods []*corev1.Pod) ([]*corev1.Pers

func (rc *ReconciliationContext) getNodesForNameSet(nodeNameSet utils.StringSet) ([]*corev1.Node, error) {
nodes := []*corev1.Node{}
for nodeName, _ := range nodeNameSet {
for nodeName := range nodeNameSet {
if nodeName != "" {
node, err := rc.getNode(nodeName)
if err != nil {
Expand Down
Loading

0 comments on commit 484cd1d

Please sign in to comment.