
Commit

Upgrade golangci-lint to v1.59.1 and faillint to v1.13.0 (#533)
Signed-off-by: Arve Knudsen <[email protected]>
aknuds1 authored Jun 14, 2024
1 parent 7e83c3c commit 4beef46
Showing 38 changed files with 141 additions and 137 deletions.
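Most of the changes below are mechanical fixes for findings reported by the newer golangci-lint: callback parameters that are never used are dropped from the function-literal signature or replaced with the blank identifier, and a few if/else chains are flattened so that no else follows a branch that returns. A minimal sketch of the unused-parameter pattern follows; the apply helper and its signature are illustrative only and do not appear in this repository.

package main

import "fmt"

// apply is a hypothetical helper that takes a callback, used only to
// illustrate the two shapes of the fix applied throughout this commit.
func apply(f func(int, string) error) error { return f(1, "x") }

func main() {
	// Before the lint fix, callbacks named parameters they never used,
	// e.g. func(n int, s string) error { return nil }.

	// After: both parameters are unused, so the names are omitted entirely.
	_ = apply(func(int, string) error { return nil })

	// After: only the first parameter is unused, so it becomes `_`.
	_ = apply(func(_ int, s string) error {
		fmt.Println(s)
		return nil
	})
}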
4 changes: 2 additions & 2 deletions Makefile
@@ -79,10 +79,10 @@ check-protos: clean-protos protos ## Re-generates protos and git diffs them
GOPATH=$(CURDIR)/.tools go install github.com/client9/misspell/cmd/[email protected]

.tools/bin/faillint: .tools
-GOPATH=$(CURDIR)/.tools go install github.com/fatih/faillint@v1.11.0
+GOPATH=$(CURDIR)/.tools go install github.com/fatih/faillint@v1.13.0

.tools/bin/golangci-lint: .tools
-GOPATH=$(CURDIR)/.tools go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.53.2
+GOPATH=$(CURDIR)/.tools go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.59.1

.tools/bin/protoc: .tools
ifeq ("$(wildcard .tools/protoc/bin/protoc)","")
14 changes: 7 additions & 7 deletions concurrency/limited_concurrency_singleflight_test.go
@@ -34,7 +34,7 @@ func TestLimitedConcurrencySingleFlight_ForEachNotInFlight_ConcurrencyLimit(t *t
require.NoError(t, sf.ForEachNotInFlight(ctx, tokens, f))
}

-busyWorker := func(ctx context.Context, s string) error {
+busyWorker := func(context.Context, string) error {
workersToStart.Done()
<-workersWait
return nil
@@ -49,7 +49,7 @@ func TestLimitedConcurrencySingleFlight_ForEachNotInFlight_ConcurrencyLimit(t *t
workersToStart.Wait()

extraWorkerInvoked := make(chan struct{})
-go forEachNotInFlight(func(ctx context.Context, s string) error {
+go forEachNotInFlight(func(context.Context, string) error {
close(extraWorkerInvoked)
return nil
}, "10")
@@ -77,7 +77,7 @@ func TestLimitedConcurrencySingleFlight_ForEachNotInFlight_ReturnsWhenAllTokensA

workersToStart.Add(1)
go func() {
-require.NoError(t, sf.ForEachNotInFlight(ctx, []string{token}, func(ctx context.Context, s string) error {
+require.NoError(t, sf.ForEachNotInFlight(ctx, []string{token}, func(context.Context, string) error {
workersToStart.Done()
<-workersWait
return nil
@@ -87,7 +87,7 @@ func TestLimitedConcurrencySingleFlight_ForEachNotInFlight_ReturnsWhenAllTokensA
workersToStart.Wait()

duplicatedTokenInvoked := false
-require.NoError(t, sf.ForEachNotInFlight(ctx, []string{token}, func(ctx context.Context, s string) error {
+require.NoError(t, sf.ForEachNotInFlight(ctx, []string{token}, func(context.Context, string) error {
duplicatedTokenInvoked = true
return nil
}))
@@ -114,7 +114,7 @@ func TestLimitedConcurrencySingleFlight_ForEachNotInFlight_CallsOnlyNotInFlightT

workersToStart.Add(1)
go func() {
-require.NoError(t, sf.ForEachNotInFlight(ctx, []string{tokenA}, func(ctx context.Context, s string) error {
+require.NoError(t, sf.ForEachNotInFlight(ctx, []string{tokenA}, func(context.Context, string) error {
workersToStart.Done()
<-workersWait
return nil
@@ -123,7 +123,7 @@ func TestLimitedConcurrencySingleFlight_ForEachNotInFlight_CallsOnlyNotInFlightT

workersToStart.Wait()
var invocations atomic.Int64
-assert.NoError(t, sf.ForEachNotInFlight(ctx, []string{tokenA, tokenB}, func(ctx context.Context, s string) error {
+assert.NoError(t, sf.ForEachNotInFlight(ctx, []string{tokenA, tokenB}, func(_ context.Context, s string) error {
assert.Equal(t, tokenB, s)
invocations.Inc()
return nil
@@ -136,7 +136,7 @@ func TestLimitedConcurrencySingleFlight_ForEachNotInFlight_ReturnsWhenTokensAreE
t.Parallel()

var invocations atomic.Int64
-assert.NoError(t, NewLimitedConcurrencySingleFlight(10).ForEachNotInFlight(context.Background(), []string{}, func(ctx context.Context, s string) error {
+assert.NoError(t, NewLimitedConcurrencySingleFlight(10).ForEachNotInFlight(context.Background(), []string{}, func(context.Context, string) error {
invocations.Inc()
return nil
}))
28 changes: 14 additions & 14 deletions concurrency/runner_test.go
@@ -23,7 +23,7 @@ func TestForEachUser(t *testing.T) {

input := []string{"a", "b", "c"}

-err := ForEachUser(context.Background(), input, 2, func(ctx context.Context, user string) error {
+err := ForEachUser(context.Background(), input, 2, func(_ context.Context, user string) error {
processedMx.Lock()
defer processedMx.Unlock()
processed = append(processed, user)
@@ -40,7 +40,7 @@ func TestForEachUser_ShouldContinueOnErrorButReturnIt(t *testing.T) {

input := []string{"a", "b", "c"}

-err := ForEachUser(context.Background(), input, 2, func(ctx context.Context, user string) error {
+err := ForEachUser(context.Background(), input, 2, func(ctx context.Context, _ string) error {
if processed.CompareAndSwap(0, 1) {
return errors.New("the first request is failing")
}
@@ -63,7 +63,7 @@ func TestForEachUser_ShouldContinueOnErrorButReturnIt(t *testing.T) {
}

func TestForEachUser_ShouldReturnImmediatelyOnNoUsersProvided(t *testing.T) {
-require.NoError(t, ForEachUser(context.Background(), nil, 2, func(ctx context.Context, user string) error {
+require.NoError(t, ForEachUser(context.Background(), nil, 2, func(context.Context, string) error {
return nil
}))
}
@@ -72,7 +72,7 @@ func TestForEachJob(t *testing.T) {
jobs := []string{"a", "b", "c"}
processed := make([]string, len(jobs))

-err := ForEachJob(context.Background(), len(jobs), 2, func(ctx context.Context, idx int) error {
+err := ForEachJob(context.Background(), len(jobs), 2, func(_ context.Context, idx int) error {
processed[idx] = jobs[idx]
return nil
})
@@ -85,7 +85,7 @@ func TestForEachJob_ShouldBreakOnFirstError_ContextCancellationHandled(t *testin
// Keep the processed jobs count.
var processed atomic.Int32

-err := ForEachJob(context.Background(), 3, 2, func(ctx context.Context, idx int) error {
+err := ForEachJob(context.Background(), 3, 2, func(ctx context.Context, _ int) error {
if processed.CompareAndSwap(0, 1) {
return errors.New("the first request is failing")
}
@@ -116,7 +116,7 @@ func TestForEachJob_ShouldBreakOnFirstError_ContextCancellationUnhandled(t *test
var wg sync.WaitGroup
wg.Add(2)

-err := ForEachJob(context.Background(), 3, 2, func(ctx context.Context, idx int) error {
+err := ForEachJob(context.Background(), 3, 2, func(ctx context.Context, _ int) error {
wg.Done()

if processed.CompareAndSwap(0, 1) {
@@ -143,7 +143,7 @@ func TestForEachJob_ShouldBreakOnFirstError_ContextCancellationUnhandled(t *test
func TestForEachJob_ShouldReturnImmediatelyOnNoJobsProvided(t *testing.T) {
// Keep the processed jobs count.
var processed atomic.Int32
-require.NoError(t, ForEachJob(context.Background(), 0, 2, func(ctx context.Context, idx int) error {
+require.NoError(t, ForEachJob(context.Background(), 0, 2, func(context.Context, int) error {
processed.Inc()
return nil
}))
@@ -161,9 +161,9 @@ func TestForEachJob_ShouldCancelContextPassedToCallbackOnceDone(t *testing.T) {
contexts []context.Context
)

-jobFunc := func(ctx context.Context, idx int) error {
+jobFunc := func(ctx context.Context, _ int) error {
// Context should not be cancelled.
-assert.Nil(t, ctx.Err())
+require.NoError(t, ctx.Err())

contextsMx.Lock()
contexts = append(contexts, ctx)
@@ -194,7 +194,7 @@ func TestForEach(t *testing.T) {

jobs := []string{"a", "b", "c"}

-err := ForEach(context.Background(), CreateJobsFromStrings(jobs), 2, func(ctx context.Context, job interface{}) error {
+err := ForEach(context.Background(), CreateJobsFromStrings(jobs), 2, func(_ context.Context, job interface{}) error {
processedMx.Lock()
defer processedMx.Unlock()
processed = append(processed, job.(string))
@@ -213,7 +213,7 @@ func TestForEach_ShouldBreakOnFirstError_ContextCancellationHandled(t *testing.T
processed atomic.Int32
)

-err := ForEach(ctx, []interface{}{"a", "b", "c"}, 2, func(ctx context.Context, job interface{}) error {
+err := ForEach(ctx, []interface{}{"a", "b", "c"}, 2, func(ctx context.Context, _ interface{}) error {
if processed.CompareAndSwap(0, 1) {
return errors.New("the first request is failing")
}
@@ -244,7 +244,7 @@ func TestForEach_ShouldBreakOnFirstError_ContextCancellationUnhandled(t *testing
var wg sync.WaitGroup
wg.Add(2)

-err := ForEach(context.Background(), []interface{}{"a", "b", "c"}, 2, func(ctx context.Context, job interface{}) error {
+err := ForEach(context.Background(), []interface{}{"a", "b", "c"}, 2, func(ctx context.Context, _ interface{}) error {
wg.Done()

if processed.CompareAndSwap(0, 1) {
@@ -269,7 +269,7 @@ func TestForEach_ShouldBreakOnFirstError_ContextCancellationUnhandled(t *testing
}

func TestForEach_ShouldReturnImmediatelyOnNoJobsProvided(t *testing.T) {
-require.NoError(t, ForEach(context.Background(), nil, 2, func(ctx context.Context, job interface{}) error {
+require.NoError(t, ForEach(context.Background(), nil, 2, func(context.Context, interface{}) error {
return nil
}))
}
@@ -391,7 +391,7 @@ func TestForEachJobMergeResults(t *testing.T) {
close(waitBeforeReturningError)
}()

-_, err := ForEachJobMergeResults[[]string, string](context.Background(), jobs, 0, func(ctx context.Context, job []string) ([]string, error) {
+_, err := ForEachJobMergeResults[[]string, string](context.Background(), jobs, 0, func(context.Context, []string) ([]string, error) {
callbacksStarted.Done()
<-waitBeforeReturningError

2 changes: 1 addition & 1 deletion crypto/tls/test/tls_integration_test.go
@@ -102,7 +102,7 @@ func newIntegrationClientServer(
serv, err := server.New(cfg)
require.NoError(t, err)

-serv.HTTP.HandleFunc("/hello", func(w http.ResponseWriter, r *http.Request) {
+serv.HTTP.HandleFunc("/hello", func(w http.ResponseWriter, _ *http.Request) {
fmt.Fprintf(w, "OK")
})

2 changes: 1 addition & 1 deletion grpcclient/ratelimit_test.go
@@ -18,7 +18,7 @@ func TestRateLimiterFailureResultsInResourceExhaustedError(t *testing.T) {
RateLimit: 0,
}
conn := grpc.ClientConn{}
-invoker := func(currentCtx context.Context, currentMethod string, currentReq, currentRepl interface{}, currentConn *grpc.ClientConn, currentOpts ...grpc.CallOption) error {
+invoker := func(context.Context, string, interface{}, interface{}, *grpc.ClientConn, ...grpc.CallOption) error {
return nil
}

4 changes: 2 additions & 2 deletions grpcutil/health_check.go
@@ -16,7 +16,7 @@ type Check func(ctx context.Context) bool

// WithManager returns a new Check that tests if the managed services are healthy.
func WithManager(manager *services.Manager) Check {
-return func(ctx context.Context) bool {
+return func(context.Context) bool {
states := manager.ServicesByState()

// Given this is a health check endpoint for the whole instance, we should consider
@@ -33,7 +33,7 @@ func WithManager(manager *services.Manager) Check {

// WithShutdownRequested returns a new Check that returns false when shutting down.
func WithShutdownRequested(requested *atomic.Bool) Check {
-return func(ctx context.Context) bool {
+return func(context.Context) bool {
return !requested.Load()
}
}
4 changes: 2 additions & 2 deletions hedging/hedging_test.go
@@ -35,7 +35,7 @@ func TestHedging(t *testing.T) {
}
count := atomic.NewInt32(0)
client, err := Client(cfg, &http.Client{
-Transport: RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+Transport: RoundTripperFunc(func(*http.Request) (*http.Response, error) {
count.Inc()
time.Sleep(200 * time.Millisecond)
return &http.Response{
@@ -70,7 +70,7 @@ func TestHedgingRateLimit(t *testing.T) {
}
count := atomic.NewInt32(0)
client, err := Client(cfg, &http.Client{
-Transport: RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+Transport: RoundTripperFunc(func(*http.Request) (*http.Response, error) {
count.Inc()
time.Sleep(200 * time.Millisecond)
return &http.Response{
12 changes: 6 additions & 6 deletions httpgrpc/server/server_test.go
@@ -27,7 +27,7 @@ import (
)

func TestReturn4XXErrorsOption(t *testing.T) {
-handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+handler := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
_, err := fmt.Fprint(w, "test")
require.NoError(t, err)
})
@@ -69,7 +69,7 @@ func newTestServer(t *testing.T, handler http.Handler) (*testServer, error) {
}

func TestBasic(t *testing.T) {
-server, err := newTestServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+server, err := newTestServer(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
_, err := fmt.Fprint(w, "world")
require.NoError(t, err)
}))
@@ -97,7 +97,7 @@ func TestError(t *testing.T) {
stat = "not "
}
t.Run(fmt.Sprintf("test header when DoNotLogErrorHeaderKey is %spresent", stat), func(t *testing.T) {
-server, err := newTestServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+server, err := newTestServer(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
if doNotLog {
w.Header().Set(DoNotLogErrorHeaderKey, "true")
}
@@ -152,7 +152,7 @@ func TestServerHandleDoNotLogError(t *testing.T) {
errMsg := "this is an error"
for testName, testData := range testCases {
t.Run(testName, func(t *testing.T) {
-h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+h := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
if testData.doNotLogError {
w.Header().Set(DoNotLogErrorHeaderKey, "true")
}
@@ -227,7 +227,7 @@ func TestServerHandleReturn4XXErrors(t *testing.T) {
errMsg := "this is an error"
for testName, testData := range testCases {
t.Run(testName, func(t *testing.T) {
-h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+h := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
http.Error(w, errMsg, testData.errorCode)
})

@@ -351,7 +351,7 @@ func TestGrpcErrorsHaveCorrectMessage(t *testing.T) {
}
for testName, testData := range testCases {
t.Run(testName, func(t *testing.T) {
-h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+h := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
if testData.errorMessageInHeader != "" {
w.Header().Set(ErrorMessageHeaderKey, testData.errorMessageInHeader)
}
4 changes: 2 additions & 2 deletions kv/client_test.go
@@ -195,12 +195,12 @@ func TestMultipleInMemoryClient(t *testing.T) {
}, stringCodec{value: "bar"}, prometheus.NewRegistry(), logger)
require.NoError(t, err)

-require.NoError(t, foo.CAS(context.TODO(), "foo", func(in interface{}) (out interface{}, retry bool, err error) { return "foo", false, nil }))
+require.NoError(t, foo.CAS(context.TODO(), "foo", func(interface{}) (out interface{}, retry bool, err error) { return "foo", false, nil }))
fooKey, err := foo.Get(ctx, "foo")
require.NoError(t, err)
require.Equal(t, "foo", fooKey.(string))

-require.NoError(t, bar.CAS(context.TODO(), "bar", func(in interface{}) (out interface{}, retry bool, err error) { return "bar", false, nil }))
+require.NoError(t, bar.CAS(context.TODO(), "bar", func(interface{}) (out interface{}, retry bool, err error) { return "bar", false, nil }))
barKey, err := bar.Get(ctx, "bar")
require.NoError(t, err)
require.Equal(t, "bar", barKey.(string))
12 changes: 7 additions & 5 deletions kv/consul/client.go
@@ -116,7 +116,7 @@ func (c *Client) Put(ctx context.Context, key string, value interface{}) error {
return err
}

-return instrument.CollectedRequest(ctx, "Put", c.consulMetrics.consulRequestDuration, instrument.ErrorCode, func(ctx context.Context) error {
+return instrument.CollectedRequest(ctx, "Put", c.consulMetrics.consulRequestDuration, instrument.ErrorCode, func(context.Context) error {
_, err := c.kv.Put(&consul.KVPair{
Key: key,
Value: bytes,
@@ -376,16 +376,18 @@ func checkLastIndex(index, metaLastIndex uint64) (newIndex uint64, skip bool) {
// Don't just keep using index=0.
// After blocking request, returned index must be at least 1.
return 1, false
-} else if metaLastIndex < index {
+}
+if metaLastIndex < index {
// Index reset.
return 0, false
-} else if index == metaLastIndex {
+}
+if index == metaLastIndex {
// Skip if the index is the same as last time, because the key value is
// guaranteed to be the same as last time
return metaLastIndex, true
-} else {
-return metaLastIndex, false
}
+
+return metaLastIndex, false
}

func (c *Client) createRateLimiter() *rate.Limiter {
2 changes: 1 addition & 1 deletion kv/consul/client_test.go
@@ -170,7 +170,7 @@ func TestWatchKeyWithNoStartValue(t *testing.T) {
defer fn()

reported := 0
-c.WatchKey(ctx, key, func(i interface{}) bool {
+c.WatchKey(ctx, key, func(interface{}) bool {
reported++
return reported != 2
})
12 changes: 7 additions & 5 deletions kv/etcd/mock.go
@@ -234,15 +234,17 @@ func (m *mockKV) Do(_ context.Context, op clientv3.Op) (clientv3.OpResponse, err
func (m *mockKV) doInternal(op clientv3.Op) (clientv3.OpResponse, error) {
if op.IsGet() {
return m.doGet(op)
-} else if op.IsPut() {
+}
+if op.IsPut() {
return m.doPut(op)
-} else if op.IsDelete() {
+}
+if op.IsDelete() {
return m.doDelete(op)
-} else if op.IsTxn() {
+}
+if op.IsTxn() {
return m.doTxn(op)
-} else {
-panic(fmt.Sprintf("unsupported operation: %+v", op))
}
+panic(fmt.Sprintf("unsupported operation: %+v", op))
}

func (m *mockKV) doGet(op clientv3.Op) (clientv3.OpResponse, error) {
(The diff for the remaining changed files is not shown.)
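The kv/consul/client.go and kv/etcd/mock.go hunks above apply the same style fix the upgraded linter appears to enforce: when every branch of an if/else chain returns, the else keywords are dropped in favor of early returns. A small illustrative example of the before/after shape, not code from this repository:

package main

import "fmt"

// classifyBefore mirrors the old shape: every branch returns, yet the
// chain still uses else / else if, which style checks flag as redundant.
func classifyBefore(n int) string {
	if n < 0 {
		return "negative"
	} else if n == 0 {
		return "zero"
	} else {
		return "positive"
	}
}

// classifyAfter mirrors the new shape: each condition returns early and
// the final case is a plain return at the end of the function.
func classifyAfter(n int) string {
	if n < 0 {
		return "negative"
	}
	if n == 0 {
		return "zero"
	}
	return "positive"
}

func main() {
	fmt.Println(classifyBefore(-1), classifyAfter(0), classifyAfter(3))
}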
