From d037cf764a7a17704df5c87beda64bd915226fb5 Mon Sep 17 00:00:00 2001 From: Laurent Demailly Date: Wed, 20 Mar 2024 19:57:59 -0700 Subject: [PATCH 1/5] Use shared workflows --- .github/workflows/codecov.yml | 21 ---- .github/workflows/codeql-analysis.yml | 67 ---------- .github/workflows/gochecks.yml | 26 ---- .github/workflows/include.yml | 28 +++++ .github/workflows/releaser.yml | 41 ------ .gitignore | 3 + .golangci.yml | 171 -------------------------- .goreleaser.yaml | 85 ------------- 8 files changed, 31 insertions(+), 411 deletions(-) delete mode 100644 .github/workflows/codecov.yml delete mode 100644 .github/workflows/codeql-analysis.yml delete mode 100644 .github/workflows/gochecks.yml create mode 100644 .github/workflows/include.yml delete mode 100644 .github/workflows/releaser.yml create mode 100644 .gitignore delete mode 100644 .golangci.yml delete mode 100644 .goreleaser.yaml diff --git a/.github/workflows/codecov.yml b/.github/workflows/codecov.yml deleted file mode 100644 index e9a6854..0000000 --- a/.github/workflows/codecov.yml +++ /dev/null @@ -1,21 +0,0 @@ -name: "Code Coverage" - -on: - push: - branches: [ main ] - pull_request: - branches: [ main ] - -jobs: - coverage: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@master - - uses: actions/setup-go@v5 - with: - go-version: '1.21' - - name: Run test coverage - run: go test -race -coverprofile=coverage.out -covermode=atomic ./... - - uses: codecov/codecov-action@v4 - with: - files: coverage.out diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml deleted file mode 100644 index e8e4feb..0000000 --- a/.github/workflows/codeql-analysis.yml +++ /dev/null @@ -1,67 +0,0 @@ -# For most projects, this workflow file will not need changing; you simply need -# to commit it to your repository. -# -# You may wish to alter this file to override the set of languages analyzed, -# or to provide custom queries or build logic. -# -# ******** NOTE ******** -# We have attempted to detect the languages in your repository. Please check -# the `language` matrix defined below to confirm you have the correct set of -# supported CodeQL languages. -# -name: "CodeQL" - -on: - push: - branches: [ main ] - pull_request: - # The branches below must be a subset of the branches above - branches: [ main ] - schedule: - - cron: '42 20 * * 3' - -jobs: - analyze: - name: Analyze - runs-on: ubuntu-latest - permissions: - actions: read - contents: read - security-events: write - - strategy: - fail-fast: false - matrix: - language: [ 'go' ] - # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ] - # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support - - steps: - - name: Checkout repository - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # pin@v3 - - # Initializes the CodeQL tools for scanning. - - name: Initialize CodeQL - uses: github/codeql-action/init@cdcdbb579706841c47f7063dda365e292e5cad7a # pin@v2 - with: - languages: ${{ matrix.language }} - # If you wish to specify custom queries, you can do so here or in a config file. - # By default, queries listed here will override any specified in a config file. - # Prefix the list here with "+" to use these queries and those in the config file. 
- # Details on CodeQL's query packs refer to : https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs - # queries: security-extended,security-and-quality - - # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). - # If this step fails, then you should remove it and run the build manually (see below) - - name: Autobuild - uses: github/codeql-action/autobuild@cdcdbb579706841c47f7063dda365e292e5cad7a # pin@v2 - - # ℹī¸ Command-line programs to run using the OS shell. - # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun - # If the Autobuild fails above, remove it and uncomment the following three lines. - # modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance. - # - run: | - # echo "Run, Build Application using script" - # ./location_of_script_within_repo/buildscript.sh - - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@cdcdbb579706841c47f7063dda365e292e5cad7a # pin@v2 diff --git a/.github/workflows/gochecks.yml b/.github/workflows/gochecks.yml deleted file mode 100644 index 3ddf683..0000000 --- a/.github/workflows/gochecks.yml +++ /dev/null @@ -1,26 +0,0 @@ -name: go-checks - -on: - push: - branches: [main] - pull_request: - # The branches below must be a subset of the branches above - branches: [main] - -jobs: - check: - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # pin@v3 - - name: Setup Go environment - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # pin@v4 - with: - go-version: '1.21' - check-latest: true - - name: Run Vulncheck - run: | - go install golang.org/x/vuln/cmd/govulncheck@latest - govulncheck ./... 
- - name: Run golangci-lint - uses: golangci/golangci-lint-action@3cfe3a4abbb849e10058ce4af15d205b6da42804 # pin@v3 diff --git a/.github/workflows/include.yml b/.github/workflows/include.yml new file mode 100644 index 0000000..5eb5975 --- /dev/null +++ b/.github/workflows/include.yml @@ -0,0 +1,28 @@ +# Remember to change/update `description` below when copying +# this include +name: "Shared cli/server fortio workflows" +on: + push: + branches: [ main ] + tags: + # so a vX.Y.Z-test1 doesn't trigger build + - 'v[0-9]+.[0-9]+.[0-9]+' + - 'v[0-9]+.[0-9]+.[0-9]+-pre*' + pull_request: + branches: [ main ] + +jobs: + call-gochecks: + uses: fortio/workflows/.github/workflows/gochecks.yml@main + call-codecov: + uses: fortio/workflows/.github/workflows/codecov.yml@main + call-codeql: + uses: fortio/workflows/.github/workflows/codeql-analysis.yml@main + call-releaser: + uses: fortio/workflows/.github/workflows/releaser.yml@main + with: + description: "Proxy for applications aiming to dispatch messages to Slack nicely" + secrets: + GH_PAT: ${{ secrets.GH_PAT }} + DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }} + DOCKER_USER: ${{ secrets.DOCKER_USER }} diff --git a/.github/workflows/releaser.yml b/.github/workflows/releaser.yml deleted file mode 100644 index 67cc0d2..0000000 --- a/.github/workflows/releaser.yml +++ /dev/null @@ -1,41 +0,0 @@ -name: Release - -on: - push: - tags: - # so a vX.Y.Z-test1 doesn't trigger build - - 'v[0-9]+.[0-9]+.[0-9]+' - - 'v[0-9]+.[0-9]+.[0-9]+-pre*' - -# A workflow run is made up of one or more jobs that can run sequentially or in parallel -jobs: - # This workflow contains a single job called "build" - build: - # The type of runner that the job will run on - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # pin@v3 - with: - fetch-depth: 0 - - uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # pin@v1 - - uses: docker/setup-buildx-action@0d103c3126aa41d772a8362f6aa67afac040f80c # pin@v1 - - name: Set up Go - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # pin@v4 - with: - go-version: '1.21' - check-latest: true - - name: Log in to Docker Hub - uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # pin@v2 - with: - username: ${{ secrets.DOCKER_USER }} - password: ${{ secrets.DOCKER_TOKEN }} - - name: "GoReleaser Action" - uses: goreleaser/goreleaser-action@7ec5c2b0c6cdda6e8bbb49444bc797dd33d74dd8 # pin@v5.0.0 - with: - distribution: goreleaser - version: latest - args: release --clean - env: - GITHUB_TOKEN: ${{ secrets.GH_PAT }} - GOWORK: off diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..6ca8a00 --- /dev/null +++ b/.gitignore @@ -0,0 +1,3 @@ +slack-proxy +.goreleaser.yaml +.golangci.yml diff --git a/.golangci.yml b/.golangci.yml deleted file mode 100644 index e237c15..0000000 --- a/.golangci.yml +++ /dev/null @@ -1,171 +0,0 @@ -# Config for golangci-lint - -# output configuration options - -# all available settings of specific linters -linters-settings: - gocritic: - disabled-checks: - - ifElseChain - dupl: - # tokens count to trigger issue, 150 by default - threshold: 100 - exhaustive: - # indicates that switch statements are to be considered exhaustive if a - # 'default' case is present, even if all enum members aren't listed in the - # switch - default-signifies-exhaustive: false - funlen: - lines: 140 - statements: 70 - gocognit: - # minimal code complexity to report, 30 by default (but we recommend 10-20) - min-complexity: 
42 - nestif: - # minimal complexity of if statements to report, 5 by default - min-complexity: 4 - gocyclo: - # minimal code complexity to report, 30 by default (but we recommend 10-20) - min-complexity: 30 - godot: - # check all top-level comments, not only declarations - check-all: false - govet: - # report about shadowed variables - check-shadowing: true - # settings per analyzer - settings: - printf: # analyzer name, run `go tool vet help` to see all analyzers - funcs: # run `go tool vet help printf` to see available settings for `printf` analyzer - - (github.com/golangci/golangci-lint/pkg/logutils.Log).Infof - - (github.com/golangci/golangci-lint/pkg/logutils.Log).Warnf - - (github.com/golangci/golangci-lint/pkg/logutils.Log).Errorf - - (github.com/golangci/golangci-lint/pkg/logutils.Log).Fatalf - - (github.com/golangci/golangci-lint/pkg/logutils.Log).Printf - - (github.com/golangci/golangci-lint/pkg/logutils.Log).FErrf - enable-all: true - disable-all: false - lll: - # max line length, lines longer will be reported. Default is 120. - # '\t' is counted as 1 character by default, and can be changed with the tab-width option - line-length: 132 - # tab width in spaces. Default to 1. - tab-width: 1 - misspell: - # Correct spellings using locale preferences for US or UK. - # Default is to use a neutral variety of English. - # Setting locale to US will correct the British spelling of 'colour' to 'color'. - locale: US - ignore-words: - - fortio - nakedret: - # make an issue if func has more lines of code than this setting and it has naked returns; default is 30 - max-func-lines: 30 - nolintlint: - require-specific: true - whitespace: - multi-if: false # Enforces newlines (or comments) after every multi-line if statement - multi-func: false # Enforces newlines (or comments) after every multi-line function signature - gofumpt: - # Choose whether or not to use the extra rules that are disabled - # by default - extra-rules: false - - -linters: - disable: - # bad ones: - - musttag - # Deprecated ones: - - scopelint - - golint - - interfacer - - maligned - - varcheck - - structcheck - - nosnakecase - - deadcode - # Weird/bad ones: - - wsl - - nlreturn - - gochecknoinits - - gochecknoglobals - - gomnd - - testpackage - - wrapcheck - - exhaustivestruct - - tagliatelle - - nonamedreturns - - varnamelen - - exhaustruct # seems like a good idea at first but actually a pain and go does have zero values for a reason. -# TODO consider putting these back, when they stop being bugged (ifshort, wastedassign,...) - - paralleltest - - thelper - - forbidigo - - ifshort - - wastedassign - - cyclop - - forcetypeassert - - ireturn - - depguard - # TODO bis: do put that one back: - - lll - enable-all: true - disable-all: false - # Must not use fast: true in newer golangci-lint or it'll just skip a bunch of linter instead of doing caching like before (!) - fast: false - - -issues: - # Excluding configuration per-path, per-linter, per-text and per-source - exclude-rules: - # Exclude some linters from running on tests files. 
- - path: _test\.go - linters: - - gocyclo - - errcheck - - dupl - - gosec - - gochecknoinits - - gochecknoglobals - - forcetypeassert - - nosnakecase - - noctx - - # Exclude lll issues for long lines with go:generate - - linters: - - lll - source: "^//go:generate " - - linters: - - goerr113 - text: "do not define dynamic errors" - - linters: - - govet - text: "fieldalignment:" - - linters: - - godox - text: "TODO" - - linters: - - nosnakecase - text: "grpc_|_SERVING|O_" - - # Maximum issues count per one linter. Set to 0 to disable. Default is 50. - max-issues-per-linter: 0 - - # Maximum count of issues with the same text. Set to 0 to disable. Default is 3. - max-same-issues: 0 - -severity: - # Default value is empty string. - # Set the default severity for issues. If severity rules are defined and the issues - # do not match or no severity is provided to the rule this will be the default - # severity applied. Severities should match the supported severity names of the - # selected out format. - # - Code climate: https://docs.codeclimate.com/docs/issues#issue-severity - # - Checkstyle: https://checkstyle.sourceforge.io/property_types.html#severity - # - Github: https://help.github.com/en/actions/reference/workflow-commands-for-github-actions#setting-an-error-message - default-severity: error - - # The default value is false. - # If set to true severity-rules regular expressions become case sensitive. - case-sensitive: false diff --git a/.goreleaser.yaml b/.goreleaser.yaml deleted file mode 100644 index 5de6d47..0000000 --- a/.goreleaser.yaml +++ /dev/null @@ -1,85 +0,0 @@ -builds: - - env: - - CGO_ENABLED=0 - goos: - - linux - - windows - - darwin - goarch: - - amd64 - - arm64 -checksum: - name_template: "checksums.txt" -snapshot: - name_template: "{{ incpatch .Version }}-next" -changelog: - sort: asc - filters: - exclude: - - "^docs:" - - "^test:" -gomod: - proxy: true - mod: mod -dockers: - - image_templates: ["fortio/{{ .ProjectName }}:{{ .Version }}-amd64"] - use: buildx - goarch: amd64 - build_flag_templates: - - --platform=linux/amd64 - - image_templates: ["fortio/{{ .ProjectName }}:{{ .Version }}-arm64"] - use: buildx - goarch: arm64 - build_flag_templates: - - --platform=linux/arm64 -docker_manifests: -- - name_template: fortio/{{ .ProjectName }}:{{ .Version }} - image_templates: - - fortio/{{ .ProjectName }}:{{ .Version }}-amd64 - - fortio/{{ .ProjectName }}:{{ .Version }}-arm64 -- - name_template: fortio/{{ .ProjectName }}:latest - image_templates: - - fortio/{{ .ProjectName }}:{{ .Version }}-amd64 - - fortio/{{ .ProjectName }}:{{ .Version }}-arm64 -release: - prerelease: auto - mode: append -# .goreleaser.yaml -brews: - - - # GitHub/GitLab repository to push the formula to - repository: - owner: fortio - name: homebrew-tap - - # Git author used to commit to the repository. - # Defaults are shown. - commit_author: - name: goreleaserbot - email: bot@goreleaser.com - - # The project name and current git tag are used in the format string. - commit_msg_template: "Brew formula update for {{ .ProjectName }} version {{ .Tag }}" - - # Folder inside the repository to put the formula. - # Default is the root folder. - folder: Formula - - # Your app's homepage. - # Default is empty. - homepage: "https://fortio.org/" - - # Template of your app's description. - # Default is empty. - description: "Proxy for applications aiming to dispatch messages to Slack nicely" - - # SPDX identifier of your app's license. - # Default is empty. 
- license: "Apache-2.0" - - # So you can `brew test` your formula. - # Default is empty. - test: | - assert_match version.to_s, shell_output("#{bin}/{{ .ProjectName }} -version") From afa080eb363be531e001232d218f1e2fe1adda40 Mon Sep 17 00:00:00 2001 From: Laurent Demailly Date: Wed, 20 Mar 2024 20:18:29 -0700 Subject: [PATCH 2/5] somehow this repo found issue with permissions --- .github/workflows/include.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/include.yml b/.github/workflows/include.yml index 5eb5975..a606568 100644 --- a/.github/workflows/include.yml +++ b/.github/workflows/include.yml @@ -18,6 +18,10 @@ jobs: uses: fortio/workflows/.github/workflows/codecov.yml@main call-codeql: uses: fortio/workflows/.github/workflows/codeql-analysis.yml@main + permissions: + actions: read + contents: read + security-events: write call-releaser: uses: fortio/workflows/.github/workflows/releaser.yml@main with: From 8b542917059258811854c38cec4a5ffb68d4367f Mon Sep 17 00:00:00 2001 From: Laurent Demailly Date: Thu, 21 Mar 2024 00:34:37 -0700 Subject: [PATCH 3/5] lll-fixer can fix comments already, making errors 78->58 --- app.go | 30 ++++++++++++++++++++---------- app_test.go | 3 ++- server.go | 6 ++++-- server_test.go | 6 ++++-- 4 files changed, 30 insertions(+), 15 deletions(-) diff --git a/app.go b/app.go index a625d6c..1f3dca4 100644 --- a/app.go +++ b/app.go @@ -92,7 +92,8 @@ var doNotProcessChannels = map[string]time.Time{} func CheckError(err string) (retryable bool, pause bool, description string) { // Special case for channel_not_found, we don't want to retry this one right away. - // We are making it a 'soft failure' so that we don't keep retrying it for a period of time for any message that is sent to a channel that doesn't exist. + // We are making it a 'soft failure' so that we don't keep retrying it for a period of time for any message that is + // sent to a channel that doesn't exist. if err == "channel_not_found" { return true, true, "Channel not found" } @@ -124,7 +125,8 @@ func (s *SlackClient) PostMessage(request SlackPostMessageRequest, url string, t // Charset is required to remove warnings from Slack. Maybe it's nice to have it configurable. /shrug req.Header.Set("Content-Type", "application/json; charset=utf-8") - // Documentation says that you are allowed the POST the token instead, however that does simply not work. Hence why we are using the Authorization header. + // Documentation says that you are allowed the POST the token instead, however that does simply not work. Hence why we + // are using the Authorization header. req.Header.Set("Authorization", "Bearer "+token) resp, err := s.client.Do(req) @@ -166,22 +168,27 @@ func (app *App) Shutdown() { //nolint:gocognit // but could probably use a refactor. func (app *App) processQueue(ctx context.Context, maxRetries int, initialBackoff time.Duration, burst int, slackRequestRate time.Duration) { // This is the rate limiter, which will block until it is allowed to continue on r.Wait(ctx). - // I kept the rate at 1 per second, as doing more than that will cause Slack to reject the messages anyways. We can burst however. - // Do note that this is best effort, in case of failures, we will exponentially backoff and retry, which will cause the rate to be lower than 1 per second due to obvious reasons. + // I kept the rate at 1 per second, as doing more than that will cause Slack to reject the messages anyways. We can + // burst however. 
+ // Do note that this is best effort, in case of failures, we will exponentially backoff and retry, which will cause the + // rate to be lower than 1 per second due to obvious reasons. r := rate.NewLimiter(rate.Every(slackRequestRate), burst) for { select { case msg, ok := <-app.slackQueue: - // We do check if the channel is closed, but its important to note is that the channel will be closed when the queue is empty and the Shutdown() is called. - // Simply calling close(app.slackQueue) will not close the channel, it will only prevent any more messages from being sent to the channel. + // We do check if the channel is closed, but its important to note is that the channel will be closed when the queue is + // empty and the Shutdown() is called. + // Simply calling close(app.slackQueue) will not close the channel, it will only prevent any more messages from being + // sent to the channel. // Only once the channel is empty, will it be closed. if !ok { return } log.S(log.Debug, "Got message from queue", log.Any("message", msg)) - // Rate limiter was initially before fetching a message from the queue, but that caused problems by indefinitely looping even if there was no message in the queue. + // Rate limiter was initially before fetching a message from the queue, but that caused problems by indefinitely + // looping even if there was no message in the queue. // On shutdown, it would cancel the context, even if the queue was stopped (thus no messages would even come in). err := r.Wait(ctx) if err != nil { @@ -194,10 +201,12 @@ func (app *App) processQueue(ctx context.Context, maxRetries int, initialBackoff retryCount := 0 for { - // Check if the channel is in the doNotProcessChannels map, if it is, check if it's been more than 15 minutes since we last tried to send a message to it. + // Check if the channel is in the doNotProcessChannels map, if it is, check if it's been more than 15 minutes since we + // last tried to send a message to it. if (doNotProcessChannels[msg.Channel] != time.Time{}) { if time.Since(doNotProcessChannels[msg.Channel]) >= 15*time.Minute { - // Remove the channel from the map, so that we can process it again. If the channel isn't created in the meantime, we will just add it again. + // Remove the channel from the map, so that we can process it again. If the channel isn't created in the meantime, we + // will just add it again. delete(doNotProcessChannels, msg.Channel) } else { log.S(log.Info, "Channel is on the doNotProcess list, not trying to post this message", log.String("channel", msg.Channel)) @@ -248,7 +257,8 @@ func (app *App) processQueue(ctx context.Context, maxRetries int, initialBackoff } } - // Need to call this to clean up the wg, which is vital for the shutdown to work (so that we process all the messages in the queue before exiting cleanly) + // Need to call this to clean up the wg, which is vital for the shutdown to work (so that we process all the messages + // in the queue before exiting cleanly) app.wg.Done() case <-ctx.Done(): diff --git a/app_test.go b/app_test.go index b609fd6..aac5b31 100644 --- a/app_test.go +++ b/app_test.go @@ -147,7 +147,8 @@ func TestApp_TestSlackRequestRate(t *testing.T) { diffInSeconds := endTime.Sub(startTime).Seconds() log.S(log.Debug, "diffInSeconds", log.Float64("diffInSeconds", diffInSeconds)) - // The sum is always: (Amount of messages * RPS * delay in seconds) minus burst. In this case 20 * 4 * 1 - 10 = 5 seconds. + // The sum is always: (Amount of messages * RPS * delay in seconds) minus burst. 
In this case 20 * 4 * 1 - 10 = 5 + // seconds. if math.RoundToEven(diffInSeconds) != 5 { t.Fatal("Expected processQueue finish the job in ~5 seconds, give or take. Got", diffInSeconds) } diff --git a/server.go b/server.go index a6549c3..10f91ef 100644 --- a/server.go +++ b/server.go @@ -53,7 +53,8 @@ func (app *App) handleRequest(w http.ResponseWriter, r *http.Request) { maxQueueSize := int(float64(cap(app.slackQueue)) * 0.9) // Reject requests if the queue is almost full // If the channel is full, the request will block until there is space in the channel. - // Ideally we don't reject at 90%, but initially after some tests I got blocked. So I decided to be a bit more conservative. + // Ideally we don't reject at 90%, but initially after some tests I got blocked. So I decided to be a bit more + // conservative. // ToDo: Fix this behavior so we can reach 100% channel size without problems. if len(app.slackQueue) >= maxQueueSize { log.S(log.Warning, "Queue is almost full, returning StatusServiceUnavailable", log.Int("queueSize", len(app.slackQueue))) @@ -100,7 +101,8 @@ func (app *App) handleRequest(w http.ResponseWriter, r *http.Request) { request.Channel = app.channelOverride } - // Add a counter to the wait group, this is important to wait for all the messages to be processed before shutting down the server. + // Add a counter to the wait group, this is important to wait for all the messages to be processed before shutting down + // the server. app.wg.Add(1) // Send the message to the slackQueue to be processed app.slackQueue <- request diff --git a/server_test.go b/server_test.go index 8a002f4..9a5cb59 100644 --- a/server_test.go +++ b/server_test.go @@ -98,7 +98,8 @@ func TestStartServer(t *testing.T) { }() // Give server some time to start - // If you are running on a non-priviledged account, and get a popup asking for permission to accept incoming connections, you can increase this time... + // If you are running on a non-priviledged account, and get a popup asking for permission to accept incoming + // connections, you can increase this time... time.Sleep(1 * time.Second) // Make a sample request to ensure server is running @@ -124,7 +125,8 @@ func TestStartServer(t *testing.T) { t.Fatal("Expected error making POST request after server shut down, got none") } - // to avoid confusion; we _are_ expecting a err, but for the edge-case it doesn't (resp != nil), we should close the body. + // to avoid confusion; we _are_ expecting a err, but for the edge-case it doesn't (resp != nil), we should close the + // body. if secondResp != nil { defer secondResp.Body.Close() } From 95e556473e52d001728b478f64917dc24eeefd07 Mon Sep 17 00:00:00 2001 From: Laurent Demailly Date: Thu, 21 Mar 2024 00:57:34 -0700 Subject: [PATCH 4/5] Revert "lll-fixer can fix comments already, making errors 78->58" to use a shorter len and version that handles litterals This reverts commit 8b542917059258811854c38cec4a5ffb68d4367f. --- app.go | 30 ++++++++++-------------------- app_test.go | 3 +-- server.go | 6 ++---- server_test.go | 6 ++---- 4 files changed, 15 insertions(+), 30 deletions(-) diff --git a/app.go b/app.go index 1f3dca4..a625d6c 100644 --- a/app.go +++ b/app.go @@ -92,8 +92,7 @@ var doNotProcessChannels = map[string]time.Time{} func CheckError(err string) (retryable bool, pause bool, description string) { // Special case for channel_not_found, we don't want to retry this one right away. 
- // We are making it a 'soft failure' so that we don't keep retrying it for a period of time for any message that is - // sent to a channel that doesn't exist. + // We are making it a 'soft failure' so that we don't keep retrying it for a period of time for any message that is sent to a channel that doesn't exist. if err == "channel_not_found" { return true, true, "Channel not found" } @@ -125,8 +124,7 @@ func (s *SlackClient) PostMessage(request SlackPostMessageRequest, url string, t // Charset is required to remove warnings from Slack. Maybe it's nice to have it configurable. /shrug req.Header.Set("Content-Type", "application/json; charset=utf-8") - // Documentation says that you are allowed the POST the token instead, however that does simply not work. Hence why we - // are using the Authorization header. + // Documentation says that you are allowed the POST the token instead, however that does simply not work. Hence why we are using the Authorization header. req.Header.Set("Authorization", "Bearer "+token) resp, err := s.client.Do(req) @@ -168,27 +166,22 @@ func (app *App) Shutdown() { //nolint:gocognit // but could probably use a refactor. func (app *App) processQueue(ctx context.Context, maxRetries int, initialBackoff time.Duration, burst int, slackRequestRate time.Duration) { // This is the rate limiter, which will block until it is allowed to continue on r.Wait(ctx). - // I kept the rate at 1 per second, as doing more than that will cause Slack to reject the messages anyways. We can - // burst however. - // Do note that this is best effort, in case of failures, we will exponentially backoff and retry, which will cause the - // rate to be lower than 1 per second due to obvious reasons. + // I kept the rate at 1 per second, as doing more than that will cause Slack to reject the messages anyways. We can burst however. + // Do note that this is best effort, in case of failures, we will exponentially backoff and retry, which will cause the rate to be lower than 1 per second due to obvious reasons. r := rate.NewLimiter(rate.Every(slackRequestRate), burst) for { select { case msg, ok := <-app.slackQueue: - // We do check if the channel is closed, but its important to note is that the channel will be closed when the queue is - // empty and the Shutdown() is called. - // Simply calling close(app.slackQueue) will not close the channel, it will only prevent any more messages from being - // sent to the channel. + // We do check if the channel is closed, but its important to note is that the channel will be closed when the queue is empty and the Shutdown() is called. + // Simply calling close(app.slackQueue) will not close the channel, it will only prevent any more messages from being sent to the channel. // Only once the channel is empty, will it be closed. if !ok { return } log.S(log.Debug, "Got message from queue", log.Any("message", msg)) - // Rate limiter was initially before fetching a message from the queue, but that caused problems by indefinitely - // looping even if there was no message in the queue. + // Rate limiter was initially before fetching a message from the queue, but that caused problems by indefinitely looping even if there was no message in the queue. // On shutdown, it would cancel the context, even if the queue was stopped (thus no messages would even come in). 
err := r.Wait(ctx) if err != nil { @@ -201,12 +194,10 @@ func (app *App) processQueue(ctx context.Context, maxRetries int, initialBackoff retryCount := 0 for { - // Check if the channel is in the doNotProcessChannels map, if it is, check if it's been more than 15 minutes since we - // last tried to send a message to it. + // Check if the channel is in the doNotProcessChannels map, if it is, check if it's been more than 15 minutes since we last tried to send a message to it. if (doNotProcessChannels[msg.Channel] != time.Time{}) { if time.Since(doNotProcessChannels[msg.Channel]) >= 15*time.Minute { - // Remove the channel from the map, so that we can process it again. If the channel isn't created in the meantime, we - // will just add it again. + // Remove the channel from the map, so that we can process it again. If the channel isn't created in the meantime, we will just add it again. delete(doNotProcessChannels, msg.Channel) } else { log.S(log.Info, "Channel is on the doNotProcess list, not trying to post this message", log.String("channel", msg.Channel)) @@ -257,8 +248,7 @@ func (app *App) processQueue(ctx context.Context, maxRetries int, initialBackoff } } - // Need to call this to clean up the wg, which is vital for the shutdown to work (so that we process all the messages - // in the queue before exiting cleanly) + // Need to call this to clean up the wg, which is vital for the shutdown to work (so that we process all the messages in the queue before exiting cleanly) app.wg.Done() case <-ctx.Done(): diff --git a/app_test.go b/app_test.go index aac5b31..b609fd6 100644 --- a/app_test.go +++ b/app_test.go @@ -147,8 +147,7 @@ func TestApp_TestSlackRequestRate(t *testing.T) { diffInSeconds := endTime.Sub(startTime).Seconds() log.S(log.Debug, "diffInSeconds", log.Float64("diffInSeconds", diffInSeconds)) - // The sum is always: (Amount of messages * RPS * delay in seconds) minus burst. In this case 20 * 4 * 1 - 10 = 5 - // seconds. + // The sum is always: (Amount of messages * RPS * delay in seconds) minus burst. In this case 20 * 4 * 1 - 10 = 5 seconds. if math.RoundToEven(diffInSeconds) != 5 { t.Fatal("Expected processQueue finish the job in ~5 seconds, give or take. Got", diffInSeconds) } diff --git a/server.go b/server.go index 10f91ef..a6549c3 100644 --- a/server.go +++ b/server.go @@ -53,8 +53,7 @@ func (app *App) handleRequest(w http.ResponseWriter, r *http.Request) { maxQueueSize := int(float64(cap(app.slackQueue)) * 0.9) // Reject requests if the queue is almost full // If the channel is full, the request will block until there is space in the channel. - // Ideally we don't reject at 90%, but initially after some tests I got blocked. So I decided to be a bit more - // conservative. + // Ideally we don't reject at 90%, but initially after some tests I got blocked. So I decided to be a bit more conservative. // ToDo: Fix this behavior so we can reach 100% channel size without problems. if len(app.slackQueue) >= maxQueueSize { log.S(log.Warning, "Queue is almost full, returning StatusServiceUnavailable", log.Int("queueSize", len(app.slackQueue))) @@ -101,8 +100,7 @@ func (app *App) handleRequest(w http.ResponseWriter, r *http.Request) { request.Channel = app.channelOverride } - // Add a counter to the wait group, this is important to wait for all the messages to be processed before shutting down - // the server. + // Add a counter to the wait group, this is important to wait for all the messages to be processed before shutting down the server. 
app.wg.Add(1) // Send the message to the slackQueue to be processed app.slackQueue <- request diff --git a/server_test.go b/server_test.go index 9a5cb59..8a002f4 100644 --- a/server_test.go +++ b/server_test.go @@ -98,8 +98,7 @@ func TestStartServer(t *testing.T) { }() // Give server some time to start - // If you are running on a non-priviledged account, and get a popup asking for permission to accept incoming - // connections, you can increase this time... + // If you are running on a non-priviledged account, and get a popup asking for permission to accept incoming connections, you can increase this time... time.Sleep(1 * time.Second) // Make a sample request to ensure server is running @@ -125,8 +124,7 @@ func TestStartServer(t *testing.T) { t.Fatal("Expected error making POST request after server shut down, got none") } - // to avoid confusion; we _are_ expecting a err, but for the edge-case it doesn't (resp != nil), we should close the - // body. + // to avoid confusion; we _are_ expecting a err, but for the edge-case it doesn't (resp != nil), we should close the body. if secondResp != nil { defer secondResp.Body.Close() } From a1f670eec74d8b0a43937486ddb34ccf23bfcd75 Mon Sep 17 00:00:00 2001 From: Laurent Demailly Date: Thu, 21 Mar 2024 01:20:19 -0700 Subject: [PATCH 5/5] fixed all the lll mostly with lll-fixer + manual few more --- app.go | 156 +++++++++++++++++++++++++++++++------------------ app_test.go | 9 ++- main.go | 3 +- server.go | 15 +++-- server_test.go | 12 ++-- 5 files changed, 126 insertions(+), 69 deletions(-) diff --git a/app.go b/app.go index a625d6c..aaaf1d7 100644 --- a/app.go +++ b/app.go @@ -46,53 +46,77 @@ var slackPermanentErrors = map[string]string{ "restricted_action_thread_only_channel": "Cannot post top-level messages into a thread-only channel.", "slack_connect_canvas_sharing_blocked": "Admin has disabled Canvas File sharing in all Slack Connect communications", "slack_connect_file_link_sharing_blocked": "Admin has disabled Slack File sharing in all Slack Connect communications", - "team_access_not_granted": "The token used is not granted the specific workspace access required to complete this request.", - "too_many_attachments": "Too many attachments were provided with this message. A maximum of 100 attachments are allowed on a message.", - "too_many_contact_cards": "Too many contact_cards were provided with this message. A maximum of 10 contact cards are allowed on a message.", - "cannot_reply_to_message": "This message type cannot have thread replies.", - "access_denied": "Access to a resource specified in the request is denied.", - "account_inactive": "Authentication token is for a deleted user or workspace when using a bot token.", - "deprecated_endpoint": "The endpoint has been deprecated.", - "enterprise_is_restricted": "The method cannot be called from an Enterprise.", - "invalid_auth": "Some aspect of authentication cannot be validated. Either the provided token is invalid or the request originates from an IP address disallowed from making the request.", - "method_deprecated": "The method has been deprecated.", - "missing_scope": "The token used is not granted the specific scope permissions required to complete this request.", - "not_allowed_token_type": "The token type used in this request is not allowed.", - "not_authed": "No authentication token provided.", - "no_permission": "The workspace token used in this request does not have the permissions necessary to complete the request. 
Make sure your app is a member of the conversation it's attempting to post a message to.", - "org_login_required": "The workspace is undergoing an enterprise migration and will not be available until migration is complete.", - "token_expired": "Authentication token has expired", - "token_revoked": "Authentication token is for a deleted user or workspace or the app has been removed when using a user token.", - "two_factor_setup_required": "Two factor setup is required.", - "accesslimited": "Access to this method is limited on the current network", - "fatal_error": "The server could not complete your operation(s) without encountering a catastrophic error. It's possible some aspect of the operation succeeded before the error was raised.", - "internal_error": "The server could not complete your operation(s) without encountering an error, likely due to a transient issue on our end. It's possible some aspect of the operation succeeded before the error was raised.", - "invalid_arg_name": "The method was passed an argument whose name falls outside the bounds of accepted or expected values. This includes very long names and names with non-alphanumeric characters other than _. If you get this error, it is typically an indication that you have made a very malformed API call.", - "invalid_arguments": "The method was either called with invalid arguments or some detail about the arguments passed is invalid, which is more likely when using complex arguments like blocks or attachments.", - "invalid_array_arg": "The method was passed an array as an argument. Please only input valid strings.", - "invalid_charset": "The method was called via a POST request, but the charset specified in the Content-Type header was invalid. Valid charset names are: utf-8 iso-8859-1.", - "invalid_form_data": "The method was called via a POST request with Content-Type application/x-www-form-urlencoded or multipart/form-data, but the form data was either missing or syntactically invalid.", - "invalid_post_type": "The method was called via a POST request, but the specified Content-Type was invalid. Valid types are: application/json application/x-www-form-urlencoded multipart/form-data text/plain.", - "missing_post_type": "The method was called via a POST request and included a data payload, but the request did not include a Content-Type header.", - "ratelimited": "The request has been ratelimited. Refer to the Retry-After header for when to retry the request.", - "service_unavailable": "The service is temporarily unavailable", - "team_added_to_org": "The workspace associated with your request is currently undergoing migration to an Enterprise Organization. Web API and other platform operations will be intermittently unavailable until the transition is complete.", + + "team_access_not_granted": "The token used is not granted the specific workspace access required to complete this request.", + "too_many_attachments": "Too many attachments were provided with this message. A maximum of 100 attachments are allowed on" + + " a message.", + "too_many_contact_cards": "Too many contact_cards were provided with this message. 
A maximum of 10 contact cards are allowed" + + " on a message.", + "cannot_reply_to_message": "This message type cannot have thread replies.", + "access_denied": "Access to a resource specified in the request is denied.", + "account_inactive": "Authentication token is for a deleted user or workspace when using a bot token.", + "deprecated_endpoint": "The endpoint has been deprecated.", + "enterprise_is_restricted": "The method cannot be called from an Enterprise.", + "invalid_auth": "Some aspect of authentication cannot be validated. Either the provided token is invalid or the" + + " request originates from an IP address disallowed from making the request.", + "method_deprecated": "The method has been deprecated.", + "missing_scope": "The token used is not granted the specific scope permissions required to complete this request.", + "not_allowed_token_type": "The token type used in this request is not allowed.", + "not_authed": "No authentication token provided.", + "no_permission": "The workspace token used in this request does not have the permissions necessary to complete the" + + " request. Make sure your app is a member of the conversation it's attempting to post a message to.", + "org_login_required": "The workspace is undergoing an enterprise migration and will not be available until migration is" + + " complete.", + "token_expired": "Authentication token has expired", + "token_revoked": "Authentication token is for a deleted user or workspace or the app has been removed when using a" + + " user token.", + "two_factor_setup_required": "Two factor setup is required.", + "accesslimited": "Access to this method is limited on the current network", + "fatal_error": "The server could not complete your operation(s) without encountering a catastrophic error. It's" + + " possible some aspect of the operation succeeded before the error was raised.", + "internal_error": "The server could not complete your operation(s) without encountering an error, likely due to a" + + " transient issue on our end. It's possible some aspect of the operation succeeded before the error" + + " was raised.", + "invalid_arg_name": "The method was passed an argument whose name falls outside the bounds of accepted or expected" + + " values. This includes very long names and names with non-alphanumeric characters other than _. If" + + " you get this error, it is typically an indication that you have made a very malformed API call.", + "invalid_arguments": "The method was either called with invalid arguments or some detail about the arguments passed is" + + " invalid, which is more likely when using complex arguments like blocks or attachments.", + "invalid_array_arg": "The method was passed an array as an argument. Please only input valid strings.", + "invalid_charset": "The method was called via a POST request, but the charset specified in the Content-Type header was" + + " invalid. Valid charset names are: utf-8 iso-8859-1.", + "invalid_form_data": "The method was called via a POST request with Content-Type application/x-www-form-urlencoded or" + + " multipart/form-data, but the form data was either missing or syntactically invalid.", + "invalid_post_type": "The method was called via a POST request, but the specified Content-Type was invalid. 
Valid types" + + " are: application/json application/x-www-form-urlencoded multipart/form-data text/plain.", + "missing_post_type": "The method was called via a POST request and included a data payload, but the request did not" + + " include a Content-Type header.", + "ratelimited": "The request has been ratelimited. Refer to the Retry-After header for when to retry the request.", + "service_unavailable": "The service is temporarily unavailable", + "team_added_to_org": "The workspace associated with your request is currently undergoing migration to an Enterprise" + + " Organization. Web API and other platform operations will be intermittently unavailable until the" + + " transition is complete.", } var slackRetryErrors = map[string]string{ - "message_limit_exceeded": "Members on this team are sending too many messages. For more details, see https://slack.com/help/articles/115002422943-Usage-limits-for-free-workspaces", - "rate_limited": "Application has posted too many messages, read the Rate Limit documentation for more information", - "fatal_error": "The server could not complete your operation(s) without encountering a catastrophic error. It's possible some aspect of the operation succeeded before the error was raised.", - "internal_error": "The server could not complete your operation(s) without encountering an error, likely due to a transient issue on our end. It's possible some aspect of the operation succeeded before the error was raised.", - "ratelimited": "The request has been ratelimited. Refer to the Retry-After header for when to retry the request.", - "request_timeout": "The method was called via a POST request, but the POST data was either missing or truncated.", + "message_limit_exceeded": "Members on this team are sending too many messages. For more details, see" + + " https://slack.com/help/articles/115002422943-Usage-limits-for-free-workspaces", + "rate_limited": "Application has posted too many messages, read the Rate Limit documentation for more information", + "fatal_error": "The server could not complete your operation(s) without encountering a catastrophic error. It's" + + " possible some aspect of the operation succeeded before the error was raised.", + "internal_error": "The server could not complete your operation(s) without encountering an error, likely due to a" + + " transient issue on our end. It's possible some aspect of the operation succeeded before the error" + + " was raised.", + "ratelimited": "The request has been ratelimited. Refer to the Retry-After header for when to retry the request.", + "request_timeout": "The method was called via a POST request, but the POST data was either missing or truncated.", } var doNotProcessChannels = map[string]time.Time{} func CheckError(err string) (retryable bool, pause bool, description string) { // Special case for channel_not_found, we don't want to retry this one right away. - // We are making it a 'soft failure' so that we don't keep retrying it for a period of time for any message that is sent to a channel that doesn't exist. + // We are making it a 'soft failure' so that we don't keep retrying it for a period of time for any + // message that is sent to a channel that doesn't exist. if err == "channel_not_found" { return true, true, "Channel not found" } @@ -116,15 +140,18 @@ func (s *SlackClient) PostMessage(request SlackPostMessageRequest, url string, t if err != nil { return err } - // Detach from the caller/new context. 
TODO: have some timeout (or use jrpc package functions which do that already) + // Detach from the caller/new context. TODO: have some timeout (or use jrpc package functions which + // do that already) req, err := http.NewRequestWithContext(context.Background(), http.MethodPost, url, bytes.NewBuffer(jsonValue)) if err != nil { return err } - // Charset is required to remove warnings from Slack. Maybe it's nice to have it configurable. /shrug + // Charset is required to remove warnings from Slack. Maybe it's nice to have it configurable. + // /shrug req.Header.Set("Content-Type", "application/json; charset=utf-8") - // Documentation says that you are allowed the POST the token instead, however that does simply not work. Hence why we are using the Authorization header. + // Documentation says that you are allowed the POST the token instead, however that does simply not + // work. Hence why we are using the Authorization header. req.Header.Set("Authorization", "Bearer "+token) resp, err := s.client.Do(req) @@ -146,7 +173,9 @@ func (s *SlackClient) PostMessage(request SlackPostMessageRequest, url string, t return nil } -func NewApp(queueSize int, httpClient *http.Client, metrics *Metrics, channelOverride, slackPostMessageURL, slackToken string) *App { +func NewApp(queueSize int, httpClient *http.Client, + metrics *Metrics, channelOverride, slackPostMessageURL, slackToken string, +) *App { return &App{ slackQueue: make(chan SlackPostMessageRequest, queueSize), messenger: &SlackClient{client: httpClient}, @@ -164,28 +193,37 @@ func (app *App) Shutdown() { } //nolint:gocognit // but could probably use a refactor. -func (app *App) processQueue(ctx context.Context, maxRetries int, initialBackoff time.Duration, burst int, slackRequestRate time.Duration) { +func (app *App) processQueue(ctx context.Context, maxRetries int, + initialBackoff time.Duration, burst int, slackRequestRate time.Duration, +) { // This is the rate limiter, which will block until it is allowed to continue on r.Wait(ctx). - // I kept the rate at 1 per second, as doing more than that will cause Slack to reject the messages anyways. We can burst however. - // Do note that this is best effort, in case of failures, we will exponentially backoff and retry, which will cause the rate to be lower than 1 per second due to obvious reasons. + // I kept the rate at 1 per second, as doing more than that will cause Slack to reject the messages + // anyways. We can burst however. + // Do note that this is best effort, in case of failures, we will exponentially backoff and retry, + // which will cause the rate to be lower than 1 per second due to obvious reasons. r := rate.NewLimiter(rate.Every(slackRequestRate), burst) for { select { case msg, ok := <-app.slackQueue: - // We do check if the channel is closed, but its important to note is that the channel will be closed when the queue is empty and the Shutdown() is called. - // Simply calling close(app.slackQueue) will not close the channel, it will only prevent any more messages from being sent to the channel. + // We do check if the channel is closed, but its important to note is that the channel will be + // closed when the queue is empty and the Shutdown() is called. + // Simply calling close(app.slackQueue) will not close the channel, it will only prevent any more + // messages from being sent to the channel. // Only once the channel is empty, will it be closed. 
if !ok { return } log.S(log.Debug, "Got message from queue", log.Any("message", msg)) - // Rate limiter was initially before fetching a message from the queue, but that caused problems by indefinitely looping even if there was no message in the queue. - // On shutdown, it would cancel the context, even if the queue was stopped (thus no messages would even come in). + // Rate limiter was initially before fetching a message from the queue, but that caused problems by + // indefinitely looping even if there was no message in the queue. + // On shutdown, it would cancel the context, even if the queue was stopped (thus no messages would + // even come in). err := r.Wait(ctx) if err != nil { - log.Fatalf("Error while waiting for rate limiter. This should not happen, provide debug info + error message to an issue if it does: %v", err) + log.Fatalf("Error while waiting for rate limiter. This should not happen, provide debug info + error message"+ + " to an issue if it does: %v", err) return } @@ -194,10 +232,12 @@ func (app *App) processQueue(ctx context.Context, maxRetries int, initialBackoff retryCount := 0 for { - // Check if the channel is in the doNotProcessChannels map, if it is, check if it's been more than 15 minutes since we last tried to send a message to it. + // Check if the channel is in the doNotProcessChannels map, if it is, check if it's been more than + // 15 minutes since we last tried to send a message to it. if (doNotProcessChannels[msg.Channel] != time.Time{}) { if time.Since(doNotProcessChannels[msg.Channel]) >= 15*time.Minute { - // Remove the channel from the map, so that we can process it again. If the channel isn't created in the meantime, we will just add it again. + // Remove the channel from the map, so that we can process it again. If the channel isn't created + // in the meantime, we will just add it again. delete(doNotProcessChannels, msg.Channel) } else { log.S(log.Info, "Channel is on the doNotProcess list, not trying to post this message", log.String("channel", msg.Channel)) @@ -221,14 +261,17 @@ func (app *App) processQueue(ctx context.Context, maxRetries int, initialBackoff if !retryable { app.metrics.RequestsFailedTotal.WithLabelValues(msg.Channel).Inc() - log.S(log.Error, "Permanent error, message will not be retried", log.Any("err", err), log.String("description", description), log.String("channel", msg.Channel), log.Any("message", msg)) + log.S(log.Error, "Permanent error, message will not be retried", log.Any("err", err), + log.String("description", description), log.String("channel", msg.Channel), log.Any("message", msg)) break } if description == "Unknown error" { - log.S(log.Error, "Unknown error, since we can't infer what type of error it is, we will retry it. However, please create a ticket/issue for this project for this error", log.Any("err", err)) + log.S(log.Error, "Unknown error, since we can't infer what type of error it is, we will retry it. 
However, please"+ + " create a ticket/issue for this project for this error", log.Any("err", err)) } - log.S(log.Warning, "Temporary error, message will be retried", log.Any("err", err), log.String("description", description), log.String("channel", msg.Channel), log.Any("message", msg)) + log.S(log.Warning, "Temporary error, message will be retried", log.Any("err", err), + log.String("description", description), log.String("channel", msg.Channel), log.Any("message", msg)) app.metrics.RequestsRetriedTotal.WithLabelValues(msg.Channel).Inc() @@ -248,7 +291,8 @@ func (app *App) processQueue(ctx context.Context, maxRetries int, initialBackoff } } - // Need to call this to clean up the wg, which is vital for the shutdown to work (so that we process all the messages in the queue before exiting cleanly) + // Need to call this to clean up the wg, which is vital for the shutdown to work (so that we + // process all the messages in the queue before exiting cleanly) app.wg.Done() case <-ctx.Done(): diff --git a/app_test.go b/app_test.go index b609fd6..04210e1 100644 --- a/app_test.go +++ b/app_test.go @@ -61,7 +61,8 @@ func TestApp_singleBurst_Success(t *testing.T) { diffInSeconds := endTime.Sub(startTime).Seconds() log.S(log.Debug, "diffInSeconds", log.Float64("diffInSeconds", diffInSeconds)) - // The sum is always: (Amount of messages * RPS * delay in seconds) minus burst. In this case 20 * 1 - 10 = 10 seconds. + // The sum is always: (Amount of messages * RPS * delay in seconds) minus burst. In this case 20 * + // 1 - 10 = 10 seconds. if math.RoundToEven(diffInSeconds) != 9 { t.Fatal("Expected processQueue finish the job in ~9 seconds, give or take. Got", diffInSeconds) } @@ -104,7 +105,8 @@ func TestApp_MultiBurst_Success(t *testing.T) { diffInSeconds := endTime.Sub(startTime).Seconds() log.S(log.Debug, "diffInSeconds", log.Float64("diffInSeconds", diffInSeconds)) - // The sum is always: (Amount of messages * RPS * delay in seconds) minus burst. In this case 20 * 1 - 10 = 10 seconds. + // The sum is always: (Amount of messages * RPS * delay in seconds) minus burst. In this case 20 * + // 1 - 10 = 10 seconds. if math.RoundToEven(diffInSeconds) != 10 { t.Fatal("Expected processQueue finish the job in ~9 seconds, give or take. Got", diffInSeconds) } @@ -147,7 +149,8 @@ func TestApp_TestSlackRequestRate(t *testing.T) { diffInSeconds := endTime.Sub(startTime).Seconds() log.S(log.Debug, "diffInSeconds", log.Float64("diffInSeconds", diffInSeconds)) - // The sum is always: (Amount of messages * RPS * delay in seconds) minus burst. In this case 20 * 4 * 1 - 10 = 5 seconds. + // The sum is always: (Amount of messages * RPS * delay in seconds) minus burst. In this case 20 * + // 4 * 1 - 10 = 5 seconds. if math.RoundToEven(diffInSeconds) != 5 { t.Fatal("Expected processQueue finish the job in ~5 seconds, give or take. 
Got", diffInSeconds) } diff --git a/main.go b/main.go index 6d74e69..7c6642f 100644 --- a/main.go +++ b/main.go @@ -120,7 +120,8 @@ func main() { tokens := getSlackTokens() // Hack to get the pod index - // Todo: Remove this by using the label pod-index: https://github.com/kubernetes/kubernetes/pull/119232 + // Todo: Remove this by using the label pod-index: + // https://github.com/kubernetes/kubernetes/pull/119232 podName := os.Getenv("HOSTNAME") if podName == "" { log.Fatalf("HOSTNAME environment variable not set") diff --git a/server.go b/server.go index a6549c3..bbc0829 100644 --- a/server.go +++ b/server.go @@ -53,7 +53,8 @@ func (app *App) handleRequest(w http.ResponseWriter, r *http.Request) { maxQueueSize := int(float64(cap(app.slackQueue)) * 0.9) // Reject requests if the queue is almost full // If the channel is full, the request will block until there is space in the channel. - // Ideally we don't reject at 90%, but initially after some tests I got blocked. So I decided to be a bit more conservative. + // Ideally we don't reject at 90%, but initially after some tests I got blocked. So I decided to be + // a bit more conservative. // ToDo: Fix this behavior so we can reach 100% channel size without problems. if len(app.slackQueue) >= maxQueueSize { log.S(log.Warning, "Queue is almost full, returning StatusServiceUnavailable", log.Int("queueSize", len(app.slackQueue))) @@ -72,7 +73,8 @@ func (app *App) handleRequest(w http.ResponseWriter, r *http.Request) { var request SlackPostMessageRequest requestErr := json.NewDecoder(r.Body).Decode(&request) - // If we can't decode, we don't bother validating. In the end it's the same outcome if either one is invalid. + // If we can't decode, we don't bother validating. In the end it's the same outcome if either one + // is invalid. if requestErr == nil { requestErr = validate(request) } @@ -100,16 +102,19 @@ func (app *App) handleRequest(w http.ResponseWriter, r *http.Request) { request.Channel = app.channelOverride } - // Add a counter to the wait group, this is important to wait for all the messages to be processed before shutting down the server. + // Add a counter to the wait group, this is important to wait for all the messages to be processed + // before shutting down the server. app.wg.Add(1) // Send the message to the slackQueue to be processed app.slackQueue <- request // Update the queue size metric after any change on the queue size app.metrics.QueueSize.With(nil).Set(float64(len(app.slackQueue))) - // Respond, this is not entirely accurate as we have no idea if the message will be processed successfully. + // Respond, this is not entirely accurate as we have no idea if the message will be processed + // successfully. // This is the downside of having a queue which could potentially delay responses by a lot. - // We do our due diligences on the received message and can make a fair assumption we will be able to process it. + // We do our due diligences on the received message and can make a fair assumption we will be able + // to process it. // Application should utlise this applications metrics and logs to find out if there are any issues. 
err := jrpc.Reply[SlackResponse](w, http.StatusOK, &SlackResponse{ Ok: true, diff --git a/server_test.go b/server_test.go index 8a002f4..57c579f 100644 --- a/server_test.go +++ b/server_test.go @@ -98,12 +98,14 @@ func TestStartServer(t *testing.T) { }() // Give server some time to start - // If you are running on a non-priviledged account, and get a popup asking for permission to accept incoming connections, you can increase this time... + // If you are running on a non-priviledged account, and get a popup asking for permission to accept + // incoming connections, you can increase this time... time.Sleep(1 * time.Second) // Make a sample request to ensure server is running - resp, err := http.Post("http://localhost"+testPort, "application/json", bytes.NewBufferString(`{"channel": "test_channel", "text": "Hello"}`)) + resp, err := http.Post("http://localhost"+testPort, "application/json", + bytes.NewBufferString(`{"channel": "test_channel", "text": "Hello"}`)) if err != nil { t.Fatalf("Could not make GET request: %v", err) } @@ -119,12 +121,14 @@ func TestStartServer(t *testing.T) { time.Sleep(1 * time.Second) // Make another request, this should fail since the server should be stopped - secondResp, err := http.Post("http://localhost"+testPort, "application/json", bytes.NewBufferString(`{"channel": "test_channel", "text": "Hello"}`)) + secondResp, err := http.Post("http://localhost"+testPort, "application/json", + bytes.NewBufferString(`{"channel": "test_channel", "text": "Hello"}`)) if err == nil { t.Fatal("Expected error making POST request after server shut down, got none") } - // to avoid confusion; we _are_ expecting a err, but for the edge-case it doesn't (resp != nil), we should close the body. + // to avoid confusion; we _are_ expecting a err, but for the edge-case it doesn't (resp != nil), we + // should close the body. if secondResp != nil { defer secondResp.Body.Close() }
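
The processQueue comments in app.go describe the posting loop as a channel-backed queue drained behind a token-bucket limiter (golang.org/x/time/rate), with a sync.WaitGroup so Shutdown() can wait for queued messages before exiting. The following is a minimal standalone sketch of that pattern, not the proxy's actual code; the queue size, interval, and burst values are invented for the demo.

package main

import (
	"context"
	"fmt"
	"sync"
	"time"

	"golang.org/x/time/rate"
)

// worker drains msgs at a capped rate: Wait() blocks until a token is
// available, so up to `burst` messages go out immediately and the rest
// are spaced one per interval, as the app.go comments describe.
func worker(ctx context.Context, msgs <-chan string, wg *sync.WaitGroup,
	interval time.Duration, burst int,
) {
	limiter := rate.NewLimiter(rate.Every(interval), burst)
	for {
		select {
		case msg, ok := <-msgs:
			if !ok {
				return // queue closed and fully drained: clean shutdown
			}
			if err := limiter.Wait(ctx); err != nil {
				wg.Done() // context cancelled while waiting for a token
				return
			}
			fmt.Println("posting:", msg)
			wg.Done() // lets a Shutdown() doing wg.Wait() make progress
		case <-ctx.Done():
			return
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	var wg sync.WaitGroup
	queue := make(chan string, 100)
	go worker(ctx, queue, &wg, 100*time.Millisecond, 5)

	for i := 0; i < 20; i++ {
		wg.Add(1) // Add before enqueueing, as handleRequest does
		queue <- fmt.Sprintf("message %d", i)
	}
	close(queue)
	wg.Wait() // returns once all 20 messages have been posted
}

As in the patch, the Add(1) happens before a message is enqueued and Done() after it is posted or dropped, which is what makes the close-then-wait shutdown sequence safe.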
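
The retry loop in processQueue, together with CheckError and the slackPermanentErrors/slackRetryErrors tables, distinguishes permanent Slack API errors from retryable ones and backs off exponentially between attempts. Here is a small self-contained sketch of that classify-then-retry shape; the error codes, classify table, and postWithRetry helper are illustrative stand-ins, not the functions from this patch.

package main

import (
	"errors"
	"fmt"
	"time"
)

// classify is a toy version of CheckError: a lookup that says whether an
// error code is worth retrying. Unknown codes are retried, matching the
// "Unknown error ... we will retry it" comment in the patch.
func classify(code string) (retryable bool) {
	permanent := map[string]bool{"invalid_auth": true, "msg_too_long": true}
	transient := map[string]bool{"ratelimited": true, "internal_error": true}
	switch {
	case permanent[code]:
		return false
	case transient[code]:
		return true
	default:
		return true
	}
}

// postWithRetry retries retryable failures with exponential backoff, up to
// maxRetries attempts, roughly like the inner loop in processQueue.
func postWithRetry(post func() error, maxRetries int, initialBackoff time.Duration) error {
	backoff := initialBackoff
	for attempt := 0; ; attempt++ {
		err := post()
		if err == nil {
			return nil
		}
		if !classify(err.Error()) || attempt >= maxRetries {
			return err // permanent error, or retries exhausted
		}
		time.Sleep(backoff)
		backoff *= 2 // exponential backoff between attempts
	}
}

func main() {
	calls := 0
	err := postWithRetry(func() error {
		calls++
		if calls < 3 {
			return errors.New("ratelimited") // fails twice, then succeeds
		}
		return nil
	}, 5, 50*time.Millisecond)
	fmt.Println("calls:", calls, "err:", err) // calls: 3 err: <nil>
}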
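
The patch also keeps a doNotProcessChannels map so that a channel_not_found answer puts a channel on a 15-minute "do not post" list instead of retrying every message sent to it. A possible shape for that cooldown, written here as its own type purely for illustration (the real code uses a bare map keyed by channel name and is only touched from the single queue worker, so it needs no locking):

package main

import (
	"fmt"
	"time"
)

// cooldown remembers channels that recently failed and skips them until a
// window has passed, mirroring the doNotProcessChannels idea in app.go.
type cooldown struct {
	window time.Duration
	seen   map[string]time.Time
}

func newCooldown(window time.Duration) *cooldown {
	return &cooldown{window: window, seen: map[string]time.Time{}}
}

// blocked reports whether posting to channel should be skipped right now.
// Entries older than the window are dropped so the channel is retried,
// in case it has been created in the meantime.
func (c *cooldown) blocked(channel string) bool {
	t, ok := c.seen[channel]
	if !ok {
		return false
	}
	if time.Since(t) >= c.window {
		delete(c.seen, channel)
		return false
	}
	return true
}

func (c *cooldown) mark(channel string) { c.seen[channel] = time.Now() }

func main() {
	c := newCooldown(15 * time.Minute)
	c.mark("#missing")
	fmt.Println(c.blocked("#missing")) // true: still inside the window
	fmt.Println(c.blocked("#exists"))  // false: never failed
}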