diff --git a/.codecov.yml b/.codecov.yml index b49dfb9e315..97cae9da52b 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -23,16 +23,15 @@ coverage: status: project: default: - enabled: true - # allowed to drop coverage and still result in a "success" commit status - threshold: null + informational: true if_not_found: success if_no_uploads: success if_ci_failed: error patch: default: - enabled: true - threshold: 90% + # patch coverage should be within 10% of existing coverage + target: auto + threshold: 10% if_not_found: success if_no_uploads: success if_ci_failed: error diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index a3845c672cd..fc21e2e37be 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -4,12 +4,8 @@ # the repo. Unless a later match takes precedence, # the following users/teams will be requested for # review when someone opens a pull request. -# TODO(owners): For ease of management, this should eventually shift to a -# defined GitHub team instead of individual usernames -* @azeemshaikh38 @justaugustus @laurentsimon @naveensrinivasan @spencerschrock @raghavkaul +* @ossf/scorecard-maintainers # Docs -# TODO(owners): For ease of management, this should eventually shift to a -# defined GitHub team instead of individual usernames -*.md @olivekl -/docs/ @olivekl +*.md @ossf/scorecard-doc-maintainers +/docs/ @ossf/scorecard-doc-maintainers diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 070dec2e781..ee6850e409b 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -19,10 +19,19 @@ updates: - package-ecosystem: "github-actions" directory: "/" schedule: - interval: "daily" + interval: "weekly" rebase-strategy: disabled commit-message: prefix: ":seedling:" + groups: + github-actions: + patterns: + - "*" + # These actions directly influence the build process and are excluded from grouped updates + exclude-patterns: + - "actions/setup-go" + - "arduino/setup-protoc" + - "goreleaser/goreleaser-action" - package-ecosystem: 
docker directory: "/" schedule: @@ -74,3 +83,10 @@ updates: rebase-strategy: disabled commit-message: prefix: ":seedling:" +- package-ecosystem: docker + directory: "/attestor" + schedule: + interval: weekly + rebase-strategy: disabled + commit-message: + prefix: ":seedling:" diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index c431cb5afea..787f424b17f 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -36,6 +36,9 @@ on: permissions: contents: read +env: + GO_VERSION: 1.21 + jobs: analyze: permissions: @@ -52,12 +55,21 @@ jobs: steps: - name: Harden Runner - uses: step-security/harden-runner@55d479fb1c5bcad5a4f9099a5d9f37c8857b2845 # v1 + uses: step-security/harden-runner@eb238b55efaa70779f274895e782ed17c84f2895 # v1 with: egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs - name: Checkout repository - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v2.3.4 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + # don't use the default version of Go from GitHub runners + # https://github.com/github/codeql-action/issues/1842#issuecomment-1704398087 + - name: Setup Go + uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 + with: + go-version: ${{ env.GO_VERSION }} + check-latest: true + cache: false # CodeQL needs to build everything itself to do its analysis # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL diff --git a/.github/workflows/depsreview.yml b/.github/workflows/depsreview.yml index ea8c3b2d09b..96824c249d3 100644 --- a/.github/workflows/depsreview.yml +++ b/.github/workflows/depsreview.yml @@ -22,6 +22,6 @@ jobs: runs-on: ubuntu-latest steps: - name: 'Checkout Repository' - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: 'Dependency Review' - uses: actions/dependency-review-action@1360a344ccb0ab6e9475edef90ad2f46bf8003b1 + uses: actions/dependency-review-action@4901385134134e04cec5fbe5ddfe3b2c5bd5d976 # v4.0.0 diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index b612cba9e5d..25800fc9558 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -23,9 +23,7 @@ on: - main env: - PROTOC_VERSION: 3.17.3 - GO_VERSION_FILE: go.mod # no good way of getting a mutual version between go.mod and tools/go.mod - CACHE_DEPENDENCY_PATH: "**/go.sum" # include both go.sum and tools/go.sum + GO_VERSION: 1.21 jobs: docs_only_check: @@ -37,12 +35,12 @@ jobs: docs_only: ${{ steps.docs_only_check.outputs.docs_only }} steps: - name: Check out code - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 #v3.5.3 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 #v4.1.1 with: fetch-depth: 2 # needed to diff changed files - id: files name: Get changed files - uses: tj-actions/changed-files@ec1e14cf27f4585783f463070881b2c499349a8a #v37.0.3 + uses: tj-actions/changed-files@90a06d6ba9543371ab4df8eeca0be07ca6054959 #v42.0.2 with: files_ignore: '**.md' - id: docs_only_check @@ -50,212 +48,41 @@ jobs: name: Check for docs-only changes run: echo "docs_only=true" >> $GITHUB_OUTPUT - scorecard: - name: scorecard-docker + docker_matrix: + strategy: + matrix: + target: + - 'scorecard-docker' + - 'cron-controller-docker' + - 'cron-worker-docker' + - 'cron-cii-worker-docker' + - 
'cron-bq-transfer-docker' + - 'cron-webhook-docker' + - 'cron-github-server-docker' + - 'build-attestor-docker' + name: ${{ matrix.target }} runs-on: ubuntu-latest permissions: contents: read - needs: - - docs_only_check - if: (needs.docs_only_check.outputs.docs_only != 'true') + needs: docs_only_check + # ideally we put one "if" here, but due to how skipped matrix jobs work, we need one for each step + # https://github.com/orgs/community/discussions/9141 steps: - name: Harden Runner - uses: step-security/harden-runner@55d479fb1c5bcad5a4f9099a5d9f37c8857b2845 # v2.4.1 + if: (needs.docs_only_check.outputs.docs_only != 'true') + uses: step-security/harden-runner@eb238b55efaa70779f274895e782ed17c84f2895 # v2.6.1 with: egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs - - - name: Install Protoc - uses: arduino/setup-protoc@149f6c87b92550901b26acd1632e11c3662e381f # v1.3.0 - with: - version: ${{ env.PROTOC_VERSION }} - repo-token: ${{ secrets.GITHUB_TOKEN }} - - name: Clone the code - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - - name: Setup Go - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 - with: - go-version-file: ${{ env.GO_VERSION_FILE }} - check-latest: true - cache: true - cache-dependency-path: ${{ env.CACHE_DEPENDENCY_PATH }} - - name: docker build - run: make scorecard-docker - cron-controller: - name: cron-controller-docker - runs-on: ubuntu-latest - permissions: - contents: read - needs: - - docs_only_check - if: (needs.docs_only_check.outputs.docs_only != 'true') - steps: - - name: Harden Runner - uses: step-security/harden-runner@55d479fb1c5bcad5a4f9099a5d9f37c8857b2845 # v2.4.1 - with: - egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs - - - name: Install Protoc - uses: arduino/setup-protoc@149f6c87b92550901b26acd1632e11c3662e381f # v1.3.0 - with: - version: ${{ env.PROTOC_VERSION }} - repo-token: ${{ secrets.GITHUB_TOKEN }} - - 
name: Clone the code - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - - name: Setup Go - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 - with: - go-version-file: ${{ env.GO_VERSION_FILE }} - check-latest: true - cache: true - cache-dependency-path: ${{ env.CACHE_DEPENDENCY_PATH }} - - name: docker build - run: make cron-controller-docker - cron-worker: - name: cron-worker-docker - runs-on: ubuntu-latest - permissions: - contents: read - needs: - - docs_only_check - if: (needs.docs_only_check.outputs.docs_only != 'true') - steps: - - name: Harden Runner - uses: step-security/harden-runner@55d479fb1c5bcad5a4f9099a5d9f37c8857b2845 # v2.4.1 - with: - egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs - - - name: Install Protoc - uses: arduino/setup-protoc@149f6c87b92550901b26acd1632e11c3662e381f # v1.3.0 - with: - version: ${{ env.PROTOC_VERSION }} - repo-token: ${{ secrets.GITHUB_TOKEN }} - - name: Clone the code - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - - name: Setup Go - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 - with: - go-version-file: ${{ env.GO_VERSION_FILE }} - check-latest: true - cache: true - cache-dependency-path: ${{ env.CACHE_DEPENDENCY_PATH }} - - name: docker build - run: make cron-worker-docker - cron-cii-worker: - name: cron-cii--worker-docker - runs-on: ubuntu-latest - permissions: - contents: read - needs: - - docs_only_check - if: (needs.docs_only_check.outputs.docs_only != 'true') - steps: - - name: Harden Runner - uses: step-security/harden-runner@55d479fb1c5bcad5a4f9099a5d9f37c8857b2845 # v2.4.1 - with: - egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs - - - name: Install Protoc - uses: arduino/setup-protoc@149f6c87b92550901b26acd1632e11c3662e381f # v1.3.0 - with: - version: ${{ env.PROTOC_VERSION }} - repo-token: ${{ secrets.GITHUB_TOKEN }} - - name: Clone 
the code - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - - name: Setup Go - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 - with: - go-version-file: ${{ env.GO_VERSION_FILE }} - check-latest: true - cache: true - cache-dependency-path: ${{ env.CACHE_DEPENDENCY_PATH }} - - name: docker build - run: make cron-cii-worker-docker - cron-bq-transfer: - name: cron-bq-transfer-docker - runs-on: ubuntu-latest - permissions: - contents: read - needs: - - docs_only_check - if: (needs.docs_only_check.outputs.docs_only != 'true') - steps: - - name: Harden Runner - uses: step-security/harden-runner@55d479fb1c5bcad5a4f9099a5d9f37c8857b2845 # v2.4.1 - with: - egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs - - - name: Install Protoc - uses: arduino/setup-protoc@149f6c87b92550901b26acd1632e11c3662e381f # v1.3.0 - with: - version: ${{ env.PROTOC_VERSION }} - repo-token: ${{ secrets.GITHUB_TOKEN }} - - name: Clone the code - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - - name: Setup Go - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 - with: - go-version-file: ${{ env.GO_VERSION_FILE }} - check-latest: true - cache: true - cache-dependency-path: ${{ env.CACHE_DEPENDENCY_PATH }} - - name: docker build - run: make cron-bq-transfer-docker - cron-webhook: - name: cron-webhook-docker - runs-on: ubuntu-latest - permissions: - contents: read - needs: - - docs_only_check - if: (needs.docs_only_check.outputs.docs_only != 'true') - steps: - - name: Harden Runner - uses: step-security/harden-runner@55d479fb1c5bcad5a4f9099a5d9f37c8857b2845 # v2.4.1 - with: - egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs - - - name: Install Protoc - uses: arduino/setup-protoc@149f6c87b92550901b26acd1632e11c3662e381f # v1.3.0 - with: - version: ${{ env.PROTOC_VERSION }} - repo-token: ${{ secrets.GITHUB_TOKEN }} - - name: Clone the 
code - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - - name: Setup Go - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 - with: - go-version-file: ${{ env.GO_VERSION_FILE }} - check-latest: true - cache: true - cache-dependency-path: ${{ env.CACHE_DEPENDENCY_PATH }} - - name: docker build - run: make cron-webhook-docker - cron-github-server: - name: cron-github-server-docker - runs-on: ubuntu-latest - permissions: - contents: read - needs: - - docs_only_check - if: (needs.docs_only_check.outputs.docs_only != 'true') - steps: - - name: Harden Runner - uses: step-security/harden-runner@55d479fb1c5bcad5a4f9099a5d9f37c8857b2845 # v2.4.1 - with: - egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs - - - name: Install Protoc - uses: arduino/setup-protoc@149f6c87b92550901b26acd1632e11c3662e381f # v1.3.0 - with: - version: ${{ env.PROTOC_VERSION }} - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Clone the code - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - - name: Setup Go - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 + if: (needs.docs_only_check.outputs.docs_only != 'true') + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - name: Setup Go # needed for some of the Makefile evaluations, even if building happens in Docker + if: (needs.docs_only_check.outputs.docs_only != 'true') + uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 with: - go-version-file: ${{ env.GO_VERSION_FILE }} + go-version: ${{ env.GO_VERSION }} check-latest: true - cache: true + cache: false # the building happens in Docker, so saving this cache would negatively impact other builds - name: docker build - run: make cron-github-server-docker + if: (needs.docs_only_check.outputs.docs_only != 'true') + run: make ${{ matrix.target }} diff --git a/.github/workflows/gitlab.yml b/.github/workflows/gitlab.yml index 
3ee314caacb..7742763d536 100644 --- a/.github/workflows/gitlab.yml +++ b/.github/workflows/gitlab.yml @@ -20,6 +20,12 @@ on: push: branches: - main + pull_request: + branches: + - main + +env: + GO_VERSION: 1.21 jobs: gitlab-integration-trusted: @@ -27,28 +33,50 @@ jobs: environment: gitlab steps: - name: Harden Runner - uses: step-security/harden-runner@55d479fb1c5bcad5a4f9099a5d9f37c8857b2845 # v1 + uses: step-security/harden-runner@eb238b55efaa70779f274895e782ed17c84f2895 # v2.6.1 with: egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs - - name: Clone the code - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: - fetch-depth: 0 - - - name: setup-go - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v2.2.0 + ref: ${{ github.event.pull_request.head.sha || github.sha }} # head SHA if PR, else fallback to push SHA + - name: Setup Go + uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 with: - go-version: '1.19' + go-version: ${{ env.GO_VERSION }} check-latest: true + cache: false # we manually manage caches below + - id: go-cache-paths + run: | + echo "go-build=$(go env GOCACHE)" >> "$GITHUB_OUTPUT" + echo "go-mod=$(go env GOMODCACHE)" >> "$GITHUB_OUTPUT" + - name: Cache builds + # https://github.com/mvdan/github-actions-golang#how-do-i-set-up-caching-between-builds + uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 #v4.0.0 + with: + path: | + ${{ steps.go-cache-paths.outputs.go-build }} + ${{ steps.go-cache-paths.outputs.go-mod }} + key: ${{ runner.os }}-go-tests-${{ hashFiles('**/go.sum') }} + restore-keys: | + ${{ runner.os }}-go-tests- - name: Prepare test env run: | go mod download + - name: Run GitLab tokenless E2E + uses: nick-invision/retry@14672906e672a08bd6eeb15720e9ed3ce869cdd4 # v2.9.0 + if: github.event_name == 'pull_request' + with: + max_attempts: 3 + retry_on: error + 
timeout_minutes: 30 + command: make e2e-gitlab + - name: Run GitLab PAT E2E # skip if auth token is not available - uses: nick-invision/retry@943e742917ac94714d2f408a0e8320f2d1fcafcd - if: ${{ github.actor != 'dependabot[bot]' }} + uses: nick-invision/retry@14672906e672a08bd6eeb15720e9ed3ce869cdd4 # v2.9.0 + if: ${{ github.event_name == 'push' && github.actor != 'dependabot[bot]' }} env: GITLAB_AUTH_TOKEN: ${{ secrets.GITLAB_TOKEN }} with: @@ -58,7 +86,7 @@ jobs: command: make e2e-gitlab-token - name: codecov - uses: codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d # 2.1.0 + uses: codecov/codecov-action@4fe8c5f003fae66aa5ebb77cfd3e7bfbbda0b6b0 # 3.1.5 with: - files: ./e2e-coverage.out - verbose: true \ No newline at end of file + files: "*e2e-coverage.out" + verbose: true diff --git a/.github/workflows/goreleaser.yaml b/.github/workflows/goreleaser.yaml index c3b29154f6e..272691482d5 100644 --- a/.github/workflows/goreleaser.yaml +++ b/.github/workflows/goreleaser.yaml @@ -22,6 +22,9 @@ on: permissions: contents: read +env: + GO_VERSION: 1.21 + jobs: goreleaser: outputs: @@ -31,25 +34,25 @@ jobs: runs-on: ubuntu-latest steps: - name: Harden Runner - uses: step-security/harden-runner@55d479fb1c5bcad5a4f9099a5d9f37c8857b2845 # v1 + uses: step-security/harden-runner@eb238b55efaa70779f274895e782ed17c84f2895 # v1 with: egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs - name: Checkout - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v2.3.4 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: fetch-depth: 0 - name: Set up Go - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v2.2.0 + uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v2.2.0 with: - go-version: 1.19 + go-version: ${{ env.GO_VERSION }} check-latest: true - name: Configure ldflags id: ldflags run: echo "version_flags=$(./scripts/version-ldflags)" >> "$GITHUB_OUTPUT" - name: Run 
GoReleaser id: run-goreleaser - uses: goreleaser/goreleaser-action@336e29918d653399e599bfca99fadc1d7ffbc9f7 # v2.5.0 + uses: goreleaser/goreleaser-action@7ec5c2b0c6cdda6e8bbb49444bc797dd33d74dd8 # v2.5.0 with: version: latest args: release --rm-dist @@ -72,7 +75,7 @@ jobs: actions: read # To read the workflow path. id-token: write # To sign the provenance. contents: write # To add assets to a release. - uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v1.7.0 + uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v1.9.0 with: base64-subjects: "${{ needs.goreleaser.outputs.hashes }}" upload-assets: true # upload to a new release @@ -83,7 +86,7 @@ jobs: permissions: read-all steps: - name: Install the verifier - uses: slsa-framework/slsa-verifier/actions/installer@v2.3.0 + uses: slsa-framework/slsa-verifier/actions/installer@v2.4.1 - name: Download assets env: diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml index 32bb9998ddf..9a3736165b9 100644 --- a/.github/workflows/integration.yml +++ b/.github/workflows/integration.yml @@ -23,12 +23,15 @@ on: permissions: contents: read +env: + GO_VERSION: 1.21 + jobs: approve: runs-on: ubuntu-latest steps: - name: Harden Runner - uses: step-security/harden-runner@55d479fb1c5bcad5a4f9099a5d9f37c8857b2845 # v1 + uses: step-security/harden-runner@eb238b55efaa70779f274895e782ed17c84f2895 # v1 with: egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs @@ -41,41 +44,40 @@ jobs: needs: [approve] steps: - name: Harden Runner - uses: step-security/harden-runner@55d479fb1c5bcad5a4f9099a5d9f37c8857b2845 # v1 + uses: step-security/harden-runner@eb238b55efaa70779f274895e782ed17c84f2895 # v2.6.1 with: egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs - - - name: pull_request actions/checkout - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v2.3.4 + - 
name: Clone the code + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ github.event.pull_request.head.sha }} - - - name: setup-go - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v2.2.0 + - name: Setup Go + uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 with: - go-version: '1.19' + go-version: ${{ env.GO_VERSION }} check-latest: true + cache: false # we manually manage caches below + - id: go-cache-paths + run: | + echo "go-build=$(go env GOCACHE)" >> "$GITHUB_OUTPUT" + echo "go-mod=$(go env GOMODCACHE)" >> "$GITHUB_OUTPUT" + - name: Cache builds + # https://github.com/mvdan/github-actions-golang#how-do-i-set-up-caching-between-builds + uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 #v4.0.0 + with: + path: | + ${{ steps.go-cache-paths.outputs.go-build }} + ${{ steps.go-cache-paths.outputs.go-mod }} + key: ${{ runner.os }}-go-tests-${{ hashFiles('**/go.sum') }} + restore-keys: | + ${{ runner.os }}-go-tests- - name: Prepare test env run: | go mod download - - name: Run GitLab E2E #using retry because the GitHub token is being throttled. - uses: nick-invision/retry@943e742917ac94714d2f408a0e8320f2d1fcafcd - with: - max_attempts: 3 - retry_on: error - timeout_minutes: 30 - command: make e2e-gitlab - - - name: codecov - uses: codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d # 2.1.0 - with: - files: ./e2e-coverage.out - verbose: true - - name: Run GITHUB_TOKEN E2E #using retry because the GitHub token is being throttled. 
- uses: nick-invision/retry@943e742917ac94714d2f408a0e8320f2d1fcafcd + uses: nick-invision/retry@14672906e672a08bd6eeb15720e9ed3ce869cdd4 # v2.9.0 env: GITHUB_AUTH_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: @@ -85,7 +87,7 @@ jobs: command: make e2e-gh-token - name: codecov - uses: codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d # 2.1.0 + uses: codecov/codecov-action@4fe8c5f003fae66aa5ebb77cfd3e7bfbbda0b6b0 # 3.1.5 with: files: "*e2e-coverage.out" verbose: true diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml new file mode 100644 index 00000000000..db07113c633 --- /dev/null +++ b/.github/workflows/lint.yml @@ -0,0 +1,37 @@ +name: golangci-lint +on: + push: + branches: + - main + pull_request: + branches: + - main + +permissions: + contents: read + pull-requests: read # Use with `only-new-issues` option. + +env: + GO_VERSION: 1.21 + +jobs: + golangci: + name: check-linter + runs-on: ubuntu-latest + steps: + - uses: step-security/harden-runner@eb238b55efaa70779f274895e782ed17c84f2895 # v2.6.1 + with: + egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 + with: + go-version: ${{ env.GO_VERSION }} + cache: false # golangci-lint maintains its own cache + - name: set golangci-lint version # keep in sync with tools/go.mod + run: | + echo "GOLANGCI_LINT_VERSION=$(cd tools; go list -m -f '{{ .Version }}' github.com/golangci/golangci-lint)" >> "$GITHUB_ENV" + - name: golangci-lint + uses: golangci/golangci-lint-action@3a919529898de77ec3da873e3063ca4b10e7f5cc # v3.7.0 + with: + version: ${{ env.GOLANGCI_LINT_VERSION }} + only-new-issues: true diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index cef4ac8c8f6..e3f6cadc11e 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -27,7 +27,7 @@ on: env: PROTOC_VERSION: 3.17.3 
- GO_VERSION: 1.19 + GO_VERSION: 1.21 jobs: unit-test: @@ -37,41 +37,43 @@ jobs: contents: read steps: - name: Harden Runner - uses: step-security/harden-runner@55d479fb1c5bcad5a4f9099a5d9f37c8857b2845 # v1 + uses: step-security/harden-runner@eb238b55efaa70779f274895e782ed17c84f2895 # v2.6.1 with: egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs - - - name: Cache builds - # https://github.com/mvdan/github-actions-golang#how-do-i-set-up-caching-between-builds - uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 #v3.3.1 - with: - path: | - ~/go/pkg/mod - ~/.cache/go-build - ~/Library/Caches/go-build - %LocalAppData%\go-build - key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-go- - name: Clone the code - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v2.3.4 - with: - fetch-depth: 0 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Setup Go - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v2.2.0 + uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 with: go-version: ${{ env.GO_VERSION }} check-latest: true - cache: true + cache: false # we manually manage caches below + - id: go-cache-paths + run: | + echo "go-build=$(go env GOCACHE)" >> "$GITHUB_OUTPUT" + echo "go-mod=$(go env GOMODCACHE)" >> "$GITHUB_OUTPUT" + - name: Cache builds + # https://github.com/mvdan/github-actions-golang#how-do-i-set-up-caching-between-builds + uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 #v4.0.0 + with: + path: | + ${{ steps.go-cache-paths.outputs.go-build }} + ${{ steps.go-cache-paths.outputs.go-mod }} + key: ${{ runner.os }}-go-tests-${{ hashFiles('**/go.sum') }} + restore-keys: | + ${{ runner.os }}-go-tests- + - name: Prepare test env + run: | + go mod download - name: Run unit-tests run: make unit-test - name: Upload codecoverage - uses: 
codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d # 2.1.0 + uses: codecov/codecov-action@4fe8c5f003fae66aa5ebb77cfd3e7bfbbda0b6b0 # 3.1.5 with: files: ./unit-coverage.out verbose: true - name: Run PAT Token E2E #using retry because the GitHub token is being throttled. - uses: nick-invision/retry@943e742917ac94714d2f408a0e8320f2d1fcafcd + uses: nick-invision/retry@14672906e672a08bd6eeb15720e9ed3ce869cdd4 if: ${{ github.event_name != 'pull_request' && github.actor != 'dependabot[bot]' }} env: GITHUB_AUTH_TOKEN: ${{ secrets.GH_AUTH_TOKEN }} @@ -81,8 +83,8 @@ jobs: timeout_minutes: 30 command: make e2e-pat - name: codecov - uses: codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d # 2.1.0 - if: ${{ github.event_name != 'pull_request' }} + uses: codecov/codecov-action@4fe8c5f003fae66aa5ebb77cfd3e7bfbbda0b6b0 # 2.1.0 + if: ${{ github.event_name != 'pull_request' || github.actor != 'dependabot[bot]' }} with: files: "*e2e-coverage.out" verbose: true @@ -93,7 +95,7 @@ jobs: contents: read steps: - name: Harden Runner - uses: step-security/harden-runner@55d479fb1c5bcad5a4f9099a5d9f37c8857b2845 # v1 + uses: step-security/harden-runner@eb238b55efaa70779f274895e782ed17c84f2895 # v1 with: egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs @@ -104,7 +106,7 @@ jobs: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Cache builds # https://github.com/mvdan/github-actions-golang#how-do-i-set-up-caching-between-builds - uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1 + uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4.0.0 with: path: | ~/go/pkg/mod @@ -115,17 +117,17 @@ jobs: restore-keys: | ${{ runner.os }}-go- - name: Clone the code - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v2.3.4 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: fetch-depth: 0 - name: Setup Go - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v2.2.0 
+ uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v2.2.0 with: go-version: ${{ env.GO_VERSION }} check-latest: true cache: true - name: generate mocks - uses: nick-invision/retry@943e742917ac94714d2f408a0e8320f2d1fcafcd + uses: nick-invision/retry@14672906e672a08bd6eeb15720e9ed3ce869cdd4 with: max_attempts: 3 retry_on: error @@ -141,39 +143,19 @@ jobs: contents: read steps: - name: Harden Runner - uses: step-security/harden-runner@55d479fb1c5bcad5a4f9099a5d9f37c8857b2845 # v1 + uses: step-security/harden-runner@eb238b55efaa70779f274895e782ed17c84f2895 # v2.6.1 with: egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs - - - name: Install Protoc - uses: arduino/setup-protoc@149f6c87b92550901b26acd1632e11c3662e381f # v1.3.0 - with: - version: ${{ env.PROTOC_VERSION }} - repo-token: ${{ secrets.GITHUB_TOKEN }} - - name: Cache builds - # https://github.com/mvdan/github-actions-golang#how-do-i-set-up-caching-between-builds - uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1 - with: - path: | - ~/go/pkg/mod - ~/.cache/go-build - ~/Library/Caches/go-build - %LocalAppData%\go-build - key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-go- - name: Clone the code - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v2.3.4 - with: - fetch-depth: 0 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Setup Go - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v2.2.0 + uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 with: go-version: ${{ env.GO_VERSION }} check-latest: true cache: true - name: generate docs - uses: nick-invision/retry@943e742917ac94714d2f408a0e8320f2d1fcafcd + uses: nick-invision/retry@14672906e672a08bd6eeb15720e9ed3ce869cdd4 # v2.9.0 with: max_attempts: 3 retry_on: error @@ -181,6 +163,8 @@ jobs: command: | go env -w GOFLAGS=-mod=mod make generate-docs + - name: ensure 
checks.yaml and checks.md match + run: git diff --exit-code build-proto: name: build-proto runs-on: ubuntu-latest @@ -188,7 +172,7 @@ jobs: contents: read steps: - name: Harden Runner - uses: step-security/harden-runner@55d479fb1c5bcad5a4f9099a5d9f37c8857b2845 # v1 + uses: step-security/harden-runner@eb238b55efaa70779f274895e782ed17c84f2895 # v1 with: egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs @@ -198,17 +182,17 @@ jobs: version: ${{ env.PROTOC_VERSION }} repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Clone the code - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v2.3.4 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: fetch-depth: 0 - name: Setup Go - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v2.2.0 + uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v2.2.0 with: go-version: ${{ env.GO_VERSION }} check-latest: true cache: true - name: build-proto - uses: nick-invision/retry@943e742917ac94714d2f408a0e8320f2d1fcafcd + uses: nick-invision/retry@14672906e672a08bd6eeb15720e9ed3ce869cdd4 with: max_attempts: 3 retry_on: error @@ -216,26 +200,33 @@ jobs: command: | go env -w GOFLAGS=-mod=mod make build-proto - build-scorecard: - name: build-scorecard + build-matrix: + strategy: + matrix: + target: + - 'build-scorecard' + - 'build-controller' + - 'build-worker' + - 'build-cii-worker' + - 'build-shuffler' + - 'build-bq-transfer' + - 'build-github-server' + - 'build-webhook' + - 'build-add-script' + - 'build-validate-script' + name: ${{ matrix.target }} runs-on: ubuntu-latest needs: build-proto permissions: contents: read steps: - name: Harden Runner - uses: step-security/harden-runner@55d479fb1c5bcad5a4f9099a5d9f37c8857b2845 # v1 + uses: step-security/harden-runner@eb238b55efaa70779f274895e782ed17c84f2895 # v2.6.1 with: egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs - - - name: Install Protoc - uses: 
arduino/setup-protoc@149f6c87b92550901b26acd1632e11c3662e381f # v1.3.0 - with: - version: ${{ env.PROTOC_VERSION }} - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Cache builds # https://github.com/mvdan/github-actions-golang#how-do-i-set-up-caching-between-builds - uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1 + uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4.0.0 with: path: | ~/go/pkg/mod @@ -246,534 +237,22 @@ jobs: restore-keys: | ${{ runner.os }}-go- - name: Clone the code - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v2.3.4 - with: - fetch-depth: 0 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Setup Go - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v2.2.0 + uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 with: go-version: ${{ env.GO_VERSION }} check-latest: true cache: true - name: Run build - uses: nick-invision/retry@943e742917ac94714d2f408a0e8320f2d1fcafcd - with: - max_attempts: 3 - retry_on: error - timeout_minutes: 30 - command: | - go env -w GOFLAGS=-mod=mod - make build-scorecard - build-controller: - name: build-controller - runs-on: ubuntu-latest - needs: build-proto - permissions: - contents: read - steps: - - name: Harden Runner - uses: step-security/harden-runner@55d479fb1c5bcad5a4f9099a5d9f37c8857b2845 # v1 - with: - egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs - - - name: Install Protoc - uses: arduino/setup-protoc@149f6c87b92550901b26acd1632e11c3662e381f # v1.3.0 - with: - version: ${{ env.PROTOC_VERSION }} - repo-token: ${{ secrets.GITHUB_TOKEN }} - - name: Cache builds - # https://github.com/mvdan/github-actions-golang#how-do-i-set-up-caching-between-builds - uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1 - with: - path: | - ~/go/pkg/mod - ~/.cache/go-build - ~/Library/Caches/go-build - %LocalAppData%\go-build - key: ${{ runner.os 
}}-go-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-go- - - name: Clone the code - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v2.3.4 - with: - fetch-depth: 0 - - name: Setup Go - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v2.2.0 - with: - go-version: ${{ env.GO_VERSION }} - check-latest: true - cache: true - - name: build cron - uses: nick-invision/retry@943e742917ac94714d2f408a0e8320f2d1fcafcd - with: - max_attempts: 3 - retry_on: error - timeout_minutes: 30 - command: | - go env -w GOFLAGS=-mod=mod - make build-controller - build-worker: - name: build-worker - runs-on: ubuntu-latest - needs: build-proto - permissions: - contents: read - steps: - - name: Harden Runner - uses: step-security/harden-runner@55d479fb1c5bcad5a4f9099a5d9f37c8857b2845 # v1 - with: - egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs - - - name: Install Protoc - uses: arduino/setup-protoc@149f6c87b92550901b26acd1632e11c3662e381f # v1.3.0 - with: - version: ${{ env.PROTOC_VERSION }} - repo-token: ${{ secrets.GITHUB_TOKEN }} - - name: Cache builds - # https://github.com/mvdan/github-actions-golang#how-do-i-set-up-caching-between-builds - uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1 - with: - path: | - ~/go/pkg/mod - ~/.cache/go-build - ~/Library/Caches/go-build - %LocalAppData%\go-build - key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-go- - - name: Clone the code - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v2.3.4 - with: - fetch-depth: 0 - - name: Setup Go - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v2.2.0 - with: - go-version: ${{ env.GO_VERSION }} - check-latest: true - cache: true - - name: build worker - uses: nick-invision/retry@943e742917ac94714d2f408a0e8320f2d1fcafcd - with: - max_attempts: 3 - retry_on: error - timeout_minutes: 30 - command: | - go env -w 
GOFLAGS=-mod=mod - make build-worker - build-cii-worker: - name: build-cii-worker - runs-on: ubuntu-latest - needs: build-proto - permissions: - contents: read - steps: - - name: Harden Runner - uses: step-security/harden-runner@55d479fb1c5bcad5a4f9099a5d9f37c8857b2845 # v1 - with: - egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs - - - name: Install Protoc - uses: arduino/setup-protoc@149f6c87b92550901b26acd1632e11c3662e381f # v1.3.0 - with: - version: ${{ env.PROTOC_VERSION }} - repo-token: ${{ secrets.GITHUB_TOKEN }} - - name: Cache builds - # https://github.com/mvdan/github-actions-golang#how-do-i-set-up-caching-between-builds - uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1 - with: - path: | - ~/go/pkg/mod - ~/.cache/go-build - ~/Library/Caches/go-build - %LocalAppData%\go-build - key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-go- - - name: Clone the code - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v2.3.4 - with: - fetch-depth: 0 - - name: Setup Go - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v2.2.0 - with: - go-version: ${{ env.GO_VERSION }} - check-latest: true - cache: true - - name: build cii-worker - uses: nick-invision/retry@943e742917ac94714d2f408a0e8320f2d1fcafcd - with: - max_attempts: 3 - retry_on: error - timeout_minutes: 30 - command: | - go env -w GOFLAGS=-mod=mod - make build-cii-worker - build-shuffler: - name: build-shuffler - runs-on: ubuntu-latest - needs: build-proto - permissions: - contents: read - steps: - - name: Harden Runner - uses: step-security/harden-runner@55d479fb1c5bcad5a4f9099a5d9f37c8857b2845 # v1 - with: - egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs - - - name: Install Protoc - uses: arduino/setup-protoc@149f6c87b92550901b26acd1632e11c3662e381f # v1.3.0 - with: - version: ${{ env.PROTOC_VERSION }} - repo-token: ${{ secrets.GITHUB_TOKEN }} - 
- name: Cache builds - # https://github.com/mvdan/github-actions-golang#how-do-i-set-up-caching-between-builds - uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1 - with: - path: | - ~/go/pkg/mod - ~/.cache/go-build - ~/Library/Caches/go-build - %LocalAppData%\go-build - key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-go- - - name: Clone the code - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v2.3.4 - with: - fetch-depth: 0 - - name: Setup Go - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v2.2.0 - with: - go-version: ${{ env.GO_VERSION }} - check-latest: true - cache: true - - name: build shuffler - uses: nick-invision/retry@943e742917ac94714d2f408a0e8320f2d1fcafcd - with: - max_attempts: 3 - retry_on: error - timeout_minutes: 30 - command: | - go env -w GOFLAGS=-mod=mod - make build-shuffler - build-bq-transfer: - name: build-bq-transfer - runs-on: ubuntu-latest - needs: build-proto - permissions: - contents: read - steps: - - name: Harden Runner - uses: step-security/harden-runner@55d479fb1c5bcad5a4f9099a5d9f37c8857b2845 # v1 - with: - egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs - - - name: Install Protoc - uses: arduino/setup-protoc@149f6c87b92550901b26acd1632e11c3662e381f # v1.3.0 - with: - version: ${{ env.PROTOC_VERSION }} - repo-token: ${{ secrets.GITHUB_TOKEN }} - - name: Cache builds - # https://github.com/mvdan/github-actions-golang#how-do-i-set-up-caching-between-builds - uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1 - with: - path: | - ~/go/pkg/mod - ~/.cache/go-build - ~/Library/Caches/go-build - %LocalAppData%\go-build - key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-go- - - name: Clone the code - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v2.3.4 - with: - fetch-depth: 0 - - name: Setup Go - uses: 
actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v2.2.0 - with: - go-version: ${{ env.GO_VERSION }} - check-latest: true - cache: true - - name: build bq transfer - uses: nick-invision/retry@943e742917ac94714d2f408a0e8320f2d1fcafcd + uses: nick-invision/retry@14672906e672a08bd6eeb15720e9ed3ce869cdd4 # v2.9.0 with: max_attempts: 3 retry_on: error timeout_minutes: 30 command: | go env -w GOFLAGS=-mod=mod - make build-bq-transfer - build-github-server: - name: build-github-server - runs-on: ubuntu-latest - needs: build-proto - permissions: - contents: read - steps: - - name: Harden Runner - uses: step-security/harden-runner@55d479fb1c5bcad5a4f9099a5d9f37c8857b2845 # v1 - with: - egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs - - - name: Install Protoc - uses: arduino/setup-protoc@149f6c87b92550901b26acd1632e11c3662e381f # v1.3.0 - with: - version: ${{ env.PROTOC_VERSION }} - repo-token: ${{ secrets.GITHUB_TOKEN }} - - name: Cache builds - # https://github.com/mvdan/github-actions-golang#how-do-i-set-up-caching-between-builds - uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1 - with: - path: | - ~/go/pkg/mod - ~/.cache/go-build - ~/Library/Caches/go-build - %LocalAppData%\go-build - key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-go- - - name: Clone the code - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v2.3.4 - with: - fetch-depth: 0 - - name: Setup Go - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v2.2.0 - with: - go-version: ${{ env.GO_VERSION }} - check-latest: true - cache: true - - name: build bq transfer - uses: nick-invision/retry@943e742917ac94714d2f408a0e8320f2d1fcafcd - with: - max_attempts: 3 - retry_on: error - timeout_minutes: 30 - command: | - go env -w GOFLAGS=-mod=mod - make build-github-server - build-webhook: - name: build-webhook - runs-on: ubuntu-latest - needs: build-proto - permissions: - 
contents: read - steps: - - name: Harden Runner - uses: step-security/harden-runner@55d479fb1c5bcad5a4f9099a5d9f37c8857b2845 # v1 - with: - egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs - - - name: Install Protoc - uses: arduino/setup-protoc@149f6c87b92550901b26acd1632e11c3662e381f # v1.3.0 - with: - version: ${{ env.PROTOC_VERSION }} - repo-token: ${{ secrets.GITHUB_TOKEN }} - - name: Cache builds - # https://github.com/mvdan/github-actions-golang#how-do-i-set-up-caching-between-builds - uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1 - with: - path: | - ~/go/pkg/mod - ~/.cache/go-build - ~/Library/Caches/go-build - %LocalAppData%\go-build - key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-go- - - name: Clone the code - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v2.3.4 - with: - fetch-depth: 0 - - name: Setup Go - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v2.2.0 - with: - go-version: ${{ env.GO_VERSION }} - check-latest: true - cache: true - - name: build webhook - uses: nick-invision/retry@943e742917ac94714d2f408a0e8320f2d1fcafcd - with: - max_attempts: 3 - retry_on: error - timeout_minutes: 30 - command: | - go env -w GOFLAGS=-mod=mod - make build-webhook - build-add-script: - name: build-add-script - runs-on: ubuntu-latest - needs: build-proto - permissions: - contents: read - steps: - - name: Harden Runner - uses: step-security/harden-runner@55d479fb1c5bcad5a4f9099a5d9f37c8857b2845 # v1 - with: - egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs - - - name: Install Protoc - uses: arduino/setup-protoc@149f6c87b92550901b26acd1632e11c3662e381f # v1.3.0 - with: - version: ${{ env.PROTOC_VERSION }} - repo-token: ${{ secrets.GITHUB_TOKEN }} - - name: Cache builds - # https://github.com/mvdan/github-actions-golang#how-do-i-set-up-caching-between-builds - uses: 
actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1 - with: - path: | - ~/go/pkg/mod - ~/.cache/go-build - ~/Library/Caches/go-build - %LocalAppData%\go-build - key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-go- - - name: Clone the code - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v2.3.4 - with: - fetch-depth: 0 - - name: Setup Go - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v2.2.0 - with: - go-version: ${{ env.GO_VERSION }} - check-latest: true - cache: true - - name: build-add-script - uses: nick-invision/retry@943e742917ac94714d2f408a0e8320f2d1fcafcd - with: - max_attempts: 3 - retry_on: error - timeout_minutes: 30 - command: | - go env -w GOFLAGS=-mod=mod - make build-add-script - build-validate-script: - name: build-validate-script - runs-on: ubuntu-latest - needs: build-proto - permissions: - contents: read - steps: - - name: Harden Runner - uses: step-security/harden-runner@55d479fb1c5bcad5a4f9099a5d9f37c8857b2845 # v1 - with: - egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs - - - name: Install Protoc - uses: arduino/setup-protoc@149f6c87b92550901b26acd1632e11c3662e381f # v1.3.0 - with: - version: ${{ env.PROTOC_VERSION }} - repo-token: ${{ secrets.GITHUB_TOKEN }} - - name: Cache builds - # https://github.com/mvdan/github-actions-golang#how-do-i-set-up-caching-between-builds - uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1 - with: - path: | - ~/go/pkg/mod - ~/.cache/go-build - ~/Library/Caches/go-build - %LocalAppData%\go-build - key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-go- - - name: Clone the code - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v2.3.4 - with: - fetch-depth: 0 - - name: Setup Go - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v2.2.0 - with: - go-version: ${{ env.GO_VERSION }} - check-latest: 
true - cache: true - - name: build-validate-script - uses: nick-invision/retry@943e742917ac94714d2f408a0e8320f2d1fcafcd - with: - max_attempts: 3 - retry_on: error - timeout_minutes: 30 - command: | - go env -w GOFLAGS=-mod=mod - make build-validate-script - build-update-script: - name: build-update-script - runs-on: ubuntu-latest - needs: build-proto - permissions: - contents: read - steps: - - name: Harden Runner - uses: step-security/harden-runner@55d479fb1c5bcad5a4f9099a5d9f37c8857b2845 # v1 - with: - egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs - - - name: Install Protoc - uses: arduino/setup-protoc@149f6c87b92550901b26acd1632e11c3662e381f # v1.3.0 - with: - version: ${{ env.PROTOC_VERSION }} - repo-token: ${{ secrets.GITHUB_TOKEN }} - - name: Cache builds - # https://github.com/mvdan/github-actions-golang#how-do-i-set-up-caching-between-builds - uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1 - with: - path: | - ~/go/pkg/mod - ~/.cache/go-build - ~/Library/Caches/go-build - %LocalAppData%\go-build - key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-go- - - name: Clone the code - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v2.3.4 - with: - fetch-depth: 0 - - name: Setup Go - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v2.2.0 - with: - go-version: ${{ env.GO_VERSION }} - check-latest: true - cache: true - - name: build-validate-script - uses: nick-invision/retry@943e742917ac94714d2f408a0e8320f2d1fcafcd - with: - max_attempts: 3 - retry_on: error - timeout_minutes: 30 - command: | - go env -w GOFLAGS=-mod=mod - make build-update-script - check-linter: - name: check-linter - runs-on: ubuntu-latest - permissions: - contents: read - steps: - - name: Harden Runner - uses: step-security/harden-runner@55d479fb1c5bcad5a4f9099a5d9f37c8857b2845 # v1 - with: - egress-policy: audit # TODO: change to 'egress-policy: block' after 
couple of runs - - - name: Install Protoc - uses: arduino/setup-protoc@149f6c87b92550901b26acd1632e11c3662e381f # v1.3.0 - with: - version: ${{ env.PROTOC_VERSION }} - repo-token: ${{ secrets.GITHUB_TOKEN }} - - name: Clone the code - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v2.3.4 - with: - fetch-depth: 0 - - name: Setup Go - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v2.2.0 - with: - go-version: ${{ env.GO_VERSION }} - check-latest: true - cache: true - - name: Run build - run: | - go env -w GOFLAGS=-mod=mod - make check-linter + make ${{ matrix.target }} validate-docs: name: validate-docs runs-on: ubuntu-latest @@ -781,13 +260,13 @@ jobs: contents: read steps: - name: Harden Runner - uses: step-security/harden-runner@55d479fb1c5bcad5a4f9099a5d9f37c8857b2845 # v1 + uses: step-security/harden-runner@eb238b55efaa70779f274895e782ed17c84f2895 # v1 with: egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs - name: Cache builds # https://github.com/mvdan/github-actions-golang#how-do-i-set-up-caching-between-builds - uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1 + uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4.0.0 with: path: | ~/go/pkg/mod @@ -798,17 +277,17 @@ jobs: restore-keys: | ${{ runner.os }}-go- - name: Clone the code - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v2.3.4 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: fetch-depth: 0 - name: Setup Go - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v2.2.0 + uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v2.2.0 with: go-version: ${{ env.GO_VERSION }} check-latest: true cache: true - name: Run build - uses: nick-invision/retry@943e742917ac94714d2f408a0e8320f2d1fcafcd + uses: nick-invision/retry@14672906e672a08bd6eeb15720e9ed3ce869cdd4 with: max_attempts: 3 retry_on: error @@ -819,43 +298,23 @@ jobs: 
add-projects: name: add-projects runs-on: ubuntu-latest - needs: build-proto permissions: contents: read steps: - name: Harden Runner - uses: step-security/harden-runner@55d479fb1c5bcad5a4f9099a5d9f37c8857b2845 # v1 + uses: step-security/harden-runner@eb238b55efaa70779f274895e782ed17c84f2895 # v1 with: egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs - - name: Install Protoc - uses: arduino/setup-protoc@149f6c87b92550901b26acd1632e11c3662e381f # v1.3.0 - with: - version: ${{ env.PROTOC_VERSION }} - repo-token: ${{ secrets.GITHUB_TOKEN }} - - name: Cache builds - # https://github.com/mvdan/github-actions-golang#how-do-i-set-up-caching-between-builds - uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1 - with: - path: | - ~/go/pkg/mod - ~/.cache/go-build - ~/Library/Caches/go-build - %LocalAppData%\go-build - key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-go- - name: Clone the code - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v2.3.4 - with: - fetch-depth: 0 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Setup Go - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v2.2.0 + uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 with: go-version: ${{ env.GO_VERSION }} check-latest: true cache: true - name: Run build - uses: nick-invision/retry@943e742917ac94714d2f408a0e8320f2d1fcafcd + uses: nick-invision/retry@14672906e672a08bd6eeb15720e9ed3ce869cdd4 with: max_attempts: 3 retry_on: error @@ -863,6 +322,7 @@ jobs: command: | go env -w GOFLAGS=-mod=mod make add-projects + git diff --exit-code validate-projects: name: validate-projects runs-on: ubuntu-latest @@ -870,7 +330,7 @@ jobs: contents: read steps: - name: Harden Runner - uses: step-security/harden-runner@55d479fb1c5bcad5a4f9099a5d9f37c8857b2845 # v1 + uses: step-security/harden-runner@eb238b55efaa70779f274895e782ed17c84f2895 # 
v1 with: egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs @@ -880,17 +340,17 @@ jobs: version: ${{ env.PROTOC_VERSION }} repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Clone the code - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v2.3.4 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: fetch-depth: 0 - name: Setup Go - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v2.2.0 + uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v2.2.0 with: go-version: ${{ env.GO_VERSION }} check-latest: true cache: true - name: Run build - uses: nick-invision/retry@943e742917ac94714d2f408a0e8320f2d1fcafcd + uses: nick-invision/retry@14672906e672a08bd6eeb15720e9ed3ce869cdd4 with: max_attempts: 3 retry_on: error @@ -905,12 +365,12 @@ jobs: contents: read steps: - name: Harden Runner - uses: step-security/harden-runner@55d479fb1c5bcad5a4f9099a5d9f37c8857b2845 # v1 + uses: step-security/harden-runner@eb238b55efaa70779f274895e782ed17c84f2895 # v1 with: egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v2.3.4 - - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v2.2.0 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v2.2.0 with: go-version: ${{ env.GO_VERSION }} check-latest: true diff --git a/.github/workflows/publishimage.yml b/.github/workflows/publishimage.yml index ec6593aa8a7..0d5c956e00a 100644 --- a/.github/workflows/publishimage.yml +++ b/.github/workflows/publishimage.yml @@ -21,6 +21,9 @@ on: branches: - main +env: + GO_VERSION: 1.21 + jobs: publishimage: runs-on: ubuntu-latest @@ -32,23 +35,23 @@ jobs: COSIGN_EXPERIMENTAL: "true" steps: - name: Harden Runner - uses: step-security/harden-runner@55d479fb1c5bcad5a4f9099a5d9f37c8857b2845 + uses: 
step-security/harden-runner@eb238b55efaa70779f274895e782ed17c84f2895 with: egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs - name: Clone the code - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: fetch-depth: 0 - name: Setup Go - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 + uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 with: - go-version-file: go.mod # use version from go.mod so it stays in sync + go-version: ${{ env.GO_VERSION }} check-latest: true - name: install ko uses: ko-build/setup-ko@ace48d793556083a76f1e3e6068850c1f4a369aa # v0.6 - name: publishimage - uses: nick-invision/retry@943e742917ac94714d2f408a0e8320f2d1fcafcd + uses: nick-invision/retry@14672906e672a08bd6eeb15720e9ed3ce869cdd4 with: max_attempts: 3 retry_on: error @@ -58,7 +61,7 @@ jobs: make install make scorecard-ko - name: Install Cosign - uses: sigstore/cosign-installer@dd6b2e2b610a11fd73dd187a43d57cc1394e35f9 + uses: sigstore/cosign-installer@9614fae9e5c5eddabb09f90a270fcb487c9f7149 - name: Sign image run: | cosign sign --yes ghcr.io/${{github.repository_owner}}/scorecard/v4:${{ github.sha }} diff --git a/.github/workflows/scdiff.yml b/.github/workflows/scdiff.yml new file mode 100644 index 00000000000..cc439933095 --- /dev/null +++ b/.github/workflows/scdiff.yml @@ -0,0 +1,110 @@ +name: scdiff PR evaluation +on: + issue_comment: + types: [created] + +permissions: read-all + +env: + GO_VERSION: 1.21 + +jobs: + share-link: + if: ${{ (github.event.issue.pull_request) && (contains(github.event.comment.body, '/scdiff generate')) }} + runs-on: [ubuntu-latest] + permissions: + pull-requests: write # to create the PR comment + steps: + - name: share link to workflow run + uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 + with: + script: | + github.rest.issues.createComment({ + issue_number: 
context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + body: `[Here's a link to the scdiff run](https://github.com/${context.repo.owner}/${context.repo.repo}/actions/runs/${context.runId})` + }) + + golden-test: + if: ${{ (github.event.issue.pull_request) && (contains(github.event.comment.body, '/scdiff generate')) }} + runs-on: [ubuntu-latest] + steps: + - name: create file of repos to analyze + run: | + cat <<EOF > $HOME/repos.txt + https://github.com/airbnb/lottie-web + https://github.com/apache/tomcat + https://github.com/Azure/azure-functions-dotnet-worker + https://github.com/cncf/xds + https://github.com/google/go-cmp + https://github.com/google/highwayhash + https://github.com/googleapis/google-api-php-client + https://github.com/jacoco/jacoco + https://github.com/ossf/scorecard + https://github.com/pallets/jinja + https://github.com/polymer/polymer + https://github.com/rust-random/getrandom + https://github.com/yaml/libyaml + https://gitlab.com/baserow/baserow + https://gitlab.com/cryptsetup/cryptsetup + EOF + - name: configure scdiff + id: config + env: + COMMENT_BODY: ${{ github.event.comment.body }} + uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 + with: + script: | + const allowedAssociations = ["COLLABORATOR", "CONTRIBUTOR", "MEMBER", "OWNER"]; + authorAssociation = '${{ github.event.comment.author_association }}' + if (!allowedAssociations.includes(authorAssociation)) { + core.setFailed("You don't have access to run scdiff"); + } + + const response = await github.rest.pulls.get({ + owner: context.repo.owner, + repo: context.repo.repo, + pull_number: context.issue.number, + }) + core.setOutput('base', response.data.base.sha) + core.setOutput('head', response.data.head.sha) + + checks = '""' + const commentBody = process.env.COMMENT_BODY + const regex = /\/scdiff generate ([^ ]+)/; + const found = commentBody.match(regex); + if (found && found.length == 2) { + checks = found[1] + }
core.exportVariable('SCORECARD_CHECKS', checks) + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + ref: ${{ steps.config.outputs.base }} + - name: Setup Go + uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 + with: + go-version: ${{ env.GO_VERSION }} + check-latest: true + - name: generate before results + env: + GITHUB_AUTH_TOKEN: ${{ secrets.GH_AUTH_TOKEN }} + GITLAB_AUTH_TOKEN: ${{ secrets.GITLAB_TOKEN }} + run: | + go run cmd/internal/scdiff/main.go generate \ + --repos $HOME/repos.txt \ + --checks $SCORECARD_CHECKS > $HOME/before.json + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + ref: ${{ steps.config.outputs.head }} + - name: generate after results + env: + GITHUB_AUTH_TOKEN: ${{ secrets.GH_AUTH_TOKEN }} + GITLAB_AUTH_TOKEN: ${{ secrets.GITLAB_TOKEN }} + run: | + go run cmd/internal/scdiff/main.go generate \ + --repos $HOME/repos.txt \ + --checks $SCORECARD_CHECKS > $HOME/after.json + - name: compare results + run: | + go run cmd/internal/scdiff/main.go compare $HOME/before.json $HOME/after.json diff --git a/.github/workflows/scorecard-analysis.yml b/.github/workflows/scorecard-analysis.yml index f1b3e1ea743..ccdeff43b64 100644 --- a/.github/workflows/scorecard-analysis.yml +++ b/.github/workflows/scorecard-analysis.yml @@ -22,10 +22,10 @@ jobs: steps: - name: "Checkout code" - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: "Run analysis" - uses: ossf/scorecard-action@08b4669551908b1024bb425080c797723083c031 # v2.2.0 + uses: ossf/scorecard-action@0864cf19026789058feabb7e87baa5f140aac736 # v2.3.1 with: results_file: results.sarif results_format: sarif @@ -40,7 +40,7 @@ jobs: # https://docs.github.com/en/actions/advanced-guides/storing-workflow-data-as-artifacts # Optional. 
- name: "Upload artifact" - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3 + uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 # v3 with: name: SARIF file path: results.sarif diff --git a/.github/workflows/slsa-goreleaser.yml b/.github/workflows/slsa-goreleaser.yml index 86cdc5e70c9..6b584eb1066 100644 --- a/.github/workflows/slsa-goreleaser.yml +++ b/.github/workflows/slsa-goreleaser.yml @@ -7,6 +7,9 @@ on: permissions: read-all +env: + GO_VERSION: 1.21 + jobs: # Generate ldflags dynamically. args: @@ -16,7 +19,7 @@ jobs: go-binary-name: ${{ steps.build.outputs.go-binary-name }} steps: - id: checkout - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v2.3.4 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: fetch-depth: 0 - id: ldflags @@ -29,9 +32,9 @@ jobs: contents: write actions: read needs: args - uses: slsa-framework/slsa-github-generator/.github/workflows/builder_go_slsa3.yml@v1.7.0 #7f4fdb871876c23e455853d694197440c5a91506 + uses: slsa-framework/slsa-github-generator/.github/workflows/builder_go_slsa3.yml@v1.9.0 #7f4fdb871876c23e455853d694197440c5a91506 with: - go-version: 1.19 + go-version: ${{ env.GO_VERSION }} evaluated-envs: "VERSION_LDFLAGS:${{needs.args.outputs.ldflags}}" verification: @@ -41,15 +44,15 @@ jobs: permissions: read-all steps: - name: Install the verifier - uses: slsa-framework/slsa-verifier/actions/installer@v2.3.0 + uses: slsa-framework/slsa-verifier/actions/installer@v2.4.1 - name: Download the artifact - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + uses: actions/download-artifact@6b208ae046db98c579e8a3aa621ab581ff575935 # v4.1.1 with: name: "${{ needs.build.outputs.go-binary-name }}.intoto.jsonl" - name: Download the artifact - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + uses: actions/download-artifact@6b208ae046db98c579e8a3aa621ab581ff575935 # v4.1.1 with: 
name: ${{ needs.build.outputs.go-binary-name }} diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index ba9642a6be7..b74a81041d7 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -27,19 +27,20 @@ jobs: runs-on: ubuntu-latest steps: - name: Harden Runner - uses: step-security/harden-runner@55d479fb1c5bcad5a4f9099a5d9f37c8857b2845 # v1 + uses: step-security/harden-runner@eb238b55efaa70779f274895e782ed17c84f2895 # v1 with: egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs - - uses: actions/stale@1160a2240286f5da8ec72b1c0816ce2481aabf84 # v3.0.18 + - uses: actions/stale@28ca1036281a5e5922ead5184a1bbf96e5fc984e # v3.0.18 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - stale-issue-message: 'Stale issue message' - stale-pr-message: 'Stale pull request message' - stale-issue-label: 'no-issue-activity' - exempt-issue-labels: 'wishlist,slsa,priority,bug,core feature,enhancement,good first issue,help wanted,needs discussion' - stale-pr-label: 'no-pr-activity' + stale-issue-message: 'This issue is stale because it has been open for 60 days with no activity.' 
+ stale-pr-message: 'This pull request is stale because it has been open for 10 days with no activity' + exempt-issue-labels: 'priority,bug,good first issue,backlog,help wanted' + exempt-issue-milestones: 'Structured results' exempt-pr-labels: 'awaiting-approval,work-in-progress' days-before-pr-stale: '10' days-before-pr-close: '20' days-before-issue-stale: '60' + days-before-issue-close: -1 + operations-per-run: '100' diff --git a/.github/workflows/verify.yml b/.github/workflows/verify.yml index af59979b66d..e6024834fdd 100644 --- a/.github/workflows/verify.yml +++ b/.github/workflows/verify.yml @@ -26,12 +26,12 @@ jobs: runs-on: ubuntu-latest steps: - name: Harden Runner - uses: step-security/harden-runner@55d479fb1c5bcad5a4f9099a5d9f37c8857b2845 # v1 + uses: step-security/harden-runner@eb238b55efaa70779f274895e782ed17c84f2895 # v1 with: egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs - name: Verifier action id: verifier - uses: kubernetes-sigs/kubebuilder-release-tools@4f3d1085b4458a49ed86918b4b55505716715b77 # v0.3.0 + uses: kubernetes-sigs/kubebuilder-release-tools@012269a88fa4c034a0acf1ba84c26b195c0dbab4 # v0.4.3 with: github_token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.gitignore b/.gitignore index d67e51d87dc..9b53679705c 100644 --- a/.gitignore +++ b/.gitignore @@ -8,7 +8,6 @@ clients/githubrepo/roundtripper/tokens/server/github-auth-server clients/githubrepo/roundtripper/tokens/server/github-auth-server.docker cron/internal/data/add/add cron/internal/data/validate/validate -cron/internal/data/update/projects-update cron/internal/controller/controller cron/internal/controller/controller.docker cron/internal/worker/worker @@ -54,3 +53,8 @@ githubrepo.tar.gz # goreleaser dist/* + +# scdiff, ensure the files detailed in RELEASE.md aren't committed +repos.txt +oldRelease.json +newRelease.json diff --git a/.golangci.yml b/.golangci.yml index 80a62880379..6468694225f 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,7 
+1,7 @@ --- run: concurrency: 6 - deadline: 5m + timeout: 5m issues: # Maximum issues count per one linter. # Set to 0 to disable. @@ -18,18 +18,21 @@ issues: - funlen - goconst - gocyclo + - goerr113 + - lll + - wrapcheck skip-files: - cron/data/request.pb.go # autogenerated linters: - disable-all: true enable: - asciicheck - - depguard - dogsled - errcheck + - errname - errorlint - exhaustive - exportloopref + - forbidigo - gci - gochecknoinits - gocognit @@ -43,6 +46,9 @@ linters: - gofumpt - goheader - goimports + # manage use of replace, retract and exclude directives (see https://github.com/ossf/scorecard/pull/3440#issuecomment-1708904830) + # https://golangci-lint.run/usage/linters/#gomoddirectives + - gomoddirectives - gomodguard - goprintffuncname - gosec @@ -51,29 +57,39 @@ linters: - ineffassign - lll - makezero + - mirror - misspell - nakedret - nestif + - nolintlint - predeclared - staticcheck - stylecheck + - tenv - thelper - typecheck - unconvert - unused + - usestdlibvars - whitespace - wrapcheck + disable: + - exhaustruct # initializing every struct makes tests longer without much benefit (spencerschrock) + - testpackage # tests don't need their own package (spencerschrock) + presets: + - bugs + - test linters-settings: errcheck: check-type-assertions: true check-blank: true - errorlint: - # TODO remove this when project migrates to golang 1.20 - # https://golangci-lint.run/usage/linters/#errorlint - errorf-multi: false exhaustive: # https://golangci-lint.run/usage/linters/#exhaustive default-signifies-exhaustive: true + forbidigo: + forbid: + - p: "^fmt\\.Print.*$" + msg: "Do not commit print statements. Output to stdout interferes with users who redirect JSON results to files." 
govet: enable: - fieldalignment @@ -139,6 +155,10 @@ linters-settings: - ptrToRefParam - typeUnparen - unnecessaryBlock + nolintlint: + # `//nolint` should mention specific linter such as `//nolint:my-linter` + # Overly broad directives can hide unrelated issues + require-specific: true wrapcheck: ignorePackageGlobs: - github.com/ossf/scorecard/v4/checks/fileparser diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 24c91297357..7953aa09fe3 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -16,7 +16,10 @@ project. This document describes the contribution guidelines for the project. * [How to build scorecard locally](#how-to-build-scorecard-locally) * [PR Process](#pr-process) * [What to do before submitting a pull request](#what-to-do-before-submitting-a-pull-request) +* [Changing Score Results](#changing-score-results) +* [Linting](#linting) * [Permission for GitHub personal access tokens](#permission-for-github-personal-access-tokens) +* [Adding New Probes](#adding-new-probes) * [Where the CI Tests are configured](#where-the-ci-tests-are-configured) * [dailyscore-cronjob](#dailyscore-cronjob) * [Deploying the cron job](#deploying-the-cron-job) @@ -43,7 +46,7 @@ You must install these tools: 1. [`git`](https://help.github.com/articles/set-up-git/): For source control 1. [`go`](https://golang.org/doc/install): You need go version - [v1.17](https://golang.org/dl/) or higher. + [v1.21](https://golang.org/dl/) or higher. 1. [`docker`](https://docs.docker.com/engine/install/): `v18.9` or higher. @@ -126,6 +129,9 @@ assumed to match the PR. For instance, if you have a bugfix in with a breaking change, it's generally encouraged to submit the bugfix separately, but if you must put them in one PR, you should mark the whole PR as breaking. +When a maintainer reviews your code, it is generally preferred to solve each individual +review with small fixes without rebasing, so the maintainer can assess each fix separately. 
+ ## What to do before submitting a pull request Following the targets that can be used to test your changes locally. @@ -139,6 +145,25 @@ Make sure to signoff your commits before submitting a pull request. https://docs.pi-hole.net/guides/github/how-to-signoff/ +When developing locally, the following commands are useful to run regularly to check unit tests and linting. + +| Command | Description | Is called in the CI? | +| make unit-test | Runs unit tests only. `make all` will also run this. | yes | +| make check-linter | Checks linter issues only. `make all` will also run this. | yes | + +## Changing Score Results + +As a general rule of thumb, pull requests that change Scorecard score results will need a good reason to do so to get merged. +It is a good idea to discuss such changes in a GitHub issue before implementing them. + +## Linting + +Most linter issues can be fixed with `golangci-lint` with the following command: + +``` +make fix-linter +``` + ## Permission for GitHub personal access tokens The personal access token need the following scopes: @@ -154,16 +179,24 @@ The personal access token need the following scopes: ## How do I add additional GitHub repositories to be scanned by scorecard weekly? -Scorecard maintains the list of repositories in a file +Scorecard maintains the list of GitHub repositories in a file https://github.com/ossf/scorecard/blob/main/cron/internal/data/projects.csv -Submit a PR for this file and scorecard would start scanning in subsequent runs. +GitLab repositories are listed in: +https://github.com/ossf/scorecard/blob/main/cron/internal/data/gitlab-projects.csv + +Append your desired repositories to the end of these files, then run `make add-projects`. +Commit the changes, and submit a PR and scorecard would start scanning in subsequent runs. ## Adding New Checks See [checks/write.md](checks/write.md). When you add new checks, you need to also update the docs. 
+## Adding New Probes + +See [probes/README.md](probes/README.md) for information about the probes. + ## Updating Docs A summary for each check needs to be included in the `README.md`. diff --git a/CONTRIBUTOR_LADDER.md b/CONTRIBUTOR_LADDER.md new file mode 100644 index 00000000000..e0b3c6762a1 --- /dev/null +++ b/CONTRIBUTOR_LADDER.md @@ -0,0 +1,180 @@ +# OpenSSF Scorecard Contributor ladder + +This document outlines the various contributor roles in the Scorecard project, along with their respective prerequisites and responsibilities. +It also defines the process by which users can request to change roles. + +- [Roles](#roles) + - [Community participants](#community-participants) + - [Community members](#community-members) + - [Triagers](#triagers) + - [Maintainers](#maintainers) +- [Inactive members](#inactive-members) + +## Roles + +### Community participants + +Community participants engage with Scorecard, +contributing their time and energy in discussions or just generally helping out. + +#### Pre-requisites + +- Must follow the [OpenSSF Code of Conduct] +- Must follow the [Contribution Guide] + +#### Responsibilities + +- Keep it up! + +### Community Members + +Community Members are active contributors in the community. +They can have issues and PRs assigned to them, participate through GitHub teams, +and pre-submit tests are automatically run for their PRs. +Members are expected to remain active contributors to the community. + +**Defined by:** Member of the OpenSSF GitHub organization + +#### Pre-requisites + +- Enabled two-factor authentication on their GitHub account +- Have made multiple contributions to the project or community. + Contributions may include, but are not limited to: + - Authoring or reviewing PRs on GitHub. At least one PR must be **merged**. + - Filing or commenting on issues on GitHub + - Contributing to a project, or community discussions (e.g. 
meetings, Slack, + email discussion forums, Stack Overflow) +- Active contributor to Scorecard or a relevant OpenSSF SIG + +#### Responsibilities + +- Can be assigned issues and PRs +- Responsive to issues and PRs assigned to them +- Others can ask for reviews with a `/cc @username`. +- Responsive to mentions of teams they are members of +- Active owner of code they have contributed (unless ownership is explicitly transferred) + - Ensures code is well tested and that tests consistently pass + - Addresses bugs or issues discovered after code is accepted + +#### Privileges + +- Tests run against their PRs automatically + +#### Promotion process + +- Sponsored by 1 maintainer or 2 triagers. **Note the following requirements for sponsors**: + - Sponsors must have close interactions with the prospective Member – e.g. + code/design/proposal review, coordinating on issues, etc. + - Sponsors must be reviewers or approvers in at least one CODEOWNERS file. + - Sponsors should preferably be from multiple OpenSSF member companies to incentivize community integration. +- Open an issue in the project's repository + - Ensure your sponsors are `@mentioned` + - Describe and/or link to all your relevant contributions to the project + (or other OpenSSF projects) + - Sponsoring reviewers must comment on the issue/PR confirming their sponsorship + +### Triagers + +Triagers help a project by reviewing issues and code for quality and correctness. +They are knowledgeable about the project's codebase (in its entirety or a specific section) +and software engineering principles. 
+ +**Defined by:** "triage" permission in the project + +#### Pre-requisites + +- Community Member for at least 3 months +- Helped to triage issues and pull requests +- Knowledgeable about the codebase + +#### Responsibilities + +- Read through issues and PRs + - Answer questions when possible + - Add relevant labels + - Draw maintainers' attention (via `@mention`) if relevant + - Close issue (as "completed" or "not planned") if necessary +- Help maintain project quality control via [code reviews] on PRs + - Focus on code quality and correctness, including testing and factoring + - May also review for more holistic issues, but not a requirement +- Be responsive to review requests +- May be assigned PRs to review if in area of expertise +- Assigned test bugs related to the project of expertise + +#### Privileges + +- Same as for Community Members +- Triager status may be a precondition to accepting large code contributions + +#### Promotion process + +- Sponsored by a maintainer + - With no objections from other maintainers + - Done through issue or PR to update the CODEOWNERS file +- May self-nominate or be nominated by a maintainer + - In case of self-nomination, sponsor must comment approval on the issue/PR + +### Maintainers + +Maintainers are responsible for the project's overall health. +They are the only ones who can approve and merge code contributions. 
+While triage and code review is focused on code quality and correctness, +approval is focused on holistic acceptance of a contribution including: + +- backwards/forwards compatibility +- adherence to API and style conventions +- subtle performance and correctness issues +- interactions with other parts of the system +- consistency between code and documentation + +**Defined by:** "Maintain" permissions in the project and an entry in its CODEOWNERS file + +#### Pre-requisites + +- Triager for at least 3 months +- Reviewed at least 10 substantial PRs to the codebase +- Reviewed or got at least 30 PRs merged to the codebase + +#### Responsibilities + +- Demonstrate sound technical judgment +- Maintain project quality control via code reviews + - Focus on holistic acceptance of contribution +- Be responsive to review requests +- Mentor contributors and triagers +- Approve and merge code contributions as appropriate +- Participate in OpenSSF or Scorecard-specific community meetings, if possible +- Facilitating Scorecard-specific community meetings, if possible and comfortable + +#### Privileges + +- Same as for Triager +- Maintainer status may be a precondition to accepting especially large code contributions + +#### Promotion process +- Sponsored by a maintainer + - With no objections from other maintainers + - Done through PR to update the CODEOWNERS file +- May self-nominate or be nominated by a maintainer + - In case of self-nomination, sponsor must comment approval on the PR + +## Inactive members +A core principle in maintaining a healthy community is encouraging active participation. +It is inevitable that a contributor's focus will change over time +and there is no expectation they'll actively contribute forever. + +Any contributor at any level described above may write an issue (or PR, if CODEOWNER changes are necessary) +asking to step down to a lighter-weight tier or to depart the project entirely. 
+Such requests will hopefully come after thoughtful conversations with the rest of the team +and with sufficient forewarning for the others to prepare. However, sometimes "life happens". +Therefore, the change in responsibilities will be understood to take immediate effect, +regardless of whether the issue/PR has been acknowledged or merged. + +However, should a Triager or above be deemed inactive for a significant period, any +Community Member or above may write an issue/PR requesting their removal from the ranks +(and `@mentioning` the inactive contributor in the hopes of drawing their attention). +The request must receive support (in comments) from a majority of Maintainers to proceed. + + +[OpenSSF Code of Conduct]: https://openssf.org/community/code-of-conduct/ +[Contribution Guide]: ./CONTRIBUTING.md diff --git a/Dockerfile b/Dockerfile index dd777680a3c..0f9cbb4ea52 100644 --- a/Dockerfile +++ b/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-FROM golang:1.19@sha256:6b3fa4b908676231b50acbbc00e84d8cee9c6ce072b1175c0ff352c57d8a612f AS base +FROM golang:1.21.6@sha256:76aadd914a29a2ee7a6b0f3389bb2fdb87727291d688e1d972abe6c0fa6f2ee0 AS base WORKDIR /src ENV CGO_ENABLED=0 COPY go.* ./ @@ -24,6 +24,6 @@ ARG TARGETOS ARG TARGETARCH RUN CGO_ENABLED=0 make build-scorecard -FROM gcr.io/distroless/base:nonroot@sha256:99133cb0878bb1f84d1753957c6fd4b84f006f2798535de22ebf7ba170bbf434 +FROM gcr.io/distroless/base:nonroot@sha256:29da700a46816467c7cb91058f53eac4170a4a25ac8551d316d9fd38e2c58bdf COPY --from=build /src/scorecard / ENTRYPOINT [ "/scorecard" ] diff --git a/MAINTAINERS.md b/MAINTAINERS.md new file mode 100644 index 00000000000..79214d94f96 --- /dev/null +++ b/MAINTAINERS.md @@ -0,0 +1,21 @@ +# Maintainers + +## `scorecard-maintainers` + +- Stephen Augustus ([@justaugustus](https://github.com/justaugustus)), Cisco +- Raghav Kaul ([@raghavkaul](https://github.com/raghavkaul)), Google +- Spencer Schrock ([@spencerschrock](https://github.com/spencerschrock)), Google +- Azeem Shaikh ([@azeemshaikh38](https://github.com/azeemshaikh38)), Google +- Laurent Simon ([@laurentsimon](https://github.com/laurentsimon)), Google +- Naveen Srinivasan ([@naveensrinivasan](https://github.com/naveensrinivasan)), Independent + +## `scorecard-doc-maintainers` + +- Kara Olive ([@olivekl](https://github.com/olivekl)), Google + +## Emeritus + +Former maintainers are listed here. +Thanks for your contributions to Scorecard! 
+ +- diff --git a/Makefile b/Makefile index fb27d9d387c..80b812753c8 100644 --- a/Makefile +++ b/Makefile @@ -35,10 +35,6 @@ KO := $(TOOLS_BIN_DIR)/ko $(KO): $(TOOLS_DIR)/go.mod cd $(TOOLS_DIR); GOBIN=$(TOOLS_BIN_DIR) go install github.com/google/ko -STUNNING_TRIBBLE := $(TOOLS_BIN_DIR)/stunning-tribble -$(STUNNING_TRIBBLE): $(TOOLS_DIR)/go.mod - cd $(TOOLS_DIR); GOBIN=$(TOOLS_BIN_DIR) go install github.com/naveensrinivasan/stunning-tribble - MOCKGEN := $(TOOLS_BIN_DIR)/mockgen $(MOCKGEN): $(TOOLS_DIR)/go.mod cd $(TOOLS_DIR); GOBIN=$(TOOLS_BIN_DIR) go install github.com/golang/mock/mockgen @@ -73,7 +69,6 @@ $(PROTOC): install: ## Installs required binaries. install: $(GOLANGCI_LINT) \ $(KO) \ - $(STUNNING_TRIBBLE) \ $(PROTOC_GEN_GO) $(PROTOC) \ $(MOCKGEN) \ $(GINKGO) \ @@ -99,19 +94,29 @@ check-linter: | $(GOLANGCI_LINT) # Run golangci-lint linter $(GOLANGCI_LINT) run -c .golangci.yml -add-projects: ## Adds new projects to ./cron/internal/data/projects.csv +fix-linter: ## Install and run golang linter, with fixes +fix-linter: | $(GOLANGCI_LINT) + # Run golangci-lint linter + $(GOLANGCI_LINT) run -c .golangci.yml --fix + +add-projects: ## Adds new projects to ./cron/internal/data/projects.csv and ./cron/internal/data/gitlab-projects.csv add-projects: ./cron/internal/data/projects.csv | build-add-script - # Add new projects to ./cron/internal/data/projects.csv + # GitHub ./cron/internal/data/add/add ./cron/internal/data/projects.csv ./cron/internal/data/projects.new.csv mv ./cron/internal/data/projects.new.csv ./cron/internal/data/projects.csv + # GitLab + ./cron/internal/data/add/add ./cron/internal/data/gitlab-projects.csv ./cron/internal/data/gitlab-projects.new.csv + mv ./cron/internal/data/gitlab-projects.new.csv ./cron/internal/data/gitlab-projects.csv validate-projects: ## Validates ./cron/internal/data/projects.csv validate-projects: ./cron/internal/data/projects.csv | build-validate-script # Validate ./cron/internal/data/projects.csv 
./cron/internal/data/validate/validate ./cron/internal/data/projects.csv + ./cron/internal/data/validate/validate ./cron/internal/data/gitlab-projects.csv + ./cron/internal/data/validate/validate ./cron/internal/data/gitlab-projects-releasetest.csv tree-status: | all-targets-update-dependencies ## Verify tree is clean and all changes are committed - # Verify the tree is clean and all changes are commited + # Verify the tree is clean and all changes are committed ./scripts/tree-status ############################################################################### @@ -120,7 +125,7 @@ tree-status: | all-targets-update-dependencies ## Verify tree is clean and all c ## Build all cron-related targets build-cron: build-controller build-worker build-cii-worker \ build-shuffler build-bq-transfer build-github-server \ - build-webhook build-add-script build-validate-script build-update-script + build-webhook build-add-script build-validate-script build-targets = generate-mocks generate-docs build-scorecard build-cron build-proto build-attestor .PHONY: build $(build-targets) @@ -161,8 +166,7 @@ cmd/internal/nuget/nuget_mockclient.go: cmd/internal/nuget/client.go | $(MOCKGEN $(MOCKGEN) -source=cmd/internal/nuget/client.go -destination=cmd/internal/nuget/nuget_mockclient.go -package=nuget -copyright_file=clients/mockclients/license.txt generate-docs: ## Generates docs -generate-docs: validate-docs docs/checks.md -docs/checks.md: docs/checks/internal/checks.yaml docs/checks/internal/*.go docs/checks/internal/generate/*.go +generate-docs: validate-docs docs/checks.md docs/checks/internal/checks.yaml docs/checks/internal/*.go docs/checks/internal/generate/*.go # Generating checks.md go run ./docs/checks/internal/generate/main.go docs/checks.md @@ -251,7 +255,7 @@ build-attestor-docker: ## Build scorecard-attestor Docker image build-attestor-docker: DOCKER_BUILDKIT=1 docker build . 
--file attestor/Dockerfile \ --tag scorecard-attestor:latest \ - --tag scorecard-atttestor:$(GIT_HASH) + --tag scorecard-attestor:$(GIT_HASH) TOKEN_SERVER_DEPS = $(shell find clients/githubrepo/roundtripper/tokens/ -iname "*.go") build-github-server: ## Build GitHub token server @@ -295,12 +299,6 @@ cron/internal/data/validate/validate: cron/internal/data/validate/*.go cron/data # Run go build on the validate script cd cron/internal/data/validate && CGO_ENABLED=0 go build -trimpath -a -ldflags '$(LDFLAGS)' -o validate -build-update-script: ## Runs go build on the update script -build-update-script: cron/internal/data/update/projects-update -cron/internal/data/update/projects-update: cron/internal/data/update/*.go cron/data/*.go - # Run go build on the update script - cd cron/internal/data/update && CGO_ENABLED=0 go build -trimpath -a -tags netgo -ldflags '$(LDFLAGS)' -o projects-update - docker-targets = scorecard-docker cron-controller-docker cron-worker-docker cron-cii-worker-docker cron-bq-transfer-docker cron-webhook-docker cron-github-server-docker .PHONY: dockerbuild $(docker-targets) dockerbuild: $(docker-targets) @@ -336,20 +334,20 @@ endif e2e-pat: ## Runs e2e tests. Requires GITHUB_AUTH_TOKEN env var to be set to GitHub personal access token e2e-pat: build-scorecard check-env | $(GINKGO) # Run e2e tests. GITHUB_AUTH_TOKEN with personal access token must be exported to run this - TOKEN_TYPE="PAT" $(GINKGO) --race -p -v -cover -coverprofile=e2e-coverage.out --keep-separate-coverprofiles ./... + TOKEN_TYPE="PAT" $(GINKGO) --race -p -v -coverprofile=e2e-coverage.out -coverpkg=./... -r ./... e2e-gh-token: ## Runs e2e tests. Requires GITHUB_AUTH_TOKEN env var to be set to default GITHUB_TOKEN e2e-gh-token: build-scorecard check-env | $(GINKGO) # Run e2e tests. GITHUB_AUTH_TOKEN set to secrets.GITHUB_TOKEN must be used to run this. - TOKEN_TYPE="GITHUB_TOKEN" $(GINKGO) --race -p -v -cover -coverprofile=e2e-coverage.out --keep-separate-coverprofiles ./... 
+ GITLAB_AUTH_TOKEN="" TOKEN_TYPE="GITHUB_TOKEN" $(GINKGO) --race -p -v -coverprofile=e2e-coverage.out --keep-separate-coverprofiles ./... e2e-gitlab-token: ## Runs e2e tests that require a GITLAB_TOKEN e2e-gitlab-token: build-scorecard check-env-gitlab | $(GINKGO) - TEST_GITLAB_EXTERNAL=1 TOKEN_TYPE="GITLAB_PAT" $(GINKGO) --race -p -vv --focus '.*GitLab' ./... + TEST_GITLAB_EXTERNAL=1 TOKEN_TYPE="GITLAB_PAT" $(GINKGO) --race -p -vv -coverprofile=e2e-coverage.out --keep-separate-coverprofiles --focus '.*GitLab' ./... e2e-gitlab: ## Runs e2e tests for GitLab only. TOKEN_TYPE is not used (since these are public APIs), but must be set to something e2e-gitlab: build-scorecard | $(GINKGO) - TEST_GITLAB_EXTERNAL=1 TOKEN_TYPE="PAT" $(GINKGO) --race -p -vv --focus ".*GitLab" ./... + TEST_GITLAB_EXTERNAL=1 TOKEN_TYPE="PAT" $(GINKGO) --race -p -vv -coverprofile=e2e-coverage.out --keep-separate-coverprofiles --focus ".*GitLab" ./... e2e-attestor: ## Runs e2e tests for scorecard-attestor cd attestor/e2e; go test -covermode=atomic -coverprofile=e2e-coverage.out; cd ../.. 
@@ -450,4 +448,4 @@ cron-github-server-ko: | $(KO) $(KOCACHE_PATH) --tags latest,$(GIT_VERSION),$(GIT_HASH) \ github.com/ossf/scorecard/v4/clients/githubrepo/roundtripper/tokens/server -############################################################################### \ No newline at end of file +############################################################################### diff --git a/README.md b/README.md index 0201b8fbc00..bf1a6654f9b 100644 --- a/README.md +++ b/README.md @@ -1,14 +1,14 @@ # OpenSSF Scorecard -[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/ossf/scorecard/badge)](https://api.securityscorecards.dev/projects/github.com/ossf/scorecard) -[![OpenSSF Best Practices](https://bestpractices.coreinfrastructure.org/projects/5621/badge)](https://bestpractices.coreinfrastructure.org/projects/5621) +[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/ossf/scorecard/badge)](https://securityscorecards.dev/viewer/?uri=github.com/ossf/scorecard) +[![OpenSSF Best Practices](https://www.bestpractices.dev/projects/5621/badge)](https://www.bestpractices.dev/projects/5621) ![build](https://github.com/ossf/scorecard/workflows/build/badge.svg?branch=main) ![CodeQL](https://github.com/ossf/scorecard/workflows/CodeQL/badge.svg?branch=main) [![Go Reference](https://pkg.go.dev/badge/github.com/ossf/scorecard/v4.svg)](https://pkg.go.dev/github.com/ossf/scorecard/v4) [![Go Report Card](https://goreportcard.com/badge/github.com/ossf/scorecard/v4)](https://goreportcard.com/report/github.com/ossf/scorecard/v4) [![codecov](https://codecov.io/gh/ossf/scorecard/branch/main/graph/badge.svg?token=PMJ6NAN9J3)](https://codecov.io/gh/ossf/scorecard) -[![Slack](https://slack.babeljs.io/badge.svg)](https://slack.openssf.org/#security_scorecards) [![SLSA 3](https://slsa.dev/images/gh-badge-level3.svg)](https://slsa.dev) 
+[![Slack](https://img.shields.io/badge/slack-openssf/security_scorecards-white.svg?logo=slack)](https://slack.openssf.org/#security_scorecards) @@ -16,6 +16,7 @@ - [What Is Scorecard?](#what-is-scorecard) - [Prominent Scorecard Users](#prominent-scorecard-users) +- [View a Project's Score](#view-a-projects-score) - [Scorecard's Public Data](#public-data) ## Using Scorecard @@ -34,6 +35,7 @@ - [Default Scorecard Checks](#scorecard-checks) - [Detailed Check Documentation](docs/checks.md) (Scoring Criteria, Risks, and Remediation) +- [Beginner's Guide to Scorecard Checks](#beginners-guide-to-scorecard-checks) ## Other Important Recommendations - [Two-factor Authentication (2FA)](#two-factor-authentication-2fa) @@ -91,6 +93,17 @@ metrics. Prominent projects that use Scorecard include: - [sos.dev](https://sos.dev) - [deps.dev](https://deps.dev) +### View a Project's Score + +To see scores for projects regularly scanned by Scorecard, navigate to the webviewer, replacing the placeholder text with the platform, user/org, and repository name: +https://securityscorecards.dev/viewer/?uri=.com//. + +For example: + - [https://securityscorecards.dev/viewer/?uri=github.com/ossf/scorecard](https://securityscorecards.dev/viewer/?uri=github.com/ossf/scorecard) + - [https://securityscorecards.dev/viewer/?uri=gitlab.com/fdroid/fdroidclient](https://securityscorecards.dev/viewer/?uri=gitlab.com/fdroid/fdroidclient) + +To view scores for projects not included in the webviewer, use the [Scorecard CLI](#scorecard-command-line-interface). + ### Public Data We run a weekly Scorecard scan of the 1 million most critical open source @@ -150,16 +163,18 @@ To enable your project to be available on the REST API, set [`publish_results: true`](https://github.com/ossf/scorecard-action/blob/dd5015aaf9688596b0e6d11e7f24fff566aa366b/action.yaml#L35) in the Scorecard GitHub Action setting. +Data provided by the REST API is licensed under the [CDLA Permissive 2.0](https://cdla.dev/permissive-2-0). 
+ ### Scorecard Badges Enabling [`publish_results: true`](https://github.com/ossf/scorecard-action/blob/dd5015aaf9688596b0e6d11e7f24fff566aa366b/action.yaml#L35) in Scorecard GitHub Actions also allows maintainers to display a Scorecard badge on their repository to show off their -hard work. This badge also auto-updates for every change made to the repository. +hard work. This badge also auto-updates for every change made to the repository. See more details on [this OSSF blogpost](https://openssf.org/blog/2022/09/08/show-off-your-security-score-announcing-scorecards-badges/). + To include a badge on your project's repository, simply add the following markdown to your README: ``` -[![OpenSSF -Scorecard](https://api.securityscorecards.dev/projects/github.com/{owner}/{repo}/badge)](https://api.securityscorecards.dev/projects/github.com/{owner}/{repo}) +[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/{owner}/{repo}/badge)](https://securityscorecards.dev/viewer/?uri=github.com/{owner}/{repo}) ``` ### Scorecard Command Line Interface @@ -398,12 +413,41 @@ RESULTS | | | | review dismissal enabled on | | | | | | branch 'main' Info: Owner | | | | | | review required on branch | | -| | | | 'main' Info: 'admininistrator' | | +| | | | 'main' Info: 'administrator' | | | | | | PRs need reviews before being | | | | | | merged on branch 'main' | | |---------|------------------------|--------------------------------|--------------------------------|---------------------------------------------------------------------------| ``` +##### Using a GitLab Repository + +To run Scorecard on a GitLab repository, you must create a [GitLab Access Token](https://gitlab.com/-/profile/personal_access_tokens) with the following permissions: + +- `read_api` +- `read_user` +- `read_repository` + +You can run Scorecard on a GitLab repository by setting the `GITLAB_AUTH_TOKEN` environment variable: + +```bash +export GITLAB_AUTH_TOKEN=glpat-xxxx + +scorecard --repo 
gitlab.com/// +``` + +For an example of using Scorecard in GitLab CI/CD, see [here](https://gitlab.com/ossf-test/scorecard-pipeline-example). + +###### Self Hosted Editions +While we focus on GitLab.com support, Scorecard also works with self-hosted GitLab installations. +If your platform is hosted at a subdomain (e.g. `gitlab.foo.com`), Scorecard should work out of the box. +If your platform is hosted at some slug (e.g. `foo.com/bar/`), you will need to set the `GL_HOST` environment variable. + +```bash +export GITLAB_AUTH_TOKEN=glpat-xxxx +export GL_HOST=foo.com/bar +scorecard --repo foo.com/bar// +``` + ##### Using GitHub Enterprise Server (GHES) based Repository To use a GitHub Enterprise host `github.corp.com`, use the `GH_HOST` environment variable. @@ -452,7 +496,7 @@ Name | Description | Risk Level | Token Req [Binary-Artifacts](docs/checks.md#binary-artifacts) | Is the project free of checked-in binaries? | High | PAT, GITHUB_TOKEN | Supported | [Branch-Protection](docs/checks.md#branch-protection) | Does the project use [Branch Protection](https://docs.github.com/en/free-pro-team@latest/github/administering-a-repository/about-protected-branches) ? | High | PAT (`repo` or `repo> public_repo`), GITHUB_TOKEN | Supported (see notes) | certain settings are only supported with a maintainer PAT [CI-Tests](docs/checks.md#ci-tests) | Does the project run tests in CI, e.g. [GitHub Actions](https://docs.github.com/en/free-pro-team@latest/actions), [Prow](https://github.com/kubernetes/test-infra/tree/master/prow)? | Low | PAT, GITHUB_TOKEN | Supported -[CII-Best-Practices](docs/checks.md#cii-best-practices) | Has the project earned an [OpenSSF (formerly CII) Best Practices Badge](https://bestpractices.coreinfrastructure.org) at the passing, silver, or gold level? 
| Low | PAT, GITHUB_TOKEN | Validating | +[CII-Best-Practices](docs/checks.md#cii-best-practices) | Has the project earned an [OpenSSF (formerly CII) Best Practices Badge](https://www.bestpractices.dev) at the passing, silver, or gold level? | Low | PAT, GITHUB_TOKEN | Validating | [Code-Review](docs/checks.md#code-review) | Does the project practice code review before code is merged? | High | PAT, GITHUB_TOKEN | Validating | [Contributors](docs/checks.md#contributors) | Does the project have contributors from at least two different organizations? | Low | PAT, GITHUB_TOKEN | Validating | [Dangerous-Workflow](docs/checks.md#dangerous-workflow) | Does the project avoid dangerous coding patterns in GitHub Action workflows? | Critical | PAT, GITHUB_TOKEN | Unsupported | @@ -467,20 +511,24 @@ Name | Description | Risk Level | Token Req [Signed-Releases](docs/checks.md#signed-releases) | Does the project cryptographically [sign releases](https://wiki.debian.org/Creating%20signed%20GitHub%20releases)? | High | PAT, GITHUB_TOKEN | Validating | [Token-Permissions](docs/checks.md#token-permissions) | Does the project declare GitHub workflow tokens as [read only](https://docs.github.com/en/actions/reference/authentication-in-a-workflow)? | High | PAT, GITHUB_TOKEN | Unsupported | [Vulnerabilities](docs/checks.md#vulnerabilities) | Does the project have unfixed vulnerabilities? Uses the [OSV service](https://osv.dev). | High | PAT, GITHUB_TOKEN | Validating | -[Webhooks](docs/checks.md#webhooks) | Does the webhook defined in the repository have a token configured to authenticate the origins of requests? | High | maintainer PAT (`admin: repo_hook` or `admin> read:repo_hook` [doc](https://docs.github.com/en/rest/webhooks/repo-config#get-a-webhook-configuration-for-a-repository) | | EXPERIMENTAL +[Webhooks](docs/checks.md#webhooks) | Does the webhook defined in the repository have a token configured to authenticate the origins of requests? 
| Critical | maintainer PAT (`admin: repo_hook` or `admin> read:repo_hook` [doc](https://docs.github.com/en/rest/webhooks/repo-config#get-a-webhook-configuration-for-a-repository) | | EXPERIMENTAL ### Detailed Checks Documentation To see detailed information about each check, its scoring criteria, and remediation steps, check out the [checks documentation page](docs/checks.md). +### Beginner's Guide to Scorecard Checks + +For a guide to the checks you should use when getting started, see the [beginner's guide to scorecard checks](docs/beginner-checks.md). + ## Other Important Recommendations ### Two-factor Authentication (2FA) [Two-factor Authentication (2FA)](https://docs.github.com/en/authentication/securing-your-account-with-two-factor-authentication-2fa/about-two-factor-authentication) adds an extra layer of security when logging into websites or apps. 2FA protects your account if your password is compromised by requiring a second form of authentication, such as codes sent via SMS or authentication app, or touching a physical security key. -We strongly recommend that you enable 2FA on GitHub and any important account where it is available. 2FA is not a Scorecard check because GitHub does not make that data about user accounts public. Arguably, this data should always remain private, since accounts without 2FA are so vulnerable to attack. +We strongly recommend that you enable 2FA on any important accounts where it is available. 2FA is not a Scorecard check because GitHub and GitLab do not make that data about user accounts public. Arguably, this data should always remain private, since accounts without 2FA are so vulnerable to attack. Though it is not an official check, we urge all project maintainers to enable 2FA to protect their projects from compromise. @@ -522,7 +570,7 @@ risk level. 
### Report Problems If you have what looks like a bug, please use the -[Github issue tracking system.](https://github.com/ossf/scorecard/issues) Before +[GitHub issue tracking system.](https://github.com/ossf/scorecard/issues) Before you file an issue, please search existing issues to see if your issue is already covered. @@ -548,9 +596,9 @@ Artifact | Link ----------------------------- | ---- Scorecard Dev Forum | [ossf-scorecard-dev@](https://groups.google.com/g/ossf-scorecard-dev) Scorecard Announcements Forum | [ossf-scorecard-announce@](https://groups.google.com/g/ossf-scorecard-announce) -Community Meeting VC | [Link to z o o m meeting](https://zoom.us/j/98835923979) -Community Meeting Calendar | Biweekly Thursdays, 1:00pm-2:00pm PST
[Calendar](https://calendar.google.com/calendar?cid=czYzdm9lZmhwNWk5cGZsdGI1cTY3bmdwZXNAZ3JvdXAuY2FsZW5kYXIuZ29vZ2xlLmNvbQ) -Meeting Notes | [Notes](https://docs.google.com/document/d/1dB2U7_qZpNW96vtuoG7ShmgKXzIg6R5XT5Tc-0yz6kE/edit#heading=h.4k8ml0qkh7tl) +Community Meeting VC | [Link to Zoom meeting](https://zoom-lfx.platform.linuxfoundation.org/meeting/95007214146?password=250040c3-80c0-48c4-80c1-07a373116d54) +Community Meeting Calendar | **_APAC-friendly_** Biweekly on Thursdays at 1:00-2:00 PM Pacific ([OSSF Public Calendar](https://calendar.google.com/calendar/u/0/embed?height=600&wkst=1&bgcolor=%238E24AA&showTitle=1&mode=WEEK&showCalendars=0&showTabs=1&showPrint=0&title=OpenSSF+Community+Calendar&src=czYzdm9lZmhwNWk5cGZsdGI1cTY3bmdwZXNAZ3JvdXAuY2FsZW5kYXIuZ29vZ2xlLmNvbQ&color=%238E24AA))
Video Call: [LFX Zoom](https://zoom-lfx.platform.linuxfoundation.org/meeting/95007214146?password=250040c3-80c0-48c4-80c1-07a373116d54)
**_EMEA-friendly_** Every 4 Mondays at 7:00-8:00 AM Pacific ([OSSF Public Calendar](https://calendar.google.com/calendar/u/0/embed?height=600&wkst=1&bgcolor=%238E24AA&showTitle=1&mode=WEEK&showCalendars=0&showTabs=1&showPrint=0&title=OpenSSF+Community+Calendar&src=czYzdm9lZmhwNWk5cGZsdGI1cTY3bmdwZXNAZ3JvdXAuY2FsZW5kYXIuZ29vZ2xlLmNvbQ&color=%238E24AA))
Video Call: [LFX Zoom](https://zoom-lfx.platform.linuxfoundation.org/meeting/93377638314?password=d53af562-d908-4100-8ae1-52686756cc5d) +Meeting Notes | [Notes](https://docs.google.com/document/d/1b6d3CVJLsl7YnTE7ZaZQHdkdYIvuOQ8rzAmvVdypOWM/edit?usp=sharing) Slack Channel | [#security_scorecards](https://slack.openssf.org/#security_scorecards) __Maintainers__ are listed in the [CODEOWNERS file](.github/CODEOWNERS). @@ -561,13 +609,19 @@ To report a security issue, please follow instructions [here](SECURITY.md). ### Join the Scorecards Project Meeting -#### Zoom +#### Zoom + +**_APAC-friendly_** Biweekly on Thursdays at 1:00-2:00 PM Pacific ([OSSF Public Calendar](https://calendar.google.com/calendar/u/0/embed?height=600&wkst=1&bgcolor=%238E24AA&showTitle=1&mode=WEEK&showCalendars=0&showTabs=1&showPrint=0&title=OpenSSF+Community+Calendar&src=czYzdm9lZmhwNWk5cGZsdGI1cTY3bmdwZXNAZ3JvdXAuY2FsZW5kYXIuZ29vZ2xlLmNvbQ&color=%238E24AA)) + +Video Call: [LFX Zoom](https://zoom-lfx.platform.linuxfoundation.org/meeting/95007214146?password=250040c3-80c0-48c4-80c1-07a373116d54) + +**_EMEA-friendly_** Every 4 Mondays at 7:00-8:00 AM Pacific ([OSSF Public Calendar](https://calendar.google.com/calendar/u/0/embed?height=600&wkst=1&bgcolor=%238E24AA&showTitle=1&mode=WEEK&showCalendars=0&showTabs=1&showPrint=0&title=OpenSSF+Community+Calendar&src=czYzdm9lZmhwNWk5cGZsdGI1cTY3bmdwZXNAZ3JvdXAuY2FsZW5kYXIuZ29vZ2xlLmNvbQ&color=%238E24AA)) -We meet every other Thursday - 4p ET on this [zoom link](https://zoom.us/j/98835923979?pwd=RG5JZ3czZEtmRDlGdms0ZktmMFQvUT09). +Video Call: [LFX Zoom](https://zoom-lfx.platform.linuxfoundation.org/meeting/93377638314?password=d53af562-d908-4100-8ae1-52686756cc5d) #### Agenda -You can see the [agenda and meeting notes here](https://docs.google.com/document/d/1b6d3CVJLsl7YnTE7ZaZQHdkdYIvuOQ8rzAmvVdypOWM/edit?usp=sharing). 
+You can see the [agenda and meeting notes here](https://docs.google.com/document/d/1b6d3CVJLsl7YnTE7ZaZQHdkdYIvuOQ8rzAmvVdypOWM/edit?usp=sharing). ## Stargazers over time diff --git a/RELEASE.md b/RELEASE.md new file mode 100644 index 00000000000..f5b78849c09 --- /dev/null +++ b/RELEASE.md @@ -0,0 +1,132 @@ +# Releasing Scorecard + +This is a draft document to describe the release process for Scorecard + +(If there are improvements you'd like to see, please comment on the +[tracking issue](https://github.com/ossf/scorecard/issues/1676) or issue a +pull request to discuss.) + +- [Tracking](#tracking) +- [Preparing the release](#preparing-the-release) + - [Validate tests](#validate-tests) + - [Validate the changes with scdiff](#validate-the-changes-with-scdiff) +- [Drafting release notes](#drafting-release-notes) +- [Release](#release) + - [Create a tag](#create-a-tag) + - [Create a GitHub release](#create-a-github-release) +- [Validate Release](#validate-release) + +## Tracking + +As the first task, a Release Manager should open a tracking issue for the +release. + +We don't currently have a template for releasing, but the following +[issue](https://github.com/ossf/scorecard-action/issues/97) is a good example +to draw inspiration from. + +We're not striving for perfection with the template, but the tracking issue +will serve as a reference point to aggregate feedback, so try your best to be +as descriptive as possible. + +## Preparing the release + +This section covers changes that need to be issued as a pull request and should +be merged before releasing the scorecard GitHub Action. + +### Validate tests + +Check the unit tests and integration tests are passing for the planned release commit, either locally or for the GitHub workflows. + +### Validate the changes with scdiff +1. 
Create the list of repos to use for the analysis if you don't have it already: +```console +cat <<EOF > repos.txt +https://github.com/airbnb/lottie-web +https://github.com/apache/tomcat +https://github.com/Azure/azure-functions-dotnet-worker +https://github.com/cncf/xds +https://github.com/google/go-cmp +https://github.com/google/highwayhash +https://github.com/googleapis/google-api-php-client +https://github.com/jacoco/jacoco +https://github.com/ossf/scorecard +https://github.com/pallets/jinja +https://github.com/polymer/polymer +https://github.com/rust-random/getrandom +https://github.com/yaml/libyaml +https://gitlab.com/baserow/baserow +https://gitlab.com/cryptsetup/cryptsetup +EOF +``` +2. Run `scdiff` on the previous release: +```console +git checkout <previous release tag> +go run cmd/internal/scdiff/main.go generate --repos repos.txt --output oldRelease.json +``` +3. Run `scdiff` on the commit to be tagged: +```console +git checkout <commit to be tagged> +go run cmd/internal/scdiff/main.go generate --repos repos.txt --output newRelease.json +``` +4. Compare the results: +```console +go run cmd/internal/scdiff/main.go compare oldRelease.json newRelease.json +``` +5. Evaluating results: +There will be differences! That's ok, but please pay attention to what they are and use your judgement when evaluating them. +Compare the changes against the release notes you're expecting below. + + +## Drafting release notes + +Release notes are a semi-automated process. We often start by opening [drafting a new release on GitHub](https://github.com/ossf/scorecard/releases/new). +You can select to create a new tag on publish, and auto-generate some notes by clicking `Generate release notes`. +This provides a good start, but no one wants to see a wave of dependabot commits, so filter them out. +Try to focus on the PRs that affect users or behavior, not dependency updates or CI changes. + +Using the Kubernetes `release-notes` tool can also be helpful if PR authors filled out the user-facing change section. 
+```console +release-notes --org ossf --repo scorecard --branch main \ + --dependencies=false \ + --required-author "" \ + --start-rev <previous release tag> \ + --end-rev <commit to be tagged> +``` + +Note: This doesn't always grab the right value when PR bodies have multiple code blocks in them. + +Save your draft when satisfied and share it with other maintainers for feedback, if possible. + +## Release + +### Create a tag + +The GitHub release process supports creating a tag on publish, but prefer signing the tag when possible. +In this example, we're releasing a hypothetical `v100.0.0` at the desired commit SHA `SHA`: + +```console +git remote update +git checkout `SHA` +git tag -s -m "v100.0.0" v100.0.0 +git push <upstream> v100.0.0 +``` + +### Create a GitHub release + +Revisit the draft release you created earlier, and ensure it's using the correct tag. + +Release title: `v100.0.0` + +The release notes will be the notes you drafted in the previous step. + +Ensure the release is marked as the latest release, if appropriate. + +Click `Publish release`. + +## Validate Release + +When a new tag is pushed, our GitHub Actions will create a release using `goreleaser`. +Confirm the workflow ran without issues. Check the release again to verify the artifacts and provenance have been added. + +If any issues were encountered, fixes must be issued under a new release/tag as Go releases are immutable. diff --git a/attestor/Dockerfile b/attestor/Dockerfile index ec4d6949b04..03a307dcd07 100644 --- a/attestor/Dockerfile +++ b/attestor/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM golang@sha256:ea3d912d500b1ae0a691b2e53eb8a6345b579d42d7e6a64acca83d274b949740 AS base +FROM golang:1.21.6@sha256:76aadd914a29a2ee7a6b0f3389bb2fdb87727291d688e1d972abe6c0fa6f2ee0 AS base WORKDIR /src/scorecard COPY . 
./ @@ -21,7 +21,7 @@ ARG TARGETOS ARG TARGETARCH RUN make build-attestor -FROM gcr.io/google-appengine/debian11@sha256:fed7dd5b2c4bbfb70bd26a277cdaff98dced71f113632ccd5451dcc013fce0a4 +FROM gcr.io/google-appengine/debian11@sha256:97dc4fbf18419ef928bcddb865ccf0536d4cc39ae3ace5a2b4273c11aedbea82 COPY --from=build /src/scorecard/attestor / ENTRYPOINT [ "/scorecard-attestor" ] diff --git a/attestor/README.md b/attestor/README.md index 24f9022a373..0043ace52c8 100644 --- a/attestor/README.md +++ b/attestor/README.md @@ -23,7 +23,7 @@ Policies for scorecard attestor can be passed through the CLI using the `--polic * `PreventKnownVulnerabilities`: Ensure that the project is free from security vulnerabilities/advisories, as registered in osv.dev. * `PreventUnpinnedDependencies`: Ensure that a project's dependencies are pinned by hash. Dependency pinning makes builds more predictable, and prevents the consumption of malicious package versions from a compromised upstream. * `AllowedUnpinnedDependencies`: Ignore some dependencies, either by the filepath of the dependency management file (`filepath`, e.g. requirements.txt or package.json) or the dependency name (`packagename`, the specific package being ignored). If multiple filepaths/names, or a combination of filepaths and names are specified, all of them will be used. If not specified, no unpinned dependencies will be allowed. -* `RequireCodeReviewed`: Require that If `CodeReviewRequirements` is not specified, at least one reviewer will be required on all changesets. Scorecard-attestor inherits scorecard's deafult commit window (i.e. will only look at the last 30 commits to determine if they are reviewed or not). +* `RequireCodeReviewed`: Require that If `CodeReviewRequirements` is not specified, at least one reviewer will be required on all changesets. Scorecard-attestor inherits scorecard's default commit window (i.e. will only look at the last 30 commits to determine if they are reviewed or not). 
* `CodeReviewRequirements.MinReviewers`: The minimum number of distinct approvals required. * `CodeReviewRequirements.RequiredApprovers`: A set of approvers, any of whom must be found to have approved all changes. If a change is found without any approvals from this list, the check fails. diff --git a/attestor/command/cli.go b/attestor/command/cli.go index 0f8e89239c8..27e8b829689 100644 --- a/attestor/command/cli.go +++ b/attestor/command/cli.go @@ -114,7 +114,7 @@ func init() { func Execute() { if err := RootCmd.Execute(); err != nil { - fmt.Println(err) + fmt.Fprintln(os.Stderr, err) os.Exit(1) } } diff --git a/attestor/command/cli_test.go b/attestor/command/cli_test.go new file mode 100644 index 00000000000..0ba516271cb --- /dev/null +++ b/attestor/command/cli_test.go @@ -0,0 +1,60 @@ +// Copyright 2023 OpenSSF Scorecard Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+package command + +import ( + "testing" + + "github.com/spf13/cobra" +) + +func Test_addSignFlags(t *testing.T) { + t.Parallel() + type args struct { + cmd *cobra.Command + } + testArgs := args{ + cmd: &cobra.Command{}, + } + + addSignFlags(testArgs.cmd) + // persistent flags of Image being set has to be tested in the integration test + if testArgs.cmd.PersistentFlags().Lookup("image") == nil { + t.Errorf("addSignFlags() did not add persistent flag 'image'") + } + if testArgs.cmd.PersistentFlags().Lookup("attestation-project") == nil { + t.Errorf("addSignFlags() did not add persistent flag 'attestation-project'") + } + if testArgs.cmd.PersistentFlags().Lookup("overwrite") == nil { + t.Errorf("addSignFlags() did not add persistent flag 'overwrite'") + } + if testArgs.cmd.PersistentFlags().Lookup("kms-key-name") == nil { + t.Errorf("addSignFlags() did not add persistent flag 'kms-key-name'") + } + if testArgs.cmd.PersistentFlags().Lookup("kms-digest-alg") == nil { + t.Errorf("addSignFlags() did not add persistent flag 'kms-digest-alg'") + } + if testArgs.cmd.PersistentFlags().Lookup("pgp-private-key") == nil { + t.Errorf("addSignFlags() did not add persistent flag 'pgp-private-key'") + } + if testArgs.cmd.PersistentFlags().Lookup("pgp-passphrase") == nil { + t.Errorf("addSignFlags() did not add persistent flag 'pgp-passphrase'") + } + if testArgs.cmd.PersistentFlags().Lookup("pkix-private-key") == nil { + t.Errorf("addSignFlags() did not add persistent flag 'pkix-private-key'") + } + if testArgs.cmd.PersistentFlags().Lookup("pkix-alg") == nil { + t.Errorf("addSignFlags() did not add persistent flag 'pkix-alg'") + } +} diff --git a/attestor/policy/attestation_policy.go b/attestor/policy/attestation_policy.go index 1052bbad948..36d97c06e39 100644 --- a/attestor/policy/attestation_policy.go +++ b/attestor/policy/attestation_policy.go @@ -29,7 +29,7 @@ import ( sclog "github.com/ossf/scorecard/v4/log" ) -//nolint:govet +//nolint:govet,musttag // JSON usage is test only 
type AttestationPolicy struct { // PreventBinaryArtifacts : set to true to require that this project's SCM repo is // free of binary artifacts @@ -120,7 +120,7 @@ func (ap *AttestationPolicy) EvaluateResults(raw *checker.RawResults) (PolicyRes if ap.EnsureCodeReviewed { // By default, if code review reqs. aren't specified, we assume - // the user wants there to be atleast one reviewer + // the user wants there to be at least one reviewer if len(ap.CodeReviewRequirements.RequiredApprovers) == 0 && ap.CodeReviewRequirements.MinReviewers == 0 { ap.CodeReviewRequirements.MinReviewers = 1 diff --git a/attestor/policy/attestation_policy_test.go b/attestor/policy/attestation_policy_test.go index 61e222b906d..8f2c201f859 100644 --- a/attestor/policy/attestation_policy_test.go +++ b/attestor/policy/attestation_policy_test.go @@ -17,7 +17,6 @@ package policy import ( "encoding/json" "errors" - "fmt" "testing" "github.com/google/go-cmp/cmp" @@ -171,7 +170,7 @@ func TestCheckPreventBinaryArtifacts(t *testing.T) { func TestCheckCodeReviewed(t *testing.T) { t.Parallel() - // nolint + //nolint:govet tests := []struct { err error raw *checker.RawResults @@ -386,7 +385,7 @@ func asPointer(s string) *string { func TestNoUnpinnedDependencies(t *testing.T) { t.Parallel() - // nolint + //nolint:govet tests := []struct { err error raw *checker.RawResults @@ -533,8 +532,8 @@ func TestAttestationPolicyRead(t *testing.T) { // Compare outputs only if the error is nil. // TODO: compare objects. 
if p.ToJSON() != tt.result.ToJSON() { - fmt.Printf("p.ToJSON(): %v\n", p.ToJSON()) - fmt.Printf("tt.result.ToJSON(): %v\n", tt.result.ToJSON()) + t.Logf("p.ToJSON(): %v\n", p.ToJSON()) + t.Logf("tt.result.ToJSON(): %v\n", tt.result.ToJSON()) t.Fatalf("%s: invalid result", tt.name) } }) diff --git a/checker/check_result.go b/checker/check_result.go index 5e0dd21946f..b6aa83d7bc8 100644 --- a/checker/check_result.go +++ b/checker/check_result.go @@ -16,9 +16,11 @@ package checker import ( + "errors" "fmt" "math" + sce "github.com/ossf/scorecard/v4/errors" "github.com/ossf/scorecard/v4/finding" "github.com/ossf/scorecard/v4/rule" ) @@ -50,6 +52,10 @@ const ( DetailDebug ) +// errSuccessTotal indicates a runtime error because number of success cases should +// be smaller than the total cases to create a proportional score. +var errSuccessTotal = errors.New("unexpected number of success is higher than total") + // CheckResult captures result from a check run. // //nolint:govet @@ -88,13 +94,55 @@ type LogMessage struct { Remediation *rule.Remediation // Remediation information, if any. } +// ProportionalScoreWeighted is a structure that contains +// the fields to calculate weighted proportional scores. +type ProportionalScoreWeighted struct { + Success int + Total int + Weight int +} + // CreateProportionalScore creates a proportional score. func CreateProportionalScore(success, total int) int { if total == 0 { return 0 } - return int(math.Min(float64(MaxResultScore*success/total), float64(MaxResultScore))) + return min(MaxResultScore*success/total, MaxResultScore) +} + +// CreateProportionalScoreWeighted creates the proportional score +// between multiple successes over the total, but some proportions +// are worth more. 
+func CreateProportionalScoreWeighted(scores ...ProportionalScoreWeighted) (int, error) { + var ws, wt int + allWeightsZero := true + noScoreGroups := true + for _, score := range scores { + if score.Success > score.Total { + return InconclusiveResultScore, fmt.Errorf("%w: %d, %d", errSuccessTotal, score.Success, score.Total) + } + if score.Total == 0 { + continue // Group with 0 total, does not count for score + } + noScoreGroups = false + if score.Weight != 0 { + allWeightsZero = false + } + // Group with zero weight, adds nothing to the score + + ws += score.Success * score.Weight + wt += score.Total * score.Weight + } + if noScoreGroups { + return InconclusiveResultScore, nil + } + // If has score groups but no groups matter to the score, result in max score + if allWeightsZero { + return MaxResultScore, nil + } + + return min(MaxResultScore*ws/wt, MaxResultScore), nil } // AggregateScores adds up all scores @@ -128,8 +176,15 @@ func NormalizeReason(reason string, score int) string { // CreateResultWithScore is used when // the check runs without runtime errors, and we want to assign a -// specific score. +// specific score. The score must be between [MinResultScore] and [MaxResultScore]. +// Callers who want [InconclusiveResultScore] must use [CreateInconclusiveResult] instead. +// +// Passing an invalid score results in a runtime error result as if created by [CreateRuntimeErrorResult]. func CreateResultWithScore(name, reason string, score int) CheckResult { + if score < MinResultScore || score > MaxResultScore { + err := sce.CreateInternal(sce.ErrScorecardInternal, fmt.Sprintf("invalid score (%d), please report this", score)) + return CreateRuntimeErrorResult(name, err) + } return CheckResult{ Name: name, Version: 2, @@ -146,15 +201,8 @@ func CreateResultWithScore(name, reason string, score int) CheckResult { // the number of tests that succeeded. 
func CreateProportionalScoreResult(name, reason string, b, t int) CheckResult { score := CreateProportionalScore(b, t) - return CheckResult{ - Name: name, - // Old structure. - // New structure. - Version: 2, - Error: nil, - Score: score, - Reason: NormalizeReason(reason, score), - } + reason = NormalizeReason(reason, score) + return CreateResultWithScore(name, reason, score) } // CreateMaxScoreResult is used when @@ -190,12 +238,12 @@ func CreateRuntimeErrorResult(name string, e error) CheckResult { Version: 2, Error: e, Score: InconclusiveResultScore, - Reason: e.Error(), // Note: message already accessible by caller thru `Error`. + Reason: e.Error(), // Note: message already accessible by caller through `Error`. } } // LogFindings logs the list of findings. -func LogFindings(findings []finding.Finding, dl DetailLogger) error { +func LogFindings(findings []finding.Finding, dl DetailLogger) { for i := range findings { f := &findings[i] switch f.Outcome { @@ -213,6 +261,4 @@ func LogFindings(findings []finding.Finding, dl DetailLogger) error { }) } } - - return nil } diff --git a/checker/check_result_test.go b/checker/check_result_test.go index 291f0696cde..762d960465d 100644 --- a/checker/check_result_test.go +++ b/checker/check_result_test.go @@ -19,6 +19,9 @@ import ( "testing" "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + + sce "github.com/ossf/scorecard/v4/errors" ) func TestAggregateScores(t *testing.T) { @@ -50,7 +53,7 @@ func TestAggregateScores(t *testing.T) { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() - if got := AggregateScores(tt.args.scores...); got != tt.want { //nolint:govet + if got := AggregateScores(tt.args.scores...); got != tt.want { t.Errorf("AggregateScores() = %v, want %v", got, tt.want) } }) @@ -86,7 +89,7 @@ func TestAggregateScoresWithWeight(t *testing.T) { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() - if got := AggregateScoresWithWeight(tt.args.scores); got != tt.want { 
//nolint:govet + if got := AggregateScoresWithWeight(tt.args.scores); got != tt.want { t.Errorf("AggregateScoresWithWeight() = %v, want %v", got, tt.want) } }) @@ -127,13 +130,288 @@ func TestCreateProportionalScore(t *testing.T) { }, want: 5, }, + { + name: "2 and 5", + args: args{ + success: 2, + total: 5, + }, + want: 4, + }, } for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() - if got := CreateProportionalScore(tt.args.success, tt.args.total); got != tt.want { //nolint:govet - t.Errorf("CreateProportionalScore() = %v, want %v", got, tt.want) //nolint:govet + if got := CreateProportionalScore(tt.args.success, tt.args.total); got != tt.want { + t.Errorf("CreateProportionalScore() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestCreateProportionalScoreWeighted(t *testing.T) { + t.Parallel() + type want struct { + score int + err bool + } + tests := []struct { + name string + scores []ProportionalScoreWeighted + want want + }{ + { + name: "max result with 1 group and normal weight", + scores: []ProportionalScoreWeighted{ + { + Success: 1, + Total: 1, + Weight: 10, + }, + }, + want: want{ + score: 10, + }, + }, + { + name: "min result with 1 group and normal weight", + scores: []ProportionalScoreWeighted{ + { + Success: 0, + Total: 1, + Weight: 10, + }, + }, + want: want{ + score: 0, + }, + }, + { + name: "partial result with 1 group and normal weight", + scores: []ProportionalScoreWeighted{ + { + Success: 2, + Total: 10, + Weight: 10, + }, + }, + want: want{ + score: 2, + }, + }, + { + name: "partial result with 2 groups and normal weights", + scores: []ProportionalScoreWeighted{ + { + Success: 2, + Total: 10, + Weight: 10, + }, + { + Success: 8, + Total: 10, + Weight: 10, + }, + }, + want: want{ + score: 5, + }, + }, + { + name: "partial result with 2 groups and odd weights", + scores: []ProportionalScoreWeighted{ + { + Success: 2, + Total: 10, + Weight: 8, + }, + { + Success: 8, + Total: 10, + Weight: 2, + }, + }, + 
want: want{ + score: 3, + }, + }, + { + name: "all groups with 0 weight, no groups matter for the score, results in max score", + scores: []ProportionalScoreWeighted{ + { + Success: 1, + Total: 1, + Weight: 0, + }, + { + Success: 1, + Total: 1, + Weight: 0, + }, + }, + want: want{ + score: 10, + }, + }, + { + name: "not all groups with 0 weight, only groups with weight matter to the score", + scores: []ProportionalScoreWeighted{ + { + Success: 1, + Total: 1, + Weight: 0, + }, + { + Success: 2, + Total: 10, + Weight: 8, + }, + { + Success: 8, + Total: 10, + Weight: 2, + }, + }, + want: want{ + score: 3, + }, + }, + { + name: "no total, results in inconclusive score", + scores: []ProportionalScoreWeighted{ + { + Success: 0, + Total: 0, + Weight: 10, + }, + }, + want: want{ + score: -1, + }, + }, + { + name: "some groups with 0 total, only groups with total matter to the score", + scores: []ProportionalScoreWeighted{ + { + Success: 0, + Total: 0, + Weight: 10, + }, + { + Success: 2, + Total: 10, + Weight: 10, + }, + }, + want: want{ + score: 2, + }, + }, + { + name: "any group with number of successes higher than total, results in inconclusive score and error", + scores: []ProportionalScoreWeighted{ + { + Success: 1, + Total: 0, + Weight: 10, + }, + }, + want: want{ + score: -1, + err: true, + }, + }, + { + name: "only groups with weight and total matter to the score", + scores: []ProportionalScoreWeighted{ + { + Success: 1, + Total: 1, + Weight: 0, + }, + { + Success: 0, + Total: 0, + Weight: 10, + }, + { + Success: 2, + Total: 10, + Weight: 8, + }, + { + Success: 8, + Total: 10, + Weight: 2, + }, + }, + want: want{ + score: 3, + }, + }, + { + name: "only groups with weight and total matter to the score but no groups have success, results in min score", + scores: []ProportionalScoreWeighted{ + { + Success: 1, + Total: 1, + Weight: 0, + }, + { + Success: 0, + Total: 0, + Weight: 10, + }, + { + Success: 0, + Total: 10, + Weight: 8, + }, + { + Success: 0, + Total: 10, + 
Weight: 2, + }, + }, + want: want{ + score: 0, + }, + }, + { + name: "group with 0 weight counts as max score and group with 0 total does not count", + scores: []ProportionalScoreWeighted{ + { + Success: 2, + Total: 8, + Weight: 0, + }, + { + Success: 0, + Total: 0, + Weight: 10, + }, + }, + want: want{ + score: 10, + }, + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + got, err := CreateProportionalScoreWeighted(tt.scores...) + if err != nil && !tt.want.err { + t.Errorf("CreateProportionalScoreWeighted unexpected error '%v'", err) + t.Fail() + } + if err == nil && tt.want.err { + t.Errorf("CreateProportionalScoreWeighted expected error and got none") + t.Fail() + } + if got != tt.want.score { + t.Errorf("CreateProportionalScoreWeighted() = %v, want %v", got, tt.want.score) } }) } @@ -171,8 +449,8 @@ func TestNormalizeReason(t *testing.T) { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() - if got := NormalizeReason(tt.args.reason, tt.args.score); got != tt.want { //nolint:govet - t.Errorf("NormalizeReason() = %v, want %v", got, tt.want) //nolint:govet + if got := NormalizeReason(tt.args.reason, tt.args.score); got != tt.want { + t.Errorf("NormalizeReason() = %v, want %v", got, tt.want) } }) } @@ -218,13 +496,59 @@ func TestCreateResultWithScore(t *testing.T) { Score: 1, }, }, + { + name: "inconclusive score is not valid", + args: args{ + name: "name", + reason: "reason", + score: InconclusiveResultScore, + }, + want: CheckResult{ + Name: "name", + Reason: "internal error: invalid score (-1), please report this", + Version: 2, + Score: -1, + Error: sce.ErrScorecardInternal, + }, + }, + { + name: "score too low", + args: args{ + name: "name", + reason: "reason", + score: -3, + }, + want: CheckResult{ + Name: "name", + Reason: "internal error: invalid score (-3), please report this", + Version: 2, + Score: -1, + Error: sce.ErrScorecardInternal, + }, + }, + { + name: "score too high", + args: args{ + name: 
"name", + reason: "reason", + score: MaxResultScore + 2, + }, + want: CheckResult{ + Name: "name", + Reason: "internal error: invalid score (12), please report this", + Version: 2, + Score: -1, + Error: sce.ErrScorecardInternal, + }, + }, } for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() - if got := CreateResultWithScore(tt.args.name, tt.args.reason, tt.args.score); !cmp.Equal(got, tt.want) { //nolint:lll,govet - t.Errorf("CreateResultWithScore() = %v, want %v", got, cmp.Diff(got, tt.want)) //nolint:govet + got := CreateResultWithScore(tt.args.name, tt.args.reason, tt.args.score) + if !cmp.Equal(got, tt.want, cmpopts.EquateErrors()) { + t.Errorf("CreateResultWithScore() = %v, want %v", got, cmp.Diff(got, tt.want)) } }) } @@ -273,13 +597,30 @@ func TestCreateProportionalScoreResult(t *testing.T) { Version: 2, }, }, + { + name: "negative proportion, score too low", + args: args{ + name: "name", + reason: "reason", + b: -2, + t: 1, + }, + want: CheckResult{ + Name: "name", + Reason: "internal error: invalid score (-20), please report this", + Version: 2, + Score: -1, + Error: sce.ErrScorecardInternal, + }, + }, } for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() - if got := CreateProportionalScoreResult(tt.args.name, tt.args.reason, tt.args.b, tt.args.t); !cmp.Equal(got, tt.want) { //nolint:govet,lll - t.Errorf("CreateProportionalScoreResult() = %v, want %v", got, cmp.Diff(got, tt.want)) //nolint:govet + got := CreateProportionalScoreResult(tt.args.name, tt.args.reason, tt.args.b, tt.args.t) + if !cmp.Equal(got, tt.want, cmpopts.EquateErrors()) { + t.Errorf("CreateProportionalScoreResult() = %v, want %v", got, cmp.Diff(got, tt.want)) } }) } @@ -327,7 +668,7 @@ func TestCreateMaxScoreResult(t *testing.T) { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() - if got := CreateMaxScoreResult(tt.args.name, tt.args.reason); !cmp.Equal(got, tt.want) { //nolint:govet + if got := 
CreateMaxScoreResult(tt.args.name, tt.args.reason); !cmp.Equal(got, tt.want) { t.Errorf("CreateMaxScoreResult() = %v, want %v", got, cmp.Diff(got, tt.want)) } }) @@ -376,7 +717,7 @@ func TestCreateMinScoreResult(t *testing.T) { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() - if got := CreateMinScoreResult(tt.args.name, tt.args.reason); !cmp.Equal(got, tt.want) { //nolint:govet + if got := CreateMinScoreResult(tt.args.name, tt.args.reason); !cmp.Equal(got, tt.want) { t.Errorf("CreateMinScoreResult() = %v, want %v", got, cmp.Diff(got, tt.want)) } }) @@ -447,14 +788,14 @@ func TestCreateRuntimeErrorResult(t *testing.T) { name: "empty", args: args{ name: "", - e: errors.New("runtime error"), //nolint:goerr113 + e: errors.New("runtime error"), }, want: CheckResult{ Name: "", Reason: "runtime error", Score: -1, Version: 2, - Error: errors.New("runtime error"), //nolint:goerr113 + Error: errors.New("runtime error"), }, }, } diff --git a/checker/check_runner.go b/checker/check_runner.go index edf5fcfb86b..22ba8e75741 100644 --- a/checker/check_runner.go +++ b/checker/check_runner.go @@ -96,14 +96,21 @@ func (r *Runner) Run(ctx context.Context, c Check) CheckResult { fmt.Sprintf("requiredType: %s not supported by check %s", fmt.Sprint(unsupported), r.CheckName))) } + l := NewLogger() ctx, err := tag.New(ctx, tag.Upsert(stats.CheckName, r.CheckName)) if err != nil { - panic(err) + l.Warn(&LogMessage{Text: fmt.Sprintf("tag.New: %v", err)}) } + + ctx, err = tag.New(ctx, tag.Upsert(stats.RepoHost, r.CheckRequest.Repo.Host())) + if err != nil { + l.Warn(&LogMessage{Text: fmt.Sprintf("tag.New: %v", err)}) + } + startTime := time.Now() var res CheckResult - l := NewLogger() + l = NewLogger() for retriesRemaining := checkRetries; retriesRemaining > 0; retriesRemaining-- { checkRequest := r.CheckRequest checkRequest.Ctx = ctx diff --git a/checker/client.go b/checker/client.go index 7a28b2771f4..1b5d28a3ddd 100644 --- a/checker/client.go +++ b/checker/client.go @@ -17,7 
+17,6 @@ package checker import ( "context" "fmt" - "os" "github.com/ossf/scorecard/v4/clients" ghrepo "github.com/ossf/scorecard/v4/clients/githubrepo" @@ -54,14 +53,11 @@ func GetClients(ctx context.Context, repoURI, localURI string, logger *log.Logge retErr } - _, experimental := os.LookupEnv("SCORECARD_EXPERIMENTAL") var repoClient clients.RepoClient - if experimental { - repo, makeRepoError = glrepo.MakeGitlabRepo(repoURI) - if repo != nil && makeRepoError == nil { - repoClient, makeRepoError = glrepo.CreateGitlabClient(ctx, repo.Host()) - } + repo, makeRepoError = glrepo.MakeGitlabRepo(repoURI) + if repo != nil && makeRepoError == nil { + repoClient, makeRepoError = glrepo.CreateGitlabClient(ctx, repo.Host()) } if makeRepoError != nil || repo == nil { diff --git a/checker/client_test.go b/checker/client_test.go index b2dd293d2e2..9426e841bdb 100644 --- a/checker/client_test.go +++ b/checker/client_test.go @@ -20,9 +20,8 @@ import ( "github.com/ossf/scorecard/v4/log" ) -// nolint:paralleltest -// because we are using t.Setenv. -func TestGetClients(t *testing.T) { //nolint:gocognit +//nolint:paralleltest // because we are using t.Setenv. 
+func TestGetClients(t *testing.T) { type args struct { //nolint:govet ctx context.Context repoURI string @@ -68,32 +67,17 @@ func TestGetClients(t *testing.T) { //nolint:gocognit wantErr: true, }, { - name: "repoURI is gitlab which is not supported", + name: "repoURI is gitlab which is supported", args: args{ ctx: context.Background(), - repoURI: "https://gitlab.com/ossf/scorecard", + repoURI: "https://gitlab.com/ossf-test/scorecard", localURI: "", }, shouldOSSFuzzBeNil: false, shouldRepoClientBeNil: false, shouldVulnClientBeNil: false, - shouldRepoBeNil: true, - wantErr: true, - }, - { - name: "repoURI is gitlab and experimental is true", - args: args{ - ctx: context.Background(), - repoURI: "https://gitlab.com/ossf/scorecard", - localURI: "", - }, - shouldOSSFuzzBeNil: false, shouldRepoBeNil: false, - shouldRepoClientBeNil: false, - shouldVulnClientBeNil: false, - shouldCIIBeNil: false, wantErr: false, - experimental: true, }, { name: "repoURI is corp github host", @@ -122,7 +106,7 @@ func TestGetClients(t *testing.T) { //nolint:gocognit t.Setenv("GH_HOST", "github.corp.com") t.Setenv("GH_TOKEN", "PAT") } - got, repoClient, ossFuzzClient, ciiClient, vulnsClient, err := GetClients(tt.args.ctx, tt.args.repoURI, tt.args.localURI, tt.args.logger) //nolint:lll + got, repoClient, ossFuzzClient, ciiClient, vulnsClient, err := GetClients(tt.args.ctx, tt.args.repoURI, tt.args.localURI, tt.args.logger) if (err != nil) != tt.wantErr { t.Fatalf("GetClients() error = %v, wantErr %v", err, tt.wantErr) } diff --git a/checker/detail_logger_impl_test.go b/checker/detail_logger_impl_test.go index c1cebcb8a61..e81901d3eb9 100644 --- a/checker/detail_logger_impl_test.go +++ b/checker/detail_logger_impl_test.go @@ -18,6 +18,7 @@ import ( ) func Test_logger_Info(t *testing.T) { + t.Parallel() l := &logger{ logs: []CheckDetail{}, } @@ -28,6 +29,7 @@ func Test_logger_Info(t *testing.T) { } func Test_logger_Warn(t *testing.T) { + t.Parallel() l := &logger{ logs: []CheckDetail{}, } @@ 
-38,6 +40,7 @@ func Test_logger_Warn(t *testing.T) { } func Test_logger_Flush(t *testing.T) { + t.Parallel() l := &logger{ logs: []CheckDetail{}, } @@ -52,6 +55,7 @@ func Test_logger_Flush(t *testing.T) { } func Test_logger_Logs(t *testing.T) { + t.Parallel() l := &logger{ logs: []CheckDetail{}, } @@ -62,6 +66,7 @@ func Test_logger_Logs(t *testing.T) { } func Test_logger_Debug(t *testing.T) { + t.Parallel() l := &logger{ logs: []CheckDetail{}, } @@ -72,6 +77,7 @@ func Test_logger_Debug(t *testing.T) { } func TestNewLogger(t *testing.T) { + t.Parallel() l := NewLogger() if l == nil { t.Errorf("expected non-nil logger, got nil") diff --git a/checker/raw_result.go b/checker/raw_result.go index 2acaee17a47..011bd106cb5 100644 --- a/checker/raw_result.go +++ b/checker/raw_result.go @@ -15,35 +15,39 @@ package checker import ( + "fmt" "time" "github.com/ossf/scorecard/v4/clients" "github.com/ossf/scorecard/v4/finding" + "github.com/ossf/scorecard/v4/rule" ) // RawResults contains results before a policy // is applied. 
-// nolint +// +//nolint:govet type RawResults struct { - PackagingResults PackagingData - CIIBestPracticesResults CIIBestPracticesData - DangerousWorkflowResults DangerousWorkflowData - VulnerabilitiesResults VulnerabilitiesData BinaryArtifactResults BinaryArtifactData - SecurityPolicyResults SecurityPolicyData - DependencyUpdateToolResults DependencyUpdateToolData BranchProtectionResults BranchProtectionsData + CIIBestPracticesResults CIIBestPracticesData + CITestResults CITestData CodeReviewResults CodeReviewData - PinningDependenciesResults PinningDependenciesData - WebhookResults WebhooksData ContributorsResults ContributorsData - MaintainedResults MaintainedData - SignedReleasesResults SignedReleasesData + DangerousWorkflowResults DangerousWorkflowData + DependencyUpdateToolResults DependencyUpdateToolData FuzzingResults FuzzingData LicenseResults LicenseData - TokenPermissionsResults TokenPermissionsData - CITestResults CITestData + MaintainedResults MaintainedData Metadata MetadataData + PackagingResults PackagingData + PinningDependenciesResults PinningDependenciesData + SASTResults SASTData + SecurityPolicyResults SecurityPolicyData + SignedReleasesResults SignedReleasesData + TokenPermissionsResults TokenPermissionsData + VulnerabilitiesResults VulnerabilitiesData + WebhookResults WebhooksData } type MetadataData struct { @@ -74,7 +78,6 @@ type PackagingData struct { } // Package represents a package. -// nolint type Package struct { // TODO: not supported yet. This needs to be unique across // ecosystems: purl, OSV, CPE, etc. @@ -110,18 +113,21 @@ const ( // PinningDependenciesData represents pinned dependency data. type PinningDependenciesData struct { - Dependencies []Dependency + Dependencies []Dependency + ProcessingErrors []ElementError // jobs or files with errors may have incomplete results } // Dependency represents a dependency. type Dependency struct { // TODO: unique dependency name. 
// TODO: Job *WorkflowJob - Name *string - PinnedAt *string - Location *File - Msg *string // Only for debug messages. - Type DependencyUseType + Name *string + PinnedAt *string + Location *File + Msg *string // Only for debug messages. + Pinned *bool + Remediation *rule.Remediation + Type DependencyUseType } // MaintainedData contains the raw results @@ -225,6 +231,46 @@ type SecurityPolicyFile struct { File File } +// SASTData contains the raw results +// for the SAST check. +type SASTData struct { + Workflows []SASTWorkflow + Commits []SASTCommit + NumWorkflows int +} + +type SASTCommit struct { + CommittedDate time.Time + Message string + SHA string + CheckRuns []clients.CheckRun + AssociatedMergeRequest clients.PullRequest + Committer clients.User + Compliant bool +} + +// SASTWorkflowType represents a type of SAST workflow. +type SASTWorkflowType string + +const ( + // CodeQLWorkflow represents a workflow that runs CodeQL. + CodeQLWorkflow SASTWorkflowType = "CodeQL" + // SonarWorkflow represents a workflow that runs Sonar. + SonarWorkflow SASTWorkflowType = "Sonar" + // SnykWorkflow represents a workflow that runs Snyk. + SnykWorkflow SASTWorkflowType = "Snyk" + // PysaWorkflow represents a workflow that runs Pysa. + PysaWorkflow SASTWorkflowType = "Pysa" + // QodanaWorkflow represents a workflow that runs Qodana. + QodanaWorkflow SASTWorkflowType = "Qodana" +) + +// SASTWorkflow represents a SAST workflow. +type SASTWorkflow struct { + Type SASTWorkflowType + File File +} + // SecurityPolicyData contains the raw results // for the Security-Policy check. type SecurityPolicyData struct { @@ -285,7 +331,7 @@ type Run struct { URL string } -// ArchivedStatus definess the archived status. +// ArchivedStatus defines the archived status. type ArchivedStatus struct { Status bool // TODO: add fields, e.g., date of archival. @@ -302,7 +348,7 @@ type File struct { // TODO: add hash. } -// CIIBestPracticesData contains data foor CIIBestPractices check. 
+// CIIBestPracticesData contains data for CIIBestPractices check. type CIIBestPracticesData struct { Badge clients.BadgeLevel } @@ -401,3 +447,23 @@ func (f *File) Location() *finding.Location { return loc } + +// ElementError allows us to identify the "element" that led to the given error. +// The "element" is the specific "code under analysis" that caused the error. It should +// describe what caused the error as precisely as possible. +// +// For example, if a shell parsing error occurs while parsing a Dockerfile `RUN` block +// or a GitHub workflow's `run:` step, the "element" should point to the Dockerfile +// lines or workflow job step that caused the failure, not just the file path. +type ElementError struct { + Err error + Location finding.Location +} + +func (e *ElementError) Error() string { + return fmt.Sprintf("%s: %v", e.Err, e.Location) +} + +func (e *ElementError) Unwrap() error { + return e.Err +} diff --git a/checker/raw_result_test.go b/checker/raw_result_test.go new file mode 100644 index 00000000000..4a04d7d0abc --- /dev/null +++ b/checker/raw_result_test.go @@ -0,0 +1,49 @@ +// Copyright 2023 OpenSSF Scorecard Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+package checker + +import ( + "testing" + + "github.com/ossf/scorecard/v4/finding" +) + +func TestFile_Location(t *testing.T) { + t.Parallel() + file := File{ + Type: finding.FileTypeSource, + Path: "bar.go", + Offset: 10, + EndOffset: 20, + Snippet: "some code", + } + + loc := file.Location() + + if loc.Type != finding.FileTypeSource { + t.Errorf("Expected loc.Type to be 'foo', got %v", loc.Type) + } + if loc.Path != "bar.go" { + t.Errorf("Expected loc.Path to be 'bar.go', got %v", loc.Path) + } + if *loc.LineStart != 10 { + t.Errorf("Expected *loc.LineStart to be 10, got %v", *loc.LineStart) + } + if *loc.LineEnd != 20 { + t.Errorf("Expected *loc.LineEnd to be 20, got %v", *loc.LineEnd) + } + if *loc.Snippet != "some code" { + t.Errorf("Expected *loc.Snippet to be 'some code', got %v", *loc.Snippet) + } +} diff --git a/checks/all_checks_test.go b/checks/all_checks_test.go index 276bc7d789f..5cced127633 100644 --- a/checks/all_checks_test.go +++ b/checks/all_checks_test.go @@ -23,15 +23,13 @@ import ( func Test_registerCheck(t *testing.T) { t.Parallel() - //nolint type args struct { - name string fn checker.CheckFn + name string } - //nolint tests := []struct { - name string args args + name string wanterr bool }{ { diff --git a/checks/binary_artifact.go b/checks/binary_artifact.go index 354e9cbc1f6..f57d154524f 100644 --- a/checks/binary_artifact.go +++ b/checks/binary_artifact.go @@ -19,15 +19,18 @@ import ( "github.com/ossf/scorecard/v4/checks/evaluation" "github.com/ossf/scorecard/v4/checks/raw" sce "github.com/ossf/scorecard/v4/errors" + "github.com/ossf/scorecard/v4/probes" + "github.com/ossf/scorecard/v4/probes/zrunner" ) // CheckBinaryArtifacts is the exported name for Binary-Artifacts check. 
const CheckBinaryArtifacts string = "Binary-Artifacts" -//nolint +//nolint:gochecknoinits func init() { supportedRequestTypes := []checker.RequestType{ checker.CommitBased, + checker.FileBased, } if err := registerCheck(CheckBinaryArtifacts, BinaryArtifacts, supportedRequestTypes); err != nil { // this should never happen @@ -37,17 +40,22 @@ func init() { // BinaryArtifacts will check the repository contains binary artifacts. func BinaryArtifacts(c *checker.CheckRequest) checker.CheckResult { - rawData, err := raw.BinaryArtifacts(c.RepoClient) + rawData, err := raw.BinaryArtifacts(c) if err != nil { e := sce.WithMessage(sce.ErrScorecardInternal, err.Error()) return checker.CreateRuntimeErrorResult(CheckBinaryArtifacts, e) } - // Return raw results. - if c.RawResults != nil { - c.RawResults.BinaryArtifactResults = rawData + // Set the raw results. + pRawResults := getRawResults(c) + pRawResults.BinaryArtifactResults = rawData + + // Evaluate the probes. + findings, err := zrunner.Run(pRawResults, probes.BinaryArtifacts) + if err != nil { + e := sce.WithMessage(sce.ErrScorecardInternal, err.Error()) + return checker.CreateRuntimeErrorResult(CheckBinaryArtifacts, e) } - // Return the score evaluation. 
- return evaluation.BinaryArtifacts(CheckBinaryArtifacts, c.Dlogger, &rawData) + return evaluation.BinaryArtifacts(CheckBinaryArtifacts, findings, c.Dlogger) } diff --git a/checks/branch_protection_test.go b/checks/branch_protection_test.go index 929c20768c8..d56a4cce2bd 100644 --- a/checks/branch_protection_test.go +++ b/checks/branch_protection_test.go @@ -57,7 +57,6 @@ func TestReleaseAndDevBranchProtected(t *testing.T) { rel1 := "release/v.1" sha := "8fb3cb86082b17144a80402f5367ae65f06083bd" - //nolint:goconst main := "main" trueVal := true falseVal := false @@ -65,15 +64,14 @@ func TestReleaseAndDevBranchProtected(t *testing.T) { var oneVal int32 = 1 - //nolint tests := []struct { name string - expected scut.TestReturn - branches []*clients.BranchRef defaultBranch string + branches []*clients.BranchRef releases []string - nonadmin bool repoFiles []string + expected scut.TestReturn + nonadmin bool }{ { name: "Nil release and main branch names", @@ -87,7 +85,6 @@ func TestReleaseAndDevBranchProtected(t *testing.T) { defaultBranch: main, branches: []*clients.BranchRef{ { - Name: nil, Protected: &trueVal, BranchProtectionRule: clients.BranchProtectionRule{ CheckRules: clients.StatusChecksRule{ @@ -96,6 +93,7 @@ func TestReleaseAndDevBranchProtected(t *testing.T) { Contexts: []string{"foo"}, }, RequiredPullRequestReviews: clients.PullRequestReviewRule{ + Required: &trueVal, DismissStaleReviews: &trueVal, RequireCodeOwnerReviews: &trueVal, RequiredApprovingReviewCount: &oneVal, @@ -107,7 +105,6 @@ func TestReleaseAndDevBranchProtected(t *testing.T) { }, }, { - Name: nil, Protected: &trueVal, BranchProtectionRule: clients.BranchProtectionRule{ CheckRules: clients.StatusChecksRule{ @@ -116,6 +113,7 @@ func TestReleaseAndDevBranchProtected(t *testing.T) { Contexts: nil, }, RequiredPullRequestReviews: clients.PullRequestReviewRule{ + Required: &trueVal, DismissStaleReviews: &falseVal, RequireCodeOwnerReviews: &falseVal, RequiredApprovingReviewCount: &zeroVal, @@ -134,9 
+132,9 @@ func TestReleaseAndDevBranchProtected(t *testing.T) { name: "Only development branch", expected: scut.TestReturn{ Error: nil, - Score: 2, + Score: 3, NumberOfWarn: 7, - NumberOfInfo: 2, + NumberOfInfo: 3, NumberOfDebug: 0, }, defaultBranch: main, @@ -155,6 +153,7 @@ func TestReleaseAndDevBranchProtected(t *testing.T) { Contexts: nil, }, RequiredPullRequestReviews: clients.PullRequestReviewRule{ + Required: &trueVal, DismissStaleReviews: &falseVal, RequireCodeOwnerReviews: &falseVal, RequiredApprovingReviewCount: &zeroVal, @@ -173,9 +172,9 @@ func TestReleaseAndDevBranchProtected(t *testing.T) { name: "Take worst of release and development", expected: scut.TestReturn{ Error: nil, - Score: 2, + Score: 4, NumberOfWarn: 9, - NumberOfInfo: 10, + NumberOfInfo: 12, NumberOfDebug: 0, }, defaultBranch: main, @@ -190,6 +189,7 @@ func TestReleaseAndDevBranchProtected(t *testing.T) { Contexts: []string{"foo"}, }, RequiredPullRequestReviews: clients.PullRequestReviewRule{ + Required: &trueVal, DismissStaleReviews: &trueVal, RequireCodeOwnerReviews: &trueVal, RequiredApprovingReviewCount: &oneVal, @@ -211,6 +211,7 @@ func TestReleaseAndDevBranchProtected(t *testing.T) { Contexts: nil, }, RequiredPullRequestReviews: clients.PullRequestReviewRule{ + Required: &trueVal, DismissStaleReviews: &falseVal, RequireCodeOwnerReviews: &falseVal, RequiredApprovingReviewCount: &zeroVal, @@ -231,7 +232,7 @@ func TestReleaseAndDevBranchProtected(t *testing.T) { Error: nil, Score: 8, NumberOfWarn: 4, - NumberOfInfo: 16, + NumberOfInfo: 18, NumberOfDebug: 0, }, defaultBranch: main, @@ -246,6 +247,7 @@ func TestReleaseAndDevBranchProtected(t *testing.T) { Contexts: []string{"foo"}, }, RequiredPullRequestReviews: clients.PullRequestReviewRule{ + Required: &trueVal, DismissStaleReviews: &trueVal, RequireCodeOwnerReviews: &trueVal, RequiredApprovingReviewCount: &oneVal, @@ -267,6 +269,7 @@ func TestReleaseAndDevBranchProtected(t *testing.T) { Contexts: []string{"foo"}, }, 
RequiredPullRequestReviews: clients.PullRequestReviewRule{ + Required: &trueVal, DismissStaleReviews: &trueVal, RequireCodeOwnerReviews: &trueVal, RequiredApprovingReviewCount: &oneVal, @@ -285,9 +288,9 @@ func TestReleaseAndDevBranchProtected(t *testing.T) { name: "Ignore a non-branch targetcommitish", expected: scut.TestReturn{ Error: nil, - Score: 2, + Score: 3, NumberOfWarn: 7, - NumberOfInfo: 2, + NumberOfInfo: 3, NumberOfDebug: 0, }, defaultBranch: main, @@ -303,6 +306,7 @@ func TestReleaseAndDevBranchProtected(t *testing.T) { Contexts: nil, }, RequiredPullRequestReviews: clients.PullRequestReviewRule{ + Required: &trueVal, DismissStaleReviews: &falseVal, RequireCodeOwnerReviews: &falseVal, RequiredApprovingReviewCount: &zeroVal, @@ -341,6 +345,7 @@ func TestReleaseAndDevBranchProtected(t *testing.T) { Contexts: nil, }, RequiredPullRequestReviews: clients.PullRequestReviewRule{ + Required: &trueVal, DismissStaleReviews: &falseVal, RequireCodeOwnerReviews: &falseVal, RequiredApprovingReviewCount: &zeroVal, @@ -358,7 +363,7 @@ func TestReleaseAndDevBranchProtected(t *testing.T) { expected: scut.TestReturn{ Error: nil, Score: 0, - NumberOfWarn: 4, + NumberOfWarn: 6, NumberOfInfo: 0, NumberOfDebug: 8, }, @@ -426,9 +431,7 @@ func TestReleaseAndDevBranchProtected(t *testing.T) { RepoClient: mockRepoClient, } r := BranchProtection(&req) - if !scut.ValidateTestReturn(t, tt.name, &tt.expected, &r, &dl) { - t.Fail() - } + scut.ValidateTestReturn(t, tt.name, &tt.expected, &r, &dl) ctrl.Finish() }) } diff --git a/checks/ci_tests.go b/checks/ci_tests.go index c2e577385af..b5d37405710 100644 --- a/checks/ci_tests.go +++ b/checks/ci_tests.go @@ -19,9 +19,10 @@ import ( "github.com/ossf/scorecard/v4/checks/evaluation" "github.com/ossf/scorecard/v4/checks/raw" sce "github.com/ossf/scorecard/v4/errors" + "github.com/ossf/scorecard/v4/probes" + "github.com/ossf/scorecard/v4/probes/zrunner" ) -// CheckCodeReview is the registered name for DoesCodeReview. 
const CheckCITests = "CI-Tests" //nolint:gochecknoinits @@ -35,7 +36,6 @@ func init() { } } -// CodeReview will check if the maintainers perform code review. func CITests(c *checker.CheckRequest) checker.CheckResult { rawData, err := raw.CITests(c.RepoClient) if err != nil { @@ -43,11 +43,15 @@ func CITests(c *checker.CheckRequest) checker.CheckResult { return checker.CreateRuntimeErrorResult(CheckCITests, e) } - // Return raw results. - if c.RawResults != nil { - c.RawResults.CITestResults = rawData + pRawResults := getRawResults(c) + pRawResults.CITestResults = rawData + + // Evaluate the probes. + findings, err := zrunner.Run(pRawResults, probes.CITests) + if err != nil { + e := sce.WithMessage(sce.ErrScorecardInternal, err.Error()) + return checker.CreateRuntimeErrorResult(CheckCITests, e) } - // Return the score evaluation. - return evaluation.CITests(CheckCITests, &rawData, c.Dlogger) + return evaluation.CITests(CheckCITests, findings, c.Dlogger) } diff --git a/checks/ci_tests_test.go b/checks/ci_tests_test.go index 8e19820e3c2..7acb4bb1810 100644 --- a/checks/ci_tests_test.go +++ b/checks/ci_tests_test.go @@ -29,7 +29,6 @@ func TestCITestsRuntimeError(t *testing.T) { ctrl := gomock.NewController(t) mockRepoClient := mockrepo.NewMockRepoClient(ctrl) - //nolint:goerr113 mockRepoClient.EXPECT().ListCommits().Return(nil, fmt.Errorf("some runtime error")).AnyTimes() req := checker.CheckRequest{ diff --git a/checks/cii_best_practices.go b/checks/cii_best_practices.go index fd7f5679ff0..4f39c9c78e3 100644 --- a/checks/cii_best_practices.go +++ b/checks/cii_best_practices.go @@ -19,6 +19,8 @@ import ( "github.com/ossf/scorecard/v4/checks/evaluation" "github.com/ossf/scorecard/v4/checks/raw" sce "github.com/ossf/scorecard/v4/errors" + "github.com/ossf/scorecard/v4/probes" + "github.com/ossf/scorecard/v4/probes/zrunner" ) // CheckCIIBestPractices is the registered name for CIIBestPractices. 
@@ -40,11 +42,17 @@ func CIIBestPractices(c *checker.CheckRequest) checker.CheckResult { return checker.CreateRuntimeErrorResult(CheckCIIBestPractices, e) } - // Return raw results. - if c.RawResults != nil { - c.RawResults.CIIBestPracticesResults = rawData + // Set the raw results. + pRawResults := getRawResults(c) + pRawResults.CIIBestPracticesResults = rawData + + // Evaluate the probes. + findings, err := zrunner.Run(pRawResults, probes.CIIBestPractices) + if err != nil { + e := sce.WithMessage(sce.ErrScorecardInternal, err.Error()) + return checker.CreateRuntimeErrorResult(CheckCIIBestPractices, e) } // Return the score evaluation. - return evaluation.CIIBestPractices(CheckCIIBestPractices, c.Dlogger, &rawData) + return evaluation.CIIBestPractices(CheckCIIBestPractices, findings, c.Dlogger) } diff --git a/checks/cii_best_practices_test.go b/checks/cii_best_practices_test.go index 943e52a7aef..4c9546de8ea 100644 --- a/checks/cii_best_practices_test.go +++ b/checks/cii_best_practices_test.go @@ -122,9 +122,7 @@ func TestCIIBestPractices(t *testing.T) { } res := CIIBestPractices(&req) dl := scut.TestDetailLogger{} - if !scut.ValidateTestReturn(t, tt.name, &tt.expected, &res, &dl) { - t.Fail() - } + scut.ValidateTestReturn(t, tt.name, &tt.expected, &res, &dl) ctrl.Finish() }) } diff --git a/checks/code_review.go b/checks/code_review.go index 17372e52b1f..6791ca54ebe 100644 --- a/checks/code_review.go +++ b/checks/code_review.go @@ -1,4 +1,4 @@ -// Copyright 2020 OpenSSF Scorecard Authors +// Copyright 2023 OpenSSF Scorecard Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/checks/code_review_test.go b/checks/code_review_test.go index 1b48c06093f..f71df3a04de 100644 --- a/checks/code_review_test.go +++ b/checks/code_review_test.go @@ -1,4 +1,4 @@ -// Copyright 2022 OpenSSF Scorecard Authors +// Copyright 2023 OpenSSF Scorecard Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -27,11 +27,16 @@ import ( scut "github.com/ossf/scorecard/v4/utests" ) +var errNew = errors.New("error") + // TestCodeReview tests the code review checker. func TestCodereview(t *testing.T) { t.Parallel() - //fieldalignment lint issue. Ignoring it as it is not important for this test. - //nolint + // fieldalignment lint issue. Ignoring it as it is not important for this test. + //nolint:gci + //nolint:gofmt + //nolint:gofumpt + //nolint:goimports tests := []struct { err error name string @@ -47,22 +52,22 @@ func TestCodereview(t *testing.T) { }, { name: "no commits with error", - commiterr: errors.New("error"), + commiterr: errNew, expected: checker.CheckResult{ Score: -1, }, }, { name: "no PR's with error", - err: errors.New("error"), + err: errNew, expected: checker.CheckResult{ Score: -1, }, }, { name: "no PR's with error as well as commits", - err: errors.New("error"), - commiterr: errors.New("error"), + err: errNew, + commiterr: errNew, expected: checker.CheckResult{ Score: -1, }, @@ -277,7 +282,7 @@ func TestCodereview(t *testing.T) { } for _, tt := range tests { - tt := tt // Re-initializing variable so it is not changed while executing the closure below + tt := tt // Re-initializing variable so it is not changed while executing the closure below. 
t.Run(tt.name, func(t *testing.T) { t.Parallel() ctrl := gomock.NewController(t) diff --git a/checks/contributors.go b/checks/contributors.go index c8e000a8c5d..15db76f2d9d 100644 --- a/checks/contributors.go +++ b/checks/contributors.go @@ -19,6 +19,8 @@ import ( "github.com/ossf/scorecard/v4/checks/evaluation" "github.com/ossf/scorecard/v4/checks/raw" sce "github.com/ossf/scorecard/v4/errors" + "github.com/ossf/scorecard/v4/probes" + "github.com/ossf/scorecard/v4/probes/zrunner" ) // CheckContributors is the registered name for Contributors. @@ -34,17 +36,23 @@ func init() { // Contributors run Contributors check. func Contributors(c *checker.CheckRequest) checker.CheckResult { - rawData, err := raw.Contributors(c.RepoClient) + rawData, err := raw.Contributors(c) if err != nil { e := sce.WithMessage(sce.ErrScorecardInternal, err.Error()) return checker.CreateRuntimeErrorResult(CheckContributors, e) } - // Return raw results. - if c.RawResults != nil { - c.RawResults.ContributorsResults = rawData + // Set the raw results. + pRawResults := getRawResults(c) + pRawResults.ContributorsResults = rawData + + // Evaluate the probes. + findings, err := zrunner.Run(pRawResults, probes.Contributors) + if err != nil { + e := sce.WithMessage(sce.ErrScorecardInternal, err.Error()) + return checker.CreateRuntimeErrorResult(CheckContributors, e) } // Return the score evaluation. - return evaluation.Contributors(CheckContributors, c.Dlogger, &rawData) + return evaluation.Contributors(CheckContributors, findings, c.Dlogger) } diff --git a/checks/contributors_test.go b/checks/contributors_test.go index d059325e8e2..2ab795374c4 100644 --- a/checks/contributors_test.go +++ b/checks/contributors_test.go @@ -29,8 +29,6 @@ import ( // TestContributors tests the contributors check. func TestContributors(t *testing.T) { t.Parallel() - //fieldalignment lint issue. Ignoring it as it is not important for this test. 
- //nolint tests := []struct { err error name string @@ -61,7 +59,6 @@ func TestContributors(t *testing.T) { name: "Valid contributors with enough contributors and companies", contrib: []clients.User{ { - Companies: []string{"company1"}, NumContributions: 10, Organizations: []clients.User{ diff --git a/checks/dangerous_workflow.go b/checks/dangerous_workflow.go index 5b12f488df4..71384bfe158 100644 --- a/checks/dangerous_workflow.go +++ b/checks/dangerous_workflow.go @@ -19,6 +19,8 @@ import ( "github.com/ossf/scorecard/v4/checks/evaluation" "github.com/ossf/scorecard/v4/checks/raw" sce "github.com/ossf/scorecard/v4/errors" + "github.com/ossf/scorecard/v4/probes" + "github.com/ossf/scorecard/v4/probes/zrunner" ) // CheckDangerousWorkflow is the exported name for Dangerous-Workflow check. @@ -38,17 +40,22 @@ func init() { // DangerousWorkflow will check the repository contains Dangerous-Workflow. func DangerousWorkflow(c *checker.CheckRequest) checker.CheckResult { - rawData, err := raw.DangerousWorkflow(c.RepoClient) + rawData, err := raw.DangerousWorkflow(c) if err != nil { e := sce.WithMessage(sce.ErrScorecardInternal, err.Error()) return checker.CreateRuntimeErrorResult(CheckDangerousWorkflow, e) } - // Return raw results. - if c.RawResults != nil { - c.RawResults.DangerousWorkflowResults = rawData + // Set the raw results. + pRawResults := getRawResults(c) + pRawResults.DangerousWorkflowResults = rawData + + // Evaluate the probes. + findings, err := zrunner.Run(pRawResults, probes.DangerousWorkflows) + if err != nil { + e := sce.WithMessage(sce.ErrScorecardInternal, err.Error()) + return checker.CreateRuntimeErrorResult(CheckDangerousWorkflow, e) } - // Return the score evaluation. 
- return evaluation.DangerousWorkflow(CheckDangerousWorkflow, c.Dlogger, &rawData) + return evaluation.DangerousWorkflow(CheckDangerousWorkflow, findings, c.Dlogger) } diff --git a/checks/dependency_update_tool.go b/checks/dependency_update_tool.go index 54f1954f9b1..486c4a19149 100644 --- a/checks/dependency_update_tool.go +++ b/checks/dependency_update_tool.go @@ -20,12 +20,13 @@ import ( "github.com/ossf/scorecard/v4/checks/raw" sce "github.com/ossf/scorecard/v4/errors" "github.com/ossf/scorecard/v4/probes" + "github.com/ossf/scorecard/v4/probes/zrunner" ) -// CheckDependencyUpdateTool is the exported name for Automatic-Depdendency-Update. +// CheckDependencyUpdateTool is the exported name for Dependency-Update-Tool. const CheckDependencyUpdateTool = "Dependency-Update-Tool" -// nolint +//nolint:gochecknoinits func init() { supportedRequestTypes := []checker.RequestType{ checker.FileBased, @@ -44,18 +45,17 @@ func DependencyUpdateTool(c *checker.CheckRequest) checker.CheckResult { return checker.CreateRuntimeErrorResult(CheckDependencyUpdateTool, e) } - // Return raw results. - if c.RawResults != nil { - c.RawResults.DependencyUpdateToolResults = rawData - } + // Set the raw results. + pRawResults := getRawResults(c) + pRawResults.DependencyUpdateToolResults = rawData // Evaluate the probes. - findings, err := evaluateProbes(c, CheckDependencyUpdateTool, probes.DependencyToolUpdates) + findings, err := zrunner.Run(pRawResults, probes.DependencyToolUpdates) if err != nil { e := sce.WithMessage(sce.ErrScorecardInternal, err.Error()) return checker.CreateRuntimeErrorResult(CheckDependencyUpdateTool, e) } // Return the score evaluation. 
- return evaluation.DependencyUpdateTool(CheckDependencyUpdateTool, findings) + return evaluation.DependencyUpdateTool(CheckDependencyUpdateTool, findings, c.Dlogger) } diff --git a/checks/dependency_update_tool_test.go b/checks/dependency_update_tool_test.go index 173f4e5198c..6cccdb7f111 100644 --- a/checks/dependency_update_tool_test.go +++ b/checks/dependency_update_tool_test.go @@ -32,15 +32,14 @@ const ( // TestDependencyUpdateTool tests the DependencyUpdateTool checker. func TestDependencyUpdateTool(t *testing.T) { t.Parallel() - //nolint tests := []struct { name string - wantErr bool + want checker.CheckResult SearchCommits []clients.Commit - CallSearchCommits int files []string - want checker.CheckResult expected scut.TestReturn + CallSearchCommits int + wantErr bool }{ { name: "dependency yml", @@ -51,7 +50,7 @@ func TestDependencyUpdateTool(t *testing.T) { CallSearchCommits: 0, expected: scut.TestReturn{ NumberOfInfo: 1, - NumberOfWarn: 3, + NumberOfWarn: 0, Score: 10, }, }, @@ -64,7 +63,7 @@ func TestDependencyUpdateTool(t *testing.T) { CallSearchCommits: 0, expected: scut.TestReturn{ NumberOfInfo: 1, - NumberOfWarn: 3, + NumberOfWarn: 0, Score: 10, }, }, @@ -77,7 +76,7 @@ func TestDependencyUpdateTool(t *testing.T) { SearchCommits: []clients.Commit{{Committer: clients.User{ID: 111111111}}}, CallSearchCommits: 1, expected: scut.TestReturn{ - NumberOfWarn: 4, + NumberOfWarn: 3, }, }, { @@ -89,7 +88,7 @@ func TestDependencyUpdateTool(t *testing.T) { SearchCommits: []clients.Commit{}, CallSearchCommits: 1, expected: scut.TestReturn{ - NumberOfWarn: 4, + NumberOfWarn: 3, }, }, @@ -103,7 +102,7 @@ func TestDependencyUpdateTool(t *testing.T) { CallSearchCommits: 1, expected: scut.TestReturn{ NumberOfInfo: 1, - NumberOfWarn: 3, + NumberOfWarn: 0, Score: 10, }, }, @@ -118,7 +117,7 @@ func TestDependencyUpdateTool(t *testing.T) { CallSearchCommits: 1, expected: scut.TestReturn{ NumberOfInfo: 1, - NumberOfWarn: 3, + NumberOfWarn: 0, Score: 10, }, }, @@ -136,7 
+135,7 @@ func TestDependencyUpdateTool(t *testing.T) { CallSearchCommits: 1, expected: scut.TestReturn{ NumberOfInfo: 1, - NumberOfWarn: 3, + NumberOfWarn: 0, Score: 10, }, }, @@ -150,17 +149,31 @@ func TestDependencyUpdateTool(t *testing.T) { mockRepo.EXPECT().ListFiles(gomock.Any()).Return(tt.files, nil) mockRepo.EXPECT().SearchCommits(gomock.Any()).Return(tt.SearchCommits, nil).Times(tt.CallSearchCommits) dl := scut.TestDetailLogger{} - raw := checker.RawResults{} c := &checker.CheckRequest{ RepoClient: mockRepo, Dlogger: &dl, - RawResults: &raw, } res := DependencyUpdateTool(c) - if !scut.ValidateTestReturn(t, tt.name, &tt.expected, &res, &dl) { - t.Fail() - } + scut.ValidateTestReturn(t, tt.name, &tt.expected, &res, &dl) }) } } + +func TestDependencyUpdateTool_noSearchCommits(t *testing.T) { + t.Parallel() + ctrl := gomock.NewController(t) + mockRepo := mockrepo.NewMockRepoClient(ctrl) + files := []string{"README.md"} + mockRepo.EXPECT().ListFiles(gomock.Any()).Return(files, nil) + mockRepo.EXPECT().SearchCommits(gomock.Any()).Return(nil, clients.ErrUnsupportedFeature) + dl := scut.TestDetailLogger{} + c := &checker.CheckRequest{ + RepoClient: mockRepo, + Dlogger: &dl, + } + got := DependencyUpdateTool(c) + if got.Error != nil { + t.Errorf("got: %v, wanted ErrUnsupportedFeature not to propagate", got.Error) + } +} diff --git a/checks/evaluation/binary_artifacts.go b/checks/evaluation/binary_artifacts.go index 59075912844..fff943fb2a1 100644 --- a/checks/evaluation/binary_artifacts.go +++ b/checks/evaluation/binary_artifacts.go @@ -18,33 +18,46 @@ import ( "github.com/ossf/scorecard/v4/checker" sce "github.com/ossf/scorecard/v4/errors" "github.com/ossf/scorecard/v4/finding" + "github.com/ossf/scorecard/v4/probes/freeOfUnverifiedBinaryArtifacts" ) // BinaryArtifacts applies the score policy for the Binary-Artifacts check. 
-func BinaryArtifacts(name string, dl checker.DetailLogger, - r *checker.BinaryArtifactData, +func BinaryArtifacts(name string, + findings []finding.Finding, + dl checker.DetailLogger, ) checker.CheckResult { - if r == nil { - e := sce.WithMessage(sce.ErrScorecardInternal, "empty raw data") + expectedProbes := []string{ + freeOfUnverifiedBinaryArtifacts.Probe, + } + + if !finding.UniqueProbesEqual(findings, expectedProbes) { + e := sce.WithMessage(sce.ErrScorecardInternal, "invalid probe results") return checker.CreateRuntimeErrorResult(name, e) } - // Apply the policy evaluation. - if r.Files == nil || len(r.Files) == 0 { + if findings[0].Outcome == finding.OutcomePositive { return checker.CreateMaxScoreResult(name, "no binaries found in the repo") } - score := checker.MaxResultScore - for _, f := range r.Files { + for i := range findings { + f := &findings[i] + if f.Outcome != finding.OutcomeNegative { + continue + } dl.Warn(&checker.LogMessage{ - Path: f.Path, Type: finding.FileTypeBinary, - Offset: f.Offset, + Path: f.Location.Path, + Type: f.Location.Type, + Offset: *f.Location.LineStart, Text: "binary detected", }) - // We remove one point for each binary. - score-- } + // There are only negative findings. + // Deduct the number of findings from max score + numberOfBinaryFilesFound := len(findings) + + score := checker.MaxResultScore - numberOfBinaryFilesFound + if score < checker.MinResultScore { score = checker.MinResultScore } diff --git a/checks/evaluation/binary_artifacts_test.go b/checks/evaluation/binary_artifacts_test.go index 7a4ccab29e4..59cf25dbf0c 100644 --- a/checks/evaluation/binary_artifacts_test.go +++ b/checks/evaluation/binary_artifacts_test.go @@ -18,256 +18,121 @@ import ( "testing" "github.com/ossf/scorecard/v4/checker" + sce "github.com/ossf/scorecard/v4/errors" + "github.com/ossf/scorecard/v4/finding" scut "github.com/ossf/scorecard/v4/utests" ) // TestBinaryArtifacts tests the binary artifacts check. 
func TestBinaryArtifacts(t *testing.T) { t.Parallel() - //nolint - type args struct { - name string - dl checker.DetailLogger - r *checker.BinaryArtifactData + lineStart := uint(123) + negativeFinding := finding.Finding{ + Probe: "freeOfUnverifiedBinaryArtifacts", + Outcome: finding.OutcomeNegative, + + Location: &finding.Location{ + Path: "path", + Type: finding.FileTypeBinary, + LineStart: &lineStart, + }, } + tests := []struct { - name string - args args - want checker.CheckResult - wantErr bool + name string + findings []finding.Finding + result scut.TestReturn }{ { - name: "r nil", - args: args{ - name: "test_binary_artifacts_check_pass", - dl: &scut.TestDetailLogger{}, + name: "no binary artifacts", + findings: []finding.Finding{ + { + Probe: "freeOfUnverifiedBinaryArtifacts", + Outcome: finding.OutcomePositive, + }, + }, + result: scut.TestReturn{ + Score: checker.MaxResultScore, }, - wantErr: true, }, { - name: "no binary artifacts", - args: args{ - name: "no binary artifacts", - dl: &scut.TestDetailLogger{}, - r: &checker.BinaryArtifactData{}, + name: "one binary artifact", + findings: []finding.Finding{ + negativeFinding, }, - want: checker.CheckResult{ - Score: checker.MaxResultScore, + result: scut.TestReturn{ + Score: 9, + NumberOfWarn: 1, }, }, { - name: "1 binary artifact", - args: args{ - name: "no binary artifacts", - dl: &scut.TestDetailLogger{}, - r: &checker.BinaryArtifactData{ - Files: []checker.File{ - { - Path: "test_binary_artifacts_check_pass", - Snippet: ` - package main - import "fmt" - func main() { - fmt.Println("Hello, world!") - }i`, - Offset: 0, - Type: 0, - }, + name: "two binary artifact", + findings: []finding.Finding{ + { + Probe: "freeOfUnverifiedBinaryArtifacts", + Outcome: finding.OutcomeNegative, + Location: &finding.Location{ + Path: "path", + Type: finding.FileTypeBinary, + LineStart: &lineStart, + }, + }, + { + Probe: "freeOfUnverifiedBinaryArtifacts", + Outcome: finding.OutcomeNegative, + Location: &finding.Location{ + 
Path: "path", + Type: finding.FileTypeBinary, + LineStart: &lineStart, }, }, }, - want: checker.CheckResult{ - Score: 9, + result: scut.TestReturn{ + Score: 8, + NumberOfWarn: 2, }, }, { - name: "many binary artifact", - args: args{ - name: "no binary artifacts", - dl: &scut.TestDetailLogger{}, - r: &checker.BinaryArtifactData{ - Files: []checker.File{ - { - Path: "test_binary_artifacts_check_pass", - Snippet: ` - package main - import "fmt" - func main() { - fmt.Println("Hello, world!") - }i`, - Offset: 0, - Type: 0, - }, - { - Path: "test_binary_artifacts_check_pass", - Snippet: ` - package main - import "fmt" - func main() { - fmt.Println("Hello, world!") - }i`, - Offset: 0, - Type: 0, - }, - { - Path: "test_binary_artifacts_check_pass", - Snippet: ` - package main - import "fmt" - func main() { - fmt.Println("Hello, world!") - }i`, - Offset: 0, - Type: 0, - }, - { - Path: "test_binary_artifacts_check_pass", - Snippet: ` - package main - import "fmt" - func main() { - fmt.Println("Hello, world!") - }i`, - Offset: 0, - Type: 0, - }, - { - Path: "test_binary_artifacts_check_pass", - Snippet: ` - package main - import "fmt" - func main() { - fmt.Println("Hello, world!") - }i`, - Offset: 0, - Type: 0, - }, - { - Path: "test_binary_artifacts_check_pass", - Snippet: ` - package main - import "fmt" - func main() { - fmt.Println("Hello, world!") - }i`, - Offset: 0, - Type: 0, - }, - { - Path: "test_binary_artifacts_check_pass", - Snippet: ` - package main - import "fmt" - func main() { - fmt.Println("Hello, world!") - }i`, - Offset: 0, - Type: 0, - }, - { - Path: "test_binary_artifacts_check_pass", - Snippet: ` - package main - import "fmt" - func main() { - fmt.Println("Hello, world!") - }i`, - Offset: 0, - Type: 0, - }, - { - Path: "test_binary_artifacts_check_pass", - Snippet: ` - package main - import "fmt" - func main() { - fmt.Println("Hello, world!") - }i`, - Offset: 0, - Type: 0, - }, - { - Path: "test_binary_artifacts_check_pass", - Snippet: ` - package main - 
import "fmt" - func main() { - fmt.Println("Hello, world!") - }i`, - Offset: 0, - Type: 0, - }, - { - Path: "test_binary_artifacts_check_pass", - Snippet: ` - package main - import "fmt" - func main() { - fmt.Println("Hello, world!") - }i`, - Offset: 0, - Type: 0, - }, - { - Path: "test_binary_artifacts_check_pass", - Snippet: ` - package main - import "fmt" - func main() { - fmt.Println("Hello, world!") - }i`, - Offset: 0, - Type: 0, - }, - { - Path: "test_binary_artifacts_check_pass", - Snippet: ` - package main - import "fmt" - func main() { - fmt.Println("Hello, world!") - }i`, - Offset: 0, - Type: 0, - }, - { - Path: "test_binary_artifacts_check_pass", - Snippet: ` - package main - import "fmt" - func main() { - fmt.Println("Hello, world!") - }i`, - Offset: 0, - Type: 0, - }, - { - Path: "test_binary_artifacts_check_pass", - Snippet: ` - package main - import "fmt" - func main() { - fmt.Println("Hello, world!") - }i`, - Offset: 0, - Type: 0, - }, - { - Path: "test_binary_artifacts_check_pass", - Snippet: ` - package main - import "fmt" - func main() { - fmt.Println("Hello, world!") - }i`, - Offset: 0, - Type: 0, - }, - }, - }, + name: "five binary artifact", + findings: []finding.Finding{ + negativeFinding, + negativeFinding, + negativeFinding, + negativeFinding, + negativeFinding, + }, + result: scut.TestReturn{ + Score: 5, + NumberOfWarn: 5, + }, + }, + { + name: "twelve binary artifact - ensure score doesn't drop below min", + findings: []finding.Finding{ + negativeFinding, + negativeFinding, + negativeFinding, + negativeFinding, + negativeFinding, + negativeFinding, + negativeFinding, + negativeFinding, + negativeFinding, + negativeFinding, + negativeFinding, + negativeFinding, + }, + result: scut.TestReturn{ + Score: checker.MinResultScore, + NumberOfWarn: 12, }, - want: checker.CheckResult{ - Score: 0, + }, + { + name: "invalid findings", + findings: []finding.Finding{}, + result: scut.TestReturn{ + Score: checker.InconclusiveResultScore, + Error: 
sce.ErrScorecardInternal, }, }, } @@ -275,16 +140,9 @@ func TestBinaryArtifacts(t *testing.T) { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() - got := BinaryArtifacts(tt.args.name, tt.args.dl, tt.args.r) - if tt.wantErr { - if got.Error == nil { - t.Errorf("BinaryArtifacts() error = %v, wantErr %v", got.Error, tt.wantErr) - } - } else { - if got.Score != tt.want.Score { - t.Errorf("BinaryArtifacts() = %v, want %v", got.Score, tt.want.Score) - } - } + dl := scut.TestDetailLogger{} + got := BinaryArtifacts(tt.name, tt.findings, &dl) + scut.ValidateTestReturn(t, tt.name, &tt.result, &got, &dl) }) } } diff --git a/checks/evaluation/branch_protection.go b/checks/evaluation/branch_protection.go index 7703cab920c..f5848d521dc 100644 --- a/checks/evaluation/branch_protection.go +++ b/checks/evaluation/branch_protection.go @@ -25,7 +25,7 @@ import ( const ( minReviews = 2 // Points incremented at each level. - adminNonAdminBasicLevel = 3 // Level 1. + basicLevel = 3 // Level 1. adminNonAdminReviewLevel = 3 // Level 2. nonAdminContextLevel = 2 // Level 3. nonAdminThoroughReviewLevel = 1 // Level 4. @@ -35,7 +35,6 @@ const ( type scoresInfo struct { basic int - adminBasic int review int adminReview int context int @@ -50,6 +49,16 @@ type levelScore struct { maxes scoresInfo // Maximum possible score for a branch. } +type tier uint8 + +const ( + Tier1 tier = iota + Tier2 + Tier3 + Tier4 + Tier5 +) + // BranchProtection runs Branch-Protection check. func BranchProtection(name string, dl checker.DetailLogger, r *checker.BranchProtectionsData, @@ -61,7 +70,7 @@ func BranchProtection(name string, dl checker.DetailLogger, var score levelScore b := r.Branches[i] - // Protected field only indates that the branch matches + // Protected field only indicates that the branch matches // one `Branch protection rules`. All settings may be disabled, // so it does not provide any guarantees. 
protected := !(b.Protected != nil && !*b.Protected) @@ -71,7 +80,6 @@ func BranchProtection(name string, dl checker.DetailLogger, }) } score.scores.basic, score.maxes.basic = basicNonAdminProtection(&b, dl) - score.scores.adminBasic, score.maxes.adminBasic = basicAdminProtection(&b, dl) score.scores.review, score.maxes.review = nonAdminReviewProtection(&b) score.scores.adminReview, score.maxes.adminReview = adminReviewProtection(&b, dl) score.scores.context, score.maxes.context = nonAdminContextProtection(&b, dl) @@ -87,7 +95,7 @@ func BranchProtection(name string, dl checker.DetailLogger, return checker.CreateInconclusiveResult(name, "unable to detect any development/release branches") } - score, err := computeScore(scores) + score, err := computeFinalScore(scores) if err != nil { return checker.CreateRuntimeErrorResult(name, err) } @@ -105,86 +113,34 @@ func BranchProtection(name string, dl checker.DetailLogger, } } -func computeNonAdminBasicScore(scores []levelScore) int { - score := 0 - for i := range scores { - s := scores[i] - score += s.scores.basic - } - return score -} - -func computeAdminBasicScore(scores []levelScore) int { - score := 0 - for i := range scores { - s := scores[i] - score += s.scores.adminBasic - } - return score -} - -func computeNonAdminReviewScore(scores []levelScore) int { - score := 0 - for i := range scores { - s := scores[i] - score += s.scores.review - } - return score -} - -func computeAdminReviewScore(scores []levelScore) int { - score := 0 - for i := range scores { - s := scores[i] - score += s.scores.adminReview - } - return score -} - -func computeNonAdminThoroughReviewScore(scores []levelScore) int { - score := 0 - for i := range scores { - s := scores[i] - score += s.scores.thoroughReview - } - return score -} - -func computeAdminThoroughReviewScore(scores []levelScore) int { - score := 0 - for i := range scores { - s := scores[i] - score += s.scores.adminThoroughReview - } - return score -} - -func 
computeNonAdminContextScore(scores []levelScore) int { - score := 0 - for i := range scores { - s := scores[i] - score += s.scores.context - } - return score -} - -func computeCodeownerThoroughReviewScore(scores []levelScore) int { - score := 0 - for i := range scores { - s := scores[i] - score += s.scores.codeownerReview +func sumUpScoreForTier(t tier, scoresData []levelScore) int { + sum := 0 + for i := range scoresData { + score := scoresData[i] + switch t { + case Tier1: + sum += score.scores.basic + case Tier2: + sum += score.scores.review + score.scores.adminReview + case Tier3: + sum += score.scores.context + case Tier4: + sum += score.scores.thoroughReview + score.scores.codeownerReview + case Tier5: + sum += score.scores.adminThoroughReview + } } - return score + return sum } -func noarmalizeScore(score, max, level int) float64 { +func normalizeScore(score, max, level int) float64 { if max == 0 { return float64(level) } return float64(score*level) / float64(max) } -func computeScore(scores []levelScore) (int, error) { +func computeFinalScore(scores []levelScore) (int, error) { if len(scores) == 0 { return 0, sce.WithMessage(sce.ErrScorecardInternal, "scores are empty") } @@ -194,31 +150,26 @@ func computeScore(scores []levelScore) (int, error) { // First, check if they all pass the basic (admin and non-admin) checks. maxBasicScore := maxScore.basic * len(scores) - maxAdminBasicScore := maxScore.adminBasic * len(scores) - basicScore := computeNonAdminBasicScore(scores) - adminBasicScore := computeAdminBasicScore(scores) - score += noarmalizeScore(basicScore+adminBasicScore, maxBasicScore+maxAdminBasicScore, adminNonAdminBasicLevel) - if basicScore != maxBasicScore || - adminBasicScore != maxAdminBasicScore { + basicScore := sumUpScoreForTier(Tier1, scores) + score += normalizeScore(basicScore, maxBasicScore, basicLevel) + if basicScore < maxBasicScore { return int(score), nil } // Second, check the (admin and non-admin) reviews. 
maxReviewScore := maxScore.review * len(scores) maxAdminReviewScore := maxScore.adminReview * len(scores) - reviewScore := computeNonAdminReviewScore(scores) - adminReviewScore := computeAdminReviewScore(scores) - score += noarmalizeScore(reviewScore+adminReviewScore, maxReviewScore+maxAdminReviewScore, adminNonAdminReviewLevel) - if reviewScore != maxReviewScore || - adminReviewScore != maxAdminReviewScore { + adminNonAdminReviewScore := sumUpScoreForTier(Tier2, scores) + score += normalizeScore(adminNonAdminReviewScore, maxReviewScore+maxAdminReviewScore, adminNonAdminReviewLevel) + if adminNonAdminReviewScore < maxReviewScore+maxAdminReviewScore { return int(score), nil } // Third, check the use of non-admin context. maxContextScore := maxScore.context * len(scores) - contextScore := computeNonAdminContextScore(scores) - score += noarmalizeScore(contextScore, maxContextScore, nonAdminContextLevel) - if contextScore != maxContextScore { + contextScore := sumUpScoreForTier(Tier3, scores) + score += normalizeScore(contextScore, maxContextScore, nonAdminContextLevel) + if contextScore < maxContextScore { return int(score), nil } @@ -226,11 +177,9 @@ func computeScore(scores []levelScore) (int, error) { // Also check whether this repo requires codeowner review maxThoroughReviewScore := maxScore.thoroughReview * len(scores) maxCodeownerReviewScore := maxScore.codeownerReview * len(scores) - thoroughReviewScore := computeNonAdminThoroughReviewScore(scores) - codeownerReviewScore := computeCodeownerThoroughReviewScore(scores) - score += noarmalizeScore(thoroughReviewScore+codeownerReviewScore, maxThoroughReviewScore+maxCodeownerReviewScore, - nonAdminThoroughReviewLevel) - if thoroughReviewScore != maxThoroughReviewScore { + tier4Score := sumUpScoreForTier(Tier4, scores) + score += normalizeScore(tier4Score, maxThoroughReviewScore+maxCodeownerReviewScore, nonAdminThoroughReviewLevel) + if tier4Score < maxThoroughReviewScore+maxCodeownerReviewScore { return int(score), 
nil } @@ -238,8 +187,8 @@ func computeScore(scores []levelScore) (int, error) { // This one is controversial and has usability issues // https://github.com/ossf/scorecard/issues/1027, so we may remove it. maxAdminThoroughReviewScore := maxScore.adminThoroughReview * len(scores) - adminThoroughReviewScore := computeAdminThoroughReviewScore(scores) - score += noarmalizeScore(adminThoroughReviewScore, maxAdminThoroughReviewScore, adminThoroughReviewLevel) + adminThoroughReviewScore := sumUpScoreForTier(Tier5, scores) + score += normalizeScore(adminThoroughReviewScore, maxAdminThoroughReviewScore, adminThoroughReviewLevel) if adminThoroughReviewScore != maxAdminThoroughReviewScore { return int(score), nil } @@ -308,30 +257,6 @@ func basicNonAdminProtection(branch *clients.BranchRef, dl checker.DetailLogger) return score, max } -func basicAdminProtection(branch *clients.BranchRef, dl checker.DetailLogger) (int, int) { - score := 0 - max := 0 - // Only log information if the branch is protected. - log := branch.Protected != nil && *branch.Protected - - // nil typically means we do not have access to the value. - if branch.BranchProtectionRule.EnforceAdmins != nil { - // Note: we don't inrecase max possible score for non-admin viewers. 
- max++ - switch *branch.BranchProtectionRule.EnforceAdmins { - case true: - info(dl, log, "settings apply to administrators on branch '%s'", *branch.Name) - score++ - case false: - warn(dl, log, "settings do not apply to administrators on branch '%s'", *branch.Name) - } - } else { - debug(dl, log, "unable to retrieve whether or not settings apply to administrators on branch '%s'", *branch.Name) - } - - return score, max -} - func nonAdminContextProtection(branch *clients.BranchRef, dl checker.DetailLogger) (int, int) { score := 0 max := 0 @@ -357,11 +282,12 @@ func nonAdminReviewProtection(branch *clients.BranchRef) (int, int) { score := 0 max := 0 - max++ - if branch.BranchProtectionRule.RequiredPullRequestReviews.RequiredApprovingReviewCount != nil && - *branch.BranchProtectionRule.RequiredPullRequestReviews.RequiredApprovingReviewCount > 0 { + // Having at least 1 reviewer is twice as important as the other Tier 2 requirements. + const reviewerWeight = 2 + max += reviewerWeight + if valueOrZero(branch.BranchProtectionRule.RequiredPullRequestReviews.RequiredApprovingReviewCount) > 0 { // We do not display anything here, it's done in nonAdminThoroughReviewProtection() - score++ + score += reviewerWeight } return score, max } @@ -400,6 +326,16 @@ func adminReviewProtection(branch *clients.BranchRef, dl checker.DetailLogger) ( } } + max++ + if valueOrZero(branch.BranchProtectionRule.RequiredPullRequestReviews.Required) { + score++ + info(dl, log, "PRs are required in order to make changes on branch '%s'", *branch.Name) + } else { + warn(dl, log, "PRs are not required to make changes on branch '%s'; or we don't have data to detect it."+ + "If you think it might be the latter, make sure to run Scorecard with a PAT or use Repo "+ + "Rules (that are always public) instead of Branch Protection settings", *branch.Name) + } + return score, max } @@ -410,7 +346,7 @@ func adminThoroughReviewProtection(branch *clients.BranchRef, dl checker.DetailL log := branch.Protected != 
nil && *branch.Protected if branch.BranchProtectionRule.RequiredPullRequestReviews.DismissStaleReviews != nil { - // Note: we don't inrecase max possible score for non-admin viewers. + // Note: we don't increase max possible score for non-admin viewers. max++ switch *branch.BranchProtectionRule.RequiredPullRequestReviews.DismissStaleReviews { case true: @@ -422,6 +358,22 @@ func adminThoroughReviewProtection(branch *clients.BranchRef, dl checker.DetailL } else { debug(dl, log, "unable to retrieve review dismissal on branch '%s'", *branch.Name) } + + // nil typically means we do not have access to the value. + if branch.BranchProtectionRule.EnforceAdmins != nil { + // Note: we don't increase max possible score for non-admin viewers. + max++ + switch *branch.BranchProtectionRule.EnforceAdmins { + case true: + info(dl, log, "settings apply to administrators on branch '%s'", *branch.Name) + score++ + case false: + warn(dl, log, "settings do not apply to administrators on branch '%s'", *branch.Name) + } + } else { + debug(dl, log, "unable to retrieve whether or not settings apply to administrators on branch '%s'", *branch.Name) + } + return score, max } @@ -433,19 +385,16 @@ func nonAdminThoroughReviewProtection(branch *clients.BranchRef, dl checker.Deta log := branch.Protected != nil && *branch.Protected max++ - if branch.BranchProtectionRule.RequiredPullRequestReviews.RequiredApprovingReviewCount != nil { - switch *branch.BranchProtectionRule.RequiredPullRequestReviews.RequiredApprovingReviewCount >= minReviews { - case true: - info(dl, log, "number of required reviewers is %d on branch '%s'", - *branch.BranchProtectionRule.RequiredPullRequestReviews.RequiredApprovingReviewCount, *branch.Name) - score++ - default: - warn(dl, log, "number of required reviewers is only %d on branch '%s'", - *branch.BranchProtectionRule.RequiredPullRequestReviews.RequiredApprovingReviewCount, *branch.Name) - } + + reviewers := 
valueOrZero(branch.BranchProtectionRule.RequiredPullRequestReviews.RequiredApprovingReviewCount) + if reviewers >= minReviews { + info(dl, log, "number of required reviewers is %d on branch '%s'", reviewers, *branch.Name) + score++ } else { - warn(dl, log, "number of required reviewers is 0 on branch '%s'", *branch.Name) + warn(dl, log, "number of required reviewers is %d on branch '%s', while the ideal suggested is %d", + reviewers, *branch.Name, minReviews) } + return score, max } @@ -473,3 +422,12 @@ func codeownerBranchProtection( return score, max } + +// returns the pointer's value if it exists, the type's zero-value otherwise. +func valueOrZero[T any](ptr *T) T { + if ptr == nil { + var zero T + return zero + } + return *ptr +} diff --git a/checks/evaluation/branch_protection_test.go b/checks/evaluation/branch_protection_test.go index 5fd5e541edb..0084e4e0a34 100644 --- a/checks/evaluation/branch_protection_test.go +++ b/checks/evaluation/branch_protection_test.go @@ -25,7 +25,6 @@ import ( func testScore(branch *clients.BranchRef, codeownersFiles []string, dl checker.DetailLogger) (int, error) { var score levelScore score.scores.basic, score.maxes.basic = basicNonAdminProtection(branch, dl) - score.scores.adminBasic, score.maxes.adminBasic = basicAdminProtection(branch, dl) score.scores.review, score.maxes.review = nonAdminReviewProtection(branch) score.scores.adminReview, score.maxes.adminReview = adminReviewProtection(branch, dl) score.scores.context, score.maxes.context = nonAdminContextProtection(branch, dl) @@ -33,9 +32,10 @@ func testScore(branch *clients.BranchRef, codeownersFiles []string, dl checker.D score.scores.adminThoroughReview, score.maxes.adminThoroughReview = adminThoroughReviewProtection(branch, dl) score.scores.codeownerReview, score.maxes.codeownerReview = codeownerBranchProtection(branch, codeownersFiles, dl) - return computeScore([]levelScore{score}) + return computeFinalScore([]levelScore{score}) } +// TODO: order of tests to have 
progressive scores. func TestIsBranchProtected(t *testing.T) { t.Parallel() trueVal := true @@ -50,13 +50,13 @@ func TestIsBranchProtected(t *testing.T) { expected scut.TestReturn }{ { - name: "Nothing is enabled", + name: "GitHub default settings", expected: scut.TestReturn{ Error: nil, - Score: 2, - NumberOfWarn: 7, + Score: 3, + NumberOfWarn: 6, NumberOfInfo: 2, - NumberOfDebug: 0, + NumberOfDebug: 1, }, branch: &clients.BranchRef{ Name: &branchVal, @@ -68,9 +68,7 @@ func TestIsBranchProtected(t *testing.T) { EnforceAdmins: &falseVal, RequireLastPushApproval: &falseVal, RequiredPullRequestReviews: clients.PullRequestReviewRule{ - DismissStaleReviews: &falseVal, - RequireCodeOwnerReviews: &falseVal, - RequiredApprovingReviewCount: &zeroVal, + Required: &falseVal, }, CheckRules: clients.StatusChecksRule{ RequiresStatusChecks: &trueVal, @@ -85,7 +83,7 @@ func TestIsBranchProtected(t *testing.T) { expected: scut.TestReturn{ Error: nil, Score: 0, - NumberOfWarn: 2, + NumberOfWarn: 3, NumberOfInfo: 0, NumberOfDebug: 4, }, @@ -98,9 +96,9 @@ func TestIsBranchProtected(t *testing.T) { name: "Required status check enabled", expected: scut.TestReturn{ Error: nil, - Score: 2, + Score: 4, NumberOfWarn: 5, - NumberOfInfo: 4, + NumberOfInfo: 5, NumberOfDebug: 0, }, branch: &clients.BranchRef{ @@ -108,6 +106,7 @@ func TestIsBranchProtected(t *testing.T) { Protected: &trueVal, BranchProtectionRule: clients.BranchProtectionRule{ RequiredPullRequestReviews: clients.PullRequestReviewRule{ + Required: &trueVal, DismissStaleReviews: &falseVal, RequireCodeOwnerReviews: &falseVal, RequiredApprovingReviewCount: &zeroVal, @@ -129,9 +128,9 @@ func TestIsBranchProtected(t *testing.T) { name: "Required status check enabled without checking for status string", expected: scut.TestReturn{ Error: nil, - Score: 2, + Score: 4, NumberOfWarn: 6, - NumberOfInfo: 3, + NumberOfInfo: 4, NumberOfDebug: 0, }, branch: &clients.BranchRef{ @@ -144,6 +143,7 @@ func TestIsBranchProtected(t *testing.T) { 
AllowForcePushes: &falseVal, AllowDeletions: &falseVal, RequiredPullRequestReviews: clients.PullRequestReviewRule{ + Required: &trueVal, DismissStaleReviews: &falseVal, RequireCodeOwnerReviews: &falseVal, RequiredApprovingReviewCount: &zeroVal, @@ -157,13 +157,13 @@ }, }, { - name: "Required pull request enabled", + name: "Admin run only preventing force pushes and deletions", expected: scut.TestReturn{ Error: nil, - Score: 2, + Score: 3, NumberOfWarn: 6, - NumberOfInfo: 3, - NumberOfDebug: 0, + NumberOfInfo: 2, + NumberOfDebug: 1, }, branch: &clients.BranchRef{ Name: &branchVal, @@ -171,15 +171,106 @@ BranchProtectionRule: clients.BranchProtectionRule{ EnforceAdmins: &falseVal, RequireLastPushApproval: &falseVal, + RequireLinearHistory: &falseVal, + AllowForcePushes: &falseVal, + AllowDeletions: &falseVal, + CheckRules: clients.StatusChecksRule{ + RequiresStatusChecks: &falseVal, + UpToDateBeforeMerge: &falseVal, + Contexts: nil, + }, + RequiredPullRequestReviews: clients.PullRequestReviewRule{ + Required: &falseVal, + }, + }, + }, + }, + { + name: "Admin run with all tier 2 requirements except require PRs and reviewers", + expected: scut.TestReturn{ + Error: nil, + Score: 4, // Should be 4.2 if we allow decimal punctuation + NumberOfWarn: 2, + NumberOfInfo: 6, + NumberOfDebug: 1, + }, + branch: &clients.BranchRef{ + Name: &branchVal, + Protected: &trueVal, + BranchProtectionRule: clients.BranchProtectionRule{ + EnforceAdmins: &trueVal, + RequireLastPushApproval: &trueVal, + RequireLinearHistory: &trueVal, + AllowForcePushes: &falseVal, + AllowDeletions: &falseVal, + CheckRules: clients.StatusChecksRule{ + RequiresStatusChecks: &falseVal, + UpToDateBeforeMerge: &trueVal, + Contexts: []string{"foo"}, + }, + RequiredPullRequestReviews: clients.PullRequestReviewRule{ + Required: &falseVal, + }, + }, + }, + }, + { + name: "Admin run on project requiring pull requests but without
approver -- best a single maintainer can do", + expected: scut.TestReturn{ + Error: nil, + Score: 4, // Should be 4.8 if we allow decimal punctuation + NumberOfWarn: 2, + NumberOfInfo: 9, + NumberOfDebug: 0, + }, + branch: &clients.BranchRef{ + Name: &branchVal, + Protected: &trueVal, + BranchProtectionRule: clients.BranchProtectionRule{ + EnforceAdmins: &trueVal, + RequireLastPushApproval: &trueVal, RequireLinearHistory: &trueVal, AllowForcePushes: &falseVal, AllowDeletions: &falseVal, CheckRules: clients.StatusChecksRule{ RequiresStatusChecks: &trueVal, - UpToDateBeforeMerge: &falseVal, + UpToDateBeforeMerge: &trueVal, Contexts: []string{"foo"}, }, RequiredPullRequestReviews: clients.PullRequestReviewRule{ + Required: &trueVal, + DismissStaleReviews: &trueVal, + RequireCodeOwnerReviews: &trueVal, + RequiredApprovingReviewCount: &zeroVal, + }, + }, + }, + }, + { + name: "Admin run on project with all tier 2 requirements", + expected: scut.TestReturn{ + Error: nil, + Score: 6, + NumberOfWarn: 4, + NumberOfInfo: 6, + NumberOfDebug: 0, + }, + branch: &clients.BranchRef{ + Name: &branchVal, + Protected: &trueVal, + BranchProtectionRule: clients.BranchProtectionRule{ + EnforceAdmins: &trueVal, + RequireLastPushApproval: &trueVal, + RequireLinearHistory: &trueVal, + AllowForcePushes: &falseVal, + AllowDeletions: &falseVal, + CheckRules: clients.StatusChecksRule{ + RequiresStatusChecks: &falseVal, + UpToDateBeforeMerge: &trueVal, + Contexts: nil, + }, + RequiredPullRequestReviews: clients.PullRequestReviewRule{ + Required: &trueVal, DismissStaleReviews: &falseVal, RequireCodeOwnerReviews: &falseVal, RequiredApprovingReviewCount: &oneVal, @@ -187,13 +278,71 @@ func TestIsBranchProtected(t *testing.T) { }, }, }, + { + name: "Non-admin run on project that require zero reviewer (or don't require PRs at all, we can't differentiate it)", + expected: scut.TestReturn{ + Error: nil, + Score: 3, + NumberOfWarn: 3, + NumberOfInfo: 2, + NumberOfDebug: 4, + }, + branch: 
&clients.BranchRef{ + Name: &branchVal, + Protected: &trueVal, + BranchProtectionRule: clients.BranchProtectionRule{ + EnforceAdmins: nil, + RequireLastPushApproval: nil, + RequireLinearHistory: &falseVal, + AllowForcePushes: &falseVal, + AllowDeletions: &falseVal, + CheckRules: clients.StatusChecksRule{ + RequiresStatusChecks: nil, + UpToDateBeforeMerge: nil, + Contexts: nil, + }, + }, + }, + }, + { + name: "Non-admin run on project that require 1 reviewer", + expected: scut.TestReturn{ + Error: nil, + Score: 6, + NumberOfWarn: 3, + NumberOfInfo: 3, + NumberOfDebug: 4, + }, + branch: &clients.BranchRef{ + Name: &branchVal, + Protected: &trueVal, + BranchProtectionRule: clients.BranchProtectionRule{ + EnforceAdmins: nil, + RequireLastPushApproval: nil, + RequireLinearHistory: &falseVal, + AllowForcePushes: &falseVal, + AllowDeletions: &falseVal, + CheckRules: clients.StatusChecksRule{ + RequiresStatusChecks: nil, + UpToDateBeforeMerge: nil, + Contexts: nil, + }, + RequiredPullRequestReviews: clients.PullRequestReviewRule{ + Required: &trueVal, + DismissStaleReviews: nil, + RequireCodeOwnerReviews: &falseVal, + RequiredApprovingReviewCount: &oneVal, + }, + }, + }, + }, { name: "Required admin enforcement enabled", expected: scut.TestReturn{ Error: nil, Score: 3, NumberOfWarn: 5, - NumberOfInfo: 4, + NumberOfInfo: 5, NumberOfDebug: 0, }, branch: &clients.BranchRef{ @@ -211,6 +360,7 @@ func TestIsBranchProtected(t *testing.T) { Contexts: []string{"foo"}, }, RequiredPullRequestReviews: clients.PullRequestReviewRule{ + Required: &trueVal, DismissStaleReviews: &falseVal, RequireCodeOwnerReviews: &falseVal, RequiredApprovingReviewCount: &zeroVal, @@ -222,9 +372,9 @@ func TestIsBranchProtected(t *testing.T) { name: "Required linear history enabled", expected: scut.TestReturn{ Error: nil, - Score: 2, + Score: 3, NumberOfWarn: 6, - NumberOfInfo: 3, + NumberOfInfo: 4, NumberOfDebug: 0, }, branch: &clients.BranchRef{ @@ -242,6 +392,7 @@ func TestIsBranchProtected(t *testing.T) 
{ Contexts: []string{"foo"}, }, RequiredPullRequestReviews: clients.PullRequestReviewRule{ + Required: &trueVal, DismissStaleReviews: &falseVal, RequireCodeOwnerReviews: &falseVal, RequiredApprovingReviewCount: &zeroVal, @@ -255,7 +406,7 @@ func TestIsBranchProtected(t *testing.T) { Error: nil, Score: 1, NumberOfWarn: 7, - NumberOfInfo: 2, + NumberOfInfo: 3, NumberOfDebug: 0, }, branch: &clients.BranchRef{ @@ -274,6 +425,7 @@ func TestIsBranchProtected(t *testing.T) { Contexts: []string{"foo"}, }, RequiredPullRequestReviews: clients.PullRequestReviewRule{ + Required: &trueVal, DismissStaleReviews: &falseVal, RequireCodeOwnerReviews: &falseVal, RequiredApprovingReviewCount: &zeroVal, @@ -287,7 +439,7 @@ func TestIsBranchProtected(t *testing.T) { Error: nil, Score: 1, NumberOfWarn: 7, - NumberOfInfo: 2, + NumberOfInfo: 3, NumberOfDebug: 0, }, branch: &clients.BranchRef{ @@ -305,6 +457,7 @@ func TestIsBranchProtected(t *testing.T) { Contexts: []string{"foo"}, }, RequiredPullRequestReviews: clients.PullRequestReviewRule{ + Required: &trueVal, DismissStaleReviews: &falseVal, RequireCodeOwnerReviews: &falseVal, RequiredApprovingReviewCount: &zeroVal, @@ -318,7 +471,7 @@ func TestIsBranchProtected(t *testing.T) { Error: nil, Score: 8, NumberOfWarn: 2, - NumberOfInfo: 8, + NumberOfInfo: 9, NumberOfDebug: 0, }, branch: &clients.BranchRef{ @@ -336,6 +489,7 @@ func TestIsBranchProtected(t *testing.T) { Contexts: []string{"foo"}, }, RequiredPullRequestReviews: clients.PullRequestReviewRule{ + Required: &trueVal, DismissStaleReviews: &trueVal, RequireCodeOwnerReviews: &trueVal, RequiredApprovingReviewCount: &oneVal, @@ -349,7 +503,7 @@ func TestIsBranchProtected(t *testing.T) { Error: nil, Score: 8, NumberOfWarn: 1, - NumberOfInfo: 8, + NumberOfInfo: 9, NumberOfDebug: 0, }, branch: &clients.BranchRef{ @@ -367,6 +521,7 @@ func TestIsBranchProtected(t *testing.T) { Contexts: []string{"foo"}, }, RequiredPullRequestReviews: clients.PullRequestReviewRule{ + Required: &trueVal, 
DismissStaleReviews: &trueVal, RequireCodeOwnerReviews: &trueVal, RequiredApprovingReviewCount: &oneVal, @@ -381,7 +536,7 @@ func TestIsBranchProtected(t *testing.T) { Error: nil, Score: 5, NumberOfWarn: 3, - NumberOfInfo: 7, + NumberOfInfo: 8, NumberOfDebug: 0, }, branch: &clients.BranchRef{ @@ -399,6 +554,7 @@ func TestIsBranchProtected(t *testing.T) { Contexts: []string{"foo"}, }, RequiredPullRequestReviews: clients.PullRequestReviewRule{ + Required: &trueVal, DismissStaleReviews: &trueVal, RequireCodeOwnerReviews: &trueVal, RequiredApprovingReviewCount: &oneVal, @@ -417,9 +573,7 @@ func TestIsBranchProtected(t *testing.T) { Score: score, Error: err, } - if !scut.ValidateTestReturn(t, tt.name, &tt.expected, actual, &dl) { - t.Fail() - } + scut.ValidateTestReturn(t, tt.name, &tt.expected, actual, &dl) }) } } diff --git a/checks/evaluation/ci_tests.go b/checks/evaluation/ci_tests.go index 8dc9311abe6..4c55fcc31c9 100644 --- a/checks/evaluation/ci_tests.go +++ b/checks/evaluation/ci_tests.go @@ -16,122 +16,72 @@ package evaluation import ( "fmt" - "strings" "github.com/ossf/scorecard/v4/checker" + sce "github.com/ossf/scorecard/v4/errors" "github.com/ossf/scorecard/v4/finding" + "github.com/ossf/scorecard/v4/probes/testsRunInCI" ) -const ( - // CheckCITests is the registered name for CITests. - CheckCITests = "CI-Tests" - success = "success" -) - -func CITests(_ string, c *checker.CITestData, dl checker.DetailLogger) checker.CheckResult { - totalMerged := 0 - totalTested := 0 - for i := range c.CIInfo { - r := c.CIInfo[i] - totalMerged++ - - var foundCI bool - - // GitHub Statuses. - prSuccessStatus, err := prHasSuccessStatus(r, dl) - if err != nil { - return checker.CreateRuntimeErrorResult(CheckCITests, err) - } - if prSuccessStatus { - totalTested++ - foundCI = true - continue - } +const CheckCITests = "CI-Tests" - // GitHub Check Runs. 
- prCheckSuccessful, err := prHasSuccessfulCheck(r, dl) - if err != nil { - return checker.CreateRuntimeErrorResult(CheckCITests, err) - } - if prCheckSuccessful { - totalTested++ - foundCI = true - } - - if !foundCI { - // Log message says commit, but really we only care about PRs, and - // use only one commit (branch HEAD) to refer to all commits in a PR +func CITests(name string, + findings []finding.Finding, + dl checker.DetailLogger, +) checker.CheckResult { + expectedProbes := []string{ + testsRunInCI.Probe, + } + if !finding.UniqueProbesEqual(findings, expectedProbes) { + e := sce.WithMessage(sce.ErrScorecardInternal, "invalid probe results") + return checker.CreateRuntimeErrorResult(name, e) + } + // Debug PRs that were merged without CI tests + for i := range findings { + f := &findings[i] + if f.Outcome == finding.OutcomeNegative || f.Outcome == finding.OutcomePositive { dl.Debug(&checker.LogMessage{ - Text: fmt.Sprintf("merged PR %d without CI test at HEAD: %s", r.PullRequestNumber, r.HeadSHA), + Text: f.Message, }) } } - if totalMerged == 0 { + // check that the project has pull requests + if noPullRequestsFound(findings) { return checker.CreateInconclusiveResult(CheckCITests, "no pull request found") } + totalMerged, totalTested := getMergedAndTested(findings) + + if totalMerged < totalTested || len(findings) < totalTested { + e := sce.WithMessage(sce.ErrScorecardInternal, "invalid finding values") + return checker.CreateRuntimeErrorResult(name, e) + } + reason := fmt.Sprintf("%d out of %d merged PRs checked by a CI test", totalTested, totalMerged) return checker.CreateProportionalScoreResult(CheckCITests, reason, totalTested, totalMerged) } -// PR has a status marked 'success' and a CI-related context. 
-// -//nolint:unparam -func prHasSuccessStatus(r checker.RevisionCIInfo, dl checker.DetailLogger) (bool, error) { - for _, status := range r.Statuses { - if status.State != success { - continue - } - if isTest(status.Context) || isTest(status.TargetURL) { - dl.Debug(&checker.LogMessage{ - Path: status.URL, - Type: finding.FileTypeURL, - Text: fmt.Sprintf("CI test found: pr: %s, context: %s", r.HeadSHA, - status.Context), - }) - return true, nil - } - } - return false, nil -} +func getMergedAndTested(findings []finding.Finding) (int, int) { + totalMerged := 0 + totalTested := 0 -// PR has a successful CI-related check. -// -//nolint:unparam -func prHasSuccessfulCheck(r checker.RevisionCIInfo, dl checker.DetailLogger) (bool, error) { - for _, cr := range r.CheckRuns { - if cr.Status != "completed" { - continue - } - if cr.Conclusion != success { - continue - } - if isTest(cr.App.Slug) { - dl.Debug(&checker.LogMessage{ - Path: cr.URL, - Type: finding.FileTypeURL, - Text: fmt.Sprintf("CI test found: pr: %d, context: %s", r.PullRequestNumber, - cr.App.Slug), - }) - return true, nil + for i := range findings { + f := &findings[i] + totalMerged++ + if f.Outcome == finding.OutcomePositive { + totalTested++ } } - return false, nil -} -// isTest returns true if the given string is a CI test. -func isTest(s string) bool { - l := strings.ToLower(s) + return totalMerged, totalTested +} - // Add more patterns here! 
- for _, pattern := range []string{ - "appveyor", "buildkite", "circleci", "e2e", "github-actions", "jenkins", - "mergeable", "packit-as-a-service", "semaphoreci", "test", "travis-ci", - "flutter-dashboard", "Cirrus CI", "azure-pipelines", - } { - if strings.Contains(l, pattern) { +func noPullRequestsFound(findings []finding.Finding) bool { + for i := range findings { + f := &findings[i] + if f.Outcome == finding.OutcomeNotApplicable { return true } } diff --git a/checks/evaluation/ci_tests_test.go b/checks/evaluation/ci_tests_test.go index 8a1ab871645..a8fc4ecb9ae 100644 --- a/checks/evaluation/ci_tests_test.go +++ b/checks/evaluation/ci_tests_test.go @@ -16,442 +16,102 @@ package evaluation import ( "testing" - "github.com/ossf/scorecard/v4/checker" - "github.com/ossf/scorecard/v4/clients" + "github.com/ossf/scorecard/v4/finding" scut "github.com/ossf/scorecard/v4/utests" ) -func Test_isTest(t *testing.T) { - t.Parallel() - type args struct { - s string - } - tests := []struct { - name string - args args - want bool - }{ - { - name: "appveyor", - args: args{ - s: "appveyor", - }, - want: true, - }, - { - name: "circleci", - args: args{ - s: "circleci", - }, - want: true, - }, - { - name: "jenkins", - args: args{ - s: "jenkins", - }, - want: true, - }, - { - name: "e2e", - args: args{ - s: "e2e", - }, - want: true, - }, - { - name: "github-actions", - args: args{ - s: "github-actions", - }, - want: true, - }, - { - name: "mergeable", - args: args{ - s: "mergeable", - }, - want: true, - }, - { - name: "packit-as-a-service", - args: args{ - s: "packit-as-a-service", - }, - want: true, - }, - { - name: "semaphoreci", - args: args{ - s: "semaphoreci", - }, - want: true, - }, - { - name: "test", - args: args{ - s: "test", - }, - want: true, - }, - { - name: "travis-ci", - args: args{ - s: "travis-ci", - }, - want: true, - }, - { - name: "azure-pipelines", - args: args{ - s: "azure-pipelines", - }, - want: true, - }, - { - name: "non-existing", - args: args{ - s: 
"non-existing", - }, - want: false, - }, - } - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - if got := isTest(tt.args.s); got != tt.want { - t.Errorf("isTest() = %v, want %v for test %v", got, tt.want, tt.name) - } - }) - } -} - -func Test_prHasSuccessfulCheck(t *testing.T) { +// Tip: If you add new findings to this test, else +// add a unit test to the probes with the same findings. +func TestCITests(t *testing.T) { t.Parallel() - - //enabled nolint because this is a test - //nolint tests := []struct { - name string - args checker.RevisionCIInfo - want bool - wantErr bool + name string + findings []finding.Finding + result scut.TestReturn }{ { - name: "check run with conclusion success", - args: checker.RevisionCIInfo{ - PullRequestNumber: 1, - HeadSHA: "sha", - CheckRuns: []clients.CheckRun{ - { - App: clients.CheckRunApp{Slug: "test"}, - Conclusion: "success", - URL: "url", - Status: "completed", - }, - }, - }, - want: true, - wantErr: false, - }, - { - name: "check run with conclusion not success", - args: checker.RevisionCIInfo{ - PullRequestNumber: 1, - HeadSHA: "sha", - CheckRuns: []clients.CheckRun{ - { - App: clients.CheckRunApp{Slug: "test"}, - Conclusion: "failed", - URL: "url", - Status: "completed", - }, + name: "Has CI tests. 
1 tested out of 1 merged", + findings: []finding.Finding{ + { + Outcome: finding.OutcomePositive, + Probe: "testsRunInCI", + Message: "CI test found: pr: 1, context: e2e", + Location: &finding.Location{Type: 4}, }, }, - want: false, - wantErr: false, - }, - { - name: "check run with conclusion not success", - args: checker.RevisionCIInfo{ - PullRequestNumber: 1, - HeadSHA: "sha", - CheckRuns: []clients.CheckRun{ - { - App: clients.CheckRunApp{Slug: "test"}, - Conclusion: "success", - URL: "url", - Status: "notcompleted", - }, - }, - }, - want: false, - wantErr: false, - }, - } - for _, tt := range tests { - tt := tt - dl := &scut.TestDetailLogger{} - - //nolint:errcheck - got, _ := prHasSuccessfulCheck(tt.args, dl) - if got != tt.want { - t.Errorf("prHasSuccessfulCheck() = %v, want %v", got, tt.want) - } - } -} - -func Test_prHasSuccessStatus(t *testing.T) { - t.Parallel() - type args struct { //nolint:govet - r checker.RevisionCIInfo - dl checker.DetailLogger - } - tests := []struct { //nolint:govet - name string - args args - want bool - wantErr bool - }{ - { - name: "empty revision", - args: args{ - r: checker.RevisionCIInfo{}, - }, - want: false, - wantErr: false, - }, - { - name: "no statuses", - args: args{ - r: checker.RevisionCIInfo{ - Statuses: []clients.Status{}, - }, + result: scut.TestReturn{ + Score: 10, + NumberOfDebug: 1, }, }, { - name: "status is not success", - args: args{ - r: checker.RevisionCIInfo{ - Statuses: []clients.Status{ - { - State: "failure", - }, - }, + name: "Has CI tests. 
3 tested out of 4 merged", + findings: []finding.Finding{ + { + Outcome: finding.OutcomePositive, + Probe: "testsRunInCI", + Message: "CI test found: pr: 1, context: e2e", + Location: &finding.Location{Type: 4}, }, - }, - }, - { - name: "status is success", - args: args{ - r: checker.RevisionCIInfo{ - Statuses: []clients.Status{ - { - State: "success", - Context: CheckCITests, - }, - }, + { + Outcome: finding.OutcomePositive, + Probe: "testsRunInCI", + Message: "CI test found: pr: 1, context: e2e", + Location: &finding.Location{Type: 4}, }, - dl: &scut.TestDetailLogger{}, - }, - want: true, - wantErr: false, - }, - } - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - got, err := prHasSuccessStatus(tt.args.r, tt.args.dl) //nolint:govet - if (err != nil) != tt.wantErr { //nolint:govet - t.Errorf("prHasSuccessStatus() error = %v, wantErr %v", err, tt.wantErr) //nolint:govet - return - } - if got != tt.want { //nolint:govet - t.Errorf("prHasSuccessStatus() got = %v, want %v", got, tt.want) //nolint:govet - } - }) - } -} - -func Test_prHasSuccessfulCheckAdditional(t *testing.T) { - t.Parallel() - type args struct { //nolint:govet - r checker.RevisionCIInfo - dl checker.DetailLogger - } - tests := []struct { //nolint:govet - name string - args args - want bool - wantErr bool - }{ - { - name: "empty revision", - args: args{ - r: checker.RevisionCIInfo{}, - }, - want: false, - wantErr: false, - }, - { - name: "status is not completed", - args: args{ - r: checker.RevisionCIInfo{ - CheckRuns: []clients.CheckRun{ - { - Status: "notcompleted", - }, - }, + { + Outcome: finding.OutcomePositive, + Probe: "testsRunInCI", + Message: "CI test found: pr: 1, context: e2e", + Location: &finding.Location{Type: 4}, }, - }, - }, - { - name: "status is not success", - args: args{ - r: checker.RevisionCIInfo{ - CheckRuns: []clients.CheckRun{ - { - Status: "completed", - Conclusion: "failure", - }, - }, + { + Outcome: finding.OutcomeNegative, + 
Probe: "testsRunInCI", + Message: "CI test found: pr: 1, context: e2e", + Location: &finding.Location{Type: 4}, }, }, - }, - { - name: "conclusion is success", - args: args{ - r: checker.RevisionCIInfo{ - CheckRuns: []clients.CheckRun{ - { - Status: "completed", - Conclusion: "success", - }, - }, - }, + result: scut.TestReturn{ + Score: 7, + NumberOfDebug: 4, }, }, { - name: "conclusion is succesls with a valid app slug", - args: args{ - r: checker.RevisionCIInfo{ - CheckRuns: []clients.CheckRun{ - { - Status: "completed", - Conclusion: "success", - App: clients.CheckRunApp{Slug: "e2e"}, - }, - }, + name: "Tests debugging", + findings: []finding.Finding{ + { + Outcome: finding.OutcomeNegative, + Probe: "testsRunInCI", + Message: "merged PR 1 without CI test at HEAD: 1", + Location: &finding.Location{Type: 4}, }, - dl: &scut.TestDetailLogger{}, - }, - want: true, - wantErr: false, - }, - } - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - got, err := prHasSuccessfulCheck(tt.args.r, tt.args.dl) - if (err != nil) != tt.wantErr { - t.Errorf("prHasSuccessfulCheck() error = %v, wantErr %v", err, tt.wantErr) - return - } - if got != tt.want { //nolint:govet - t.Errorf("prHasSuccessfulCheck() got = %v, want %v", got, tt.want) - } - }) - } -} - -func TestCITests(t *testing.T) { - t.Parallel() - type args struct { //nolint:govet - in0 string - c *checker.CITestData - dl checker.DetailLogger - } - tests := []struct { //nolint:govet - name string - args args - want int - }{ - { - name: "Status completed with failure", - args: args{ - in0: "", - c: &checker.CITestData{ - CIInfo: []checker.RevisionCIInfo{ - { - CheckRuns: []clients.CheckRun{ - { - Status: "completed", - App: clients.CheckRunApp{Slug: "e2e"}, - }, - }, - Statuses: []clients.Status{ - { - State: "failure", - Context: CheckCITests, - TargetURL: "e2e", - }, - }, - }, - }, + { + Outcome: finding.OutcomeNegative, + Probe: "testsRunInCI", + Message: "merged PR 1 without CI 
test at HEAD: 1", + Location: &finding.Location{Type: 4}, }, - dl: &scut.TestDetailLogger{}, - }, - want: 0, - }, - { - name: "valid", - args: args{ - in0: "", - c: &checker.CITestData{ - CIInfo: []checker.RevisionCIInfo{ - { - CheckRuns: []clients.CheckRun{ - { - Status: "completed", - Conclusion: "success", - App: clients.CheckRunApp{Slug: "e2e"}, - }, - }, - Statuses: []clients.Status{ - { - State: "success", - Context: CheckCITests, - TargetURL: "e2e", - }, - }, - }, - }, + { + Outcome: finding.OutcomeNegative, + Probe: "testsRunInCI", + Message: "merged PR 1 without CI test at HEAD: 1", + Location: &finding.Location{Type: 4}, }, - dl: &scut.TestDetailLogger{}, }, - want: 10, - }, - { - name: "no ci info", - args: args{ - in0: "", - c: &checker.CITestData{}, - dl: &scut.TestDetailLogger{}, + result: scut.TestReturn{ + NumberOfDebug: 3, + Score: 0, }, - want: -1, }, } - for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() - if got := CITests(tt.args.in0, tt.args.c, tt.args.dl); got.Score != tt.want { //nolint:govet - t.Errorf("CITests() = %v, want %v", got.Score, tt.want) //nolint:govet - } + dl := scut.TestDetailLogger{} + got := CITests(tt.name, tt.findings, &dl) + scut.ValidateTestReturn(t, tt.name, &tt.result, &got, &dl) }) } } diff --git a/checks/evaluation/cii_best_practices.go b/checks/evaluation/cii_best_practices.go index fa940b4c0ba..0dd8511cf38 100644 --- a/checks/evaluation/cii_best_practices.go +++ b/checks/evaluation/cii_best_practices.go @@ -15,14 +15,12 @@ package evaluation import ( - "fmt" - "github.com/ossf/scorecard/v4/checker" - "github.com/ossf/scorecard/v4/clients" sce "github.com/ossf/scorecard/v4/errors" + "github.com/ossf/scorecard/v4/finding" + "github.com/ossf/scorecard/v4/probes/hasOpenSSFBadge" ) -// Note: exported for unit tests. 
const ( silverScore = 7 // Note: if this value is changed, please update the action's threshold score @@ -32,31 +30,54 @@ const ( ) // CIIBestPractices applies the score policy for the CIIBestPractices check. -func CIIBestPractices(name string, _ checker.DetailLogger, r *checker.CIIBestPracticesData) checker.CheckResult { - if r == nil { - e := sce.WithMessage(sce.ErrScorecardInternal, "empty raw data") +func CIIBestPractices(name string, + findings []finding.Finding, dl checker.DetailLogger, +) checker.CheckResult { + expectedProbes := []string{ + hasOpenSSFBadge.Probe, + } + + if !finding.UniqueProbesEqual(findings, expectedProbes) { + e := sce.WithMessage(sce.ErrScorecardInternal, "invalid probe results") return checker.CreateRuntimeErrorResult(name, e) } - var results checker.CheckResult - switch r.Badge { - case clients.NotFound: - results = checker.CreateMinScoreResult(name, "no effort to earn an OpenSSF best practices badge detected") - case clients.InProgress: - msg := fmt.Sprintf("badge detected: %v", r.Badge) - results = checker.CreateResultWithScore(name, msg, inProgressScore) - case clients.Passing: - msg := fmt.Sprintf("badge detected: %v", r.Badge) - results = checker.CreateResultWithScore(name, msg, passingScore) - case clients.Silver: - msg := fmt.Sprintf("badge detected: %v", r.Badge) - results = checker.CreateResultWithScore(name, msg, silverScore) - case clients.Gold: - msg := fmt.Sprintf("badge detected: %v", r.Badge) - results = checker.CreateMaxScoreResult(name, msg) - case clients.Unknown: - e := sce.WithMessage(sce.ErrScorecardInternal, fmt.Sprintf("unsupported badge: %v", r.Badge)) - results = checker.CreateRuntimeErrorResult(name, e) + var score int + var text string + + if len(findings) != 1 { + errText := "invalid probe results: multiple findings detected" + e := sce.WithMessage(sce.ErrScorecardInternal, errText) + return checker.CreateRuntimeErrorResult(name, e) } - return results + + f := &findings[0] + if f.Outcome == 
finding.OutcomeNegative { + text = "no effort to earn an OpenSSF best practices badge detected" + return checker.CreateMinScoreResult(name, text) + } + //nolint:nestif + if _, hasKey := f.Values[hasOpenSSFBadge.GoldLevel]; hasKey { + score = checker.MaxResultScore + text = "badge detected: Gold" + } else if _, hasKey := f.Values[hasOpenSSFBadge.SilverLevel]; hasKey { + score = silverScore + text = "badge detected: Silver" + } else if _, hasKey := f.Values[hasOpenSSFBadge.PassingLevel]; hasKey { + score = passingScore + text = "badge detected: Passing" + } else if _, hasKey := f.Values[hasOpenSSFBadge.InProgressLevel]; hasKey { + score = inProgressScore + text = "badge detected: InProgress" + } else if _, hasKey := f.Values[hasOpenSSFBadge.UnknownLevel]; hasKey { + text = "unknown badge detected" + e := sce.WithMessage(sce.ErrScorecardInternal, text) + return checker.CreateRuntimeErrorResult(name, e) + } else { + text = "unsupported badge detected" + e := sce.WithMessage(sce.ErrScorecardInternal, text) + return checker.CreateRuntimeErrorResult(name, e) + } + + return checker.CreateResultWithScore(name, text, score) } diff --git a/checks/evaluation/cii_best_practices_test.go b/checks/evaluation/cii_best_practices_test.go index d1798bfa72b..5cfb08c2142 100644 --- a/checks/evaluation/cii_best_practices_test.go +++ b/checks/evaluation/cii_best_practices_test.go @@ -16,71 +16,134 @@ package evaluation import ( "testing" - "github.com/ossf/scorecard/v4/checker" - "github.com/ossf/scorecard/v4/clients" + sce "github.com/ossf/scorecard/v4/errors" + "github.com/ossf/scorecard/v4/finding" + "github.com/ossf/scorecard/v4/probes/hasOpenSSFBadge" + scut "github.com/ossf/scorecard/v4/utests" ) func TestCIIBestPractices(t *testing.T) { - t.Run("CIIBestPractices", func(t *testing.T) { - t.Run("in progress", func(t *testing.T) { - r := &checker.CIIBestPracticesData{ - Badge: clients.InProgress, - } - result := CIIBestPractices("CIIBestPractices", nil, r) - if result.Score != 
inProgressScore { - t.Errorf("CIIBestPractices() = %v, want %v", result.Score, inProgressScore) - } + t.Parallel() + tests := []struct { + name string + findings []finding.Finding + result scut.TestReturn + }{ + { + name: "Unsupported badge found with negative finding", + findings: []finding.Finding{ + { + Probe: "hasOpenSSFBadge", + Outcome: finding.OutcomeNegative, + Values: map[string]int{ + "Unsupported": 1, + }, + }, + }, + result: scut.TestReturn{ + Score: 0, + }, + }, + { + name: "Unsupported badge found with positive finding", + findings: []finding.Finding{ + { + Probe: "hasOpenSSFBadge", + Outcome: finding.OutcomePositive, + Values: map[string]int{ + "Unsupported": 1, + }, + }, + }, + result: scut.TestReturn{ + Score: -1, + Error: sce.ErrScorecardInternal, + }, + }, + { + name: "Has InProgress Badge", + findings: []finding.Finding{ + { + Probe: "hasOpenSSFBadge", + Outcome: finding.OutcomePositive, + Values: map[string]int{ + hasOpenSSFBadge.InProgressLevel: 1, + }, + }, + }, + result: scut.TestReturn{ + Score: 2, + }, + }, + { + name: "Has Passing Badge", + findings: []finding.Finding{ + { + Probe: "hasOpenSSFBadge", + Outcome: finding.OutcomePositive, + Values: map[string]int{ + hasOpenSSFBadge.PassingLevel: 1, + }, + }, + }, + result: scut.TestReturn{ + Score: 5, + }, + }, + { + name: "Has Silver Badge", + findings: []finding.Finding{ + { + Probe: "hasOpenSSFBadge", + Outcome: finding.OutcomePositive, + Values: map[string]int{ + hasOpenSSFBadge.SilverLevel: 1, + }, + }, + }, + result: scut.TestReturn{ + Score: 7, + }, + }, + { + name: "Has Gold Badge", + findings: []finding.Finding{ + { + Probe: "hasOpenSSFBadge", + Outcome: finding.OutcomePositive, + Values: map[string]int{ + hasOpenSSFBadge.GoldLevel: 1, + }, + }, + }, + result: scut.TestReturn{ + Score: 10, + }, + }, + { + name: "Has Unknown Badge", + findings: []finding.Finding{ + { + Probe: "hasOpenSSFBadge", + Outcome: finding.OutcomePositive, + Values: map[string]int{ + "Unknown": 1, + }, + }, + 
}, + result: scut.TestReturn{ + Score: -1, + Error: sce.ErrScorecardInternal, + }, + }, + } + for _, tt := range tests { + tt := tt // Parallel testing + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + dl := scut.TestDetailLogger{} + got := CIIBestPractices(tt.name, tt.findings, &dl) + scut.ValidateTestReturn(t, tt.name, &tt.result, &got, &dl) }) - t.Run("passing", func(t *testing.T) { - r := &checker.CIIBestPracticesData{ - Badge: clients.Passing, - } - result := CIIBestPractices("CIIBestPractices", nil, r) - if result.Score != passingScore { - t.Errorf("CIIBestPractices() = %v, want %v", result.Score, passingScore) - } - }) - t.Run("silver", func(t *testing.T) { - r := &checker.CIIBestPracticesData{ - Badge: clients.Silver, - } - result := CIIBestPractices("CIIBestPractices", nil, r) - if result.Score != silverScore { - t.Errorf("CIIBestPractices() = %v, want %v", result.Score, silverScore) - } - }) - t.Run("gold", func(t *testing.T) { - r := &checker.CIIBestPracticesData{ - Badge: clients.Gold, - } - result := CIIBestPractices("CIIBestPractices", nil, r) - if result.Score != checker.MaxResultScore { - t.Errorf("CIIBestPractices() = %v, want %v", result.Score, checker.MaxResultScore) - } - }) - t.Run("not found", func(t *testing.T) { - r := &checker.CIIBestPracticesData{ - Badge: clients.NotFound, - } - result := CIIBestPractices("CIIBestPractices", nil, r) - if result.Score != checker.MinResultScore { - t.Errorf("CIIBestPractices() = %v, want %v", result.Score, checker.MinResultScore) - } - }) - t.Run("error", func(t *testing.T) { - r := &checker.CIIBestPracticesData{ - Badge: clients.Unknown, - } - result := CIIBestPractices("CIIBestPractices", nil, r) - if result.Score != -1 { - t.Errorf("CIIBestPractices() = %v, want %v", result.Score, -1) - } - }) - t.Run("nil response", func(t *testing.T) { - result := CIIBestPractices("CIIBestPractices", nil, nil) - if result.Score != -1 { - t.Errorf("CIIBestPractices() = %v, want %v", result.Score, -1) - } - }) - }) 
+ } } diff --git a/checks/evaluation/code_review.go b/checks/evaluation/code_review.go index 4c84c92378b..edd59ded537 100644 --- a/checks/evaluation/code_review.go +++ b/checks/evaluation/code_review.go @@ -16,7 +16,6 @@ package evaluation import ( "fmt" - "math" "github.com/ossf/scorecard/v4/checker" sce "github.com/ossf/scorecard/v4/errors" @@ -74,7 +73,7 @@ func CodeReview(name string, dl checker.DetailLogger, r *checker.CodeReviewData) return checker.CreateProportionalScoreResult( name, fmt.Sprintf("found %d unreviewed changesets out of %d", nUnreviewedChanges, nChanges), - int(math.Max(float64(nChanges-nUnreviewedChanges), 0)), + max(nChanges-nUnreviewedChanges, 0), nChanges, ) } diff --git a/checks/evaluation/code_review_test.go b/checks/evaluation/code_review_test.go index bb17732e24d..b916224509d 100644 --- a/checks/evaluation/code_review_test.go +++ b/checks/evaluation/code_review_test.go @@ -216,9 +216,7 @@ func TestCodeReview(t *testing.T) { dl := &scut.TestDetailLogger{} res := CodeReview(tt.name, dl, tt.rawData) - if !scut.ValidateTestReturn(t, tt.name, &tt.expected, &res, dl) { - t.Error() - } + scut.ValidateTestReturn(t, tt.name, &tt.expected, &res, dl) }) } } diff --git a/checks/evaluation/contributors.go b/checks/evaluation/contributors.go index d419e6a1ea4..d58653e6278 100644 --- a/checks/evaluation/contributors.go +++ b/checks/evaluation/contributors.go @@ -16,60 +16,67 @@ package evaluation import ( "fmt" - "sort" "strings" "github.com/ossf/scorecard/v4/checker" sce "github.com/ossf/scorecard/v4/errors" + "github.com/ossf/scorecard/v4/finding" + "github.com/ossf/scorecard/v4/probes/contributorsFromOrgOrCompany" ) const ( - minContributionsPerUser = 5 numberCompaniesForTopScore = 3 ) // Contributors applies the score policy for the Contributors check. 
-func Contributors(name string, dl checker.DetailLogger, - r *checker.ContributorsData, +func Contributors(name string, + findings []finding.Finding, + dl checker.DetailLogger, ) checker.CheckResult { - if r == nil { - e := sce.WithMessage(sce.ErrScorecardInternal, "empty raw data") - return checker.CreateRuntimeErrorResult(name, e) + expectedProbes := []string{ + contributorsFromOrgOrCompany.Probe, } - entities := make(map[string]bool) - - for _, user := range r.Users { - if user.NumContributions < minContributionsPerUser { - continue - } + if !finding.UniqueProbesEqual(findings, expectedProbes) { + e := sce.WithMessage(sce.ErrScorecardInternal, "invalid probe results") + return checker.CreateRuntimeErrorResult(name, e) + } - for _, org := range user.Organizations { - entities[org.Login] = true - } + numberOfPositives := getNumberOfPositives(findings) + reason := fmt.Sprintf("project has %d contributing companies or organizations", numberOfPositives) - for _, comp := range user.Companies { - entities[comp] = true - } + if numberOfPositives > 0 { + logFindings(findings, dl) } - - names := []string{} - for c := range entities { - names = append(names, c) + if numberOfPositives > numberCompaniesForTopScore { + return checker.CreateMaxScoreResult(name, reason) } - sort.Strings(names) + return checker.CreateProportionalScoreResult(name, reason, numberOfPositives, numberCompaniesForTopScore) +} - if len(names) > 0 { - dl.Info(&checker.LogMessage{ - Text: fmt.Sprintf("contributors work for %v", strings.Join(names, ",")), - }) - } else { - dl.Warn(&checker.LogMessage{ - Text: "no contributors have an org or company", - }) +func getNumberOfPositives(findings []finding.Finding) int { + var numberOfPositives int + for i := range findings { + f := &findings[i] + if f.Outcome == finding.OutcomePositive { + if f.Probe == contributorsFromOrgOrCompany.Probe { + numberOfPositives++ + } + } } + return numberOfPositives +} - reason := fmt.Sprintf("%d different organizations found", 
len(entities)) - return checker.CreateProportionalScoreResult(name, reason, len(entities), numberCompaniesForTopScore) +func logFindings(findings []finding.Finding, dl checker.DetailLogger) { + var sb strings.Builder + for i := range findings { + f := &findings[i] + if f.Outcome == finding.OutcomePositive { + sb.WriteString(fmt.Sprintf("%s, ", f.Message)) + } + } + dl.Info(&checker.LogMessage{ + Text: sb.String(), + }) } diff --git a/checks/evaluation/contributors_test.go b/checks/evaluation/contributors_test.go index 87b2c622df7..e00db8f9957 100644 --- a/checks/evaluation/contributors_test.go +++ b/checks/evaluation/contributors_test.go @@ -16,139 +16,74 @@ package evaluation import ( "testing" - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/ossf/scorecard/v4/checker" - "github.com/ossf/scorecard/v4/clients" - "github.com/ossf/scorecard/v4/utests" + "github.com/ossf/scorecard/v4/finding" + scut "github.com/ossf/scorecard/v4/utests" ) func TestContributors(t *testing.T) { t.Parallel() - testCases := []struct { + tests := []struct { name string - raw *checker.ContributorsData - expected checker.CheckResult + findings []finding.Finding + result scut.TestReturn }{ { - name: "No data", - raw: nil, - expected: checker.CheckResult{ - Version: 2, - Score: -1, - Reason: "internal error: empty raw data", - }, - }, - { - name: "No contributors", - raw: &checker.ContributorsData{ - Users: []clients.User{}, + name: "Only has two positive outcomes", + findings: []finding.Finding{ + { + Probe: "contributorsFromOrgOrCompany", + Outcome: finding.OutcomePositive, + }, + { + Probe: "contributorsFromOrgOrCompany", + Outcome: finding.OutcomePositive, + }, }, - expected: checker.CheckResult{ - Version: 2, - Score: 0, - Reason: "0 different organizations found -- score normalized to 0", + result: scut.TestReturn{ + Score: 6, + NumberOfInfo: 1, }, - }, - { - name: "Contributors with orgs and number of contributions is greater than 5 with 
companies", - raw: &checker.ContributorsData{ - Users: []clients.User{ - { - NumContributions: 10, - Organizations: []clients.User{ - { - Login: "org1", - }, - }, - Companies: []string{"company1"}, - }, - { - NumContributions: 10, - Organizations: []clients.User{ - { - Login: "org2", - }, - }, - }, - { - NumContributions: 10, - Organizations: []clients.User{ - { - Login: "org3", - }, - }, - }, - { - NumContributions: 1, - Organizations: []clients.User{ - { - Login: "org2", - }, - }, - }, + }, { + name: "No contributors", + findings: []finding.Finding{ + { + Probe: "contributorsFromOrgOrCompany", + Outcome: finding.OutcomeNegative, }, }, - expected: checker.CheckResult{ - Version: 2, - Score: 10, - Reason: "4 different organizations found -- score normalized to 10", + result: scut.TestReturn{ + Score: 0, }, - }, - { - name: "Contributors with orgs and number of contributions is greater than 5 without companies", - raw: &checker.ContributorsData{ - Users: []clients.User{ - { - NumContributions: 10, - Organizations: []clients.User{ - { - Login: "org1", - }, - }, - }, - { - NumContributions: 10, - Organizations: []clients.User{ - { - Login: "org2", - }, - }, - }, - { - NumContributions: 10, - Organizations: []clients.User{ - { - Login: "org3", - }, - }, - }, - { - NumContributions: 1, - Organizations: []clients.User{ - { - Login: "org10", - }, - }, - }, + }, { + name: "Has three positive outcomes", + findings: []finding.Finding{ + { + Probe: "contributorsFromOrgOrCompany", + Outcome: finding.OutcomePositive, + }, + { + Probe: "contributorsFromOrgOrCompany", + Outcome: finding.OutcomePositive, + }, + { + Probe: "contributorsFromOrgOrCompany", + Outcome: finding.OutcomePositive, }, }, - expected: checker.CheckResult{ - Version: 2, - Score: 10, - Reason: "3 different organizations found -- score normalized to 10", + result: scut.TestReturn{ + Score: checker.MaxResultScore, + NumberOfInfo: 1, }, }, } - for _, tc := range testCases { - tc := tc - t.Run(tc.name, func(t 
*testing.T) { + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { t.Parallel() - result := Contributors("", &utests.TestDetailLogger{}, tc.raw) - if !cmp.Equal(result, tc.expected, cmpopts.IgnoreFields(checker.CheckResult{}, "Error")) { //nolint:govet - t.Errorf("expected %v, got %v", tc.expected, cmp.Diff(tc.expected, result, cmpopts.IgnoreFields(checker.CheckResult{}, "Error"))) //nolint:lll - } + dl := scut.TestDetailLogger{} + got := Contributors(tt.name, tt.findings, &dl) + scut.ValidateTestReturn(t, tt.name, &tt.result, &got, &dl) }) } } diff --git a/checks/evaluation/dangerous_workflow.go b/checks/evaluation/dangerous_workflow.go index 9aa76fe751c..9abdeebdda2 100644 --- a/checks/evaluation/dangerous_workflow.go +++ b/checks/evaluation/dangerous_workflow.go @@ -15,59 +15,89 @@ package evaluation import ( - "fmt" - "github.com/ossf/scorecard/v4/checker" sce "github.com/ossf/scorecard/v4/errors" + "github.com/ossf/scorecard/v4/finding" + "github.com/ossf/scorecard/v4/probes/hasDangerousWorkflowScriptInjection" + "github.com/ossf/scorecard/v4/probes/hasDangerousWorkflowUntrustedCheckout" ) // DangerousWorkflow applies the score policy for the DangerousWorkflow check. 
-func DangerousWorkflow(name string, dl checker.DetailLogger, - r *checker.DangerousWorkflowData, +func DangerousWorkflow(name string, + findings []finding.Finding, dl checker.DetailLogger, ) checker.CheckResult { - if r == nil { - e := sce.WithMessage(sce.ErrScorecardInternal, "empty raw data") + expectedProbes := []string{ + hasDangerousWorkflowScriptInjection.Probe, + hasDangerousWorkflowUntrustedCheckout.Probe, + } + + if !finding.UniqueProbesEqual(findings, expectedProbes) { + e := sce.WithMessage(sce.ErrScorecardInternal, "invalid probe results") return checker.CreateRuntimeErrorResult(name, e) } - if r.NumWorkflows == 0 { + if !hasWorkflows(findings) { return checker.CreateInconclusiveResult(name, "no workflows found") } - for _, e := range r.Workflows { - var text string - switch e.Type { - case checker.DangerousWorkflowUntrustedCheckout: - text = fmt.Sprintf("untrusted code checkout '%v'", e.File.Snippet) - case checker.DangerousWorkflowScriptInjection: - text = fmt.Sprintf("script injection with untrusted input '%v'", e.File.Snippet) - default: - err := sce.WithMessage(sce.ErrScorecardInternal, "invalid type") - return checker.CreateRuntimeErrorResult(name, err) + // Log all detected dangerous workflows + for i := range findings { + f := &findings[i] + if f.Outcome == finding.OutcomeNegative { + if f.Location == nil { + e := sce.WithMessage(sce.ErrScorecardInternal, "invalid probe results") + return checker.CreateRuntimeErrorResult(name, e) + } + dl.Warn(&checker.LogMessage{ + Path: f.Location.Path, + Type: f.Location.Type, + Offset: *f.Location.LineStart, + Text: f.Message, + Snippet: *f.Location.Snippet, + }) } + } - dl.Warn(&checker.LogMessage{ - Path: e.File.Path, - Type: e.File.Type, - Offset: e.File.Offset, - Text: text, - Snippet: e.File.Snippet, - }) + if hasDWWithUntrustedCheckout(findings) || hasDWWithScriptInjection(findings) { + return checker.CreateMinScoreResult(name, + "dangerous workflow patterns detected") } - if len(r.Workflows) > 0 { - 
return createResult(name, checker.MinResultScore) + return checker.CreateMaxScoreResult(name, + "no dangerous workflow patterns detected") +} + +// Both probes return OutcomeNotApplicable, if there project has no workflows. +func hasWorkflows(findings []finding.Finding) bool { + for i := range findings { + f := &findings[i] + if f.Outcome == finding.OutcomeNotApplicable { + return false + } } - return createResult(name, checker.MaxResultScore) + return true } -// Create the result. -func createResult(name string, score int) checker.CheckResult { - if score != checker.MaxResultScore { - return checker.CreateResultWithScore(name, - "dangerous workflow patterns detected", score) +func hasDWWithUntrustedCheckout(findings []finding.Finding) bool { + for i := range findings { + f := &findings[i] + if f.Probe == hasDangerousWorkflowUntrustedCheckout.Probe { + if f.Outcome == finding.OutcomeNegative { + return true + } + } } + return false +} - return checker.CreateMaxScoreResult(name, - "no dangerous workflow patterns detected") +func hasDWWithScriptInjection(findings []finding.Finding) bool { + for i := range findings { + f := &findings[i] + if f.Probe == hasDangerousWorkflowScriptInjection.Probe { + if f.Outcome == finding.OutcomeNegative { + return true + } + } + } + return false } diff --git a/checks/evaluation/dangerous_workflow_test.go b/checks/evaluation/dangerous_workflow_test.go index ae3eef25991..847c67694c2 100644 --- a/checks/evaluation/dangerous_workflow_test.go +++ b/checks/evaluation/dangerous_workflow_test.go @@ -16,151 +16,233 @@ package evaluation import ( "testing" - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/ossf/scorecard/v4/checker" + "github.com/ossf/scorecard/v4/finding" scut "github.com/ossf/scorecard/v4/utests" ) +var ( + testSnippet = "other/checkout@a81bbbf8298c0fa03ea29cdc473d45769f953675" + testLineStart = uint(123) +) + func TestDangerousWorkflow(t *testing.T) { t.Parallel() - type args struct { 
//nolint:govet - name string - dl checker.DetailLogger - r *checker.DangerousWorkflowData - } tests := []struct { - name string - args args - want checker.CheckResult + name string + findings []finding.Finding + result scut.TestReturn }{ { - name: "DangerousWorkflow - empty", - args: args{ - name: "DangerousWorkflow", - dl: &scut.TestDetailLogger{}, - r: &checker.DangerousWorkflowData{}, + name: "Has untrusted checkout workflow", + findings: []finding.Finding{ + { + Probe: "hasDangerousWorkflowScriptInjection", + Outcome: finding.OutcomePositive, + }, { + Probe: "hasDangerousWorkflowUntrustedCheckout", + Outcome: finding.OutcomeNegative, + Location: &finding.Location{ + Type: finding.FileTypeText, + Path: "./github/workflows/dangerous-workflow.yml", + LineStart: &testLineStart, + Snippet: &testSnippet, + }, + }, + }, + result: scut.TestReturn{ + Score: 0, + NumberOfWarn: 1, + }, + }, + { + name: "DangerousWorkflow - no workflows", + findings: []finding.Finding{ + { + Probe: "hasDangerousWorkflowScriptInjection", + Outcome: finding.OutcomeNotApplicable, + }, { + Probe: "hasDangerousWorkflowUntrustedCheckout", + Outcome: finding.OutcomeNotApplicable, + }, }, - want: checker.CheckResult{ - Score: checker.InconclusiveResultScore, - Reason: "no workflows found", - Version: 2, - Name: "DangerousWorkflow", + result: scut.TestReturn{ + Score: checker.InconclusiveResultScore, }, }, { name: "DangerousWorkflow - found workflows, none dangerous", - args: args{ - name: "DangerousWorkflow", - dl: &scut.TestDetailLogger{}, - r: &checker.DangerousWorkflowData{ - NumWorkflows: 5, + findings: []finding.Finding{ + { + Probe: "hasDangerousWorkflowScriptInjection", + Outcome: finding.OutcomePositive, + }, { + Probe: "hasDangerousWorkflowUntrustedCheckout", + Outcome: finding.OutcomePositive, }, }, - want: checker.CheckResult{ - Score: checker.MaxResultScore, - Reason: "no dangerous workflow patterns detected", - Version: 2, - Name: "DangerousWorkflow", + result: scut.TestReturn{ + 
Score: 10, }, }, { name: "DangerousWorkflow - Dangerous workflow detected", - args: args{ - name: "DangerousWorkflow", - dl: &scut.TestDetailLogger{}, - r: &checker.DangerousWorkflowData{ - NumWorkflows: 1, - Workflows: []checker.DangerousWorkflow{ - { - Type: checker.DangerousWorkflowUntrustedCheckout, - File: checker.File{ - Path: "a", - Snippet: "a", - Offset: 0, - EndOffset: 0, - Type: 0, - }, - }, + findings: []finding.Finding{ + { + Probe: "hasDangerousWorkflowScriptInjection", + Outcome: finding.OutcomePositive, + }, { + Probe: "hasDangerousWorkflowUntrustedCheckout", + Outcome: finding.OutcomeNegative, + Location: &finding.Location{ + Type: finding.FileTypeText, + Path: "./github/workflows/dangerous-workflow.yml", + LineStart: &testLineStart, + Snippet: &testSnippet, }, }, }, - want: checker.CheckResult{ - Score: 0, - Reason: "dangerous workflow patterns detected", - Version: 2, - Name: "DangerousWorkflow", + result: scut.TestReturn{ + Score: 0, + NumberOfWarn: 1, }, }, { name: "DangerousWorkflow - Script injection detected", - args: args{ - name: "DangerousWorkflow", - dl: &scut.TestDetailLogger{}, - r: &checker.DangerousWorkflowData{ - NumWorkflows: 1, - Workflows: []checker.DangerousWorkflow{ - { - Type: checker.DangerousWorkflowScriptInjection, - File: checker.File{ - Path: "a", - Snippet: "a", - Offset: 0, - EndOffset: 0, - Type: 0, - }, - }, + findings: []finding.Finding{ + { + Probe: "hasDangerousWorkflowScriptInjection", + Outcome: finding.OutcomeNegative, + Location: &finding.Location{ + Type: finding.FileTypeText, + Path: "./github/workflows/dangerous-workflow.yml", + LineStart: &testLineStart, + Snippet: &testSnippet, }, + }, { + Probe: "hasDangerousWorkflowUntrustedCheckout", + Outcome: finding.OutcomePositive, }, }, - want: checker.CheckResult{ - Score: 0, - Reason: "dangerous workflow patterns detected", - Version: 2, - Name: "DangerousWorkflow", + result: scut.TestReturn{ + Score: 0, + NumberOfWarn: 1, }, }, { - name: "DangerousWorkflow - 
unknown type", - args: args{ - name: "DangerousWorkflow", - dl: &scut.TestDetailLogger{}, - r: &checker.DangerousWorkflowData{ - NumWorkflows: 1, - Workflows: []checker.DangerousWorkflow{ - { - Type: "foobar", - File: checker.File{ - Path: "a", - Snippet: "a", - Offset: 0, - EndOffset: 0, - Type: 0, - }, - }, + name: "DangerousWorkflow - 3 script injection workflows detected", + findings: []finding.Finding{ + { + Probe: "hasDangerousWorkflowScriptInjection", + Outcome: finding.OutcomeNegative, + Location: &finding.Location{ + Type: finding.FileTypeText, + Path: "./github/workflows/dangerous-workflow.yml", + LineStart: &testLineStart, + Snippet: &testSnippet, }, + }, { + Probe: "hasDangerousWorkflowScriptInjection", + Outcome: finding.OutcomeNegative, + Location: &finding.Location{ + Type: finding.FileTypeText, + Path: "./github/workflows/dangerous-workflow2.yml", + LineStart: &testLineStart, + Snippet: &testSnippet, + }, + }, { + Probe: "hasDangerousWorkflowUntrustedCheckout", + Outcome: finding.OutcomePositive, }, }, - want: checker.CheckResult{ - Score: -1, - Reason: "internal error: invalid type", - Version: 2, - Name: "DangerousWorkflow", + result: scut.TestReturn{ + Score: 0, + NumberOfWarn: 2, }, }, { - name: "DangerousWorkflow - nil data", - args: args{ - name: "DangerousWorkflow", - dl: &scut.TestDetailLogger{}, - r: nil, + name: "DangerousWorkflow - 8 script injection workflows detected", + findings: []finding.Finding{ + { + Probe: "hasDangerousWorkflowScriptInjection", + Outcome: finding.OutcomeNegative, + Location: &finding.Location{ + Type: finding.FileTypeText, + Path: "./github/workflows/dangerous-workflow.yml", + LineStart: &testLineStart, + Snippet: &testSnippet, + }, + }, { + Probe: "hasDangerousWorkflowScriptInjection", + Outcome: finding.OutcomeNegative, + Location: &finding.Location{ + Type: finding.FileTypeText, + Path: "./github/workflows/dangerous-workflow2.yml", + LineStart: &testLineStart, + Snippet: &testSnippet, + }, + }, { + Probe: 
"hasDangerousWorkflowScriptInjection", + Outcome: finding.OutcomeNegative, + Location: &finding.Location{ + Type: finding.FileTypeText, + Path: "./github/workflows/dangerous-workflow3.yml", + LineStart: &testLineStart, + Snippet: &testSnippet, + }, + }, { + Probe: "hasDangerousWorkflowScriptInjection", + Outcome: finding.OutcomeNegative, + Location: &finding.Location{ + Type: finding.FileTypeText, + Path: "./github/workflows/dangerous-workflow4.yml", + LineStart: &testLineStart, + Snippet: &testSnippet, + }, + }, { + Probe: "hasDangerousWorkflowScriptInjection", + Outcome: finding.OutcomeNegative, + Location: &finding.Location{ + Type: finding.FileTypeText, + Path: "./github/workflows/dangerous-workflow5.yml", + LineStart: &testLineStart, + Snippet: &testSnippet, + }, + }, { + Probe: "hasDangerousWorkflowScriptInjection", + Outcome: finding.OutcomeNegative, + Location: &finding.Location{ + Type: finding.FileTypeText, + Path: "./github/workflows/dangerous-workflow6.yml", + LineStart: &testLineStart, + Snippet: &testSnippet, + }, + }, { + Probe: "hasDangerousWorkflowScriptInjection", + Outcome: finding.OutcomeNegative, + Location: &finding.Location{ + Type: finding.FileTypeText, + Path: "./github/workflows/dangerous-workflow7.yml", + LineStart: &testLineStart, + Snippet: &testSnippet, + }, + }, { + Probe: "hasDangerousWorkflowScriptInjection", + Outcome: finding.OutcomeNegative, + Location: &finding.Location{ + Type: finding.FileTypeText, + Path: "./github/workflows/dangerous-workflow8.yml", + LineStart: &testLineStart, + Snippet: &testSnippet, + }, + }, { + Probe: "hasDangerousWorkflowUntrustedCheckout", + Outcome: finding.OutcomePositive, + }, }, - want: checker.CheckResult{ - Score: -1, - Reason: "internal error: empty raw data", - Name: "DangerousWorkflow", - Version: 2, + result: scut.TestReturn{ + Score: 0, + NumberOfWarn: 8, }, }, } @@ -168,9 +250,9 @@ func TestDangerousWorkflow(t *testing.T) { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() - if got 
:= DangerousWorkflow(tt.args.name, tt.args.dl, tt.args.r); !cmp.Equal(got, tt.want, cmpopts.IgnoreFields(checker.CheckResult{}, "Error")) { //nolint:lll - t.Errorf("DangerousWorkflow() = %v, want %v", got, cmp.Diff(got, tt.want, cmpopts.IgnoreFields(checker.CheckResult{}, "Error"))) //nolint:lll - } + dl := scut.TestDetailLogger{} + got := DangerousWorkflow(tt.name, tt.findings, &dl) + scut.ValidateTestReturn(t, tt.name, &tt.result, &got, &dl) }) } } diff --git a/checks/evaluation/dependency_update_tool.go b/checks/evaluation/dependency_update_tool.go index 239167a4e3b..903252fcde2 100644 --- a/checks/evaluation/dependency_update_tool.go +++ b/checks/evaluation/dependency_update_tool.go @@ -16,19 +16,38 @@ package evaluation import ( "github.com/ossf/scorecard/v4/checker" + sce "github.com/ossf/scorecard/v4/errors" "github.com/ossf/scorecard/v4/finding" + "github.com/ossf/scorecard/v4/probes/toolDependabotInstalled" + "github.com/ossf/scorecard/v4/probes/toolPyUpInstalled" + "github.com/ossf/scorecard/v4/probes/toolRenovateInstalled" ) -// DependencyUpdateTool applies the score policy for the Dependency-Update-Tool check. +// DependencyUpdateTool applies the score policy and logs the details +// for the Dependency-Update-Tool check. func DependencyUpdateTool(name string, - findings []finding.Finding, + findings []finding.Finding, dl checker.DetailLogger, ) checker.CheckResult { + expectedProbes := []string{ + toolDependabotInstalled.Probe, + toolPyUpInstalled.Probe, + toolRenovateInstalled.Probe, + } + if !finding.UniqueProbesEqual(findings, expectedProbes) { + e := sce.WithMessage(sce.ErrScorecardInternal, "invalid probe results") + return checker.CreateRuntimeErrorResult(name, e) + } + for i := range findings { f := &findings[i] if f.Outcome == finding.OutcomePositive { + // Log all findings except the negative ones. 
+ checker.LogFindings(nonNegativeFindings(findings), dl) return checker.CreateMaxScoreResult(name, "update tool detected") } } + // Log all findings. + checker.LogFindings(findings, dl) return checker.CreateMinScoreResult(name, "no update tool detected") } diff --git a/checks/evaluation/dependency_update_tool_test.go b/checks/evaluation/dependency_update_tool_test.go index 62667d6c3ca..bda157c4eff 100644 --- a/checks/evaluation/dependency_update_tool_test.go +++ b/checks/evaluation/dependency_update_tool_test.go @@ -18,19 +18,17 @@ import ( "testing" "github.com/ossf/scorecard/v4/checker" + sce "github.com/ossf/scorecard/v4/errors" "github.com/ossf/scorecard/v4/finding" scut "github.com/ossf/scorecard/v4/utests" ) func TestDependencyUpdateTool(t *testing.T) { t.Parallel() - //nolint tests := []struct { name string findings []finding.Finding - err bool - want checker.CheckResult - expected scut.TestReturn + result scut.TestReturn }{ { name: "dependabot", @@ -39,49 +37,102 @@ func TestDependencyUpdateTool(t *testing.T) { Probe: "toolDependabotInstalled", Outcome: finding.OutcomePositive, }, + { + Probe: "toolPyUpInstalled", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "toolRenovateInstalled", + Outcome: finding.OutcomeNegative, + }, }, - want: checker.CheckResult{ - Score: 10, + result: scut.TestReturn{ + Score: checker.MaxResultScore, + NumberOfInfo: 1, }, }, { name: "renovate", findings: []finding.Finding{ + { + Probe: "toolDependabotInstalled", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "toolPyUpInstalled", + Outcome: finding.OutcomeNegative, + }, { Probe: "toolRenovateInstalled", Outcome: finding.OutcomePositive, }, }, - want: checker.CheckResult{ - Score: 10, + result: scut.TestReturn{ + Score: checker.MaxResultScore, + NumberOfInfo: 1, }, }, { name: "pyup", findings: []finding.Finding{ + { + Probe: "toolDependabotInstalled", + Outcome: finding.OutcomeNegative, + }, { Probe: "toolPyUpInstalled", Outcome: finding.OutcomePositive, }, + { + 
Probe: "toolRenovateInstalled", + Outcome: finding.OutcomeNegative, + }, }, - want: checker.CheckResult{ - Score: 10, + result: scut.TestReturn{ + Score: checker.MaxResultScore, + NumberOfInfo: 1, }, }, { - name: "sonatype", + name: "none", findings: []finding.Finding{ { - Probe: "toolSonatypeInstalled", - Outcome: finding.OutcomePositive, + Probe: "toolDependabotInstalled", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "toolRenovateInstalled", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "toolPyUpInstalled", + Outcome: finding.OutcomeNegative, }, }, - want: checker.CheckResult{ - Score: 10, + result: scut.TestReturn{ + Score: checker.MinResultScore, + NumberOfWarn: 3, }, }, { - name: "none", + name: "missing probes renovate", + findings: []finding.Finding{ + { + Probe: "toolDependabotInstalled", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "toolPyUpInstalled", + Outcome: finding.OutcomeNegative, + }, + }, + result: scut.TestReturn{ + Score: checker.InconclusiveResultScore, + Error: sce.ErrScorecardInternal, + }, + }, + { + name: "invalid probe name", findings: []finding.Finding{ { Probe: "toolDependabotInstalled", @@ -96,19 +147,13 @@ func TestDependencyUpdateTool(t *testing.T) { Outcome: finding.OutcomeNegative, }, { - Probe: "toolSonatypeInstalled", + Probe: "toolInvalidProbeName", Outcome: finding.OutcomeNegative, }, }, - want: checker.CheckResult{ - Score: 0, - }, - }, - { - name: "empty tool list", - want: checker.CheckResult{ - Score: 0, - Error: nil, + result: scut.TestReturn{ + Score: checker.InconclusiveResultScore, + Error: sce.ErrScorecardInternal, }, }, } @@ -117,14 +162,9 @@ func TestDependencyUpdateTool(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - got := DependencyUpdateTool(tt.name, tt.findings) - if tt.want.Score != got.Score { - t.Errorf("DependencyUpdateTool() got Score = %v, want %v for %v", got.Score, tt.want.Score, tt.name) - } - if tt.err && got.Error == nil { - t.Errorf("DependencyUpdateTool() 
error = %v, want %v for %v", got.Error, tt.want.Error, tt.name) - return - } + dl := scut.TestDetailLogger{} + got := DependencyUpdateTool(tt.name, tt.findings, &dl) + scut.ValidateTestReturn(t, tt.name, &tt.result, &got, &dl) }) } } diff --git a/checks/evaluation/finding.go b/checks/evaluation/finding.go new file mode 100644 index 00000000000..11d1e6f9fc4 --- /dev/null +++ b/checks/evaluation/finding.go @@ -0,0 +1,42 @@ +// Copyright 2023 OpenSSF Scorecard Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package evaluation + +import ( + "github.com/ossf/scorecard/v4/finding" +) + +func nonNegativeFindings(findings []finding.Finding) []finding.Finding { + var ff []finding.Finding + for i := range findings { + f := &findings[i] + if f.Outcome == finding.OutcomeNegative { + continue + } + ff = append(ff, *f) + } + return ff +} + +func negativeFindings(findings []finding.Finding) []finding.Finding { + var ff []finding.Finding + for i := range findings { + f := &findings[i] + if f.Outcome == finding.OutcomeNegative { + ff = append(ff, *f) + } + } + return ff +} diff --git a/checks/evaluation/fuzzing.go b/checks/evaluation/fuzzing.go index 90695ff73d4..4b8308eaa23 100644 --- a/checks/evaluation/fuzzing.go +++ b/checks/evaluation/fuzzing.go @@ -15,40 +15,60 @@ package evaluation import ( - "fmt" - "github.com/ossf/scorecard/v4/checker" sce "github.com/ossf/scorecard/v4/errors" + "github.com/ossf/scorecard/v4/finding" + "github.com/ossf/scorecard/v4/probes/fuzzedWithCLibFuzzer" + "github.com/ossf/scorecard/v4/probes/fuzzedWithClusterFuzzLite" + "github.com/ossf/scorecard/v4/probes/fuzzedWithCppLibFuzzer" + "github.com/ossf/scorecard/v4/probes/fuzzedWithGoNative" + "github.com/ossf/scorecard/v4/probes/fuzzedWithJavaJazzerFuzzer" + "github.com/ossf/scorecard/v4/probes/fuzzedWithOSSFuzz" + "github.com/ossf/scorecard/v4/probes/fuzzedWithPropertyBasedHaskell" + "github.com/ossf/scorecard/v4/probes/fuzzedWithPropertyBasedJavascript" + "github.com/ossf/scorecard/v4/probes/fuzzedWithPropertyBasedTypescript" + "github.com/ossf/scorecard/v4/probes/fuzzedWithPythonAtheris" + "github.com/ossf/scorecard/v4/probes/fuzzedWithRustCargofuzz" + "github.com/ossf/scorecard/v4/probes/fuzzedWithSwiftLibFuzzer" ) // Fuzzing applies the score policy for the Fuzzing check. 
-func Fuzzing(name string, dl checker.DetailLogger, - r *checker.FuzzingData, +func Fuzzing(name string, + findings []finding.Finding, dl checker.DetailLogger, ) checker.CheckResult { - if r == nil { - e := sce.WithMessage(sce.ErrScorecardInternal, "empty raw data") - return checker.CreateRuntimeErrorResult(name, e) + // We have 12 unique probes, each should have a finding. + expectedProbes := []string{ + fuzzedWithClusterFuzzLite.Probe, + fuzzedWithGoNative.Probe, + fuzzedWithPythonAtheris.Probe, + fuzzedWithCLibFuzzer.Probe, + fuzzedWithCppLibFuzzer.Probe, + fuzzedWithRustCargofuzz.Probe, + fuzzedWithSwiftLibFuzzer.Probe, + fuzzedWithJavaJazzerFuzzer.Probe, + fuzzedWithOSSFuzz.Probe, + fuzzedWithPropertyBasedHaskell.Probe, + fuzzedWithPropertyBasedJavascript.Probe, + fuzzedWithPropertyBasedTypescript.Probe, } + // TODO: other packages to consider: + // - github.com/google/fuzztest - if len(r.Fuzzers) == 0 { - return checker.CreateMinScoreResult(name, "project is not fuzzed") + if !finding.UniqueProbesEqual(findings, expectedProbes) { + e := sce.WithMessage(sce.ErrScorecardInternal, "invalid probe results") + return checker.CreateRuntimeErrorResult(name, e) } - fuzzers := []string{} - for i := range r.Fuzzers { - fuzzer := r.Fuzzers[i] - for _, f := range fuzzer.Files { - msg := checker.LogMessage{ - Path: f.Path, - Type: f.Type, - Offset: f.Offset, - } - if f.Snippet != "" { - msg.Text = f.Snippet - } - dl.Info(&msg) + + // Compute the score. + for i := range findings { + f := &findings[i] + if f.Outcome == finding.OutcomePositive { + // Log all findings except the negative ones. + checker.LogFindings(nonNegativeFindings(findings), dl) + return checker.CreateMaxScoreResult(name, "project is fuzzed") } - fuzzers = append(fuzzers, fuzzer.Name) } - return checker.CreateMaxScoreResult(name, - fmt.Sprintf("project is fuzzed with %v", fuzzers)) + // Log all findings.
+ checker.LogFindings(findings, dl) + return checker.CreateMinScoreResult(name, "project is not fuzzed") } diff --git a/checks/evaluation/fuzzing_test.go b/checks/evaluation/fuzzing_test.go index 89311490586..042b18b24a6 100644 --- a/checks/evaluation/fuzzing_test.go +++ b/checks/evaluation/fuzzing_test.go @@ -16,79 +16,198 @@ package evaluation import ( "testing" - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/ossf/scorecard/v4/checker" + sce "github.com/ossf/scorecard/v4/errors" + "github.com/ossf/scorecard/v4/finding" scut "github.com/ossf/scorecard/v4/utests" ) func TestFuzzing(t *testing.T) { t.Parallel() - type args struct { //nolint - name string - dl checker.DetailLogger - r *checker.FuzzingData - } tests := []struct { - name string - args args - want checker.CheckResult + name string + findings []finding.Finding + result scut.TestReturn }{ { name: "Fuzzing - no fuzzing", - args: args{ - name: "Fuzzing", - dl: &scut.TestDetailLogger{}, - r: &checker.FuzzingData{}, + findings: []finding.Finding{ + { + Probe: "fuzzedWithClusterFuzzLite", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "fuzzedWithGoNative", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "fuzzedWithPythonAtheris", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "fuzzedWithCLibFuzzer", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "fuzzedWithCppLibFuzzer", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "fuzzedWithRustCargofuzz", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "fuzzedWithSwiftLibFuzzer", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "fuzzedWithJavaJazzerFuzzer", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "fuzzedWithOSSFuzz", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "fuzzedWithPropertyBasedHaskell", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "fuzzedWithPropertyBasedJavascript", + Outcome: finding.OutcomeNegative, + }, + { + Probe: 
"fuzzedWithPropertyBasedTypescript", + Outcome: finding.OutcomeNegative, + }, + }, + result: scut.TestReturn{ + Score: checker.MinResultScore, + NumberOfWarn: 12, + }, + }, + { + name: "Fuzzing - fuzzing GoNative", + findings: []finding.Finding{ + { + Probe: "fuzzedWithClusterFuzzLite", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "fuzzedWithGoNative", + Outcome: finding.OutcomePositive, + }, + { + Probe: "fuzzedWithPythonAtheris", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "fuzzedWithCLibFuzzer", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "fuzzedWithCppLibFuzzer", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "fuzzedWithRustCargofuzz", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "fuzzedWithSwiftLibFuzzer", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "fuzzedWithJavaJazzerFuzzer", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "fuzzedWithOSSFuzz", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "fuzzedWithPropertyBasedHaskell", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "fuzzedWithPropertyBasedJavascript", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "fuzzedWithPropertyBasedTypescript", + Outcome: finding.OutcomeNegative, + }, }, - want: checker.CheckResult{ - Score: 0, - Name: "Fuzzing", - Version: 2, - Reason: "project is not fuzzed", + result: scut.TestReturn{ + Score: checker.MaxResultScore, + NumberOfInfo: 1, }, }, + { - name: "Fuzzing - fuzzing", - args: args{ - name: "Fuzzing", - dl: &scut.TestDetailLogger{}, - r: &checker.FuzzingData{ - Fuzzers: []checker.Tool{ - { - Name: "Fuzzing", - Files: []checker.File{ - { - Path: "Fuzzing", - Type: 0, - Offset: 1, - Snippet: "Fuzzing", - }, - }, - }, - }, + name: "Fuzzing - fuzzing missing GoNative finding", + findings: []finding.Finding{ + { + Probe: "fuzzedWithClusterFuzzLite", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "fuzzedWithOSSFuzz", + Outcome: finding.OutcomeNegative, + }, + { + Probe: 
"fuzzedWithPropertyBasedHaskell", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "fuzzedWithPropertyBasedJavascript", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "fuzzedWithPropertyBasedTypescript", + Outcome: finding.OutcomeNegative, }, }, - want: checker.CheckResult{ - Score: 10, - Name: "Fuzzing", - Version: 2, - Reason: "project is fuzzed with [Fuzzing]", + result: scut.TestReturn{ + Score: checker.InconclusiveResultScore, + Error: sce.ErrScorecardInternal, }, }, { - name: "Fuzzing - fuzzing data nil", - args: args{ - name: "Fuzzing", - dl: &scut.TestDetailLogger{}, - r: nil, + name: "Fuzzing - fuzzing invalid probe name", + findings: []finding.Finding{ + { + Probe: "fuzzedWithClusterFuzzLite", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "fuzzedWithGoNative", + Outcome: finding.OutcomePositive, + }, + { + Probe: "fuzzedWithOSSFuzz", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "fuzzedWithPropertyBasedHaskell", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "fuzzedWithPropertyBasedJavascript", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "fuzzedWithPropertyBasedTypescript", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "fuzzedWithInvalidProbeName", + Outcome: finding.OutcomePositive, + }, }, - want: checker.CheckResult{ - Score: -1, - Name: "Fuzzing", - Version: 2, - Reason: "internal error: empty raw data", + result: scut.TestReturn{ + Score: checker.InconclusiveResultScore, + Error: sce.ErrScorecardInternal, }, }, } @@ -96,9 +215,9 @@ func TestFuzzing(t *testing.T) { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() - if got := Fuzzing(tt.args.name, tt.args.dl, tt.args.r); !cmp.Equal(got, tt.want, cmpopts.IgnoreFields(checker.CheckResult{}, "Error")) { //nolint:lll - t.Errorf("Fuzzing() = %v, want %v", got, cmp.Diff(got, tt.want, cmpopts.IgnoreFields(checker.CheckResult{}, "Error"))) //nolint:lll - } + dl := scut.TestDetailLogger{} + got := Fuzzing(tt.name, tt.findings, &dl) + 
scut.ValidateTestReturn(t, tt.name, &tt.result, &got, &dl) }) } } diff --git a/checks/evaluation/license.go b/checks/evaluation/license.go index a0300c69dff..0401e5ecff2 100644 --- a/checks/evaluation/license.go +++ b/checks/evaluation/license.go @@ -18,78 +18,92 @@ import ( "github.com/ossf/scorecard/v4/checker" sce "github.com/ossf/scorecard/v4/errors" "github.com/ossf/scorecard/v4/finding" + "github.com/ossf/scorecard/v4/probes/hasFSFOrOSIApprovedLicense" + "github.com/ossf/scorecard/v4/probes/hasLicenseFile" + "github.com/ossf/scorecard/v4/probes/hasLicenseFileAtTopDir" ) -func scoreLicenseCriteria(f *checker.LicenseFile, +// License applies the score policy for the License check. +func License(name string, + findings []finding.Finding, dl checker.DetailLogger, -) int { - var score int - msg := checker.LogMessage{ - Path: "", - Type: finding.FileTypeNone, - Text: "", - Offset: 1, - } - msg.Path = f.File.Path - msg.Type = finding.FileTypeSource - // #1 a license file was found. - score += 6 - - // #2 the licence was found at the top-level or LICENSE/ folder. - switch f.LicenseInformation.Attribution { - case checker.LicenseAttributionTypeAPI, checker.LicenseAttributionTypeHeuristics: - // both repoAPI and scorecard (not using the API) follow checks.md - // for a file to be found it must have been in the correct location - // award location points. 
- score += 3 - msg.Text = "License file found in expected location" - dl.Info(&msg) - // for repo attribution prepare warning if not an recognized license" - msg.Text = "Any licence detected not an FSF or OSI recognized license" - case checker.LicenseAttributionTypeOther: - // TODO ascertain location found - score += 0 - msg.Text = "License file found in unexpected location" - dl.Warn(&msg) - // for non repo attribution not the license detection is not supported - msg.Text = "Detecting license content not supported" - default: - } - - // #3 is the license either an FSF or OSI recognized/approved license - if f.LicenseInformation.Approved { - score += 1 - msg.Text = "FSF or OSI recognized license" - dl.Info(&msg) - } else { - // message text for this condition set above - dl.Warn(&msg) +) checker.CheckResult { + // We have 3 unique probes, each should have a finding. + expectedProbes := []string{ + hasLicenseFile.Probe, + hasFSFOrOSIApprovedLicense.Probe, + hasLicenseFileAtTopDir.Probe, } - return score -} -// License applies the score policy for the License check. -func License(name string, dl checker.DetailLogger, - r *checker.LicenseData, -) checker.CheckResult { - var score int - if r == nil { - e := sce.WithMessage(sce.ErrScorecardInternal, "empty raw data") + if !finding.UniqueProbesEqual(findings, expectedProbes) { + e := sce.WithMessage(sce.ErrScorecardInternal, "invalid probe results") return checker.CreateRuntimeErrorResult(name, e) } - // Apply the policy evaluation. - if r.LicenseFiles == nil || len(r.LicenseFiles) == 0 { - return checker.CreateMinScoreResult(name, "license file not detected") + // Compute the score. 
+ score := 0 + m := make(map[string]bool) + for i := range findings { + f := &findings[i] + switch f.Outcome { + case finding.OutcomeNotApplicable: + dl.Info(&checker.LogMessage{ + Type: finding.FileTypeSource, + Offset: 1, + Text: f.Message, + }) + case finding.OutcomePositive: + switch f.Probe { + case hasFSFOrOSIApprovedLicense.Probe: + dl.Info(&checker.LogMessage{ + Type: finding.FileTypeSource, + Offset: 1, + Path: f.Message, + Text: "FSF or OSI recognized license", + }) + score += scoreProbeOnce(f.Probe, m, 1) + case hasLicenseFileAtTopDir.Probe: + dl.Info(&checker.LogMessage{ + Type: finding.FileTypeSource, + Offset: 1, + Path: f.Message, + Text: "License file found in expected location", + }) + score += scoreProbeOnce(f.Probe, m, 3) + case hasLicenseFile.Probe: + score += scoreProbeOnce(f.Probe, m, 6) + default: + e := sce.WithMessage(sce.ErrScorecardInternal, "unknown probe results") + return checker.CreateRuntimeErrorResult(name, e) + } + case finding.OutcomeNegative: + switch f.Probe { + case hasLicenseFileAtTopDir.Probe: + dl.Warn(&checker.LogMessage{ + Type: finding.FileTypeSource, + Offset: 1, + Path: f.Message, + Text: "License file found in unexpected location", + }) + case hasFSFOrOSIApprovedLicense.Probe: + dl.Warn(&checker.LogMessage{ + Type: finding.FileTypeSource, + Offset: 1, + Path: "", + Text: f.Message, + }) + } + default: + continue // for linting + } } - - // TODO: although this a loop, the raw checks will only return one licence file - // when more than one license file can be aggregated into a composite - // score, that logic can be comprehended here. 
- score = 0 - for idx := range r.LicenseFiles { - score = scoreLicenseCriteria(&r.LicenseFiles[idx], dl) + _, defined := m[hasLicenseFile.Probe] + if !defined { + if score > 0 { + e := sce.WithMessage(sce.ErrScorecardInternal, "score calculation problem") + return checker.CreateRuntimeErrorResult(name, e) + } + return checker.CreateMinScoreResult(name, "license file not detected") } - return checker.CreateResultWithScore(name, "license file detected", score) } diff --git a/checks/evaluation/license_test.go b/checks/evaluation/license_test.go index 3ea4771582a..66857014f60 100644 --- a/checks/evaluation/license_test.go +++ b/checks/evaluation/license_test.go @@ -16,147 +16,136 @@ package evaluation import ( "testing" - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/ossf/scorecard/v4/checker" + sce "github.com/ossf/scorecard/v4/errors" + "github.com/ossf/scorecard/v4/finding" scut "github.com/ossf/scorecard/v4/utests" ) -func Test_scoreLicenseCriteria(t *testing.T) { +func TestLicense(t *testing.T) { t.Parallel() - type args struct { - f *checker.LicenseFile - dl checker.DetailLogger - } - tests := []struct { //nolint:govet - name string - args args - want int + tests := []struct { + name string + findings []finding.Finding + result scut.TestReturn }{ { - name: "License Attribution Type API", - args: args{ - f: &checker.LicenseFile{ - LicenseInformation: checker.License{ - Attribution: checker.LicenseAttributionTypeAPI, - Approved: true, - }, - }, - dl: &scut.TestDetailLogger{}, + name: "Positive outcome = Max Score", + findings: []finding.Finding{ + { + Probe: "hasLicenseFile", + Outcome: finding.OutcomePositive, + }, + { + Probe: "hasFSFOrOSIApprovedLicense", + Outcome: finding.OutcomePositive, + }, + { + Probe: "hasLicenseFileAtTopDir", + Outcome: finding.OutcomePositive, + }, }, - want: 10, - }, - { - name: "License Attribution Type Heuristics", - args: args{ - f: &checker.LicenseFile{ - LicenseInformation: checker.License{ 
- Attribution: checker.LicenseAttributionTypeHeuristics, - }, - }, - dl: &scut.TestDetailLogger{}, + result: scut.TestReturn{ + Score: checker.MaxResultScore, + NumberOfInfo: 2, }, - want: 9, - }, - { - name: "License Attribution Type Other", - args: args{ - f: &checker.LicenseFile{ - LicenseInformation: checker.License{ - Attribution: checker.LicenseAttributionTypeOther, - }, - }, - dl: &scut.TestDetailLogger{}, + }, { + name: "Negative outcomes from all probes = Min score", + findings: []finding.Finding{ + { + Probe: "hasLicenseFile", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "hasFSFOrOSIApprovedLicense", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "hasLicenseFileAtTopDir", + Outcome: finding.OutcomeNegative, + }, }, - want: 6, - }, - { - name: "License Attribution Type Unknown", - args: args{ - f: &checker.LicenseFile{ - LicenseInformation: checker.License{ - Attribution: "Unknown", - }, - }, - dl: &scut.TestDetailLogger{}, + result: scut.TestReturn{ + Score: checker.MinResultScore, + NumberOfWarn: 2, }, - want: 6, - }, - } - for _, tt := range tests { - tt := tt // Parallel testing scoping hack. 
- t.Run(tt.name, func(t *testing.T) { - t.Parallel() - if got := scoreLicenseCriteria(tt.args.f, tt.args.dl); got != tt.want { - t.Errorf("scoreLicenseCriteria() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestLicense(t *testing.T) { - t.Parallel() - type args struct { //nolint:govet - name string - dl checker.DetailLogger - r *checker.LicenseData - } - tests := []struct { - name string - args args - want checker.CheckResult - }{ - { - name: "No License", - args: args{ - name: "No License", - dl: &scut.TestDetailLogger{}, + }, { + name: "Has license file but not a top level or in OSI/FSF format", + findings: []finding.Finding{ + { + Probe: "hasLicenseFile", + Outcome: finding.OutcomePositive, + }, + { + Probe: "hasFSFOrOSIApprovedLicense", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "hasLicenseFileAtTopDir", + Outcome: finding.OutcomeNegative, + }, }, - want: checker.CheckResult{ - Score: -1, - Version: 2, - Reason: "internal error: empty raw data", - Name: "No License", + result: scut.TestReturn{ + Score: 6, + NumberOfWarn: 2, }, - }, - { - name: "No License Files", - args: args{ - name: "No License Files", - dl: &scut.TestDetailLogger{}, - r: &checker.LicenseData{ - LicenseFiles: []checker.LicenseFile{}, + }, { + name: "Findings missing a probe = Error", + findings: []finding.Finding{ + { + Probe: "hasLicenseFile", + Outcome: finding.OutcomePositive, + }, + { + Probe: "hasFSFOrOSIApprovedLicense", + Outcome: finding.OutcomeNegative, }, }, - want: checker.CheckResult{ - Score: 0, - Version: 2, - Reason: "license file not detected", - Name: "No License Files", + result: scut.TestReturn{ + Score: -1, + Error: sce.ErrScorecardInternal, }, - }, - { - name: "License Files Detected", - args: args{ - name: "License Files Detected", - dl: &scut.TestDetailLogger{}, - r: &checker.LicenseData{ - LicenseFiles: []checker.LicenseFile{ - { - LicenseInformation: checker.License{ - Attribution: checker.LicenseAttributionTypeAPI, - Approved: true, - }, - }, - }, 
+ }, { + name: "Has a license at top dir but it is not OSI/FSF approved", + findings: []finding.Finding{ + { + Probe: "hasLicenseFile", + Outcome: finding.OutcomePositive, + }, + { + Probe: "hasFSFOrOSIApprovedLicense", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "hasLicenseFileAtTopDir", + Outcome: finding.OutcomePositive, + }, + }, + result: scut.TestReturn{ + Score: 9, + NumberOfInfo: 1, + NumberOfWarn: 1, + }, + }, { + name: "Has an OSI/FSF approved license but not at top level dir", + findings: []finding.Finding{ + { + Probe: "hasLicenseFile", + Outcome: finding.OutcomePositive, + }, + { + Probe: "hasFSFOrOSIApprovedLicense", + Outcome: finding.OutcomePositive, + }, + { + Probe: "hasLicenseFileAtTopDir", + Outcome: finding.OutcomeNegative, }, }, - want: checker.CheckResult{ - Score: 10, - Version: 2, - Reason: "license file detected", - Name: "License Files Detected", + result: scut.TestReturn{ + Score: 7, + NumberOfInfo: 1, + NumberOfWarn: 1, }, }, } @@ -164,9 +153,9 @@ func TestLicense(t *testing.T) { tt := tt // Parallel testing scoping hack. 
t.Run(tt.name, func(t *testing.T) { t.Parallel() - if got := License(tt.args.name, tt.args.dl, tt.args.r); !cmp.Equal(got, tt.want, cmpopts.IgnoreFields(checker.CheckResult{}, "Error")) { //nolint:lll - t.Errorf("License() = %v, want %v", got, cmp.Diff(got, tt.want, cmpopts.IgnoreFields(checker.CheckResult{}, "Error"))) //nolint:lll - } + dl := scut.TestDetailLogger{} + got := License(tt.name, tt.findings, &dl) + scut.ValidateTestReturn(t, tt.name, &tt.result, &got, &dl) }) } } diff --git a/checks/evaluation/maintained.go b/checks/evaluation/maintained.go index a4fd2bfc66d..3be51d577cb 100644 --- a/checks/evaluation/maintained.go +++ b/checks/evaluation/maintained.go @@ -16,11 +16,14 @@ package evaluation import ( "fmt" - "time" "github.com/ossf/scorecard/v4/checker" - "github.com/ossf/scorecard/v4/clients" sce "github.com/ossf/scorecard/v4/errors" + "github.com/ossf/scorecard/v4/finding" + "github.com/ossf/scorecard/v4/probes/hasRecentCommits" + "github.com/ossf/scorecard/v4/probes/issueActivityByProjectMember" + "github.com/ossf/scorecard/v4/probes/notArchived" + "github.com/ossf/scorecard/v4/probes/notCreatedRecently" ) const ( @@ -30,68 +33,67 @@ const ( ) // Maintained applies the score policy for the Maintained check. -func Maintained(name string, dl checker.DetailLogger, r *checker.MaintainedData) checker.CheckResult { - if r == nil { - e := sce.WithMessage(sce.ErrScorecardInternal, "empty raw data") - return checker.CreateRuntimeErrorResult(name, e) +func Maintained(name string, + findings []finding.Finding, dl checker.DetailLogger, +) checker.CheckResult { + // We have 4 unique probes, each should have a finding. 
+ expectedProbes := []string{ + notArchived.Probe, + issueActivityByProjectMember.Probe, + hasRecentCommits.Probe, + notCreatedRecently.Probe, } - if r.ArchivedStatus.Status { - return checker.CreateMinScoreResult(name, "repo is marked as archived") + if !finding.UniqueProbesEqual(findings, expectedProbes) { + e := sce.WithMessage(sce.ErrScorecardInternal, "invalid probe results") + return checker.CreateRuntimeErrorResult(name, e) } - // If not explicitly marked archived, look for activity in past `lookBackDays`. - threshold := time.Now().AddDate(0 /*years*/, 0 /*months*/, -1*lookBackDays /*days*/) - commitsWithinThreshold := 0 - for i := range r.DefaultBranchCommits { - if r.DefaultBranchCommits[i].CommittedDate.After(threshold) { - commitsWithinThreshold++ - } + if projectIsArchived(findings) { + checker.LogFindings(negativeFindings(findings), dl) + return checker.CreateMinScoreResult(name, "project is archived") } - // Emit a warning if this repo was created recently - recencyThreshold := time.Now().AddDate(0 /*years*/, 0 /*months*/, -1*lookBackDays /*days*/) - if r.CreatedAt.After(recencyThreshold) { - dl.Warn(&checker.LogMessage{ - Text: fmt.Sprintf("repo was created in the last %d days (Created at: %s), please review its contents carefully", - lookBackDays, r.CreatedAt.Format(time.RFC3339)), - }) - daysSinceRepoCreated := int(time.Since(r.CreatedAt).Hours() / 24) - return checker.CreateMinScoreResult(name, - fmt.Sprintf("repo was created %d days ago, not enough maintenance history", daysSinceRepoCreated), - ) + if projectWasCreatedInLast90Days(findings) { + checker.LogFindings(negativeFindings(findings), dl) + return checker.CreateMinScoreResult(name, "project was created in last 90 days. 
please review its contents carefully") } - issuesUpdatedWithinThreshold := 0 - for i := range r.Issues { - if hasActivityByCollaboratorOrHigher(&r.Issues[i], threshold) { - issuesUpdatedWithinThreshold++ + commitsWithinThreshold := 0 + numberOfIssuesUpdatedWithinThreshold := 0 + + for i := range findings { + f := &findings[i] + if f.Outcome == finding.OutcomePositive { + switch f.Probe { + case issueActivityByProjectMember.Probe: + numberOfIssuesUpdatedWithinThreshold = f.Values[issueActivityByProjectMember.NoOfIssuesValue] + case hasRecentCommits.Probe: + commitsWithinThreshold = f.Values[hasRecentCommits.CommitsValue] + } } } return checker.CreateProportionalScoreResult(name, fmt.Sprintf( - "%d commit(s) out of %d and %d issue activity out of %d found in the last %d days", - commitsWithinThreshold, len(r.DefaultBranchCommits), issuesUpdatedWithinThreshold, len(r.Issues), lookBackDays), - commitsWithinThreshold+issuesUpdatedWithinThreshold, activityPerWeek*lookBackDays/daysInOneWeek) + "%d commit(s) and %d issue activity found in the last %d days", + commitsWithinThreshold, numberOfIssuesUpdatedWithinThreshold, lookBackDays), + commitsWithinThreshold+numberOfIssuesUpdatedWithinThreshold, activityPerWeek*lookBackDays/daysInOneWeek) } -// hasActivityByCollaboratorOrHigher returns true if the issue was created or commented on by an -// owner/collaborator/member since the threshold. -func hasActivityByCollaboratorOrHigher(issue *clients.Issue, threshold time.Time) bool { - if issue == nil { - return false +func projectIsArchived(findings []finding.Finding) bool { + for i := range findings { + f := &findings[i] + if f.Outcome == finding.OutcomeNegative && f.Probe == notArchived.Probe { + return true + } } + return false +} - if issue.AuthorAssociation.Gte(clients.RepoAssociationCollaborator) && - issue.CreatedAt != nil && issue.CreatedAt.After(threshold) { - // The creator of the issue is a collaborator or higher. 
- return true - } - for _, comment := range issue.Comments { - if comment.AuthorAssociation.Gte(clients.RepoAssociationCollaborator) && - comment.CreatedAt != nil && - comment.CreatedAt.After(threshold) { - // The author of the comment is a collaborator or higher. +func projectWasCreatedInLast90Days(findings []finding.Finding) bool { + for i := range findings { + f := &findings[i] + if f.Outcome == finding.OutcomeNegative && f.Probe == notCreatedRecently.Probe { return true } } diff --git a/checks/evaluation/maintained_test.go b/checks/evaluation/maintained_test.go index 5170691560c..d504a0c0864 100644 --- a/checks/evaluation/maintained_test.go +++ b/checks/evaluation/maintained_test.go @@ -15,222 +15,119 @@ package evaluation import ( "testing" - "time" - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - - "github.com/ossf/scorecard/v4/checker" - "github.com/ossf/scorecard/v4/clients" + sce "github.com/ossf/scorecard/v4/errors" + "github.com/ossf/scorecard/v4/finding" scut "github.com/ossf/scorecard/v4/utests" ) -func Test_hasActivityByCollaboratorOrHigher(t *testing.T) { - t.Parallel() - r := clients.RepoAssociationCollaborator - twentDaysAgo := time.Now().AddDate(0 /*years*/, 0 /*months*/, -20 /*days*/) - type args struct { - issue *clients.Issue - threshold time.Time - } - tests := []struct { //nolint:govet - name string - args args - want bool - }{ - { - name: "nil issue", - args: args{ - issue: nil, - threshold: time.Now(), - }, - want: false, - }, - { - name: "repo-association collaborator", - args: args{ - issue: &clients.Issue{ - CreatedAt: nil, - AuthorAssociation: &r, - }, - }, - want: false, - }, - { - name: "twentyDaysAgo", - args: args{ - issue: &clients.Issue{ - CreatedAt: &twentDaysAgo, - AuthorAssociation: &r, - }, - }, - want: true, - }, - { - name: "repo-association collaborator with comment", - args: args{ - issue: &clients.Issue{ - CreatedAt: nil, - AuthorAssociation: &r, - Comments: []clients.IssueComment{ - { - 
CreatedAt: &twentDaysAgo, - AuthorAssociation: &r, - }, - }, - }, - }, - want: true, - }, - } - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - if got := hasActivityByCollaboratorOrHigher(tt.args.issue, tt.args.threshold); got != tt.want { - t.Errorf("hasActivityByCollaboratorOrHigher() = %v, want %v", got, tt.want) - } - }) - } -} - func TestMaintained(t *testing.T) { - twentyDaysAgo := time.Now().AddDate(0 /*years*/, 0 /*months*/, -20 /*days*/) - collab := clients.RepoAssociationCollaborator t.Parallel() - type args struct { //nolint:govet - name string - dl checker.DetailLogger - r *checker.MaintainedData - } tests := []struct { - name string - args args - want checker.CheckResult + name string + findings []finding.Finding + result scut.TestReturn }{ { - name: "nil", - args: args{ - name: "test", - dl: nil, - r: nil, - }, - want: checker.CheckResult{ - Name: "test", - Version: 2, - Reason: "internal error: empty raw data", - Score: -1, - }, - }, - { - name: "archived", - args: args{ - name: "test", - dl: nil, - r: &checker.MaintainedData{ - ArchivedStatus: checker.ArchivedStatus{Status: true}, + name: "Two commits in last 90 days", + findings: []finding.Finding{ + { + Probe: "hasRecentCommits", + Outcome: finding.OutcomePositive, + Values: map[string]int{ + "commitsWithinThreshold": 2, + }, + }, { + Probe: "issueActivityByProjectMember", + Outcome: finding.OutcomePositive, + Values: map[string]int{ + "numberOfIssuesUpdatedWithinThreshold": 1, + }, + }, { + Probe: "notArchived", + Outcome: finding.OutcomePositive, + }, { + Probe: "notCreatedRecently", + Outcome: finding.OutcomePositive, }, }, - want: checker.CheckResult{ - Name: "test", - Version: 2, - Reason: "repo is marked as archived", - Score: 0, + result: scut.TestReturn{ + Score: 2, }, }, { - name: "no activity", - args: args{ - name: "test", - dl: nil, - r: &checker.MaintainedData{ - ArchivedStatus: checker.ArchivedStatus{Status: false}, - DefaultBranchCommits: 
[]clients.Commit{ - { - CommittedDate: time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC), - }, - }, + name: "No issues, no commits and not archived", + findings: []finding.Finding{ + { + Probe: "hasRecentCommits", + Outcome: finding.OutcomeNegative, + }, { + Probe: "issueActivityByProjectMember", + Outcome: finding.OutcomeNegative, + }, { + Probe: "notArchived", + Outcome: finding.OutcomePositive, + }, { + Probe: "notCreatedRecently", + Outcome: finding.OutcomePositive, }, }, - want: checker.CheckResult{ - Name: "test", - Version: 2, - Reason: "0 commit(s) out of 1 and 0 issue activity out of 0 found in the last 90 days -- score normalized to 0", - Score: 0, + result: scut.TestReturn{ + Score: 0, }, }, { - name: "commit activity in the last 30 days", - args: args{ - name: "test", - dl: &scut.TestDetailLogger{}, - r: &checker.MaintainedData{ - ArchivedStatus: checker.ArchivedStatus{Status: false}, - DefaultBranchCommits: []clients.Commit{ - { - CommittedDate: time.Now().AddDate(0 /*years*/, 0 /*months*/, -20 /*days*/), - }, - { - CommittedDate: time.Now().AddDate(0 /*years*/, 0 /*months*/, -10 /*days*/), - }, - }, - - Issues: []clients.Issue{ - { - CreatedAt: &twentyDaysAgo, - AuthorAssociation: &collab, - }, - }, - CreatedAt: time.Now().AddDate(0 /*years*/, 0 /*months*/, -100 /*days*/), + name: "Wrong probe name", + findings: []finding.Finding{ + { + Probe: "hasRecentCommits", + Outcome: finding.OutcomeNegative, + }, { + Probe: "issueActivityByProjectMember", + Outcome: finding.OutcomeNegative, + }, { + Probe: "archvied", /*misspelling*/ + Outcome: finding.OutcomePositive, + }, { + Probe: "notCreatedRecently", + Outcome: finding.OutcomePositive, }, }, - want: checker.CheckResult{ - Name: "test", - Version: 2, - Reason: "2 commit(s) out of 2 and 1 issue activity out of 1 found in the last 90 days -- score normalized to 2", - Score: 2, + result: scut.TestReturn{ + Score: -1, + Error: sce.ErrScorecardInternal, }, }, { - name: "Repo created recently", - args: args{ - name: 
"test", - dl: &scut.TestDetailLogger{}, - r: &checker.MaintainedData{ - ArchivedStatus: checker.ArchivedStatus{Status: false}, - DefaultBranchCommits: []clients.Commit{ - { - CommittedDate: time.Now().AddDate(0 /*years*/, 0 /*months*/, -20 /*days*/), - }, - { - CommittedDate: time.Now().AddDate(0 /*years*/, 0 /*months*/, -10 /*days*/), - }, - }, - - Issues: []clients.Issue{ - { - CreatedAt: &twentyDaysAgo, - AuthorAssociation: &collab, - }, - }, - CreatedAt: time.Now().AddDate(0 /*years*/, 0 /*months*/, -10 /*days*/), + name: "Project is archived", + findings: []finding.Finding{ + { + Probe: "hasRecentCommits", + Outcome: finding.OutcomeNegative, + }, { + Probe: "issueActivityByProjectMember", + Outcome: finding.OutcomeNegative, + }, { + Probe: "notArchived", + Outcome: finding.OutcomeNegative, + }, { + Probe: "notCreatedRecently", + Outcome: finding.OutcomePositive, }, }, - want: checker.CheckResult{ - Name: "test", - Version: 2, - Reason: "repo was created 10 days ago, not enough maintenance history", - Score: 0, + result: scut.TestReturn{ + Score: 0, + NumberOfWarn: 3, }, }, } for _, tt := range tests { - tt := tt + tt := tt // Parallel testing t.Run(tt.name, func(t *testing.T) { t.Parallel() - if got := Maintained(tt.args.name, tt.args.dl, tt.args.r); !cmp.Equal(got, tt.want, cmpopts.IgnoreFields(checker.CheckResult{}, "Error")) { //nolint:lll - t.Errorf("Maintained() = %v, want %v", got, cmp.Diff(got, tt.want, cmpopts.IgnoreFields(checker.CheckResult{}, "Error"))) //nolint:lll - } + dl := scut.TestDetailLogger{} + got := Maintained(tt.name, tt.findings, &dl) + scut.ValidateTestReturn(t, tt.name, &tt.result, &got, &dl) }) } } diff --git a/checks/evaluation/packaging.go b/checks/evaluation/packaging.go index a3c13fd3507..da69d8c196f 100644 --- a/checks/evaluation/packaging.go +++ b/checks/evaluation/packaging.go @@ -15,75 +15,46 @@ package evaluation import ( - "fmt" - "github.com/ossf/scorecard/v4/checker" sce "github.com/ossf/scorecard/v4/errors" + 
"github.com/ossf/scorecard/v4/finding" + "github.com/ossf/scorecard/v4/probes/packagedWithAutomatedWorkflow" ) // Packaging applies the score policy for the Packaging check. -func Packaging(name string, dl checker.DetailLogger, r *checker.PackagingData) checker.CheckResult { - if r == nil { - e := sce.WithMessage(sce.ErrScorecardInternal, "empty raw data") +func Packaging(name string, + findings []finding.Finding, + dl checker.DetailLogger, +) checker.CheckResult { + expectedProbes := []string{ + packagedWithAutomatedWorkflow.Probe, + } + + if !finding.UniqueProbesEqual(findings, expectedProbes) { + e := sce.WithMessage(sce.ErrScorecardInternal, "invalid probe results") return checker.CreateRuntimeErrorResult(name, e) } - pass := false - for _, p := range r.Packages { - if p.Msg != nil { - // This is a debug message. Let's just replay the message. - dl.Debug(&checker.LogMessage{ - Text: *p.Msg, + // Currently there is only a single packaging probe that returns + // a single positive or negative outcome. As such, in this evaluation, + // we return max score if the outcome is positive and lowest score if + // the outcome is negative. + maxScore := false + for _, f := range findings { + f := f + if f.Outcome == finding.OutcomePositive { + maxScore = true + // Log all findings except the negative ones. + dl.Info(&checker.LogMessage{ + Finding: &f, }) - continue - } - - // Presence of a single non-debug message means the - // check passes. 
- pass = true - - msg, err := createLogMessage(p) - if err != nil { - return checker.CreateRuntimeErrorResult(name, err) } - dl.Info(&msg) } - - if pass { - return checker.CreateMaxScoreResult(name, - "publishing workflow detected") + if maxScore { + return checker.CreateMaxScoreResult(name, "packaging workflow detected") } - dl.Warn(&checker.LogMessage{ - Text: "no GitHub/GitLab publishing workflow detected", - }) - + checker.LogFindings(negativeFindings(findings), dl) return checker.CreateInconclusiveResult(name, - "no published package detected") -} - -func createLogMessage(p checker.Package) (checker.LogMessage, error) { - var msg checker.LogMessage - - if p.Msg != nil { - return msg, sce.WithMessage(sce.ErrScorecardInternal, "Msg should be nil") - } - - if p.File == nil { - return msg, sce.WithMessage(sce.ErrScorecardInternal, "File field is nil") - } - - if p.File != nil { - msg.Path = p.File.Path - msg.Type = p.File.Type - msg.Offset = p.File.Offset - } - - if len(p.Runs) == 0 { - return msg, sce.WithMessage(sce.ErrScorecardInternal, "no run data") - } - - msg.Text = fmt.Sprintf("GitHub/GitLab publishing workflow used in run %s", p.Runs[0].URL) - - return msg, nil + "packaging workflow not detected") } diff --git a/checks/evaluation/packaging_test.go b/checks/evaluation/packaging_test.go index eaf6a623a2e..cfa52196f6d 100644 --- a/checks/evaluation/packaging_test.go +++ b/checks/evaluation/packaging_test.go @@ -16,153 +16,69 @@ package evaluation import ( "testing" - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/ossf/scorecard/v4/checker" + sce "github.com/ossf/scorecard/v4/errors" + "github.com/ossf/scorecard/v4/finding" scut "github.com/ossf/scorecard/v4/utests" ) -func Test_createLogMessage(t *testing.T) { - msg := "msg" +func TestPackaging(t *testing.T) { t.Parallel() - tests := []struct { //nolint:govet - name string - args checker.Package - want checker.LogMessage - wantErr bool + tests := []struct { + name 
string + findings []finding.Finding + result scut.TestReturn }{ { - name: "nil package", - args: checker.Package{}, - want: checker.LogMessage{}, - wantErr: true, - }, - { - name: "nil file", - args: checker.Package{ - File: nil, - }, - want: checker.LogMessage{}, - wantErr: true, - }, - { - name: "msg is not nil", - args: checker.Package{ - File: &checker.File{}, - Msg: &msg, - }, - want: checker.LogMessage{ - Text: "", - }, - wantErr: true, - }, - { - name: "file is not nil", - args: checker.Package{ - File: &checker.File{ - Path: "path", + name: "test positive outcome", + findings: []finding.Finding{ + { + Probe: "packagedWithAutomatedWorkflow", + Outcome: finding.OutcomePositive, }, }, - want: checker.LogMessage{ - Path: "path", + result: scut.TestReturn{ + Score: checker.MaxResultScore, + NumberOfInfo: 1, }, - wantErr: true, }, { - name: "runs are not zero", - args: checker.Package{ - File: &checker.File{ - Path: "path", + name: "test positive outcome with wrong probes", + findings: []finding.Finding{ + { + Probe: "wrongProbe", + Outcome: finding.OutcomePositive, }, - Runs: []checker.Run{ - {}, - }, - }, - want: checker.LogMessage{ - Text: "GitHub/GitLab publishing workflow used in run ", - Path: "path", - }, - }, - } - for _, tt := range tests { - tt := tt // Parallel testing - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - got, err := createLogMessage(tt.args) - if (err != nil) != tt.wantErr { - t.Errorf("createLogMessage() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !cmp.Equal(got, tt.want) { - t.Errorf("createLogMessage() got = %v, want %v", got, cmp.Diff(got, tt.want)) - } - }) - } -} - -func TestPackaging(t *testing.T) { - t.Parallel() - type args struct { //nolint:govet - name string - dl checker.DetailLogger - r *checker.PackagingData - } - tests := []struct { - name string - args args - want checker.CheckResult - }{ - { - name: "nil packaging data", - args: args{ - name: "name", - dl: nil, - r: nil, }, - want: 
checker.CheckResult{ - Name: "name", - Version: 2, - Score: -1, - Reason: "internal error: empty raw data", + result: scut.TestReturn{ + Score: -1, + Error: sce.ErrScorecardInternal, }, }, { - name: "empty packaging data", - args: args{ - name: "name", - dl: &scut.TestDetailLogger{}, - r: &checker.PackagingData{}, + name: "test inconclusive outcome", + findings: []finding.Finding{ + { + Probe: "packagedWithAutomatedWorkflow", + Outcome: finding.OutcomeNegative, + }, }, - want: checker.CheckResult{ - Name: "name", - Version: 2, - Score: -1, - Reason: "no published package detected", + result: scut.TestReturn{ + Score: checker.InconclusiveResultScore, + NumberOfWarn: 1, }, }, { - name: "runs are not zero", - args: args{ - dl: &scut.TestDetailLogger{}, - r: &checker.PackagingData{ - Packages: []checker.Package{ - { - File: &checker.File{ - Path: "path", - }, - Runs: []checker.Run{ - {}, - }, - }, - }, + name: "test negative outcome with wrong probes", + findings: []finding.Finding{ + { + Probe: "wrongProbe", + Outcome: finding.OutcomeNegative, }, }, - want: checker.CheckResult{ - Name: "", - Version: 2, - Score: 10, - Reason: "publishing workflow detected", + result: scut.TestReturn{ + Score: -1, + Error: sce.ErrScorecardInternal, }, }, } @@ -170,9 +86,9 @@ func TestPackaging(t *testing.T) { tt := tt // Parallel testing t.Run(tt.name, func(t *testing.T) { t.Parallel() - if got := Packaging(tt.args.name, tt.args.dl, tt.args.r); !cmp.Equal(got, tt.want, cmpopts.IgnoreFields(checker.CheckResult{}, "Error")) { //nolint:lll - t.Errorf("Packaging() = %v, want %v", got, cmp.Diff(got, tt.want, cmpopts.IgnoreFields(checker.CheckResult{}, "Error"))) //nolint:lll - } + dl := scut.TestDetailLogger{} + got := Packaging(tt.name, tt.findings, &dl) + scut.ValidateTestReturn(t, tt.name, &tt.result, &got, &dl) }) } } diff --git a/checks/evaluation/permissions/gitHubWorkflowPermissionsTopNoWrite.yml b/checks/evaluation/permissions/gitHubWorkflowPermissionsTopNoWrite.yml index 
65c89173c92..91b2f117c93 100644 --- a/checks/evaluation/permissions/gitHubWorkflowPermissionsTopNoWrite.yml +++ b/checks/evaluation/permissions/gitHubWorkflowPermissionsTopNoWrite.yml @@ -17,7 +17,7 @@ short: Checks that GitHub workflows do not have default write permissions motivation: > If no permissions are declared, a workflow's GitHub token's permissions default to write for all scopes. This include write permissions to push to the repository, to read encrypted secrets, etc. - For more information, see https://docs.github.com/en/actions/security-guides/automatic-token-authentication#permissions-for-the-github_token. + For more information, see https://docs.github.com/actions/security-guides/automatic-token-authentication#permissions-for-the-github_token. implementation: > The rule is implemented by checking whether the `permissions` keyword is defined at the top of the workflow, and that no write permissions are given. diff --git a/checks/evaluation/permissions/permissions.go b/checks/evaluation/permissions/permissions.go index ed8b0cbc21d..3aebd06c047 100644 --- a/checks/evaluation/permissions/permissions.go +++ b/checks/evaluation/permissions/permissions.go @@ -38,6 +38,58 @@ var ( topNoWriteID = "gitHubWorkflowPermissionsTopNoWrite" ) +type permissionLevel int + +const ( + // permissionLevelNone is a permission set to `none`. + permissionLevelNone permissionLevel = iota + // permissionLevelRead is a permission set to `read`. + permissionLevelRead + // permissionLevelUnknown is for other kinds of alerts, mostly to support debug messages. + // TODO: remove it once we have implemented severity (#1874). + permissionLevelUnknown + // permissionLevelUndeclared is an undeclared permission. + permissionLevelUndeclared + // permissionLevelWrite is a permission set to `write` for a permission we consider potentially dangerous. + permissionLevelWrite +) + +// permissionLocation represents a declaration type. 
+type permissionLocationType int + +const ( + // permissionLocationNil is in case the permission is nil. + permissionLocationNil permissionLocationType = iota + // permissionLocationNotDeclared is for undeclared permission. + permissionLocationNotDeclared + // permissionLocationTop is top-level workflow permission. + permissionLocationTop + // permissionLocationJob is job-level workflow permission. + permissionLocationJob +) + +// permissionType represents a permission type. +type permissionType int + +const ( + // permissionTypeNone represents none permission type. + permissionTypeNone permissionType = iota + // permissionTypeNone is the "all" github permission type. + permissionTypeAll + // permissionTypeNone is the "statuses" github permission type. + permissionTypeStatuses + // permissionTypeNone is the "checks" github permission type. + permissionTypeChecks + // permissionTypeNone is the "security-events" github permission type. + permissionTypeSecurityEvents + // permissionTypeNone is the "deployments" github permission type. + permissionTypeDeployments + // permissionTypeNone is the "packages" github permission type. + permissionTypePackages + // permissionTypeNone is the "actions" github permission type. + permissionTypeActions +) + // TokenPermissions applies the score policy for the Token-Permissions check. 
func TokenPermissions(name string, c *checker.CheckRequest, r *checker.TokenPermissionsData) checker.CheckResult { if r == nil { @@ -46,36 +98,34 @@ func TokenPermissions(name string, c *checker.CheckRequest, r *checker.TokenPerm } if r.NumTokens == 0 { - return checker.CreateInconclusiveResult(name, "no github tokens found") + return checker.CreateInconclusiveResult(name, "no tokens found") + } + + // This is a temporary step that should be replaced by probes in ./probes + findings, err := rawToFindings(r) + if err != nil { + e := sce.WithMessage(sce.ErrScorecardInternal, "could not convert raw data to findings") + return checker.CreateRuntimeErrorResult(name, e) } - score, err := applyScorePolicy(r, c) + score, err := applyScorePolicy(findings, c) if err != nil { return checker.CreateRuntimeErrorResult(name, err) } if score != checker.MaxResultScore { return checker.CreateResultWithScore(name, - "non read-only tokens detected in GitHub workflows", score) + "detected GitHub workflow tokens with excessive permissions", score) } return checker.CreateMaxScoreResult(name, - "tokens are read-only in GitHub workflows") + "GitHub workflow tokens follow principle of least privilege") } -func applyScorePolicy(results *checker.TokenPermissionsData, c *checker.CheckRequest) (int, error) { - // See list https://github.blog/changelog/2021-04-20-github-actions-control-permissions-for-github_token/. - // Note: there are legitimate reasons to use some of the permissions like checks, deployments, etc. - // in CI/CD systems https://docs.travis-ci.com/user/github-oauth-scopes/. - - hm := make(map[string]permissions) - dl := c.Dlogger - //nolint:errcheck - remediationMetadata, _ := remediation.New(c) - negativeProbeResults := map[string]bool{ - stepsNoWriteID: false, - topNoWriteID: false, - } +// rawToFindings is a temporary step for converting the raw results +// to findings. This should be replaced by probes in ./probes. 
+func rawToFindings(results *checker.TokenPermissionsData) ([]finding.Finding, error) { + var findings []finding.Finding for _, r := range results.TokenPermissions { var loc *finding.Location @@ -83,83 +133,221 @@ func applyScorePolicy(results *checker.TokenPermissionsData, c *checker.CheckReq loc = &finding.Location{ Type: r.File.Type, Path: r.File.Path, - LineStart: &r.File.Offset, + LineStart: newUint(r.File.Offset), } if r.File.Snippet != "" { - loc.Snippet = &r.File.Snippet + loc.Snippet = newStr(r.File.Snippet) } } - text, err := createText(r) if err != nil { - return checker.MinResultScore, err + return nil, err } - msg, err := createLogMsg(r.LocationType) + f, err := createFinding(r.LocationType, text, loc) if err != nil { - return checker.InconclusiveResultScore, err + return nil, err } - msg.Finding = msg.Finding.WithMessage(text).WithLocation(loc) + switch r.Type { - case checker.PermissionLevelNone, checker.PermissionLevelRead: - msg.Finding = msg.Finding.WithOutcome(finding.OutcomePositive) - dl.Info(msg) - case checker.PermissionLevelUnknown: - dl.Debug(msg) + case checker.PermissionLevelNone: + f = f.WithOutcome(finding.OutcomePositive) + f = f.WithValues(map[string]int{ + "PermissionLevel": int(permissionLevelNone), + }) + case checker.PermissionLevelRead: + f = f.WithOutcome(finding.OutcomePositive) + f = f.WithValues(map[string]int{ + "PermissionLevel": int(permissionLevelRead), + }) + case checker.PermissionLevelUnknown: + f = f.WithValues(map[string]int{ + "PermissionLevel": int(permissionLevelUnknown), + }).WithOutcome(finding.OutcomeError) case checker.PermissionLevelUndeclared: + var locationType permissionLocationType + //nolint:gocritic if r.LocationType == nil { - return checker.InconclusiveResultScore, - sce.WithMessage(sce.ErrScorecardInternal, "locationType is nil") + locationType = permissionLocationNil + } else if *r.LocationType == checker.PermissionLocationTop { + locationType = permissionLocationTop + } else { + locationType = 
permissionLocationNotDeclared + } + permType := permTypeToEnum(r.Name) + f = f.WithValues(map[string]int{ + "PermissionLevel": int(permissionLevelUndeclared), + "LocationType": int(locationType), + "PermissionType": int(permType), + }) + case checker.PermissionLevelWrite: + var locationType permissionLocationType + switch *r.LocationType { + case checker.PermissionLocationTop: + locationType = permissionLocationTop + case checker.PermissionLocationJob: + locationType = permissionLocationJob + default: + locationType = permissionLocationNotDeclared } + permType := permTypeToEnum(r.Name) + f = f.WithValues(map[string]int{ + "PermissionLevel": int(permissionLevelWrite), + "LocationType": int(locationType), + "PermissionType": int(permType), + }) + f = f.WithOutcome(finding.OutcomeNegative) + } + findings = append(findings, *f) + } + return findings, nil +} - // We warn only for top-level. - if *r.LocationType == checker.PermissionLocationTop { - warnWithRemediation(dl, msg, remediationMetadata, loc, negativeProbeResults) - } else { - dl.Debug(msg) +func permTypeToEnum(tokenName *string) permissionType { + if tokenName == nil { + return permissionTypeNone + } + switch *tokenName { + //nolint:goconst + case "all": + return permissionTypeAll + case "statuses": + return permissionTypeStatuses + case "checks": + return permissionTypeChecks + case "security-events": + return permissionTypeSecurityEvents + case "deployments": + return permissionTypeDeployments + case "contents": + return permissionTypePackages + case "actions": + return permissionTypeActions + default: + return permissionTypeNone + } +} + +func permTypeToName(permType int) *string { + var permName string + switch permissionType(permType) { + case permissionTypeAll: + permName = "all" + case permissionTypeStatuses: + permName = "statuses" + case permissionTypeChecks: + permName = "checks" + case permissionTypeSecurityEvents: + permName = "security-events" + case permissionTypeDeployments: + permName = 
"deployments" + case permissionTypePackages: + permName = "contents" + case permissionTypeActions: + permName = "actions" + default: + permName = "" + } + return &permName +} + +func createFinding(loct *checker.PermissionLocation, text string, loc *finding.Location) (*finding.Finding, error) { + probe := stepsNoWriteID + if loct == nil || *loct == checker.PermissionLocationTop { + probe = topNoWriteID + } + content, err := probes.ReadFile(probe + ".yml") + if err != nil { + return nil, fmt.Errorf("reading %v.yml: %w", probe, err) + } + f, err := finding.FromBytes(content, probe) + if err != nil { + return nil, + sce.WithMessage(sce.ErrScorecardInternal, err.Error()) + } + f = f.WithMessage(text) + if loc != nil { + f = f.WithLocation(loc) + } + return f, nil +} + +// avoid memory aliasing by returning a new copy. +func newUint(u uint) *uint { + return &u +} + +// avoid memory aliasing by returning a new copy. +func newStr(s string) *string { + return &s +} + +func applyScorePolicy(findings []finding.Finding, c *checker.CheckRequest) (int, error) { + // See list https://github.blog/changelog/2021-04-20-github-actions-control-permissions-for-github_token/. + // Note: there are legitimate reasons to use some of the permissions like checks, deployments, etc. + // in CI/CD systems https://docs.travis-ci.com/user/github-oauth-scopes/. 
+ + hm := make(map[string]permissions) + dl := c.Dlogger + //nolint:errcheck + remediationMetadata, _ := remediation.New(c) + negativeProbeResults := map[string]bool{ + stepsNoWriteID: false, + topNoWriteID: false, + } + + for i := range findings { + f := &findings[i] + pLevel := permissionLevel(f.Values["PermissionLevel"]) + switch pLevel { + case permissionLevelNone, permissionLevelRead: + dl.Info(&checker.LogMessage{ + Finding: f, + }) + case permissionLevelUnknown: + dl.Debug(&checker.LogMessage{ + Finding: f, + }) + + case permissionLevelUndeclared: + switch permissionLocationType(f.Values["LocationType"]) { + case permissionLocationNil: + return checker.InconclusiveResultScore, + sce.WithMessage(sce.ErrScorecardInternal, "locationType is nil") + case permissionLocationTop: + warnWithRemediation(dl, remediationMetadata, f, negativeProbeResults) + default: + // We warn only for top-level. + dl.Debug(&checker.LogMessage{ + Finding: f, + }) } // Group results by workflow name for score computation. - if err := updateWorkflowHashMap(hm, r); err != nil { + if err := updateWorkflowHashMap(hm, f); err != nil { return checker.InconclusiveResultScore, err } - case checker.PermissionLevelWrite: - warnWithRemediation(dl, msg, remediationMetadata, loc, negativeProbeResults) + case permissionLevelWrite: + warnWithRemediation(dl, remediationMetadata, f, negativeProbeResults) // Group results by workflow name for score computation. 
- if err := updateWorkflowHashMap(hm, r); err != nil { + if err := updateWorkflowHashMap(hm, f); err != nil { return checker.InconclusiveResultScore, err } } } - if err := reportDefaultFindings(results, c.Dlogger, negativeProbeResults); err != nil { + if err := reportDefaultFindings(findings, c.Dlogger, negativeProbeResults); err != nil { return checker.InconclusiveResultScore, err } - return calculateScore(hm), nil } -func reportDefaultFindings(results *checker.TokenPermissionsData, +func reportDefaultFindings(results []finding.Finding, dl checker.DetailLogger, negativeProbeResults map[string]bool, ) error { - // TODO(#2928): re-visit the need for NotApplicable outcome. - // No workflow files exist. - if len(results.TokenPermissions) == 0 { - text := "no workflows found in the repository" - if err := reportFinding(stepsNoWriteID, - text, finding.OutcomeNotAvailable, dl); err != nil { - return err - } - if err := reportFinding(topNoWriteID, - text, finding.OutcomeNotAvailable, dl); err != nil { - return err - } - return nil - } - // Workflow files found, report positive findings if no // negative findings were found. 
// NOTE: we don't consider probe `topNoWriteID` @@ -192,44 +380,28 @@ func reportFinding(probe, text string, o finding.Outcome, dl checker.DetailLogge return nil } -func createLogMsg(loct *checker.PermissionLocation) (*checker.LogMessage, error) { - probe := stepsNoWriteID - if loct == nil || *loct == checker.PermissionLocationTop { - probe = topNoWriteID - } - content, err := probes.ReadFile(probe + ".yml") - if err != nil { - return nil, fmt.Errorf("%w", err) - } - f, err := finding.FromBytes(content, probe) - if err != nil { - return nil, - sce.WithMessage(sce.ErrScorecardInternal, err.Error()) - } - return &checker.LogMessage{ - Finding: f, - }, nil -} - -func warnWithRemediation(logger checker.DetailLogger, msg *checker.LogMessage, - rem *remediation.RemediationMetadata, loc *finding.Location, +func warnWithRemediation(logger checker.DetailLogger, + rem *remediation.RemediationMetadata, + f *finding.Finding, negativeProbeResults map[string]bool, ) { - if loc != nil && loc.Path != "" { - msg.Finding = msg.Finding.WithRemediationMetadata(map[string]string{ + if f.Location != nil && f.Location.Path != "" { + f = f.WithRemediationMetadata(map[string]string{ "repo": rem.Repo, "branch": rem.Branch, - "workflow": strings.TrimPrefix(loc.Path, ".github/workflows/"), + "workflow": strings.TrimPrefix(f.Location.Path, ".github/workflows/"), }) } - logger.Warn(msg) + logger.Warn(&checker.LogMessage{ + Finding: f, + }) // Record that we found a negative result. - negativeProbeResults[msg.Finding.Probe] = true + negativeProbeResults[f.Probe] = true } func recordPermissionWrite(hm map[string]permissions, path string, - locType checker.PermissionLocation, permName *string, + locType permissionLocationType, permType int, ) { if _, exists := hm[path]; !exists { hm[path] = permissions{ @@ -240,11 +412,12 @@ func recordPermissionWrite(hm map[string]permissions, path string, // Select the hash map to update. 
m := hm[path].jobLevelWritePermissions - if locType == checker.PermissionLocationTop { + if locType == permissionLocationTop { m = hm[path].topLevelWritePermissions } // Set the permission name to record. + permName := permTypeToName(permType) name := "all" if permName != nil && *permName != "" { name = *permName @@ -252,21 +425,21 @@ func recordPermissionWrite(hm map[string]permissions, path string, m[name] = true } -func updateWorkflowHashMap(hm map[string]permissions, t checker.TokenPermission) error { - if t.LocationType == nil { +func updateWorkflowHashMap(hm map[string]permissions, f *finding.Finding) error { + if _, ok := f.Values["LocationType"]; !ok { return sce.WithMessage(sce.ErrScorecardInternal, "locationType is nil") } - if t.File == nil || t.File.Path == "" { + if f.Location == nil || f.Location.Path == "" { return sce.WithMessage(sce.ErrScorecardInternal, "path is not set") } - if t.Type != checker.PermissionLevelWrite && - t.Type != checker.PermissionLevelUndeclared { + if permissionLevel(f.Values["PermissionLevel"]) != permissionLevelWrite && + permissionLevel(f.Values["PermissionLevel"]) != permissionLevelUndeclared { return nil } - - recordPermissionWrite(hm, t.File.Path, *t.LocationType, t.Name) + plt := permissionLocationType(f.Values["LocationType"]) + recordPermissionWrite(hm, f.Location.Path, plt, f.Values["PermissionType"]) return nil } @@ -325,21 +498,21 @@ func calculateScore(result map[string]permissions) int { // status: https://docs.github.com/en/rest/reference/repos#statuses. // May allow an attacker to change the result of pre-submit and get a PR merged. // Low risk: -0.5. - if permissionIsPresent(perms, "statuses") { + if permissionIsPresentInTopLevel(perms, "statuses") { score -= 0.5 } // checks. // May allow an attacker to edit checks to remove pre-submit and introduce a bug. // Low risk: -0.5. - if permissionIsPresent(perms, "checks") { + if permissionIsPresentInTopLevel(perms, "checks") { score -= 0.5 } // secEvents. 
// May allow attacker to read vuln reports before patch available. // Low risk: -1 - if permissionIsPresent(perms, "security-events") { + if permissionIsPresentInTopLevel(perms, "security-events") { score-- } @@ -348,7 +521,7 @@ func calculateScore(result map[string]permissions) int { // and tiny chance an attacker can trigger a remote // service with code they own if server accepts code/location var unsanitized. // Low risk: -1 - if permissionIsPresent(perms, "deployments") { + if permissionIsPresentInTopLevel(perms, "deployments") { score-- } @@ -386,11 +559,6 @@ func calculateScore(result map[string]permissions) int { return int(score) } -func permissionIsPresent(perms permissions, name string) bool { - return permissionIsPresentInTopLevel(perms, name) || - permissionIsPresentInRunLevel(perms, name) -} - func permissionIsPresentInTopLevel(perms permissions, name string) bool { _, ok := perms.topLevelWritePermissions[name] return ok diff --git a/checks/evaluation/pinned_dependencies.go b/checks/evaluation/pinned_dependencies.go index d9133d76da4..6344c9bf82c 100644 --- a/checks/evaluation/pinned_dependencies.go +++ b/checks/evaluation/pinned_dependencies.go @@ -15,128 +15,281 @@ package evaluation import ( - "errors" "fmt" "github.com/ossf/scorecard/v4/checker" "github.com/ossf/scorecard/v4/checks/fileparser" sce "github.com/ossf/scorecard/v4/errors" "github.com/ossf/scorecard/v4/finding" - "github.com/ossf/scorecard/v4/remediation" + "github.com/ossf/scorecard/v4/finding/probe" "github.com/ossf/scorecard/v4/rule" ) -var errInvalidValue = errors.New("invalid value") - -type pinnedResult int - -const ( - pinnedUndefined pinnedResult = iota - pinned - notPinned -) +type pinnedResult struct { + pinned int + total int +} // Structure to host information about pinned github // or third party dependencies. 
-type worklowPinningResult struct { +type workflowPinningResult struct { thirdParties pinnedResult gitHubOwned pinnedResult } -// PinningDependencies applies the score policy for the Pinned-Dependencies check. -func PinningDependencies(name string, c *checker.CheckRequest, - r *checker.PinningDependenciesData, -) checker.CheckResult { - if r == nil { - e := sce.WithMessage(sce.ErrScorecardInternal, "empty raw data") - return checker.CreateRuntimeErrorResult(name, e) +// Weights used for proportional score. +// This defines the priority of pinning a dependency over other dependencies. +// The dependencies from all ecosystems are equally prioritized except +// for GitHub Actions. GitHub Actions can be GitHub-owned or from third-party +// development. The GitHub Actions ecosystem has equal priority compared to other +// ecosystems, but, within GitHub Actions, pinning third-party actions has more +// priority than pinning GitHub-owned actions. +// https://github.com/ossf/scorecard/issues/802 +const ( + gitHubOwnedActionWeight int = 2 + thirdPartyActionWeight int = 8 + normalWeight int = gitHubOwnedActionWeight + thirdPartyActionWeight + + // depTypeKey is the Values map key used to fetch the dependency type. 
+ depTypeKey = "dependencyType" +) + +var ( + dependencyTypes = map[checker.DependencyUseType]int{ + checker.DependencyUseTypeGHAction: 0, + checker.DependencyUseTypeDockerfileContainerImage: 1, + checker.DependencyUseTypeDownloadThenRun: 2, + checker.DependencyUseTypeGoCommand: 3, + checker.DependencyUseTypeChocoCommand: 4, + checker.DependencyUseTypeNpmCommand: 5, + checker.DependencyUseTypePipCommand: 6, + checker.DependencyUseTypeNugetCommand: 7, + } + intToDepType = map[int]checker.DependencyUseType{ + 0: checker.DependencyUseTypeGHAction, + 1: checker.DependencyUseTypeDockerfileContainerImage, + 2: checker.DependencyUseTypeDownloadThenRun, + 3: checker.DependencyUseTypeGoCommand, + 4: checker.DependencyUseTypeChocoCommand, + 5: checker.DependencyUseTypeNpmCommand, + 6: checker.DependencyUseTypePipCommand, + 7: checker.DependencyUseTypeNugetCommand, } +) - var wp worklowPinningResult - pr := make(map[checker.DependencyUseType]pinnedResult) - dl := c.Dlogger - //nolint:errcheck - remediationMetadata, _ := remediation.New(c) +func ruleRemToProbeRem(rem *rule.Remediation) *probe.Remediation { + return &probe.Remediation{ + Patch: rem.Patch, + Text: rem.Text, + Markdown: rem.Markdown, + Effort: probe.RemediationEffort(rem.Effort), + } +} + +func probeRemToRuleRem(rem *probe.Remediation) *rule.Remediation { + return &rule.Remediation{ + Patch: rem.Patch, + Text: rem.Text, + Markdown: rem.Markdown, + Effort: rule.RemediationEffort(rem.Effort), + } +} + +func dependenciesToFindings(r *checker.PinningDependenciesData) ([]finding.Finding, error) { + findings := make([]finding.Finding, 0) + + for i := range r.ProcessingErrors { + e := r.ProcessingErrors[i] + f := finding.Finding{ + Message: generateTextIncompleteResults(e), + Location: &e.Location, + Outcome: finding.OutcomeNotAvailable, + } + findings = append(findings, f) + } for i := range r.Dependencies { rr := r.Dependencies[i] if rr.Location == nil { if rr.Msg == nil { e := 
sce.WithMessage(sce.ErrScorecardInternal, "empty File field") - return checker.CreateRuntimeErrorResult(name, e) + return findings, e } - dl.Debug(&checker.LogMessage{ - Text: *rr.Msg, - }) + f := &finding.Finding{ + Probe: "", + Outcome: finding.OutcomeNotApplicable, + Message: *rr.Msg, + } + findings = append(findings, *f) continue } - if rr.Msg != nil { - dl.Debug(&checker.LogMessage{ + loc := &finding.Location{ + Type: rr.Location.Type, Path: rr.Location.Path, + LineStart: &rr.Location.Offset, + LineEnd: &rr.Location.EndOffset, + Snippet: &rr.Location.Snippet, + } + f := &finding.Finding{ + Probe: "", + Outcome: finding.OutcomeNotApplicable, + Message: *rr.Msg, + Location: loc, + } + findings = append(findings, *f) + continue + } + if rr.Pinned == nil { + loc := &finding.Location{ Type: rr.Location.Type, - Offset: rr.Location.Offset, - EndOffset: rr.Location.EndOffset, - Text: *rr.Msg, - Snippet: rr.Location.Snippet, + Path: rr.Location.Path, + LineStart: &rr.Location.Offset, + LineEnd: &rr.Location.EndOffset, + Snippet: &rr.Location.Snippet, + } + f := &finding.Finding{ + Probe: "", + Outcome: finding.OutcomeNotApplicable, + Message: fmt.Sprintf("%s has empty Pinned field", rr.Type), + Location: loc, + } + findings = append(findings, *f) + continue + } + if !*rr.Pinned { + loc := &finding.Location{ + Type: rr.Location.Type, + Path: rr.Location.Path, + LineStart: &rr.Location.Offset, + LineEnd: &rr.Location.EndOffset, + Snippet: &rr.Location.Snippet, + } + f := &finding.Finding{ + Probe: "", + Outcome: finding.OutcomeNegative, + Message: generateTextUnpinned(&rr), + Location: loc, + } + if rr.Remediation != nil { + f.Remediation = ruleRemToProbeRem(rr.Remediation) + } + f = f.WithValues(map[string]int{ + depTypeKey: dependencyTypes[rr.Type], }) + findings = append(findings, *f) } else { - dl.Warn(&checker.LogMessage{ - Path: rr.Location.Path, - Type: rr.Location.Type, - Offset: rr.Location.Offset, - EndOffset: rr.Location.EndOffset, - Text: generateText(&rr), - 
Snippet: rr.Location.Snippet, - Remediation: generateRemediation(remediationMetadata, &rr), + loc := &finding.Location{ + Type: rr.Location.Type, + Path: rr.Location.Path, + LineStart: &rr.Location.Offset, + LineEnd: &rr.Location.EndOffset, + Snippet: &rr.Location.Snippet, + } + f := &finding.Finding{ + Probe: "", + Outcome: finding.OutcomePositive, + Location: loc, + } + f = f.WithValues(map[string]int{ + depTypeKey: dependencyTypes[rr.Type], }) - - // Update the pinning status. - updatePinningResults(&rr, &wp, pr) + findings = append(findings, *f) } } + return findings, nil +} - // Generate scores and Info results. - // GitHub actions. - actionScore, err := createReturnForIsGitHubActionsWorkflowPinned(wp, dl) - if err != nil { - return checker.CreateRuntimeErrorResult(name, err) +// PinningDependencies applies the score policy for the Pinned-Dependencies check. +func PinningDependencies(name string, c *checker.CheckRequest, + r *checker.PinningDependenciesData, +) checker.CheckResult { + if r == nil { + e := sce.WithMessage(sce.ErrScorecardInternal, "empty raw data") + return checker.CreateRuntimeErrorResult(name, e) } - // Docker files. - dockerFromScore, err := createReturnForIsDockerfilePinned(pr, dl) + var wp workflowPinningResult + pr := make(map[checker.DependencyUseType]pinnedResult) + dl := c.Dlogger + + findings, err := dependenciesToFindings(r) if err != nil { return checker.CreateRuntimeErrorResult(name, err) } - // Docker downloads. 
- dockerDownloadScore, err := createReturnForIsDockerfileFreeOfInsecureDownloads(pr, dl) - if err != nil { - return checker.CreateRuntimeErrorResult(name, err) + for i := range findings { + f := findings[i] + switch f.Outcome { + case finding.OutcomeNotApplicable: + if f.Location != nil { + dl.Debug(&checker.LogMessage{ + Path: f.Location.Path, + Type: f.Location.Type, + Offset: *f.Location.LineStart, + EndOffset: *f.Location.LineEnd, + Text: f.Message, + Snippet: *f.Location.Snippet, + }) + } else { + dl.Debug(&checker.LogMessage{ + Text: f.Message, + }) + } + continue + case finding.OutcomeNegative: + lm := &checker.LogMessage{ + Path: f.Location.Path, + Type: f.Location.Type, + Offset: *f.Location.LineStart, + EndOffset: *f.Location.LineEnd, + Text: f.Message, + Snippet: *f.Location.Snippet, + } + + if f.Remediation != nil { + lm.Remediation = probeRemToRuleRem(f.Remediation) + } + dl.Warn(lm) + case finding.OutcomeNotAvailable: + dl.Info(&checker.LogMessage{ + Finding: &f, + }) + continue + default: + // ignore + } + updatePinningResults(intToDepType[f.Values[depTypeKey]], + f.Outcome, f.Location.Snippet, + &wp, pr) } - // Script downloads. - scriptScore, err := createReturnForIsShellScriptFreeOfInsecureDownloads(pr, dl) - if err != nil { - return checker.CreateRuntimeErrorResult(name, err) + // Generate scores and Info results. + var scores []checker.ProportionalScoreWeighted + // Go through all dependency types + // GitHub Actions need to be handled separately since they are not in pr + scores = append(scores, createScoreForGitHubActionsWorkflow(&wp, dl)...) 
+ // Only existing dependencies will be found in pr + // We will only score the ecosystem if there are dependencies + // This results in only existing ecosystems being included in the final score + for t := range pr { + logPinnedResult(dl, pr[t], string(t)) + scores = append(scores, checker.ProportionalScoreWeighted{ + Success: pr[t].pinned, + Total: pr[t].total, + Weight: normalWeight, + }) } - // Pip installs. - pipScore, err := createReturnForIsPipInstallPinned(pr, dl) + if len(scores) == 0 { + return checker.CreateInconclusiveResult(name, "no dependencies found") + } + + score, err := checker.CreateProportionalScoreWeighted(scores...) if err != nil { return checker.CreateRuntimeErrorResult(name, err) } - // Scores may be inconclusive. - actionScore = maxScore(0, actionScore) - dockerFromScore = maxScore(0, dockerFromScore) - dockerDownloadScore = maxScore(0, dockerDownloadScore) - scriptScore = maxScore(0, scriptScore) - pipScore = maxScore(0, pipScore) - - score := checker.AggregateScores(actionScore, dockerFromScore, - dockerDownloadScore, scriptScore, pipScore) - if score == checker.MaxResultScore { return checker.CreateMaxScoreResult(name, "all dependencies are pinned") } @@ -145,172 +298,104 @@ func PinningDependencies(name string, c *checker.CheckRequest, "dependency not pinned by hash detected", score, checker.MaxResultScore) } -func generateRemediation(remediationMd *remediation.RemediationMetadata, rr *checker.Dependency) *rule.Remediation { - switch rr.Type { - case checker.DependencyUseTypeGHAction: - return remediationMd.CreateWorkflowPinningRemediation(rr.Location.Path) - case checker.DependencyUseTypeDockerfileContainerImage: - return remediation.CreateDockerfilePinningRemediation(rr, remediation.CraneDigester{}) - default: - return nil - } -} - -func updatePinningResults(rr *checker.Dependency, - wp *worklowPinningResult, pr map[checker.DependencyUseType]pinnedResult, +func updatePinningResults(dependencyType checker.DependencyUseType, + outcome 
finding.Outcome, snippet *string, + wp *workflowPinningResult, pr map[checker.DependencyUseType]pinnedResult, ) { - if rr.Type == checker.DependencyUseTypeGHAction { - // Note: `Snippet` contains `action/name@xxx`, so we cna use it to infer + if dependencyType == checker.DependencyUseTypeGHAction { + // Note: `Snippet` contains `action/name@xxx`, so we can use it to infer // if it's a GitHub-owned action or not. - gitHubOwned := fileparser.IsGitHubOwnedAction(rr.Location.Snippet) - addWorkflowPinnedResult(wp, false, gitHubOwned) + gitHubOwned := fileparser.IsGitHubOwnedAction(*snippet) + addWorkflowPinnedResult(outcome, wp, gitHubOwned) return } // Update other result types. - var p pinnedResult - addPinnedResult(&p, false) - pr[rr.Type] = p + p := pr[dependencyType] + addPinnedResult(outcome, &p) + pr[dependencyType] = p } -func generateText(rr *checker.Dependency) string { +func generateTextUnpinned(rr *checker.Dependency) string { if rr.Type == checker.DependencyUseTypeGHAction { // Check if we are dealing with a GitHub action or a third-party one. gitHubOwned := fileparser.IsGitHubOwnedAction(rr.Location.Snippet) owner := generateOwnerToDisplay(gitHubOwned) - return fmt.Sprintf("%s %s not pinned by hash", owner, rr.Type) + return fmt.Sprintf("%s not pinned by hash", owner) } return fmt.Sprintf("%s not pinned by hash", rr.Type) } -func generateOwnerToDisplay(gitHubOwned bool) string { - if gitHubOwned { - return "GitHub-owned" - } - return "third-party" +func generateTextIncompleteResults(e checker.ElementError) string { + return fmt.Sprintf("Possibly incomplete results: %s", e.Err) } -// TODO(laurent): need to support GCB pinning. 
-func maxScore(s1, s2 int) int { - if s1 > s2 { - return s1 +func generateOwnerToDisplay(gitHubOwned bool) string { + if gitHubOwned { + return fmt.Sprintf("GitHub-owned %s", checker.DependencyUseTypeGHAction) } - return s2 + return fmt.Sprintf("third-party %s", checker.DependencyUseTypeGHAction) } -// For the 'to' param, true means the file is pinning dependencies (or there are no dependencies), -// false means there are unpinned dependencies. -func addPinnedResult(r *pinnedResult, to bool) { - // If the result is `notPinned`, we keep it. - // In other cases, we always update the result. - if *r == notPinned { - return - } - - switch to { - case true: - *r = pinned - case false: - *r = notPinned +func addPinnedResult(outcome finding.Outcome, r *pinnedResult) { + if outcome == finding.OutcomePositive { + r.pinned += 1 } + r.total += 1 } -func addWorkflowPinnedResult(w *worklowPinningResult, to, isGitHub bool) { +func addWorkflowPinnedResult(outcome finding.Outcome, w *workflowPinningResult, isGitHub bool) { if isGitHub { - addPinnedResult(&w.gitHubOwned, to) + addPinnedResult(outcome, &w.gitHubOwned) } else { - addPinnedResult(&w.thirdParties, to) + addPinnedResult(outcome, &w.thirdParties) } } -// Create the result for scripts. -func createReturnForIsShellScriptFreeOfInsecureDownloads(pr map[checker.DependencyUseType]pinnedResult, - dl checker.DetailLogger, -) (int, error) { - return createReturnValues(pr, checker.DependencyUseTypeDownloadThenRun, - "no insecure (not pinned by hash) dependency downloads found in shell scripts", - dl) -} - -// Create the result for docker containers. -func createReturnForIsDockerfilePinned(pr map[checker.DependencyUseType]pinnedResult, - dl checker.DetailLogger, -) (int, error) { - return createReturnValues(pr, checker.DependencyUseTypeDockerfileContainerImage, - "Dockerfile dependencies are pinned", - dl) -} - -// Create the result for docker commands. 
-func createReturnForIsDockerfileFreeOfInsecureDownloads(pr map[checker.DependencyUseType]pinnedResult, - dl checker.DetailLogger, -) (int, error) { - return createReturnValues(pr, checker.DependencyUseTypeDownloadThenRun, - "no insecure (not pinned by hash) dependency downloads found in Dockerfiles", - dl) +func logPinnedResult(dl checker.DetailLogger, p pinnedResult, name string) { + dl.Info(&checker.LogMessage{ + Text: fmt.Sprintf("%3d out of %3d %s dependencies pinned", p.pinned, p.total, name), + }) } -// Create the result for pip install commands. -func createReturnForIsPipInstallPinned(pr map[checker.DependencyUseType]pinnedResult, - dl checker.DetailLogger, -) (int, error) { - return createReturnValues(pr, checker.DependencyUseTypePipCommand, - "Pip installs are pinned", - dl) -} - -func createReturnValues(pr map[checker.DependencyUseType]pinnedResult, - t checker.DependencyUseType, infoMsg string, - dl checker.DetailLogger, -) (int, error) { - // Note: we don't check if the entry exists, - // as it will have the default value which is handled in the switch statement. - //nolint - r, _ := pr[t] - switch r { - default: - return checker.InconclusiveResultScore, fmt.Errorf("%w: %v", errInvalidValue, r) - case pinned, pinnedUndefined: - dl.Info(&checker.LogMessage{ - Text: infoMsg, - }) - return checker.MaxResultScore, nil - case notPinned: - // No logging needed as it's done by the checks. - return checker.MinResultScore, nil +func createScoreForGitHubActionsWorkflow(wp *workflowPinningResult, dl checker.DetailLogger, +) []checker.ProportionalScoreWeighted { + if wp.gitHubOwned.total == 0 && wp.thirdParties.total == 0 { + return []checker.ProportionalScoreWeighted{} } -} - -// Create the result. 
-func createReturnForIsGitHubActionsWorkflowPinned(wp worklowPinningResult, dl checker.DetailLogger) (int, error) { - return createReturnValuesForGitHubActionsWorkflowPinned(wp, - fmt.Sprintf("%ss are pinned", checker.DependencyUseTypeGHAction), - dl) -} - -func createReturnValuesForGitHubActionsWorkflowPinned(r worklowPinningResult, infoMsg string, - dl checker.DetailLogger, -) (int, error) { - score := checker.MinResultScore - - if r.gitHubOwned != notPinned { - score += 2 - dl.Info(&checker.LogMessage{ - Type: finding.FileTypeSource, - Offset: checker.OffsetDefault, - Text: fmt.Sprintf("%s %s", "GitHub-owned", infoMsg), - }) + if wp.gitHubOwned.total != 0 && wp.thirdParties.total != 0 { + logPinnedResult(dl, wp.gitHubOwned, generateOwnerToDisplay(true)) + logPinnedResult(dl, wp.thirdParties, generateOwnerToDisplay(false)) + return []checker.ProportionalScoreWeighted{ + { + Success: wp.gitHubOwned.pinned, + Total: wp.gitHubOwned.total, + Weight: gitHubOwnedActionWeight, + }, + { + Success: wp.thirdParties.pinned, + Total: wp.thirdParties.total, + Weight: thirdPartyActionWeight, + }, + } } - - if r.thirdParties != notPinned { - score += 8 - dl.Info(&checker.LogMessage{ - Type: finding.FileTypeSource, - Offset: checker.OffsetDefault, - Text: fmt.Sprintf("%s %s", "Third-party", infoMsg), - }) + if wp.gitHubOwned.total != 0 { + logPinnedResult(dl, wp.gitHubOwned, generateOwnerToDisplay(true)) + return []checker.ProportionalScoreWeighted{ + { + Success: wp.gitHubOwned.pinned, + Total: wp.gitHubOwned.total, + Weight: normalWeight, + }, + } + } + logPinnedResult(dl, wp.thirdParties, generateOwnerToDisplay(false)) + return []checker.ProportionalScoreWeighted{ + { + Success: wp.thirdParties.pinned, + Total: wp.thirdParties.total, + Weight: normalWeight, + }, } - - return score, nil } diff --git a/checks/evaluation/pinned_dependencies_test.go b/checks/evaluation/pinned_dependencies_test.go index cd497a6cfbf..ef3fefb7edd 100644 --- 
a/checks/evaluation/pinned_dependencies_test.go +++ b/checks/evaluation/pinned_dependencies_test.go @@ -20,67 +20,209 @@ import ( "github.com/google/go-cmp/cmp" "github.com/ossf/scorecard/v4/checker" + sce "github.com/ossf/scorecard/v4/errors" + "github.com/ossf/scorecard/v4/finding" scut "github.com/ossf/scorecard/v4/utests" ) -func Test_createReturnValuesForGitHubActionsWorkflowPinned(t *testing.T) { +func Test_createScoreForGitHubActionsWorkflow(t *testing.T) { t.Parallel() - //nolint - type args struct { - r worklowPinningResult - infoMsg string - dl checker.DetailLogger - } - //nolint + //nolint:govet tests := []struct { - name string - args args - want int + name string + r workflowPinningResult + scores []checker.ProportionalScoreWeighted }{ { - name: "both actions workflow pinned", - args: args{ - r: worklowPinningResult{ - thirdParties: 1, - gitHubOwned: 1, + name: "GitHub-owned and Third-Party actions pinned", + r: workflowPinningResult{ + gitHubOwned: pinnedResult{ + pinned: 1, + total: 1, + }, + thirdParties: pinnedResult{ + pinned: 1, + total: 1, + }, + }, + scores: []checker.ProportionalScoreWeighted{ + { + Success: 1, + Total: 1, + Weight: 2, + }, + { + Success: 1, + Total: 1, + Weight: 8, }, - dl: &scut.TestDetailLogger{}, }, - want: 10, }, { - name: "github actions workflow pinned", - args: args{ - r: worklowPinningResult{ - thirdParties: 2, - gitHubOwned: 2, + name: "only GitHub-owned actions pinned", + r: workflowPinningResult{ + gitHubOwned: pinnedResult{ + pinned: 1, + total: 1, + }, + thirdParties: pinnedResult{ + pinned: 0, + total: 1, + }, + }, + scores: []checker.ProportionalScoreWeighted{ + { + Success: 1, + Total: 1, + Weight: 2, + }, + { + Success: 0, + Total: 1, + Weight: 8, }, - dl: &scut.TestDetailLogger{}, }, - want: 0, }, { - name: "error in github actions workflow pinned", - args: args{ - r: worklowPinningResult{ - thirdParties: 2, - gitHubOwned: 2, + name: "only Third-Party actions pinned", + r: workflowPinningResult{ + 
gitHubOwned: pinnedResult{ + pinned: 0, + total: 1, + }, + thirdParties: pinnedResult{ + pinned: 1, + total: 1, + }, + }, + scores: []checker.ProportionalScoreWeighted{ + { + Success: 0, + Total: 1, + Weight: 2, + }, + { + Success: 1, + Total: 1, + Weight: 8, + }, + }, + }, + { + name: "no GitHub actions pinned", + r: workflowPinningResult{ + gitHubOwned: pinnedResult{ + pinned: 0, + total: 1, + }, + thirdParties: pinnedResult{ + pinned: 0, + total: 1, + }, + }, + scores: []checker.ProportionalScoreWeighted{ + { + Success: 0, + Total: 1, + Weight: 2, + }, + { + Success: 0, + Total: 1, + Weight: 8, + }, + }, + }, + { + name: "no GitHub-owned actions and Third-party actions unpinned", + r: workflowPinningResult{ + gitHubOwned: pinnedResult{ + pinned: 0, + total: 0, + }, + thirdParties: pinnedResult{ + pinned: 0, + total: 1, + }, + }, + scores: []checker.ProportionalScoreWeighted{ + { + Success: 0, + Total: 1, + Weight: 10, + }, + }, + }, + { + name: "no Third-party actions and GitHub-owned actions unpinned", + r: workflowPinningResult{ + gitHubOwned: pinnedResult{ + pinned: 0, + total: 1, + }, + thirdParties: pinnedResult{ + pinned: 0, + total: 0, + }, + }, + scores: []checker.ProportionalScoreWeighted{ + { + Success: 0, + Total: 1, + Weight: 10, + }, + }, + }, + { + name: "no GitHub-owned actions and Third-party actions pinned", + r: workflowPinningResult{ + gitHubOwned: pinnedResult{ + pinned: 0, + total: 0, + }, + thirdParties: pinnedResult{ + pinned: 1, + total: 1, + }, + }, + scores: []checker.ProportionalScoreWeighted{ + { + Success: 1, + Total: 1, + Weight: 10, + }, + }, + }, + { + name: "no Third-party actions and GitHub-owned actions pinned", + r: workflowPinningResult{ + gitHubOwned: pinnedResult{ + pinned: 1, + total: 1, + }, + thirdParties: pinnedResult{ + pinned: 0, + total: 0, + }, + }, + scores: []checker.ProportionalScoreWeighted{ + { + Success: 1, + Total: 1, + Weight: 10, }, - dl: &scut.TestDetailLogger{}, }, - want: 0, }, } for _, tt := range tests 
{ tt := tt // Re-initializing variable so it is not changed while executing the closure below t.Run(tt.name, func(t *testing.T) { t.Parallel() - got, err := createReturnValuesForGitHubActionsWorkflowPinned(tt.args.r, tt.args.infoMsg, tt.args.dl) - if err != nil { - t.Errorf("error during createReturnValuesForGitHubActionsWorkflowPinned: %v", err) - } - if got != tt.want { - t.Errorf("createReturnValuesForGitHubActionsWorkflowPinned() = %v, want %v", got, tt.want) + dl := scut.TestDetailLogger{} + actual := createScoreForGitHubActionsWorkflow(&tt.r, &dl) + diff := cmp.Diff(tt.scores, actual) + if diff != "" { + t.Errorf("createScoreForGitHubActionsWorkflow (-want,+got) %+v", diff) } }) } @@ -90,301 +232,624 @@ func asPointer(s string) *string { return &s } +func asBoolPointer(b bool) *bool { + return &b +} + func Test_PinningDependencies(t *testing.T) { t.Parallel() tests := []struct { - name string - dependencies []checker.Dependency - expected scut.TestReturn + name string + dependencies []checker.Dependency + processingErrors []checker.ElementError + expected scut.TestReturn }{ { - name: "download then run pinned debug", + name: "all dependencies pinned", dependencies: []checker.Dependency{ + { + Location: &checker.File{ + Snippet: "actions/checkout@a81bbbf8298c0fa03ea29cdc473d45769f953675", + }, + Type: checker.DependencyUseTypeGHAction, + Pinned: asBoolPointer(true), + }, + { + Location: &checker.File{ + Snippet: "other/checkout@a81bbbf8298c0fa03ea29cdc473d45769f953675", + }, + Type: checker.DependencyUseTypeGHAction, + Pinned: asBoolPointer(true), + }, + { + Location: &checker.File{}, + Type: checker.DependencyUseTypeDockerfileContainerImage, + Pinned: asBoolPointer(true), + }, { Location: &checker.File{}, - Msg: asPointer("some message"), Type: checker.DependencyUseTypeDownloadThenRun, + Pinned: asBoolPointer(true), + }, + { + Location: &checker.File{}, + Type: checker.DependencyUseTypeGoCommand, + Pinned: asBoolPointer(true), + }, + { + Location: 
&checker.File{}, + Type: checker.DependencyUseTypeNpmCommand, + Pinned: asBoolPointer(true), + }, + { + Location: &checker.File{}, + Type: checker.DependencyUseTypePipCommand, + Pinned: asBoolPointer(true), }, }, expected: scut.TestReturn{ Error: nil, - Score: checker.MaxResultScore, + Score: 10, NumberOfWarn: 0, - NumberOfInfo: 6, - NumberOfDebug: 1, + NumberOfInfo: 7, + NumberOfDebug: 0, }, }, { - name: "download then run pinned debug and warn", + name: "all dependencies unpinned", dependencies: []checker.Dependency{ + { + Location: &checker.File{ + Snippet: "actions/checkout@v2", + }, + Type: checker.DependencyUseTypeGHAction, + Pinned: asBoolPointer(false), + }, + { + Location: &checker.File{ + Snippet: "other/checkout@v2", + }, + Type: checker.DependencyUseTypeGHAction, + Pinned: asBoolPointer(false), + }, { Location: &checker.File{}, - Msg: asPointer("some message"), - Type: checker.DependencyUseTypeDownloadThenRun, + Type: checker.DependencyUseTypeDockerfileContainerImage, + Pinned: asBoolPointer(false), }, { Location: &checker.File{}, Type: checker.DependencyUseTypeDownloadThenRun, + Pinned: asBoolPointer(false), + }, + { + Location: &checker.File{}, + Type: checker.DependencyUseTypeGoCommand, + Pinned: asBoolPointer(false), + }, + { + Location: &checker.File{}, + Type: checker.DependencyUseTypeNpmCommand, + Pinned: asBoolPointer(false), + }, + { + Location: &checker.File{}, + Type: checker.DependencyUseTypePipCommand, + Pinned: asBoolPointer(false), }, }, expected: scut.TestReturn{ Error: nil, - Score: 6, - NumberOfWarn: 1, - NumberOfInfo: 4, - NumberOfDebug: 1, + Score: 0, + NumberOfWarn: 7, + NumberOfInfo: 7, + NumberOfDebug: 0, }, }, { - name: "various wanrings", + name: "1 ecosystem pinned and 1 ecosystem unpinned", dependencies: []checker.Dependency{ { Location: &checker.File{}, Type: checker.DependencyUseTypePipCommand, + Pinned: asBoolPointer(false), }, { Location: &checker.File{}, - Type: checker.DependencyUseTypeDownloadThenRun, + Type: 
checker.DependencyUseTypeGoCommand, + Pinned: asBoolPointer(true), }, + }, + expected: scut.TestReturn{ + Error: nil, + Score: 5, + NumberOfWarn: 1, + NumberOfInfo: 2, + NumberOfDebug: 0, + }, + }, + { + name: "1 ecosystem partially pinned", + dependencies: []checker.Dependency{ { Location: &checker.File{}, - Type: checker.DependencyUseTypeDockerfileContainerImage, + Type: checker.DependencyUseTypePipCommand, + Pinned: asBoolPointer(false), }, { Location: &checker.File{}, - Msg: asPointer("debug message"), + Type: checker.DependencyUseTypePipCommand, + Pinned: asBoolPointer(true), }, }, expected: scut.TestReturn{ Error: nil, - Score: 2, - NumberOfWarn: 3, - NumberOfInfo: 2, - NumberOfDebug: 1, + Score: 5, + NumberOfWarn: 1, + NumberOfInfo: 1, + NumberOfDebug: 0, }, }, { - name: "unpinned pip install", + name: "no dependencies found", + dependencies: []checker.Dependency{}, + expected: scut.TestReturn{ + Error: nil, + Score: -1, + NumberOfWarn: 0, + NumberOfInfo: 0, + NumberOfDebug: 0, + }, + }, + { + name: "pinned dependency shows no warn message", dependencies: []checker.Dependency{ { Location: &checker.File{}, Type: checker.DependencyUseTypePipCommand, + Pinned: asBoolPointer(true), }, }, expected: scut.TestReturn{ Error: nil, - Score: 8, + Score: 10, + NumberOfWarn: 0, + NumberOfInfo: 1, + NumberOfDebug: 0, + }, + }, + { + name: "unpinned dependency shows warn message", + dependencies: []checker.Dependency{ + { + Location: &checker.File{}, + Type: checker.DependencyUseTypePipCommand, + Pinned: asBoolPointer(false), + }, + }, + expected: scut.TestReturn{ + Error: nil, + Score: 0, NumberOfWarn: 1, - NumberOfInfo: 5, + NumberOfInfo: 1, NumberOfDebug: 0, }, }, { - name: "undefined pip install", + name: "dependency with parsing error does not count for score and shows debug message", dependencies: []checker.Dependency{ { Location: &checker.File{}, + Msg: asPointer("some message"), Type: checker.DependencyUseTypePipCommand, - Msg: asPointer("debug message"), }, }, 
expected: scut.TestReturn{ Error: nil, - Score: 10, + Score: -1, NumberOfWarn: 0, - NumberOfInfo: 6, + NumberOfInfo: 0, NumberOfDebug: 1, }, }, { - name: "all dependencies pinned", + name: "dependency missing Pinned info does not count for score and shows debug message", + dependencies: []checker.Dependency{ + { + Location: &checker.File{}, + Type: checker.DependencyUseTypePipCommand, + }, + }, expected: scut.TestReturn{ Error: nil, - Score: 10, + Score: -1, NumberOfWarn: 0, - NumberOfInfo: 6, + NumberOfInfo: 0, + NumberOfDebug: 1, + }, + }, + { + name: "dependency missing Location info and no error message throws error", + dependencies: []checker.Dependency{{}}, + expected: scut.TestReturn{ + Error: sce.ErrScorecardInternal, + Score: -1, + NumberOfWarn: 0, + NumberOfInfo: 0, NumberOfDebug: 0, }, }, { - name: "Validate various wanrings and info", + name: "dependency missing Location info with error message shows debug message", + dependencies: []checker.Dependency{{ + Msg: asPointer("some message"), + }}, + expected: scut.TestReturn{ + Error: nil, + Score: -1, + NumberOfWarn: 0, + NumberOfInfo: 0, + NumberOfDebug: 1, + }, + }, + { + name: "unpinned choco install", dependencies: []checker.Dependency{ { Location: &checker.File{}, - Type: checker.DependencyUseTypePipCommand, + Type: checker.DependencyUseTypeChocoCommand, + Pinned: asBoolPointer(false), + }, + }, + expected: scut.TestReturn{ + Error: nil, + Score: 0, + NumberOfWarn: 1, + NumberOfInfo: 1, + NumberOfDebug: 0, + }, + }, + { + name: "unpinned Dockerfile container image", + dependencies: []checker.Dependency{ + { + Location: &checker.File{}, + Type: checker.DependencyUseTypeDockerfileContainerImage, + Pinned: asBoolPointer(false), }, + }, + expected: scut.TestReturn{ + Error: nil, + Score: 0, + NumberOfWarn: 1, + NumberOfInfo: 1, + NumberOfDebug: 0, + }, + }, + { + name: "unpinned download then run", + dependencies: []checker.Dependency{ { Location: &checker.File{}, Type: 
checker.DependencyUseTypeDownloadThenRun, + Pinned: asBoolPointer(false), }, + }, + expected: scut.TestReturn{ + Error: nil, + Score: 0, + NumberOfWarn: 1, + NumberOfInfo: 1, + NumberOfDebug: 0, + }, + }, + { + name: "unpinned go install", + dependencies: []checker.Dependency{ { Location: &checker.File{}, - Type: checker.DependencyUseTypeDockerfileContainerImage, + Type: checker.DependencyUseTypeGoCommand, + Pinned: asBoolPointer(false), + }, + }, + expected: scut.TestReturn{ + Error: nil, + Score: 0, + NumberOfWarn: 1, + NumberOfInfo: 1, + NumberOfDebug: 0, + }, + }, + { + name: "unpinned npm install", + dependencies: []checker.Dependency{ + { + Location: &checker.File{}, + Type: checker.DependencyUseTypeNpmCommand, + Pinned: asBoolPointer(false), }, + }, + expected: scut.TestReturn{ + Error: nil, + Score: 0, + NumberOfWarn: 1, + NumberOfInfo: 1, + NumberOfDebug: 0, + }, + }, + { + name: "unpinned nuget install", + dependencies: []checker.Dependency{ { Location: &checker.File{}, - Msg: asPointer("debug message"), + Type: checker.DependencyUseTypeNugetCommand, + Pinned: asBoolPointer(false), }, }, expected: scut.TestReturn{ Error: nil, - Score: 2, - NumberOfWarn: 3, + Score: 0, + NumberOfWarn: 1, + NumberOfInfo: 1, + NumberOfDebug: 0, + }, + }, + { + name: "unpinned pip install", + dependencies: []checker.Dependency{ + { + Location: &checker.File{}, + Type: checker.DependencyUseTypePipCommand, + Pinned: asBoolPointer(false), + }, + }, + expected: scut.TestReturn{ + Error: nil, + Score: 0, + NumberOfWarn: 1, + NumberOfInfo: 1, + NumberOfDebug: 0, + }, + }, + { + name: "2 unpinned dependencies for 1 ecosystem shows 2 warn messages", + dependencies: []checker.Dependency{ + { + Location: &checker.File{}, + Type: checker.DependencyUseTypePipCommand, + Pinned: asBoolPointer(false), + }, + { + Location: &checker.File{}, + Type: checker.DependencyUseTypePipCommand, + Pinned: asBoolPointer(false), + }, + }, + expected: scut.TestReturn{ + Error: nil, + Score: 0, + 
NumberOfWarn: 2, + NumberOfInfo: 1, + NumberOfDebug: 0, + }, + }, + { + name: "2 unpinned dependencies for 2 ecosystems shows 2 warn messages", + dependencies: []checker.Dependency{ + { + Location: &checker.File{}, + Type: checker.DependencyUseTypePipCommand, + Pinned: asBoolPointer(false), + }, + { + Location: &checker.File{}, + Type: checker.DependencyUseTypeGoCommand, + Pinned: asBoolPointer(false), + }, + }, + expected: scut.TestReturn{ + Error: nil, + Score: 0, + NumberOfWarn: 2, NumberOfInfo: 2, - NumberOfDebug: 1, + NumberOfDebug: 0, }, }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - dl := scut.TestDetailLogger{} - c := checker.CheckRequest{Dlogger: &dl} - actual := PinningDependencies("checkname", &c, - &checker.PinningDependenciesData{ - Dependencies: tt.dependencies, - }) - - if !scut.ValidateTestReturn(t, tt.name, &tt.expected, &actual, &dl) { - t.Fail() - } - }) - } -} - -func Test_createReturnValues(t *testing.T) { - t.Parallel() - - type args struct { - pr map[checker.DependencyUseType]pinnedResult - dl *scut.TestDetailLogger - t checker.DependencyUseType - } - - tests := []struct { - name string - args args - want int - }{ { - name: "returns 10 if no error and no pinnedResult", - args: args{ - t: checker.DependencyUseTypeDownloadThenRun, - dl: &scut.TestDetailLogger{}, + name: "GitHub Actions ecosystem with GitHub-owned pinned", + dependencies: []checker.Dependency{ + { + Location: &checker.File{ + Snippet: "actions/checkout@a81bbbf8298c0fa03ea29cdc473d45769f953675", + }, + Type: checker.DependencyUseTypeGHAction, + Pinned: asBoolPointer(true), + }, + }, + expected: scut.TestReturn{ + Error: nil, + Score: 10, + NumberOfWarn: 0, + NumberOfInfo: 1, + NumberOfDebug: 0, }, - want: 10, }, { - name: "returns 10 if pinned undefined", - args: args{ - t: checker.DependencyUseTypeDownloadThenRun, - pr: map[checker.DependencyUseType]pinnedResult{ - checker.DependencyUseTypeDownloadThenRun: 
pinnedUndefined, + name: "GitHub Actions ecosystem with third-party pinned", + dependencies: []checker.Dependency{ + { + Location: &checker.File{ + Snippet: "other/checkout@a81bbbf8298c0fa03ea29cdc473d45769f953675", + }, + Type: checker.DependencyUseTypeGHAction, + Pinned: asBoolPointer(true), }, - dl: &scut.TestDetailLogger{}, }, - want: 10, + expected: scut.TestReturn{ + Error: nil, + Score: 10, + NumberOfWarn: 0, + NumberOfInfo: 1, + NumberOfDebug: 0, + }, }, { - name: "returns 10 if pinned", - args: args{ - t: checker.DependencyUseTypeDownloadThenRun, - pr: map[checker.DependencyUseType]pinnedResult{ - checker.DependencyUseTypeDownloadThenRun: pinned, + name: "GitHub Actions ecosystem with GitHub-owned and third-party pinned", + dependencies: []checker.Dependency{ + { + Location: &checker.File{ + Snippet: "actions/checkout@a81bbbf8298c0fa03ea29cdc473d45769f953675", + }, + Type: checker.DependencyUseTypeGHAction, + Pinned: asBoolPointer(true), }, - dl: &scut.TestDetailLogger{}, + { + Location: &checker.File{ + Snippet: "other/checkout@a81bbbf8298c0fa03ea29cdc473d45769f953675", + }, + Type: checker.DependencyUseTypeGHAction, + Pinned: asBoolPointer(true), + }, + }, + expected: scut.TestReturn{ + Error: nil, + Score: 10, + NumberOfWarn: 0, + NumberOfInfo: 2, + NumberOfDebug: 0, }, - want: 10, }, { - name: "returns 0 if unpinned", - args: args{ - t: checker.DependencyUseTypeDownloadThenRun, - pr: map[checker.DependencyUseType]pinnedResult{ - checker.DependencyUseTypeDownloadThenRun: notPinned, + name: "GitHub Actions ecosystem with GitHub-owned and third-party unpinned", + dependencies: []checker.Dependency{ + { + Location: &checker.File{ + Snippet: "actions/checkout@v2", + }, + Type: checker.DependencyUseTypeGHAction, + Pinned: asBoolPointer(false), + }, + { + Location: &checker.File{ + Snippet: "other/checkout@v2", + }, + Type: checker.DependencyUseTypeGHAction, + Pinned: asBoolPointer(false), }, - dl: &scut.TestDetailLogger{}, }, - want: 0, + expected: 
scut.TestReturn{ + Error: nil, + Score: 0, + NumberOfWarn: 2, + NumberOfInfo: 2, + NumberOfDebug: 0, + }, }, - } - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - got, err := createReturnValues(tt.args.pr, tt.args.t, "some message", tt.args.dl) - if err != nil { - t.Errorf("error during createReturnValues: %v", err) - } - if got != tt.want { - t.Errorf("createReturnValues() = %v, want %v", got, tt.want) - } - - if tt.want < 10 { - return - } - - isExpectedLog := func(logMessage checker.LogMessage, logType checker.DetailType) bool { - return logMessage.Text == "some message" && logType == checker.DetailInfo - } - if !scut.ValidateLogMessage(isExpectedLog, tt.args.dl) { - t.Errorf("test failed: log message not present: %+v", "some message") - } - }) - } -} - -func Test_maxScore(t *testing.T) { - t.Parallel() - type args struct { - s1 int - s2 int - } - tests := []struct { - name string - args args - want int - }{ { - name: "returns s1 if s1 is greater than s2", - args: args{ - s1: 10, - s2: 5, + name: "GitHub Actions ecosystem with GitHub-owned pinned and third-party unpinned", + dependencies: []checker.Dependency{ + { + Location: &checker.File{ + Snippet: "actions/checkout@a81bbbf8298c0fa03ea29cdc473d45769f953675", + }, + Type: checker.DependencyUseTypeGHAction, + Pinned: asBoolPointer(true), + }, + { + Location: &checker.File{ + Snippet: "other/checkout@v2", + }, + Type: checker.DependencyUseTypeGHAction, + Pinned: asBoolPointer(false), + }, + }, + expected: scut.TestReturn{ + Error: nil, + Score: 2, + NumberOfWarn: 1, + NumberOfInfo: 2, + NumberOfDebug: 0, }, - want: 10, }, { - name: "returns s2 if s2 is greater than s1", - args: args{ - s1: 5, - s2: 10, + name: "GitHub Actions ecosystem with GitHub-owned unpinned and third-party pinned", + dependencies: []checker.Dependency{ + { + Location: &checker.File{ + Snippet: "actions/checkout@v2", + }, + Type: checker.DependencyUseTypeGHAction, + Pinned: asBoolPointer(false), + 
}, + { + Location: &checker.File{ + Snippet: "other/checkout@a81bbbf8298c0fa03ea29cdc473d45769f953675", + }, + Type: checker.DependencyUseTypeGHAction, + Pinned: asBoolPointer(true), + }, + }, + expected: scut.TestReturn{ + Error: nil, + Score: 8, + NumberOfWarn: 1, + NumberOfInfo: 2, + NumberOfDebug: 0, }, - want: 10, }, { - name: "returns s1 if s1 is equal to s2", - args: args{ - s1: 10, - s2: 10, + name: "Skipped objects and dependencies", + dependencies: []checker.Dependency{ + { + Location: &checker.File{}, + Type: checker.DependencyUseTypeNpmCommand, + Pinned: asBoolPointer(false), + }, + { + Location: &checker.File{}, + Type: checker.DependencyUseTypeNpmCommand, + Pinned: asBoolPointer(false), + }, + }, + processingErrors: []checker.ElementError{ + { + Err: sce.ErrJobOSParsing, + Location: finding.Location{}, + }, + }, + expected: scut.TestReturn{ + Error: nil, + Score: 0, + NumberOfWarn: 2, // unpinned deps + NumberOfInfo: 2, // 1 for npm deps, 1 for processing error + NumberOfDebug: 0, }, - want: 10, }, } + for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() - if got := maxScore(tt.args.s1, tt.args.s2); got != tt.want { - t.Errorf("maxScore() = %v, want %v", got, tt.want) - } + + dl := scut.TestDetailLogger{} + c := checker.CheckRequest{Dlogger: &dl} + actual := PinningDependencies("checkname", &c, + &checker.PinningDependenciesData{ + Dependencies: tt.dependencies, + ProcessingErrors: tt.processingErrors, + }) + + scut.ValidateTestReturn(t, tt.name, &tt.expected, &actual, &dl) }) } } +func stringAsPointer(s string) *string { + return &s +} + func Test_generateOwnerToDisplay(t *testing.T) { t.Parallel() tests := []struct { //nolint:govet @@ -395,12 +860,12 @@ func Test_generateOwnerToDisplay(t *testing.T) { { name: "returns GitHub if gitHubOwned is true", gitHubOwned: true, - want: "GitHub-owned", + want: "GitHub-owned GitHubAction", }, { name: "returns GitHub if gitHubOwned is false", gitHubOwned: false, - want: 
"third-party", + want: "third-party GitHubAction", }, } for _, tt := range tests { @@ -417,56 +882,111 @@ func Test_generateOwnerToDisplay(t *testing.T) { func Test_addWorkflowPinnedResult(t *testing.T) { t.Parallel() type args struct { - w *worklowPinningResult - to bool + w *workflowPinningResult + outcome finding.Outcome isGitHub bool } - tests := []struct { //nolint:govet + tests := []struct { name string - want pinnedResult + want *workflowPinningResult args args }{ { - name: "sets pinned to true if to is true", + name: "add pinned GitHub-owned action dependency", args: args{ - w: &worklowPinningResult{}, - to: true, + outcome: finding.OutcomePositive, + w: &workflowPinningResult{}, isGitHub: true, }, - want: pinned, + want: &workflowPinningResult{ + thirdParties: pinnedResult{ + pinned: 0, + total: 0, + }, + gitHubOwned: pinnedResult{ + pinned: 1, + total: 1, + }, + }, }, { - name: "sets pinned to false if to is false", + name: "add unpinned GitHub-owned action dependency", args: args{ - w: &worklowPinningResult{}, - to: false, + outcome: finding.OutcomeNegative, + w: &workflowPinningResult{}, isGitHub: true, }, - want: notPinned, + want: &workflowPinningResult{ + thirdParties: pinnedResult{ + pinned: 0, + total: 0, + }, + gitHubOwned: pinnedResult{ + pinned: 0, + total: 1, + }, + }, + }, + { + name: "add pinned Third-Party action dependency", + args: args{ + outcome: finding.OutcomePositive, + w: &workflowPinningResult{}, + isGitHub: false, + }, + want: &workflowPinningResult{ + thirdParties: pinnedResult{ + pinned: 1, + total: 1, + }, + gitHubOwned: pinnedResult{ + pinned: 0, + total: 0, + }, + }, }, { - name: "sets pinned to undefined if to is false and isGitHub is false", + name: "add unpinned Third-Party action dependency", args: args{ - w: &worklowPinningResult{}, - to: false, + outcome: finding.OutcomeNegative, + w: &workflowPinningResult{}, isGitHub: false, }, - want: pinnedUndefined, + want: &workflowPinningResult{ + thirdParties: pinnedResult{ + 
pinned: 0, + total: 1, + }, + gitHubOwned: pinnedResult{ + pinned: 0, + total: 0, + }, + }, }, } for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() - addWorkflowPinnedResult(tt.args.w, tt.args.to, tt.args.isGitHub) - if tt.args.w.gitHubOwned != tt.want { - t.Errorf("addWorkflowPinnedResult() = %v, want %v", tt.args.w.gitHubOwned, tt.want) + addWorkflowPinnedResult(tt.args.outcome, tt.args.w, tt.args.isGitHub) + if tt.want.thirdParties != tt.args.w.thirdParties { + t.Errorf("addWorkflowPinnedResult Third-party GitHub actions mismatch (-want +got):"+ + "\nThird-party pinned: %s\nThird-party total: %s", + cmp.Diff(tt.want.thirdParties.pinned, tt.args.w.thirdParties.pinned), + cmp.Diff(tt.want.thirdParties.total, tt.args.w.thirdParties.total)) + } + if tt.want.gitHubOwned != tt.args.w.gitHubOwned { + t.Errorf("addWorkflowPinnedResult GitHub-owned GitHub actions mismatch (-want +got):"+ + "\nGitHub-owned pinned: %s\nGitHub-owned total: %s", + cmp.Diff(tt.want.gitHubOwned.pinned, tt.args.w.gitHubOwned.pinned), + cmp.Diff(tt.want.gitHubOwned.total, tt.args.w.gitHubOwned.total)) } }) } } func TestGenerateText(t *testing.T) { + t.Parallel() tests := []struct { name string dependency *checker.Dependency @@ -495,8 +1015,10 @@ func TestGenerateText(t *testing.T) { } for _, tc := range tests { + tc := tc t.Run(tc.name, func(t *testing.T) { - result := generateText(tc.dependency) + t.Parallel() + result := generateTextUnpinned(tc.dependency) if !cmp.Equal(tc.expectedText, result) { t.Errorf("generateText mismatch (-want +got):\n%s", cmp.Diff(tc.expectedText, result)) } @@ -506,50 +1028,175 @@ func TestGenerateText(t *testing.T) { func TestUpdatePinningResults(t *testing.T) { t.Parallel() + type args struct { + snippet *string + w *workflowPinningResult + pr map[checker.DependencyUseType]pinnedResult + dependencyType checker.DependencyUseType + outcome finding.Outcome + } + type want struct { + w *workflowPinningResult + pr 
map[checker.DependencyUseType]pinnedResult + } tests := []struct { //nolint:govet - name string - dependency *checker.Dependency - expectedPinningResult *worklowPinningResult - expectedPinnedResult map[checker.DependencyUseType]pinnedResult + name string + args args + want want }{ { - name: "GitHub Action - GitHub-owned", - dependency: &checker.Dependency{ - Type: checker.DependencyUseTypeGHAction, - Location: &checker.File{ - Snippet: "actions/checkout@v2", + name: "add pinned GitHub-owned action", + args: args{ + dependencyType: checker.DependencyUseTypeGHAction, + outcome: finding.OutcomePositive, + snippet: stringAsPointer("actions/checkout@a81bbbf8298c0fa03ea29cdc473d45769f953675"), + w: &workflowPinningResult{}, + pr: make(map[checker.DependencyUseType]pinnedResult), + }, + want: want{ + w: &workflowPinningResult{ + thirdParties: pinnedResult{ + pinned: 0, + total: 0, + }, + gitHubOwned: pinnedResult{ + pinned: 1, + total: 1, + }, }, + pr: make(map[checker.DependencyUseType]pinnedResult), }, - expectedPinningResult: &worklowPinningResult{ - thirdParties: 0, - gitHubOwned: 2, + }, + { + name: "add unpinned GitHub-owned action", + args: args{ + dependencyType: checker.DependencyUseTypeGHAction, + outcome: finding.OutcomeNegative, + snippet: stringAsPointer("actions/checkout@v2"), + w: &workflowPinningResult{}, + pr: make(map[checker.DependencyUseType]pinnedResult), + }, + want: want{ + w: &workflowPinningResult{ + thirdParties: pinnedResult{ + pinned: 0, + total: 0, + }, + gitHubOwned: pinnedResult{ + pinned: 0, + total: 1, + }, + }, + pr: make(map[checker.DependencyUseType]pinnedResult), }, - expectedPinnedResult: map[checker.DependencyUseType]pinnedResult{}, }, { - name: "Third party owned.", - dependency: &checker.Dependency{ - Type: checker.DependencyUseTypeGHAction, - Location: &checker.File{ - Snippet: "other/checkout@v2", + name: "add pinned Third-party action", + args: args{ + dependencyType: checker.DependencyUseTypeGHAction, + outcome: 
finding.OutcomePositive, + w: &workflowPinningResult{}, + snippet: stringAsPointer("other/checkout@ffa6706ff2127a749973072756f83c532e43ed02"), + pr: make(map[checker.DependencyUseType]pinnedResult), + }, + want: want{ + w: &workflowPinningResult{ + thirdParties: pinnedResult{ + pinned: 1, + total: 1, + }, + gitHubOwned: pinnedResult{ + pinned: 0, + total: 0, + }, }, + pr: make(map[checker.DependencyUseType]pinnedResult), + }, + }, + { + name: "add unpinned Third-party action", + args: args{ + dependencyType: checker.DependencyUseTypeGHAction, + snippet: stringAsPointer("other/checkout@v2"), + outcome: finding.OutcomeNegative, + w: &workflowPinningResult{}, + pr: make(map[checker.DependencyUseType]pinnedResult), }, - expectedPinningResult: &worklowPinningResult{ - thirdParties: 2, - gitHubOwned: 0, + want: want{ + w: &workflowPinningResult{ + thirdParties: pinnedResult{ + pinned: 0, + total: 1, + }, + gitHubOwned: pinnedResult{ + pinned: 0, + total: 0, + }, + }, + pr: make(map[checker.DependencyUseType]pinnedResult), + }, + }, + { + name: "add pinned pip install", + args: args{ + dependencyType: checker.DependencyUseTypePipCommand, + outcome: finding.OutcomePositive, + w: &workflowPinningResult{}, + pr: make(map[checker.DependencyUseType]pinnedResult), + }, + want: want{ + w: &workflowPinningResult{}, + pr: map[checker.DependencyUseType]pinnedResult{ + checker.DependencyUseTypePipCommand: { + pinned: 1, + total: 1, + }, + }, + }, + }, + { + name: "add unpinned pip install", + args: args{ + dependencyType: checker.DependencyUseTypePipCommand, + outcome: finding.OutcomeNegative, + w: &workflowPinningResult{}, + pr: make(map[checker.DependencyUseType]pinnedResult), + }, + want: want{ + w: &workflowPinningResult{}, + pr: map[checker.DependencyUseType]pinnedResult{ + checker.DependencyUseTypePipCommand: { + pinned: 0, + total: 1, + }, + }, }, - expectedPinnedResult: map[checker.DependencyUseType]pinnedResult{}, }, } for _, tc := range tests { tc := tc t.Run(tc.name, 
func(t *testing.T) { t.Parallel() - wp := &worklowPinningResult{} - pr := make(map[checker.DependencyUseType]pinnedResult) - updatePinningResults(tc.dependency, wp, pr) - if tc.expectedPinningResult.thirdParties != wp.thirdParties && tc.expectedPinningResult.gitHubOwned != wp.gitHubOwned { //nolint:lll - t.Errorf("updatePinningResults mismatch (-want +got):\n%s", cmp.Diff(tc.expectedPinningResult, wp)) + updatePinningResults(tc.args.dependencyType, tc.args.outcome, tc.args.snippet, tc.args.w, tc.args.pr) + if tc.want.w.thirdParties != tc.args.w.thirdParties { + t.Errorf("updatePinningResults Third-party GitHub actions mismatch (-want +got):"+ + "\nThird-party pinned: %s\nThird-party total: %s", + cmp.Diff(tc.want.w.thirdParties.pinned, tc.args.w.thirdParties.pinned), + cmp.Diff(tc.want.w.thirdParties.total, tc.args.w.thirdParties.total)) + } + if tc.want.w.gitHubOwned != tc.args.w.gitHubOwned { + t.Errorf("updatePinningResults GitHub-owned GitHub actions mismatch (-want +got):"+ + "\nGitHub-owned pinned: %s\nGitHub-owned total: %s", + cmp.Diff(tc.want.w.gitHubOwned.pinned, tc.args.w.gitHubOwned.pinned), + cmp.Diff(tc.want.w.gitHubOwned.total, tc.args.w.gitHubOwned.total)) + } + for dependencyUseType := range tc.want.pr { + if tc.want.pr[dependencyUseType] != tc.args.pr[dependencyUseType] { + t.Errorf("updatePinningResults %s mismatch (-want +got):\npinned: %s\ntotal: %s", + dependencyUseType, + cmp.Diff(tc.want.pr[dependencyUseType].pinned, tc.args.pr[dependencyUseType].pinned), + cmp.Diff(tc.want.pr[dependencyUseType].total, tc.args.pr[dependencyUseType].total)) + } } }) } diff --git a/checks/evaluation/sast.go b/checks/evaluation/sast.go new file mode 100644 index 00000000000..f6a6897cc98 --- /dev/null +++ b/checks/evaluation/sast.go @@ -0,0 +1,186 @@ +// Copyright 2023 OpenSSF Scorecard Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package evaluation + +import ( + "github.com/ossf/scorecard/v4/checker" + sce "github.com/ossf/scorecard/v4/errors" + "github.com/ossf/scorecard/v4/finding" + "github.com/ossf/scorecard/v4/probes/sastToolCodeQLInstalled" + "github.com/ossf/scorecard/v4/probes/sastToolPysaInstalled" + "github.com/ossf/scorecard/v4/probes/sastToolQodanaInstalled" + "github.com/ossf/scorecard/v4/probes/sastToolRunsOnAllCommits" + "github.com/ossf/scorecard/v4/probes/sastToolSnykInstalled" + "github.com/ossf/scorecard/v4/probes/sastToolSonarInstalled" +) + +// SAST applies the score policy for the SAST check. +func SAST(name string, + findings []finding.Finding, dl checker.DetailLogger, +) checker.CheckResult { + // We have 3 unique probes, each should have a finding. 
+ expectedProbes := []string{ + sastToolCodeQLInstalled.Probe, + sastToolPysaInstalled.Probe, + sastToolQodanaInstalled.Probe, + sastToolRunsOnAllCommits.Probe, + sastToolSonarInstalled.Probe, + sastToolSnykInstalled.Probe, + } + + if !finding.UniqueProbesEqual(findings, expectedProbes) { + e := sce.WithMessage(sce.ErrScorecardInternal, "invalid probe results") + return checker.CreateRuntimeErrorResult(name, e) + } + + var sastScore, codeQlScore, pysaScore, qodanaScore, snykScore, sonarScore int + // Assign sastScore, codeQlScore and sonarScore + for i := range findings { + f := &findings[i] + switch f.Probe { + case sastToolRunsOnAllCommits.Probe: + sastScore = getSASTScore(f, dl) + case sastToolCodeQLInstalled.Probe: + codeQlScore = getSastToolScore(f, dl) + case sastToolSnykInstalled.Probe: + snykScore = getSastToolScore(f, dl) + case sastToolPysaInstalled.Probe: + pysaScore = getSastToolScore(f, dl) + case sastToolQodanaInstalled.Probe: + qodanaScore = getSastToolScore(f, dl) + case sastToolSonarInstalled.Probe: + if f.Outcome == finding.OutcomePositive { + sonarScore = checker.MaxResultScore + dl.Info(&checker.LogMessage{ + Text: f.Message, + Type: f.Location.Type, + Path: f.Location.Path, + Offset: *f.Location.LineStart, + EndOffset: *f.Location.LineEnd, + Snippet: *f.Location.Snippet, + }) + } else if f.Outcome == finding.OutcomeNegative { + sonarScore = checker.MinResultScore + } + } + } + + if sonarScore == checker.MaxResultScore { + return checker.CreateMaxScoreResult(name, "SAST tool detected") + } + if snykScore == checker.MaxResultScore { + return checker.CreateMaxScoreResult(name, "SAST tool detected: Snyk") + } + if pysaScore == checker.MaxResultScore { + return checker.CreateMaxScoreResult(name, "SAST tool detected: Pysa") + } + if qodanaScore == checker.MaxResultScore { + return checker.CreateMaxScoreResult(name, "SAST tool detected: Qodana") + } + + if sastScore == checker.InconclusiveResultScore && + codeQlScore == checker.InconclusiveResultScore 
{ + // That can never happen since sastToolInCheckRuns can never + // return checker.InconclusiveResultScore. + return checker.CreateRuntimeErrorResult(name, sce.ErrScorecardInternal) + } + + // Both scores are conclusive. + // We assume the CodeQl config uses a cron and is not enabled as pre-submit. + // TODO: verify the above comment in code. + // We encourage developers to have sast check run on every pre-submit rather + // than as cron jobs through the score computation below. + // Warning: there is a hidden assumption that *any* sast tool is equally good. + if sastScore != checker.InconclusiveResultScore && + codeQlScore != checker.InconclusiveResultScore { + switch { + case sastScore == checker.MaxResultScore: + return checker.CreateMaxScoreResult(name, "SAST tool is run on all commits") + case codeQlScore == checker.MinResultScore: + return checker.CreateResultWithScore(name, + checker.NormalizeReason("SAST tool is not run on all commits", sastScore), sastScore) + + // codeQl is enabled and sast has 0+ (but not all) PRs checks. + case codeQlScore == checker.MaxResultScore: + const sastWeight = 3 + const codeQlWeight = 7 + score := checker.AggregateScoresWithWeight(map[int]int{sastScore: sastWeight, codeQlScore: codeQlWeight}) + return checker.CreateResultWithScore(name, "SAST tool detected but not run on all commits", score) + default: + return checker.CreateRuntimeErrorResult(name, sce.WithMessage(sce.ErrScorecardInternal, "contact team")) + } + } + + // Sast inconclusive. + if codeQlScore != checker.InconclusiveResultScore { + if codeQlScore == checker.MaxResultScore { + return checker.CreateMaxScoreResult(name, "SAST tool detected: CodeQL") + } + return checker.CreateMinScoreResult(name, "no SAST tool detected") + } + + // CodeQl inconclusive. 
+ if sastScore != checker.InconclusiveResultScore { + if sastScore == checker.MaxResultScore { + return checker.CreateMaxScoreResult(name, "SAST tool is run on all commits") + } + + return checker.CreateResultWithScore(name, + checker.NormalizeReason("SAST tool is not run on all commits", sastScore), sastScore) + } + + // Should never happen. + return checker.CreateRuntimeErrorResult(name, sce.WithMessage(sce.ErrScorecardInternal, "contact team")) +} + +// getSASTScore returns the proportional score of how many commits +// run SAST tools. +func getSASTScore(f *finding.Finding, dl checker.DetailLogger) int { + switch f.Outcome { + case finding.OutcomeNotApplicable: + dl.Warn(&checker.LogMessage{ + Text: f.Message, + }) + return checker.InconclusiveResultScore + case finding.OutcomePositive: + dl.Info(&checker.LogMessage{ + Text: f.Message, + }) + case finding.OutcomeNegative: + dl.Warn(&checker.LogMessage{ + Text: f.Message, + }) + default: + } + analyzed := f.Values[sastToolRunsOnAllCommits.AnalyzedPRsKey] + total := f.Values[sastToolRunsOnAllCommits.TotalPRsKey] + return checker.CreateProportionalScore(analyzed, total) +} + +// getSastToolScore returns positive if the project runs the Sast tool +// and negative if it doesn't. +func getSastToolScore(f *finding.Finding, dl checker.DetailLogger) int { + switch f.Outcome { + case finding.OutcomePositive: + dl.Info(&checker.LogMessage{ + Text: f.Message, + }) + return checker.MaxResultScore + case finding.OutcomeNegative: + return checker.MinResultScore + default: + return checker.InconclusiveResultScore + } +} diff --git a/checks/evaluation/sast_test.go b/checks/evaluation/sast_test.go new file mode 100644 index 00000000000..656ebe9afdc --- /dev/null +++ b/checks/evaluation/sast_test.go @@ -0,0 +1,307 @@ +// Copyright 2023 OpenSSF Scorecard Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package evaluation + +import ( + "testing" + + "github.com/ossf/scorecard/v4/checker" + sce "github.com/ossf/scorecard/v4/errors" + "github.com/ossf/scorecard/v4/finding" + "github.com/ossf/scorecard/v4/probes/sastToolRunsOnAllCommits" + scut "github.com/ossf/scorecard/v4/utests" +) + +func TestSAST(t *testing.T) { + snippet := "some code snippet" + sline := uint(10) + eline := uint(46) + t.Parallel() + tests := []struct { + name string + findings []finding.Finding + result scut.TestReturn + }{ + { + name: "SAST - Missing a probe", + findings: []finding.Finding{ + { + Probe: "sastToolCodeQLInstalled", + Outcome: finding.OutcomePositive, + }, + { + Probe: "sastToolSnykInstalled", + Outcome: finding.OutcomeNegative, + }, + { + Probe: sastToolRunsOnAllCommits.Probe, + Outcome: finding.OutcomePositive, + }, + }, + result: scut.TestReturn{ + Score: checker.InconclusiveResultScore, + Error: sce.ErrScorecardInternal, + }, + }, + { + name: "Sonar and codeQL is installed. 
Snyk, Qodana and Pysa are not installed.", + findings: []finding.Finding{ + { + Probe: "sastToolCodeQLInstalled", + Outcome: finding.OutcomePositive, + }, + { + Probe: "sastToolSnykInstalled", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "sastToolPysaInstalled", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "sastToolQodanaInstalled", + Outcome: finding.OutcomeNegative, + }, + { + Probe: sastToolRunsOnAllCommits.Probe, + Outcome: finding.OutcomePositive, + Values: map[string]int{ + sastToolRunsOnAllCommits.AnalyzedPRsKey: 1, + sastToolRunsOnAllCommits.TotalPRsKey: 2, + }, + }, + { + Probe: "sastToolSonarInstalled", + Outcome: finding.OutcomePositive, + Location: &finding.Location{ + Type: finding.FileTypeSource, + Path: "path/to/file.txt", + LineStart: &sline, + LineEnd: &eline, + Snippet: &snippet, + }, + }, + }, + result: scut.TestReturn{ + Score: 10, + NumberOfInfo: 3, + NumberOfWarn: 0, + }, + }, + { + name: "Pysa is installed. CodeQL, Snyk, Qodana and Sonar are not installed.", + findings: []finding.Finding{ + { + Probe: "sastToolCodeQLInstalled", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "sastToolSnykInstalled", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "sastToolPysaInstalled", + Outcome: finding.OutcomePositive, + }, + { + Probe: "sastToolQodanaInstalled", + Outcome: finding.OutcomeNegative, + }, + { + Probe: sastToolRunsOnAllCommits.Probe, + Outcome: finding.OutcomePositive, + Values: map[string]int{ + sastToolRunsOnAllCommits.AnalyzedPRsKey: 1, + sastToolRunsOnAllCommits.TotalPRsKey: 2, + }, + }, + { + Probe: "sastToolSonarInstalled", + Outcome: finding.OutcomeNegative, + }, + }, + result: scut.TestReturn{ + Score: 10, + NumberOfInfo: 2, + NumberOfWarn: 0, + }, + }, + { + name: `Sonar is installed. CodeQL, Snyk, Pysa, Qodana are not installed. 
+ Does not have info about whether SAST runs + on every commit.`, + findings: []finding.Finding{ + { + Probe: "sastToolCodeQLInstalled", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "sastToolSnykInstalled", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "sastToolQodanaInstalled", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "sastToolPysaInstalled", + Outcome: finding.OutcomeNegative, + }, + { + Probe: sastToolRunsOnAllCommits.Probe, + Outcome: finding.OutcomeNotApplicable, + }, + { + Probe: "sastToolSonarInstalled", + Outcome: finding.OutcomePositive, + Location: &finding.Location{ + Type: finding.FileTypeSource, + Path: "path/to/file.txt", + LineStart: &sline, + LineEnd: &eline, + Snippet: &snippet, + }, + }, + }, + result: scut.TestReturn{ + Score: 10, + NumberOfInfo: 1, + NumberOfWarn: 1, + }, + }, + { + name: "Sonar, CodeQL, Snyk, Qodana and Pysa are not installed", + findings: []finding.Finding{ + { + Probe: "sastToolCodeQLInstalled", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "sastToolSnykInstalled", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "sastToolPysaInstalled", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "sastToolQodanaInstalled", + Outcome: finding.OutcomeNegative, + }, + { + Probe: sastToolRunsOnAllCommits.Probe, + Outcome: finding.OutcomeNegative, + Values: map[string]int{ + sastToolRunsOnAllCommits.AnalyzedPRsKey: 1, + sastToolRunsOnAllCommits.TotalPRsKey: 3, + }, + }, + { + Probe: "sastToolSonarInstalled", + Outcome: finding.OutcomeNegative, + }, + }, + result: scut.TestReturn{ + Score: 3, + NumberOfWarn: 1, + NumberOfInfo: 0, + }, + }, + { + name: "Snyk is installed, Sonar, Qodana and CodeQL are not installed", + findings: []finding.Finding{ + { + Probe: "sastToolCodeQLInstalled", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "sastToolSnykInstalled", + Outcome: finding.OutcomePositive, + }, + { + Probe: sastToolRunsOnAllCommits.Probe, + Outcome: finding.OutcomePositive, + Values: 
map[string]int{ + sastToolRunsOnAllCommits.AnalyzedPRsKey: 1, + sastToolRunsOnAllCommits.TotalPRsKey: 3, + }, + }, + { + Probe: "sastToolSonarInstalled", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "sastToolPysaInstalled", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "sastToolQodanaInstalled", + Outcome: finding.OutcomeNegative, + }, + }, + result: scut.TestReturn{ + Score: 10, + NumberOfWarn: 0, + NumberOfInfo: 2, + }, + }, + { + name: "Qodana is installed, Snyk, Sonar, and CodeQL are not installed", + findings: []finding.Finding{ + { + Probe: "sastToolCodeQLInstalled", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "sastToolSnykInstalled", + Outcome: finding.OutcomeNegative, + }, + { + Probe: sastToolRunsOnAllCommits.Probe, + Outcome: finding.OutcomePositive, + Values: map[string]int{ + sastToolRunsOnAllCommits.AnalyzedPRsKey: 1, + sastToolRunsOnAllCommits.TotalPRsKey: 3, + }, + }, + { + Probe: "sastToolSonarInstalled", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "sastToolPysaInstalled", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "sastToolQodanaInstalled", + Outcome: finding.OutcomePositive, + }, + }, + result: scut.TestReturn{ + Score: 10, + NumberOfWarn: 0, + NumberOfInfo: 2, + }, + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + dl := scut.TestDetailLogger{} + got := SAST(tt.name, tt.findings, &dl) + scut.ValidateTestReturn(t, tt.name, &tt.result, &got, &dl) + }) + } +} diff --git a/checks/evaluation/security_policy.go b/checks/evaluation/security_policy.go index 3512ad67e97..10125c6a312 100644 --- a/checks/evaluation/security_policy.go +++ b/checks/evaluation/security_policy.go @@ -18,139 +18,71 @@ import ( "github.com/ossf/scorecard/v4/checker" sce "github.com/ossf/scorecard/v4/errors" "github.com/ossf/scorecard/v4/finding" + "github.com/ossf/scorecard/v4/probes/securityPolicyContainsLinks" + "github.com/ossf/scorecard/v4/probes/securityPolicyContainsText" + 
"github.com/ossf/scorecard/v4/probes/securityPolicyContainsVulnerabilityDisclosure" + "github.com/ossf/scorecard/v4/probes/securityPolicyPresent" ) -func scoreSecurityCriteria(f checker.File, - info []checker.SecurityPolicyInformation, - dl checker.DetailLogger, -) int { - var urls, emails, discvuls, linkedContentLen, score int - - emails = countSecInfo(info, checker.SecurityPolicyInformationTypeEmail, true) - urls = countSecInfo(info, checker.SecurityPolicyInformationTypeLink, true) - discvuls = countSecInfo(info, checker.SecurityPolicyInformationTypeText, false) - - for _, i := range findSecInfo(info, checker.SecurityPolicyInformationTypeEmail, true) { - linkedContentLen += len(i.InformationValue.Match) - } - for _, i := range findSecInfo(info, checker.SecurityPolicyInformationTypeLink, true) { - linkedContentLen += len(i.InformationValue.Match) - } - - msg := checker.LogMessage{ - Path: f.Path, - Type: f.Type, - Text: "", - } - - // #1: linked content found (email/http): score += 6 - if (urls + emails) > 0 { - score += 6 - msg.Text = "Found linked content in security policy" - dl.Info(&msg) - } else { - msg.Text = "no email or URL found in security policy" - dl.Warn(&msg) - } - - // #2: more bytes than the sum of the length of all the linked content found: score += 3 - // rationale: there appears to be information and context around those links - // no credit if there is just a link to a site or an email address (those given above) - // the test here is that each piece of linked content will likely contain a space - // before and after the content (hence the two multiplier) - if f.FileSize > 1 && (f.FileSize > uint(linkedContentLen+((urls+emails)*2))) { - score += 3 - msg.Text = "Found text in security policy" - dl.Info(&msg) - } else { - msg.Text = "No text (beyond any linked content) found in security policy" - dl.Warn(&msg) +// SecurityPolicy applies the score policy for the Security-Policy check. 
+func SecurityPolicy(name string, findings []finding.Finding, dl checker.DetailLogger) checker.CheckResult { + // We have 4 unique probes, each should have a finding. + expectedProbes := []string{ + securityPolicyContainsVulnerabilityDisclosure.Probe, + securityPolicyContainsLinks.Probe, + securityPolicyContainsText.Probe, + securityPolicyPresent.Probe, } - - // #3: found whole number(s) and or match(es) to "Disclos" and or "Vuln": score += 1 - // rationale: works towards the intent of the security policy file - // regarding whom to contact about vuls and disclosures and timing - // e.g., we'll disclose, report a vulnerability, 30 days, etc. - // looking for at least 2 hits - if discvuls > 1 { - score += 1 - msg.Text = "Found disclosure, vulnerability, and/or timelines in security policy" - dl.Info(&msg) - } else { - msg.Text = "One or no descriptive hints of disclosure, vulnerability, and/or timelines in security policy" - dl.Warn(&msg) + if !finding.UniqueProbesEqual(findings, expectedProbes) { + e := sce.WithMessage(sce.ErrScorecardInternal, "invalid probe results") + return checker.CreateRuntimeErrorResult(name, e) } - return score -} - -func countSecInfo(secInfo []checker.SecurityPolicyInformation, - infoType checker.SecurityPolicyInformationType, - unique bool, -) int { - keys := make(map[string]bool) - count := 0 - for _, entry := range secInfo { - if _, present := keys[entry.InformationValue.Match]; !present && entry.InformationType == infoType { - keys[entry.InformationValue.Match] = true - count += 1 - } else if !unique && entry.InformationType == infoType { - count += 1 + score := 0 + m := make(map[string]bool) + for i := range findings { + f := &findings[i] + if f.Outcome == finding.OutcomePositive { + switch f.Probe { + case securityPolicyContainsVulnerabilityDisclosure.Probe: + score += scoreProbeOnce(f.Probe, m, 1) + case securityPolicyContainsLinks.Probe: + score += scoreProbeOnce(f.Probe, m, 6) + case securityPolicyContainsText.Probe: + score += 
scoreProbeOnce(f.Probe, m, 3) + case securityPolicyPresent.Probe: + m[f.Probe] = true + default: + e := sce.WithMessage(sce.ErrScorecardInternal, "unknown probe results") + return checker.CreateRuntimeErrorResult(name, e) + } } } - return count -} - -func findSecInfo(secInfo []checker.SecurityPolicyInformation, - infoType checker.SecurityPolicyInformationType, - unique bool, -) []checker.SecurityPolicyInformation { - keys := make(map[string]bool) - var secList []checker.SecurityPolicyInformation - for _, entry := range secInfo { - if _, present := keys[entry.InformationValue.Match]; !present && entry.InformationType == infoType { - keys[entry.InformationValue.Match] = true - secList = append(secList, entry) - } else if !unique && entry.InformationType == infoType { - secList = append(secList, entry) + _, defined := m[securityPolicyPresent.Probe] + if !defined { + if score > 0 { + e := sce.WithMessage(sce.ErrScorecardInternal, "score calculation problem") + return checker.CreateRuntimeErrorResult(name, e) } - } - return secList -} - -// SecurityPolicy applies the score policy for the Security-Policy check. -func SecurityPolicy(name string, dl checker.DetailLogger, r *checker.SecurityPolicyData) checker.CheckResult { - if r == nil { - e := sce.WithMessage(sce.ErrScorecardInternal, "empty raw data") - return checker.CreateRuntimeErrorResult(name, e) - } - // Apply the policy evaluation. - if len(r.PolicyFiles) == 0 { - // If the file is unset, directly return as not detected. + // Log all findings. + checker.LogFindings(findings, dl) return checker.CreateMinScoreResult(name, "security policy file not detected") } - // TODO: although this a loop, the raw checks will only return one security policy - // when more than one security policy file can be aggregated into a composite - // score, that logic can be comprehended here. - score := 0 - for _, spd := range r.PolicyFiles { - score = scoreSecurityCriteria(spd.File, - spd.Information, dl) + // Log all findings. 
+ // NOTE: if the score is checker.MaxResultScore, then all findings are positive. + // If the score is less than checker.MaxResultScore, some findings are negative, + // so we log both positive and negative findings. + checker.LogFindings(findings, dl) - msg := checker.LogMessage{ - Path: spd.File.Path, - Type: spd.File.Type, - } - if msg.Type == finding.FileTypeURL { - msg.Text = "security policy detected in org repo" - } else { - msg.Text = "security policy detected in current repo" - } + return checker.CreateResultWithScore(name, "security policy file detected", score) +} - dl.Info(&msg) +func scoreProbeOnce(probeID string, m map[string]bool, bump int) int { + if _, exists := m[probeID]; !exists { + m[probeID] = true + return bump } - - return checker.CreateResultWithScore(name, "security policy file detected", score) + return 0 } diff --git a/checks/evaluation/security_policy_test.go b/checks/evaluation/security_policy_test.go index 2c6e45fe9b2..ac55f7073c4 100644 --- a/checks/evaluation/security_policy_test.go +++ b/checks/evaluation/security_policy_test.go @@ -18,218 +18,179 @@ import ( "testing" "github.com/ossf/scorecard/v4/checker" + sce "github.com/ossf/scorecard/v4/errors" "github.com/ossf/scorecard/v4/finding" scut "github.com/ossf/scorecard/v4/utests" ) func TestSecurityPolicy(t *testing.T) { t.Parallel() - //nolint - type args struct { - name string - r *checker.SecurityPolicyData - } - //nolint tests := []struct { - name string - args args - err bool - want checker.CheckResult + name string + findings []finding.Finding + result scut.TestReturn }{ { - name: "test_security_policy_1", - args: args{ - name: "test_security_policy_1", + name: "missing findings links", + findings: []finding.Finding{ + { + Probe: "securityPolicyContainsVulnerabilityDisclosure", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "securityPolicyContainsText", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "securityPolicyPresent", + Outcome: finding.OutcomeNegative, 
+ }, }, - want: checker.CheckResult{ - Score: -1, + result: scut.TestReturn{ + Score: checker.InconclusiveResultScore, + Error: sce.ErrScorecardInternal, }, }, { - name: "test_security_policy_2", - args: args{ - name: "test_security_policy_2", - r: &checker.SecurityPolicyData{}, + name: "invalid probe name", + findings: []finding.Finding{ + { + Probe: "securityPolicyContainsVulnerabilityDisclosure", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "securityPolicyContainsLinks", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "securityPolicyContainsText", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "securityPolicyPresent", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "securityPolicyInvalidProbeName", + Outcome: finding.OutcomeNegative, + }, }, - want: checker.CheckResult{ - Score: 0, + result: scut.TestReturn{ + Score: checker.InconclusiveResultScore, + Error: sce.ErrScorecardInternal, }, }, { - name: "test_security_policy_3", - args: args{ - name: "test_security_policy_3", - r: &checker.SecurityPolicyData{ - PolicyFiles: []checker.SecurityPolicyFile{ - { - File: checker.File{ - Path: "/etc/security/pam_env.conf", - Type: finding.FileTypeURL, - }, - Information: make([]checker.SecurityPolicyInformation, 0), - }, - }, + name: "file found only", + findings: []finding.Finding{ + { + Probe: "securityPolicyContainsVulnerabilityDisclosure", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "securityPolicyContainsLinks", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "securityPolicyContainsText", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "securityPolicyPresent", + Outcome: finding.OutcomePositive, }, }, - want: checker.CheckResult{ - Score: 0, + result: scut.TestReturn{ + Score: checker.MinResultScore, + NumberOfInfo: 1, + NumberOfWarn: 3, }, }, { - name: "test_security_policy_4", - args: args{ - name: "test_security_policy_4", - r: &checker.SecurityPolicyData{ - PolicyFiles: []checker.SecurityPolicyFile{ - { - File: 
checker.File{ - Path: "/etc/security/pam_env.conf", - }, - Information: make([]checker.SecurityPolicyInformation, 0), - }, - }, + name: "file not found with positive probes", + findings: []finding.Finding{ + { + Probe: "securityPolicyContainsVulnerabilityDisclosure", + Outcome: finding.OutcomePositive, + }, + { + Probe: "securityPolicyContainsLinks", + Outcome: finding.OutcomePositive, + }, + { + Probe: "securityPolicyContainsText", + Outcome: finding.OutcomePositive, + }, + { + Probe: "securityPolicyPresent", + Outcome: finding.OutcomeNegative, }, }, - want: checker.CheckResult{ - Score: 0, + result: scut.TestReturn{ + Score: checker.InconclusiveResultScore, + Error: sce.ErrScorecardInternal, }, }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - x := checker.CheckRequest{Dlogger: &scut.TestDetailLogger{}} - - got := SecurityPolicy(tt.args.name, x.Dlogger, tt.args.r) - if tt.err { - if got.Score != -1 { - t.Errorf("SecurityPolicy() = %v, want %v", got, tt.want) - } - } - if got.Score != tt.want.Score { - t.Errorf("SecurityPolicy() = %v, want %v for %v", got.Score, tt.want.Score, tt.name) - } - }) - } -} - -func TestScoreSecurityCriteria(t *testing.T) { - t.Parallel() - tests := []struct { //nolint:govet - name string - file checker.File - info []checker.SecurityPolicyInformation - expectedScore int - }{ { - name: "Full score", - file: checker.File{ - Path: "/path/to/security_policy.md", - FileSize: 100, - }, - info: []checker.SecurityPolicyInformation{ + name: "file found with no disclosure and text", + findings: []finding.Finding{ { - InformationType: checker.SecurityPolicyInformationTypeEmail, - InformationValue: checker.SecurityPolicyValueType{ - Match: "security@example.com", - LineNumber: 2, - Offset: 0, - }, + Probe: "securityPolicyContainsVulnerabilityDisclosure", + Outcome: finding.OutcomeNegative, }, { - InformationType: checker.SecurityPolicyInformationTypeLink, - InformationValue: 
checker.SecurityPolicyValueType{ - Match: "https://example.com/report", - LineNumber: 4, - Offset: 0, - }, + Probe: "securityPolicyContainsLinks", + Outcome: finding.OutcomePositive, }, { - InformationType: checker.SecurityPolicyInformationTypeText, - InformationValue: checker.SecurityPolicyValueType{ - Match: "Disclose vulnerability", - LineNumber: 6, - Offset: 0, - }, + Probe: "securityPolicyContainsText", + Outcome: finding.OutcomeNegative, }, { - InformationType: checker.SecurityPolicyInformationTypeText, - InformationValue: checker.SecurityPolicyValueType{ - Match: "30 days", - LineNumber: 7, - Offset: 0, - }, + Probe: "securityPolicyPresent", + Outcome: finding.OutcomePositive, }, }, - expectedScore: 10, + result: scut.TestReturn{ + Score: 6, + NumberOfInfo: 2, + NumberOfWarn: 2, + }, }, { - name: "Partial score", - file: checker.File{ - Path: "/path/to/security_policy.md", - FileSize: 50, - }, - info: []checker.SecurityPolicyInformation{ + name: "file found all positive", + findings: []finding.Finding{ { - InformationType: checker.SecurityPolicyInformationTypeLink, - InformationValue: checker.SecurityPolicyValueType{ - Match: "https://example.com/report", - LineNumber: 4, - Offset: 0, - }, + Probe: "securityPolicyContainsVulnerabilityDisclosure", + Outcome: finding.OutcomePositive, }, { - InformationType: checker.SecurityPolicyInformationTypeText, - InformationValue: checker.SecurityPolicyValueType{ - Match: "Disclose vulnerability", - LineNumber: 6, - Offset: 0, - }, + Probe: "securityPolicyContainsLinks", + Outcome: finding.OutcomePositive, + }, + { + Probe: "securityPolicyContainsText", + Outcome: finding.OutcomePositive, }, - }, - expectedScore: 9, - }, - { - name: "Low score", - file: checker.File{ - Path: "/path/to/security_policy.md", - FileSize: 10, - }, - info: []checker.SecurityPolicyInformation{ { - InformationType: checker.SecurityPolicyInformationTypeEmail, - InformationValue: checker.SecurityPolicyValueType{ - Match: "security@example.com", - 
LineNumber: 2, - Offset: 0, - }, + Probe: "securityPolicyPresent", + Outcome: finding.OutcomePositive, }, }, - expectedScore: 6, - }, - { - name: "Low score", - file: checker.File{ - Path: "/path/to/security_policy.md", - FileSize: 5, + result: scut.TestReturn{ + Score: checker.MaxResultScore, + NumberOfInfo: 4, }, - info: []checker.SecurityPolicyInformation{}, - expectedScore: 3, }, } - for _, tc := range tests { - tc := tc - t.Run(tc.name, func(t *testing.T) { + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { t.Parallel() - mockDetailLogger := &scut.TestDetailLogger{} - score := scoreSecurityCriteria(tc.file, tc.info, mockDetailLogger) - - if score != tc.expectedScore { - t.Errorf("scoreSecurityCriteria() mismatch, expected score: %d, got: %d", tc.expectedScore, score) - } + dl := scut.TestDetailLogger{} + got := SecurityPolicy(tt.name, tt.findings, &dl) + scut.ValidateTestReturn(t, tt.name, &tt.result, &got, &dl) }) } } diff --git a/checks/evaluation/signed_releases.go b/checks/evaluation/signed_releases.go index f50c7895517..97b07b51ac8 100644 --- a/checks/evaluation/signed_releases.go +++ b/checks/evaluation/signed_releases.go @@ -15,119 +15,142 @@ package evaluation import ( + "errors" "fmt" "math" - "strings" "github.com/ossf/scorecard/v4/checker" sce "github.com/ossf/scorecard/v4/errors" "github.com/ossf/scorecard/v4/finding" + "github.com/ossf/scorecard/v4/probes/releasesAreSigned" + "github.com/ossf/scorecard/v4/probes/releasesHaveProvenance" ) -var ( - signatureExtensions = []string{".asc", ".minisig", ".sig", ".sign"} - provenanceExtensions = []string{".intoto.jsonl"} -) - -const releaseLookBack = 5 +var errNoReleaseFound = errors.New("no release found") // SignedReleases applies the score policy for the Signed-Releases check. 
-// nolint -func SignedReleases(name string, dl checker.DetailLogger, r *checker.SignedReleasesData) checker.CheckResult { - if r == nil { - e := sce.WithMessage(sce.ErrScorecardInternal, "empty raw data") +func SignedReleases(name string, + findings []finding.Finding, dl checker.DetailLogger, +) checker.CheckResult { + expectedProbes := []string{ + releasesAreSigned.Probe, + releasesHaveProvenance.Probe, + } + + if !finding.UniqueProbesEqual(findings, expectedProbes) { + e := sce.WithMessage(sce.ErrScorecardInternal, "invalid probe results") return checker.CreateRuntimeErrorResult(name, e) } - totalReleases := 0 - total := 0 - score := 0 - for _, release := range r.Releases { - if len(release.Assets) == 0 { - continue + // Debug all releases and check for OutcomeNotApplicable + // All probes have OutcomeNotApplicable in case the project has no + // releases. Therefore, check for any finding with OutcomeNotApplicable. + loggedReleases := make([]string, 0) + for i := range findings { + f := &findings[i] + + // Debug release name + if f.Outcome == finding.OutcomeNotApplicable { + // Generic summary. + return checker.CreateInconclusiveResult(name, "no releases found") + } + releaseName := getReleaseName(f) + if releaseName == "" { + // Generic summary. + return checker.CreateRuntimeErrorResult(name, errNoReleaseFound) } - dl.Debug(&checker.LogMessage{ - Text: fmt.Sprintf("GitHub release found: %s", release.TagName), - }) - - totalReleases++ - signed := false - hasProvenance := false - - // Check for provenance. - for _, asset := range release.Assets { - for _, suffix := range provenanceExtensions { - if strings.HasSuffix(asset.Name, suffix) { - dl.Info(&checker.LogMessage{ - Path: asset.URL, - Type: finding.FileTypeURL, - Text: fmt.Sprintf("provenance for release artifact: %s", asset.Name), - }) - hasProvenance = true - total++ - break - } - } - if hasProvenance { - // Assign maximum points. 
- score += 10 - break - } + if !contains(loggedReleases, releaseName) { + dl.Debug(&checker.LogMessage{ + Text: fmt.Sprintf("GitHub release found: %s", releaseName), + }) + loggedReleases = append(loggedReleases, releaseName) } - if hasProvenance { - continue + // Check if outcome is NotApplicable + } + + totalPositive := 0 + releaseMap := make(map[string]int) + uniqueReleaseTags := make([]string, 0) + checker.LogFindings(findings, dl) + + for i := range findings { + f := &findings[i] + + releaseName := getReleaseName(f) + if releaseName == "" { + return checker.CreateRuntimeErrorResult(name, errNoReleaseFound) } + if !contains(uniqueReleaseTags, releaseName) { + uniqueReleaseTags = append(uniqueReleaseTags, releaseName) + } + + if f.Outcome == finding.OutcomePositive { + totalPositive++ - dl.Warn(&checker.LogMessage{ - Path: release.URL, - Type: finding.FileTypeURL, - Text: fmt.Sprintf("release artifact %s does not have provenance", release.TagName), - }) - - // No provenance. Try signatures. - for _, asset := range release.Assets { - for _, suffix := range signatureExtensions { - if strings.HasSuffix(asset.Name, suffix) { - dl.Info(&checker.LogMessage{ - Path: asset.URL, - Type: finding.FileTypeURL, - Text: fmt.Sprintf("signed release artifact: %s", asset.Name), - }) - signed = true - total++ - break + switch f.Probe { + case releasesAreSigned.Probe: + if _, ok := releaseMap[releaseName]; !ok { + releaseMap[releaseName] = 8 } - } - if signed { - // Assign 8 points. 
- score += 8 - break + case releasesHaveProvenance.Probe: + releaseMap[releaseName] = 10 } } + } - if !signed { - dl.Warn(&checker.LogMessage{ - Path: release.URL, - Type: finding.FileTypeURL, - Text: fmt.Sprintf("release artifact %s not signed", release.TagName), - }) - } - if totalReleases >= releaseLookBack { - break - } + if totalPositive == 0 { + return checker.CreateMinScoreResult(name, "Project has not signed or included provenance with any releases.") } + totalReleases := len(uniqueReleaseTags) + + // TODO, the evaluation code should be the one limiting to 5, not assuming the probes have done it already + // however there are some ordering issues to consider, so going with the easy way for now + if totalReleases > 5 { + err := sce.CreateInternal(sce.ErrScorecardInternal, "too many releases, please report this") + return checker.CreateRuntimeErrorResult(name, err) + } if totalReleases == 0 { - dl.Warn(&checker.LogMessage{ - Text: "no GitHub releases found", - }) - // Generic summary. + // This should not happen in production, but it is useful to have + // for testing. 
return checker.CreateInconclusiveResult(name, "no releases found") } + score := 0 + for _, s := range releaseMap { + score += s + } + score = int(math.Floor(float64(score) / float64(totalReleases))) - reason := fmt.Sprintf("%d out of %d artifacts are signed or have provenance", total, totalReleases) + reason := fmt.Sprintf("%d out of the last %d releases have a total of %d signed artifacts.", + len(releaseMap), totalReleases, totalPositive) return checker.CreateResultWithScore(name, reason, score) } + +func getReleaseName(f *finding.Finding) string { + m := f.Values + for k, v := range m { + var value int + switch f.Probe { + case releasesAreSigned.Probe: + value = int(releasesAreSigned.ValueTypeRelease) + case releasesHaveProvenance.Probe: + value = int(releasesHaveProvenance.ValueTypeRelease) + } + if v == value { + return k + } + } + return "" +} + +func contains(releases []string, release string) bool { + for _, r := range releases { + if r == release { + return true + } + } + return false +} diff --git a/checks/evaluation/signed_releases_test.go b/checks/evaluation/signed_releases_test.go index 35cf5cff0ff..e02516f4032 100644 --- a/checks/evaluation/signed_releases_test.go +++ b/checks/evaluation/signed_releases_test.go @@ -15,93 +15,350 @@ package evaluation import ( + "fmt" "testing" - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/ossf/scorecard/v4/checker" - "github.com/ossf/scorecard/v4/clients" + sce "github.com/ossf/scorecard/v4/errors" + "github.com/ossf/scorecard/v4/finding" + "github.com/ossf/scorecard/v4/probes/releasesAreSigned" + "github.com/ossf/scorecard/v4/probes/releasesHaveProvenance" scut "github.com/ossf/scorecard/v4/utests" ) +const ( + release0 = 0 + release1 = 1 + release2 = 2 + release3 = 3 + release4 = 4 + release5 = 5 +) + +const ( + asset0 = 0 + asset1 = 1 + asset2 = 2 + asset3 = 3 +) + +func signedProbe(release, asset int, outcome finding.Outcome) finding.Finding { + return finding.Finding{ + 
Probe: releasesAreSigned.Probe, + Outcome: outcome, + Values: map[string]int{ + fmt.Sprintf("v%d", release): int(releasesAreSigned.ValueTypeRelease), + fmt.Sprintf("artifact-%d", asset): int(releasesAreSigned.ValueTypeReleaseAsset), + }, + } +} + +func provenanceProbe(release, asset int, outcome finding.Outcome) finding.Finding { + return finding.Finding{ + Probe: releasesHaveProvenance.Probe, + Outcome: outcome, + Values: map[string]int{ + fmt.Sprintf("v%d", release): int(releasesHaveProvenance.ValueTypeRelease), + fmt.Sprintf("artifact-%d", asset): int(releasesHaveProvenance.ValueTypeReleaseAsset), + }, + } +} + func TestSignedReleases(t *testing.T) { + t.Parallel() tests := []struct { - name string - releases []clients.Release - expectedResult checker.CheckResult + name string + findings []finding.Finding + result scut.TestReturn }{ { - name: "Full score", - releases: []clients.Release{ - { - TagName: "v1.0", - Assets: []clients.ReleaseAsset{ - {Name: "binary.tar.gz"}, - {Name: "binary.tar.gz.sig"}, - {Name: "binary.tar.gz.intoto.jsonl"}, - }, - }, + name: "Has one release that is signed but no provenance", + findings: []finding.Finding{ + signedProbe(0, 0, finding.OutcomePositive), + provenanceProbe(0, 0, finding.OutcomeNegative), }, - expectedResult: checker.CheckResult{ - Name: "Signed-Releases", - Version: 2, - Score: 10, - Reason: "1 out of 1 artifacts are signed or have provenance", + result: scut.TestReturn{ + Score: 8, + NumberOfInfo: 1, + NumberOfWarn: 1, + NumberOfDebug: 1, }, }, { - name: "Partial score", - releases: []clients.Release{ - { - TagName: "v1.0", - Assets: []clients.ReleaseAsset{ - {Name: "binary.tar.gz"}, - {Name: "binary.tar.gz.sig"}, - }, - }, + name: "Has one release that is signed and has provenance", + findings: []finding.Finding{ + signedProbe(0, 0, finding.OutcomePositive), + provenanceProbe(0, 0, finding.OutcomePositive), }, - expectedResult: checker.CheckResult{ - Name: "Signed-Releases", - Version: 2, - Score: 8, - Reason: "1 
out of 1 artifacts are signed or have provenance", + result: scut.TestReturn{ + Score: 10, + NumberOfInfo: 2, + NumberOfDebug: 1, }, }, { - name: "No score", - releases: []clients.Release{ - { - TagName: "v1.0", - Assets: []clients.ReleaseAsset{ - {Name: "binary.tar.gz"}, - }, - }, + name: "Has one release that is not signed but has provenance", + findings: []finding.Finding{ + signedProbe(0, 0, finding.OutcomeNegative), + provenanceProbe(0, 0, finding.OutcomePositive), + }, + result: scut.TestReturn{ + Score: checker.MaxResultScore, + NumberOfInfo: 1, + NumberOfWarn: 1, + NumberOfDebug: 1, + }, + }, + + { + name: "3 releases. One release has one signed, and one release has two provenance.", + findings: []finding.Finding{ + // Release 1: + // Asset 1: + signedProbe(release0, asset0, finding.OutcomeNegative), + provenanceProbe(release0, asset0, finding.OutcomeNegative), + // Asset 2: + signedProbe(release0, asset1, finding.OutcomePositive), + provenanceProbe(release0, asset1, finding.OutcomeNegative), + // Release 2 + // Asset 1: + signedProbe(release1, asset0, finding.OutcomeNegative), + provenanceProbe(release1, asset0, finding.OutcomeNegative), + // Release 2 + // Asset 2: + signedProbe(release1, asset1, finding.OutcomeNegative), + provenanceProbe(release1, asset1, finding.OutcomeNegative), + // Release 2 + // Asset 3: + signedProbe(release1, asset2, finding.OutcomeNegative), + provenanceProbe(release1, asset2, finding.OutcomeNegative), + // Release 3 + // Asset 1: + signedProbe(release2, asset0, finding.OutcomeNegative), + provenanceProbe(release2, asset0, finding.OutcomePositive), + // Asset 2: + signedProbe(release2, asset1, finding.OutcomeNegative), + provenanceProbe(release2, asset1, finding.OutcomePositive), + // Asset 3: + signedProbe(release2, asset2, finding.OutcomeNegative), + provenanceProbe(release2, asset2, finding.OutcomeNegative), }, - expectedResult: checker.CheckResult{ - Name: "Signed-Releases", - Version: 2, - Score: 0, - Reason: "0 out of 1 
artifacts are signed or have provenance", + result: scut.TestReturn{ + Score: 6, + NumberOfInfo: 3, + NumberOfWarn: 13, + NumberOfDebug: 3, }, }, { - name: "No releases", - releases: []clients.Release{}, - expectedResult: checker.CreateInconclusiveResult("Signed-Releases", "no releases found"), + name: "5 releases. Two releases have one signed each, and two releases have one provenance each.", + findings: []finding.Finding{ + // Release 1: + // Release 1, Asset 1: + signedProbe(release0, asset0, finding.OutcomeNegative), + provenanceProbe(release0, asset0, finding.OutcomeNegative), + signedProbe(release0, asset1, finding.OutcomePositive), + provenanceProbe(release0, asset1, finding.OutcomeNegative), + // Release 2: + // Release 2, Asset 1: + signedProbe(release1, asset1, finding.OutcomePositive), + provenanceProbe(release1, asset0, finding.OutcomeNegative), + // Release 2, Asset 2: + signedProbe(release1, asset1, finding.OutcomeNegative), + provenanceProbe(release1, asset1, finding.OutcomeNegative), + // Release 2, Asset 3: + signedProbe(release1, asset2, finding.OutcomeNegative), + provenanceProbe(release1, asset2, finding.OutcomeNegative), + // Release 3, Asset 1: + signedProbe(release2, asset0, finding.OutcomeNegative), + provenanceProbe(release2, asset0, finding.OutcomePositive), + // Release 3, Asset 2: + signedProbe(release2, asset1, finding.OutcomeNegative), + provenanceProbe(release2, asset1, finding.OutcomeNegative), + // Release 3, Asset 3: + signedProbe(release2, asset2, finding.OutcomeNegative), + provenanceProbe(release2, asset2, finding.OutcomeNegative), + // Release 4, Asset 1: + signedProbe(release3, asset0, finding.OutcomeNegative), + provenanceProbe(release3, asset0, finding.OutcomePositive), + // Release 4, Asset 2: + signedProbe(release3, asset1, finding.OutcomeNegative), + provenanceProbe(release3, asset1, finding.OutcomeNegative), + // Release 4, Asset 3: + signedProbe(release3, asset2, finding.OutcomeNegative), + provenanceProbe(release3, 
asset2, finding.OutcomeNegative), + // Release 5, Asset 1: + signedProbe(release4, asset0, finding.OutcomeNegative), + provenanceProbe(release4, asset0, finding.OutcomeNegative), + // Release 5, Asset 2: + signedProbe(release4, asset1, finding.OutcomeNegative), + provenanceProbe(release4, asset1, finding.OutcomeNegative), + // Release 5, Asset 3: + signedProbe(release4, asset2, finding.OutcomeNegative), + provenanceProbe(release4, asset2, finding.OutcomeNegative), + // Release 5, Asset 4: + signedProbe(release4, asset3, finding.OutcomeNegative), + provenanceProbe(release4, asset3, finding.OutcomeNegative), + }, + result: scut.TestReturn{ + Score: 7, + NumberOfInfo: 4, + NumberOfWarn: 26, + NumberOfDebug: 5, + }, + }, + { + name: "5 releases. All have one signed artifact.", + findings: []finding.Finding{ + // Release 1: + // Release 1, Asset 1: + signedProbe(release0, asset0, finding.OutcomeNegative), + provenanceProbe(release0, asset0, finding.OutcomeNegative), + signedProbe(release0, asset1, finding.OutcomePositive), + provenanceProbe(release0, asset1, finding.OutcomeNegative), + // Release 2: + // Release 2, Asset 1: + signedProbe(release1, asset0, finding.OutcomePositive), + provenanceProbe(release1, asset0, finding.OutcomeNegative), + // Release 2, Asset 2: + signedProbe(release1, asset1, finding.OutcomeNegative), + provenanceProbe(release1, asset1, finding.OutcomeNegative), + // Release 2, Asset 3: + signedProbe(release1, asset2, finding.OutcomeNegative), + provenanceProbe(release1, asset2, finding.OutcomeNegative), + // Release 3, Asset 1: + signedProbe(release2, asset0, finding.OutcomePositive), + provenanceProbe(release2, asset0, finding.OutcomePositive), + // Release 3, Asset 2: + signedProbe(release2, asset1, finding.OutcomeNegative), + provenanceProbe(release2, asset1, finding.OutcomeNegative), + // Release 3, Asset 3: + signedProbe(release2, asset2, finding.OutcomeNegative), + provenanceProbe(release2, asset2, finding.OutcomeNegative), + // Release 4, 
Asset 1: + signedProbe(release3, asset0, finding.OutcomePositive), + provenanceProbe(release3, asset0, finding.OutcomePositive), + // Release 4, Asset 2: + signedProbe(release3, asset1, finding.OutcomeNegative), + provenanceProbe(release3, asset1, finding.OutcomeNegative), + // Release 4, Asset 3: + signedProbe(release3, asset2, finding.OutcomeNegative), + provenanceProbe(release3, asset2, finding.OutcomeNegative), + // Release 5, Asset 1: + signedProbe(release4, asset0, finding.OutcomePositive), + provenanceProbe(release4, asset0, finding.OutcomeNegative), + // Release 5, Asset 2: + signedProbe(release4, asset1, finding.OutcomeNegative), + provenanceProbe(release4, asset1, finding.OutcomeNegative), + // Release 5, Asset 3: + signedProbe(release4, asset2, finding.OutcomeNegative), + provenanceProbe(release4, asset2, finding.OutcomeNegative), + // Release 5, Asset 4: + signedProbe(release4, asset3, finding.OutcomeNegative), + provenanceProbe(release4, asset3, finding.OutcomeNegative), + }, + result: scut.TestReturn{ + Score: 8, + NumberOfInfo: 7, + NumberOfWarn: 23, + NumberOfDebug: 5, + }, + }, + { + name: "too many releases (6 when lookback is 5)", + findings: []finding.Finding{ + // Release 1: + // Release 1, Asset 1: + signedProbe(release0, asset0, finding.OutcomePositive), + provenanceProbe(release0, asset0, finding.OutcomePositive), + // Release 2: + // Release 2, Asset 1: + signedProbe(release1, asset0, finding.OutcomePositive), + provenanceProbe(release1, asset0, finding.OutcomePositive), + // Release 3, Asset 1: + signedProbe(release2, asset0, finding.OutcomePositive), + provenanceProbe(release2, asset0, finding.OutcomePositive), + // Release 4, Asset 1: + signedProbe(release3, asset0, finding.OutcomePositive), + provenanceProbe(release3, asset0, finding.OutcomePositive), + // Release 5, Asset 1: + signedProbe(release4, asset0, finding.OutcomePositive), + provenanceProbe(release4, asset0, finding.OutcomePositive), + // Release 6, Asset 1: + 
signedProbe(release5, asset0, finding.OutcomePositive), + provenanceProbe(release5, asset0, finding.OutcomePositive), + }, + result: scut.TestReturn{ + Score: checker.InconclusiveResultScore, + Error: sce.ErrScorecardInternal, + NumberOfInfo: 12, // 2 (signed + provenance) for each release + NumberOfDebug: 6, // 1 for each release + }, }, } - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - dl := &scut.TestDetailLogger{} - data := &checker.SignedReleasesData{Releases: tc.releases} - actualResult := SignedReleases("Signed-Releases", dl, data) + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + dl := scut.TestDetailLogger{} + got := SignedReleases(tt.name, tt.findings, &dl) + scut.ValidateTestReturn(t, tt.name, &tt.result, &got, &dl) + }) + } +} - if !cmp.Equal(tc.expectedResult, actualResult, - cmpopts.IgnoreFields(checker.CheckResult{}, "Error")) { - t.Errorf("SignedReleases() mismatch (-want +got):\n%s", cmp.Diff(tc.expectedResult, actualResult, - cmpopts.IgnoreFields(checker.CheckResult{}, "Error"))) +func Test_getReleaseName(t *testing.T) { + t.Parallel() + type args struct { + f *finding.Finding + } + tests := []struct { + name string + args args + want string + }{ + { + name: "no release", + args: args{ + f: &finding.Finding{ + Values: map[string]int{}, + }, + }, + want: "", + }, + { + name: "release", + args: args{ + f: &finding.Finding{ + Values: map[string]int{ + "v1": int(releasesAreSigned.ValueTypeRelease), + }, + Probe: releasesAreSigned.Probe, + }, + }, + want: "v1", + }, + { + name: "release and asset", + args: args{ + f: &finding.Finding{ + Values: map[string]int{ + "v1": int(releasesAreSigned.ValueTypeRelease), + "artifact-1": int(releasesAreSigned.ValueTypeReleaseAsset), + }, + Probe: releasesAreSigned.Probe, + }, + }, + want: "v1", + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + if got := getReleaseName(tt.args.f); got != tt.want { + 
t.Errorf("getReleaseName() = %v, want %v", got, tt.want) } }) } diff --git a/checks/evaluation/vulnerabilities.go b/checks/evaluation/vulnerabilities.go index cecca7ba8c6..0f5c91da45a 100644 --- a/checks/evaluation/vulnerabilities.go +++ b/checks/evaluation/vulnerabilities.go @@ -16,45 +16,37 @@ package evaluation import ( "fmt" - "strings" - - "github.com/google/osv-scanner/pkg/grouper" "github.com/ossf/scorecard/v4/checker" sce "github.com/ossf/scorecard/v4/errors" + "github.com/ossf/scorecard/v4/finding" + "github.com/ossf/scorecard/v4/probes/hasOSVVulnerabilities" ) // Vulnerabilities applies the score policy for the Vulnerabilities check. -func Vulnerabilities(name string, dl checker.DetailLogger, - r *checker.VulnerabilitiesData, +func Vulnerabilities(name string, + findings []finding.Finding, + dl checker.DetailLogger, ) checker.CheckResult { - if r == nil { - e := sce.WithMessage(sce.ErrScorecardInternal, "empty raw data") - return checker.CreateRuntimeErrorResult(name, e) + expectedProbes := []string{ + hasOSVVulnerabilities.Probe, } - aliasVulnerabilities := []grouper.IDAliases{} - for _, vuln := range r.Vulnerabilities { - aliasVulnerabilities = append(aliasVulnerabilities, grouper.IDAliases(vuln)) + if !finding.UniqueProbesEqual(findings, expectedProbes) { + e := sce.WithMessage(sce.ErrScorecardInternal, "invalid probe results") + return checker.CreateRuntimeErrorResult(name, e) } - IDs := grouper.Group(aliasVulnerabilities) - score := checker.MaxResultScore - len(IDs) + vulnsFound := negativeFindings(findings) + numVulnsFound := len(vulnsFound) + checker.LogFindings(vulnsFound, dl) + + score := checker.MaxResultScore - numVulnsFound if score < checker.MinResultScore { score = checker.MinResultScore } - if len(IDs) > 0 { - for _, v := range IDs { - dl.Warn(&checker.LogMessage{ - Text: fmt.Sprintf("Project is vulnerable to: %s", strings.Join(v.IDs, " / ")), - }) - } - - return checker.CreateResultWithScore(name, - fmt.Sprintf("%v existing vulnerabilities 
detected", len(IDs)), score) - } - - return checker.CreateMaxScoreResult(name, "no vulnerabilities detected") + return checker.CreateResultWithScore(name, + fmt.Sprintf("%v existing vulnerabilities detected", numVulnsFound), score) } diff --git a/checks/evaluation/vulnerabilities_test.go b/checks/evaluation/vulnerabilities_test.go index c524b357fd1..382a44b308a 100644 --- a/checks/evaluation/vulnerabilities_test.go +++ b/checks/evaluation/vulnerabilities_test.go @@ -17,62 +17,119 @@ package evaluation import ( "testing" - "github.com/ossf/scorecard/v4/checker" - "github.com/ossf/scorecard/v4/clients" + sce "github.com/ossf/scorecard/v4/errors" + "github.com/ossf/scorecard/v4/finding" scut "github.com/ossf/scorecard/v4/utests" ) // TestVulnerabilities tests the vulnerabilities checker. func TestVulnerabilities(t *testing.T) { t.Parallel() - //nolint - type args struct { - name string - r *checker.VulnerabilitiesData - } + //nolint:govet tests := []struct { name string - args args - want checker.CheckResult + findings []finding.Finding + result scut.TestReturn expected []struct { lineNumber uint } }{ { name: "no vulnerabilities", - args: args{ - name: "vulnerabilities_test.go", - r: &checker.VulnerabilitiesData{ - Vulnerabilities: []clients.Vulnerability{}, + findings: []finding.Finding{ + { + Probe: "hasOSVVulnerabilities", + Outcome: finding.OutcomePositive, }, }, - want: checker.CheckResult{ + result: scut.TestReturn{ Score: 10, }, }, { - name: "one vulnerability", - args: args{ - name: "vulnerabilities_test.go", - r: &checker.VulnerabilitiesData{ - Vulnerabilities: []clients.Vulnerability{ - { - ID: "CVE-2019-1234", - }, - }, + name: "three vulnerabilities", + findings: []finding.Finding{ + { + Probe: "hasOSVVulnerabilities", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "hasOSVVulnerabilities", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "hasOSVVulnerabilities", + Outcome: finding.OutcomeNegative, }, }, - want: checker.CheckResult{ - Score: 9, 
+ result: scut.TestReturn{ + Score: 7, + NumberOfWarn: 3, }, }, { - name: "one vulnerability", - args: args{ - name: "vulnerabilities_test.go", + name: "twelve vulnerabilities to check that score is not less than 0", + findings: []finding.Finding{ + { + Probe: "hasOSVVulnerabilities", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "hasOSVVulnerabilities", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "hasOSVVulnerabilities", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "hasOSVVulnerabilities", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "hasOSVVulnerabilities", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "hasOSVVulnerabilities", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "hasOSVVulnerabilities", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "hasOSVVulnerabilities", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "hasOSVVulnerabilities", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "hasOSVVulnerabilities", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "hasOSVVulnerabilities", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "hasOSVVulnerabilities", + Outcome: finding.OutcomeNegative, + }, + }, + result: scut.TestReturn{ + Score: 0, + NumberOfWarn: 12, }, - want: checker.CheckResult{ + }, + { + name: "invalid findings", + findings: []finding.Finding{}, + result: scut.TestReturn{ Score: -1, + Error: sce.ErrScorecardInternal, }, }, } @@ -81,10 +138,8 @@ func TestVulnerabilities(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() dl := scut.TestDetailLogger{} - res := Vulnerabilities(tt.args.name, &dl, tt.args.r) - if res.Score != tt.want.Score { - t.Errorf("Vulnerabilities() = %v, want %v", res.Score, tt.want.Score) - } + got := Vulnerabilities(tt.name, tt.findings, &dl) + scut.ValidateTestReturn(t, tt.name, &tt.result, &got, &dl) }) } } diff --git a/checks/evaluation/webhooks.go b/checks/evaluation/webhooks.go index d7a5a761923..37ed12c5e6b 100644 --- 
a/checks/evaluation/webhooks.go +++ b/checks/evaluation/webhooks.go @@ -20,42 +20,50 @@ import ( "github.com/ossf/scorecard/v4/checker" sce "github.com/ossf/scorecard/v4/errors" "github.com/ossf/scorecard/v4/finding" + "github.com/ossf/scorecard/v4/probes/webhooksUseSecrets" ) // Webhooks applies the score policy for the Webhooks check. -func Webhooks(name string, dl checker.DetailLogger, - r *checker.WebhooksData, +func Webhooks(name string, + findings []finding.Finding, dl checker.DetailLogger, ) checker.CheckResult { - if r == nil { - e := sce.WithMessage(sce.ErrScorecardInternal, "empty raw data") + expectedProbes := []string{ + webhooksUseSecrets.Probe, + } + + if !finding.UniqueProbesEqual(findings, expectedProbes) { + e := sce.WithMessage(sce.ErrScorecardInternal, "invalid probe results") return checker.CreateRuntimeErrorResult(name, e) } - if len(r.Webhooks) < 1 { - return checker.CreateMaxScoreResult(name, "no webhooks defined") + if len(findings) == 1 && findings[0].Outcome == finding.OutcomeNotApplicable { + return checker.CreateMaxScoreResult(name, "project does not have webhook") } - hasNoSecretCount := 0 - for _, hook := range r.Webhooks { - if !hook.UsesAuthSecret { - dl.Warn(&checker.LogMessage{ - Path: hook.Path, - Type: finding.FileTypeURL, - Text: "Webhook with no secret configured", - }) - hasNoSecretCount++ + var webhooksWithNoSecret int + + totalWebhooks := len(findings) + + for i := range findings { + f := &findings[i] + if f.Outcome == finding.OutcomeNegative { + webhooksWithNoSecret++ } } - if hasNoSecretCount == 0 { - return checker.CreateMaxScoreResult(name, fmt.Sprintf("all %d hook(s) have a secret configured", len(r.Webhooks))) + if totalWebhooks == webhooksWithNoSecret { + return checker.CreateMinScoreResult(name, "no hook(s) have a secret configured") } - if len(r.Webhooks) == hasNoSecretCount { - return checker.CreateMinScoreResult(name, fmt.Sprintf("%d hook(s) do not have a secret configured", len(r.Webhooks))) + if 
webhooksWithNoSecret == 0 { + msg := fmt.Sprintf("All %d of the projects webhooks are configured with a secret", totalWebhooks) + return checker.CreateMaxScoreResult(name, msg) } + msg := fmt.Sprintf("%d out of the projects %d webhooks are configured without a secret", + webhooksWithNoSecret, + totalWebhooks) + return checker.CreateProportionalScoreResult(name, - fmt.Sprintf("%d/%d hook(s) with no secrets configured detected", - hasNoSecretCount, len(r.Webhooks)), hasNoSecretCount, len(r.Webhooks)) + msg, totalWebhooks-webhooksWithNoSecret, totalWebhooks) } diff --git a/checks/evaluation/webhooks_test.go b/checks/evaluation/webhooks_test.go index 4fefeb47e30..a6de4ec5dc6 100644 --- a/checks/evaluation/webhooks_test.go +++ b/checks/evaluation/webhooks_test.go @@ -18,136 +18,218 @@ import ( "testing" "github.com/ossf/scorecard/v4/checker" - "github.com/ossf/scorecard/v4/clients" + "github.com/ossf/scorecard/v4/finding" scut "github.com/ossf/scorecard/v4/utests" ) // TestWebhooks tests the webhooks check. 
func TestWebhooks(t *testing.T) { t.Parallel() - //nolint - type args struct { - name string - dl checker.DetailLogger - r *checker.WebhooksData - } tests := []struct { - name string - args args - want checker.CheckResult - wantErr bool + name string + findings []finding.Finding + result scut.TestReturn }{ { - name: "r nil", - args: args{ - name: "test_webhook_check_pass", - dl: &scut.TestDetailLogger{}, + name: "no webhooks", + findings: []finding.Finding{ + { + Probe: "webhooksUseSecrets", + Outcome: finding.OutcomeNotApplicable, + }, + }, + result: scut.TestReturn{ + Score: checker.MaxResultScore, }, - wantErr: true, }, { - name: "no webhooks", - args: args{ - name: "no webhooks", - dl: &scut.TestDetailLogger{}, - r: &checker.WebhooksData{}, + name: "1 webhook with no secret", + findings: []finding.Finding{ + { + Probe: "webhooksUseSecrets", + Outcome: finding.OutcomeNegative, + }, }, - want: checker.CheckResult{ - Score: checker.MaxResultScore, + result: scut.TestReturn{ + Score: checker.MinResultScore, }, }, { name: "1 webhook with secret", - args: args{ - name: "1 webhook with secret", - dl: &scut.TestDetailLogger{}, - r: &checker.WebhooksData{ - Webhooks: []clients.Webhook{ - { - Path: "https://github.com/owner/repo/settings/hooks/1234", - ID: 1234, - UsesAuthSecret: true, - }, - }, + findings: []finding.Finding{ + { + Probe: "webhooksUseSecrets", + Outcome: finding.OutcomePositive, }, }, - want: checker.CheckResult{ - Score: 10, + result: scut.TestReturn{ + Score: checker.MaxResultScore, }, }, { - name: "1 webhook with no secret", - args: args{ - name: "1 webhook with no secret", - dl: &scut.TestDetailLogger{}, - r: &checker.WebhooksData{ - Webhooks: []clients.Webhook{ - { - Path: "https://github.com/owner/repo/settings/hooks/1234", - ID: 1234, - UsesAuthSecret: false, - }, - }, + name: "2 webhooks one of which has secret", + findings: []finding.Finding{ + { + Probe: "webhooksUseSecrets", + Outcome: finding.OutcomeNegative, + }, + { + Probe: 
"webhooksUseSecrets", + Outcome: finding.OutcomePositive, }, }, - want: checker.CheckResult{ - Score: 0, + result: scut.TestReturn{ + Score: 5, }, }, { - name: "many webhooks with no secret and with secret", - args: args{ - name: "many webhooks with no secret and with secret", - dl: &scut.TestDetailLogger{}, - r: &checker.WebhooksData{ - Webhooks: []clients.Webhook{ - { - Path: "https://github.com/owner/repo/settings/hooks/1234", - ID: 1234, - UsesAuthSecret: false, - }, - { - Path: "https://github.com/owner/repo/settings/hooks/1111", - ID: 1111, - UsesAuthSecret: true, - }, - { - Path: "https://github.com/owner/repo/settings/hooks/4444", - ID: 4444, - UsesAuthSecret: true, - }, - { - Path: "https://github.com/owner/repo/settings/hooks/3333", - ID: 3333, - UsesAuthSecret: false, - }, - { - Path: "https://github.com/owner/repo/settings/hooks/2222", - ID: 2222, - UsesAuthSecret: false, - }, - }, + name: "Five webhooks three of which have secrets", + findings: []finding.Finding{ + { + Probe: "webhooksUseSecrets", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "webhooksUseSecrets", + Outcome: finding.OutcomePositive, + }, + { + Probe: "webhooksUseSecrets", + Outcome: finding.OutcomePositive, + }, + { + Probe: "webhooksUseSecrets", + Outcome: finding.OutcomePositive, + }, + { + Probe: "webhooksUseSecrets", + Outcome: finding.OutcomeNegative, }, }, - want: checker.CheckResult{ + result: scut.TestReturn{ Score: 6, }, }, + { + name: "One of 12 webhooks does not have secrets", + findings: []finding.Finding{ + { + Probe: "webhooksUseSecrets", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "webhooksUseSecrets", + Outcome: finding.OutcomePositive, + }, + { + Probe: "webhooksUseSecrets", + Outcome: finding.OutcomePositive, + }, + { + Probe: "webhooksUseSecrets", + Outcome: finding.OutcomePositive, + }, + { + Probe: "webhooksUseSecrets", + Outcome: finding.OutcomePositive, + }, + { + Probe: "webhooksUseSecrets", + Outcome: finding.OutcomePositive, + }, + { + Probe: 
"webhooksUseSecrets", + Outcome: finding.OutcomePositive, + }, + { + Probe: "webhooksUseSecrets", + Outcome: finding.OutcomePositive, + }, + { + Probe: "webhooksUseSecrets", + Outcome: finding.OutcomePositive, + }, + { + Probe: "webhooksUseSecrets", + Outcome: finding.OutcomePositive, + }, + { + Probe: "webhooksUseSecrets", + Outcome: finding.OutcomePositive, + }, + { + Probe: "webhooksUseSecrets", + Outcome: finding.OutcomePositive, + }, + }, + result: scut.TestReturn{ + Score: 9, + }, + }, + { + name: "Score should not drop below min score", + findings: []finding.Finding{ + { + Probe: "webhooksUseSecrets", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "webhooksUseSecrets", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "webhooksUseSecrets", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "webhooksUseSecrets", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "webhooksUseSecrets", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "webhooksUseSecrets", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "webhooksUseSecrets", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "webhooksUseSecrets", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "webhooksUseSecrets", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "webhooksUseSecrets", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "webhooksUseSecrets", + Outcome: finding.OutcomeNegative, + }, + { + Probe: "webhooksUseSecrets", + Outcome: finding.OutcomeNegative, + }, + }, + result: scut.TestReturn{ + Score: checker.MinResultScore, + }, + }, } for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() - got := Webhooks(tt.args.name, tt.args.dl, tt.args.r) - if tt.wantErr { - if got.Error == nil { - t.Errorf("Webhooks() error = %v, wantErr %v", got.Error, tt.wantErr) - } - } else { - if got.Score != tt.want.Score { - t.Errorf("Webhooks() = %v, want %v", got.Score, tt.want.Score) - } - } + dl := scut.TestDetailLogger{} + got := 
Webhooks(tt.name, tt.findings, &dl) + scut.ValidateTestReturn(t, tt.name, &tt.result, &got, &dl) }) } } diff --git a/checks/fileparser/github_workflow.go b/checks/fileparser/github_workflow.go index cc1622d0654..5ffc9722933 100644 --- a/checks/fileparser/github_workflow.go +++ b/checks/fileparser/github_workflow.go @@ -131,6 +131,11 @@ func getJobDefaultRunShell(job *actionlint.Job) string { func getJobRunsOnLabels(job *actionlint.Job) []*actionlint.String { if job != nil && job.RunsOn != nil { + // Starting at v1.6.16, either field may be set + // https://github.com/rhysd/actionlint/issues/164 + if job.RunsOn.LabelsExpr != nil { + return []*actionlint.String{job.RunsOn.LabelsExpr} + } return job.RunsOn.Labels } return nil @@ -203,8 +208,15 @@ func GetOSesForJob(job *actionlint.Job) ([]string, error) { } if len(jobOSes) == 0 { - return jobOSes, sce.WithMessage(sce.ErrScorecardInternal, - fmt.Sprintf("unable to determine OS for job: %v", GetJobName(job))) + // This error is caught by the caller, which is responsible for adding more + // precise location information + jobName := GetJobName(job) + return jobOSes, &checker.ElementError{ + Location: finding.Location{ + Snippet: &jobName, + }, + Err: sce.ErrJobOSParsing, + } } return jobOSes, nil } @@ -235,7 +247,7 @@ func GetShellForStep(step *actionlint.Step, job *actionlint.Job) (string, error) } execRunShell := getExecRunShell(execRun) if execRunShell != "" { - return execRun.Shell.Value, nil + return execRunShell, nil } jobDefaultRunShell := getJobDefaultRunShell(job) if jobDefaultRunShell != "" { @@ -331,7 +343,7 @@ type JobMatcherStep struct { Run string } -// JobMatchResult represents the result of a matche. +// JobMatchResult represents the result of a match. 
type JobMatchResult struct { Msg string File checker.File diff --git a/checks/fileparser/github_workflow_test.go b/checks/fileparser/github_workflow_test.go index cf2912e4fc2..29f378bb5b2 100644 --- a/checks/fileparser/github_workflow_test.go +++ b/checks/fileparser/github_workflow_test.go @@ -20,8 +20,8 @@ import ( "strings" "testing" + "github.com/google/go-cmp/cmp" "github.com/rhysd/actionlint" - "gotest.tools/assert/cmp" ) func TestGitHubWorkflowShell(t *testing.T) { @@ -103,7 +103,7 @@ func TestGitHubWorkflowShell(t *testing.T) { }, { name: "shell specified in step", - filename: "../testdata/.github/workflows/github-workflow-shells-speficied-step.yaml", + filename: "../testdata/.github/workflows/github-workflow-shells-specified-step.yaml", expectedShells: []string{"pwsh"}, }, { @@ -142,7 +142,7 @@ func TestGitHubWorkflowShell(t *testing.T) { actualShells = append(actualShells, shell) } } - if !cmp.DeepEqual(tt.expectedShells, actualShells)().Success() { + if !cmp.Equal(tt.expectedShells, actualShells) { t.Errorf("%v: Got (%v) expected (%v)", tt.name, actualShells, tt.expectedShells) } }) @@ -401,7 +401,7 @@ func TestGetLineNumber(t *testing.T) { type args struct { pos *actionlint.Pos } - //nolint + //nolint:govet tests := []struct { name string args args @@ -488,7 +488,7 @@ func TestGetUses(t *testing.T) { type args struct { step *actionlint.Step } - //nolint + //nolint:govet tests := []struct { name string args args @@ -562,7 +562,7 @@ func Test_getWith(t *testing.T) { type args struct { step *actionlint.Step } - //nolint + //nolint:govet tests := []struct { name string args args @@ -648,7 +648,7 @@ func Test_getRun(t *testing.T) { type args struct { step *actionlint.Step } - //nolint + //nolint:govet tests := []struct { name string args args @@ -746,7 +746,7 @@ func Test_stepsMatch(t *testing.T) { stepToMatch *JobMatcherStep step *actionlint.Step } - //nolint + //nolint:govet tests := []struct { name string args args diff --git 
a/checks/fileparser/listing_test.go b/checks/fileparser/listing_test.go index 7d3e7cbfacb..1b09c921fd7 100644 --- a/checks/fileparser/listing_test.go +++ b/checks/fileparser/listing_test.go @@ -135,7 +135,7 @@ func TestIsTemplateFile(t *testing.T) { // TestCheckFileContainsCommands tests if the content starts with a comment. func TestCheckFileContainsCommands(t *testing.T) { t.Parallel() - //nolint + //nolint:govet type args struct { content []byte comment string @@ -202,7 +202,7 @@ func Test_isMatchingPath(t *testing.T) { want: true, }, { - name: "matching path with case insensitive", + name: "matching path with case-insensitive", args: args{ pattern: "Dockerfile", fullpath: "dockerfile", @@ -211,7 +211,7 @@ func Test_isMatchingPath(t *testing.T) { want: true, }, { - name: "matching path with case insensitive", + name: "matching path with case-insensitive", args: args{ pattern: "Dockerfile", fullpath: "dockerfile", @@ -220,7 +220,7 @@ func Test_isMatchingPath(t *testing.T) { want: false, }, { - name: "matching path with case insensitive", + name: "matching path with case-insensitive", args: args{ pattern: "Dockerfile", fullpath: "Dockerfile.template", @@ -229,7 +229,7 @@ func Test_isMatchingPath(t *testing.T) { want: false, }, { - name: "matching path with case insensitive", + name: "matching path with case-insensitive", args: args{ pattern: "Dockerfile", fullpath: "Dockerfile.template", @@ -238,7 +238,7 @@ func Test_isMatchingPath(t *testing.T) { want: false, }, { - name: "matching path with case insensitive", + name: "matching path with case-insensitive", args: args{ pattern: "Dockerfile", fullpath: "Dockerfile.template", @@ -247,7 +247,7 @@ func Test_isMatchingPath(t *testing.T) { want: false, }, { - name: "matching path with case insensitive", + name: "matching path with case-insensitive", args: args{ pattern: "Dockerfile", fullpath: "Dockerfile.template", @@ -255,7 +255,7 @@ func Test_isMatchingPath(t *testing.T) { }, }, { - name: "matching path with case 
insensitive", + name: "matching path with case-insensitive", args: args{ pattern: "Dockerfile", fullpath: "Dockerfile.template", @@ -263,7 +263,7 @@ func Test_isMatchingPath(t *testing.T) { }, }, { - name: "matching path with case insensitive", + name: "matching path with case-insensitive", args: args{ pattern: "Dockerfile", fullpath: "Dockerfile.template", @@ -271,7 +271,7 @@ func Test_isMatchingPath(t *testing.T) { }, }, { - name: "matching path with case insensitive", + name: "matching path with case-insensitive", args: args{ pattern: "Dockerfile", fullpath: "Dockerfile.template", @@ -279,7 +279,7 @@ func Test_isMatchingPath(t *testing.T) { }, }, { - name: "matching path with case insensitive", + name: "matching path with case-insensitive", args: args{ pattern: "Dockerfile", fullpath: "Dockerfile.template", @@ -287,7 +287,7 @@ func Test_isMatchingPath(t *testing.T) { }, }, { - name: "matching path with case insensitive", + name: "matching path with case-insensitive", args: args{ pattern: "Dockerfile", fullpath: "Dockerfile.template", @@ -295,7 +295,7 @@ func Test_isMatchingPath(t *testing.T) { }, }, { - name: "matching path with case insensitive", + name: "matching path with case-insensitive", args: args{ pattern: "Dockerfile", fullpath: "Dockerfile.template", @@ -303,7 +303,7 @@ func Test_isMatchingPath(t *testing.T) { }, }, { - name: "matching path with case insensitive", + name: "matching path with case-insensitive", args: args{ pattern: "Dockerfile", fullpath: "Dockerfile.template", @@ -397,7 +397,7 @@ func Test_isTestdataFile(t *testing.T) { // TestOnMatchingFileContentDo tests the OnMatchingFileContent function. 
func TestOnMatchingFileContent(t *testing.T) { t.Parallel() - //nolint + //nolint:govet tests := []struct { name string wantErr bool @@ -515,7 +515,6 @@ func TestOnMatchingFileContent(t *testing.T) { t.Parallel() x := func(path string, content []byte, args ...interface{}) (bool, error) { if tt.shouldFuncFail { - //nolint return false, errors.New("test error") } if tt.shouldGetPredicateFail { @@ -542,8 +541,6 @@ func TestOnMatchingFileContent(t *testing.T) { } // TestOnAllFilesDo tests the OnAllFilesDo function. -// -//nolint:gocognit func TestOnAllFilesDo(t *testing.T) { t.Parallel() @@ -584,7 +581,7 @@ func TestOnAllFilesDo(t *testing.T) { alwaysFail := func(path string, args ...interface{}) (bool, error) { return false, errTest } - //nolint + //nolint:govet tests := []struct { name string onFile DoWhileTrueOnFilename diff --git a/checks/fuzzing.go b/checks/fuzzing.go index 1e92d02142b..6774830101c 100644 --- a/checks/fuzzing.go +++ b/checks/fuzzing.go @@ -19,6 +19,8 @@ import ( "github.com/ossf/scorecard/v4/checks/evaluation" "github.com/ossf/scorecard/v4/checks/raw" sce "github.com/ossf/scorecard/v4/errors" + "github.com/ossf/scorecard/v4/probes" + "github.com/ossf/scorecard/v4/probes/zrunner" ) // CheckFuzzing is the registered name for Fuzzing. @@ -41,9 +43,16 @@ func Fuzzing(c *checker.CheckRequest) checker.CheckResult { } // Set the raw results. - if c.RawResults != nil { - c.RawResults.FuzzingResults = rawData + pRawResults := getRawResults(c) + pRawResults.FuzzingResults = rawData + + // Evaluate the probes. + findings, err := zrunner.Run(pRawResults, probes.Fuzzing) + if err != nil { + e := sce.WithMessage(sce.ErrScorecardInternal, err.Error()) + return checker.CreateRuntimeErrorResult(CheckFuzzing, e) } - return evaluation.Fuzzing(CheckFuzzing, c.Dlogger, &rawData) + // Return the score evaluation. 
+ return evaluation.Fuzzing(CheckFuzzing, findings, c.Dlogger) } diff --git a/checks/fuzzing_test.go b/checks/fuzzing_test.go index 3e6e515975d..852b0172ff4 100644 --- a/checks/fuzzing_test.go +++ b/checks/fuzzing_test.go @@ -30,17 +30,15 @@ import ( // TestFuzzing is a test function for Fuzzing. func TestFuzzing(t *testing.T) { t.Parallel() - //nolint tests := []struct { name string - want checker.CheckResult + fileContent string langs []clients.Language + fileName []string response clients.SearchResponse + expected scut.TestReturn wantErr bool wantFuzzErr bool - fileName []string - fileContent string - expected scut.TestReturn }{ { name: "empty response", @@ -52,6 +50,13 @@ func TestFuzzing(t *testing.T) { }, }, wantErr: false, + expected: scut.TestReturn{ + Error: nil, + NumberOfWarn: 12, + NumberOfDebug: 0, + NumberOfInfo: 0, + Score: 0, + }, }, { name: "hits 1", @@ -69,11 +74,10 @@ func TestFuzzing(t *testing.T) { }, }, wantErr: false, - want: checker.CheckResult{Score: 10}, expected: scut.TestReturn{ NumberOfWarn: 0, NumberOfDebug: 0, - NumberOfInfo: 0, + NumberOfInfo: 1, Score: 10, }, }, @@ -86,7 +90,6 @@ func TestFuzzing(t *testing.T) { }, }, wantErr: true, - want: checker.CheckResult{Score: -1}, expected: scut.TestReturn{ Error: sce.ErrScorecardInternal, NumberOfWarn: 0, @@ -104,12 +107,24 @@ func TestFuzzing(t *testing.T) { }, }, wantFuzzErr: false, - want: checker.CheckResult{Score: 0}, + expected: scut.TestReturn{ + Error: nil, + NumberOfWarn: 12, + NumberOfDebug: 0, + NumberOfInfo: 0, + Score: 0, + }, }, { name: "error", wantFuzzErr: true, - want: checker.CheckResult{}, + expected: scut.TestReturn{ + Error: nil, + NumberOfWarn: 12, + NumberOfDebug: 0, + NumberOfInfo: 0, + Score: 0, + }, }, } for _, tt := range tests { @@ -123,7 +138,6 @@ func TestFuzzing(t *testing.T) { mockFuzz.EXPECT().Search(gomock.Any()). 
DoAndReturn(func(q clients.SearchRequest) (clients.SearchResponse, error) { if tt.wantErr { - //nolint return clients.SearchResponse{}, errors.New("error") } return tt.response, nil @@ -132,17 +146,19 @@ func TestFuzzing(t *testing.T) { mockFuzz.EXPECT().ListFiles(gomock.Any()).Return(tt.fileName, nil).AnyTimes() mockFuzz.EXPECT().GetFileContent(gomock.Any()).DoAndReturn(func(f string) (string, error) { if tt.wantErr { - //nolint return "", errors.New("error") } return tt.fileContent, nil }).AnyTimes() dl := scut.TestDetailLogger{} + raw := checker.RawResults{} req := checker.CheckRequest{ RepoClient: mockFuzz, OssFuzzRepo: mockFuzz, Dlogger: &dl, + RawResults: &raw, } + if tt.wantFuzzErr { req.OssFuzzRepo = nil } @@ -153,9 +169,7 @@ func TestFuzzing(t *testing.T) { return } - if !scut.ValidateTestReturn(t, tt.name, &tt.expected, &result, &dl) { - t.Fatalf(tt.name, tt.expected) - } + scut.ValidateTestReturn(t, tt.name, &tt.expected, &result, &dl) }) } } diff --git a/checks/license.go b/checks/license.go index 99c87a1b839..f062cf5a57a 100644 --- a/checks/license.go +++ b/checks/license.go @@ -19,6 +19,8 @@ import ( "github.com/ossf/scorecard/v4/checks/evaluation" "github.com/ossf/scorecard/v4/checks/raw" sce "github.com/ossf/scorecard/v4/errors" + "github.com/ossf/scorecard/v4/probes" + "github.com/ossf/scorecard/v4/probes/zrunner" ) // CheckLicense is the registered name for License. @@ -44,9 +46,15 @@ func License(c *checker.CheckRequest) checker.CheckResult { } // Set the raw results. - if c.RawResults != nil { - c.RawResults.LicenseResults = rawData + pRawResults := getRawResults(c) + pRawResults.LicenseResults = rawData + + // Evaluate the probes. 
+ findings, err := zrunner.Run(pRawResults, probes.License) + if err != nil { + e := sce.WithMessage(sce.ErrScorecardInternal, err.Error()) + return checker.CreateRuntimeErrorResult(CheckLicense, e) } - return evaluation.License(CheckLicense, c.Dlogger, &rawData) + return evaluation.License(CheckLicense, findings, c.Dlogger) } diff --git a/checks/license_test.go b/checks/license_test.go index 253d13ca754..7726d1c7372 100644 --- a/checks/license_test.go +++ b/checks/license_test.go @@ -42,7 +42,7 @@ func TestLicenseFileSubdirectory(t *testing.T) { inputFolder: "testdata/licensedir/withlicense", expected: scut.TestReturn{ Error: nil, - Score: checker.MaxResultScore - 1, + Score: 9, // Does not have approved format NumberOfInfo: 1, NumberOfWarn: 1, }, @@ -52,8 +52,10 @@ func TestLicenseFileSubdirectory(t *testing.T) { name: "Without LICENSE", inputFolder: "testdata/licensedir/withoutlicense", expected: scut.TestReturn{ - Error: nil, - Score: checker.MinResultScore, + Error: nil, + Score: checker.MinResultScore, + NumberOfWarn: 0, + NumberOfInfo: 2, }, err: nil, }, @@ -90,9 +92,7 @@ func TestLicenseFileSubdirectory(t *testing.T) { res := License(&req) - if !scut.ValidateTestReturn(t, tt.name, &tt.expected, &res, &dl) { - t.Fail() - } + scut.ValidateTestReturn(t, tt.name, &tt.expected, &res, &dl) ctrl.Finish() }) diff --git a/checks/maintained.go b/checks/maintained.go index ec653d6a20e..d94e6978d30 100644 --- a/checks/maintained.go +++ b/checks/maintained.go @@ -19,6 +19,8 @@ import ( "github.com/ossf/scorecard/v4/checks/evaluation" "github.com/ossf/scorecard/v4/checks/raw" sce "github.com/ossf/scorecard/v4/errors" + "github.com/ossf/scorecard/v4/probes" + "github.com/ossf/scorecard/v4/probes/zrunner" ) // CheckMaintained is the exported check name for Maintained. @@ -41,9 +43,16 @@ func Maintained(c *checker.CheckRequest) checker.CheckResult { } // Set the raw results. 
- if c.RawResults != nil { - c.RawResults.MaintainedResults = rawData + pRawResults := getRawResults(c) + pRawResults.MaintainedResults = rawData + + // Evaluate the probes. + findings, err := zrunner.Run(pRawResults, probes.Maintained) + if err != nil { + e := sce.WithMessage(sce.ErrScorecardInternal, err.Error()) + return checker.CreateRuntimeErrorResult(CheckMaintained, e) } - return evaluation.Maintained(CheckMaintained, c.Dlogger, &rawData) + // Return the score evaluation. + return evaluation.Maintained(CheckMaintained, findings, c.Dlogger) } diff --git a/checks/maintained_test.go b/checks/maintained_test.go index 25f496de60e..794043bffbd 100644 --- a/checks/maintained_test.go +++ b/checks/maintained_test.go @@ -27,9 +27,9 @@ import ( scut "github.com/ossf/scorecard/v4/utests" ) -// ignoring the linter for cyclomatic complexity because it is a test func // TestMaintained tests the maintained check. -//nolint +// +//nolint:gocognit // ignoring the linter for cyclomatic complexity because it is a test func func Test_Maintained(t *testing.T) { t.Parallel() threeHundredDaysAgo := time.Now().AddDate(0, 0, -300) @@ -38,25 +38,23 @@ func Test_Maintained(t *testing.T) { oneDayAgo := time.Now().AddDate(0, 0, -1) ownerAssociation := clients.RepoAssociationOwner noneAssociation := clients.RepoAssociationNone - // fieldalignment lint issue. Ignoring it as it is not important for this test. 
someone := clients.User{ Login: "someone", } otheruser := clients.User{ Login: "someone-else", } - //nolint tests := []struct { + createdat time.Time err error - name string - isarchived bool archiveerr error - commits []clients.Commit commiterr error - issues []clients.Issue issueerr error - createdat time.Time + name string expected checker.CheckResult + commits []clients.Commit + issues []clients.Issue + isarchived bool }{ { name: "archived", @@ -339,7 +337,7 @@ func Test_Maintained(t *testing.T) { } return tt.isarchived, nil }) - + //nolint:nestif if tt.archiveerr == nil { mockRepo.EXPECT().ListCommits().DoAndReturn( func() ([]clients.Commit, error) { diff --git a/checks/packaging.go b/checks/packaging.go index 1ae19bdaada..cfa7878229b 100644 --- a/checks/packaging.go +++ b/checks/packaging.go @@ -22,6 +22,8 @@ import ( "github.com/ossf/scorecard/v4/clients/githubrepo" "github.com/ossf/scorecard/v4/clients/gitlabrepo" sce "github.com/ossf/scorecard/v4/errors" + "github.com/ossf/scorecard/v4/probes" + "github.com/ossf/scorecard/v4/probes/zrunner" ) // CheckPackaging is the registered name for Packaging. @@ -54,10 +56,14 @@ func Packaging(c *checker.CheckRequest) checker.CheckResult { return checker.CreateRuntimeErrorResult(CheckPackaging, e) } - // Set the raw results. 
- if c.RawResults != nil { - c.RawResults.PackagingResults = rawData + pRawResults := getRawResults(c) + pRawResults.PackagingResults = rawData + + findings, err := zrunner.Run(pRawResults, probes.Packaging) + if err != nil { + e := sce.WithMessage(sce.ErrScorecardInternal, err.Error()) + return checker.CreateRuntimeErrorResult(CheckPackaging, e) } - return evaluation.Packaging(CheckPackaging, c.Dlogger, &rawData) + return evaluation.Packaging(CheckPackaging, findings, c.Dlogger) } diff --git a/checks/permissions_test.go b/checks/permissions_test.go index 41f42f105bf..2d80ef33f1c 100644 --- a/checks/permissions_test.go +++ b/checks/permissions_test.go @@ -28,7 +28,6 @@ import ( scut "github.com/ossf/scorecard/v4/utests" ) -// nolint func TestGithubTokenPermissions(t *testing.T) { t.Parallel() @@ -53,7 +52,7 @@ func TestGithubTokenPermissions(t *testing.T) { filenames: []string{"./testdata/.github/workflows/github-workflow-permissions-run-no-codeql-write.yaml"}, expected: scut.TestReturn{ Error: nil, - Score: checker.MaxResultScore - 1, + Score: checker.MaxResultScore, NumberOfWarn: 1, NumberOfInfo: 1, NumberOfDebug: 4, @@ -280,7 +279,7 @@ func TestGithubTokenPermissions(t *testing.T) { }, }, { - name: "release workflow contents write semantic-release", + name: "release workflow contents write semantic-release with npx", filenames: []string{"./testdata/.github/workflows/github-workflow-permissions-contents-writes-release-semantic-release.yaml"}, expected: scut.TestReturn{ Error: nil, @@ -290,6 +289,28 @@ func TestGithubTokenPermissions(t *testing.T) { NumberOfDebug: 4, }, }, + { + name: "release workflow contents write semantic-release with yarn command", + filenames: []string{"./testdata/.github/workflows/github-workflow-permissions-contents-writes-release-semantic-release-yarn.yaml"}, + expected: scut.TestReturn{ + Error: nil, + Score: checker.MaxResultScore, + NumberOfWarn: 0, + NumberOfInfo: 2, + NumberOfDebug: 4, + }, + }, + { + name: "release workflow contents 
write semantic-release with pnpm and dlx", + filenames: []string{"./testdata/.github/workflows/github-workflow-permissions-contents-writes-release-semantic-release-pnpm.yaml"}, + expected: scut.TestReturn{ + Error: nil, + Score: checker.MaxResultScore, + NumberOfWarn: 0, + NumberOfInfo: 2, + NumberOfDebug: 4, + }, + }, { name: "package workflow write", filenames: []string{"./testdata/.github/workflows/github-workflow-permissions-packages-writes.yaml"}, @@ -302,11 +323,11 @@ func TestGithubTokenPermissions(t *testing.T) { }, }, { - name: "workflow jobs only", + name: "penalize job-level read without top level permissions", filenames: []string{"./testdata/.github/workflows/github-workflow-permissions-jobs-only.yaml"}, expected: scut.TestReturn{ Error: nil, - Score: 9, + Score: checker.MaxResultScore - 1, NumberOfWarn: 1, NumberOfInfo: 4, NumberOfDebug: 4, @@ -317,7 +338,7 @@ func TestGithubTokenPermissions(t *testing.T) { filenames: []string{"./testdata/.github/workflows/github-workflow-permissions-run-write-codeql-comment.yaml"}, expected: scut.TestReturn{ Error: nil, - Score: checker.MaxResultScore - 1, + Score: checker.MaxResultScore, NumberOfWarn: 1, NumberOfInfo: 1, NumberOfDebug: 4, @@ -389,6 +410,19 @@ func TestGithubTokenPermissions(t *testing.T) { NumberOfDebug: 5, }, }, + { + name: "don't penalize job-level writes", + filenames: []string{ + "./testdata/.github/workflows/github-workflow-permissions-run-multiple-writes.yaml", + }, + expected: scut.TestReturn{ + Error: nil, + Score: checker.MaxResultScore, + NumberOfWarn: 7, // number of job-level write permissions + NumberOfInfo: 1, // read-only top-level permissions + NumberOfDebug: 4, // This is 4 + (number of actions = 0) + }, + }, } for _, tt := range tests { tt := tt // Re-initializing variable so it is not changed while executing the closure below @@ -424,9 +458,7 @@ func TestGithubTokenPermissions(t *testing.T) { res := TokenPermissions(&c) - if !scut.ValidateTestReturn(t, tt.name, &tt.expected, &res, 
&dl) { - t.Errorf("test failed: log message not present: %+v\n%+v", tt.expected, dl) - } + scut.ValidateTestReturn(t, tt.name, &tt.expected, &res, &dl) }) } } diff --git a/checks/probes.go b/checks/probes.go new file mode 100644 index 00000000000..357da984969 --- /dev/null +++ b/checks/probes.go @@ -0,0 +1,28 @@ +// Copyright 2023 OpenSSF Scorecard Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package checks + +import ( + "github.com/ossf/scorecard/v4/checker" +) + +// getRawResults returns a pointer to the raw results in the CheckRequest +// if the pointer is not nil. Else, it creates a new raw result. +func getRawResults(c *checker.CheckRequest) *checker.RawResults { + if c.RawResults != nil { + return c.RawResults + } + return &checker.RawResults{} +} diff --git a/checks/raw/binary_artifact.go b/checks/raw/binary_artifact.go index 13f9108eca6..1c7a82f246d 100644 --- a/checks/raw/binary_artifact.go +++ b/checks/raw/binary_artifact.go @@ -15,6 +15,7 @@ package raw import ( + "errors" "fmt" "path/filepath" "regexp" @@ -48,7 +49,8 @@ func mustParseConstraint(c string) *semver.Constraints { } // BinaryArtifacts retrieves the raw data for the Binary-Artifacts check. 
-func BinaryArtifacts(c clients.RepoClient) (checker.BinaryArtifactData, error) { +func BinaryArtifacts(req *checker.CheckRequest) (checker.BinaryArtifactData, error) { + c := req.RepoClient files := []checker.File{} err := fileparser.OnMatchingFileContentDo(c, fileparser.PathMatcher{ Pattern: "*", @@ -86,13 +88,11 @@ func excludeValidatedGradleWrappers(c clients.RepoClient, files []checker.File) } // It has been confirmed that latest commit has validated JARs! // Remove Gradle wrapper JARs from files. - filterFiles := []checker.File{} - for _, f := range files { - if filepath.Base(f.Path) != "gradle-wrapper.jar" { - filterFiles = append(filterFiles, f) + for i := range files { + if filepath.Base(files[i].Path) == "gradle-wrapper.jar" { + files[i].Type = finding.FileTypeBinaryVerified } } - files = filterFiles return files, nil } @@ -116,6 +116,7 @@ var checkBinaryFileContent fileparser.DoWhileTrueOnFileContent = func(path strin "dey": true, "elf": true, "o": true, + "a": true, "so": true, "macho": true, "iso": true, @@ -201,26 +202,35 @@ func gradleWrapperValidated(c clients.RepoClient) (bool, error) { if err != nil { return false, fmt.Errorf("%w", err) } - if gradleWrapperValidatingWorkflowFile != "" { - // If validated, check that latest commit has a relevant successful run - runs, err := c.ListSuccessfulWorkflowRuns(gradleWrapperValidatingWorkflowFile) - if err != nil { - return false, fmt.Errorf("failure listing workflow runs: %w", err) - } - commits, err := c.ListCommits() - if err != nil { - return false, fmt.Errorf("failure listing commits: %w", err) - } - if len(commits) < 1 || len(runs) < 1 { + // no matching files, validation failed + if gradleWrapperValidatingWorkflowFile == "" { + return false, nil + } + + // If validated, check that latest commit has a relevant successful run + runs, err := c.ListSuccessfulWorkflowRuns(gradleWrapperValidatingWorkflowFile) + if err != nil { + // some clients, such as the local file client, don't support this feature + 
// claim unvalidated, so that other parts of the check can still be used. + if errors.Is(err, clients.ErrUnsupportedFeature) { return false, nil } - for _, r := range runs { - if *r.HeadSHA == commits[0].SHA { - // Commit has corresponding successful run! - return true, nil - } + return false, fmt.Errorf("failure listing workflow runs: %w", err) + } + commits, err := c.ListCommits() + if err != nil { + return false, fmt.Errorf("failure listing commits: %w", err) + } + if len(commits) < 1 || len(runs) < 1 { + return false, nil + } + for _, r := range runs { + if *r.HeadSHA == commits[0].SHA { + // Commit has corresponding successful run! + return true, nil } } + return false, nil } @@ -268,7 +278,7 @@ func checkWorkflowValidatesGradleWrapper(path string, content []byte, args ...in return true, nil } -// fileExists checks if a file of name name exists, including within +// fileExists checks if a file named `name` exists, including within // subdirectories. func fileExists(files []checker.File, name string) bool { for _, f := range files { diff --git a/checks/raw/binary_artifact_test.go b/checks/raw/binary_artifact_test.go index 92181c8a45d..590e86a7833 100644 --- a/checks/raw/binary_artifact_test.go +++ b/checks/raw/binary_artifact_test.go @@ -21,8 +21,10 @@ import ( "github.com/golang/mock/gomock" + "github.com/ossf/scorecard/v4/checker" "github.com/ossf/scorecard/v4/clients" mockrepo "github.com/ossf/scorecard/v4/clients/mockclients" + scut "github.com/ossf/scorecard/v4/utests" ) func strptr(s string) *string { @@ -80,7 +82,7 @@ func TestBinaryArtifacts(t *testing.T) { name: "non binary file", err: nil, files: [][]string{ - {"../doesnotexist"}, + {"../nonexistent"}, }, getFileContentCount: 1, }, @@ -126,7 +128,7 @@ func TestBinaryArtifacts(t *testing.T) { }, }, getFileContentCount: 3, - expect: 0, + expect: 1, }, { name: "gradle-wrapper.jar with non-verification action", @@ -210,7 +212,7 @@ func TestBinaryArtifacts(t *testing.T) { }, }, getFileContentCount: 3, - 
expect: 0, + expect: 1, }, } for _, tt := range tests { @@ -220,6 +222,7 @@ func TestBinaryArtifacts(t *testing.T) { ctrl := gomock.NewController(t) mockRepoClient := mockrepo.NewMockRepoClient(ctrl) + mockRepo := mockrepo.NewMockRepo(ctrl) for _, files := range tt.files { mockRepoClient.EXPECT().ListFiles(gomock.Any()).Return(files, nil) } @@ -240,7 +243,14 @@ func TestBinaryArtifacts(t *testing.T) { mockRepoClient.EXPECT().ListCommits().Return(tt.commits, nil) } - f, err := BinaryArtifacts(mockRepoClient) + dl := scut.TestDetailLogger{} + c := &checker.CheckRequest{ + RepoClient: mockRepoClient, + Repo: mockRepo, + Dlogger: &dl, + } + + f, err := BinaryArtifacts(c) if tt.err != nil { // If we expect an error, make sure it is the same @@ -256,3 +266,43 @@ func TestBinaryArtifacts(t *testing.T) { }) } } + +func TestBinaryArtifacts_workflow_runs_unsupported(t *testing.T) { + t.Parallel() + ctrl := gomock.NewController(t) + mockRepoClient := mockrepo.NewMockRepoClient(ctrl) + mockRepo := mockrepo.NewMockRepo(ctrl) + const jarFile = "gradle-wrapper.jar" + const verifyWorkflow = ".github/workflows/verify.yaml" + files := []string{jarFile, verifyWorkflow} + mockRepoClient.EXPECT().ListFiles(gomock.Any()).Return(files, nil).AnyTimes() + mockRepoClient.EXPECT().GetFileContent(jarFile).DoAndReturn(func(file string) ([]byte, error) { + content, err := os.ReadFile("../testdata/binaryartifacts/jars/gradle-wrapper.jar") + if err != nil { + return nil, fmt.Errorf("%w", err) + } + return content, nil + }).AnyTimes() + mockRepoClient.EXPECT().GetFileContent(verifyWorkflow).DoAndReturn(func(file string) ([]byte, error) { + content, err := os.ReadFile("../testdata/binaryartifacts/workflows/verify.yaml") + if err != nil { + return nil, fmt.Errorf("%w", err) + } + return content, nil + }).AnyTimes() + + mockRepoClient.EXPECT().ListSuccessfulWorkflowRuns(gomock.Any()).Return(nil, clients.ErrUnsupportedFeature).AnyTimes() + dl := scut.TestDetailLogger{} + c := &checker.CheckRequest{ + 
RepoClient: mockRepoClient, + Repo: mockRepo, + Dlogger: &dl, + } + got, err := BinaryArtifacts(c) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(got.Files) != 1 { + t.Errorf("expected 1 file, got %d", len(got.Files)) + } +} diff --git a/checks/raw/branch_protection_test.go b/checks/raw/branch_protection_test.go index 1ae134d4155..4db6f385f65 100644 --- a/checks/raw/branch_protection_test.go +++ b/checks/raw/branch_protection_test.go @@ -33,7 +33,7 @@ var ( mainBranchName = "main" ) -// nolint: govet +//nolint:govet type branchArg struct { err error name string @@ -63,7 +63,7 @@ func (ba branchesArg) getBranch(b string) (*clients.BranchRef, error) { func TestBranchProtection(t *testing.T) { t.Parallel() - //nolint: govet + //nolint:govet tests := []struct { name string branches branchesArg diff --git a/checks/raw/contributors.go b/checks/raw/contributors.go index e8a9d56b327..309d21b7d5f 100644 --- a/checks/raw/contributors.go +++ b/checks/raw/contributors.go @@ -23,7 +23,8 @@ import ( ) // Contributors retrieves the raw data for the Contributors check. 
-func Contributors(c clients.RepoClient) (checker.ContributorsData, error) { +func Contributors(cr *checker.CheckRequest) (checker.ContributorsData, error) { + c := cr.RepoClient var users []clients.User contribs, err := c.ListContributors() diff --git a/checks/raw/contributors_test.go b/checks/raw/contributors_test.go index 6d7814246fc..f7a2c51dc1a 100644 --- a/checks/raw/contributors_test.go +++ b/checks/raw/contributors_test.go @@ -19,6 +19,7 @@ import ( "github.com/golang/mock/gomock" "github.com/google/go-cmp/cmp" + "github.com/ossf/scorecard/v4/checker" "github.com/ossf/scorecard/v4/clients" mockrepo "github.com/ossf/scorecard/v4/clients/mockclients" ) @@ -104,6 +105,7 @@ func TestOrgContains(t *testing.T) { } func TestContributors(t *testing.T) { + t.Parallel() ctrl := gomock.NewController(t) defer ctrl.Finish() @@ -130,8 +132,10 @@ func TestContributors(t *testing.T) { } mockRepoClient.EXPECT().ListContributors().Return(contributors, nil) - - data, err := Contributors(mockRepoClient) + req := &checker.CheckRequest{ + RepoClient: mockRepoClient, + } + data, err := Contributors(req) if err != nil { t.Fatalf("unexpected error: %v", err) } diff --git a/checks/raw/dangerous_workflow.go b/checks/raw/dangerous_workflow.go index 1da80f0e2eb..4ab79c64d47 100644 --- a/checks/raw/dangerous_workflow.go +++ b/checks/raw/dangerous_workflow.go @@ -23,7 +23,6 @@ import ( "github.com/ossf/scorecard/v4/checker" "github.com/ossf/scorecard/v4/checks/fileparser" - "github.com/ossf/scorecard/v4/clients" sce "github.com/ossf/scorecard/v4/errors" "github.com/ossf/scorecard/v4/finding" ) @@ -66,10 +65,10 @@ var ( ) // DangerousWorkflow retrieves the raw data for the DangerousWorkflow check. -func DangerousWorkflow(c clients.RepoClient) (checker.DangerousWorkflowData, error) { +func DangerousWorkflow(c *checker.CheckRequest) (checker.DangerousWorkflowData, error) { // data is shared across all GitHub workflows. 
var data checker.DangerousWorkflowData - err := fileparser.OnMatchingFileContentDo(c, fileparser.PathMatcher{ + err := fileparser.OnMatchingFileContentDo(c.RepoClient, fileparser.PathMatcher{ Pattern: ".github/workflows/*", CaseSensitive: false, }, validateGitHubActionWorkflowPatterns, &data) diff --git a/checks/raw/dangerous_workflow_test.go b/checks/raw/dangerous_workflow_test.go index 3e0e43a65ce..787f37f3311 100644 --- a/checks/raw/dangerous_workflow_test.go +++ b/checks/raw/dangerous_workflow_test.go @@ -15,6 +15,7 @@ package raw import ( + "context" "errors" "fmt" "os" @@ -24,6 +25,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" + "github.com/ossf/scorecard/v4/checker" mockrepo "github.com/ossf/scorecard/v4/clients/mockclients" ) @@ -166,7 +168,12 @@ func TestGithubDangerousWorkflow(t *testing.T) { return content, nil }) - dw, err := DangerousWorkflow(mockRepoClient) + req := &checker.CheckRequest{ + Ctx: context.Background(), + RepoClient: mockRepoClient, + } + + dw, err := DangerousWorkflow(req) if !errCmp(err, tt.expected.err) { t.Errorf(cmp.Diff(err, tt.expected.err, cmpopts.EquateErrors())) diff --git a/checks/raw/dependency_update_tool.go b/checks/raw/dependency_update_tool.go index 3af013b4088..04155eab607 100644 --- a/checks/raw/dependency_update_tool.go +++ b/checks/raw/dependency_update_tool.go @@ -15,6 +15,7 @@ package raw import ( + "errors" "fmt" "strings" @@ -28,7 +29,7 @@ const ( dependabotID = 49699333 ) -// DependencyUpdateTool is the exported name for Depdendency-Update-Tool. +// DependencyUpdateTool is the exported name for Dependency-Update-Tool. 
func DependencyUpdateTool(c clients.RepoClient) (checker.DependencyUpdateToolData, error) { var tools []checker.Tool err := fileparser.OnAllFilesDo(c, checkDependencyFileExists, &tools) @@ -42,7 +43,13 @@ func DependencyUpdateTool(c clients.RepoClient) (checker.DependencyUpdateToolDat commits, err := c.SearchCommits(clients.SearchCommitsOptions{Author: "dependabot[bot]"}) if err != nil { - return checker.DependencyUpdateToolData{}, fmt.Errorf("%w", err) + // TODO https://github.com/ossf/scorecard/issues/1709 + // some repo clients (e.g. local) don't currently have the ability to search commits, + // but some data is better than none. + if errors.Is(err, clients.ErrUnsupportedFeature) { + return checker.DependencyUpdateToolData{Tools: tools}, nil + } + return checker.DependencyUpdateToolData{}, fmt.Errorf("dependabot commit search: %w", err) } for i := range commits { @@ -85,9 +92,16 @@ var checkDependencyFileExists fileparser.DoWhileTrueOnFilename = func(name strin }, }) - // https://docs.renovatebot.com/configuration-options/ - case ".github/renovate.json", ".github/renovate.json5", ".renovaterc.json", "renovate.json", - "renovate.json5", ".renovaterc": + // https://docs.renovatebot.com/configuration-options/ + case "renovate.json", + "renovate.json5", + ".github/renovate.json", + ".github/renovate.json5", + ".gitlab/renovate.json", + ".gitlab/renovate.json5", + ".renovaterc", + ".renovaterc.json", + ".renovaterc.json5": *ptools = append(*ptools, checker.Tool{ Name: "RenovateBot", URL: asPointer("https://github.com/renovatebot/renovate"), @@ -113,19 +127,6 @@ var checkDependencyFileExists fileparser.DoWhileTrueOnFilename = func(name strin }, }, }) - case ".lift.toml", ".lift/config.toml": - *ptools = append(*ptools, checker.Tool{ - Name: "Sonatype Lift", - URL: asPointer("https://lift.sonatype.com"), - Desc: asPointer("Automated dependency updates. 
Multi-platform and multi-language."), - Files: []checker.File{ - { - Path: name, - Type: finding.FileTypeSource, - Offset: checker.OffsetDefault, - }, - }, - }) } // Continue iterating, even if we have found a tool. @@ -136,3 +137,7 @@ var checkDependencyFileExists fileparser.DoWhileTrueOnFilename = func(name strin func asPointer(s string) *string { return &s } + +func asBoolPointer(b bool) *bool { + return &b +} diff --git a/checks/raw/dependency_update_tool_test.go b/checks/raw/dependency_update_tool_test.go index 02a3128690b..501ac5f904f 100644 --- a/checks/raw/dependency_update_tool_test.go +++ b/checks/raw/dependency_update_tool_test.go @@ -27,7 +27,6 @@ import ( func Test_checkDependencyFileExists(t *testing.T) { t.Parallel() - //nolint tests := []struct { name string path string @@ -64,6 +63,18 @@ func Test_checkDependencyFileExists(t *testing.T) { want: true, wantErr: false, }, + { + name: ".gitlab/renovate.json", + path: ".gitlab/renovate.json", + want: true, + wantErr: false, + }, + { + name: ".gitlab/renovate.json5", + path: ".gitlab/renovate.json5", + want: true, + wantErr: false, + }, { name: ".renovaterc.json", path: ".renovaterc.json", @@ -97,13 +108,13 @@ func Test_checkDependencyFileExists(t *testing.T) { { name: ".lift.toml", path: ".lift.toml", - want: true, + want: false, // support removed wantErr: false, }, { name: ".lift/config.toml", path: ".lift/config.toml", - want: true, + want: false, // support removed wantErr: false, }, } @@ -130,7 +141,7 @@ func Test_checkDependencyFileExists(t *testing.T) { // TestDependencyUpdateTool tests the DependencyUpdateTool function. 
func TestDependencyUpdateTool(t *testing.T) { t.Parallel() - //nolint + //nolint:govet tests := []struct { name string wantErr bool diff --git a/checks/raw/fuzzing.go b/checks/raw/fuzzing.go index a8a161d9847..c26cd028906 100644 --- a/checks/raw/fuzzing.go +++ b/checks/raw/fuzzing.go @@ -25,17 +25,7 @@ import ( "github.com/ossf/scorecard/v4/clients" sce "github.com/ossf/scorecard/v4/errors" "github.com/ossf/scorecard/v4/finding" -) - -const ( - fuzzerOSSFuzz = "OSSFuzz" - fuzzerClusterFuzzLite = "ClusterFuzzLite" - oneFuzz = "OneFuzz" - fuzzerBuiltInGo = "GoBuiltInFuzzer" - fuzzerPropertyBasedHaskell = "HaskellPropertyBasedTesting" - fuzzerPropertyBasedJavaScript = "JavaScriptPropertyBasedTesting" - fuzzerPropertyBasedTypeScript = "TypeScriptPropertyBasedTesting" - // TODO: add more fuzzing check supports. + "github.com/ossf/scorecard/v4/internal/fuzzers" ) type filesWithPatternStr struct { @@ -47,22 +37,22 @@ type filesWithPatternStr struct { type languageFuzzConfig struct { URL, Desc *string - // Pattern is according to path.Match. - filePattern string - funcPattern, Name string // TODO: add more language fuzzing-related fields. + + // Patterns are according to path.Match. + filePatterns []string } -// Contains fuzzing speficications for programming languages. +// Contains fuzzing specifications for programming languages. // Please use the type Language defined in clients/languages.go rather than a raw string. var languageFuzzSpecs = map[clients.LanguageName]languageFuzzConfig{ // Default fuzz patterns for Go. 
clients.Go: { - filePattern: "*_test.go", - funcPattern: `func\s+Fuzz\w+\s*\(\w+\s+\*testing.F\)`, - Name: fuzzerBuiltInGo, - URL: asPointer("https://go.dev/doc/fuzz/"), + filePatterns: []string{"*_test.go"}, + funcPattern: `func\s+Fuzz\w+\s*\(\w+\s+\*testing.F\)`, + Name: fuzzers.BuiltInGo, + URL: asPointer("https://go.dev/doc/fuzz/"), Desc: asPointer( "Go fuzzing intelligently walks through the source code to report failures and find vulnerabilities."), }, @@ -80,53 +70,94 @@ var languageFuzzSpecs = map[clients.LanguageName]languageFuzzConfig{ // // This is not an exhaustive list. clients.Haskell: { - filePattern: "*.hs", + filePatterns: []string{"*.hs", "*.lhs"}, // Look for direct imports of QuickCheck, Hedgehog, validity, or SmallCheck, // or their indirect imports through the higher-level Hspec or Tasty testing frameworks. funcPattern: `import\s+(qualified\s+)?Test\.((Hspec|Tasty)\.)?(QuickCheck|Hedgehog|Validity|SmallCheck)`, - Name: fuzzerPropertyBasedHaskell, - Desc: asPointer( - "Property-based testing in Haskell generates test instances randomly or exhaustively " + - "and test that specific properties are satisfied."), + Name: fuzzers.PropertyBasedHaskell, + Desc: propertyBasedDescription("Haskell"), }, // Fuzz patterns for JavaScript and TypeScript based on property-based testing. // // Based on the import of one of these packages: - // * https://fast-check.dev/ + // * https://github.com/dubzzz/fast-check/tree/main/packages/fast-check#readme + // * https://github.com/dubzzz/fast-check/tree/main/packages/ava#readme + // * https://github.com/dubzzz/fast-check/tree/main/packages/jest#readme + // * https://github.com/dubzzz/fast-check/tree/main/packages/vitest#readme // // This is not an exhaustive list. clients.JavaScript: { - filePattern: "*.js", - // Look for direct imports of fast-check. 
- funcPattern: `(from\s+['"]fast-check['"]|require\(\s*['"]fast-check['"]\s*\))`, - Name: fuzzerPropertyBasedJavaScript, - Desc: asPointer( - "Property-based testing in JavaScript generates test instances randomly or exhaustively " + - "and test that specific properties are satisfied."), + filePatterns: []string{"*.js"}, + // Look for direct imports of fast-check and its test runners integrations. + funcPattern: `(from\s+['"](fast-check|@fast-check/(ava|jest|vitest))['"]|` + + `require\(\s*['"](fast-check|@fast-check/(ava|jest|vitest))['"]\s*\))`, + Name: fuzzers.PropertyBasedJavaScript, + Desc: propertyBasedDescription("JavaScript"), }, clients.TypeScript: { - filePattern: "*.ts", - // Look for direct imports of fast-check. - funcPattern: `(from\s+['"]fast-check['"]|require\(\s*['"]fast-check['"]\s*\))`, - Name: fuzzerPropertyBasedTypeScript, + filePatterns: []string{"*.ts"}, + // Look for direct imports of fast-check and its test runners integrations. + funcPattern: `(from\s+['"](fast-check|@fast-check/(ava|jest|vitest))['"]|` + + `require\(\s*['"](fast-check|@fast-check/(ava|jest|vitest))['"]\s*\))`, + Name: fuzzers.PropertyBasedTypeScript, + Desc: propertyBasedDescription("TypeScript"), + }, + clients.Python: { + filePatterns: []string{"*.py"}, + funcPattern: `import atheris`, + Name: fuzzers.PythonAtheris, + Desc: asPointer( + "Python fuzzing by way of Atheris"), + }, + clients.C: { + filePatterns: []string{"*.c"}, + funcPattern: `LLVMFuzzerTestOneInput`, + Name: fuzzers.CLibFuzzer, Desc: asPointer( - "Property-based testing in TypeScript generates test instances randomly or exhaustively " + - "and test that specific properties are satisfied."), + "Fuzzed with C LibFuzzer"), + }, + clients.Cpp: { + filePatterns: []string{"*.cc", "*.cpp"}, + funcPattern: `LLVMFuzzerTestOneInput`, + Name: fuzzers.CppLibFuzzer, + Desc: asPointer( + "Fuzzed with cpp LibFuzzer"), + }, + clients.Rust: { + filePatterns: []string{"*.rs"}, + funcPattern: `libfuzzer_sys`, + Name: 
fuzzers.RustCargoFuzz, + Desc: asPointer( + "Fuzzed with Cargo-fuzz"), + }, + clients.Java: { + filePatterns: []string{"*.java"}, + funcPattern: `com.code_intelligence.jazzer.api.FuzzedDataProvider;`, + Name: fuzzers.JavaJazzerFuzzer, + Desc: asPointer( + "Fuzzed with Jazzer fuzzer"), + }, + clients.Swift: { + filePatterns: []string{"*.swift"}, + funcPattern: `LLVMFuzzerTestOneInput`, + Name: fuzzers.SwiftLibFuzzer, + Desc: asPointer( + "Fuzzed with Swift LibFuzzer"), }, // TODO: add more language-specific fuzz patterns & configs. } // Fuzzing runs Fuzzing check. func Fuzzing(c *checker.CheckRequest) (checker.FuzzingData, error) { - var fuzzers []checker.Tool + var detectedFuzzers []checker.Tool usingCFLite, e := checkCFLite(c) if e != nil { return checker.FuzzingData{}, fmt.Errorf("%w", e) } if usingCFLite { - fuzzers = append(fuzzers, + detectedFuzzers = append(detectedFuzzers, checker.Tool{ - Name: fuzzerClusterFuzzLite, + Name: fuzzers.ClusterFuzzLite, URL: asPointer("https://github.com/google/clusterfuzzlite"), Desc: asPointer("continuous fuzzing solution that runs as part of Continuous Integration (CI) workflows"), // TODO: File. @@ -134,29 +165,14 @@ func Fuzzing(c *checker.CheckRequest) (checker.FuzzingData, error) { ) } - usingOneFuzz, e := checkOneFuzz(c) - if e != nil { - return checker.FuzzingData{}, fmt.Errorf("%w", e) - } - if usingOneFuzz { - fuzzers = append(fuzzers, - checker.Tool{ - Name: oneFuzz, - URL: asPointer("https://github.com/microsoft/onefuzz"), - Desc: asPointer("Enables continuous developer-driven fuzzing to proactively harden software prior to release."), - // TODO: File. 
- }, - ) - } - usingOSSFuzz, e := checkOSSFuzz(c) if e != nil { return checker.FuzzingData{}, fmt.Errorf("%w", e) } if usingOSSFuzz { - fuzzers = append(fuzzers, + detectedFuzzers = append(detectedFuzzers, checker.Tool{ - Name: fuzzerOSSFuzz, + Name: fuzzers.OSSFuzz, URL: asPointer("https://github.com/google/oss-fuzz"), Desc: asPointer("Continuous Fuzzing for Open Source Software"), // TODO: File. @@ -175,7 +191,7 @@ func Fuzzing(c *checker.CheckRequest) (checker.FuzzingData, error) { return checker.FuzzingData{}, fmt.Errorf("%w", e) } if usingFuzzFunc { - fuzzers = append(fuzzers, + detectedFuzzers = append(detectedFuzzers, checker.Tool{ Name: languageFuzzSpecs[lang].Name, URL: languageFuzzSpecs[lang].URL, @@ -185,7 +201,7 @@ func Fuzzing(c *checker.CheckRequest) (checker.FuzzingData, error) { ) } } - return checker.FuzzingData{Fuzzers: fuzzers}, nil + return checker.FuzzingData{Fuzzers: detectedFuzzers}, nil } func checkCFLite(c *checker.CheckRequest) (bool, error) { @@ -204,22 +220,6 @@ func checkCFLite(c *checker.CheckRequest) (bool, error) { return result, nil } -func checkOneFuzz(c *checker.CheckRequest) (bool, error) { - result := false - e := fileparser.OnMatchingFileContentDo(c.RepoClient, fileparser.PathMatcher{ - Pattern: "^\\.onefuzz$", - CaseSensitive: true, - }, func(path string, content []byte, args ...interface{}) (bool, error) { - result = true - return false, nil - }, nil) - if e != nil { - return result, fmt.Errorf("%w", e) - } - - return result, nil -} - func checkOSSFuzz(c *checker.CheckRequest) (bool, error) { if c.OssFuzzRepo == nil { return false, nil @@ -254,22 +254,26 @@ func checkFuzzFunc(c *checker.CheckRequest, lang clients.LanguageName) (bool, [] // Get patterns for file and func. // We use the file pattern in the matcher to match the test files, // and put the func pattern in var data to match file contents (func names). 
- filePattern, funcPattern := pattern.filePattern, pattern.funcPattern - matcher := fileparser.PathMatcher{ - Pattern: filePattern, - CaseSensitive: false, - } - data.pattern = funcPattern - err := fileparser.OnMatchingFileContentDo(c.RepoClient, matcher, getFuzzFunc, &data) - if err != nil { - return false, nil, fmt.Errorf("error when OnMatchingFileContentDo: %w", err) + filePatterns, funcPattern := pattern.filePatterns, pattern.funcPattern + var dataFiles []checker.File + for _, filePattern := range filePatterns { + matcher := fileparser.PathMatcher{ + Pattern: filePattern, + CaseSensitive: false, + } + data.pattern = funcPattern + err := fileparser.OnMatchingFileContentDo(c.RepoClient, matcher, getFuzzFunc, &data) + if err != nil { + return false, nil, fmt.Errorf("error when OnMatchingFileContentDo: %w", err) + } + dataFiles = append(dataFiles, data.files...) } - if len(data.files) == 0 { + if len(dataFiles) == 0 { // This means no fuzz funcs matched for this language. return false, nil, nil } - return true, data.files, nil + return true, dataFiles, nil } // This is the callback func for interface OnMatchingFileContentDo @@ -320,12 +324,19 @@ func getProminentLanguages(langs []clients.Language) []clients.LanguageName { // This var can stay as an int, no need for a precise float value. avgLoC := totalLoC / numLangs // Languages that have lines of code above average will be considered prominent. 
+ prominentThreshold := avgLoC / 4.0 ret := []clients.LanguageName{} for lName, loC := range langMap { - if loC >= avgLoC { + if loC >= prominentThreshold { lang := clients.LanguageName(strings.ToLower(string(lName))) ret = append(ret, lang) } } return ret } + +func propertyBasedDescription(language string) *string { + s := fmt.Sprintf("Property-based testing in %s generates test instances randomly or exhaustively "+ + "and test that specific properties are satisfied.", language) + return &s +} diff --git a/checks/raw/fuzzing_test.go b/checks/raw/fuzzing_test.go index c0a06edfe72..e409b401447 100644 --- a/checks/raw/fuzzing_test.go +++ b/checks/raw/fuzzing_test.go @@ -30,7 +30,7 @@ import ( // Test_checkOSSFuzz is a test function for checkOSSFuzz. func Test_checkOSSFuzz(t *testing.T) { t.Parallel() - //nolint + //nolint:govet tests := []struct { name string want bool @@ -76,7 +76,6 @@ func Test_checkOSSFuzz(t *testing.T) { mockFuzz.EXPECT().Search(gomock.Any()). DoAndReturn(func(q clients.SearchRequest) (clients.SearchResponse, error) { if tt.wantErr { - //nolint return clients.SearchResponse{}, errors.New("error") } return tt.response, nil @@ -103,69 +102,10 @@ func Test_checkOSSFuzz(t *testing.T) { } } -// Test_checkOneFuzz is a test function for checkOneFuzz. 
-func Test_checkOneFuzz(t *testing.T) { - t.Parallel() - //nolint - tests := []struct { - name string - want bool - wantErr bool - fileName []string - }{ - { - name: "Test_checkOneFuzz success", - want: true, - wantErr: false, - fileName: []string{".onefuzz"}, - }, - { - name: "Test_checkOneFuzz not found", - want: false, - wantErr: false, - fileName: []string{}, - }, - { - name: "Test_checkOneFuzz failure", - want: false, - wantErr: true, - fileName: []string{".onefuzz"}, - }, - } - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - defer ctrl.Finish() - mockFuzz := mockrepo.NewMockRepoClient(ctrl) - mockFuzz.EXPECT().ListFiles(gomock.Any()).Return(tt.fileName, nil).AnyTimes() - mockFuzz.EXPECT().GetFileContent(gomock.Any()).DoAndReturn(func(f string) (string, error) { - if tt.wantErr { - //nolint - return "", errors.New("error") - } - return "", nil - }).AnyTimes() - req := checker.CheckRequest{ - RepoClient: mockFuzz, - } - got, err := checkOneFuzz(&req) - if (err != nil) != tt.wantErr { - t.Errorf("checkOneFuzz() error = %v, wantErr %v", err, tt.wantErr) - return - } - if got != tt.want { - t.Errorf("checkOneFuzz() = %v, want %v for test %v", got, tt.want, tt.name) - } - }) - } -} - // Test_checkCFLite is a test function for checkCFLite. 
func Test_checkCFLite(t *testing.T) { t.Parallel() - //nolint + //nolint:govet tests := []struct { name string want bool @@ -197,7 +137,6 @@ func Test_checkCFLite(t *testing.T) { mockFuzz.EXPECT().ListFiles(gomock.Any()).Return(tt.fileName, nil).AnyTimes() mockFuzz.EXPECT().GetFileContent(gomock.Any()).DoAndReturn(func(f string) (string, error) { if tt.wantErr { - //nolint return "", errors.New("error") } return tt.fileContent, nil @@ -219,7 +158,7 @@ func Test_checkCFLite(t *testing.T) { func Test_fuzzFileAndFuncMatchPattern(t *testing.T) { t.Parallel() - //nolint + //nolint:govet tests := []struct { name string expectedFileMatch bool @@ -261,8 +200,8 @@ func Test_fuzzFileAndFuncMatchPattern(t *testing.T) { expectedFileMatch: false, expectedFuncMatch: false, lang: clients.LanguageName("not_a_supported_one"), - fileName: "a_fuzz_test.py", - fileContent: `def NotSupported (foo)`, + fileName: "a_fuzz_test.php", + fileContent: `function function-not-supported (foo)`, wantErr: true, }, } @@ -274,16 +213,19 @@ func Test_fuzzFileAndFuncMatchPattern(t *testing.T) { if !ok && !tt.wantErr { t.Errorf("retrieve supported language error") } - fileMatchPattern := langSpecs.filePattern - fileMatch, err := path.Match(fileMatchPattern, tt.fileName) - if (fileMatch != tt.expectedFileMatch || err != nil) && !tt.wantErr { - t.Errorf("fileMatch = %v, want %v for %v", fileMatch, tt.expectedFileMatch, tt.name) + var found bool + for _, fileMatchPattern := range langSpecs.filePatterns { + fileMatch, err := path.Match(fileMatchPattern, tt.fileName) + if (fileMatch != tt.expectedFileMatch || err != nil) && !tt.wantErr { + t.Errorf("fileMatch = %v, want %v for %v", fileMatch, tt.expectedFileMatch, tt.name) + } + funcRegexPattern := langSpecs.funcPattern + r := regexp.MustCompile(funcRegexPattern) + found = found || r.MatchString(tt.fileContent) } - funcRegexPattern := langSpecs.funcPattern - r := regexp.MustCompile(funcRegexPattern) - found := r.MatchString(tt.fileContent) + if (found != 
tt.expectedFuncMatch) && !tt.wantErr { - t.Errorf("funcMatch = %v, want %v for %v", fileMatch, tt.expectedFileMatch, tt.name) + t.Errorf("found = %v, want %v for %v", found, tt.expectedFileMatch, tt.name) } }) } @@ -291,7 +233,7 @@ func Test_fuzzFileAndFuncMatchPattern(t *testing.T) { func Test_checkFuzzFunc(t *testing.T) { t.Parallel() - //nolint + //nolint:govet tests := []struct { name string want bool @@ -437,6 +379,30 @@ func Test_checkFuzzFunc(t *testing.T) { }, fileContent: "import fc from \"fast-check\";", }, + { + name: "JavaScript fast-check scoped via require", + want: true, + fileName: []string{"main.spec.js"}, + langs: []clients.Language{ + { + Name: clients.JavaScript, + NumLines: 50, + }, + }, + fileContent: "const { fc, testProp } = require('@fast-check/ava');", + }, + { + name: "JavaScript fast-check scoped via import", + want: true, + fileName: []string{"main.spec.js"}, + langs: []clients.Language{ + { + Name: clients.JavaScript, + NumLines: 50, + }, + }, + fileContent: "import { fc, test } from \"@fast-check/jest\";", + }, { name: "JavaScript with no property-based testing", want: false, @@ -474,6 +440,30 @@ func Test_checkFuzzFunc(t *testing.T) { }, fileContent: "import fc from \"fast-check\";", }, + { + name: "TypeScript fast-check scoped via require", + want: true, + fileName: []string{"main.spec.ts"}, + langs: []clients.Language{ + { + Name: clients.TypeScript, + NumLines: 50, + }, + }, + fileContent: "const { fc, testProp } = require('@fast-check/ava');", + }, + { + name: "TypeScript fast-check scoped via import", + want: true, + fileName: []string{"main.spec.ts"}, + langs: []clients.Language{ + { + Name: clients.TypeScript, + NumLines: 50, + }, + }, + fileContent: "import { fc, test } from \"@fast-check/vitest\";", + }, { name: "TypeScript with no property-based testing", want: false, @@ -498,7 +488,6 @@ func Test_checkFuzzFunc(t *testing.T) { mockClient.EXPECT().ListFiles(gomock.Any()).Return(tt.fileName, nil).AnyTimes() 
mockClient.EXPECT().GetFileContent(gomock.Any()).DoAndReturn(func(f string) ([]byte, error) { if tt.wantErr { - //nolint return nil, errors.New("error") } return []byte(tt.fileContent), nil @@ -518,7 +507,6 @@ func Test_checkFuzzFunc(t *testing.T) { func Test_getProminentLanguages(t *testing.T) { t.Parallel() - //nolint tests := []struct { name string languages []clients.Language diff --git a/checks/raw/gitlab/packaging_test.go b/checks/raw/gitlab/packaging_test.go index 49fd91fe1f5..1e66f045200 100644 --- a/checks/raw/gitlab/packaging_test.go +++ b/checks/raw/gitlab/packaging_test.go @@ -27,7 +27,7 @@ import ( func TestGitlabPackagingYamlCheck(t *testing.T) { t.Parallel() - //nolint + //nolint:govet tests := []struct { name string lineNumber uint @@ -100,7 +100,7 @@ func TestGitlabPackagingYamlCheck(t *testing.T) { func TestGitlabPackagingPackager(t *testing.T) { t.Parallel() - //nolint + //nolint:govet tests := []struct { name string lineNumber uint @@ -136,9 +136,8 @@ func TestGitlabPackagingPackager(t *testing.T) { moqRepoClient.EXPECT().GetFileContent(tt.filename). DoAndReturn(func(b string) ([]byte, error) { - //nolint: errcheck - content, _ := os.ReadFile(b) - return content, nil + content, err := os.ReadFile(b) + return content, err }).AnyTimes() if tt.exists { @@ -150,8 +149,10 @@ func TestGitlabPackagingPackager(t *testing.T) { Repo: moqRepo, } - //nolint: errcheck - packagingData, _ := Packaging(&req) + packagingData, err := Packaging(&req) + if err != nil { + t.Fatalf("unexpected err: %v", err) + } if !tt.exists { if len(packagingData.Packages) != 0 { diff --git a/checks/raw/license.go b/checks/raw/license.go index 618d24fa027..4375c1a3c9a 100644 --- a/checks/raw/license.go +++ b/checks/raw/license.go @@ -116,6 +116,11 @@ func License(c *checker.CheckRequest) (checker.LicenseData, error) { // repo API for licenses is supported // go the work and return from immediate (no searching repo). 
case lerr == nil: + // licenses API may be supported, but platform might not detect license same way we do + // fallback to our local file logic + if len(licensesFound) == 0 { + break + } for _, v := range licensesFound { results.LicenseFiles = append(results.LicenseFiles, checker.LicenseFile{ diff --git a/checks/raw/license_test.go b/checks/raw/license_test.go index c6055ee90ec..bc54e26c98c 100644 --- a/checks/raw/license_test.go +++ b/checks/raw/license_test.go @@ -646,7 +646,6 @@ func TestLicenseFileCheck(t *testing.T) { }, } - //nolint: paralleltest for _, tt := range tests { tt := tt // Re-initializing variable so it is not changed while executing the closure below for _, ext := range tt.extensions { diff --git a/checks/raw/maintained_test.go b/checks/raw/maintained_test.go index 07f3b691d51..5dc5db825ec 100644 --- a/checks/raw/maintained_test.go +++ b/checks/raw/maintained_test.go @@ -26,6 +26,7 @@ import ( mockrepo "github.com/ossf/scorecard/v4/clients/mockclients" ) +//nolint:paralleltest // need to break into separate tests func TestMaintained(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() @@ -78,7 +79,7 @@ func TestMaintained(t *testing.T) { }) t.Run("returns error if IsArchived fails", func(t *testing.T) { - mockRepoClient.EXPECT().IsArchived().Return(false, fmt.Errorf("some error")) // nolint: goerr113 + mockRepoClient.EXPECT().IsArchived().Return(false, fmt.Errorf("some error")) _, err := Maintained(req) if err == nil { @@ -88,7 +89,7 @@ func TestMaintained(t *testing.T) { t.Run("returns error if ListCommits fails", func(t *testing.T) { mockRepoClient.EXPECT().IsArchived().Return(false, nil) - mockRepoClient.EXPECT().ListCommits().Return(nil, fmt.Errorf("some error")) // nolint: goerr113 + mockRepoClient.EXPECT().ListCommits().Return(nil, fmt.Errorf("some error")) _, err := Maintained(req) if err == nil { @@ -99,7 +100,7 @@ func TestMaintained(t *testing.T) { t.Run("returns error if ListIssues fails", func(t *testing.T) { 
mockRepoClient.EXPECT().IsArchived().Return(false, nil) mockRepoClient.EXPECT().ListCommits().Return([]clients.Commit{}, nil) - mockRepoClient.EXPECT().ListIssues().Return(nil, fmt.Errorf("some error")) // nolint: goerr113 + mockRepoClient.EXPECT().ListIssues().Return(nil, fmt.Errorf("some error")) _, err := Maintained(req) if err == nil { @@ -111,7 +112,7 @@ func TestMaintained(t *testing.T) { mockRepoClient.EXPECT().IsArchived().Return(false, nil) mockRepoClient.EXPECT().ListCommits().Return([]clients.Commit{}, nil) mockRepoClient.EXPECT().ListIssues().Return([]clients.Issue{}, nil) - mockRepoClient.EXPECT().GetCreatedAt().Return(time.Time{}, fmt.Errorf("some error")) // nolint: goerr113 + mockRepoClient.EXPECT().GetCreatedAt().Return(time.Time{}, fmt.Errorf("some error")) _, err := Maintained(req) if err == nil { diff --git a/checks/raw/permissions.go b/checks/raw/permissions.go index 2bbc29a923e..c3c7132db44 100644 --- a/checks/raw/permissions.go +++ b/checks/raw/permissions.go @@ -93,7 +93,7 @@ var validateGitHubActionTokenPermissions fileparser.DoWhileTrueOnFileContent = f } // 1. Top-level permission definitions. - //nolint + //nolint:lll // https://docs.github.com/en/actions/reference/authentication-in-a-workflow#example-1-passing-the-github_token-as-an-input, // https://github.blog/changelog/2021-04-20-github-actions-control-permissions-for-github_token/, // https://docs.github.com/en/actions/reference/authentication-in-a-workflow#modifying-the-permissions-for-the-github_token. @@ -352,21 +352,20 @@ func createIgnoredPermissions(workflow *actionlint.Workflow, fp string, // Scanning tool run externally and SARIF file uploaded. func isSARIFUploadWorkflow(workflow *actionlint.Workflow, fp string, pdata *permissionCbData) bool { - // TODO: some third party tools may upload directly thru their actions. + // TODO: some third party tools may upload directly through their actions. // Very unlikely. // See https://github.com/marketplace for tools. 
return isAllowedWorkflow(workflow, fp, pdata) } func isAllowedWorkflow(workflow *actionlint.Workflow, fp string, pdata *permissionCbData) bool { + //nolint:lll allowlist := map[string]bool{ - //nolint // CodeQl analysis workflow automatically sends sarif file to GitHub. // https://docs.github.com/en/code-security/secure-coding/integrating-with-code-scanning/uploading-a-sarif-file-to-github#about-sarif-file-uploads-for-code-scanning. // `The CodeQL action uploads the SARIF file automatically when it completes analysis`. "github/codeql-action/analyze": true, - //nolint // Third-party scanning tools use the SARIF-upload action from code-ql. // https://docs.github.com/en/code-security/secure-coding/integrating-with-code-scanning/uploading-a-sarif-file-to-github#uploading-a-code-scanning-analysis-with-github-actions // We only support CodeQl today. @@ -473,7 +472,7 @@ func isReleasingWorkflow(workflow *actionlint.Workflow, fp string, pdata *permis // Commonly JavaScript packages, but supports multiple ecosystems Steps: []*fileparser.JobMatcherStep{ { - Run: "npx.*semantic-release", + Run: "(npx|pnpm|yarn).*semantic-release", }, }, LogText: "candidate publishing workflow using semantic-release", diff --git a/checks/raw/pinned_dependencies.go b/checks/raw/pinned_dependencies.go index 0b93181443e..be251571dac 100644 --- a/checks/raw/pinned_dependencies.go +++ b/checks/raw/pinned_dependencies.go @@ -15,7 +15,9 @@ package raw import ( + "errors" "fmt" + "path/filepath" "reflect" "regexp" "strings" @@ -27,6 +29,7 @@ import ( "github.com/ossf/scorecard/v4/checks/fileparser" sce "github.com/ossf/scorecard/v4/errors" "github.com/ossf/scorecard/v4/finding" + "github.com/ossf/scorecard/v4/remediation" ) // PinningDependencies checks for (un)pinned dependencies. 
@@ -109,6 +112,21 @@ func collectDockerfileInsecureDownloads(c *checker.CheckRequest, r *checker.Pinn }, validateDockerfileInsecureDownloads, r) } +func fileIsInVendorDir(pathfn string) bool { + cleanedPath := filepath.Clean(pathfn) + splitCleanedPath := strings.Split(cleanedPath, "/") + + for _, d := range splitCleanedPath { + if strings.EqualFold(d, "vendor") { + return true + } + if strings.EqualFold(d, "third_party") { + return true + } + } + return false +} + var validateDockerfileInsecureDownloads fileparser.DoWhileTrueOnFileContent = func( pathfn string, content []byte, @@ -120,6 +138,10 @@ var validateDockerfileInsecureDownloads fileparser.DoWhileTrueOnFileContent = fu len(args), errInvalidArgLength) } + if fileIsInVendorDir(pathfn) { + return true, nil + } + pdata := dataAsPinnedDependenciesPointer(args[0]) // Return early if this is not a docker file. @@ -140,7 +162,6 @@ var validateDockerfileInsecureDownloads fileparser.DoWhileTrueOnFileContent = fu // Walk the Dockerfile's AST. 
taintedFiles := make(map[string]bool) for i := range res.AST.Children { - var bytes []byte child := res.AST.Children[i] cmdType := child.Value @@ -150,21 +171,33 @@ var validateDockerfileInsecureDownloads fileparser.DoWhileTrueOnFileContent = fu continue } - var valueList []string - for n := child.Next; n != nil; n = n.Next { - valueList = append(valueList, n.Value) - } + if len(child.Heredocs) > 0 { + startOffset := 1 + for _, heredoc := range child.Heredocs { + cmd := heredoc.Content + lineCount := startOffset + strings.Count(cmd, "\n") + if err := validateShellFile(pathfn, uint(child.StartLine+startOffset)-1, uint(child.StartLine+lineCount)-2, + []byte(cmd), taintedFiles, pdata); err != nil { + return false, err + } + startOffset += lineCount + } + } else { + var valueList []string + for n := child.Next; n != nil; n = n.Next { + valueList = append(valueList, n.Value) + } - if len(valueList) == 0 { - return false, sce.WithMessage(sce.ErrScorecardInternal, errInternalInvalidDockerFile.Error()) - } + if len(valueList) == 0 { + return false, sce.WithMessage(sce.ErrScorecardInternal, errInternalInvalidDockerFile.Error()) + } - // Build a file content. - cmd := strings.Join(valueList, " ") - bytes = append(bytes, cmd...) - if err := validateShellFile(pathfn, uint(child.StartLine)-1, uint(child.EndLine)-1, - bytes, taintedFiles, pdata); err != nil { - return false, err + // Build a file content. 
+ cmd := strings.Join(valueList, " ") + if err := validateShellFile(pathfn, uint(child.StartLine)-1, uint(child.EndLine)-1, + []byte(cmd), taintedFiles, pdata); err != nil { + return false, err + } } } @@ -187,10 +220,22 @@ func isDockerfile(pathfn string, content []byte) bool { } func collectDockerfilePinning(c *checker.CheckRequest, r *checker.PinningDependenciesData) error { - return fileparser.OnMatchingFileContentDo(c.RepoClient, fileparser.PathMatcher{ + err := fileparser.OnMatchingFileContentDo(c.RepoClient, fileparser.PathMatcher{ Pattern: "*Dockerfile*", CaseSensitive: false, }, validateDockerfilesPinning, r) + if err != nil { + return err + } + + for i := range r.Dependencies { + rr := &r.Dependencies[i] + if !*rr.Pinned { + remediate := remediation.CreateDockerfilePinningRemediation(rr, remediation.CraneDigester{}) + rr.Remediation = remediate + } + } + return nil } var validateDockerfilesPinning fileparser.DoWhileTrueOnFileContent = func( @@ -205,6 +250,11 @@ var validateDockerfilesPinning fileparser.DoWhileTrueOnFileContent = func( return false, fmt.Errorf( "validateDockerfilesPinning requires exactly 2 arguments: got %v: %w", len(args), errInvalidArgLength) } + + if fileIsInVendorDir(pathfn) { + return true, nil + } + pdata := dataAsPinnedDependenciesPointer(args[0]) // Return early if this is not a dockerfile. @@ -261,7 +311,6 @@ var validateDockerfilesPinning fileparser.DoWhileTrueOnFileContent = func( if pinned || regex.MatchString(name) { // Record the asName. 
pinnedAsNames[asName] = true - continue } pdata.Dependencies = append(pdata.Dependencies, @@ -275,6 +324,7 @@ var validateDockerfilesPinning fileparser.DoWhileTrueOnFileContent = func( }, Name: asPointer(name), PinnedAt: asPointer(asName), + Pinned: asBoolPointer(pinnedAsNames[asName]), Type: checker.DependencyUseTypeDockerfileContainerImage, }, ) @@ -283,34 +333,33 @@ var validateDockerfilesPinning fileparser.DoWhileTrueOnFileContent = func( case len(valueList) == 1: name := valueList[0] pinned := pinnedAsNames[name] - if !pinned && !regex.MatchString(name) { - dep := checker.Dependency{ - Location: &checker.File{ - Path: pathfn, - Type: finding.FileTypeSource, - Offset: uint(child.StartLine), - EndOffset: uint(child.EndLine), - Snippet: child.Original, - }, - Type: checker.DependencyUseTypeDockerfileContainerImage, - } - parts := strings.SplitN(name, ":", 2) - if len(parts) > 0 { - dep.Name = asPointer(parts[0]) - if len(parts) > 1 { - dep.PinnedAt = asPointer(parts[1]) - } + + dep := checker.Dependency{ + Location: &checker.File{ + Path: pathfn, + Type: finding.FileTypeSource, + Offset: uint(child.StartLine), + EndOffset: uint(child.EndLine), + Snippet: child.Original, + }, + Pinned: asBoolPointer(pinned || regex.MatchString(name)), + Type: checker.DependencyUseTypeDockerfileContainerImage, + } + parts := strings.SplitN(name, ":", 2) + if len(parts) > 0 { + dep.Name = asPointer(parts[0]) + if len(parts) > 1 { + dep.PinnedAt = asPointer(parts[1]) } - pdata.Dependencies = append(pdata.Dependencies, dep) } - + pdata.Dependencies = append(pdata.Dependencies, dep) default: // That should not happen. return false, sce.WithMessage(sce.ErrScorecardInternal, errInternalInvalidDockerFile.Error()) } } - //nolint + //nolint:lll // The file need not have a FROM statement, // https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/dockerfiles/partials/jupyter.partial.Dockerfile. 
@@ -361,6 +410,7 @@ var validateGitHubWorkflowIsFreeOfInsecureDownloads fileparser.DoWhileTrueOnFile jobName = fileparser.GetJobName(job) } taintedFiles := make(map[string]bool) + for _, step := range job.Steps { step := step if !fileparser.IsStepExecKind(step, actionlint.ExecKindRun) { @@ -383,6 +433,23 @@ var validateGitHubWorkflowIsFreeOfInsecureDownloads fileparser.DoWhileTrueOnFile // https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#jobsjob_idstepsrun. shell, err := fileparser.GetShellForStep(step, job) if err != nil { + var elementError *checker.ElementError + if errors.As(err, &elementError) { + // Add the workflow name and step ID to the element + lineStart := uint(step.Pos.Line) + elementError.Location = finding.Location{ + Path: pathfn, + Snippet: elementError.Location.Snippet, + LineStart: &lineStart, + Type: finding.FileTypeSource, + } + + pdata.ProcessingErrors = append(pdata.ProcessingErrors, *elementError) + + // continue instead of break because other `run` steps may declare + // a valid shell we can scan + continue + } return false, err } // Skip unsupported shells. We don't support Windows shells or some Unix shells. @@ -406,10 +473,24 @@ var validateGitHubWorkflowIsFreeOfInsecureDownloads fileparser.DoWhileTrueOnFile // Check pinning of github actions in workflows. 
func collectGitHubActionsWorkflowPinning(c *checker.CheckRequest, r *checker.PinningDependenciesData) error { - return fileparser.OnMatchingFileContentDo(c.RepoClient, fileparser.PathMatcher{ + err := fileparser.OnMatchingFileContentDo(c.RepoClient, fileparser.PathMatcher{ Pattern: ".github/workflows/*", CaseSensitive: true, }, validateGitHubActionWorkflow, r) + if err != nil { + return err + } + //nolint:errcheck + remediationMetadata, _ := remediation.New(c) + + for i := range r.Dependencies { + rr := &r.Dependencies[i] + if !*rr.Pinned { + remediate := remediationMetadata.CreateWorkflowPinningRemediation(rr.Location.Path) + rr.Remediation = remediate + } + } + return nil } // validateGitHubActionWorkflow checks if the workflow file contains unpinned actions. Returns true if the check @@ -470,26 +551,25 @@ var validateGitHubActionWorkflow fileparser.DoWhileTrueOnFileContent = func( continue } - if !isActionDependencyPinned(execAction.Uses.Value) { - dep := checker.Dependency{ - Location: &checker.File{ - Path: pathfn, - Type: finding.FileTypeSource, - Offset: uint(execAction.Uses.Pos.Line), - EndOffset: uint(execAction.Uses.Pos.Line), // `Uses` always span a single line. - Snippet: execAction.Uses.Value, - }, - Type: checker.DependencyUseTypeGHAction, - } - parts := strings.SplitN(execAction.Uses.Value, "@", 2) - if len(parts) > 0 { - dep.Name = asPointer(parts[0]) - if len(parts) > 1 { - dep.PinnedAt = asPointer(parts[1]) - } + dep := checker.Dependency{ + Location: &checker.File{ + Path: pathfn, + Type: finding.FileTypeSource, + Offset: uint(execAction.Uses.Pos.Line), + EndOffset: uint(execAction.Uses.Pos.Line), // `Uses` always span a single line. 
+ Snippet: execAction.Uses.Value, + }, + Pinned: asBoolPointer(isActionDependencyPinned(execAction.Uses.Value)), + Type: checker.DependencyUseTypeGHAction, + } + parts := strings.SplitN(execAction.Uses.Value, "@", 2) + if len(parts) > 0 { + dep.Name = asPointer(parts[0]) + if len(parts) > 1 { + dep.PinnedAt = asPointer(parts[1]) } - pdata.Dependencies = append(pdata.Dependencies, dep) } + pdata.Dependencies = append(pdata.Dependencies, dep) } } diff --git a/checks/raw/pinned_dependencies_test.go b/checks/raw/pinned_dependencies_test.go index d1bf323d6c9..82b0588511c 100644 --- a/checks/raw/pinned_dependencies_test.go +++ b/checks/raw/pinned_dependencies_test.go @@ -15,21 +15,26 @@ package raw import ( + "fmt" "os" + "path/filepath" "strings" "testing" + "github.com/golang/mock/gomock" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/ossf/scorecard/v4/checker" + mockrepo "github.com/ossf/scorecard/v4/clients/mockclients" + "github.com/ossf/scorecard/v4/rule" scut "github.com/ossf/scorecard/v4/utests" ) func TestGithubWorkflowPinning(t *testing.T) { t.Parallel() - //nolint + //nolint:govet tests := []struct { warns int err error @@ -65,6 +70,11 @@ func TestGithubWorkflowPinning(t *testing.T) { name: "Matrix as expression", filename: "./testdata/.github/workflows/github-workflow-matrix-expression.yaml", }, + { + name: "Can't detect OS, but still detects unpinned Actions", + filename: "./testdata/.github/workflows/github-workflow-unknown-os.yaml", + warns: 2, // 1 in job with unknown OS, 1 in job with known OS + }, } for _, tt := range tests { tt := tt // Re-initializing variable so it is not changed while executing the closure below @@ -92,14 +102,17 @@ func TestGithubWorkflowPinning(t *testing.T) { return } - if tt.warns != len(r.Dependencies) { - t.Errorf("expected %v. Got %v", tt.warns, len(r.Dependencies)) + unpinned := countUnpinned(r.Dependencies) + + if tt.warns != unpinned { + t.Errorf("expected %v. 
Got %v", tt.warns, unpinned) } }) } } func TestGithubWorkflowPinningPattern(t *testing.T) { + t.Parallel() tests := []struct { desc string uses string @@ -111,7 +124,7 @@ func TestGithubWorkflowPinningPattern(t *testing.T) { ispinned: false, }, { - desc: "hecking out mutable tag", + desc: "checking out mutable tag", uses: "actions/checkout@v3.2.0", ispinned: false, }, @@ -131,12 +144,12 @@ func TestGithubWorkflowPinningPattern(t *testing.T) { ispinned: false, }, { - desc: "checking out specific commmit from github with truncated SHA-1", + desc: "checking out specific commit from github with truncated SHA-1", uses: "actions/checkout@a81bbbf", ispinned: false, }, { - desc: "checking out specific commmit from github with SHA-1", + desc: "checking out specific commit from github with SHA-1", uses: "actions/checkout@a81bbbf8298c0fa03ea29cdc473d45769f953675", ispinned: true, }, @@ -146,14 +159,12 @@ func TestGithubWorkflowPinningPattern(t *testing.T) { ispinned: true, }, { - desc: "non-github docker image pinned by digest", - //nolint:lll + desc: "non-github docker image pinned by digest", uses: "docker://gcr.io/distroless/static-debian11@sha256:9e6f8952f12974d088f648ed6252ea1887cdd8641719c8acd36bf6d2537e71c0", ispinned: true, }, { - desc: "non-github docker image pinned to mutable tag", - //nolint:lll + desc: "non-github docker image pinned to mutable tag", uses: "docker://gcr.io/distroless/static-debian11:sha256-3876708467ad6f38f263774aa107d331e8de6558a2874aa223b96fc0d9dfc820.sig", ispinned: false, }, @@ -183,7 +194,7 @@ func TestGithubWorkflowPinningPattern(t *testing.T) { func TestNonGithubWorkflowPinning(t *testing.T) { t.Parallel() - //nolint + //nolint:govet tests := []struct { warns int err error @@ -213,6 +224,11 @@ func TestNonGithubWorkflowPinning(t *testing.T) { filename: "./testdata/.github/workflows/workflow-mix-pinned-and-non-pinned-non-github.yaml", warns: 1, }, + { + name: "Can't detect OS, but still detects unpinned Actions", + filename: 
"./testdata/.github/workflows/github-workflow-unknown-os.yaml", + warns: 2, // 1 in job with unknown OS, 1 in job with known OS + }, } for _, tt := range tests { tt := tt // Re-initializing variable so it is not changed while executing the closure below @@ -241,8 +257,10 @@ func TestNonGithubWorkflowPinning(t *testing.T) { return } - if tt.warns != len(r.Dependencies) { - t.Errorf("expected %v. Got %v", tt.warns, len(r.Dependencies)) + unpinned := countUnpinned(r.Dependencies) + + if tt.warns != unpinned { + t.Errorf("expected %v. Got %v", tt.warns, unpinned) } }) } @@ -251,17 +269,24 @@ func TestNonGithubWorkflowPinning(t *testing.T) { func TestGithubWorkflowPkgManagerPinning(t *testing.T) { t.Parallel() - //nolint + //nolint:govet tests := []struct { - warns int - err error - name string - filename string + unpinned int + processingErrors int + err error + name string + filename string }{ { name: "npm packages without verification", filename: "./testdata/.github/workflows/github-workflow-pkg-managers.yaml", - warns: 49, + unpinned: 49, + }, + { + name: "Can't identify OS but doesn't crash", + filename: "./testdata/.github/workflows/github-workflow-unknown-os.yaml", + processingErrors: 1, // job with unknown OS is skipped + unpinned: 1, // only 1 in job with known OS, since other job is skipped }, } for _, tt := range tests { @@ -288,8 +313,14 @@ func TestGithubWorkflowPkgManagerPinning(t *testing.T) { return } - if tt.warns != len(r.Dependencies) { - t.Errorf("expected %v. Got %v", tt.warns, len(r.Dependencies)) + unpinned := countUnpinned(r.Dependencies) + + if tt.unpinned != unpinned { + t.Errorf("expected %v unpinned. Got %v", tt.unpinned, unpinned) + } + + if tt.processingErrors != len(r.ProcessingErrors) { + t.Errorf("expected %v processing errors. 
Got %v", tt.processingErrors, len(r.ProcessingErrors)) } }) } @@ -298,7 +329,7 @@ func TestGithubWorkflowPkgManagerPinning(t *testing.T) { func TestDockerfilePinning(t *testing.T) { t.Parallel() - //nolint + //nolint:govet tests := []struct { warns int err error @@ -307,36 +338,46 @@ func TestDockerfilePinning(t *testing.T) { }{ { name: "invalid dockerfile", - filename: "./testdata/Dockerfile-invalid", + filename: "Dockerfile-invalid", }, { name: "invalid dockerfile sh", - filename: "../testdata/script-sh", + filename: "../../testdata/script-sh", }, { name: "empty file", - filename: "./testdata/Dockerfile-empty", + filename: "Dockerfile-empty", }, { name: "comments only", - filename: "./testdata/Dockerfile-comments", + filename: "Dockerfile-comments", }, { name: "Pinned dockerfile", - filename: "./testdata/Dockerfile-pinned", + filename: "Dockerfile-pinned", }, { name: "Pinned dockerfile as", - filename: "./testdata/Dockerfile-pinned-as", + filename: "Dockerfile-pinned-as", }, { name: "Non-pinned dockerfile as", - filename: "./testdata/Dockerfile-not-pinned-as", + filename: "Dockerfile-not-pinned-as", warns: 2, }, + { + name: "Non-pinned dockerfile but in vendor, ie: 0 warns", + filename: "vendor/Dockerfile-not-pinned-as", + warns: 0, + }, { name: "Non-pinned dockerfile", - filename: "./testdata/Dockerfile-not-pinned", + filename: "Dockerfile-not-pinned", + warns: 1, + }, + { + name: "Parser error doesn't affect docker image pinning", + filename: "Dockerfile-not-pinned-with-parser-error", warns: 1, }, } @@ -349,14 +390,14 @@ func TestDockerfilePinning(t *testing.T) { if tt.filename == "" { content = make([]byte, 0) } else { - content, err = os.ReadFile(tt.filename) + content, err = os.ReadFile(filepath.Join("testdata", tt.filename)) if err != nil { t.Errorf("cannot read file: %v", err) } } var r checker.PinningDependenciesData - _, err = validateDockerfilesPinning(tt.filename, content, &r) + _, err = validateDockerfilesPinning(filepath.Join("testdata", 
tt.filename), content, &r) if !errCmp(err, tt.err) { t.Errorf(cmp.Diff(err, tt.err, cmpopts.EquateErrors())) } @@ -365,8 +406,61 @@ func TestDockerfilePinning(t *testing.T) { return } - if tt.warns != len(r.Dependencies) { - t.Errorf("expected %v. Got %v", tt.warns, len(r.Dependencies)) + unpinned := countUnpinned(r.Dependencies) + + if tt.warns != unpinned { + t.Errorf("expected %v. Got %v", tt.warns, unpinned) + } + }) + } +} + +func TestFileIsInVendorDir(t *testing.T) { + t.Parallel() + tests := []struct { + name string + filename string + expected bool + }{ + { + name: "not in vendor or third_party", + filename: "a/b/c/d/Dockerfile", + expected: false, + }, + { + name: "is third_party deep in tree", + filename: "a/b/third_party/Dockerfile", + expected: true, + }, + { + name: "in vendor", + filename: "vendor/a/b/Dockerfile", + expected: true, + }, + { + name: "in third_party", + filename: "third_party/b/c/Dockerfile", + expected: true, + }, + { + name: "in deep vendor", + filename: "a/b/c/vendor/Dockerfile", + expected: true, + }, + { + name: "misspelled vendor dir", + filename: "a/vendor_/Dockerfile", + expected: false, + }, + } + + for _, tt := range tests { + tt := tt // Re-initializing variable so it is not changed while executing the closure below + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + got := fileIsInVendorDir(tt.filename) + if got != tt.expected { + t.Errorf("expected %v. 
Got %v", tt.expected, got) } }) } @@ -418,6 +512,21 @@ func TestDockerfilePinningFromLineNumber(t *testing.T) { }, }, }, + { + name: "Parser error doesn't affect docker image pinning", + filename: "./testdata/Dockerfile-not-pinned-with-parser-error", + expected: []struct { + snippet string + startLine uint + endLine uint + }{ + { + snippet: "FROM abrarov/msvc-2017:2.11.0", + startLine: 1, + endLine: 1, + }, + }, + }, } for _, tt := range tests { tt := tt // Re-initializing variable so it is not changed while executing the closure below @@ -522,13 +631,46 @@ func TestDockerfileInvalidFiles(t *testing.T) { } } -func TestDockerfileInsecureDownloadsLineNumber(t *testing.T) { +func TestDockerfileInsecureDownloadsBrokenCommands(t *testing.T) { t.Parallel() - //nolint + //nolint:govet tests := []struct { name string filename string - expected []struct { + err error + }{ + { + name: "dockerfile downloads", + filename: "./testdata/Dockerfile-empty-run-array", + err: errInternalInvalidDockerFile, + }, + } + for _, tt := range tests { + tt := tt // Re-initializing variable so it is not changed while executing the closure below + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + content, err := os.ReadFile(tt.filename) + if err != nil { + t.Errorf("cannot read file: %v", err) + } + + var r checker.PinningDependenciesData + _, err = validateDockerfileInsecureDownloads(tt.filename, content, &r) + if !strings.Contains(err.Error(), tt.err.Error()) { + t.Errorf(cmp.Diff(err, tt.err, cmpopts.EquateErrors())) + } + }) + } +} + +func TestDockerfileInsecureDownloadsLineNumber(t *testing.T) { + t.Parallel() + //nolint:govet + tests := []struct { + name string + filename string + processingErrors int + expected []struct { snippet string startLine uint endLine uint @@ -538,7 +680,6 @@ func TestDockerfileInsecureDownloadsLineNumber(t *testing.T) { { name: "dockerfile downloads", filename: "./testdata/Dockerfile-download-lines", - //nolint expected: []struct { snippet string startLine 
uint @@ -617,12 +758,29 @@ func TestDockerfileInsecureDownloadsLineNumber(t *testing.T) { endLine: 64, t: checker.DependencyUseTypePipCommand, }, + { + snippet: `bash <(curl --silent --show-error "https://raw.githubusercontent.com/rhysd/actionlint/main/scripts/download-actionlint.bash")`, + startLine: 68, + endLine: 68, + t: checker.DependencyUseTypeDownloadThenRun, + }, + { + snippet: "curl -sSL https://dot.net/v1/dotnet-install.sh | bash /dev/stdin", + startLine: 69, + endLine: 69, + t: checker.DependencyUseTypeDownloadThenRun, + }, + { + snippet: "curl -sSL https://raw.githubusercontent.com/dotnet/install-scripts/main/src/dotnet-install.sh | bash /dev/stdin", + startLine: 70, + endLine: 70, + t: checker.DependencyUseTypeDownloadThenRun, + }, }, }, { name: "dockerfile downloads multi-run", filename: "./testdata/Dockerfile-download-multi-runs", - //nolint expected: []struct { snippet string startLine uint @@ -655,6 +813,31 @@ func TestDockerfileInsecureDownloadsLineNumber(t *testing.T) { }, }, }, + { + name: "Parser error may lead to incomplete data", + filename: "./testdata/Dockerfile-not-pinned-with-parser-error", + processingErrors: 1, + expected: []struct { + snippet string + startLine uint + endLine uint + t checker.DependencyUseType + }{ + { + snippet: "choco install --no-progress -r -y cmake", + startLine: 4, + endLine: 4, + t: checker.DependencyUseTypeChocoCommand, + }, + { + snippet: "choco install --no-progress -r -y gzip wget ninja", + startLine: 9, + endLine: 9, + t: checker.DependencyUseTypeChocoCommand, + }, + // `curl bla | bash` isn't detected due to parser error + }, + }, } for _, tt := range tests { tt := tt // Re-initializing variable so it is not changed while executing the closure below @@ -684,13 +867,160 @@ func TestDockerfileInsecureDownloadsLineNumber(t *testing.T) { t.Errorf("test failed: dependency not present: %+v", tt.expected) } } + + if tt.processingErrors != len(r.ProcessingErrors) { + t.Errorf("expected %v processing errors. 
Got %v", tt.processingErrors, len(r.ProcessingErrors)) + } + }) + } +} + +func TestDockerfileWithHeredocsInsecureDownloadsLineNumber(t *testing.T) { + t.Parallel() + //nolint:govet + tests := []struct { + name string + filename string + processingErrors int + expected []struct { + snippet string + startLine uint + endLine uint + pinned bool + t checker.DependencyUseType + } + }{ + { + name: "dockerfile heredoc downloads", + filename: "./testdata/Dockerfile-download-heredoc", + processingErrors: 1, + expected: []struct { + snippet string + startLine uint + endLine uint + pinned bool + t checker.DependencyUseType + }{ + { + snippet: "pip install --no-deps -e git+https://github.com/username/repo.git@v1.0#egg=package", + startLine: 20, + endLine: 20, + pinned: false, + t: checker.DependencyUseTypePipCommand, + }, + { + snippet: "pip install --no-deps -e git+https://github.com/username/repo.git@0123456789abcdef0123456789abcdef01234567", + startLine: 24, + endLine: 24, + pinned: true, + t: checker.DependencyUseTypePipCommand, + }, + { + snippet: "curl bla | bash", + startLine: 28, + endLine: 28, + pinned: false, + t: checker.DependencyUseTypeDownloadThenRun, + }, + { + snippet: "pip install --no-deps -e git+https://github.com/username/repo.git@0123456789abcdef0123456789abcdef01234567", + startLine: 32, + endLine: 32, + pinned: true, + t: checker.DependencyUseTypePipCommand, + }, + { + snippet: "pip install --no-deps -e git+https://github.com/username/repo.git@v1.0#egg=package", + startLine: 36, + endLine: 36, + pinned: false, + t: checker.DependencyUseTypePipCommand, + }, + { + snippet: "curl bla | bash", + startLine: 38, + endLine: 38, + pinned: false, + t: checker.DependencyUseTypeDownloadThenRun, + }, + { + snippet: "pip install --no-deps -e git+https://github.com/username/repo.git@0123456789abcdef0123456789abcdef01234567", + startLine: 42, + endLine: 43, + pinned: true, + t: checker.DependencyUseTypePipCommand, + }, + { + snippet: "pip install --no-deps -e 
git+https://github.com/username/repo.git@v1.0#egg=package", + startLine: 43, + endLine: 44, + pinned: false, + t: checker.DependencyUseTypePipCommand, + }, + { + snippet: "curl bla | bash", + startLine: 45, + endLine: 45, + pinned: false, + t: checker.DependencyUseTypeDownloadThenRun, + }, + { + snippet: "pip install --no-deps -e git+https://github.com/username/repo.git@0123456789abcdef0123456789abcdef01234567", + startLine: 50, + endLine: 52, + pinned: true, + t: checker.DependencyUseTypePipCommand, + }, + { + snippet: "curl bla | bash", + startLine: 51, + endLine: 53, + pinned: false, + t: checker.DependencyUseTypeDownloadThenRun, + }, + }, + }, + } + for _, tt := range tests { + tt := tt // Re-initializing variable so it is not changed while executing the closure below + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + content, err := os.ReadFile(tt.filename) + if err != nil { + t.Errorf("cannot read file: %v", err) + } + + var r checker.PinningDependenciesData + _, err = validateDockerfileInsecureDownloads(tt.filename, content, &r) + if err != nil { + t.Errorf("error during validateDockerfileInsecureDownloads: %v", err) + } + + for _, expectedDep := range tt.expected { + isExpectedDep := func(dep checker.Dependency) bool { + return dep.Location.Offset == expectedDep.startLine && + dep.Location.EndOffset == expectedDep.endLine && + dep.Location.Path == tt.filename && + dep.Location.Snippet == expectedDep.snippet && + *dep.Pinned == expectedDep.pinned && + dep.Type == expectedDep.t + } + + if !scut.ValidatePinningDependencies(isExpectedDep, &r) { + t.Errorf("test failed: dependency not present: %+v", tt.expected) + } + } + + if tt.processingErrors != len(r.ProcessingErrors) { + t.Errorf("expected %v processing errors. 
Got %v", tt.processingErrors, len(r.ProcessingErrors)) + } }) } } func TestShellscriptInsecureDownloadsLineNumber(t *testing.T) { t.Parallel() - //nolint + //nolint:govet tests := []struct { name string filename string @@ -704,7 +1034,6 @@ func TestShellscriptInsecureDownloadsLineNumber(t *testing.T) { { name: "shell downloads", filename: "./testdata/shell-download-lines.sh", - //nolint expected: []struct { snippet string startLine uint @@ -837,6 +1166,24 @@ func TestShellscriptInsecureDownloadsLineNumber(t *testing.T) { endLine: 64, t: checker.DependencyUseTypeNugetCommand, }, + { + snippet: `bash <(curl --silent --show-error "https://raw.githubusercontent.com/rhysd/actionlint/main/scripts/download-actionlint.bash")`, + startLine: 69, + endLine: 69, + t: checker.DependencyUseTypeDownloadThenRun, + }, + { + snippet: "curl -sSL https://dot.net/v1/dotnet-install.sh | bash /dev/stdin", + startLine: 70, + endLine: 70, + t: checker.DependencyUseTypeDownloadThenRun, + }, + { + snippet: "curl -sSL https://raw.githubusercontent.com/dotnet/install-scripts/main/src/dotnet-install.sh | bash /dev/stdin", + startLine: 71, + endLine: 71, + t: checker.DependencyUseTypeDownloadThenRun, + }, }, }, } @@ -873,9 +1220,9 @@ func TestShellscriptInsecureDownloadsLineNumber(t *testing.T) { } } -func TestDockerfilePinningWihoutHash(t *testing.T) { +func TestDockerfilePinningWithoutHash(t *testing.T) { t.Parallel() - //nolint + //nolint:govet tests := []struct { warns int err error @@ -919,8 +1266,10 @@ func TestDockerfilePinningWihoutHash(t *testing.T) { return } - if tt.warns != len(r.Dependencies) { - t.Errorf("expected %v. Got %v", tt.warns, len(r.Dependencies)) + unpinned := countUnpinned(r.Dependencies) + + if tt.warns != unpinned { + t.Errorf("expected %v. 
Got %v", tt.warns, unpinned) } }) } @@ -928,17 +1277,18 @@ func TestDockerfilePinningWihoutHash(t *testing.T) { func TestDockerfileScriptDownload(t *testing.T) { t.Parallel() - //nolint + //nolint:govet tests := []struct { - warns int - err error - name string - filename string + unpinned int + processingErrors int + err error + name string + filename string }{ { name: "curl | sh", filename: "./testdata/Dockerfile-curl-sh", - warns: 4, + unpinned: 5, }, { name: "empty file", @@ -955,7 +1305,7 @@ func TestDockerfileScriptDownload(t *testing.T) { { name: "wget | /bin/sh", filename: "./testdata/Dockerfile-wget-bin-sh", - warns: 3, + unpinned: 4, }, { name: "wget no exec", @@ -964,37 +1314,43 @@ func TestDockerfileScriptDownload(t *testing.T) { { name: "curl file sh", filename: "./testdata/Dockerfile-curl-file-sh", - warns: 12, + unpinned: 12, }, { name: "proc substitution", filename: "./testdata/Dockerfile-proc-subs", - warns: 6, + unpinned: 6, }, { name: "wget file", filename: "./testdata/Dockerfile-wget-file", - warns: 10, + unpinned: 10, }, { name: "gsutil file", filename: "./testdata/Dockerfile-gsutil-file", - warns: 17, + unpinned: 17, }, { name: "aws file", filename: "./testdata/Dockerfile-aws-file", - warns: 15, + unpinned: 15, }, { name: "pkg managers", filename: "./testdata/Dockerfile-pkg-managers", - warns: 60, + unpinned: 60, }, { name: "download with some python", filename: "./testdata/Dockerfile-some-python", - warns: 1, + unpinned: 1, + }, + { + name: "Parser error doesn't affect docker image pinning", + filename: "./testdata/Dockerfile-not-pinned-with-parser-error", + processingErrors: 1, + unpinned: 2, // `curl bla | bash` missed due to parser error }, } for _, tt := range tests { @@ -1022,8 +1378,14 @@ func TestDockerfileScriptDownload(t *testing.T) { return } - if tt.warns != len(r.Dependencies) { - t.Errorf("expected %v. 
Got %v", tt.warns, len(r.Dependencies)) + unpinned := countUnpinned(r.Dependencies) + + if tt.unpinned != unpinned { + t.Errorf("expected %v unpinned. Got %v", tt.unpinned, unpinned) + } + + if tt.processingErrors != len(r.ProcessingErrors) { + t.Errorf("expected %v processing errors. Got %v", tt.processingErrors, len(r.ProcessingErrors)) } }) } @@ -1031,17 +1393,23 @@ func TestDockerfileScriptDownload(t *testing.T) { func TestDockerfileScriptDownloadInfo(t *testing.T) { t.Parallel() - //nolint + //nolint:govet tests := []struct { - name string - filename string - warns int - err error + name string + filename string + unpinned int + processingErrors int + err error }{ { name: "curl | sh", filename: "./testdata/Dockerfile-no-curl-sh", }, + { + name: "Parser error doesn't affect docker image pinning", + filename: "./testdata/Dockerfile-no-curl-sh-with-parser-error", + processingErrors: 1, // everything is pinned, but parser error still throws warning + }, } for _, tt := range tests { tt := tt // Re-initializing variable so it is not changed while executing the closure below @@ -1064,8 +1432,14 @@ func TestDockerfileScriptDownloadInfo(t *testing.T) { return } - if tt.warns != len(r.Dependencies) { - t.Errorf("expected %v. Got %v", tt.warns, len(r.Dependencies)) + unpinned := countUnpinned(r.Dependencies) + + if tt.unpinned != unpinned { + t.Errorf("expected %v unpinned. Got %v", tt.unpinned, unpinned) + } + + if tt.processingErrors != len(r.ProcessingErrors) { + t.Errorf("expected %v processing errors. 
Got %v", tt.processingErrors, len(r.ProcessingErrors)) } }) } @@ -1073,18 +1447,18 @@ func TestDockerfileScriptDownloadInfo(t *testing.T) { func TestShellScriptDownload(t *testing.T) { t.Parallel() - //nolint + //nolint:govet tests := []struct { - name string - filename string - warns int - debugs int - err error + name string + filename string + unpinned int + processingErrors int + err error }{ { name: "sh script", filename: "../testdata/script-sh", - warns: 7, + unpinned: 7, }, { name: "empty file", @@ -1097,21 +1471,22 @@ func TestShellScriptDownload(t *testing.T) { { name: "bash script", filename: "./testdata/script-bash", - warns: 7, + unpinned: 11, }, { name: "sh script 2", filename: "../testdata/script.sh", - warns: 7, + unpinned: 7, }, { name: "pkg managers", filename: "./testdata/script-pkg-managers", - warns: 56, + unpinned: 56, }, { - name: "invalid shell script", - filename: "./testdata/script-invalid.sh", + name: "invalid shell script", + filename: "./testdata/script-invalid.sh", + processingErrors: 1, // `curl bla | bash` not detected due to invalid script }, } for _, tt := range tests { @@ -1140,12 +1515,14 @@ func TestShellScriptDownload(t *testing.T) { return } - // Note: this works because all our examples - // either have warns or debugs. - ws := (tt.warns == len(r.Dependencies)) && (tt.debugs == 0) - ds := (tt.debugs == len(r.Dependencies)) && (tt.warns == 0) - if !ws && !ds { - t.Errorf("expected %v or %v. Got %v", tt.warns, tt.debugs, len(r.Dependencies)) + unpinned := countUnpinned(r.Dependencies) + + if tt.unpinned != unpinned { + t.Errorf("expected %v unpinned. Got %v", tt.unpinned, len(r.Dependencies)) + } + + if tt.processingErrors != len(r.ProcessingErrors) { + t.Errorf("expected %v processing errors. 
Got %v", tt.processingErrors, len(r.ProcessingErrors)) } }) } @@ -1153,7 +1530,7 @@ func TestShellScriptDownload(t *testing.T) { func TestShellScriptDownloadPinned(t *testing.T) { t.Parallel() - //nolint + //nolint:govet tests := []struct { name string filename string @@ -1192,36 +1569,45 @@ func TestShellScriptDownloadPinned(t *testing.T) { return } - if tt.warns != len(r.Dependencies) { - t.Errorf("expected %v. Got %v", tt.warns, len(r.Dependencies)) + unpinned := countUnpinned(r.Dependencies) + + if tt.warns != unpinned { + t.Errorf("expected %v. Got %v", tt.warns, unpinned) } }) } } -func TestGitHubWorflowRunDownload(t *testing.T) { +func TestGitHubWorkflowRunDownload(t *testing.T) { t.Parallel() - //nolint + //nolint:govet tests := []struct { - name string - filename string - warns int - err error + name string + filename string + unpinned int + processingErrors int + err error }{ { name: "workflow curl default", filename: "./testdata/.github/workflows/github-workflow-curl-default.yaml", - warns: 1, + unpinned: 1, }, { name: "workflow curl no default", filename: "./testdata/.github/workflows/github-workflow-curl-no-default.yaml", - warns: 1, + unpinned: 1, }, { name: "wget across steps", filename: "./testdata/.github/workflows/github-workflow-wget-across-steps.yaml", - warns: 2, + unpinned: 2, + }, + { + name: "Can't identify OS but doesn't crash", + filename: "./testdata/.github/workflows/github-workflow-unknown-os.yaml", + processingErrors: 1, // job with unknown OS has a skipped step + unpinned: 1, // only found in 1 in job with known OS }, } for _, tt := range tests { @@ -1251,8 +1637,14 @@ func TestGitHubWorflowRunDownload(t *testing.T) { return } - if tt.warns != len(r.Dependencies) { - t.Errorf("expected %v. Got %v", tt.warns, len(r.Dependencies)) + unpinned := countUnpinned(r.Dependencies) + + if tt.unpinned != unpinned { + t.Errorf("expected %v unpinned. 
Got %v", tt.unpinned, unpinned) + } + + if tt.processingErrors != len(r.ProcessingErrors) { + t.Errorf("expected %v processing errors. Got %v", tt.processingErrors, len(r.ProcessingErrors)) } }) } @@ -1409,3 +1801,226 @@ func TestGitHubWorkInsecureDownloadsLineNumber(t *testing.T) { }) } } + +func countUnpinned(r []checker.Dependency) int { + var unpinned int + + for _, dependency := range r { + if *dependency.Pinned == false { + unpinned += 1 + } + } + + return unpinned +} + +func stringAsPointer(s string) *string { + return &s +} + +func boolAsPointer(b bool) *bool { + return &b +} + +// TestCollectDockerfilePinning tests the collectDockerfilePinning function. +func TestCollectDockerfilePinning(t *testing.T) { + t.Parallel() + tests := []struct { + name string + filename string + outcomeDependencies []checker.Dependency + expectError bool + }{ + { + name: "Workflow with error", + filename: "./testdata/.github/workflows/github-workflow-download-lines.yaml", + expectError: true, + }, + { + name: "Pinned dockerfile", + filename: "./testdata/Dockerfile-pinned", + expectError: false, + outcomeDependencies: []checker.Dependency{ + { + Name: stringAsPointer("python"), + PinnedAt: stringAsPointer("3.7@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2"), + Location: &checker.File{ + Path: "./testdata/Dockerfile-pinned", + Snippet: "FROM python:3.7@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2", + Offset: 16, + EndOffset: 16, + Type: 1, + }, + Pinned: boolAsPointer(true), + Type: "containerImage", + }, + }, + }, + { + name: "Non-pinned dockerfile", + filename: "./testdata/Dockerfile-not-pinned", + expectError: false, + outcomeDependencies: []checker.Dependency{ + { + Name: stringAsPointer("python"), + PinnedAt: stringAsPointer("3.7"), + Location: &checker.File{ + Path: "./testdata/Dockerfile-not-pinned", + Snippet: "FROM python:3.7", + Offset: 17, + EndOffset: 17, + FileSize: 0, + Type: 1, + }, + Pinned: boolAsPointer(false), 
+ Type: "containerImage", + Remediation: &rule.Remediation{ + Text: "pin your Docker image by updating python:3.7 to python:3.7" + + "@sha256:eedf63967cdb57d8214db38ce21f105003ed4e4d0358f02bedc057341bcf92a0", + Markdown: "pin your Docker image by updating python:3.7 to python:3.7" + + "@sha256:eedf63967cdb57d8214db38ce21f105003ed4e4d0358f02bedc057341bcf92a0", + }, + }, + }, + }, + } + + for _, tt := range tests { + tt := tt // Re-initializing variable so it is not changed while executing the closure below + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + ctrl := gomock.NewController(t) + mockRepoClient := mockrepo.NewMockRepoClient(ctrl) + mockRepoClient.EXPECT().ListFiles(gomock.Any()).Return([]string{tt.filename}, nil).AnyTimes() + mockRepoClient.EXPECT().GetDefaultBranchName().Return("main", nil).AnyTimes() + mockRepoClient.EXPECT().URI().Return("github.com/ossf/scorecard").AnyTimes() + mockRepoClient.EXPECT().GetFileContent(gomock.Any()).DoAndReturn(func(file string) ([]byte, error) { + // This will read the file and return the content + content, err := os.ReadFile(file) + if err != nil { + return content, fmt.Errorf("%w", err) + } + return content, nil + }) + + req := checker.CheckRequest{ + RepoClient: mockRepoClient, + } + var r checker.PinningDependenciesData + err := collectDockerfilePinning(&req, &r) + if err != nil { + if !tt.expectError { + t.Error(err.Error()) + } + } + for i := range tt.outcomeDependencies { + outcomeDependency := &tt.outcomeDependencies[i] + depend := &r.Dependencies[i] + if diff := cmp.Diff(outcomeDependency, depend); diff != "" { + t.Errorf("mismatch (-want +got):\n%s", diff) + } + } + }) + } +} + +// TestCollectGitHubActionsWorkflowPinning tests the collectGitHubActionsWorkflowPinning function. 
+func TestCollectGitHubActionsWorkflowPinning(t *testing.T) { + t.Parallel() + tests := []struct { + name string + filename string + outcomeDependencies []checker.Dependency + expectError bool + }{ + { + name: "Pinned dockerfile", + filename: "Dockerfile-empty", + expectError: true, + }, + { + name: "Pinned workflow", + filename: ".github/workflows/workflow-pinned.yaml", + expectError: false, + outcomeDependencies: []checker.Dependency{ + { + Name: stringAsPointer("actions/checkout"), + PinnedAt: stringAsPointer("daadedc81d5f9d3c06d2c92f49202a3cc2b919ba"), + Location: &checker.File{ + Path: ".github/workflows/workflow-pinned.yaml", + Snippet: "actions/checkout@daadedc81d5f9d3c06d2c92f49202a3cc2b919ba", + Offset: 31, + EndOffset: 31, + Type: 1, + }, + Pinned: boolAsPointer(true), + Type: "GitHubAction", + Remediation: nil, + }, + }, + }, + { + name: "Non-pinned workflow", + filename: ".github/workflows/workflow-not-pinned.yaml", + expectError: false, + outcomeDependencies: []checker.Dependency{ + { + Name: stringAsPointer("actions/checkout"), + PinnedAt: stringAsPointer("daadedc81d5f9d3c06d2c92f49202a3cc2b919ba"), + Location: &checker.File{ + Path: ".github/workflows/workflow-not-pinned.yaml", + Snippet: "actions/checkout@daadedc81d5f9d3c06d2c92f49202a3cc2b919ba", + Offset: 31, + EndOffset: 31, + FileSize: 0, + Type: 1, + }, + Pinned: boolAsPointer(true), + Type: "GitHubAction", + Remediation: nil, + }, + }, + }, + } + + for _, tt := range tests { + tt := tt // Re-initializing variable so it is not changed while executing the closure below + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + ctrl := gomock.NewController(t) + mockRepoClient := mockrepo.NewMockRepoClient(ctrl) + mockRepoClient.EXPECT().ListFiles(gomock.Any()).Return([]string{tt.filename}, nil).AnyTimes() + mockRepoClient.EXPECT().GetDefaultBranchName().Return("main", nil).AnyTimes() + mockRepoClient.EXPECT().URI().Return("github.com/ossf/scorecard").AnyTimes() + 
mockRepoClient.EXPECT().GetFileContent(gomock.Any()).DoAndReturn(func(file string) ([]byte, error) { + // This will read the file and return the content + content, err := os.ReadFile(filepath.Join("testdata", file)) + if err != nil { + return content, fmt.Errorf("%w", err) + } + return content, nil + }) + + req := checker.CheckRequest{ + RepoClient: mockRepoClient, + } + var r checker.PinningDependenciesData + err := collectGitHubActionsWorkflowPinning(&req, &r) + if err != nil { + if !tt.expectError { + t.Error(err.Error()) + } + } + t.Log(r.Dependencies) + for i := range tt.outcomeDependencies { + outcomeDependency := &tt.outcomeDependencies[i] + depend := &r.Dependencies[i] + if diff := cmp.Diff(outcomeDependency, depend); diff != "" { + t.Errorf("mismatch (-want +got):\n%s", diff) + } + } + }) + } +} diff --git a/checks/raw/sast.go b/checks/raw/sast.go new file mode 100644 index 00000000000..0dc9ef52ff7 --- /dev/null +++ b/checks/raw/sast.go @@ -0,0 +1,325 @@ +// Copyright 2023 OpenSSF Scorecard Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package raw + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "path" + "regexp" + "strings" + + "github.com/rhysd/actionlint" + + "github.com/ossf/scorecard/v4/checker" + "github.com/ossf/scorecard/v4/checks/fileparser" + sce "github.com/ossf/scorecard/v4/errors" + "github.com/ossf/scorecard/v4/finding" +) + +const CheckSAST = "SAST" + +var errInvalid = errors.New("invalid") + +var sastTools = map[string]bool{ + "github-advanced-security": true, + "github-code-scanning": true, + "lgtm-com": true, + "sonarcloud": true, +} + +var allowedConclusions = map[string]bool{"success": true, "neutral": true} + +// SAST checks for presence of static analysis tools. +func SAST(c *checker.CheckRequest) (checker.SASTData, error) { + var data checker.SASTData + + commits, err := sastToolInCheckRuns(c) + if err != nil { + return data, err + } + data.Commits = commits + + codeQLWorkflows, err := getSastUsesWorkflows(c, "^github/codeql-action/analyze$", checker.CodeQLWorkflow) + if err != nil { + return data, err + } + data.Workflows = append(data.Workflows, codeQLWorkflows...) + + sonarWorkflows, err := getSonarWorkflows(c) + if err != nil { + return data, err + } + data.Workflows = append(data.Workflows, sonarWorkflows...) + + snykWorkflows, err := getSastUsesWorkflows(c, "^snyk/actions/.*", checker.SnykWorkflow) + if err != nil { + return data, err + } + data.Workflows = append(data.Workflows, snykWorkflows...) + + pysaWorkflows, err := getSastUsesWorkflows(c, "^facebook/pysa-action$", checker.PysaWorkflow) + if err != nil { + return data, err + } + data.Workflows = append(data.Workflows, pysaWorkflows...) + + qodanaWorkflows, err := getSastUsesWorkflows(c, "^JetBrains/qodana-action$", checker.QodanaWorkflow) + if err != nil { + return data, err + } + data.Workflows = append(data.Workflows, qodanaWorkflows...) 
+ + return data, nil +} + +func sastToolInCheckRuns(c *checker.CheckRequest) ([]checker.SASTCommit, error) { + var sastCommits []checker.SASTCommit + commits, err := c.RepoClient.ListCommits() + if err != nil { + return sastCommits, + sce.WithMessage(sce.ErrScorecardInternal, fmt.Sprintf("RepoClient.ListCommits: %v", err)) + } + + for i := range commits { + pr := commits[i].AssociatedMergeRequest + // TODO(#575): We ignore associated PRs if Scorecard is being run on a fork + // but the PR was created in the original repo. + if pr.MergedAt.IsZero() { + continue + } + + checked := false + crs, err := c.RepoClient.ListCheckRunsForRef(pr.HeadSHA) + if err != nil { + return sastCommits, + sce.WithMessage(sce.ErrScorecardInternal, fmt.Sprintf("Client.Checks.ListCheckRunsForRef: %v", err)) + } + // Note: crs may be `nil`: in this case + // the loop below will be skipped. + for _, cr := range crs { + if cr.Status != "completed" { + continue + } + if !allowedConclusions[cr.Conclusion] { + continue + } + if sastTools[cr.App.Slug] { + c.Dlogger.Debug(&checker.LogMessage{ + Path: cr.URL, + Type: finding.FileTypeURL, + Text: fmt.Sprintf("tool detected: %v", cr.App.Slug), + }) + checked = true + break + } + } + sastCommit := checker.SASTCommit{ + CommittedDate: commits[i].CommittedDate, + Message: commits[i].Message, + SHA: commits[i].SHA, + AssociatedMergeRequest: commits[i].AssociatedMergeRequest, + Committer: commits[i].Committer, + Compliant: checked, + } + sastCommits = append(sastCommits, sastCommit) + } + return sastCommits, nil +} + +// getSastUsesWorkflows matches if the "uses" field of a GitHub action matches +// a given regex by way of usesRegex. Each workflow that matches the usesRegex +// is appended to the slice that is returned. 
+func getSastUsesWorkflows( + c *checker.CheckRequest, + usesRegex string, + checkerType checker.SASTWorkflowType, +) ([]checker.SASTWorkflow, error) { + var workflowPaths []string + var sastWorkflows []checker.SASTWorkflow + err := fileparser.OnMatchingFileContentDo(c.RepoClient, fileparser.PathMatcher{ + Pattern: ".github/workflows/*", + CaseSensitive: false, + }, searchGitHubActionWorkflowUseRegex, &workflowPaths, usesRegex) + if err != nil { + return sastWorkflows, err + } + for _, path := range workflowPaths { + sastWorkflow := checker.SASTWorkflow{ + File: checker.File{ + Path: path, + Offset: checker.OffsetDefault, + Type: finding.FileTypeSource, + }, + Type: checkerType, + } + + sastWorkflows = append(sastWorkflows, sastWorkflow) + } + return sastWorkflows, nil +} + +var searchGitHubActionWorkflowUseRegex fileparser.DoWhileTrueOnFileContent = func(path string, + content []byte, + args ...interface{}, +) (bool, error) { + if !fileparser.IsWorkflowFile(path) { + return true, nil + } + + if len(args) != 2 { + return false, fmt.Errorf( + "searchGitHubActionWorkflowUseRegex requires exactly 2 arguments: %w", errInvalid) + } + + // Verify the type of the data. + paths, ok := args[0].(*[]string) + if !ok { + return false, fmt.Errorf( + "searchGitHubActionWorkflowUseRegex expects arg[0] of type *[]string: %w", errInvalid) + } + + usesRegex, ok := args[1].(string) + if !ok { + return false, fmt.Errorf( + "searchGitHubActionWorkflowUseRegex expects arg[1] of type string: %w", errInvalid) + } + + workflow, errs := actionlint.Parse(content) + if len(errs) > 0 && workflow == nil { + return false, fileparser.FormatActionlintError(errs) + } + + for _, job := range workflow.Jobs { + for _, step := range job.Steps { + e, ok := step.Exec.(*actionlint.ExecAction) + if !ok || e == nil || e.Uses == nil { + continue + } + // Parse out repo / SHA. 
+ uses := strings.TrimPrefix(e.Uses.Value, "actions://") + action, _, _ := strings.Cut(uses, "@") + re := regexp.MustCompile(usesRegex) + if re.MatchString(action) { + *paths = append(*paths, path) + } + } + } + return true, nil +} + +type sonarConfig struct { + url string + file checker.File +} + +func getSonarWorkflows(c *checker.CheckRequest) ([]checker.SASTWorkflow, error) { + var config []sonarConfig + var sastWorkflows []checker.SASTWorkflow + err := fileparser.OnMatchingFileContentDo(c.RepoClient, fileparser.PathMatcher{ + Pattern: "*", + CaseSensitive: false, + }, validateSonarConfig, &config) + if err != nil { + return sastWorkflows, err + } + for _, result := range config { + sastWorkflow := checker.SASTWorkflow{ + File: checker.File{ + Path: result.file.Path, + Offset: result.file.Offset, + EndOffset: result.file.EndOffset, + Type: result.file.Type, + Snippet: result.url, + }, + Type: checker.SonarWorkflow, + } + + sastWorkflows = append(sastWorkflows, sastWorkflow) + } + return sastWorkflows, nil +} + +// Check file content. +var validateSonarConfig fileparser.DoWhileTrueOnFileContent = func(pathfn string, + content []byte, + args ...interface{}, +) (bool, error) { + if !strings.EqualFold(path.Base(pathfn), "pom.xml") { + return true, nil + } + + if len(args) != 1 { + return false, fmt.Errorf( + "validateSonarConfig requires exactly 1 argument: %w", errInvalid) + } + + // Verify the type of the data. 
+ pdata, ok := args[0].(*[]sonarConfig) + if !ok { + return false, fmt.Errorf( + "validateSonarConfig expects arg[0] of type *[]sonarConfig]: %w", errInvalid) + } + + regex := regexp.MustCompile(`\s*(\S+)\s*<\/sonar\.host\.url>`) + match := regex.FindSubmatch(content) + + if len(match) < 2 { + return true, nil + } + offset, err := findLine(content, []byte("")) + if err != nil { + return false, err + } + + endOffset, err := findLine(content, []byte("")) + if err != nil { + return false, err + } + + *pdata = append(*pdata, sonarConfig{ + url: string(match[1]), + file: checker.File{ + Path: pathfn, + Type: finding.FileTypeSource, + Offset: offset, + EndOffset: endOffset, + }, + }) + + return true, nil +} + +func findLine(content, data []byte) (uint, error) { + r := bytes.NewReader(content) + scanner := bufio.NewScanner(r) + + line := 0 + // https://golang.org/pkg/bufio/#Scanner.Scan + for scanner.Scan() { + line++ + if strings.Contains(scanner.Text(), string(data)) { + return uint(line), nil + } + } + + if err := scanner.Err(); err != nil { + return 0, fmt.Errorf("scanner.Err(): %w", err) + } + + return 0, nil +} diff --git a/checks/raw/sast_test.go b/checks/raw/sast_test.go new file mode 100644 index 00000000000..8bdf4020463 --- /dev/null +++ b/checks/raw/sast_test.go @@ -0,0 +1,230 @@ +// Copyright 2021 OpenSSF Scorecard Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package raw + +import ( + "fmt" + "os" + "testing" + + "github.com/golang/mock/gomock" + "github.com/google/go-cmp/cmp" + + "github.com/ossf/scorecard/v4/checker" + "github.com/ossf/scorecard/v4/clients" + mockrepo "github.com/ossf/scorecard/v4/clients/mockclients" + "github.com/ossf/scorecard/v4/finding" +) + +func TestSAST(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + files []string + commits []clients.Commit + expected checker.SASTData + }{ + { + name: "has codeql 1", + files: []string{ + ".github/workflows/workflow-not-pinned.yaml", + ".github/workflows/pom.xml", + }, + commits: []clients.Commit{ + { + AssociatedMergeRequest: clients.PullRequest{ + Number: 1, + }, + }, + }, + expected: checker.SASTData{ + Workflows: []checker.SASTWorkflow{ + { + Type: checker.CodeQLWorkflow, + File: checker.File{ + Path: ".github/workflows/workflow-not-pinned.yaml", + Offset: checker.OffsetDefault, + Type: finding.FileTypeSource, + }, + }, + { + Type: checker.SonarWorkflow, + File: checker.File{ + Path: ".github/workflows/pom.xml", + Type: finding.FileTypeSource, + Snippet: "https://sonarqube.private.domain", + Offset: 2, + EndOffset: 2, + }, + }, + }, + }, + }, + { + name: "has codeql 2", + files: []string{".github/workflows/github-workflow-multiple-unpinned-uses.yaml"}, + commits: []clients.Commit{ + { + AssociatedMergeRequest: clients.PullRequest{ + Number: 1, + }, + }, + }, + expected: checker.SASTData{ + Workflows: []checker.SASTWorkflow{ + { + Type: checker.CodeQLWorkflow, + File: checker.File{ + Path: ".github/workflows/github-workflow-multiple-unpinned-uses.yaml", + Offset: checker.OffsetDefault, + Type: finding.FileTypeSource, + }, + }, + }, + }, + }, + { + name: "Does not use CodeQL", + files: []string{".github/workflows/github-workflow-download-lines.yaml"}, + expected: checker.SASTData{ + Workflows: nil, + }, + }, + { + name: "Airflows CodeQL workflow - Has CodeQL", + files: []string{".github/workflows/airflows-codeql.yaml"}, + commits: 
[]clients.Commit{ + { + AssociatedMergeRequest: clients.PullRequest{ + Number: 1, + }, + }, + }, + expected: checker.SASTData{ + Workflows: []checker.SASTWorkflow{ + { + Type: checker.CodeQLWorkflow, + File: checker.File{ + Path: ".github/workflows/airflows-codeql.yaml", + Offset: checker.OffsetDefault, + Type: finding.FileTypeSource, + }, + }, + }, + }, + }, + { + name: "Has Snyk", + files: []string{".github/workflows/github-workflow-snyk.yaml"}, + commits: []clients.Commit{ + { + AssociatedMergeRequest: clients.PullRequest{ + Number: 1, + }, + }, + }, + expected: checker.SASTData{ + Workflows: []checker.SASTWorkflow{ + { + Type: checker.SnykWorkflow, + File: checker.File{ + Path: ".github/workflows/github-workflow-snyk.yaml", + Offset: checker.OffsetDefault, + Type: finding.FileTypeSource, + }, + }, + }, + }, + }, + { + name: "Has Pysa", + files: []string{".github/workflows/github-pysa-workflow.yaml"}, + commits: []clients.Commit{ + { + AssociatedMergeRequest: clients.PullRequest{ + Number: 1, + }, + }, + }, + expected: checker.SASTData{ + Workflows: []checker.SASTWorkflow{ + { + Type: checker.PysaWorkflow, + File: checker.File{ + Path: ".github/workflows/github-pysa-workflow.yaml", + Offset: checker.OffsetDefault, + Type: finding.FileTypeSource, + }, + }, + }, + }, + }, + { + name: "Has Qodana", + files: []string{".github/workflows/github-qodana-workflow.yaml"}, + commits: []clients.Commit{ + { + AssociatedMergeRequest: clients.PullRequest{ + Number: 1, + }, + }, + }, + expected: checker.SASTData{ + Workflows: []checker.SASTWorkflow{ + { + Type: checker.QodanaWorkflow, + File: checker.File{ + Path: ".github/workflows/github-qodana-workflow.yaml", + Offset: checker.OffsetDefault, + Type: finding.FileTypeSource, + }, + }, + }, + }, + }, + } + for _, tt := range tests { + tt := tt // Re-initializing variable so it is not changed while executing the closure below + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + ctrl := gomock.NewController(t) + 
mockRepoClient := mockrepo.NewMockRepoClient(ctrl) + mockRepoClient.EXPECT().ListFiles(gomock.Any()).Return(tt.files, nil).AnyTimes() + mockRepoClient.EXPECT().ListCommits().DoAndReturn(func() ([]clients.Commit, error) { + return tt.commits, nil + }) + mockRepoClient.EXPECT().GetFileContent(gomock.Any()).DoAndReturn(func(file string) ([]byte, error) { + // This will read the file and return the content + content, err := os.ReadFile("./testdata/" + file) + if err != nil { + return content, fmt.Errorf("%w", err) + } + return content, nil + }).AnyTimes() + req := checker.CheckRequest{ + RepoClient: mockRepoClient, + } + sastWorkflowsGot, err := SAST(&req) + if err != nil { + t.Error(err) + } + if diff := cmp.Diff(tt.expected, sastWorkflowsGot); diff != "" { + t.Errorf("mismatch (-want +got):\n%s", diff) + } + }) + } +} diff --git a/checks/raw/security_policy_test.go b/checks/raw/security_policy_test.go index 51fefd346bb..d311d85e283 100644 --- a/checks/raw/security_policy_test.go +++ b/checks/raw/security_policy_test.go @@ -63,7 +63,7 @@ func Test_isSecurityPolicyFilename(t *testing.T) { // TestSecurityPolicy tests the security policy. 
func TestSecurityPolicy(t *testing.T) { t.Parallel() - //nolint + //nolint:govet tests := []struct { name string files []string @@ -165,9 +165,7 @@ func TestSecurityPolicy(t *testing.T) { res, err := SecurityPolicy(&c) - if !scut.ValidateTestReturn(t, tt.name, &tt.want, &checker.CheckResult{}, &dl) { - t.Errorf("test failed: log message not present: %+v , for test %v", tt.want, tt.name) - } + scut.ValidateTestReturn(t, tt.name, &tt.want, &checker.CheckResult{}, &dl) if (err != nil) != tt.wantErr { t.Errorf("SecurityPolicy() error = %v, wantErr %v", err, tt.wantErr) diff --git a/checks/raw/shell_download_validate.go b/checks/raw/shell_download_validate.go index 524e6d6e57e..3688bd02bac 100644 --- a/checks/raw/shell_download_validate.go +++ b/checks/raw/shell_download_validate.go @@ -17,14 +17,15 @@ package raw import ( "bufio" "bytes" + "errors" "fmt" "net/url" "path" "path/filepath" "regexp" + "slices" "strings" - "golang.org/x/exp/slices" "mvdan.cc/sh/v3/syntax" "github.com/ossf/scorecard/v4/checker" @@ -56,6 +57,8 @@ var downloadUtils = []string{ "curl", "wget", "gsutil", } +var gitCommitHashRegex = regexp.MustCompile(`^[a-fA-F0-9]{40}$`) + func isBinaryName(expected, name string) bool { return strings.EqualFold(path.Base(name), expected) } @@ -295,6 +298,36 @@ func getLine(startLine, endLine uint, node syntax.Node) (uint, uint) { startLine + node.Pos().Line() } +func hasUnpinnedURLs(cmd []string) bool { + var urls []*url.URL + + // Extract any URLs passed to the download utility + for _, s := range cmd { + u, err := url.ParseRequestURI(s) + if err == nil { + urls = append(urls, u) + } + } + + // Look for any URLs which are pinned to a GitHub SHA + var pinned []*url.URL + for _, u := range urls { + // Look for a URL of the form: https://raw.githubusercontent.com/{owner}/{repo}/{ref}/{path} + if u.Scheme == "https" && u.Host == "raw.githubusercontent.com" { + segments := strings.Split(u.Path, "/") + if len(segments) > 4 && 
gitCommitHashRegex.MatchString(segments[3]) { + pinned = append(pinned, u) + } + } + } + + if len(pinned) > 0 && len(urls) == len(pinned) { + return false + } + + return true +} + func collectFetchPipeExecute(startLine, endLine uint, node syntax.Node, cmd, pathfn string, r *checker.PinningDependenciesData, ) { @@ -326,6 +359,10 @@ func collectFetchPipeExecute(startLine, endLine uint, node syntax.Node, cmd, pat return } + if !hasUnpinnedURLs(leftStmt) { + return + } + startLine, endLine = getLine(startLine, endLine, node) r.Dependencies = append(r.Dependencies, @@ -337,7 +374,8 @@ func collectFetchPipeExecute(startLine, endLine uint, node syntax.Node, cmd, pat EndOffset: endLine, Snippet: cmd, }, - Type: checker.DependencyUseTypeDownloadThenRun, + Pinned: asBoolPointer(false), + Type: checker.DependencyUseTypeDownloadThenRun, }, ) } @@ -388,7 +426,8 @@ func collectExecuteFiles(startLine, endLine uint, node syntax.Node, cmd, pathfn EndOffset: endLine, Snippet: cmd, }, - Type: checker.DependencyUseTypeDownloadThenRun, + Pinned: asBoolPointer(false), + Type: checker.DependencyUseTypeDownloadThenRun, }, ) } @@ -397,56 +436,50 @@ func collectExecuteFiles(startLine, endLine uint, node syntax.Node, cmd, pathfn // Npm install docs are here. // https://docs.npmjs.com/cli/v7/commands/npm-install -func isNpmUnpinnedDownload(cmd []string) bool { - if len(cmd) == 0 { - return false - } - +func isNpmDownload(cmd []string) bool { if !isBinaryName("npm", cmd[0]) { return false } for i := 1; i < len(cmd); i++ { // Search for get/install/update commands. - // `npm ci` wil verify all hashes are present. 
if strings.EqualFold(cmd[i], "install") || strings.EqualFold(cmd[i], "i") || strings.EqualFold(cmd[i], "install-test") || - strings.EqualFold(cmd[i], "update") { + strings.EqualFold(cmd[i], "update") || + strings.EqualFold(cmd[i], "ci") { return true } } return false } -func isGoUnpinnedDownload(cmd []string) bool { - if len(cmd) == 0 { - return false +func isNpmUnpinnedDownload(cmd []string) bool { + for i := 1; i < len(cmd); i++ { + // `npm ci` will verify all hashes are present. + if strings.EqualFold(cmd[i], "ci") { + return false + } } + return true +} - if !isBinaryName("go", cmd[0]) { - return false - } +func isGoDownload(cmd []string) bool { // `Go install` will automatically look up the // go.mod and go.sum, so we don't flag it. if len(cmd) <= 2 { return false } - found := false + return isBinaryName("go", cmd[0]) && slices.Contains([]string{"get", "install"}, cmd[1]) +} + +func isGoUnpinnedDownload(cmd []string) bool { insecure := false hashRegex := regexp.MustCompile("^[A-Fa-f0-9]{40,}$") semverRegex := regexp.MustCompile(`^v\d+\.\d+\.\d+(-[0-9A-Za-z-.]+)?(\+[0-9A-Za-z-.]+)?$`) - for i := 1; i < len(cmd)-1; i++ { - // Search for get and install commands. 
- if slices.Contains([]string{"get", "install"}, cmd[i]) { - found = true - } - - if !found { - continue - } + for i := 1; i < len(cmd)-1; i++ { // Skip all flags // TODO skip other build flags which might take arguments for i < len(cmd)-1 && slices.Contains([]string{"-d", "-f", "-t", "-u", "-v", "-fix", "-insecure"}, cmd[i+1]) { @@ -485,7 +518,15 @@ func isGoUnpinnedDownload(cmd []string) bool { } } - return found + return true +} + +func isPipInstall(cmd []string) bool { + if len(cmd) < 2 { + return false + } + + return (isBinaryName("pip", cmd[0]) || isBinaryName("pip3", cmd[0])) && strings.EqualFold(cmd[1], "install") } func isPinnedEditableSource(pkgSource string) bool { @@ -509,28 +550,13 @@ func isFlag(cmd string) bool { } func isUnpinnedPipInstall(cmd []string) bool { - if !isBinaryName("pip", cmd[0]) && !isBinaryName("pip3", cmd[0]) { - return false - } - - isInstall := false hasNoDeps := false isEditableInstall := false isPinnedEditableInstall := true hasRequireHashes := false hasAdditionalArgs := false hasWheel := false - for i := 1; i < len(cmd); i++ { - // Search for install commands. - if strings.EqualFold(cmd[i], "install") { - isInstall = true - continue - } - - if !isInstall { - break - } - + for i := 2; i < len(cmd); i++ { // Require --no-deps to not install the dependencies when doing editable install // because we can't verify if dependencies are pinned // https://pip.pypa.io/en/stable/topics/secure-installs/#do-not-use-setuptools-directly @@ -609,7 +635,7 @@ func isUnpinnedPipInstall(cmd []string) bool { // Any other form of install is unpinned, // e.g. `pip install`. 
- return isInstall + return true } func isPythonCommand(cmd []string) bool { @@ -637,49 +663,52 @@ func extractPipCommand(cmd []string) ([]string, bool) { return nil, false } -func isUnpinnedPythonPipInstall(cmd []string) bool { +func isPythonPipInstall(cmd []string) bool { if !isPythonCommand(cmd) { return false } + pipCommand, ok := extractPipCommand(cmd) if !ok { return false } + + return isPipInstall(pipCommand) +} + +func isUnpinnedPythonPipInstall(cmd []string) bool { + pipCommand, _ := extractPipCommand(cmd) return isUnpinnedPipInstall(pipCommand) } -func isPipUnpinnedDownload(cmd []string) bool { - if len(cmd) == 0 { - return false - } +func isPipDownload(cmd []string) bool { + return isPipInstall(cmd) || isPythonPipInstall(cmd) +} - if isUnpinnedPipInstall(cmd) { +func isPipUnpinnedDownload(cmd []string) bool { + if isPipInstall(cmd) && isUnpinnedPipInstall(cmd) { return true } - if isUnpinnedPythonPipInstall(cmd) { + if isPythonPipInstall(cmd) && isUnpinnedPythonPipInstall(cmd) { return true } return false } -func isChocoUnpinnedDownload(cmd []string) bool { +func isChocoDownload(cmd []string) bool { // Install command is in the form 'choco install ...' if len(cmd) < 2 { return false } - if !isBinaryName("choco", cmd[0]) && !isBinaryName("choco.exe", cmd[0]) { - return false - } - - if !strings.EqualFold(cmd[1], "install") { - return false - } + return (isBinaryName("choco", cmd[0]) || isBinaryName("choco.exe", cmd[0])) && strings.EqualFold(cmd[1], "install") +} +func isChocoUnpinnedDownload(cmd []string) bool { // If this is an install command, then some variant of requirechecksum must be present. 
- for i := 1; i < len(cmd); i++ { + for i := 2; i < len(cmd); i++ { parts := strings.Split(cmd[i], "=") if len(parts) == 0 { continue @@ -697,22 +726,17 @@ func isChocoUnpinnedDownload(cmd []string) bool { return true } -func isUnpinnedNugetCliInstall(cmd []string) bool { +func isNugetCliInstall(cmd []string) bool { // looking for command of type nuget install ... if len(cmd) < 2 { return false } - // Search for nuget commands. - if !isBinaryName("nuget", cmd[0]) && !isBinaryName("nuget.exe", cmd[0]) { - return false - } - - // Search for install commands. - if !strings.EqualFold(cmd[1], "install") { - return false - } + // Search for nuget install commands. + return (isBinaryName("nuget", cmd[0]) || isBinaryName("nuget.exe", cmd[0])) && strings.EqualFold(cmd[1], "install") +} +func isUnpinnedNugetCliInstall(cmd []string) bool { // Assume installing a project with PackageReference (with versions) // or packages.config at the root of command if len(cmd) == 2 { @@ -740,26 +764,19 @@ func isUnpinnedNugetCliInstall(cmd []string) bool { return unpinnedDependency } -func isUnpinnedDotNetCliInstall(cmd []string) bool { +func isDotNetCliInstall(cmd []string) bool { // Search for command of type dotnet add package if len(cmd) < 4 { return false } - // Search for dotnet commands. - if !isBinaryName("dotnet", cmd[0]) && !isBinaryName("dotnet.exe", cmd[0]) { - return false - } - - // Search for add commands. 
- if !strings.EqualFold(cmd[1], "add") { - return false - } - - // Search for package commands (can be either the second or the third word) - if !(strings.EqualFold(cmd[2], "package") || strings.EqualFold(cmd[3], "package")) { - return false - } + // Search for dotnet add package + // where package command can be either the second or the third word + return (isBinaryName("dotnet", cmd[0]) || isBinaryName("dotnet.exe", cmd[0])) && + strings.EqualFold(cmd[1], "add") && + (strings.EqualFold(cmd[2], "package") || strings.EqualFold(cmd[3], "package")) +} +func isUnpinnedDotNetCliInstall(cmd []string) bool { unpinnedDependency := true for i := 3; i < len(cmd); i++ { // look for version flag @@ -772,19 +789,23 @@ func isUnpinnedDotNetCliInstall(cmd []string) bool { return unpinnedDependency } +func isNugetDownload(cmd []string) bool { + return isDotNetCliInstall(cmd) || isNugetCliInstall(cmd) +} + func isNugetUnpinnedDownload(cmd []string) bool { - if isUnpinnedDotNetCliInstall(cmd) { + if isDotNetCliInstall(cmd) && isUnpinnedDotNetCliInstall(cmd) { return true } - if isUnpinnedNugetCliInstall(cmd) { + if isNugetCliInstall(cmd) && isUnpinnedNugetCliInstall(cmd) { return true } return false } -func collectUnpinnedPakageManagerDownload(startLine, endLine uint, node syntax.Node, +func collectUnpinnedPackageManagerDownload(startLine, endLine uint, node syntax.Node, cmd, pathfn string, r *checker.PinningDependenciesData, ) { ce, ok := node.(*syntax.CallExpr) @@ -799,8 +820,12 @@ func collectUnpinnedPakageManagerDownload(startLine, endLine uint, node syntax.N startLine, endLine = getLine(startLine, endLine, node) + if len(c) == 0 { + return + } + // Go get/install. 
- if isGoUnpinnedDownload(c) { + if isGoDownload(c) { r.Dependencies = append(r.Dependencies, checker.Dependency{ Location: &checker.File{ @@ -810,7 +835,8 @@ func collectUnpinnedPakageManagerDownload(startLine, endLine uint, node syntax.N EndOffset: endLine, Snippet: cmd, }, - Type: checker.DependencyUseTypeGoCommand, + Pinned: asBoolPointer(!isGoUnpinnedDownload(c)), + Type: checker.DependencyUseTypeGoCommand, }, ) @@ -818,7 +844,7 @@ func collectUnpinnedPakageManagerDownload(startLine, endLine uint, node syntax.N } // Pip install. - if isPipUnpinnedDownload(c) { + if isPipDownload(c) { r.Dependencies = append(r.Dependencies, checker.Dependency{ Location: &checker.File{ @@ -828,7 +854,8 @@ func collectUnpinnedPakageManagerDownload(startLine, endLine uint, node syntax.N EndOffset: endLine, Snippet: cmd, }, - Type: checker.DependencyUseTypePipCommand, + Pinned: asBoolPointer(!isPipUnpinnedDownload(c)), + Type: checker.DependencyUseTypePipCommand, }, ) @@ -836,7 +863,7 @@ func collectUnpinnedPakageManagerDownload(startLine, endLine uint, node syntax.N } // Npm install. - if isNpmUnpinnedDownload(c) { + if isNpmDownload(c) { r.Dependencies = append(r.Dependencies, checker.Dependency{ Location: &checker.File{ @@ -846,7 +873,8 @@ func collectUnpinnedPakageManagerDownload(startLine, endLine uint, node syntax.N EndOffset: endLine, Snippet: cmd, }, - Type: checker.DependencyUseTypeNpmCommand, + Pinned: asBoolPointer(!isNpmUnpinnedDownload(c)), + Type: checker.DependencyUseTypeNpmCommand, }, ) @@ -854,7 +882,7 @@ func collectUnpinnedPakageManagerDownload(startLine, endLine uint, node syntax.N } // Choco install. 
- if isChocoUnpinnedDownload(c) { + if isChocoDownload(c) { r.Dependencies = append(r.Dependencies, checker.Dependency{ Location: &checker.File{ @@ -864,7 +892,8 @@ func collectUnpinnedPakageManagerDownload(startLine, endLine uint, node syntax.N EndOffset: endLine, Snippet: cmd, }, - Type: checker.DependencyUseTypeChocoCommand, + Pinned: asBoolPointer(!isChocoUnpinnedDownload(c)), + Type: checker.DependencyUseTypeChocoCommand, }, ) @@ -872,7 +901,7 @@ func collectUnpinnedPakageManagerDownload(startLine, endLine uint, node syntax.N } // Nuget install. - if isNugetUnpinnedDownload(c) { + if isNugetDownload(c) { r.Dependencies = append(r.Dependencies, checker.Dependency{ Location: &checker.File{ @@ -882,7 +911,8 @@ func collectUnpinnedPakageManagerDownload(startLine, endLine uint, node syntax.N EndOffset: endLine, Snippet: cmd, }, - Type: checker.DependencyUseTypeNugetCommand, + Pinned: asBoolPointer(!isNugetUnpinnedDownload(c)), + Type: checker.DependencyUseTypeNugetCommand, }, ) @@ -977,7 +1007,8 @@ func collectFetchProcSubsExecute(startLine, endLine uint, node syntax.Node, cmd, EndOffset: endLine, Snippet: cmd, }, - Type: checker.DependencyUseTypeDownloadThenRun, + Pinned: asBoolPointer(false), + Type: checker.DependencyUseTypeDownloadThenRun, }, ) } @@ -1061,9 +1092,22 @@ func validateShellFileAndRecord(pathfn string, startLine, endLine uint, content in := strings.NewReader(string(content)) f, err := syntax.NewParser().Parse(in, pathfn) if err != nil { - // Note: this is caught by internal caller and only printed - // to avoid failing on shell scripts that our parser does not understand. 
- // Example: https://github.com/openssl/openssl/blob/master/util/shlib_wrap.sh.in + // If we cannot parse the file, register that we are skipping it + var parseError syntax.ParseError + if errors.As(err, &parseError) { + content := string(content) + r.ProcessingErrors = append(r.ProcessingErrors, checker.ElementError{ + Err: sce.WithMessage(sce.ErrorShellParsing, parseError.Text), + Location: finding.Location{ + Path: pathfn, + LineStart: &startLine, + LineEnd: &endLine, + Snippet: &content, + Type: finding.FileTypeSource, + }, + }) + return nil + } return sce.WithMessage(sce.ErrorShellParsing, err.Error()) } @@ -1081,7 +1125,6 @@ func validateShellFileAndRecord(pathfn string, startLine, endLine uint, content // TODO: support other interpreters. // Example: https://github.com/apache/airflow/blob/main/scripts/ci/kubernetes/ci_run_kubernetes_tests.sh#L75 // HOST_PYTHON_VERSION=$(python3 -c 'import sys; print(f"{sys.version_info[0]}.{sys.version_info[1]}")')`` - // nolint if ok && isShellInterpreterOrCommand([]string{i}) { start, end := getLine(startLine, endLine, node) e := validateShellFileAndRecord(pathfn, start, end, @@ -1103,7 +1146,7 @@ func validateShellFileAndRecord(pathfn string, startLine, endLine uint, content collectFetchProcSubsExecute(startLine, endLine, node, cmdStr, pathfn, r) // Package manager's unpinned installs. - collectUnpinnedPakageManagerDownload(startLine, endLine, node, cmdStr, pathfn, r) + collectUnpinnedPackageManagerDownload(startLine, endLine, node, cmdStr, pathfn, r) // TODO(laurent): add check for cat file | bash. // TODO(laurent): detect downloads of zip/tar files containing scripts. 
diff --git a/checks/raw/shell_download_validate_test.go b/checks/raw/shell_download_validate_test.go index 4624353e29c..05e23e10529 100644 --- a/checks/raw/shell_download_validate_test.go +++ b/checks/raw/shell_download_validate_test.go @@ -102,12 +102,17 @@ func TestValidateShellFile(t *testing.T) { var r checker.PinningDependenciesData err = validateShellFile(filename, 0, 0, content, map[string]bool{}, &r) - if err == nil { - t.Errorf("failed to detect shell parsing error: %v", err) + if err != nil { + t.Errorf("error validating shell file: %v", err) + } + + if r.ProcessingErrors == nil { + t.Errorf("failed to register shell parsing error") } } func Test_isDotNetUnpinnedDownload(t *testing.T) { + t.Parallel() type args struct { cmd []string } @@ -202,7 +207,9 @@ func Test_isDotNetUnpinnedDownload(t *testing.T) { }, } for _, tt := range tests { + tt := tt t.Run(tt.name, func(t *testing.T) { + t.Parallel() if got := isNugetUnpinnedDownload(tt.args.cmd); got != tt.want { t.Errorf("isNugetUnpinnedDownload() = %v, want %v", got, tt.want) } @@ -211,6 +218,7 @@ func Test_isDotNetUnpinnedDownload(t *testing.T) { } func Test_isGoUnpinnedDownload(t *testing.T) { + t.Parallel() type args struct { cmd []string } @@ -235,10 +243,212 @@ func Test_isGoUnpinnedDownload(t *testing.T) { }, } for _, tt := range tests { + tt := tt t.Run(tt.name, func(t *testing.T) { + t.Parallel() if got := isGoUnpinnedDownload(tt.args.cmd); got != tt.want { t.Errorf("isGoUnpinnedDownload() = %v, want %v", got, tt.want) } }) } } + +func Test_isNpmDownload(t *testing.T) { + t.Parallel() + type args struct { + cmd []string + } + tests := []struct { + name string + args args + want bool + }{ + { + name: "npm install", + args: args{ + cmd: []string{"npm", "install"}, + }, + want: true, + }, + { + name: "npm ci", + args: args{ + cmd: []string{"npm", "ci"}, + }, + want: true, + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + if got := 
isNpmDownload(tt.args.cmd); got != tt.want { + t.Errorf("isNpmDownload() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_isNpmUnpinnedDownload(t *testing.T) { + t.Parallel() + type args struct { + cmd []string + } + tests := []struct { + name string + args args + want bool + }{ + { + name: "npm install", + args: args{ + cmd: []string{"npm", "install"}, + }, + want: true, + }, + { + name: "npm ci", + args: args{ + cmd: []string{"npm", "ci"}, + }, + want: false, + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + if got := isNpmUnpinnedDownload(tt.args.cmd); got != tt.want { + t.Errorf("isNpmUnpinnedDownload() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_hasUnpinnedURLs(t *testing.T) { + t.Parallel() + type args struct { + cmd []string + } + tests := []struct { + name string + args args + expected bool + }{ + { + name: "Unpinned URL", + args: args{ + cmd: []string{ + "curl", + "-sSL", + "https://dot.net/v1/dotnet-install.sh", + }, + }, + expected: true, + }, + { + name: "GitHub content URL but no path", + args: args{ + cmd: []string{ + "wget", + "-0", + "-", + "https://raw.githubusercontent.com", + }, + }, + expected: true, + }, + { + name: "GitHub content URL but no ref", + args: args{ + cmd: []string{ + "wget", + "-0", + "-", + "https://raw.githubusercontent.com/dotnet/install-scripts", + }, + }, + expected: true, + }, + { + name: "Unpinned GitHub content URL", + args: args{ + cmd: []string{ + "curl", + "-sSL", + "https://raw.githubusercontent.com/dotnet/install-scripts/main/src/dotnet-install.sh", + }, + }, + expected: true, + }, + { + name: "Pinned GitHub content URL but invalid SHA", + args: args{ + cmd: []string{ + "wget", + "-0", + "-", + "https://raw.githubusercontent.com/dotnet/install-scripts/zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz/src/dotnet-install.sh", + }, + }, + expected: true, + }, + { + name: "Pinned GitHub content URL but no file path", + args: args{ + cmd: []string{ + 
"wget", + "-0", + "-", + "https://raw.githubusercontent.com/dotnet/install-scripts/5b142a1e445a6f060d6430b661408989e9580b85", + }, + }, + expected: true, + }, + { + name: "Pinned GitHub content URL", + args: args{ + cmd: []string{ + "wget", + "-0", + "-", + "https://raw.githubusercontent.com/dotnet/install-scripts/5b142a1e445a6f060d6430b661408989e9580b85/src/dotnet-install.sh", + }, + }, + expected: false, + }, + { + name: "Pinned GitHub content URL but HTTP", + args: args{ + cmd: []string{ + "wget", + "-0", + "-", + "http://raw.githubusercontent.com/dotnet/install-scripts/5b142a1e445a6f060d6430b661408989e9580b85/src/dotnet-install.sh", + }, + }, + expected: true, + }, + { + name: "Pinned GitHub URL but not raw content", + args: args{ + cmd: []string{ + "wget", + "-0", + "-", + "https://github.com/dotnet/install-scripts/blob/5b142a1e445a6f060d6430b661408989e9580b85/src/dotnet-install.sh", + }, + }, + expected: true, + }, + } + for _, tt := range tests { + tt := tt // Re-initializing variable so it is not changed while executing the closure below + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + if actual := hasUnpinnedURLs(tt.args.cmd); actual != tt.expected { + t.Errorf("hasUnpinnedURLs() = %v, expected %v for %v", actual, tt.expected, tt.name) + } + }) + } +} diff --git a/checks/raw/testdata/.github/workflows/airflows-codeql.yaml b/checks/raw/testdata/.github/workflows/airflows-codeql.yaml new file mode 100644 index 00000000000..e07d4a50e54 --- /dev/null +++ b/checks/raw/testdata/.github/workflows/airflows-codeql.yaml @@ -0,0 +1,109 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# +--- +name: "CodeQL" + +on: # yamllint disable-line rule:truthy + push: + branches: [main] + schedule: + - cron: '0 2 * * *' + +permissions: + contents: read +concurrency: + group: codeql-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +jobs: + selective-checks: + name: Selective checks + runs-on: ubuntu-20.04 + outputs: + needs-python-scans: ${{ steps.selective-checks.outputs.needs-python-scans }} + needs-javascript-scans: ${{ steps.selective-checks.outputs.needs-javascript-scans }} + steps: + - name: Checkout repository + uses: actions/checkout@v2 + with: + fetch-depth: 2 + persist-credentials: false + - name: Selective checks + id: selective-checks + env: + EVENT_NAME: ${{ github.event_name }} + TARGET_COMMIT_SHA: ${{ github.sha }} + run: | + if [[ ${EVENT_NAME} == "pull_request" ]]; then + # Run selective checks + ./scripts/ci/selective_ci_checks.sh "${TARGET_COMMIT_SHA}" + else + # Run all checks + ./scripts/ci/selective_ci_checks.sh + fi + + analyze: + name: Analyze + runs-on: ubuntu-20.04 + needs: [selective-checks] + strategy: + fail-fast: false + matrix: + # Override automatic language detection by changing the below list + # Supported options are ['csharp', 'cpp', 'go', 'java', 'javascript', 'python'] + language: ['python', 'javascript'] + permissions: + actions: read + contents: read + pull-requests: read + security-events: write + steps: + - name: Checkout repository + uses: actions/checkout@v2 + with: + persist-credentials: false + if: | + matrix.language == 'python' && 
needs.selective-checks.outputs.needs-python-scans == 'true' || + matrix.language == 'javascript' && needs.selective-checks.outputs.needs-javascript-scans == 'true' + + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@v1 + with: + languages: ${{ matrix.language }} + # If you wish to specify custom queries, you can do so here or in a config file. + # By default, queries listed here will override any specified in a config file. + # Prefix the list here with "+" to use these queries and those in the config file. + # queries: ./path/to/local/query, your-org/your-repo/queries@main + if: | + matrix.language == 'python' && needs.selective-checks.outputs.needs-python-scans == 'true' || + matrix.language == 'javascript' && needs.selective-checks.outputs.needs-javascript-scans == 'true' + + # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). + # If this step fails, then you should remove it and run the build manually (see below) + - name: Autobuild + uses: github/codeql-action/autobuild@v1 + if: | + matrix.language == 'python' && needs.selective-checks.outputs.needs-python-scans == 'true' || + matrix.language == 'javascript' && needs.selective-checks.outputs.needs-javascript-scans == 'true' + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v1 + if: | + matrix.language == 'python' && needs.selective-checks.outputs.needs-python-scans == 'true' || + matrix.language == 'javascript' && needs.selective-checks.outputs.needs-javascript-scans == 'true' \ No newline at end of file diff --git a/checks/raw/testdata/.github/workflows/github-pysa-workflow.yaml b/checks/raw/testdata/.github/workflows/github-pysa-workflow.yaml new file mode 100644 index 00000000000..e36886bbc2f --- /dev/null +++ b/checks/raw/testdata/.github/workflows/github-pysa-workflow.yaml @@ -0,0 +1,22 @@ +name: Pysa Scan + +on: + push: + branches: + - main + pull_request: + +name: Pysa + +jobs: + pysa: + runs-on: 
ubuntu-latest + steps: + - uses: actions/checkout@v2 + + - name: Run Pysa Action + uses: facebook/pysa-action + with: + repo-directory: './' + requirements-path: 'requirements.txt' + infer-types: true \ No newline at end of file diff --git a/checks/raw/testdata/.github/workflows/github-qodana-workflow.yaml b/checks/raw/testdata/.github/workflows/github-qodana-workflow.yaml new file mode 100644 index 00000000000..5553c46651f --- /dev/null +++ b/checks/raw/testdata/.github/workflows/github-qodana-workflow.yaml @@ -0,0 +1,25 @@ +name: Qodana +on: + workflow_dispatch: + pull_request: + push: + branches: + - main + - 'releases/*' + +jobs: + qodana: + runs-on: ubuntu-latest + permissions: + contents: write + pull-requests: write + checks: write + steps: + - uses: actions/checkout@v3 + with: + ref: ${{ github.event.pull_request.head.sha }} + fetch-depth: 0 + - name: 'Qodana Scan' + uses: JetBrains/qodana-action@v2023.2 + env: + QODANA_TOKEN: ${{ secrets.QODANA_TOKEN }} \ No newline at end of file diff --git a/checks/raw/testdata/.github/workflows/github-workflow-snyk.yaml b/checks/raw/testdata/.github/workflows/github-workflow-snyk.yaml new file mode 100644 index 00000000000..306486284a6 --- /dev/null +++ b/checks/raw/testdata/.github/workflows/github-workflow-snyk.yaml @@ -0,0 +1,19 @@ +name: Snyk Scan + +on: pull_request +permissions: + contents: read + +jobs: + scan-snyk: + runs-on: ubuntu-latest + permissions: + security-events: write + steps: + - uses: actions/checkout@master + - uses: snyk/actions/setup@master + - name: Run Snyk Scanning + run: | + snyk test + env: + SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} diff --git a/checks/raw/testdata/.github/workflows/github-workflow-unknown-os.yaml b/checks/raw/testdata/.github/workflows/github-workflow-unknown-os.yaml new file mode 100644 index 00000000000..b182842eda0 --- /dev/null +++ b/checks/raw/testdata/.github/workflows/github-workflow-unknown-os.yaml @@ -0,0 +1,45 @@ +name: Workflow with job with unknown operating system 
+ +on: + push: + +jobs: + unknown-os: + name: Job with unknown operating system + runs-on: ${{ matrix.os_python.os }} + strategy: + matrix: + os_python: + [ + { "os": "ubuntu-latest", "python": "py3" }, + { "os": "macos-latest", "python": "py3" }, + { "os": "windows-latest", "python": "py3" }, + ] + steps: + - name: Install Python + uses: actions/setup-python@v4 + with: + python-version: 3.8 + - name: Install cibuildwheel + # note: sync cibuildwheel version with gradle task sdks:python:bdistPy* steps + run: pip install cibuildwheel==2.9.0 + + ubuntu-os: + name: Job with ubuntu operating system + runs-on: ubuntu-latest + strategy: + matrix: + os_python: + [ + { "os": "ubuntu-latest", "python": "py3" }, + { "os": "macos-latest", "python": "py3" }, + { "os": "windows-latest", "python": "py3" }, + ] + steps: + - name: Install Python + uses: actions/setup-python@v4 + with: + python-version: 3.8 + - name: Install cibuildwheel + # note: sync cibuildwheel version with gradle task sdks:python:bdistPy* steps + run: pip install cibuildwheel==2.9.0 \ No newline at end of file diff --git a/checks/raw/testdata/.github/workflows/pom.xml b/checks/raw/testdata/.github/workflows/pom.xml new file mode 100644 index 00000000000..29a367ec0af --- /dev/null +++ b/checks/raw/testdata/.github/workflows/pom.xml @@ -0,0 +1,4 @@ + target/jacoco-report/jacoco.xml +https://sonarqube.private.domain +${projectKey} +${project.artifactId} \ No newline at end of file diff --git a/checks/raw/testdata/Dockerfile-curl-sh b/checks/raw/testdata/Dockerfile-curl-sh index e80a7e194dc..19a9ab0dfac 100644 --- a/checks/raw/testdata/Dockerfile-curl-sh +++ b/checks/raw/testdata/Dockerfile-curl-sh @@ -20,5 +20,11 @@ RUN echo hello && curl -s file-with-sudo2 | sudo bash RUN echo hello && sudo curl -s file-with-sudo | bash | bla RUN ["echo", "hello", "&&", "curl", "-s", "/etc/file2", "|", "sh"] +# Unpinned +RUN curl -sSL https://raw.githubusercontent.com/dotnet/install-scripts/main/src/dotnet-install.sh | bash 
/dev/stdin + +# Pinned +RUN curl -sSL https://raw.githubusercontent.com/dotnet/install-scripts/5b142a1e445a6f060d6430b661408989e9580b85/src/dotnet-install.sh | bash /dev/stdin + FROM scratch FROM python@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 \ No newline at end of file diff --git a/checks/raw/testdata/Dockerfile-download-heredoc b/checks/raw/testdata/Dockerfile-download-heredoc new file mode 100644 index 00000000000..fb1669f4a15 --- /dev/null +++ b/checks/raw/testdata/Dockerfile-download-heredoc @@ -0,0 +1,59 @@ + +# Copyright 2024 OpenSSF Scorecard Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Add tab + FROM python:3.7 + +RUN <<-EOT + pip install --no-deps -e git+https://github.com/username/repo.git@v1.0#egg=package +EOT + +RUN <