Skip to content
This repository has been archived by the owner on Feb 1, 2023. It is now read-only.

sync: update CI config files #485

Merged
merged 6 commits into from
Jun 24, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
18 changes: 0 additions & 18 deletions .circleci/config.yml

This file was deleted.

51 changes: 51 additions & 0 deletions .github/workflows/automerge.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,51 @@
# File managed by web3-bot. DO NOT EDIT.
# See https://github.com/protocol/.github/ for details.

# Automatically merge pull requests opened by web3-bot, as soon as (and only if) all tests pass.
# This reduces the friction associated with updating our workflows.

on: [ pull_request ]
name: Automerge

jobs:
  # First job: decide whether the PR is eligible for automerge.
  # Eligible only when every first-parent commit on the branch was committed by web3-bot.
  automerge-check:
    if: github.event.pull_request.user.login == 'web3-bot'
    runs-on: ubuntu-latest
    outputs:
      status: ${{ steps.should-automerge.outputs.status }}
    steps:
      - uses: actions/checkout@v2
        with:
          # Full history is needed so `git rev-list base..head` below can walk the branch.
          fetch-depth: 0
      - name: Check if we should automerge
        id: should-automerge
        run: |
          for commit in $(git rev-list --first-parent origin/${{ github.event.pull_request.base.ref }}..${{ github.event.pull_request.head.sha }}); do
            committer=$(git show --format=$'%ce' -s "$commit")
            echo "Committer: $committer"
            # NOTE(review): committer email was redacted in the captured source;
            # restored from the protocol/.github template — confirm.
            if [[ "$committer" != "web3-bot@users.noreply.github.com" ]]; then
              echo "Commit $commit wasn't committed by web3-bot, but by $committer."
              # ::set-output is deprecated (and now disabled) on GitHub Actions;
              # write to the $GITHUB_OUTPUT environment file instead.
              echo "status=false" >> "$GITHUB_OUTPUT"
              exit 0
            fi
          done
          echo "status=true" >> "$GITHUB_OUTPUT"
  # Second job: wait for all other checks, then squash-merge the PR.
  automerge:
    needs: automerge-check
    runs-on: ubuntu-latest
    if: ${{ needs.automerge-check.outputs.status == 'true' }}
    steps:
      - name: Wait on tests
        uses: lewagon/wait-on-check-action@bafe56a6863672c681c3cf671f5e10b20abf2eaa # v0.2
        with:
          ref: ${{ github.event.pull_request.head.sha }}
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          wait-interval: 10
          running-workflow-name: 'automerge' # the name of this job, so it doesn't wait on itself
      - name: Merge PR
        uses: pascalgn/automerge-action@741c311a47881be9625932b0a0de1b0937aab1ae # v0.13.1
        env:
          GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
          MERGE_LABELS: ""
          MERGE_METHOD: "squash"
          MERGE_DELETE_BRANCH: true
50 changes: 50 additions & 0 deletions .github/workflows/go-check.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,50 @@
# File managed by web3-bot. DO NOT EDIT.
# See https://github.com/protocol/.github/ for details.

on: [push, pull_request]
name: Go Checks

jobs:
  unit:
    runs-on: ubuntu-latest
    name: All
    steps:
      - uses: actions/checkout@v2
        with:
          submodules: recursive
      - uses: actions/setup-go@v2
        with:
          # Quoted: an unquoted 1.16 would be parsed as a YAML float.
          go-version: "1.16.x"
      - name: Install staticcheck
        run: go install honnef.co/go/tools/cmd/staticcheck@434f5f3816b358fe468fa83dcba62d794e7fe04b # 2021.1 (v0.2.0)
      - name: Check that go.mod is tidy
        # NOTE(review): action version was redacted in the captured source;
        # restored from the protocol/.github template — confirm.
        uses: protocol/multiple-go-modules@v1.2
        with:
          run: |
            go mod tidy
            if [[ -n $(git ls-files --other --exclude-standard --directory -- go.sum) ]]; then
              echo "go.sum was added by go mod tidy"
              exit 1
            fi
            git diff --exit-code -- go.sum go.mod
      - name: gofmt
        if: ${{ success() || failure() }} # run this step even if the previous one failed
        run: |
          out=$(gofmt -s -l .)
          if [[ -n "$out" ]]; then
            # Quote "$out" so each offending file stays on its own line and
            # awk emits one ::error annotation per file (unquoted expansion
            # collapses the list onto a single line).
            echo "$out" | awk '{print "::error file=" $0 ",line=0,col=0::File is not gofmt-ed."}'
            exit 1
          fi
      - name: go vet
        if: ${{ success() || failure() }} # run this step even if the previous one failed
        uses: protocol/multiple-go-modules@v1.2
        with:
          run: go vet ./...
      - name: staticcheck
        if: ${{ success() || failure() }} # run this step even if the previous one failed
        uses: protocol/multiple-go-modules@v1.2
        with:
          run: |
            set -o pipefail
            staticcheck ./... | sed -e 's@\(.*\)\.go@./\1.go@g'

47 changes: 47 additions & 0 deletions .github/workflows/go-test.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@
# File managed by web3-bot. DO NOT EDIT.
# See https://github.com/protocol/.github/ for details.

on: [push, pull_request]
name: Go Test

jobs:
  unit:
    strategy:
      fail-fast: false
      matrix:
        os: [ "ubuntu", "windows", "macos" ]
        # Versions quoted so YAML does not read them as floats.
        go: [ "1.15.x", "1.16.x" ]
    runs-on: ${{ matrix.os }}-latest
    name: ${{ matrix.os }} (go ${{ matrix.go }})
    steps:
      - uses: actions/checkout@v2
        with:
          submodules: recursive
      - uses: actions/setup-go@v2
        with:
          go-version: ${{ matrix.go }}
      - name: Go information
        run: |
          go version
          go env
      - name: Run tests
        # NOTE(review): action version was redacted in the captured source;
        # restored from the protocol/.github template — confirm.
        uses: protocol/multiple-go-modules@v1.2
        with:
          run: go test -v -coverprofile coverage.txt ./...
      - name: Run tests (32 bit)
        if: ${{ matrix.os != 'macos' }} # can't run 32 bit tests on OSX.
        uses: protocol/multiple-go-modules@v1.2
        env:
          # Quoted: a bare 386 would be a YAML integer, not the string GOARCH expects.
          GOARCH: "386"
        with:
          run: go test -v ./...
      - name: Run tests with race detector
        if: ${{ matrix.os == 'ubuntu' }} # speed things up. Windows and OSX VMs are slow
        uses: protocol/multiple-go-modules@v1.2
        with:
          run: go test -v -race ./...
      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@a1ed4b322b4b38cb846afb5a0ebfa17086917d27 # v1.5.0
        with:
          file: coverage.txt
          env_vars: OS=${{ matrix.os }}, GO=${{ matrix.go }}
44 changes: 22 additions & 22 deletions benchmarks_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,9 +19,9 @@ import (

bitswap "github.com/ipfs/go-bitswap"
bssession "github.com/ipfs/go-bitswap/internal/session"
bsnet "github.com/ipfs/go-bitswap/network"
testinstance "github.com/ipfs/go-bitswap/testinstance"
tn "github.com/ipfs/go-bitswap/testnet"
bsnet "github.com/ipfs/go-bitswap/network"
cid "github.com/ipfs/go-cid"
delay "github.com/ipfs/go-ipfs-delay"
mockrouting "github.com/ipfs/go-ipfs-routing/mock"
Expand Down Expand Up @@ -53,55 +53,55 @@ type bench struct {
var benches = []bench{
// Fetch from two seed nodes that both have all 100 blocks
// - request one at a time, in series
bench{"3Nodes-AllToAll-OneAtATime", 3, 100, allToAll, oneAtATime},
{"3Nodes-AllToAll-OneAtATime", 3, 100, allToAll, oneAtATime},
// - request all 100 with a single GetBlocks() call
bench{"3Nodes-AllToAll-BigBatch", 3, 100, allToAll, batchFetchAll},
{"3Nodes-AllToAll-BigBatch", 3, 100, allToAll, batchFetchAll},

// Fetch from two seed nodes, one at a time, where:
// - node A has blocks 0 - 74
// - node B has blocks 25 - 99
bench{"3Nodes-Overlap1-OneAtATime", 3, 100, overlap1, oneAtATime},
{"3Nodes-Overlap1-OneAtATime", 3, 100, overlap1, oneAtATime},

// Fetch from two seed nodes, where:
// - node A has even blocks
// - node B has odd blocks
// - both nodes have every third block

// - request one at a time, in series
bench{"3Nodes-Overlap3-OneAtATime", 3, 100, overlap2, oneAtATime},
{"3Nodes-Overlap3-OneAtATime", 3, 100, overlap2, oneAtATime},
// - request 10 at a time, in series
bench{"3Nodes-Overlap3-BatchBy10", 3, 100, overlap2, batchFetchBy10},
{"3Nodes-Overlap3-BatchBy10", 3, 100, overlap2, batchFetchBy10},
// - request all 100 in parallel as individual GetBlock() calls
bench{"3Nodes-Overlap3-AllConcurrent", 3, 100, overlap2, fetchAllConcurrent},
{"3Nodes-Overlap3-AllConcurrent", 3, 100, overlap2, fetchAllConcurrent},
// - request all 100 with a single GetBlocks() call
bench{"3Nodes-Overlap3-BigBatch", 3, 100, overlap2, batchFetchAll},
{"3Nodes-Overlap3-BigBatch", 3, 100, overlap2, batchFetchAll},
// - request 1, then 10, then 89 blocks (similar to how IPFS would fetch a file)
bench{"3Nodes-Overlap3-UnixfsFetch", 3, 100, overlap2, unixfsFileFetch},
{"3Nodes-Overlap3-UnixfsFetch", 3, 100, overlap2, unixfsFileFetch},

// Fetch from nine seed nodes, all nodes have all blocks
// - request one at a time, in series
bench{"10Nodes-AllToAll-OneAtATime", 10, 100, allToAll, oneAtATime},
{"10Nodes-AllToAll-OneAtATime", 10, 100, allToAll, oneAtATime},
// - request 10 at a time, in series
bench{"10Nodes-AllToAll-BatchFetchBy10", 10, 100, allToAll, batchFetchBy10},
{"10Nodes-AllToAll-BatchFetchBy10", 10, 100, allToAll, batchFetchBy10},
// - request all 100 with a single GetBlocks() call
bench{"10Nodes-AllToAll-BigBatch", 10, 100, allToAll, batchFetchAll},
{"10Nodes-AllToAll-BigBatch", 10, 100, allToAll, batchFetchAll},
// - request all 100 in parallel as individual GetBlock() calls
bench{"10Nodes-AllToAll-AllConcurrent", 10, 100, allToAll, fetchAllConcurrent},
{"10Nodes-AllToAll-AllConcurrent", 10, 100, allToAll, fetchAllConcurrent},
// - request 1, then 10, then 89 blocks (similar to how IPFS would fetch a file)
bench{"10Nodes-AllToAll-UnixfsFetch", 10, 100, allToAll, unixfsFileFetch},
{"10Nodes-AllToAll-UnixfsFetch", 10, 100, allToAll, unixfsFileFetch},
// - follow a typical IPFS request pattern for 1000 blocks
bench{"10Nodes-AllToAll-UnixfsFetchLarge", 10, 1000, allToAll, unixfsFileFetchLarge},
{"10Nodes-AllToAll-UnixfsFetchLarge", 10, 1000, allToAll, unixfsFileFetchLarge},

// Fetch from nine seed nodes, blocks are distributed randomly across all nodes (no dups)
// - request one at a time, in series
bench{"10Nodes-OnePeerPerBlock-OneAtATime", 10, 100, onePeerPerBlock, oneAtATime},
{"10Nodes-OnePeerPerBlock-OneAtATime", 10, 100, onePeerPerBlock, oneAtATime},
// - request all 100 with a single GetBlocks() call
bench{"10Nodes-OnePeerPerBlock-BigBatch", 10, 100, onePeerPerBlock, batchFetchAll},
{"10Nodes-OnePeerPerBlock-BigBatch", 10, 100, onePeerPerBlock, batchFetchAll},
// - request 1, then 10, then 89 blocks (similar to how IPFS would fetch a file)
bench{"10Nodes-OnePeerPerBlock-UnixfsFetch", 10, 100, onePeerPerBlock, unixfsFileFetch},
{"10Nodes-OnePeerPerBlock-UnixfsFetch", 10, 100, onePeerPerBlock, unixfsFileFetch},

// Fetch from 199 seed nodes, all nodes have all blocks, fetch all 20 blocks with a single GetBlocks() call
bench{"200Nodes-AllToAll-BigBatch", 200, 20, allToAll, batchFetchAll},
{"200Nodes-AllToAll-BigBatch", 200, 20, allToAll, batchFetchAll},
}

func BenchmarkFixedDelay(b *testing.B) {
Expand All @@ -127,9 +127,9 @@ type mixedBench struct {
}

var mixedBenches = []mixedBench{
mixedBench{bench{"3Nodes-Overlap3-OneAtATime", 3, 10, overlap2, oneAtATime}, 1, 2},
mixedBench{bench{"3Nodes-AllToAll-OneAtATime", 3, 10, allToAll, oneAtATime}, 1, 2},
mixedBench{bench{"3Nodes-Overlap3-AllConcurrent", 3, 10, overlap2, fetchAllConcurrent}, 1, 2},
{bench{"3Nodes-Overlap3-OneAtATime", 3, 10, overlap2, oneAtATime}, 1, 2},
{bench{"3Nodes-AllToAll-OneAtATime", 3, 10, allToAll, oneAtATime}, 1, 2},
{bench{"3Nodes-Overlap3-AllConcurrent", 3, 10, overlap2, fetchAllConcurrent}, 1, 2},
// mixedBench{bench{"3Nodes-Overlap3-UnixfsFetch", 3, 100, overlap2, unixfsFileFetch}, 1, 2},
}

Expand Down
14 changes: 7 additions & 7 deletions bitswap.go
Original file line number Diff line number Diff line change
Expand Up @@ -183,13 +183,13 @@ func New(parent context.Context, network bsnet.BitSwapNetwork,
sm = bssm.New(ctx, sessionFactory, sim, sessionPeerManagerFactory, bpm, pm, notif, network.Self())

bs := &Bitswap{
blockstore: bstore,
network: network,
process: px,
newBlocks: make(chan cid.Cid, HasBlockBufferSize),
provideKeys: make(chan cid.Cid, provideKeysBufferSize),
pm: pm,
pqm: pqm,
blockstore: bstore,
network: network,
process: px,
newBlocks: make(chan cid.Cid, HasBlockBufferSize),
provideKeys: make(chan cid.Cid, provideKeysBufferSize),
pm: pm,
pqm: pqm,
sm: sm,
sim: sim,
notif: notif,
Expand Down
2 changes: 1 addition & 1 deletion go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -30,4 +30,4 @@ require (
go.uber.org/zap v1.16.0
)

go 1.12
go 1.15
26 changes: 13 additions & 13 deletions internal/blockpresencemanager/blockpresencemanager_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -208,24 +208,24 @@ func TestAllPeersDoNotHaveBlock(t *testing.T) {
}

testcases := []testcase{
testcase{[]peer.ID{p0}, []cid.Cid{c0}, []cid.Cid{}},
testcase{[]peer.ID{p1}, []cid.Cid{c0}, []cid.Cid{c0}},
testcase{[]peer.ID{p2}, []cid.Cid{c0}, []cid.Cid{}},
{[]peer.ID{p0}, []cid.Cid{c0}, []cid.Cid{}},
{[]peer.ID{p1}, []cid.Cid{c0}, []cid.Cid{c0}},
{[]peer.ID{p2}, []cid.Cid{c0}, []cid.Cid{}},

testcase{[]peer.ID{p0}, []cid.Cid{c1}, []cid.Cid{c1}},
testcase{[]peer.ID{p1}, []cid.Cid{c1}, []cid.Cid{}},
testcase{[]peer.ID{p2}, []cid.Cid{c1}, []cid.Cid{}},
{[]peer.ID{p0}, []cid.Cid{c1}, []cid.Cid{c1}},
{[]peer.ID{p1}, []cid.Cid{c1}, []cid.Cid{}},
{[]peer.ID{p2}, []cid.Cid{c1}, []cid.Cid{}},

testcase{[]peer.ID{p0}, []cid.Cid{c2}, []cid.Cid{c2}},
testcase{[]peer.ID{p1}, []cid.Cid{c2}, []cid.Cid{}},
testcase{[]peer.ID{p2}, []cid.Cid{c2}, []cid.Cid{c2}},
{[]peer.ID{p0}, []cid.Cid{c2}, []cid.Cid{c2}},
{[]peer.ID{p1}, []cid.Cid{c2}, []cid.Cid{}},
{[]peer.ID{p2}, []cid.Cid{c2}, []cid.Cid{c2}},

// p0 received DONT_HAVE for c1 & c2 (but not for c0)
testcase{[]peer.ID{p0}, []cid.Cid{c0, c1, c2}, []cid.Cid{c1, c2}},
testcase{[]peer.ID{p0, p1}, []cid.Cid{c0, c1, c2}, []cid.Cid{}},
{[]peer.ID{p0}, []cid.Cid{c0, c1, c2}, []cid.Cid{c1, c2}},
{[]peer.ID{p0, p1}, []cid.Cid{c0, c1, c2}, []cid.Cid{}},
// Both p0 and p2 received DONT_HAVE for c2
testcase{[]peer.ID{p0, p2}, []cid.Cid{c0, c1, c2}, []cid.Cid{c2}},
testcase{[]peer.ID{p0, p1, p2}, []cid.Cid{c0, c1, c2}, []cid.Cid{}},
{[]peer.ID{p0, p2}, []cid.Cid{c0, c1, c2}, []cid.Cid{c2}},
{[]peer.ID{p0, p1, p2}, []cid.Cid{c0, c1, c2}, []cid.Cid{}},
}

for i, tc := range testcases {
Expand Down
2 changes: 1 addition & 1 deletion internal/decision/engine.go
Original file line number Diff line number Diff line change
Expand Up @@ -163,7 +163,7 @@ type Engine struct {

sendDontHaves bool

self peer.ID
self peer.ID
}

// NewEngine creates a new block sending engine for the given block store
Expand Down
Loading