diff --git a/.circleci/config.yml b/.circleci/config.yml index c5d41fc93fa..85b3fc73e50 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -2,6 +2,7 @@ version: 2.1 orbs: go: gotest/tools@0.0.13 aws-cli: circleci/aws-cli@1.3.2 + packer: salaxander/packer@0.0.3 executors: golang: @@ -277,6 +278,11 @@ jobs: - install-deps - prepare - run: make calibnet + - run: mkdir linux-calibnet && mv lotus lotus-miner lotus-worker linux-calibnet + - persist_to_workspace: + root: "." + paths: + - linux-calibnet build-lotus-soup: description: | Compile `lotus-soup` Testground test plan @@ -289,7 +295,7 @@ jobs: - run: cd extern/filecoin-ffi && make - run: name: "go get lotus@master" - command: cd testplans/lotus-soup && go get github.com/filecoin-project/lotus@master + command: cd testplans/lotus-soup && go mod edit -replace=github.com/filecoin-project/lotus=../.. - run: name: "build lotus-soup testplan" command: pushd testplans/lotus-soup && go build -tags=testground . @@ -583,6 +589,22 @@ jobs: docker push $<>/<>:${tag} done + publish-packer: + description: build and push AWS IAM and DigitalOcean droplet. + executor: + name: packer/default + packer-version: 1.6.6 + steps: + - checkout + - attach_workspace: + at: "." + - packer/build: + template: tools/packer/lotus.pkr.hcl + args: "-var ci_workspace_bins=./linux -var lotus_network=mainnet -var git_tag=$CIRCLE_TAG" + - packer/build: + template: tools/packer/lotus.pkr.hcl + args: "-var ci_workspace_bins=./linux-calibnet -var lotus_network=calibrationnet -var git_tag=$CIRCLE_TAG" + workflows: version: 2.1 ci: @@ -683,3 +705,15 @@ workflows: path: . 
repo: lotus-dev tag: '${CIRCLE_SHA1:0:8}' + - publish-packer: + requires: + - build-all + - build-ntwk-calibration + filters: + branches: + ignore: + - /.*/ + tags: + only: + - /^v\d+\.\d+\.\d+$/ + diff --git a/api/api_common.go b/api/api_common.go index 30a52e8662e..fc89f11cd98 100644 --- a/api/api_common.go +++ b/api/api_common.go @@ -11,8 +11,6 @@ import ( "github.com/libp2p/go-libp2p-core/network" "github.com/libp2p/go-libp2p-core/peer" protocol "github.com/libp2p/go-libp2p-core/protocol" - - "github.com/filecoin-project/lotus/build" ) type Common interface { @@ -33,6 +31,7 @@ type Common interface { NetPubsubScores(context.Context) ([]PubsubScore, error) NetAutoNatStatus(context.Context) (NatInfo, error) NetAgentVersion(ctx context.Context, p peer.ID) (string, error) + NetPeerInfo(context.Context, peer.ID) (*ExtendedPeerInfo, error) // NetBandwidthStats returns statistics about the nodes total bandwidth // usage and current rate across all peers and protocols. @@ -57,7 +56,7 @@ type Common interface { ID(context.Context) (peer.ID, error) // Version provides information about API provider - Version(context.Context) (Version, error) + Version(context.Context) (APIVersion, error) LogList(context.Context) ([]string, error) LogSetLevel(context.Context, string, string) error @@ -71,15 +70,15 @@ type Common interface { Closing(context.Context) (<-chan struct{}, error) } -// Version provides various build-time information -type Version struct { +// APIVersion provides various build-time information +type APIVersion struct { Version string // APIVersion is a binary encoded semver version of the remote implementing // this api // // See APIVersion in build/version.go - APIVersion build.Version + APIVersion Version // TODO: git commit / os / genesis cid? 
@@ -87,7 +86,7 @@ type Version struct { BlockDelay uint64 } -func (v Version) String() string { +func (v APIVersion) String() string { return fmt.Sprintf("%s+api%s", v.Version, v.APIVersion.String()) } diff --git a/api/api_full.go b/api/api_full.go index 9c967ca32fd..ca3a02c747e 100644 --- a/api/api_full.go +++ b/api/api_full.go @@ -32,6 +32,14 @@ import ( "github.com/filecoin-project/lotus/node/modules/dtypes" ) +//go:generate go run github.com/golang/mock/mockgen -destination=mocks/mock_full.go -package=mocks . FullNode + +// ChainIO abstracts operations for accessing raw IPLD objects. +type ChainIO interface { + ChainReadObj(context.Context, cid.Cid) ([]byte, error) + ChainHasObj(context.Context, cid.Cid) (bool, error) +} + // FullNode API is a low-level interface to the Filecoin network full node type FullNode interface { Common @@ -862,6 +870,8 @@ const ( func (v SyncStateStage) String() string { switch v { + case StageIdle: + return "idle" case StageHeaders: return "header sync" case StagePersistHeaders: diff --git a/api/api_storage.go b/api/api_storage.go index eb4584e103a..a153875f0de 100644 --- a/api/api_storage.go +++ b/api/api_storage.go @@ -36,7 +36,7 @@ type StorageMiner interface { MiningBase(context.Context) (*types.TipSet, error) // Temp api for testing - PledgeSector(context.Context) error + PledgeSector(context.Context) (abi.SectorID, error) // Get the status of a given sector by ID SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (SectorInfo, error) @@ -238,6 +238,9 @@ type AddressConfig struct { PreCommitControl []address.Address CommitControl []address.Address TerminateControl []address.Address + + DisableOwnerFallback bool + DisableWorkerFallback bool } // PendingDealInfo has info about pending deals and when they are due to be diff --git a/api/api_worker.go b/api/api_worker.go index e85f1e7d41d..999c42680fa 100644 --- a/api/api_worker.go +++ b/api/api_worker.go @@ -9,12 +9,10 @@ import ( 
"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" "github.com/filecoin-project/lotus/extern/sector-storage/stores" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" - - "github.com/filecoin-project/lotus/build" ) type WorkerAPI interface { - Version(context.Context) (build.Version, error) + Version(context.Context) (Version, error) // TODO: Info() (name, ...) ? TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error) // TaskType -> Weight diff --git a/api/apibstore/apibstore.go b/api/apibstore/apibstore.go deleted file mode 100644 index cf9f4f24c66..00000000000 --- a/api/apibstore/apibstore.go +++ /dev/null @@ -1,68 +0,0 @@ -package apibstore - -import ( - "context" - - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/lib/blockstore" -) - -type ChainIO interface { - ChainReadObj(context.Context, cid.Cid) ([]byte, error) - ChainHasObj(context.Context, cid.Cid) (bool, error) -} - -type apiBStore struct { - api ChainIO -} - -func NewAPIBlockstore(cio ChainIO) blockstore.Blockstore { - return &apiBStore{ - api: cio, - } -} - -func (a *apiBStore) DeleteBlock(cid.Cid) error { - return xerrors.New("not supported") -} - -func (a *apiBStore) Has(c cid.Cid) (bool, error) { - return a.api.ChainHasObj(context.TODO(), c) -} - -func (a *apiBStore) Get(c cid.Cid) (blocks.Block, error) { - bb, err := a.api.ChainReadObj(context.TODO(), c) - if err != nil { - return nil, err - } - return blocks.NewBlockWithCid(bb, c) -} - -func (a *apiBStore) GetSize(c cid.Cid) (int, error) { - bb, err := a.api.ChainReadObj(context.TODO(), c) - if err != nil { - return 0, err - } - return len(bb), nil -} - -func (a *apiBStore) Put(blocks.Block) error { - return xerrors.New("not supported") -} - -func (a *apiBStore) PutMany([]blocks.Block) error { - return xerrors.New("not supported") -} - -func (a *apiBStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { - 
return nil, xerrors.New("not supported") -} - -func (a *apiBStore) HashOnRead(enabled bool) { - return -} - -var _ blockstore.Blockstore = &apiBStore{} diff --git a/api/apistruct/struct.go b/api/apistruct/struct.go index ded34ac5ad8..34b18cd4198 100644 --- a/api/apistruct/struct.go +++ b/api/apistruct/struct.go @@ -33,7 +33,6 @@ import ( "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/builtin/paych" "github.com/filecoin-project/lotus/chain/types" @@ -60,12 +59,13 @@ type CommonStruct struct { NetBandwidthStatsByPeer func(ctx context.Context) (map[string]metrics.Stats, error) `perm:"read"` NetBandwidthStatsByProtocol func(ctx context.Context) (map[protocol.ID]metrics.Stats, error) `perm:"read"` NetAgentVersion func(ctx context.Context, p peer.ID) (string, error) `perm:"read"` + NetPeerInfo func(context.Context, peer.ID) (*api.ExtendedPeerInfo, error) `perm:"read"` NetBlockAdd func(ctx context.Context, acl api.NetBlockList) error `perm:"admin"` NetBlockRemove func(ctx context.Context, acl api.NetBlockList) error `perm:"admin"` NetBlockList func(ctx context.Context) (api.NetBlockList, error) `perm:"read"` - ID func(context.Context) (peer.ID, error) `perm:"read"` - Version func(context.Context) (api.Version, error) `perm:"read"` + ID func(context.Context) (peer.ID, error) `perm:"read"` + Version func(context.Context) (api.APIVersion, error) `perm:"read"` LogList func(context.Context) ([]string, error) `perm:"write"` LogSetLevel func(context.Context, string, string) error `perm:"write"` @@ -304,7 +304,7 @@ type StorageMinerStruct struct { MarketPendingDeals func(ctx context.Context) (api.PendingDealInfo, error) `perm:"write"` MarketPublishPendingDeals func(ctx context.Context) error `perm:"admin"` - PledgeSector func(context.Context) error `perm:"write"` + PledgeSector 
func(context.Context) (abi.SectorID, error) `perm:"write"` SectorsStatus func(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (api.SectorInfo, error) `perm:"read"` SectorsList func(context.Context) ([]abi.SectorNumber, error) `perm:"read"` @@ -389,7 +389,7 @@ type WorkerStruct struct { Internal struct { // TODO: lower perms - Version func(context.Context) (build.Version, error) `perm:"admin"` + Version func(context.Context) (api.Version, error) `perm:"admin"` TaskTypes func(context.Context) (map[sealtasks.TaskType]struct{}, error) `perm:"admin"` Paths func(context.Context) ([]stores.StoragePath, error) `perm:"admin"` @@ -540,13 +540,17 @@ func (c *CommonStruct) NetAgentVersion(ctx context.Context, p peer.ID) (string, return c.Internal.NetAgentVersion(ctx, p) } +func (c *CommonStruct) NetPeerInfo(ctx context.Context, p peer.ID) (*api.ExtendedPeerInfo, error) { + return c.Internal.NetPeerInfo(ctx, p) +} + // ID implements API.ID func (c *CommonStruct) ID(ctx context.Context) (peer.ID, error) { return c.Internal.ID(ctx) } // Version implements API.Version -func (c *CommonStruct) Version(ctx context.Context) (api.Version, error) { +func (c *CommonStruct) Version(ctx context.Context) (api.APIVersion, error) { return c.Internal.Version(ctx) } @@ -1274,7 +1278,7 @@ func (c *StorageMinerStruct) ActorAddressConfig(ctx context.Context) (api.Addres return c.Internal.ActorAddressConfig(ctx) } -func (c *StorageMinerStruct) PledgeSector(ctx context.Context) error { +func (c *StorageMinerStruct) PledgeSector(ctx context.Context) (abi.SectorID, error) { return c.Internal.PledgeSector(ctx) } @@ -1610,7 +1614,7 @@ func (c *StorageMinerStruct) CheckProvable(ctx context.Context, pp abi.Registere // WorkerStruct -func (w *WorkerStruct) Version(ctx context.Context) (build.Version, error) { +func (w *WorkerStruct) Version(ctx context.Context) (api.Version, error) { return w.Internal.Version(ctx) } diff --git a/api/docgen/docgen.go b/api/docgen/docgen.go index 
7d3ac4bcf75..7b6a2725b0f 100644 --- a/api/docgen/docgen.go +++ b/api/docgen/docgen.go @@ -113,7 +113,7 @@ func init() { addExample(network.Connected) addExample(dtypes.NetworkName("lotus")) addExample(api.SyncStateStage(1)) - addExample(build.FullAPIVersion) + addExample(api.FullAPIVersion) addExample(api.PCHInbound) addExample(time.Minute) addExample(datatransfer.TransferID(3)) @@ -123,6 +123,8 @@ func init() { addExample(retrievalmarket.DealStatusNew) addExample(network.ReachabilityPublic) addExample(build.NewestNetworkVersion) + addExample(map[string]int{"name": 42}) + addExample(map[string]time.Time{"name": time.Unix(1615243938, 0).UTC()}) addExample(&types.ExecutionTrace{ Msg: exampleValue("init", reflect.TypeOf(&types.Message{}), nil).(*types.Message), MsgRct: exampleValue("init", reflect.TypeOf(&types.MessageReceipt{}), nil).(*types.MessageReceipt), diff --git a/api/mocks/mock_full.go b/api/mocks/mock_full.go new file mode 100644 index 00000000000..0b76c784dcd --- /dev/null +++ b/api/mocks/mock_full.go @@ -0,0 +1,2987 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/filecoin-project/lotus/api (interfaces: FullNode) + +// Package mocks is a generated GoMock package. 
+package mocks + +import ( + context "context" + address "github.com/filecoin-project/go-address" + bitfield "github.com/filecoin-project/go-bitfield" + datatransfer "github.com/filecoin-project/go-data-transfer" + storagemarket "github.com/filecoin-project/go-fil-markets/storagemarket" + auth "github.com/filecoin-project/go-jsonrpc/auth" + multistore "github.com/filecoin-project/go-multistore" + abi "github.com/filecoin-project/go-state-types/abi" + big "github.com/filecoin-project/go-state-types/big" + crypto "github.com/filecoin-project/go-state-types/crypto" + dline "github.com/filecoin-project/go-state-types/dline" + network "github.com/filecoin-project/go-state-types/network" + api "github.com/filecoin-project/lotus/api" + miner "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + types "github.com/filecoin-project/lotus/chain/types" + marketevents "github.com/filecoin-project/lotus/markets/loggers" + dtypes "github.com/filecoin-project/lotus/node/modules/dtypes" + miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" + paych "github.com/filecoin-project/specs-actors/actors/builtin/paych" + gomock "github.com/golang/mock/gomock" + uuid "github.com/google/uuid" + cid "github.com/ipfs/go-cid" + metrics "github.com/libp2p/go-libp2p-core/metrics" + network0 "github.com/libp2p/go-libp2p-core/network" + peer "github.com/libp2p/go-libp2p-core/peer" + protocol "github.com/libp2p/go-libp2p-core/protocol" + reflect "reflect" +) + +// MockFullNode is a mock of FullNode interface +type MockFullNode struct { + ctrl *gomock.Controller + recorder *MockFullNodeMockRecorder +} + +// MockFullNodeMockRecorder is the mock recorder for MockFullNode +type MockFullNodeMockRecorder struct { + mock *MockFullNode +} + +// NewMockFullNode creates a new mock instance +func NewMockFullNode(ctrl *gomock.Controller) *MockFullNode { + mock := &MockFullNode{ctrl: ctrl} + mock.recorder = &MockFullNodeMockRecorder{mock} + return mock +} + +// EXPECT returns an 
object that allows the caller to indicate expected use +func (m *MockFullNode) EXPECT() *MockFullNodeMockRecorder { + return m.recorder +} + +// AuthNew mocks base method +func (m *MockFullNode) AuthNew(arg0 context.Context, arg1 []auth.Permission) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AuthNew", arg0, arg1) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AuthNew indicates an expected call of AuthNew +func (mr *MockFullNodeMockRecorder) AuthNew(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AuthNew", reflect.TypeOf((*MockFullNode)(nil).AuthNew), arg0, arg1) +} + +// AuthVerify mocks base method +func (m *MockFullNode) AuthVerify(arg0 context.Context, arg1 string) ([]auth.Permission, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AuthVerify", arg0, arg1) + ret0, _ := ret[0].([]auth.Permission) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AuthVerify indicates an expected call of AuthVerify +func (mr *MockFullNodeMockRecorder) AuthVerify(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AuthVerify", reflect.TypeOf((*MockFullNode)(nil).AuthVerify), arg0, arg1) +} + +// BeaconGetEntry mocks base method +func (m *MockFullNode) BeaconGetEntry(arg0 context.Context, arg1 abi.ChainEpoch) (*types.BeaconEntry, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BeaconGetEntry", arg0, arg1) + ret0, _ := ret[0].(*types.BeaconEntry) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BeaconGetEntry indicates an expected call of BeaconGetEntry +func (mr *MockFullNodeMockRecorder) BeaconGetEntry(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BeaconGetEntry", reflect.TypeOf((*MockFullNode)(nil).BeaconGetEntry), arg0, arg1) +} + +// ChainDeleteObj mocks base method +func (m 
*MockFullNode) ChainDeleteObj(arg0 context.Context, arg1 cid.Cid) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainDeleteObj", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ChainDeleteObj indicates an expected call of ChainDeleteObj +func (mr *MockFullNodeMockRecorder) ChainDeleteObj(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainDeleteObj", reflect.TypeOf((*MockFullNode)(nil).ChainDeleteObj), arg0, arg1) +} + +// ChainExport mocks base method +func (m *MockFullNode) ChainExport(arg0 context.Context, arg1 abi.ChainEpoch, arg2 bool, arg3 types.TipSetKey) (<-chan []byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainExport", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(<-chan []byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainExport indicates an expected call of ChainExport +func (mr *MockFullNodeMockRecorder) ChainExport(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainExport", reflect.TypeOf((*MockFullNode)(nil).ChainExport), arg0, arg1, arg2, arg3) +} + +// ChainGetBlock mocks base method +func (m *MockFullNode) ChainGetBlock(arg0 context.Context, arg1 cid.Cid) (*types.BlockHeader, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetBlock", arg0, arg1) + ret0, _ := ret[0].(*types.BlockHeader) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetBlock indicates an expected call of ChainGetBlock +func (mr *MockFullNodeMockRecorder) ChainGetBlock(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetBlock", reflect.TypeOf((*MockFullNode)(nil).ChainGetBlock), arg0, arg1) +} + +// ChainGetBlockMessages mocks base method +func (m *MockFullNode) ChainGetBlockMessages(arg0 context.Context, arg1 cid.Cid) (*api.BlockMessages, error) { + m.ctrl.T.Helper() 
+ ret := m.ctrl.Call(m, "ChainGetBlockMessages", arg0, arg1) + ret0, _ := ret[0].(*api.BlockMessages) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetBlockMessages indicates an expected call of ChainGetBlockMessages +func (mr *MockFullNodeMockRecorder) ChainGetBlockMessages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetBlockMessages", reflect.TypeOf((*MockFullNode)(nil).ChainGetBlockMessages), arg0, arg1) +} + +// ChainGetGenesis mocks base method +func (m *MockFullNode) ChainGetGenesis(arg0 context.Context) (*types.TipSet, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetGenesis", arg0) + ret0, _ := ret[0].(*types.TipSet) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetGenesis indicates an expected call of ChainGetGenesis +func (mr *MockFullNodeMockRecorder) ChainGetGenesis(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetGenesis", reflect.TypeOf((*MockFullNode)(nil).ChainGetGenesis), arg0) +} + +// ChainGetMessage mocks base method +func (m *MockFullNode) ChainGetMessage(arg0 context.Context, arg1 cid.Cid) (*types.Message, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetMessage", arg0, arg1) + ret0, _ := ret[0].(*types.Message) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetMessage indicates an expected call of ChainGetMessage +func (mr *MockFullNodeMockRecorder) ChainGetMessage(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetMessage", reflect.TypeOf((*MockFullNode)(nil).ChainGetMessage), arg0, arg1) +} + +// ChainGetNode mocks base method +func (m *MockFullNode) ChainGetNode(arg0 context.Context, arg1 string) (*api.IpldObject, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetNode", arg0, arg1) + ret0, _ := ret[0].(*api.IpldObject) + 
ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetNode indicates an expected call of ChainGetNode +func (mr *MockFullNodeMockRecorder) ChainGetNode(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetNode", reflect.TypeOf((*MockFullNode)(nil).ChainGetNode), arg0, arg1) +} + +// ChainGetParentMessages mocks base method +func (m *MockFullNode) ChainGetParentMessages(arg0 context.Context, arg1 cid.Cid) ([]api.Message, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetParentMessages", arg0, arg1) + ret0, _ := ret[0].([]api.Message) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetParentMessages indicates an expected call of ChainGetParentMessages +func (mr *MockFullNodeMockRecorder) ChainGetParentMessages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetParentMessages", reflect.TypeOf((*MockFullNode)(nil).ChainGetParentMessages), arg0, arg1) +} + +// ChainGetParentReceipts mocks base method +func (m *MockFullNode) ChainGetParentReceipts(arg0 context.Context, arg1 cid.Cid) ([]*types.MessageReceipt, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetParentReceipts", arg0, arg1) + ret0, _ := ret[0].([]*types.MessageReceipt) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetParentReceipts indicates an expected call of ChainGetParentReceipts +func (mr *MockFullNodeMockRecorder) ChainGetParentReceipts(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetParentReceipts", reflect.TypeOf((*MockFullNode)(nil).ChainGetParentReceipts), arg0, arg1) +} + +// ChainGetPath mocks base method +func (m *MockFullNode) ChainGetPath(arg0 context.Context, arg1, arg2 types.TipSetKey) ([]*api.HeadChange, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetPath", arg0, arg1, arg2) + 
ret0, _ := ret[0].([]*api.HeadChange) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetPath indicates an expected call of ChainGetPath +func (mr *MockFullNodeMockRecorder) ChainGetPath(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetPath", reflect.TypeOf((*MockFullNode)(nil).ChainGetPath), arg0, arg1, arg2) +} + +// ChainGetRandomnessFromBeacon mocks base method +func (m *MockFullNode) ChainGetRandomnessFromBeacon(arg0 context.Context, arg1 types.TipSetKey, arg2 crypto.DomainSeparationTag, arg3 abi.ChainEpoch, arg4 []byte) (abi.Randomness, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetRandomnessFromBeacon", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(abi.Randomness) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetRandomnessFromBeacon indicates an expected call of ChainGetRandomnessFromBeacon +func (mr *MockFullNodeMockRecorder) ChainGetRandomnessFromBeacon(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetRandomnessFromBeacon", reflect.TypeOf((*MockFullNode)(nil).ChainGetRandomnessFromBeacon), arg0, arg1, arg2, arg3, arg4) +} + +// ChainGetRandomnessFromTickets mocks base method +func (m *MockFullNode) ChainGetRandomnessFromTickets(arg0 context.Context, arg1 types.TipSetKey, arg2 crypto.DomainSeparationTag, arg3 abi.ChainEpoch, arg4 []byte) (abi.Randomness, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetRandomnessFromTickets", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(abi.Randomness) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetRandomnessFromTickets indicates an expected call of ChainGetRandomnessFromTickets +func (mr *MockFullNodeMockRecorder) ChainGetRandomnessFromTickets(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetRandomnessFromTickets", reflect.TypeOf((*MockFullNode)(nil).ChainGetRandomnessFromTickets), arg0, arg1, arg2, arg3, arg4) +} + +// ChainGetTipSet mocks base method +func (m *MockFullNode) ChainGetTipSet(arg0 context.Context, arg1 types.TipSetKey) (*types.TipSet, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetTipSet", arg0, arg1) + ret0, _ := ret[0].(*types.TipSet) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetTipSet indicates an expected call of ChainGetTipSet +func (mr *MockFullNodeMockRecorder) ChainGetTipSet(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetTipSet", reflect.TypeOf((*MockFullNode)(nil).ChainGetTipSet), arg0, arg1) +} + +// ChainGetTipSetByHeight mocks base method +func (m *MockFullNode) ChainGetTipSetByHeight(arg0 context.Context, arg1 abi.ChainEpoch, arg2 types.TipSetKey) (*types.TipSet, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetTipSetByHeight", arg0, arg1, arg2) + ret0, _ := ret[0].(*types.TipSet) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetTipSetByHeight indicates an expected call of ChainGetTipSetByHeight +func (mr *MockFullNodeMockRecorder) ChainGetTipSetByHeight(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetTipSetByHeight", reflect.TypeOf((*MockFullNode)(nil).ChainGetTipSetByHeight), arg0, arg1, arg2) +} + +// ChainHasObj mocks base method +func (m *MockFullNode) ChainHasObj(arg0 context.Context, arg1 cid.Cid) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainHasObj", arg0, arg1) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainHasObj indicates an expected call of ChainHasObj +func (mr *MockFullNodeMockRecorder) ChainHasObj(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + 
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainHasObj", reflect.TypeOf((*MockFullNode)(nil).ChainHasObj), arg0, arg1) +} + +// ChainHead mocks base method +func (m *MockFullNode) ChainHead(arg0 context.Context) (*types.TipSet, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainHead", arg0) + ret0, _ := ret[0].(*types.TipSet) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainHead indicates an expected call of ChainHead +func (mr *MockFullNodeMockRecorder) ChainHead(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainHead", reflect.TypeOf((*MockFullNode)(nil).ChainHead), arg0) +} + +// ChainNotify mocks base method +func (m *MockFullNode) ChainNotify(arg0 context.Context) (<-chan []*api.HeadChange, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainNotify", arg0) + ret0, _ := ret[0].(<-chan []*api.HeadChange) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainNotify indicates an expected call of ChainNotify +func (mr *MockFullNodeMockRecorder) ChainNotify(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainNotify", reflect.TypeOf((*MockFullNode)(nil).ChainNotify), arg0) +} + +// ChainReadObj mocks base method +func (m *MockFullNode) ChainReadObj(arg0 context.Context, arg1 cid.Cid) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainReadObj", arg0, arg1) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainReadObj indicates an expected call of ChainReadObj +func (mr *MockFullNodeMockRecorder) ChainReadObj(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainReadObj", reflect.TypeOf((*MockFullNode)(nil).ChainReadObj), arg0, arg1) +} + +// ChainSetHead mocks base method +func (m *MockFullNode) ChainSetHead(arg0 context.Context, arg1 types.TipSetKey) error { 
+ m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainSetHead", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ChainSetHead indicates an expected call of ChainSetHead +func (mr *MockFullNodeMockRecorder) ChainSetHead(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainSetHead", reflect.TypeOf((*MockFullNode)(nil).ChainSetHead), arg0, arg1) +} + +// ChainStatObj mocks base method +func (m *MockFullNode) ChainStatObj(arg0 context.Context, arg1, arg2 cid.Cid) (api.ObjStat, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainStatObj", arg0, arg1, arg2) + ret0, _ := ret[0].(api.ObjStat) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainStatObj indicates an expected call of ChainStatObj +func (mr *MockFullNodeMockRecorder) ChainStatObj(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainStatObj", reflect.TypeOf((*MockFullNode)(nil).ChainStatObj), arg0, arg1, arg2) +} + +// ChainTipSetWeight mocks base method +func (m *MockFullNode) ChainTipSetWeight(arg0 context.Context, arg1 types.TipSetKey) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainTipSetWeight", arg0, arg1) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainTipSetWeight indicates an expected call of ChainTipSetWeight +func (mr *MockFullNodeMockRecorder) ChainTipSetWeight(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainTipSetWeight", reflect.TypeOf((*MockFullNode)(nil).ChainTipSetWeight), arg0, arg1) +} + +// ClientCalcCommP mocks base method +func (m *MockFullNode) ClientCalcCommP(arg0 context.Context, arg1 string) (*api.CommPRet, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientCalcCommP", arg0, arg1) + ret0, _ := ret[0].(*api.CommPRet) + ret1, _ := ret[1].(error) + return ret0, 
ret1 +} + +// ClientCalcCommP indicates an expected call of ClientCalcCommP +func (mr *MockFullNodeMockRecorder) ClientCalcCommP(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientCalcCommP", reflect.TypeOf((*MockFullNode)(nil).ClientCalcCommP), arg0, arg1) +} + +// ClientCancelDataTransfer mocks base method +func (m *MockFullNode) ClientCancelDataTransfer(arg0 context.Context, arg1 datatransfer.TransferID, arg2 peer.ID, arg3 bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientCancelDataTransfer", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// ClientCancelDataTransfer indicates an expected call of ClientCancelDataTransfer +func (mr *MockFullNodeMockRecorder) ClientCancelDataTransfer(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientCancelDataTransfer", reflect.TypeOf((*MockFullNode)(nil).ClientCancelDataTransfer), arg0, arg1, arg2, arg3) +} + +// ClientDataTransferUpdates mocks base method +func (m *MockFullNode) ClientDataTransferUpdates(arg0 context.Context) (<-chan api.DataTransferChannel, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientDataTransferUpdates", arg0) + ret0, _ := ret[0].(<-chan api.DataTransferChannel) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientDataTransferUpdates indicates an expected call of ClientDataTransferUpdates +func (mr *MockFullNodeMockRecorder) ClientDataTransferUpdates(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientDataTransferUpdates", reflect.TypeOf((*MockFullNode)(nil).ClientDataTransferUpdates), arg0) +} + +// ClientDealPieceCID mocks base method +func (m *MockFullNode) ClientDealPieceCID(arg0 context.Context, arg1 cid.Cid) (api.DataCIDSize, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientDealPieceCID", arg0, 
arg1) + ret0, _ := ret[0].(api.DataCIDSize) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientDealPieceCID indicates an expected call of ClientDealPieceCID +func (mr *MockFullNodeMockRecorder) ClientDealPieceCID(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientDealPieceCID", reflect.TypeOf((*MockFullNode)(nil).ClientDealPieceCID), arg0, arg1) +} + +// ClientDealSize mocks base method +func (m *MockFullNode) ClientDealSize(arg0 context.Context, arg1 cid.Cid) (api.DataSize, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientDealSize", arg0, arg1) + ret0, _ := ret[0].(api.DataSize) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientDealSize indicates an expected call of ClientDealSize +func (mr *MockFullNodeMockRecorder) ClientDealSize(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientDealSize", reflect.TypeOf((*MockFullNode)(nil).ClientDealSize), arg0, arg1) +} + +// ClientFindData mocks base method +func (m *MockFullNode) ClientFindData(arg0 context.Context, arg1 cid.Cid, arg2 *cid.Cid) ([]api.QueryOffer, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientFindData", arg0, arg1, arg2) + ret0, _ := ret[0].([]api.QueryOffer) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientFindData indicates an expected call of ClientFindData +func (mr *MockFullNodeMockRecorder) ClientFindData(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientFindData", reflect.TypeOf((*MockFullNode)(nil).ClientFindData), arg0, arg1, arg2) +} + +// ClientGenCar mocks base method +func (m *MockFullNode) ClientGenCar(arg0 context.Context, arg1 api.FileRef, arg2 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientGenCar", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// 
ClientGenCar indicates an expected call of ClientGenCar +func (mr *MockFullNodeMockRecorder) ClientGenCar(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGenCar", reflect.TypeOf((*MockFullNode)(nil).ClientGenCar), arg0, arg1, arg2) +} + +// ClientGetDealInfo mocks base method +func (m *MockFullNode) ClientGetDealInfo(arg0 context.Context, arg1 cid.Cid) (*api.DealInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientGetDealInfo", arg0, arg1) + ret0, _ := ret[0].(*api.DealInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientGetDealInfo indicates an expected call of ClientGetDealInfo +func (mr *MockFullNodeMockRecorder) ClientGetDealInfo(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetDealInfo", reflect.TypeOf((*MockFullNode)(nil).ClientGetDealInfo), arg0, arg1) +} + +// ClientGetDealStatus mocks base method +func (m *MockFullNode) ClientGetDealStatus(arg0 context.Context, arg1 uint64) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientGetDealStatus", arg0, arg1) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientGetDealStatus indicates an expected call of ClientGetDealStatus +func (mr *MockFullNodeMockRecorder) ClientGetDealStatus(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetDealStatus", reflect.TypeOf((*MockFullNode)(nil).ClientGetDealStatus), arg0, arg1) +} + +// ClientGetDealUpdates mocks base method +func (m *MockFullNode) ClientGetDealUpdates(arg0 context.Context) (<-chan api.DealInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientGetDealUpdates", arg0) + ret0, _ := ret[0].(<-chan api.DealInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientGetDealUpdates indicates an expected call of 
ClientGetDealUpdates +func (mr *MockFullNodeMockRecorder) ClientGetDealUpdates(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetDealUpdates", reflect.TypeOf((*MockFullNode)(nil).ClientGetDealUpdates), arg0) +} + +// ClientHasLocal mocks base method +func (m *MockFullNode) ClientHasLocal(arg0 context.Context, arg1 cid.Cid) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientHasLocal", arg0, arg1) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientHasLocal indicates an expected call of ClientHasLocal +func (mr *MockFullNodeMockRecorder) ClientHasLocal(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientHasLocal", reflect.TypeOf((*MockFullNode)(nil).ClientHasLocal), arg0, arg1) +} + +// ClientImport mocks base method +func (m *MockFullNode) ClientImport(arg0 context.Context, arg1 api.FileRef) (*api.ImportRes, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientImport", arg0, arg1) + ret0, _ := ret[0].(*api.ImportRes) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientImport indicates an expected call of ClientImport +func (mr *MockFullNodeMockRecorder) ClientImport(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientImport", reflect.TypeOf((*MockFullNode)(nil).ClientImport), arg0, arg1) +} + +// ClientListDataTransfers mocks base method +func (m *MockFullNode) ClientListDataTransfers(arg0 context.Context) ([]api.DataTransferChannel, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientListDataTransfers", arg0) + ret0, _ := ret[0].([]api.DataTransferChannel) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientListDataTransfers indicates an expected call of ClientListDataTransfers +func (mr *MockFullNodeMockRecorder) ClientListDataTransfers(arg0 
interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListDataTransfers", reflect.TypeOf((*MockFullNode)(nil).ClientListDataTransfers), arg0) +} + +// ClientListDeals mocks base method +func (m *MockFullNode) ClientListDeals(arg0 context.Context) ([]api.DealInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientListDeals", arg0) + ret0, _ := ret[0].([]api.DealInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientListDeals indicates an expected call of ClientListDeals +func (mr *MockFullNodeMockRecorder) ClientListDeals(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListDeals", reflect.TypeOf((*MockFullNode)(nil).ClientListDeals), arg0) +} + +// ClientListImports mocks base method +func (m *MockFullNode) ClientListImports(arg0 context.Context) ([]api.Import, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientListImports", arg0) + ret0, _ := ret[0].([]api.Import) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientListImports indicates an expected call of ClientListImports +func (mr *MockFullNodeMockRecorder) ClientListImports(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListImports", reflect.TypeOf((*MockFullNode)(nil).ClientListImports), arg0) +} + +// ClientMinerQueryOffer mocks base method +func (m *MockFullNode) ClientMinerQueryOffer(arg0 context.Context, arg1 address.Address, arg2 cid.Cid, arg3 *cid.Cid) (api.QueryOffer, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientMinerQueryOffer", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(api.QueryOffer) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientMinerQueryOffer indicates an expected call of ClientMinerQueryOffer +func (mr *MockFullNodeMockRecorder) ClientMinerQueryOffer(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + 
mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientMinerQueryOffer", reflect.TypeOf((*MockFullNode)(nil).ClientMinerQueryOffer), arg0, arg1, arg2, arg3) +} + +// ClientQueryAsk mocks base method +func (m *MockFullNode) ClientQueryAsk(arg0 context.Context, arg1 peer.ID, arg2 address.Address) (*storagemarket.StorageAsk, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientQueryAsk", arg0, arg1, arg2) + ret0, _ := ret[0].(*storagemarket.StorageAsk) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientQueryAsk indicates an expected call of ClientQueryAsk +func (mr *MockFullNodeMockRecorder) ClientQueryAsk(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientQueryAsk", reflect.TypeOf((*MockFullNode)(nil).ClientQueryAsk), arg0, arg1, arg2) +} + +// ClientRemoveImport mocks base method +func (m *MockFullNode) ClientRemoveImport(arg0 context.Context, arg1 multistore.StoreID) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientRemoveImport", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ClientRemoveImport indicates an expected call of ClientRemoveImport +func (mr *MockFullNodeMockRecorder) ClientRemoveImport(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRemoveImport", reflect.TypeOf((*MockFullNode)(nil).ClientRemoveImport), arg0, arg1) +} + +// ClientRestartDataTransfer mocks base method +func (m *MockFullNode) ClientRestartDataTransfer(arg0 context.Context, arg1 datatransfer.TransferID, arg2 peer.ID, arg3 bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientRestartDataTransfer", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// ClientRestartDataTransfer indicates an expected call of ClientRestartDataTransfer +func (mr *MockFullNodeMockRecorder) ClientRestartDataTransfer(arg0, arg1, arg2, arg3 
interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRestartDataTransfer", reflect.TypeOf((*MockFullNode)(nil).ClientRestartDataTransfer), arg0, arg1, arg2, arg3) +} + +// ClientRetrieve mocks base method +func (m *MockFullNode) ClientRetrieve(arg0 context.Context, arg1 api.RetrievalOrder, arg2 *api.FileRef) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientRetrieve", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// ClientRetrieve indicates an expected call of ClientRetrieve +func (mr *MockFullNodeMockRecorder) ClientRetrieve(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieve", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieve), arg0, arg1, arg2) +} + +// ClientRetrieveTryRestartInsufficientFunds mocks base method +func (m *MockFullNode) ClientRetrieveTryRestartInsufficientFunds(arg0 context.Context, arg1 address.Address) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientRetrieveTryRestartInsufficientFunds", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ClientRetrieveTryRestartInsufficientFunds indicates an expected call of ClientRetrieveTryRestartInsufficientFunds +func (mr *MockFullNodeMockRecorder) ClientRetrieveTryRestartInsufficientFunds(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieveTryRestartInsufficientFunds", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieveTryRestartInsufficientFunds), arg0, arg1) +} + +// ClientRetrieveWithEvents mocks base method +func (m *MockFullNode) ClientRetrieveWithEvents(arg0 context.Context, arg1 api.RetrievalOrder, arg2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientRetrieveWithEvents", arg0, arg1, arg2) + ret0, _ := ret[0].(<-chan marketevents.RetrievalEvent) + ret1, 
_ := ret[1].(error) + return ret0, ret1 +} + +// ClientRetrieveWithEvents indicates an expected call of ClientRetrieveWithEvents +func (mr *MockFullNodeMockRecorder) ClientRetrieveWithEvents(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieveWithEvents", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieveWithEvents), arg0, arg1, arg2) +} + +// ClientStartDeal mocks base method +func (m *MockFullNode) ClientStartDeal(arg0 context.Context, arg1 *api.StartDealParams) (*cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientStartDeal", arg0, arg1) + ret0, _ := ret[0].(*cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientStartDeal indicates an expected call of ClientStartDeal +func (mr *MockFullNodeMockRecorder) ClientStartDeal(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientStartDeal", reflect.TypeOf((*MockFullNode)(nil).ClientStartDeal), arg0, arg1) +} + +// Closing mocks base method +func (m *MockFullNode) Closing(arg0 context.Context) (<-chan struct{}, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Closing", arg0) + ret0, _ := ret[0].(<-chan struct{}) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Closing indicates an expected call of Closing +func (mr *MockFullNodeMockRecorder) Closing(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Closing", reflect.TypeOf((*MockFullNode)(nil).Closing), arg0) +} + +// CreateBackup mocks base method +func (m *MockFullNode) CreateBackup(arg0 context.Context, arg1 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateBackup", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// CreateBackup indicates an expected call of CreateBackup +func (mr *MockFullNodeMockRecorder) CreateBackup(arg0, arg1 interface{}) *gomock.Call { + 
mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateBackup", reflect.TypeOf((*MockFullNode)(nil).CreateBackup), arg0, arg1) +} + +// GasEstimateFeeCap mocks base method +func (m *MockFullNode) GasEstimateFeeCap(arg0 context.Context, arg1 *types.Message, arg2 int64, arg3 types.TipSetKey) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GasEstimateFeeCap", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GasEstimateFeeCap indicates an expected call of GasEstimateFeeCap +func (mr *MockFullNodeMockRecorder) GasEstimateFeeCap(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GasEstimateFeeCap", reflect.TypeOf((*MockFullNode)(nil).GasEstimateFeeCap), arg0, arg1, arg2, arg3) +} + +// GasEstimateGasLimit mocks base method +func (m *MockFullNode) GasEstimateGasLimit(arg0 context.Context, arg1 *types.Message, arg2 types.TipSetKey) (int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GasEstimateGasLimit", arg0, arg1, arg2) + ret0, _ := ret[0].(int64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GasEstimateGasLimit indicates an expected call of GasEstimateGasLimit +func (mr *MockFullNodeMockRecorder) GasEstimateGasLimit(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GasEstimateGasLimit", reflect.TypeOf((*MockFullNode)(nil).GasEstimateGasLimit), arg0, arg1, arg2) +} + +// GasEstimateGasPremium mocks base method +func (m *MockFullNode) GasEstimateGasPremium(arg0 context.Context, arg1 uint64, arg2 address.Address, arg3 int64, arg4 types.TipSetKey) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GasEstimateGasPremium", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GasEstimateGasPremium indicates an 
expected call of GasEstimateGasPremium +func (mr *MockFullNodeMockRecorder) GasEstimateGasPremium(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GasEstimateGasPremium", reflect.TypeOf((*MockFullNode)(nil).GasEstimateGasPremium), arg0, arg1, arg2, arg3, arg4) +} + +// GasEstimateMessageGas mocks base method +func (m *MockFullNode) GasEstimateMessageGas(arg0 context.Context, arg1 *types.Message, arg2 *api.MessageSendSpec, arg3 types.TipSetKey) (*types.Message, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GasEstimateMessageGas", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*types.Message) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GasEstimateMessageGas indicates an expected call of GasEstimateMessageGas +func (mr *MockFullNodeMockRecorder) GasEstimateMessageGas(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GasEstimateMessageGas", reflect.TypeOf((*MockFullNode)(nil).GasEstimateMessageGas), arg0, arg1, arg2, arg3) +} + +// ID mocks base method +func (m *MockFullNode) ID(arg0 context.Context) (peer.ID, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ID", arg0) + ret0, _ := ret[0].(peer.ID) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ID indicates an expected call of ID +func (mr *MockFullNodeMockRecorder) ID(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ID", reflect.TypeOf((*MockFullNode)(nil).ID), arg0) +} + +// LogList mocks base method +func (m *MockFullNode) LogList(arg0 context.Context) ([]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LogList", arg0) + ret0, _ := ret[0].([]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// LogList indicates an expected call of LogList +func (mr *MockFullNodeMockRecorder) LogList(arg0 interface{}) *gomock.Call { + 
mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LogList", reflect.TypeOf((*MockFullNode)(nil).LogList), arg0) +} + +// LogSetLevel mocks base method +func (m *MockFullNode) LogSetLevel(arg0 context.Context, arg1, arg2 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LogSetLevel", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// LogSetLevel indicates an expected call of LogSetLevel +func (mr *MockFullNodeMockRecorder) LogSetLevel(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LogSetLevel", reflect.TypeOf((*MockFullNode)(nil).LogSetLevel), arg0, arg1, arg2) +} + +// MarketAddBalance mocks base method +func (m *MockFullNode) MarketAddBalance(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarketAddBalance", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MarketAddBalance indicates an expected call of MarketAddBalance +func (mr *MockFullNodeMockRecorder) MarketAddBalance(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketAddBalance", reflect.TypeOf((*MockFullNode)(nil).MarketAddBalance), arg0, arg1, arg2, arg3) +} + +// MarketGetReserved mocks base method +func (m *MockFullNode) MarketGetReserved(arg0 context.Context, arg1 address.Address) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarketGetReserved", arg0, arg1) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MarketGetReserved indicates an expected call of MarketGetReserved +func (mr *MockFullNodeMockRecorder) MarketGetReserved(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketGetReserved", 
reflect.TypeOf((*MockFullNode)(nil).MarketGetReserved), arg0, arg1) +} + +// MarketReleaseFunds mocks base method +func (m *MockFullNode) MarketReleaseFunds(arg0 context.Context, arg1 address.Address, arg2 big.Int) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarketReleaseFunds", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// MarketReleaseFunds indicates an expected call of MarketReleaseFunds +func (mr *MockFullNodeMockRecorder) MarketReleaseFunds(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketReleaseFunds", reflect.TypeOf((*MockFullNode)(nil).MarketReleaseFunds), arg0, arg1, arg2) +} + +// MarketReserveFunds mocks base method +func (m *MockFullNode) MarketReserveFunds(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarketReserveFunds", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MarketReserveFunds indicates an expected call of MarketReserveFunds +func (mr *MockFullNodeMockRecorder) MarketReserveFunds(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketReserveFunds", reflect.TypeOf((*MockFullNode)(nil).MarketReserveFunds), arg0, arg1, arg2, arg3) +} + +// MarketWithdraw mocks base method +func (m *MockFullNode) MarketWithdraw(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarketWithdraw", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MarketWithdraw indicates an expected call of MarketWithdraw +func (mr *MockFullNodeMockRecorder) MarketWithdraw(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, 
"MarketWithdraw", reflect.TypeOf((*MockFullNode)(nil).MarketWithdraw), arg0, arg1, arg2, arg3) +} + +// MinerCreateBlock mocks base method +func (m *MockFullNode) MinerCreateBlock(arg0 context.Context, arg1 *api.BlockTemplate) (*types.BlockMsg, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MinerCreateBlock", arg0, arg1) + ret0, _ := ret[0].(*types.BlockMsg) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MinerCreateBlock indicates an expected call of MinerCreateBlock +func (mr *MockFullNodeMockRecorder) MinerCreateBlock(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MinerCreateBlock", reflect.TypeOf((*MockFullNode)(nil).MinerCreateBlock), arg0, arg1) +} + +// MinerGetBaseInfo mocks base method +func (m *MockFullNode) MinerGetBaseInfo(arg0 context.Context, arg1 address.Address, arg2 abi.ChainEpoch, arg3 types.TipSetKey) (*api.MiningBaseInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MinerGetBaseInfo", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*api.MiningBaseInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MinerGetBaseInfo indicates an expected call of MinerGetBaseInfo +func (mr *MockFullNodeMockRecorder) MinerGetBaseInfo(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MinerGetBaseInfo", reflect.TypeOf((*MockFullNode)(nil).MinerGetBaseInfo), arg0, arg1, arg2, arg3) +} + +// MpoolBatchPush mocks base method +func (m *MockFullNode) MpoolBatchPush(arg0 context.Context, arg1 []*types.SignedMessage) ([]cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolBatchPush", arg0, arg1) + ret0, _ := ret[0].([]cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolBatchPush indicates an expected call of MpoolBatchPush +func (mr *MockFullNodeMockRecorder) MpoolBatchPush(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolBatchPush", reflect.TypeOf((*MockFullNode)(nil).MpoolBatchPush), arg0, arg1) +} + +// MpoolBatchPushMessage mocks base method +func (m *MockFullNode) MpoolBatchPushMessage(arg0 context.Context, arg1 []*types.Message, arg2 *api.MessageSendSpec) ([]*types.SignedMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolBatchPushMessage", arg0, arg1, arg2) + ret0, _ := ret[0].([]*types.SignedMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolBatchPushMessage indicates an expected call of MpoolBatchPushMessage +func (mr *MockFullNodeMockRecorder) MpoolBatchPushMessage(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolBatchPushMessage", reflect.TypeOf((*MockFullNode)(nil).MpoolBatchPushMessage), arg0, arg1, arg2) +} + +// MpoolBatchPushUntrusted mocks base method +func (m *MockFullNode) MpoolBatchPushUntrusted(arg0 context.Context, arg1 []*types.SignedMessage) ([]cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolBatchPushUntrusted", arg0, arg1) + ret0, _ := ret[0].([]cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolBatchPushUntrusted indicates an expected call of MpoolBatchPushUntrusted +func (mr *MockFullNodeMockRecorder) MpoolBatchPushUntrusted(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolBatchPushUntrusted", reflect.TypeOf((*MockFullNode)(nil).MpoolBatchPushUntrusted), arg0, arg1) +} + +// MpoolClear mocks base method +func (m *MockFullNode) MpoolClear(arg0 context.Context, arg1 bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolClear", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// MpoolClear indicates an expected call of MpoolClear +func (mr *MockFullNodeMockRecorder) MpoolClear(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolClear", reflect.TypeOf((*MockFullNode)(nil).MpoolClear), arg0, arg1) +} + +// MpoolGetConfig mocks base method +func (m *MockFullNode) MpoolGetConfig(arg0 context.Context) (*types.MpoolConfig, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolGetConfig", arg0) + ret0, _ := ret[0].(*types.MpoolConfig) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolGetConfig indicates an expected call of MpoolGetConfig +func (mr *MockFullNodeMockRecorder) MpoolGetConfig(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolGetConfig", reflect.TypeOf((*MockFullNode)(nil).MpoolGetConfig), arg0) +} + +// MpoolGetNonce mocks base method +func (m *MockFullNode) MpoolGetNonce(arg0 context.Context, arg1 address.Address) (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolGetNonce", arg0, arg1) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolGetNonce indicates an expected call of MpoolGetNonce +func (mr *MockFullNodeMockRecorder) MpoolGetNonce(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolGetNonce", reflect.TypeOf((*MockFullNode)(nil).MpoolGetNonce), arg0, arg1) +} + +// MpoolPending mocks base method +func (m *MockFullNode) MpoolPending(arg0 context.Context, arg1 types.TipSetKey) ([]*types.SignedMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolPending", arg0, arg1) + ret0, _ := ret[0].([]*types.SignedMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolPending indicates an expected call of MpoolPending +func (mr *MockFullNodeMockRecorder) MpoolPending(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolPending", reflect.TypeOf((*MockFullNode)(nil).MpoolPending), arg0, arg1) +} + +// MpoolPush mocks 
base method +func (m *MockFullNode) MpoolPush(arg0 context.Context, arg1 *types.SignedMessage) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolPush", arg0, arg1) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolPush indicates an expected call of MpoolPush +func (mr *MockFullNodeMockRecorder) MpoolPush(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolPush", reflect.TypeOf((*MockFullNode)(nil).MpoolPush), arg0, arg1) +} + +// MpoolPushMessage mocks base method +func (m *MockFullNode) MpoolPushMessage(arg0 context.Context, arg1 *types.Message, arg2 *api.MessageSendSpec) (*types.SignedMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolPushMessage", arg0, arg1, arg2) + ret0, _ := ret[0].(*types.SignedMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolPushMessage indicates an expected call of MpoolPushMessage +func (mr *MockFullNodeMockRecorder) MpoolPushMessage(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolPushMessage", reflect.TypeOf((*MockFullNode)(nil).MpoolPushMessage), arg0, arg1, arg2) +} + +// MpoolPushUntrusted mocks base method +func (m *MockFullNode) MpoolPushUntrusted(arg0 context.Context, arg1 *types.SignedMessage) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolPushUntrusted", arg0, arg1) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolPushUntrusted indicates an expected call of MpoolPushUntrusted +func (mr *MockFullNodeMockRecorder) MpoolPushUntrusted(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolPushUntrusted", reflect.TypeOf((*MockFullNode)(nil).MpoolPushUntrusted), arg0, arg1) +} + +// MpoolSelect mocks base method +func (m *MockFullNode) 
MpoolSelect(arg0 context.Context, arg1 types.TipSetKey, arg2 float64) ([]*types.SignedMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolSelect", arg0, arg1, arg2) + ret0, _ := ret[0].([]*types.SignedMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolSelect indicates an expected call of MpoolSelect +func (mr *MockFullNodeMockRecorder) MpoolSelect(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolSelect", reflect.TypeOf((*MockFullNode)(nil).MpoolSelect), arg0, arg1, arg2) +} + +// MpoolSetConfig mocks base method +func (m *MockFullNode) MpoolSetConfig(arg0 context.Context, arg1 *types.MpoolConfig) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolSetConfig", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// MpoolSetConfig indicates an expected call of MpoolSetConfig +func (mr *MockFullNodeMockRecorder) MpoolSetConfig(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolSetConfig", reflect.TypeOf((*MockFullNode)(nil).MpoolSetConfig), arg0, arg1) +} + +// MpoolSub mocks base method +func (m *MockFullNode) MpoolSub(arg0 context.Context) (<-chan api.MpoolUpdate, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolSub", arg0) + ret0, _ := ret[0].(<-chan api.MpoolUpdate) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolSub indicates an expected call of MpoolSub +func (mr *MockFullNodeMockRecorder) MpoolSub(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolSub", reflect.TypeOf((*MockFullNode)(nil).MpoolSub), arg0) +} + +// MsigAddApprove mocks base method +func (m *MockFullNode) MsigAddApprove(arg0 context.Context, arg1, arg2 address.Address, arg3 uint64, arg4, arg5 address.Address, arg6 bool) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigAddApprove", 
arg0, arg1, arg2, arg3, arg4, arg5, arg6) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigAddApprove indicates an expected call of MsigAddApprove +func (mr *MockFullNodeMockRecorder) MsigAddApprove(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigAddApprove", reflect.TypeOf((*MockFullNode)(nil).MsigAddApprove), arg0, arg1, arg2, arg3, arg4, arg5, arg6) +} + +// MsigAddCancel mocks base method +func (m *MockFullNode) MsigAddCancel(arg0 context.Context, arg1, arg2 address.Address, arg3 uint64, arg4 address.Address, arg5 bool) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigAddCancel", arg0, arg1, arg2, arg3, arg4, arg5) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigAddCancel indicates an expected call of MsigAddCancel +func (mr *MockFullNodeMockRecorder) MsigAddCancel(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigAddCancel", reflect.TypeOf((*MockFullNode)(nil).MsigAddCancel), arg0, arg1, arg2, arg3, arg4, arg5) +} + +// MsigAddPropose mocks base method +func (m *MockFullNode) MsigAddPropose(arg0 context.Context, arg1, arg2, arg3 address.Address, arg4 bool) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigAddPropose", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigAddPropose indicates an expected call of MsigAddPropose +func (mr *MockFullNodeMockRecorder) MsigAddPropose(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigAddPropose", reflect.TypeOf((*MockFullNode)(nil).MsigAddPropose), arg0, arg1, arg2, arg3, arg4) +} + +// MsigApprove mocks base method +func (m 
*MockFullNode) MsigApprove(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3 address.Address) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigApprove", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigApprove indicates an expected call of MsigApprove +func (mr *MockFullNodeMockRecorder) MsigApprove(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigApprove", reflect.TypeOf((*MockFullNode)(nil).MsigApprove), arg0, arg1, arg2, arg3) +} + +// MsigApproveTxnHash mocks base method +func (m *MockFullNode) MsigApproveTxnHash(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3, arg4 address.Address, arg5 big.Int, arg6 address.Address, arg7 uint64, arg8 []byte) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigApproveTxnHash", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigApproveTxnHash indicates an expected call of MsigApproveTxnHash +func (mr *MockFullNodeMockRecorder) MsigApproveTxnHash(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigApproveTxnHash", reflect.TypeOf((*MockFullNode)(nil).MsigApproveTxnHash), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) +} + +// MsigCancel mocks base method +func (m *MockFullNode) MsigCancel(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3 address.Address, arg4 big.Int, arg5 address.Address, arg6 uint64, arg7 []byte) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigCancel", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigCancel indicates an expected call of MsigCancel +func (mr 
*MockFullNodeMockRecorder) MsigCancel(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigCancel", reflect.TypeOf((*MockFullNode)(nil).MsigCancel), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) +} + +// MsigCreate mocks base method +func (m *MockFullNode) MsigCreate(arg0 context.Context, arg1 uint64, arg2 []address.Address, arg3 abi.ChainEpoch, arg4 big.Int, arg5 address.Address, arg6 big.Int) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigCreate", arg0, arg1, arg2, arg3, arg4, arg5, arg6) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigCreate indicates an expected call of MsigCreate +func (mr *MockFullNodeMockRecorder) MsigCreate(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigCreate", reflect.TypeOf((*MockFullNode)(nil).MsigCreate), arg0, arg1, arg2, arg3, arg4, arg5, arg6) +} + +// MsigGetAvailableBalance mocks base method +func (m *MockFullNode) MsigGetAvailableBalance(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigGetAvailableBalance", arg0, arg1, arg2) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigGetAvailableBalance indicates an expected call of MsigGetAvailableBalance +func (mr *MockFullNodeMockRecorder) MsigGetAvailableBalance(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigGetAvailableBalance", reflect.TypeOf((*MockFullNode)(nil).MsigGetAvailableBalance), arg0, arg1, arg2) +} + +// MsigGetPending mocks base method +func (m *MockFullNode) MsigGetPending(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) ([]*api.MsigTransaction, error) { + 
m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigGetPending", arg0, arg1, arg2) + ret0, _ := ret[0].([]*api.MsigTransaction) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigGetPending indicates an expected call of MsigGetPending +func (mr *MockFullNodeMockRecorder) MsigGetPending(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigGetPending", reflect.TypeOf((*MockFullNode)(nil).MsigGetPending), arg0, arg1, arg2) +} + +// MsigGetVested mocks base method +func (m *MockFullNode) MsigGetVested(arg0 context.Context, arg1 address.Address, arg2, arg3 types.TipSetKey) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigGetVested", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigGetVested indicates an expected call of MsigGetVested +func (mr *MockFullNodeMockRecorder) MsigGetVested(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigGetVested", reflect.TypeOf((*MockFullNode)(nil).MsigGetVested), arg0, arg1, arg2, arg3) +} + +// MsigGetVestingSchedule mocks base method +func (m *MockFullNode) MsigGetVestingSchedule(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (api.MsigVesting, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigGetVestingSchedule", arg0, arg1, arg2) + ret0, _ := ret[0].(api.MsigVesting) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigGetVestingSchedule indicates an expected call of MsigGetVestingSchedule +func (mr *MockFullNodeMockRecorder) MsigGetVestingSchedule(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigGetVestingSchedule", reflect.TypeOf((*MockFullNode)(nil).MsigGetVestingSchedule), arg0, arg1, arg2) +} + +// MsigPropose mocks base method +func (m *MockFullNode) 
MsigPropose(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int, arg4 address.Address, arg5 uint64, arg6 []byte) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigPropose", arg0, arg1, arg2, arg3, arg4, arg5, arg6) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigPropose indicates an expected call of MsigPropose +func (mr *MockFullNodeMockRecorder) MsigPropose(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigPropose", reflect.TypeOf((*MockFullNode)(nil).MsigPropose), arg0, arg1, arg2, arg3, arg4, arg5, arg6) +} + +// MsigRemoveSigner mocks base method +func (m *MockFullNode) MsigRemoveSigner(arg0 context.Context, arg1, arg2, arg3 address.Address, arg4 bool) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigRemoveSigner", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigRemoveSigner indicates an expected call of MsigRemoveSigner +func (mr *MockFullNodeMockRecorder) MsigRemoveSigner(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigRemoveSigner", reflect.TypeOf((*MockFullNode)(nil).MsigRemoveSigner), arg0, arg1, arg2, arg3, arg4) +} + +// MsigSwapApprove mocks base method +func (m *MockFullNode) MsigSwapApprove(arg0 context.Context, arg1, arg2 address.Address, arg3 uint64, arg4, arg5, arg6 address.Address) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigSwapApprove", arg0, arg1, arg2, arg3, arg4, arg5, arg6) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigSwapApprove indicates an expected call of MsigSwapApprove +func (mr *MockFullNodeMockRecorder) MsigSwapApprove(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { + 
mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigSwapApprove", reflect.TypeOf((*MockFullNode)(nil).MsigSwapApprove), arg0, arg1, arg2, arg3, arg4, arg5, arg6) +} + +// MsigSwapCancel mocks base method +func (m *MockFullNode) MsigSwapCancel(arg0 context.Context, arg1, arg2 address.Address, arg3 uint64, arg4, arg5 address.Address) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigSwapCancel", arg0, arg1, arg2, arg3, arg4, arg5) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigSwapCancel indicates an expected call of MsigSwapCancel +func (mr *MockFullNodeMockRecorder) MsigSwapCancel(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigSwapCancel", reflect.TypeOf((*MockFullNode)(nil).MsigSwapCancel), arg0, arg1, arg2, arg3, arg4, arg5) +} + +// MsigSwapPropose mocks base method +func (m *MockFullNode) MsigSwapPropose(arg0 context.Context, arg1, arg2, arg3, arg4 address.Address) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigSwapPropose", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigSwapPropose indicates an expected call of MsigSwapPropose +func (mr *MockFullNodeMockRecorder) MsigSwapPropose(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigSwapPropose", reflect.TypeOf((*MockFullNode)(nil).MsigSwapPropose), arg0, arg1, arg2, arg3, arg4) +} + +// NetAddrsListen mocks base method +func (m *MockFullNode) NetAddrsListen(arg0 context.Context) (peer.AddrInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetAddrsListen", arg0) + ret0, _ := ret[0].(peer.AddrInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetAddrsListen indicates an expected call of NetAddrsListen +func (mr 
*MockFullNodeMockRecorder) NetAddrsListen(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetAddrsListen", reflect.TypeOf((*MockFullNode)(nil).NetAddrsListen), arg0) +} + +// NetAgentVersion mocks base method +func (m *MockFullNode) NetAgentVersion(arg0 context.Context, arg1 peer.ID) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetAgentVersion", arg0, arg1) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetAgentVersion indicates an expected call of NetAgentVersion +func (mr *MockFullNodeMockRecorder) NetAgentVersion(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetAgentVersion", reflect.TypeOf((*MockFullNode)(nil).NetAgentVersion), arg0, arg1) +} + +// NetAutoNatStatus mocks base method +func (m *MockFullNode) NetAutoNatStatus(arg0 context.Context) (api.NatInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetAutoNatStatus", arg0) + ret0, _ := ret[0].(api.NatInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetAutoNatStatus indicates an expected call of NetAutoNatStatus +func (mr *MockFullNodeMockRecorder) NetAutoNatStatus(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetAutoNatStatus", reflect.TypeOf((*MockFullNode)(nil).NetAutoNatStatus), arg0) +} + +// NetBandwidthStats mocks base method +func (m *MockFullNode) NetBandwidthStats(arg0 context.Context) (metrics.Stats, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetBandwidthStats", arg0) + ret0, _ := ret[0].(metrics.Stats) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetBandwidthStats indicates an expected call of NetBandwidthStats +func (mr *MockFullNodeMockRecorder) NetBandwidthStats(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, 
"NetBandwidthStats", reflect.TypeOf((*MockFullNode)(nil).NetBandwidthStats), arg0) +} + +// NetBandwidthStatsByPeer mocks base method +func (m *MockFullNode) NetBandwidthStatsByPeer(arg0 context.Context) (map[string]metrics.Stats, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetBandwidthStatsByPeer", arg0) + ret0, _ := ret[0].(map[string]metrics.Stats) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetBandwidthStatsByPeer indicates an expected call of NetBandwidthStatsByPeer +func (mr *MockFullNodeMockRecorder) NetBandwidthStatsByPeer(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetBandwidthStatsByPeer", reflect.TypeOf((*MockFullNode)(nil).NetBandwidthStatsByPeer), arg0) +} + +// NetBandwidthStatsByProtocol mocks base method +func (m *MockFullNode) NetBandwidthStatsByProtocol(arg0 context.Context) (map[protocol.ID]metrics.Stats, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetBandwidthStatsByProtocol", arg0) + ret0, _ := ret[0].(map[protocol.ID]metrics.Stats) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetBandwidthStatsByProtocol indicates an expected call of NetBandwidthStatsByProtocol +func (mr *MockFullNodeMockRecorder) NetBandwidthStatsByProtocol(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetBandwidthStatsByProtocol", reflect.TypeOf((*MockFullNode)(nil).NetBandwidthStatsByProtocol), arg0) +} + +// NetBlockAdd mocks base method +func (m *MockFullNode) NetBlockAdd(arg0 context.Context, arg1 api.NetBlockList) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetBlockAdd", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// NetBlockAdd indicates an expected call of NetBlockAdd +func (mr *MockFullNodeMockRecorder) NetBlockAdd(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetBlockAdd", 
reflect.TypeOf((*MockFullNode)(nil).NetBlockAdd), arg0, arg1) +} + +// NetBlockList mocks base method +func (m *MockFullNode) NetBlockList(arg0 context.Context) (api.NetBlockList, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetBlockList", arg0) + ret0, _ := ret[0].(api.NetBlockList) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetBlockList indicates an expected call of NetBlockList +func (mr *MockFullNodeMockRecorder) NetBlockList(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetBlockList", reflect.TypeOf((*MockFullNode)(nil).NetBlockList), arg0) +} + +// NetBlockRemove mocks base method +func (m *MockFullNode) NetBlockRemove(arg0 context.Context, arg1 api.NetBlockList) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetBlockRemove", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// NetBlockRemove indicates an expected call of NetBlockRemove +func (mr *MockFullNodeMockRecorder) NetBlockRemove(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetBlockRemove", reflect.TypeOf((*MockFullNode)(nil).NetBlockRemove), arg0, arg1) +} + +// NetConnect mocks base method +func (m *MockFullNode) NetConnect(arg0 context.Context, arg1 peer.AddrInfo) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetConnect", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// NetConnect indicates an expected call of NetConnect +func (mr *MockFullNodeMockRecorder) NetConnect(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetConnect", reflect.TypeOf((*MockFullNode)(nil).NetConnect), arg0, arg1) +} + +// NetConnectedness mocks base method +func (m *MockFullNode) NetConnectedness(arg0 context.Context, arg1 peer.ID) (network0.Connectedness, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetConnectedness", arg0, arg1) + ret0, 
_ := ret[0].(network0.Connectedness) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetConnectedness indicates an expected call of NetConnectedness +func (mr *MockFullNodeMockRecorder) NetConnectedness(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetConnectedness", reflect.TypeOf((*MockFullNode)(nil).NetConnectedness), arg0, arg1) +} + +// NetDisconnect mocks base method +func (m *MockFullNode) NetDisconnect(arg0 context.Context, arg1 peer.ID) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetDisconnect", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// NetDisconnect indicates an expected call of NetDisconnect +func (mr *MockFullNodeMockRecorder) NetDisconnect(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetDisconnect", reflect.TypeOf((*MockFullNode)(nil).NetDisconnect), arg0, arg1) +} + +// NetFindPeer mocks base method +func (m *MockFullNode) NetFindPeer(arg0 context.Context, arg1 peer.ID) (peer.AddrInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetFindPeer", arg0, arg1) + ret0, _ := ret[0].(peer.AddrInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetFindPeer indicates an expected call of NetFindPeer +func (mr *MockFullNodeMockRecorder) NetFindPeer(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetFindPeer", reflect.TypeOf((*MockFullNode)(nil).NetFindPeer), arg0, arg1) +} + +// NetPeerInfo mocks base method +func (m *MockFullNode) NetPeerInfo(arg0 context.Context, arg1 peer.ID) (*api.ExtendedPeerInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetPeerInfo", arg0, arg1) + ret0, _ := ret[0].(*api.ExtendedPeerInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetPeerInfo indicates an expected call of NetPeerInfo +func (mr *MockFullNodeMockRecorder) 
NetPeerInfo(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetPeerInfo", reflect.TypeOf((*MockFullNode)(nil).NetPeerInfo), arg0, arg1) +} + +// NetPeers mocks base method +func (m *MockFullNode) NetPeers(arg0 context.Context) ([]peer.AddrInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetPeers", arg0) + ret0, _ := ret[0].([]peer.AddrInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetPeers indicates an expected call of NetPeers +func (mr *MockFullNodeMockRecorder) NetPeers(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetPeers", reflect.TypeOf((*MockFullNode)(nil).NetPeers), arg0) +} + +// NetPubsubScores mocks base method +func (m *MockFullNode) NetPubsubScores(arg0 context.Context) ([]api.PubsubScore, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetPubsubScores", arg0) + ret0, _ := ret[0].([]api.PubsubScore) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetPubsubScores indicates an expected call of NetPubsubScores +func (mr *MockFullNodeMockRecorder) NetPubsubScores(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetPubsubScores", reflect.TypeOf((*MockFullNode)(nil).NetPubsubScores), arg0) +} + +// PaychAllocateLane mocks base method +func (m *MockFullNode) PaychAllocateLane(arg0 context.Context, arg1 address.Address) (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychAllocateLane", arg0, arg1) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychAllocateLane indicates an expected call of PaychAllocateLane +func (mr *MockFullNodeMockRecorder) PaychAllocateLane(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychAllocateLane", reflect.TypeOf((*MockFullNode)(nil).PaychAllocateLane), 
arg0, arg1) +} + +// PaychAvailableFunds mocks base method +func (m *MockFullNode) PaychAvailableFunds(arg0 context.Context, arg1 address.Address) (*api.ChannelAvailableFunds, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychAvailableFunds", arg0, arg1) + ret0, _ := ret[0].(*api.ChannelAvailableFunds) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychAvailableFunds indicates an expected call of PaychAvailableFunds +func (mr *MockFullNodeMockRecorder) PaychAvailableFunds(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychAvailableFunds", reflect.TypeOf((*MockFullNode)(nil).PaychAvailableFunds), arg0, arg1) +} + +// PaychAvailableFundsByFromTo mocks base method +func (m *MockFullNode) PaychAvailableFundsByFromTo(arg0 context.Context, arg1, arg2 address.Address) (*api.ChannelAvailableFunds, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychAvailableFundsByFromTo", arg0, arg1, arg2) + ret0, _ := ret[0].(*api.ChannelAvailableFunds) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychAvailableFundsByFromTo indicates an expected call of PaychAvailableFundsByFromTo +func (mr *MockFullNodeMockRecorder) PaychAvailableFundsByFromTo(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychAvailableFundsByFromTo", reflect.TypeOf((*MockFullNode)(nil).PaychAvailableFundsByFromTo), arg0, arg1, arg2) +} + +// PaychCollect mocks base method +func (m *MockFullNode) PaychCollect(arg0 context.Context, arg1 address.Address) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychCollect", arg0, arg1) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychCollect indicates an expected call of PaychCollect +func (mr *MockFullNodeMockRecorder) PaychCollect(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychCollect", reflect.TypeOf((*MockFullNode)(nil).PaychCollect), arg0, arg1) +} + +// PaychGet mocks base method +func (m *MockFullNode) PaychGet(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int) (*api.ChannelInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychGet", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*api.ChannelInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychGet indicates an expected call of PaychGet +func (mr *MockFullNodeMockRecorder) PaychGet(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychGet", reflect.TypeOf((*MockFullNode)(nil).PaychGet), arg0, arg1, arg2, arg3) +} + +// PaychGetWaitReady mocks base method +func (m *MockFullNode) PaychGetWaitReady(arg0 context.Context, arg1 cid.Cid) (address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychGetWaitReady", arg0, arg1) + ret0, _ := ret[0].(address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychGetWaitReady indicates an expected call of PaychGetWaitReady +func (mr *MockFullNodeMockRecorder) PaychGetWaitReady(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychGetWaitReady", reflect.TypeOf((*MockFullNode)(nil).PaychGetWaitReady), arg0, arg1) +} + +// PaychList mocks base method +func (m *MockFullNode) PaychList(arg0 context.Context) ([]address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychList", arg0) + ret0, _ := ret[0].([]address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychList indicates an expected call of PaychList +func (mr *MockFullNodeMockRecorder) PaychList(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychList", reflect.TypeOf((*MockFullNode)(nil).PaychList), arg0) +} + +// 
PaychNewPayment mocks base method +func (m *MockFullNode) PaychNewPayment(arg0 context.Context, arg1, arg2 address.Address, arg3 []api.VoucherSpec) (*api.PaymentInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychNewPayment", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*api.PaymentInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychNewPayment indicates an expected call of PaychNewPayment +func (mr *MockFullNodeMockRecorder) PaychNewPayment(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychNewPayment", reflect.TypeOf((*MockFullNode)(nil).PaychNewPayment), arg0, arg1, arg2, arg3) +} + +// PaychSettle mocks base method +func (m *MockFullNode) PaychSettle(arg0 context.Context, arg1 address.Address) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychSettle", arg0, arg1) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychSettle indicates an expected call of PaychSettle +func (mr *MockFullNodeMockRecorder) PaychSettle(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychSettle", reflect.TypeOf((*MockFullNode)(nil).PaychSettle), arg0, arg1) +} + +// PaychStatus mocks base method +func (m *MockFullNode) PaychStatus(arg0 context.Context, arg1 address.Address) (*api.PaychStatus, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychStatus", arg0, arg1) + ret0, _ := ret[0].(*api.PaychStatus) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychStatus indicates an expected call of PaychStatus +func (mr *MockFullNodeMockRecorder) PaychStatus(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychStatus", reflect.TypeOf((*MockFullNode)(nil).PaychStatus), arg0, arg1) +} + +// PaychVoucherAdd mocks base method +func (m *MockFullNode) 
PaychVoucherAdd(arg0 context.Context, arg1 address.Address, arg2 *paych.SignedVoucher, arg3 []byte, arg4 big.Int) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychVoucherAdd", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychVoucherAdd indicates an expected call of PaychVoucherAdd +func (mr *MockFullNodeMockRecorder) PaychVoucherAdd(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherAdd", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherAdd), arg0, arg1, arg2, arg3, arg4) +} + +// PaychVoucherCheckSpendable mocks base method +func (m *MockFullNode) PaychVoucherCheckSpendable(arg0 context.Context, arg1 address.Address, arg2 *paych.SignedVoucher, arg3, arg4 []byte) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychVoucherCheckSpendable", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychVoucherCheckSpendable indicates an expected call of PaychVoucherCheckSpendable +func (mr *MockFullNodeMockRecorder) PaychVoucherCheckSpendable(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherCheckSpendable", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherCheckSpendable), arg0, arg1, arg2, arg3, arg4) +} + +// PaychVoucherCheckValid mocks base method +func (m *MockFullNode) PaychVoucherCheckValid(arg0 context.Context, arg1 address.Address, arg2 *paych.SignedVoucher) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychVoucherCheckValid", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// PaychVoucherCheckValid indicates an expected call of PaychVoucherCheckValid +func (mr *MockFullNodeMockRecorder) PaychVoucherCheckValid(arg0, arg1, arg2 interface{}) *gomock.Call { + 
mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherCheckValid", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherCheckValid), arg0, arg1, arg2) +} + +// PaychVoucherCreate mocks base method +func (m *MockFullNode) PaychVoucherCreate(arg0 context.Context, arg1 address.Address, arg2 big.Int, arg3 uint64) (*api.VoucherCreateResult, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychVoucherCreate", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*api.VoucherCreateResult) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychVoucherCreate indicates an expected call of PaychVoucherCreate +func (mr *MockFullNodeMockRecorder) PaychVoucherCreate(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherCreate", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherCreate), arg0, arg1, arg2, arg3) +} + +// PaychVoucherList mocks base method +func (m *MockFullNode) PaychVoucherList(arg0 context.Context, arg1 address.Address) ([]*paych.SignedVoucher, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychVoucherList", arg0, arg1) + ret0, _ := ret[0].([]*paych.SignedVoucher) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychVoucherList indicates an expected call of PaychVoucherList +func (mr *MockFullNodeMockRecorder) PaychVoucherList(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherList", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherList), arg0, arg1) +} + +// PaychVoucherSubmit mocks base method +func (m *MockFullNode) PaychVoucherSubmit(arg0 context.Context, arg1 address.Address, arg2 *paych.SignedVoucher, arg3, arg4 []byte) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychVoucherSubmit", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// 
PaychVoucherSubmit indicates an expected call of PaychVoucherSubmit +func (mr *MockFullNodeMockRecorder) PaychVoucherSubmit(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherSubmit", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherSubmit), arg0, arg1, arg2, arg3, arg4) +} + +// Session mocks base method +func (m *MockFullNode) Session(arg0 context.Context) (uuid.UUID, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Session", arg0) + ret0, _ := ret[0].(uuid.UUID) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Session indicates an expected call of Session +func (mr *MockFullNodeMockRecorder) Session(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Session", reflect.TypeOf((*MockFullNode)(nil).Session), arg0) +} + +// Shutdown mocks base method +func (m *MockFullNode) Shutdown(arg0 context.Context) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Shutdown", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Shutdown indicates an expected call of Shutdown +func (mr *MockFullNodeMockRecorder) Shutdown(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Shutdown", reflect.TypeOf((*MockFullNode)(nil).Shutdown), arg0) +} + +// StateAccountKey mocks base method +func (m *MockFullNode) StateAccountKey(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateAccountKey", arg0, arg1, arg2) + ret0, _ := ret[0].(address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateAccountKey indicates an expected call of StateAccountKey +func (mr *MockFullNodeMockRecorder) StateAccountKey(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateAccountKey", 
reflect.TypeOf((*MockFullNode)(nil).StateAccountKey), arg0, arg1, arg2) +} + +// StateAllMinerFaults mocks base method +func (m *MockFullNode) StateAllMinerFaults(arg0 context.Context, arg1 abi.ChainEpoch, arg2 types.TipSetKey) ([]*api.Fault, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateAllMinerFaults", arg0, arg1, arg2) + ret0, _ := ret[0].([]*api.Fault) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateAllMinerFaults indicates an expected call of StateAllMinerFaults +func (mr *MockFullNodeMockRecorder) StateAllMinerFaults(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateAllMinerFaults", reflect.TypeOf((*MockFullNode)(nil).StateAllMinerFaults), arg0, arg1, arg2) +} + +// StateCall mocks base method +func (m *MockFullNode) StateCall(arg0 context.Context, arg1 *types.Message, arg2 types.TipSetKey) (*api.InvocResult, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateCall", arg0, arg1, arg2) + ret0, _ := ret[0].(*api.InvocResult) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateCall indicates an expected call of StateCall +func (mr *MockFullNodeMockRecorder) StateCall(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateCall", reflect.TypeOf((*MockFullNode)(nil).StateCall), arg0, arg1, arg2) +} + +// StateChangedActors mocks base method +func (m *MockFullNode) StateChangedActors(arg0 context.Context, arg1, arg2 cid.Cid) (map[string]types.Actor, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateChangedActors", arg0, arg1, arg2) + ret0, _ := ret[0].(map[string]types.Actor) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateChangedActors indicates an expected call of StateChangedActors +func (mr *MockFullNodeMockRecorder) StateChangedActors(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateChangedActors", reflect.TypeOf((*MockFullNode)(nil).StateChangedActors), arg0, arg1, arg2) +} + +// StateCirculatingSupply mocks base method +func (m *MockFullNode) StateCirculatingSupply(arg0 context.Context, arg1 types.TipSetKey) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateCirculatingSupply", arg0, arg1) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateCirculatingSupply indicates an expected call of StateCirculatingSupply +func (mr *MockFullNodeMockRecorder) StateCirculatingSupply(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateCirculatingSupply", reflect.TypeOf((*MockFullNode)(nil).StateCirculatingSupply), arg0, arg1) +} + +// StateCompute mocks base method +func (m *MockFullNode) StateCompute(arg0 context.Context, arg1 abi.ChainEpoch, arg2 []*types.Message, arg3 types.TipSetKey) (*api.ComputeStateOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateCompute", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*api.ComputeStateOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateCompute indicates an expected call of StateCompute +func (mr *MockFullNodeMockRecorder) StateCompute(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateCompute", reflect.TypeOf((*MockFullNode)(nil).StateCompute), arg0, arg1, arg2, arg3) +} + +// StateDealProviderCollateralBounds mocks base method +func (m *MockFullNode) StateDealProviderCollateralBounds(arg0 context.Context, arg1 abi.PaddedPieceSize, arg2 bool, arg3 types.TipSetKey) (api.DealCollateralBounds, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateDealProviderCollateralBounds", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(api.DealCollateralBounds) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// 
StateDealProviderCollateralBounds indicates an expected call of StateDealProviderCollateralBounds +func (mr *MockFullNodeMockRecorder) StateDealProviderCollateralBounds(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateDealProviderCollateralBounds", reflect.TypeOf((*MockFullNode)(nil).StateDealProviderCollateralBounds), arg0, arg1, arg2, arg3) +} + +// StateDecodeParams mocks base method +func (m *MockFullNode) StateDecodeParams(arg0 context.Context, arg1 address.Address, arg2 abi.MethodNum, arg3 []byte, arg4 types.TipSetKey) (interface{}, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateDecodeParams", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(interface{}) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateDecodeParams indicates an expected call of StateDecodeParams +func (mr *MockFullNodeMockRecorder) StateDecodeParams(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateDecodeParams", reflect.TypeOf((*MockFullNode)(nil).StateDecodeParams), arg0, arg1, arg2, arg3, arg4) +} + +// StateGetActor mocks base method +func (m *MockFullNode) StateGetActor(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*types.Actor, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateGetActor", arg0, arg1, arg2) + ret0, _ := ret[0].(*types.Actor) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateGetActor indicates an expected call of StateGetActor +func (mr *MockFullNodeMockRecorder) StateGetActor(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetActor", reflect.TypeOf((*MockFullNode)(nil).StateGetActor), arg0, arg1, arg2) +} + +// StateGetReceipt mocks base method +func (m *MockFullNode) StateGetReceipt(arg0 context.Context, arg1 cid.Cid, arg2 
types.TipSetKey) (*types.MessageReceipt, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateGetReceipt", arg0, arg1, arg2) + ret0, _ := ret[0].(*types.MessageReceipt) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateGetReceipt indicates an expected call of StateGetReceipt +func (mr *MockFullNodeMockRecorder) StateGetReceipt(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetReceipt", reflect.TypeOf((*MockFullNode)(nil).StateGetReceipt), arg0, arg1, arg2) +} + +// StateListActors mocks base method +func (m *MockFullNode) StateListActors(arg0 context.Context, arg1 types.TipSetKey) ([]address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateListActors", arg0, arg1) + ret0, _ := ret[0].([]address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateListActors indicates an expected call of StateListActors +func (mr *MockFullNodeMockRecorder) StateListActors(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateListActors", reflect.TypeOf((*MockFullNode)(nil).StateListActors), arg0, arg1) +} + +// StateListMessages mocks base method +func (m *MockFullNode) StateListMessages(arg0 context.Context, arg1 *api.MessageMatch, arg2 types.TipSetKey, arg3 abi.ChainEpoch) ([]cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateListMessages", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].([]cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateListMessages indicates an expected call of StateListMessages +func (mr *MockFullNodeMockRecorder) StateListMessages(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateListMessages", reflect.TypeOf((*MockFullNode)(nil).StateListMessages), arg0, arg1, arg2, arg3) +} + +// StateListMiners mocks base method +func (m 
*MockFullNode) StateListMiners(arg0 context.Context, arg1 types.TipSetKey) ([]address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateListMiners", arg0, arg1) + ret0, _ := ret[0].([]address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateListMiners indicates an expected call of StateListMiners +func (mr *MockFullNodeMockRecorder) StateListMiners(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateListMiners", reflect.TypeOf((*MockFullNode)(nil).StateListMiners), arg0, arg1) +} + +// StateLookupID mocks base method +func (m *MockFullNode) StateLookupID(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateLookupID", arg0, arg1, arg2) + ret0, _ := ret[0].(address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateLookupID indicates an expected call of StateLookupID +func (mr *MockFullNodeMockRecorder) StateLookupID(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateLookupID", reflect.TypeOf((*MockFullNode)(nil).StateLookupID), arg0, arg1, arg2) +} + +// StateMarketBalance mocks base method +func (m *MockFullNode) StateMarketBalance(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (api.MarketBalance, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMarketBalance", arg0, arg1, arg2) + ret0, _ := ret[0].(api.MarketBalance) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMarketBalance indicates an expected call of StateMarketBalance +func (mr *MockFullNodeMockRecorder) StateMarketBalance(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMarketBalance", reflect.TypeOf((*MockFullNode)(nil).StateMarketBalance), arg0, arg1, arg2) +} + +// 
StateMarketDeals mocks base method +func (m *MockFullNode) StateMarketDeals(arg0 context.Context, arg1 types.TipSetKey) (map[string]api.MarketDeal, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMarketDeals", arg0, arg1) + ret0, _ := ret[0].(map[string]api.MarketDeal) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMarketDeals indicates an expected call of StateMarketDeals +func (mr *MockFullNodeMockRecorder) StateMarketDeals(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMarketDeals", reflect.TypeOf((*MockFullNode)(nil).StateMarketDeals), arg0, arg1) +} + +// StateMarketParticipants mocks base method +func (m *MockFullNode) StateMarketParticipants(arg0 context.Context, arg1 types.TipSetKey) (map[string]api.MarketBalance, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMarketParticipants", arg0, arg1) + ret0, _ := ret[0].(map[string]api.MarketBalance) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMarketParticipants indicates an expected call of StateMarketParticipants +func (mr *MockFullNodeMockRecorder) StateMarketParticipants(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMarketParticipants", reflect.TypeOf((*MockFullNode)(nil).StateMarketParticipants), arg0, arg1) +} + +// StateMarketStorageDeal mocks base method +func (m *MockFullNode) StateMarketStorageDeal(arg0 context.Context, arg1 abi.DealID, arg2 types.TipSetKey) (*api.MarketDeal, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMarketStorageDeal", arg0, arg1, arg2) + ret0, _ := ret[0].(*api.MarketDeal) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMarketStorageDeal indicates an expected call of StateMarketStorageDeal +func (mr *MockFullNodeMockRecorder) StateMarketStorageDeal(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMarketStorageDeal", reflect.TypeOf((*MockFullNode)(nil).StateMarketStorageDeal), arg0, arg1, arg2) +} + +// StateMinerActiveSectors mocks base method +func (m *MockFullNode) StateMinerActiveSectors(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerActiveSectors", arg0, arg1, arg2) + ret0, _ := ret[0].([]*miner.SectorOnChainInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerActiveSectors indicates an expected call of StateMinerActiveSectors +func (mr *MockFullNodeMockRecorder) StateMinerActiveSectors(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerActiveSectors", reflect.TypeOf((*MockFullNode)(nil).StateMinerActiveSectors), arg0, arg1, arg2) +} + +// StateMinerAvailableBalance mocks base method +func (m *MockFullNode) StateMinerAvailableBalance(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerAvailableBalance", arg0, arg1, arg2) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerAvailableBalance indicates an expected call of StateMinerAvailableBalance +func (mr *MockFullNodeMockRecorder) StateMinerAvailableBalance(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerAvailableBalance", reflect.TypeOf((*MockFullNode)(nil).StateMinerAvailableBalance), arg0, arg1, arg2) +} + +// StateMinerDeadlines mocks base method +func (m *MockFullNode) StateMinerDeadlines(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) ([]api.Deadline, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerDeadlines", arg0, arg1, arg2) + ret0, _ := ret[0].([]api.Deadline) + ret1, _ := 
ret[1].(error) + return ret0, ret1 +} + +// StateMinerDeadlines indicates an expected call of StateMinerDeadlines +func (mr *MockFullNodeMockRecorder) StateMinerDeadlines(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerDeadlines", reflect.TypeOf((*MockFullNode)(nil).StateMinerDeadlines), arg0, arg1, arg2) +} + +// StateMinerFaults mocks base method +func (m *MockFullNode) StateMinerFaults(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (bitfield.BitField, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerFaults", arg0, arg1, arg2) + ret0, _ := ret[0].(bitfield.BitField) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerFaults indicates an expected call of StateMinerFaults +func (mr *MockFullNodeMockRecorder) StateMinerFaults(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerFaults", reflect.TypeOf((*MockFullNode)(nil).StateMinerFaults), arg0, arg1, arg2) +} + +// StateMinerInfo mocks base method +func (m *MockFullNode) StateMinerInfo(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (miner.MinerInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerInfo", arg0, arg1, arg2) + ret0, _ := ret[0].(miner.MinerInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerInfo indicates an expected call of StateMinerInfo +func (mr *MockFullNodeMockRecorder) StateMinerInfo(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerInfo", reflect.TypeOf((*MockFullNode)(nil).StateMinerInfo), arg0, arg1, arg2) +} + +// StateMinerInitialPledgeCollateral mocks base method +func (m *MockFullNode) StateMinerInitialPledgeCollateral(arg0 context.Context, arg1 address.Address, arg2 miner0.SectorPreCommitInfo, arg3 types.TipSetKey) (big.Int, 
error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerInitialPledgeCollateral", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerInitialPledgeCollateral indicates an expected call of StateMinerInitialPledgeCollateral +func (mr *MockFullNodeMockRecorder) StateMinerInitialPledgeCollateral(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerInitialPledgeCollateral", reflect.TypeOf((*MockFullNode)(nil).StateMinerInitialPledgeCollateral), arg0, arg1, arg2, arg3) +} + +// StateMinerPartitions mocks base method +func (m *MockFullNode) StateMinerPartitions(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3 types.TipSetKey) ([]api.Partition, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerPartitions", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].([]api.Partition) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerPartitions indicates an expected call of StateMinerPartitions +func (mr *MockFullNodeMockRecorder) StateMinerPartitions(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerPartitions", reflect.TypeOf((*MockFullNode)(nil).StateMinerPartitions), arg0, arg1, arg2, arg3) +} + +// StateMinerPower mocks base method +func (m *MockFullNode) StateMinerPower(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*api.MinerPower, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerPower", arg0, arg1, arg2) + ret0, _ := ret[0].(*api.MinerPower) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerPower indicates an expected call of StateMinerPower +func (mr *MockFullNodeMockRecorder) StateMinerPower(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, 
"StateMinerPower", reflect.TypeOf((*MockFullNode)(nil).StateMinerPower), arg0, arg1, arg2) +} + +// StateMinerPreCommitDepositForPower mocks base method +func (m *MockFullNode) StateMinerPreCommitDepositForPower(arg0 context.Context, arg1 address.Address, arg2 miner0.SectorPreCommitInfo, arg3 types.TipSetKey) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerPreCommitDepositForPower", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerPreCommitDepositForPower indicates an expected call of StateMinerPreCommitDepositForPower +func (mr *MockFullNodeMockRecorder) StateMinerPreCommitDepositForPower(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerPreCommitDepositForPower", reflect.TypeOf((*MockFullNode)(nil).StateMinerPreCommitDepositForPower), arg0, arg1, arg2, arg3) +} + +// StateMinerProvingDeadline mocks base method +func (m *MockFullNode) StateMinerProvingDeadline(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*dline.Info, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerProvingDeadline", arg0, arg1, arg2) + ret0, _ := ret[0].(*dline.Info) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerProvingDeadline indicates an expected call of StateMinerProvingDeadline +func (mr *MockFullNodeMockRecorder) StateMinerProvingDeadline(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerProvingDeadline", reflect.TypeOf((*MockFullNode)(nil).StateMinerProvingDeadline), arg0, arg1, arg2) +} + +// StateMinerRecoveries mocks base method +func (m *MockFullNode) StateMinerRecoveries(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (bitfield.BitField, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerRecoveries", arg0, arg1, arg2) + 
ret0, _ := ret[0].(bitfield.BitField) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerRecoveries indicates an expected call of StateMinerRecoveries +func (mr *MockFullNodeMockRecorder) StateMinerRecoveries(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerRecoveries", reflect.TypeOf((*MockFullNode)(nil).StateMinerRecoveries), arg0, arg1, arg2) +} + +// StateMinerSectorAllocated mocks base method +func (m *MockFullNode) StateMinerSectorAllocated(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerSectorAllocated", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerSectorAllocated indicates an expected call of StateMinerSectorAllocated +func (mr *MockFullNodeMockRecorder) StateMinerSectorAllocated(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerSectorAllocated", reflect.TypeOf((*MockFullNode)(nil).StateMinerSectorAllocated), arg0, arg1, arg2, arg3) +} + +// StateMinerSectorCount mocks base method +func (m *MockFullNode) StateMinerSectorCount(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (api.MinerSectors, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerSectorCount", arg0, arg1, arg2) + ret0, _ := ret[0].(api.MinerSectors) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerSectorCount indicates an expected call of StateMinerSectorCount +func (mr *MockFullNodeMockRecorder) StateMinerSectorCount(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerSectorCount", reflect.TypeOf((*MockFullNode)(nil).StateMinerSectorCount), arg0, arg1, arg2) +} + +// StateMinerSectors 
mocks base method +func (m *MockFullNode) StateMinerSectors(arg0 context.Context, arg1 address.Address, arg2 *bitfield.BitField, arg3 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerSectors", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].([]*miner.SectorOnChainInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerSectors indicates an expected call of StateMinerSectors +func (mr *MockFullNodeMockRecorder) StateMinerSectors(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerSectors", reflect.TypeOf((*MockFullNode)(nil).StateMinerSectors), arg0, arg1, arg2, arg3) +} + +// StateNetworkName mocks base method +func (m *MockFullNode) StateNetworkName(arg0 context.Context) (dtypes.NetworkName, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateNetworkName", arg0) + ret0, _ := ret[0].(dtypes.NetworkName) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateNetworkName indicates an expected call of StateNetworkName +func (mr *MockFullNodeMockRecorder) StateNetworkName(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateNetworkName", reflect.TypeOf((*MockFullNode)(nil).StateNetworkName), arg0) +} + +// StateNetworkVersion mocks base method +func (m *MockFullNode) StateNetworkVersion(arg0 context.Context, arg1 types.TipSetKey) (network.Version, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateNetworkVersion", arg0, arg1) + ret0, _ := ret[0].(network.Version) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateNetworkVersion indicates an expected call of StateNetworkVersion +func (mr *MockFullNodeMockRecorder) StateNetworkVersion(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateNetworkVersion", 
reflect.TypeOf((*MockFullNode)(nil).StateNetworkVersion), arg0, arg1) +} + +// StateReadState mocks base method +func (m *MockFullNode) StateReadState(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*api.ActorState, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateReadState", arg0, arg1, arg2) + ret0, _ := ret[0].(*api.ActorState) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateReadState indicates an expected call of StateReadState +func (mr *MockFullNodeMockRecorder) StateReadState(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateReadState", reflect.TypeOf((*MockFullNode)(nil).StateReadState), arg0, arg1, arg2) +} + +// StateReplay mocks base method +func (m *MockFullNode) StateReplay(arg0 context.Context, arg1 types.TipSetKey, arg2 cid.Cid) (*api.InvocResult, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateReplay", arg0, arg1, arg2) + ret0, _ := ret[0].(*api.InvocResult) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateReplay indicates an expected call of StateReplay +func (mr *MockFullNodeMockRecorder) StateReplay(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateReplay", reflect.TypeOf((*MockFullNode)(nil).StateReplay), arg0, arg1, arg2) +} + +// StateSearchMsg mocks base method +func (m *MockFullNode) StateSearchMsg(arg0 context.Context, arg1 cid.Cid) (*api.MsgLookup, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateSearchMsg", arg0, arg1) + ret0, _ := ret[0].(*api.MsgLookup) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateSearchMsg indicates an expected call of StateSearchMsg +func (mr *MockFullNodeMockRecorder) StateSearchMsg(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSearchMsg", 
reflect.TypeOf((*MockFullNode)(nil).StateSearchMsg), arg0, arg1) +} + +// StateSearchMsgLimited mocks base method +func (m *MockFullNode) StateSearchMsgLimited(arg0 context.Context, arg1 cid.Cid, arg2 abi.ChainEpoch) (*api.MsgLookup, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateSearchMsgLimited", arg0, arg1, arg2) + ret0, _ := ret[0].(*api.MsgLookup) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateSearchMsgLimited indicates an expected call of StateSearchMsgLimited +func (mr *MockFullNodeMockRecorder) StateSearchMsgLimited(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSearchMsgLimited", reflect.TypeOf((*MockFullNode)(nil).StateSearchMsgLimited), arg0, arg1, arg2) +} + +// StateSectorExpiration mocks base method +func (m *MockFullNode) StateSectorExpiration(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner.SectorExpiration, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateSectorExpiration", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*miner.SectorExpiration) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateSectorExpiration indicates an expected call of StateSectorExpiration +func (mr *MockFullNodeMockRecorder) StateSectorExpiration(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSectorExpiration", reflect.TypeOf((*MockFullNode)(nil).StateSectorExpiration), arg0, arg1, arg2, arg3) +} + +// StateSectorGetInfo mocks base method +func (m *MockFullNode) StateSectorGetInfo(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner.SectorOnChainInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateSectorGetInfo", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*miner.SectorOnChainInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// 
StateSectorGetInfo indicates an expected call of StateSectorGetInfo +func (mr *MockFullNodeMockRecorder) StateSectorGetInfo(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSectorGetInfo", reflect.TypeOf((*MockFullNode)(nil).StateSectorGetInfo), arg0, arg1, arg2, arg3) +} + +// StateSectorPartition mocks base method +func (m *MockFullNode) StateSectorPartition(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner.SectorLocation, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateSectorPartition", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*miner.SectorLocation) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateSectorPartition indicates an expected call of StateSectorPartition +func (mr *MockFullNodeMockRecorder) StateSectorPartition(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSectorPartition", reflect.TypeOf((*MockFullNode)(nil).StateSectorPartition), arg0, arg1, arg2, arg3) +} + +// StateSectorPreCommitInfo mocks base method +func (m *MockFullNode) StateSectorPreCommitInfo(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateSectorPreCommitInfo", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(miner.SectorPreCommitOnChainInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateSectorPreCommitInfo indicates an expected call of StateSectorPreCommitInfo +func (mr *MockFullNodeMockRecorder) StateSectorPreCommitInfo(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSectorPreCommitInfo", reflect.TypeOf((*MockFullNode)(nil).StateSectorPreCommitInfo), arg0, arg1, arg2, arg3) +} + +// 
StateVMCirculatingSupplyInternal mocks base method +func (m *MockFullNode) StateVMCirculatingSupplyInternal(arg0 context.Context, arg1 types.TipSetKey) (api.CirculatingSupply, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateVMCirculatingSupplyInternal", arg0, arg1) + ret0, _ := ret[0].(api.CirculatingSupply) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateVMCirculatingSupplyInternal indicates an expected call of StateVMCirculatingSupplyInternal +func (mr *MockFullNodeMockRecorder) StateVMCirculatingSupplyInternal(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateVMCirculatingSupplyInternal", reflect.TypeOf((*MockFullNode)(nil).StateVMCirculatingSupplyInternal), arg0, arg1) +} + +// StateVerifiedClientStatus mocks base method +func (m *MockFullNode) StateVerifiedClientStatus(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateVerifiedClientStatus", arg0, arg1, arg2) + ret0, _ := ret[0].(*big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateVerifiedClientStatus indicates an expected call of StateVerifiedClientStatus +func (mr *MockFullNodeMockRecorder) StateVerifiedClientStatus(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateVerifiedClientStatus", reflect.TypeOf((*MockFullNode)(nil).StateVerifiedClientStatus), arg0, arg1, arg2) +} + +// StateVerifiedRegistryRootKey mocks base method +func (m *MockFullNode) StateVerifiedRegistryRootKey(arg0 context.Context, arg1 types.TipSetKey) (address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateVerifiedRegistryRootKey", arg0, arg1) + ret0, _ := ret[0].(address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateVerifiedRegistryRootKey indicates an expected call of StateVerifiedRegistryRootKey +func (mr 
*MockFullNodeMockRecorder) StateVerifiedRegistryRootKey(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateVerifiedRegistryRootKey", reflect.TypeOf((*MockFullNode)(nil).StateVerifiedRegistryRootKey), arg0, arg1) +} + +// StateVerifierStatus mocks base method +func (m *MockFullNode) StateVerifierStatus(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateVerifierStatus", arg0, arg1, arg2) + ret0, _ := ret[0].(*big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateVerifierStatus indicates an expected call of StateVerifierStatus +func (mr *MockFullNodeMockRecorder) StateVerifierStatus(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateVerifierStatus", reflect.TypeOf((*MockFullNode)(nil).StateVerifierStatus), arg0, arg1, arg2) +} + +// StateWaitMsg mocks base method +func (m *MockFullNode) StateWaitMsg(arg0 context.Context, arg1 cid.Cid, arg2 uint64) (*api.MsgLookup, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateWaitMsg", arg0, arg1, arg2) + ret0, _ := ret[0].(*api.MsgLookup) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateWaitMsg indicates an expected call of StateWaitMsg +func (mr *MockFullNodeMockRecorder) StateWaitMsg(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateWaitMsg", reflect.TypeOf((*MockFullNode)(nil).StateWaitMsg), arg0, arg1, arg2) +} + +// StateWaitMsgLimited mocks base method +func (m *MockFullNode) StateWaitMsgLimited(arg0 context.Context, arg1 cid.Cid, arg2 uint64, arg3 abi.ChainEpoch) (*api.MsgLookup, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateWaitMsgLimited", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*api.MsgLookup) + ret1, _ := ret[1].(error) + return ret0, ret1 +} 
+ +// StateWaitMsgLimited indicates an expected call of StateWaitMsgLimited +func (mr *MockFullNodeMockRecorder) StateWaitMsgLimited(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateWaitMsgLimited", reflect.TypeOf((*MockFullNode)(nil).StateWaitMsgLimited), arg0, arg1, arg2, arg3) +} + +// SyncCheckBad mocks base method +func (m *MockFullNode) SyncCheckBad(arg0 context.Context, arg1 cid.Cid) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SyncCheckBad", arg0, arg1) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SyncCheckBad indicates an expected call of SyncCheckBad +func (mr *MockFullNodeMockRecorder) SyncCheckBad(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncCheckBad", reflect.TypeOf((*MockFullNode)(nil).SyncCheckBad), arg0, arg1) +} + +// SyncCheckpoint mocks base method +func (m *MockFullNode) SyncCheckpoint(arg0 context.Context, arg1 types.TipSetKey) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SyncCheckpoint", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// SyncCheckpoint indicates an expected call of SyncCheckpoint +func (mr *MockFullNodeMockRecorder) SyncCheckpoint(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncCheckpoint", reflect.TypeOf((*MockFullNode)(nil).SyncCheckpoint), arg0, arg1) +} + +// SyncIncomingBlocks mocks base method +func (m *MockFullNode) SyncIncomingBlocks(arg0 context.Context) (<-chan *types.BlockHeader, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SyncIncomingBlocks", arg0) + ret0, _ := ret[0].(<-chan *types.BlockHeader) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SyncIncomingBlocks indicates an expected call of SyncIncomingBlocks +func (mr *MockFullNodeMockRecorder) SyncIncomingBlocks(arg0 
interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncIncomingBlocks", reflect.TypeOf((*MockFullNode)(nil).SyncIncomingBlocks), arg0) +} + +// SyncMarkBad mocks base method +func (m *MockFullNode) SyncMarkBad(arg0 context.Context, arg1 cid.Cid) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SyncMarkBad", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// SyncMarkBad indicates an expected call of SyncMarkBad +func (mr *MockFullNodeMockRecorder) SyncMarkBad(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncMarkBad", reflect.TypeOf((*MockFullNode)(nil).SyncMarkBad), arg0, arg1) +} + +// SyncState mocks base method +func (m *MockFullNode) SyncState(arg0 context.Context) (*api.SyncState, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SyncState", arg0) + ret0, _ := ret[0].(*api.SyncState) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SyncState indicates an expected call of SyncState +func (mr *MockFullNodeMockRecorder) SyncState(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncState", reflect.TypeOf((*MockFullNode)(nil).SyncState), arg0) +} + +// SyncSubmitBlock mocks base method +func (m *MockFullNode) SyncSubmitBlock(arg0 context.Context, arg1 *types.BlockMsg) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SyncSubmitBlock", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// SyncSubmitBlock indicates an expected call of SyncSubmitBlock +func (mr *MockFullNodeMockRecorder) SyncSubmitBlock(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncSubmitBlock", reflect.TypeOf((*MockFullNode)(nil).SyncSubmitBlock), arg0, arg1) +} + +// SyncUnmarkAllBad mocks base method +func (m *MockFullNode) SyncUnmarkAllBad(arg0 context.Context) error { + 
m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SyncUnmarkAllBad", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SyncUnmarkAllBad indicates an expected call of SyncUnmarkAllBad +func (mr *MockFullNodeMockRecorder) SyncUnmarkAllBad(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncUnmarkAllBad", reflect.TypeOf((*MockFullNode)(nil).SyncUnmarkAllBad), arg0) +} + +// SyncUnmarkBad mocks base method +func (m *MockFullNode) SyncUnmarkBad(arg0 context.Context, arg1 cid.Cid) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SyncUnmarkBad", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// SyncUnmarkBad indicates an expected call of SyncUnmarkBad +func (mr *MockFullNodeMockRecorder) SyncUnmarkBad(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncUnmarkBad", reflect.TypeOf((*MockFullNode)(nil).SyncUnmarkBad), arg0, arg1) +} + +// SyncValidateTipset mocks base method +func (m *MockFullNode) SyncValidateTipset(arg0 context.Context, arg1 types.TipSetKey) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SyncValidateTipset", arg0, arg1) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SyncValidateTipset indicates an expected call of SyncValidateTipset +func (mr *MockFullNodeMockRecorder) SyncValidateTipset(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncValidateTipset", reflect.TypeOf((*MockFullNode)(nil).SyncValidateTipset), arg0, arg1) +} + +// Version mocks base method +func (m *MockFullNode) Version(arg0 context.Context) (api.APIVersion, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Version", arg0) + ret0, _ := ret[0].(api.APIVersion) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Version indicates an expected call of Version +func (mr *MockFullNodeMockRecorder) 
Version(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockFullNode)(nil).Version), arg0) +} + +// WalletBalance mocks base method +func (m *MockFullNode) WalletBalance(arg0 context.Context, arg1 address.Address) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletBalance", arg0, arg1) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletBalance indicates an expected call of WalletBalance +func (mr *MockFullNodeMockRecorder) WalletBalance(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletBalance", reflect.TypeOf((*MockFullNode)(nil).WalletBalance), arg0, arg1) +} + +// WalletDefaultAddress mocks base method +func (m *MockFullNode) WalletDefaultAddress(arg0 context.Context) (address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletDefaultAddress", arg0) + ret0, _ := ret[0].(address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletDefaultAddress indicates an expected call of WalletDefaultAddress +func (mr *MockFullNodeMockRecorder) WalletDefaultAddress(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletDefaultAddress", reflect.TypeOf((*MockFullNode)(nil).WalletDefaultAddress), arg0) +} + +// WalletDelete mocks base method +func (m *MockFullNode) WalletDelete(arg0 context.Context, arg1 address.Address) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletDelete", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// WalletDelete indicates an expected call of WalletDelete +func (mr *MockFullNodeMockRecorder) WalletDelete(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletDelete", reflect.TypeOf((*MockFullNode)(nil).WalletDelete), arg0, 
arg1) +} + +// WalletExport mocks base method +func (m *MockFullNode) WalletExport(arg0 context.Context, arg1 address.Address) (*types.KeyInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletExport", arg0, arg1) + ret0, _ := ret[0].(*types.KeyInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletExport indicates an expected call of WalletExport +func (mr *MockFullNodeMockRecorder) WalletExport(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletExport", reflect.TypeOf((*MockFullNode)(nil).WalletExport), arg0, arg1) +} + +// WalletHas mocks base method +func (m *MockFullNode) WalletHas(arg0 context.Context, arg1 address.Address) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletHas", arg0, arg1) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletHas indicates an expected call of WalletHas +func (mr *MockFullNodeMockRecorder) WalletHas(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletHas", reflect.TypeOf((*MockFullNode)(nil).WalletHas), arg0, arg1) +} + +// WalletImport mocks base method +func (m *MockFullNode) WalletImport(arg0 context.Context, arg1 *types.KeyInfo) (address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletImport", arg0, arg1) + ret0, _ := ret[0].(address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletImport indicates an expected call of WalletImport +func (mr *MockFullNodeMockRecorder) WalletImport(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletImport", reflect.TypeOf((*MockFullNode)(nil).WalletImport), arg0, arg1) +} + +// WalletList mocks base method +func (m *MockFullNode) WalletList(arg0 context.Context) ([]address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, 
"WalletList", arg0) + ret0, _ := ret[0].([]address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletList indicates an expected call of WalletList +func (mr *MockFullNodeMockRecorder) WalletList(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletList", reflect.TypeOf((*MockFullNode)(nil).WalletList), arg0) +} + +// WalletNew mocks base method +func (m *MockFullNode) WalletNew(arg0 context.Context, arg1 types.KeyType) (address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletNew", arg0, arg1) + ret0, _ := ret[0].(address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletNew indicates an expected call of WalletNew +func (mr *MockFullNodeMockRecorder) WalletNew(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletNew", reflect.TypeOf((*MockFullNode)(nil).WalletNew), arg0, arg1) +} + +// WalletSetDefault mocks base method +func (m *MockFullNode) WalletSetDefault(arg0 context.Context, arg1 address.Address) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletSetDefault", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// WalletSetDefault indicates an expected call of WalletSetDefault +func (mr *MockFullNodeMockRecorder) WalletSetDefault(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletSetDefault", reflect.TypeOf((*MockFullNode)(nil).WalletSetDefault), arg0, arg1) +} + +// WalletSign mocks base method +func (m *MockFullNode) WalletSign(arg0 context.Context, arg1 address.Address, arg2 []byte) (*crypto.Signature, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletSign", arg0, arg1, arg2) + ret0, _ := ret[0].(*crypto.Signature) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletSign indicates an expected call of WalletSign +func (mr 
*MockFullNodeMockRecorder) WalletSign(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletSign", reflect.TypeOf((*MockFullNode)(nil).WalletSign), arg0, arg1, arg2) +} + +// WalletSignMessage mocks base method +func (m *MockFullNode) WalletSignMessage(arg0 context.Context, arg1 address.Address, arg2 *types.Message) (*types.SignedMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletSignMessage", arg0, arg1, arg2) + ret0, _ := ret[0].(*types.SignedMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletSignMessage indicates an expected call of WalletSignMessage +func (mr *MockFullNodeMockRecorder) WalletSignMessage(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletSignMessage", reflect.TypeOf((*MockFullNode)(nil).WalletSignMessage), arg0, arg1, arg2) +} + +// WalletValidateAddress mocks base method +func (m *MockFullNode) WalletValidateAddress(arg0 context.Context, arg1 string) (address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletValidateAddress", arg0, arg1) + ret0, _ := ret[0].(address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletValidateAddress indicates an expected call of WalletValidateAddress +func (mr *MockFullNodeMockRecorder) WalletValidateAddress(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletValidateAddress", reflect.TypeOf((*MockFullNode)(nil).WalletValidateAddress), arg0, arg1) +} + +// WalletVerify mocks base method +func (m *MockFullNode) WalletVerify(arg0 context.Context, arg1 address.Address, arg2 []byte, arg3 *crypto.Signature) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletVerify", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletVerify 
indicates an expected call of WalletVerify +func (mr *MockFullNodeMockRecorder) WalletVerify(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletVerify", reflect.TypeOf((*MockFullNode)(nil).WalletVerify), arg0, arg1, arg2, arg3) +} diff --git a/api/test/paych.go b/api/test/paych.go index 2bcea436966..b38ba6189f3 100644 --- a/api/test/paych.go +++ b/api/test/paych.go @@ -15,7 +15,7 @@ import ( cbor "github.com/ipfs/go-ipld-cbor" "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/apibstore" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin" @@ -132,7 +132,7 @@ func TestPaymentChannels(t *testing.T, b APIBuilder, blocktime time.Duration) { t.Fatal("Unable to settle payment channel") } - creatorStore := adt.WrapStore(ctx, cbor.NewCborStore(apibstore.NewAPIBlockstore(paymentCreator))) + creatorStore := adt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewAPIBlockstore(paymentCreator))) // wait for the receiver to submit their vouchers ev := events.NewEvents(ctx, paymentCreator) diff --git a/api/test/tape.go b/api/test/tape.go index 5640ada6ebf..74206a97a32 100644 --- a/api/test/tape.go +++ b/api/test/tape.go @@ -6,7 +6,6 @@ import ( "testing" "time" - "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" @@ -75,23 +74,9 @@ func testTapeFix(t *testing.T, b APIBuilder, blocktime time.Duration, after bool <-done }() - err = miner.PledgeSector(ctx) + sid, err := miner.PledgeSector(ctx) require.NoError(t, err) - // Wait till done. 
- var sectorNo abi.SectorNumber - for { - s, err := miner.SectorsList(ctx) // Note - the test builder doesn't import genesis sectors into FSM - require.NoError(t, err) - fmt.Printf("Sectors: %d\n", len(s)) - if len(s) == 1 { - sectorNo = s[0] - break - } - - build.Clock.Sleep(100 * time.Millisecond) - } - fmt.Printf("All sectors is fsm\n") // If before, we expect the precommit to fail @@ -103,7 +88,7 @@ func testTapeFix(t *testing.T, b APIBuilder, blocktime time.Duration, after bool } for { - st, err := miner.SectorsStatus(ctx, sectorNo, false) + st, err := miner.SectorsStatus(ctx, sid.Number, false) require.NoError(t, err) if st.State == successState { break diff --git a/api/test/test.go b/api/test/test.go index eaf092a21da..eed760bc24c 100644 --- a/api/test/test.go +++ b/api/test/test.go @@ -155,13 +155,13 @@ var MineNext = miner.MineReq{ } func (ts *testSuite) testVersion(t *testing.T) { - build.RunningNodeType = build.NodeFull + api.RunningNodeType = api.NodeFull ctx := context.Background() apis, _ := ts.makeNodes(t, OneFull, OneMiner) - api := apis[0] + napi := apis[0] - v, err := api.Version(ctx) + v, err := napi.Version(ctx) if err != nil { t.Fatal(err) } diff --git a/api/test/window_post.go b/api/test/window_post.go index 99d48083688..ce42318b2b2 100644 --- a/api/test/window_post.go +++ b/api/test/window_post.go @@ -162,7 +162,7 @@ func pledgeSectors(t *testing.T, ctx context.Context, miner TestStorageNode, n, log.Errorf("WAIT") } log.Errorf("PLEDGING %d", i) - err := miner.PledgeSector(ctx) + _, err := miner.PledgeSector(ctx) require.NoError(t, err) } diff --git a/api/types.go b/api/types.go index 28141b83a3f..a463dd36eb0 100644 --- a/api/types.go +++ b/api/types.go @@ -3,6 +3,7 @@ package api import ( "encoding/json" "fmt" + "time" datatransfer "github.com/filecoin-project/go-data-transfer" "github.com/filecoin-project/go-state-types/abi" @@ -99,3 +100,18 @@ type NetBlockList struct { IPAddrs []string IPSubnets []string } + +type ExtendedPeerInfo struct { 
+ ID peer.ID + Agent string + Addrs []string + Protocols []string + ConnMgrMeta *ConnMgrInfo +} + +type ConnMgrInfo struct { + FirstSeen time.Time + Value int + Tags map[string]int + Conns map[string]time.Time +} diff --git a/api/version.go b/api/version.go new file mode 100644 index 00000000000..17605b518a6 --- /dev/null +++ b/api/version.go @@ -0,0 +1,71 @@ +package api + +import ( + "fmt" + + xerrors "golang.org/x/xerrors" +) + +type Version uint32 + +func newVer(major, minor, patch uint8) Version { + return Version(uint32(major)<<16 | uint32(minor)<<8 | uint32(patch)) +} + +// Ints returns (major, minor, patch) versions +func (ve Version) Ints() (uint32, uint32, uint32) { + v := uint32(ve) + return (v & majorOnlyMask) >> 16, (v & minorOnlyMask) >> 8, v & patchOnlyMask +} + +func (ve Version) String() string { + vmj, vmi, vp := ve.Ints() + return fmt.Sprintf("%d.%d.%d", vmj, vmi, vp) +} + +func (ve Version) EqMajorMinor(v2 Version) bool { + return ve&minorMask == v2&minorMask +} + +type NodeType int + +const ( + NodeUnknown NodeType = iota + + NodeFull + NodeMiner + NodeWorker +) + +var RunningNodeType NodeType + +func VersionForType(nodeType NodeType) (Version, error) { + switch nodeType { + case NodeFull: + return FullAPIVersion, nil + case NodeMiner: + return MinerAPIVersion, nil + case NodeWorker: + return WorkerAPIVersion, nil + default: + return Version(0), xerrors.Errorf("unknown node type %d", nodeType) + } +} + +// semver versions of the rpc api exposed +var ( + FullAPIVersion = newVer(1, 1, 0) + MinerAPIVersion = newVer(1, 0, 1) + WorkerAPIVersion = newVer(1, 0, 0) +) + +//nolint:varcheck,deadcode +const ( + majorMask = 0xff0000 + minorMask = 0xffff00 + patchMask = 0xffffff + + majorOnlyMask = 0xff0000 + minorOnlyMask = 0x00ff00 + patchOnlyMask = 0x0000ff +) diff --git a/blockstore/api.go b/blockstore/api.go new file mode 100644 index 00000000000..6715b476677 --- /dev/null +++ b/blockstore/api.go @@ -0,0 +1,66 @@ +package blockstore + +import ( + 
"context" + + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" +) + +type ChainIO interface { + ChainReadObj(context.Context, cid.Cid) ([]byte, error) + ChainHasObj(context.Context, cid.Cid) (bool, error) +} + +type apiBlockstore struct { + api ChainIO +} + +// This blockstore is adapted in the constructor. +var _ BasicBlockstore = (*apiBlockstore)(nil) + +func NewAPIBlockstore(cio ChainIO) Blockstore { + bs := &apiBlockstore{api: cio} + return Adapt(bs) // return an adapted blockstore. +} + +func (a *apiBlockstore) DeleteBlock(cid.Cid) error { + return xerrors.New("not supported") +} + +func (a *apiBlockstore) Has(c cid.Cid) (bool, error) { + return a.api.ChainHasObj(context.TODO(), c) +} + +func (a *apiBlockstore) Get(c cid.Cid) (blocks.Block, error) { + bb, err := a.api.ChainReadObj(context.TODO(), c) + if err != nil { + return nil, err + } + return blocks.NewBlockWithCid(bb, c) +} + +func (a *apiBlockstore) GetSize(c cid.Cid) (int, error) { + bb, err := a.api.ChainReadObj(context.TODO(), c) + if err != nil { + return 0, err + } + return len(bb), nil +} + +func (a *apiBlockstore) Put(blocks.Block) error { + return xerrors.New("not supported") +} + +func (a *apiBlockstore) PutMany([]blocks.Block) error { + return xerrors.New("not supported") +} + +func (a *apiBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + return nil, xerrors.New("not supported") +} + +func (a *apiBlockstore) HashOnRead(enabled bool) { + return +} diff --git a/lib/blockstore/badger/blockstore.go b/blockstore/badger/blockstore.go similarity index 89% rename from lib/blockstore/badger/blockstore.go rename to blockstore/badger/blockstore.go index fa9d55bdb18..2c00f424077 100644 --- a/lib/blockstore/badger/blockstore.go +++ b/blockstore/badger/blockstore.go @@ -16,7 +16,7 @@ import ( logger "github.com/ipfs/go-log/v2" pool "github.com/libp2p/go-buffer-pool" - "github.com/filecoin-project/lotus/lib/blockstore" + 
"github.com/filecoin-project/lotus/blockstore" ) var ( @@ -110,10 +110,7 @@ func Open(opts Options) (*Blockstore, error) { return nil, fmt.Errorf("failed to open badger blockstore: %w", err) } - bs := &Blockstore{ - DB: db, - } - + bs := &Blockstore{DB: db} if p := opts.Prefix; p != "" { bs.prefixing = true bs.prefix = []byte(p) @@ -134,6 +131,25 @@ func (b *Blockstore) Close() error { return b.DB.Close() } +// CollectGarbage runs garbage collection on the value log +func (b *Blockstore) CollectGarbage() error { + if atomic.LoadInt64(&b.state) != stateOpen { + return ErrBlockstoreClosed + } + + var err error + for err == nil { + err = b.DB.RunValueLogGC(0.125) + } + + if err == badger.ErrNoRewrite { + // not really an error in this case + return nil + } + + return err +} + // View implements blockstore.Viewer, which leverages zero-copy read-only // access to values. func (b *Blockstore) View(cid cid.Cid, fn func([]byte) error) error { @@ -321,6 +337,44 @@ func (b *Blockstore) DeleteBlock(cid cid.Cid) error { }) } +func (b *Blockstore) DeleteMany(cids []cid.Cid) error { + if atomic.LoadInt64(&b.state) != stateOpen { + return ErrBlockstoreClosed + } + + batch := b.DB.NewWriteBatch() + defer batch.Cancel() + + // toReturn tracks the byte slices to return to the pool, if we're using key + // prefixing. we can't return each slice to the pool after each Set, because + // badger holds on to the slice. + var toReturn [][]byte + if b.prefixing { + toReturn = make([][]byte, 0, len(cids)) + defer func() { + for _, b := range toReturn { + KeyPool.Put(b) + } + }() + } + + for _, cid := range cids { + k, pooled := b.PooledStorageKey(cid) + if pooled { + toReturn = append(toReturn, k) + } + if err := batch.Delete(k); err != nil { + return err + } + } + + err := batch.Flush() + if err != nil { + err = fmt.Errorf("failed to delete blocks from badger blockstore: %w", err) + } + return err +} + // AllKeysChan implements Blockstore.AllKeysChan. 
func (b *Blockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { if atomic.LoadInt64(&b.state) != stateOpen { diff --git a/lib/blockstore/badger/blockstore_test.go b/blockstore/badger/blockstore_test.go similarity index 85% rename from lib/blockstore/badger/blockstore_test.go rename to blockstore/badger/blockstore_test.go index e357117e584..3221458d28f 100644 --- a/lib/blockstore/badger/blockstore_test.go +++ b/blockstore/badger/blockstore_test.go @@ -6,8 +6,9 @@ import ( "testing" blocks "github.com/ipfs/go-block-format" - blockstore "github.com/ipfs/go-ipfs-blockstore" "github.com/stretchr/testify/require" + + "github.com/filecoin-project/lotus/blockstore" ) func TestBadgerBlockstore(t *testing.T) { @@ -60,8 +61,8 @@ func TestStorageKey(t *testing.T) { require.Equal(t, k3, k2) } -func newBlockstore(optsSupplier func(path string) Options) func(tb testing.TB) (bs blockstore.Blockstore, path string) { - return func(tb testing.TB) (bs blockstore.Blockstore, path string) { +func newBlockstore(optsSupplier func(path string) Options) func(tb testing.TB) (bs blockstore.BasicBlockstore, path string) { + return func(tb testing.TB) (bs blockstore.BasicBlockstore, path string) { tb.Helper() path, err := ioutil.TempDir("", "") @@ -82,8 +83,8 @@ func newBlockstore(optsSupplier func(path string) Options) func(tb testing.TB) ( } } -func openBlockstore(optsSupplier func(path string) Options) func(tb testing.TB, path string) (bs blockstore.Blockstore, err error) { - return func(tb testing.TB, path string) (bs blockstore.Blockstore, err error) { +func openBlockstore(optsSupplier func(path string) Options) func(tb testing.TB, path string) (bs blockstore.BasicBlockstore, err error) { + return func(tb testing.TB, path string) (bs blockstore.BasicBlockstore, err error) { tb.Helper() return Open(optsSupplier(path)) } diff --git a/lib/blockstore/badger/blockstore_test_suite.go b/blockstore/badger/blockstore_test_suite.go similarity index 96% rename from 
lib/blockstore/badger/blockstore_test_suite.go rename to blockstore/badger/blockstore_test_suite.go index 9332e62c5bf..93be82ac87e 100644 --- a/lib/blockstore/badger/blockstore_test_suite.go +++ b/blockstore/badger/blockstore_test_suite.go @@ -8,18 +8,19 @@ import ( "strings" "testing" - "github.com/filecoin-project/lotus/lib/blockstore" blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" u "github.com/ipfs/go-ipfs-util" + "github.com/filecoin-project/lotus/blockstore" + "github.com/stretchr/testify/require" ) // TODO: move this to go-ipfs-blockstore. type Suite struct { - NewBlockstore func(tb testing.TB) (bs blockstore.Blockstore, path string) - OpenBlockstore func(tb testing.TB, path string) (bs blockstore.Blockstore, err error) + NewBlockstore func(tb testing.TB) (bs blockstore.BasicBlockstore, path string) + OpenBlockstore func(tb testing.TB, path string) (bs blockstore.BasicBlockstore, err error) } func (s *Suite) RunTests(t *testing.T, prefix string) { @@ -290,7 +291,7 @@ func (s *Suite) TestDelete(t *testing.T) { } -func insertBlocks(t *testing.T, bs blockstore.Blockstore, count int) []cid.Cid { +func insertBlocks(t *testing.T, bs blockstore.BasicBlockstore, count int) []cid.Cid { keys := make([]cid.Cid, count) for i := 0; i < count; i++ { block := blocks.NewBlock([]byte(fmt.Sprintf("some data %d", i))) diff --git a/blockstore/blockstore.go b/blockstore/blockstore.go new file mode 100644 index 00000000000..23f0bd7546c --- /dev/null +++ b/blockstore/blockstore.go @@ -0,0 +1,95 @@ +package blockstore + +import ( + cid "github.com/ipfs/go-cid" + ds "github.com/ipfs/go-datastore" + logging "github.com/ipfs/go-log/v2" + + blockstore "github.com/ipfs/go-ipfs-blockstore" +) + +var log = logging.Logger("blockstore") + +var ErrNotFound = blockstore.ErrNotFound + +// Blockstore is the blockstore interface used by Lotus. It is the union +// of the basic go-ipfs blockstore, with other capabilities required by Lotus, +// e.g. View or DeleteMany. 
+type Blockstore interface { + blockstore.Blockstore + blockstore.Viewer + BatchDeleter +} + +// BasicBlockstore is an alias to the original IPFS Blockstore. +type BasicBlockstore = blockstore.Blockstore + +type Viewer = blockstore.Viewer + +type BatchDeleter interface { + DeleteMany(cids []cid.Cid) error +} + +// WrapIDStore wraps the underlying blockstore in an "identity" blockstore. +// The ID store filters out all puts for blocks with CIDs using the "identity" +// hash function. It also extracts inlined blocks from CIDs using the identity +// hash function and returns them on get/has, ignoring the contents of the +// blockstore. +func WrapIDStore(bstore blockstore.Blockstore) Blockstore { + if is, ok := bstore.(*idstore); ok { + // already wrapped + return is + } + + if bs, ok := bstore.(Blockstore); ok { + // we need to wrap our own because we don't want to neuter the DeleteMany method + // the underlying blockstore has implemented an (efficient) DeleteMany + return NewIDStore(bs) + } + + // The underlying blockstore does not implement DeleteMany, so we need to shim it. + // This is less efficient as it'll iterate and perform single deletes. + return NewIDStore(Adapt(bstore)) +} + +// FromDatastore creates a new blockstore backed by the given datastore. +func FromDatastore(dstore ds.Batching) Blockstore { + return WrapIDStore(blockstore.NewBlockstore(dstore)) +} + +type adaptedBlockstore struct { + blockstore.Blockstore +} + +var _ Blockstore = (*adaptedBlockstore)(nil) + +func (a *adaptedBlockstore) View(cid cid.Cid, callback func([]byte) error) error { + blk, err := a.Get(cid) + if err != nil { + return err + } + return callback(blk.RawData()) +} + +func (a *adaptedBlockstore) DeleteMany(cids []cid.Cid) error { + for _, cid := range cids { + err := a.DeleteBlock(cid) + if err != nil { + return err + } + } + + return nil +} + +// Adapt adapts a standard blockstore to a Lotus blockstore by +// enriching it with the extra methods that Lotus requires (e.g. 
View, DeleteMany). +// +// View proxies over to Get and calls the callback with the value supplied by Get. +// DeleteMany falls back to deleting the blocks one by one via DeleteBlock. +func Adapt(bs blockstore.Blockstore) Blockstore { + if ret, ok := bs.(Blockstore); ok { + return ret + } + return &adaptedBlockstore{bs} +} diff --git a/blockstore/buffered.go b/blockstore/buffered.go new file mode 100644 index 00000000000..5d3d38f78f9 --- /dev/null +++ b/blockstore/buffered.go @@ -0,0 +1,174 @@ +package blockstore + +import ( + "context" + "os" + + block "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" +) + +// buflog is a logger for the buffered blockstore. It is subscoped from the +// blockstore logger. +var buflog = log.Named("buf") + +type BufferedBlockstore struct { + read Blockstore + write Blockstore +} + +func NewBuffered(base Blockstore) *BufferedBlockstore { + var buf Blockstore + if os.Getenv("LOTUS_DISABLE_VM_BUF") == "iknowitsabadidea" { + buflog.Warn("VM BLOCKSTORE BUFFERING IS DISABLED") + buf = base + } else { + buf = NewMemory() + } + + bs := &BufferedBlockstore{ + read: base, + write: buf, + } + return bs +} + +func NewTieredBstore(r Blockstore, w Blockstore) *BufferedBlockstore { + return &BufferedBlockstore{ + read: r, + write: w, + } +} + +var ( + _ Blockstore = (*BufferedBlockstore)(nil) + _ Viewer = (*BufferedBlockstore)(nil) +) + +func (bs *BufferedBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + a, err := bs.read.AllKeysChan(ctx) + if err != nil { + return nil, err + } + + b, err := bs.write.AllKeysChan(ctx) + if err != nil { + return nil, err + } + + out := make(chan cid.Cid) + go func() { + defer close(out) + for a != nil || b != nil { + select { + case val, ok := <-a: + if !ok { + a = nil + } else { + select { + case out <- val: + case <-ctx.Done(): + return + } + } + case val, ok := <-b: + if !ok { + b = nil + } else { + select { + case out <- val: + case <-ctx.Done(): + return + } + } + } + } + }() + + return out, nil +} + +func (bs *BufferedBlockstore) DeleteBlock(c 
cid.Cid) error { + if err := bs.read.DeleteBlock(c); err != nil { + return err + } + + return bs.write.DeleteBlock(c) +} + +func (bs *BufferedBlockstore) DeleteMany(cids []cid.Cid) error { + if err := bs.read.DeleteMany(cids); err != nil { + return err + } + + return bs.write.DeleteMany(cids) +} + +func (bs *BufferedBlockstore) View(c cid.Cid, callback func([]byte) error) error { + // both stores are viewable. + if err := bs.write.View(c, callback); err == ErrNotFound { + // not found in write blockstore; fall through. + } else { + return err // propagate errors, or nil, i.e. found. + } + return bs.read.View(c, callback) +} + +func (bs *BufferedBlockstore) Get(c cid.Cid) (block.Block, error) { + if out, err := bs.write.Get(c); err != nil { + if err != ErrNotFound { + return nil, err + } + } else { + return out, nil + } + + return bs.read.Get(c) +} + +func (bs *BufferedBlockstore) GetSize(c cid.Cid) (int, error) { + s, err := bs.read.GetSize(c) + if err == ErrNotFound || s == 0 { + return bs.write.GetSize(c) + } + + return s, err +} + +func (bs *BufferedBlockstore) Put(blk block.Block) error { + has, err := bs.read.Has(blk.Cid()) // TODO: consider dropping this check + if err != nil { + return err + } + + if has { + return nil + } + + return bs.write.Put(blk) +} + +func (bs *BufferedBlockstore) Has(c cid.Cid) (bool, error) { + has, err := bs.write.Has(c) + if err != nil { + return false, err + } + if has { + return true, nil + } + + return bs.read.Has(c) +} + +func (bs *BufferedBlockstore) HashOnRead(hor bool) { + bs.read.HashOnRead(hor) + bs.write.HashOnRead(hor) +} + +func (bs *BufferedBlockstore) PutMany(blks []block.Block) error { + return bs.write.PutMany(blks) +} + +func (bs *BufferedBlockstore) Read() Blockstore { + return bs.read +} diff --git a/blockstore/cached.go b/blockstore/cached.go new file mode 100644 index 00000000000..f2bb3ed57a4 --- /dev/null +++ b/blockstore/cached.go @@ -0,0 +1,25 @@ +package blockstore + +import ( + "context" + + blockstore 
"github.com/ipfs/go-ipfs-blockstore" +) + +type CacheOpts = blockstore.CacheOpts + +func DefaultCacheOpts() CacheOpts { + return CacheOpts{ + HasBloomFilterSize: 0, + HasBloomFilterHashes: 0, + HasARCCacheSize: 512 << 10, + } +} + +func CachedBlockstore(ctx context.Context, bs Blockstore, opts CacheOpts) (Blockstore, error) { + cached, err := blockstore.CachedBlockstore(ctx, bs, opts) + if err != nil { + return nil, err + } + return WrapIDStore(cached), nil +} diff --git a/blockstore/doc.go b/blockstore/doc.go new file mode 100644 index 00000000000..fea1126f5ca --- /dev/null +++ b/blockstore/doc.go @@ -0,0 +1,9 @@ +// Package blockstore and subpackages contain most of the blockstore +// implementations used by Lotus. +// +// Blockstores not ultimately constructed out of the building blocks in this +// package may not work properly. +// +// This package re-exports parts of the go-ipfs-blockstore package such that +// no other package needs to import it directly, for ergonomics and traceability. +package blockstore diff --git a/lib/blockstore/fallbackstore.go b/blockstore/fallback.go similarity index 54% rename from lib/blockstore/fallbackstore.go rename to blockstore/fallback.go index 0ce397d4454..5f220f941bb 100644 --- a/lib/blockstore/fallbackstore.go +++ b/blockstore/fallback.go @@ -9,48 +9,61 @@ import ( blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" - blockstore "github.com/ipfs/go-ipfs-blockstore" - logging "github.com/ipfs/go-log" ) -var log = logging.Logger("blockstore") +// UnwrapFallbackStore takes a blockstore, and returns the underlying blockstore +// if it was a FallbackStore. Otherwise, it just returns the supplied store +// unmodified. +func UnwrapFallbackStore(bs Blockstore) (Blockstore, bool) { + if fbs, ok := bs.(*FallbackStore); ok { + return fbs.Blockstore, true + } + return bs, false +} +// FallbackStore is a read-through store that queries another (potentially +// remote) source if the block is not found locally. 
If the block is found +// during the fallback, it stores it in the local store. type FallbackStore struct { - blockstore.Blockstore + Blockstore - fallbackGetBlock func(context.Context, cid.Cid) (blocks.Block, error) - lk sync.RWMutex + lk sync.RWMutex + // missFn is the function that will be invoked on a local miss to pull the + // block from elsewhere. + missFn func(context.Context, cid.Cid) (blocks.Block, error) } -func (fbs *FallbackStore) SetFallback(fg func(context.Context, cid.Cid) (blocks.Block, error)) { +var _ Blockstore = (*FallbackStore)(nil) + +func (fbs *FallbackStore) SetFallback(missFn func(context.Context, cid.Cid) (blocks.Block, error)) { fbs.lk.Lock() defer fbs.lk.Unlock() - fbs.fallbackGetBlock = fg + fbs.missFn = missFn } func (fbs *FallbackStore) getFallback(c cid.Cid) (blocks.Block, error) { - log.Errorw("fallbackstore: Block not found locally, fetching from the network", "cid", c) + log.Warnf("fallbackstore: block not found locally, fetching from the network; cid: %s", c) fbs.lk.RLock() defer fbs.lk.RUnlock() - if fbs.fallbackGetBlock == nil { + if fbs.missFn == nil { // FallbackStore wasn't configured yet (chainstore/bitswap aren't up yet) // Wait for a bit and retry fbs.lk.RUnlock() time.Sleep(5 * time.Second) fbs.lk.RLock() - if fbs.fallbackGetBlock == nil { - log.Errorw("fallbackstore: fallbackGetBlock not configured yet") - return nil, blockstore.ErrNotFound + if fbs.missFn == nil { + log.Errorw("fallbackstore: missFn not configured yet") + return nil, ErrNotFound } } ctx, cancel := context.WithTimeout(context.TODO(), 120*time.Second) defer cancel() - b, err := fbs.fallbackGetBlock(ctx, c) + b, err := fbs.missFn(ctx, c) if err != nil { return nil, err } @@ -69,7 +82,7 @@ func (fbs *FallbackStore) Get(c cid.Cid) (blocks.Block, error) { switch err { case nil: return b, nil - case blockstore.ErrNotFound: + case ErrNotFound: return fbs.getFallback(c) default: return b, err @@ -81,7 +94,7 @@ func (fbs *FallbackStore) GetSize(c cid.Cid) (int, 
error) { switch err { case nil: return sz, nil - case blockstore.ErrNotFound: + case ErrNotFound: b, err := fbs.getFallback(c) if err != nil { return 0, err @@ -91,5 +104,3 @@ func (fbs *FallbackStore) GetSize(c cid.Cid) (int, error) { return sz, err } } - -var _ blockstore.Blockstore = &FallbackStore{} diff --git a/blockstore/idstore.go b/blockstore/idstore.go new file mode 100644 index 00000000000..e6148ff04e2 --- /dev/null +++ b/blockstore/idstore.go @@ -0,0 +1,174 @@ +package blockstore + +import ( + "context" + "io" + + "golang.org/x/xerrors" + + blocks "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" + mh "github.com/multiformats/go-multihash" +) + +var _ Blockstore = (*idstore)(nil) + +type idstore struct { + bs Blockstore +} + +func NewIDStore(bs Blockstore) Blockstore { + return &idstore{bs: bs} +} + +func decodeCid(cid cid.Cid) (inline bool, data []byte, err error) { + if cid.Prefix().MhType != mh.IDENTITY { + return false, nil, nil + } + + dmh, err := mh.Decode(cid.Hash()) + if err != nil { + return false, nil, err + } + + if dmh.Code == mh.IDENTITY { + return true, dmh.Digest, nil + } + + return false, nil, err +} + +func (b *idstore) Has(cid cid.Cid) (bool, error) { + inline, _, err := decodeCid(cid) + if err != nil { + return false, xerrors.Errorf("error decoding Cid: %w", err) + } + + if inline { + return true, nil + } + + return b.bs.Has(cid) +} + +func (b *idstore) Get(cid cid.Cid) (blocks.Block, error) { + inline, data, err := decodeCid(cid) + if err != nil { + return nil, xerrors.Errorf("error decoding Cid: %w", err) + } + + if inline { + return blocks.NewBlockWithCid(data, cid) + } + + return b.bs.Get(cid) +} + +func (b *idstore) GetSize(cid cid.Cid) (int, error) { + inline, data, err := decodeCid(cid) + if err != nil { + return 0, xerrors.Errorf("error decoding Cid: %w", err) + } + + if inline { + return len(data), err + } + + return b.bs.GetSize(cid) +} + +func (b *idstore) View(cid cid.Cid, cb func([]byte) error) error { + 
inline, data, err := decodeCid(cid) + if err != nil { + return xerrors.Errorf("error decoding Cid: %w", err) + } + + if inline { + return cb(data) + } + + return b.bs.View(cid, cb) +} + +func (b *idstore) Put(blk blocks.Block) error { + inline, _, err := decodeCid(blk.Cid()) + if err != nil { + return xerrors.Errorf("error decoding Cid: %w", err) + } + + if inline { + return nil + } + + return b.bs.Put(blk) +} + +func (b *idstore) PutMany(blks []blocks.Block) error { + toPut := make([]blocks.Block, 0, len(blks)) + for _, blk := range blks { + inline, _, err := decodeCid(blk.Cid()) + if err != nil { + return xerrors.Errorf("error decoding Cid: %w", err) + } + + if inline { + continue + } + toPut = append(toPut, blk) + } + + if len(toPut) > 0 { + return b.bs.PutMany(toPut) + } + + return nil +} + +func (b *idstore) DeleteBlock(cid cid.Cid) error { + inline, _, err := decodeCid(cid) + if err != nil { + return xerrors.Errorf("error decoding Cid: %w", err) + } + + if inline { + return nil + } + + return b.bs.DeleteBlock(cid) +} + +func (b *idstore) DeleteMany(cids []cid.Cid) error { + toDelete := make([]cid.Cid, 0, len(cids)) + for _, cid := range cids { + inline, _, err := decodeCid(cid) + if err != nil { + return xerrors.Errorf("error decoding Cid: %w", err) + } + + if inline { + continue + } + toDelete = append(toDelete, cid) + } + + if len(toDelete) > 0 { + return b.bs.DeleteMany(toDelete) + } + + return nil +} + +func (b *idstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + return b.bs.AllKeysChan(ctx) +} + +func (b *idstore) HashOnRead(enabled bool) { + b.bs.HashOnRead(enabled) +} + +func (b *idstore) Close() error { + if c, ok := b.bs.(io.Closer); ok { + return c.Close() + } + return nil +} diff --git a/lib/ipfsbstore/ipfsbstore.go b/blockstore/ipfs.go similarity index 77% rename from lib/ipfsbstore/ipfsbstore.go rename to blockstore/ipfs.go index 5f1c63f36ac..51b4bd95123 100644 --- a/lib/ipfsbstore/ipfsbstore.go +++ b/blockstore/ipfs.go @@ -1,4 
+1,4 @@ -package ipfsbstore +package blockstore import ( "bytes" @@ -16,16 +16,16 @@ import ( iface "github.com/ipfs/interface-go-ipfs-core" "github.com/ipfs/interface-go-ipfs-core/options" "github.com/ipfs/interface-go-ipfs-core/path" - - "github.com/filecoin-project/lotus/lib/blockstore" ) -type IpfsBstore struct { +type IPFSBlockstore struct { ctx context.Context api, offlineAPI iface.CoreAPI } -func NewIpfsBstore(ctx context.Context, onlineMode bool) (*IpfsBstore, error) { +var _ BasicBlockstore = (*IPFSBlockstore)(nil) + +func NewLocalIPFSBlockstore(ctx context.Context, onlineMode bool) (Blockstore, error) { localApi, err := httpapi.NewLocalApi() if err != nil { return nil, xerrors.Errorf("getting local ipfs api: %w", err) @@ -34,6 +34,7 @@ func NewIpfsBstore(ctx context.Context, onlineMode bool) (*IpfsBstore, error) { if err != nil { return nil, xerrors.Errorf("setting offline mode: %s", err) } + offlineAPI := api if onlineMode { offlineAPI, err = localApi.WithOptions(options.Api.Offline(true)) @@ -42,14 +43,16 @@ func NewIpfsBstore(ctx context.Context, onlineMode bool) (*IpfsBstore, error) { } } - return &IpfsBstore{ + bs := &IPFSBlockstore{ ctx: ctx, api: api, offlineAPI: offlineAPI, - }, nil + } + + return Adapt(bs), nil } -func NewRemoteIpfsBstore(ctx context.Context, maddr multiaddr.Multiaddr, onlineMode bool) (*IpfsBstore, error) { +func NewRemoteIPFSBlockstore(ctx context.Context, maddr multiaddr.Multiaddr, onlineMode bool) (Blockstore, error) { httpApi, err := httpapi.NewApi(maddr) if err != nil { return nil, xerrors.Errorf("setting remote ipfs api: %w", err) @@ -58,6 +61,7 @@ func NewRemoteIpfsBstore(ctx context.Context, maddr multiaddr.Multiaddr, onlineM if err != nil { return nil, xerrors.Errorf("applying offline mode: %s", err) } + offlineAPI := api if onlineMode { offlineAPI, err = httpApi.WithOptions(options.Api.Offline(true)) @@ -66,18 +70,20 @@ func NewRemoteIpfsBstore(ctx context.Context, maddr multiaddr.Multiaddr, onlineM } } - return 
&IpfsBstore{ + bs := &IPFSBlockstore{ ctx: ctx, api: api, offlineAPI: offlineAPI, - }, nil + } + + return Adapt(bs), nil } -func (i *IpfsBstore) DeleteBlock(cid cid.Cid) error { +func (i *IPFSBlockstore) DeleteBlock(cid cid.Cid) error { return xerrors.Errorf("not supported") } -func (i *IpfsBstore) Has(cid cid.Cid) (bool, error) { +func (i *IPFSBlockstore) Has(cid cid.Cid) (bool, error) { _, err := i.offlineAPI.Block().Stat(i.ctx, path.IpldPath(cid)) if err != nil { // The underlying client is running in Offline mode. @@ -93,7 +99,7 @@ func (i *IpfsBstore) Has(cid cid.Cid) (bool, error) { return true, nil } -func (i *IpfsBstore) Get(cid cid.Cid) (blocks.Block, error) { +func (i *IPFSBlockstore) Get(cid cid.Cid) (blocks.Block, error) { rd, err := i.api.Block().Get(i.ctx, path.IpldPath(cid)) if err != nil { return nil, xerrors.Errorf("getting ipfs block: %w", err) @@ -107,7 +113,7 @@ func (i *IpfsBstore) Get(cid cid.Cid) (blocks.Block, error) { return blocks.NewBlockWithCid(data, cid) } -func (i *IpfsBstore) GetSize(cid cid.Cid) (int, error) { +func (i *IPFSBlockstore) GetSize(cid cid.Cid) (int, error) { st, err := i.api.Block().Stat(i.ctx, path.IpldPath(cid)) if err != nil { return 0, xerrors.Errorf("getting ipfs block: %w", err) @@ -116,7 +122,7 @@ func (i *IpfsBstore) GetSize(cid cid.Cid) (int, error) { return st.Size(), nil } -func (i *IpfsBstore) Put(block blocks.Block) error { +func (i *IPFSBlockstore) Put(block blocks.Block) error { mhd, err := multihash.Decode(block.Cid().Hash()) if err != nil { return err @@ -128,7 +134,7 @@ func (i *IpfsBstore) Put(block blocks.Block) error { return err } -func (i *IpfsBstore) PutMany(blocks []blocks.Block) error { +func (i *IPFSBlockstore) PutMany(blocks []blocks.Block) error { // TODO: could be done in parallel for _, block := range blocks { @@ -140,12 +146,10 @@ func (i *IpfsBstore) PutMany(blocks []blocks.Block) error { return nil } -func (i *IpfsBstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { +func 
(i *IPFSBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { return nil, xerrors.Errorf("not supported") } -func (i *IpfsBstore) HashOnRead(enabled bool) { +func (i *IPFSBlockstore) HashOnRead(enabled bool) { return // TODO: We could technically support this, but.. } - -var _ blockstore.Blockstore = &IpfsBstore{} diff --git a/lib/blockstore/memstore.go b/blockstore/mem.go similarity index 62% rename from lib/blockstore/memstore.go rename to blockstore/mem.go index 5cfaf40a929..8ea69d46a49 100644 --- a/lib/blockstore/memstore.go +++ b/blockstore/mem.go @@ -7,20 +7,32 @@ import ( "github.com/ipfs/go-cid" ) -// MemStore is a terminal blockstore that keeps blocks in memory. -type MemStore map[cid.Cid]blocks.Block +// NewMemory returns a temporary memory-backed blockstore. +func NewMemory() MemBlockstore { + return make(MemBlockstore) +} + +// MemBlockstore is a terminal blockstore that keeps blocks in memory. +type MemBlockstore map[cid.Cid]blocks.Block -func (m MemStore) DeleteBlock(k cid.Cid) error { +func (m MemBlockstore) DeleteBlock(k cid.Cid) error { delete(m, k) return nil } -func (m MemStore) Has(k cid.Cid) (bool, error) { +func (m MemBlockstore) DeleteMany(ks []cid.Cid) error { + for _, k := range ks { + delete(m, k) + } + return nil +} + +func (m MemBlockstore) Has(k cid.Cid) (bool, error) { _, ok := m[k] return ok, nil } -func (m MemStore) View(k cid.Cid, callback func([]byte) error) error { +func (m MemBlockstore) View(k cid.Cid, callback func([]byte) error) error { b, ok := m[k] if !ok { return ErrNotFound @@ -28,7 +40,7 @@ func (m MemStore) View(k cid.Cid, callback func([]byte) error) error { return callback(b.RawData()) } -func (m MemStore) Get(k cid.Cid) (blocks.Block, error) { +func (m MemBlockstore) Get(k cid.Cid) (blocks.Block, error) { b, ok := m[k] if !ok { return nil, ErrNotFound @@ -37,7 +49,7 @@ func (m MemStore) Get(k cid.Cid) (blocks.Block, error) { } // GetSize returns the CIDs mapped BlockSize -func (m MemStore) GetSize(k 
cid.Cid) (int, error) { +func (m MemBlockstore) GetSize(k cid.Cid) (int, error) { b, ok := m[k] if !ok { return 0, ErrNotFound @@ -46,7 +58,7 @@ func (m MemStore) GetSize(k cid.Cid) (int, error) { } // Put puts a given block to the underlying datastore -func (m MemStore) Put(b blocks.Block) error { +func (m MemBlockstore) Put(b blocks.Block) error { // Convert to a basic block for safety, but try to reuse the existing // block if it's already a basic block. k := b.Cid() @@ -64,7 +76,7 @@ func (m MemStore) Put(b blocks.Block) error { // PutMany puts a slice of blocks at the same time using batching // capabilities of the underlying datastore whenever possible. -func (m MemStore) PutMany(bs []blocks.Block) error { +func (m MemBlockstore) PutMany(bs []blocks.Block) error { for _, b := range bs { _ = m.Put(b) // can't fail } @@ -74,7 +86,7 @@ func (m MemStore) PutMany(bs []blocks.Block) error { // AllKeysChan returns a channel from which // the CIDs in the Blockstore can be read. It should respect // the given context, closing the channel if it becomes Done. -func (m MemStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { +func (m MemBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { ch := make(chan cid.Cid, len(m)) for k := range m { ch <- k @@ -85,6 +97,6 @@ func (m MemStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { // HashOnRead specifies if every read block should be // rehashed to make sure it matches its CID. 
-func (m MemStore) HashOnRead(enabled bool) { +func (m MemBlockstore) HashOnRead(enabled bool) { // no-op } diff --git a/blockstore/metrics.go b/blockstore/metrics.go new file mode 100644 index 00000000000..737690a1106 --- /dev/null +++ b/blockstore/metrics.go @@ -0,0 +1,154 @@ +package blockstore + +import ( + "time" + + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" +) + +// +// Currently unused, but kept in repo in case we introduce one of the candidate +// cache implementations (Freecache, Ristretto), both of which report these +// metrics. +// + +// CacheMetricsEmitInterval is the interval at which metrics are emitted onto +// OpenCensus. +var CacheMetricsEmitInterval = 5 * time.Second + +var ( + CacheName, _ = tag.NewKey("cache_name") +) + +// CacheMeasures groups all metrics emitted by the blockstore caches. +var CacheMeasures = struct { + HitRatio *stats.Float64Measure + Hits *stats.Int64Measure + Misses *stats.Int64Measure + Entries *stats.Int64Measure + QueriesServed *stats.Int64Measure + Adds *stats.Int64Measure + Updates *stats.Int64Measure + Evictions *stats.Int64Measure + CostAdded *stats.Int64Measure + CostEvicted *stats.Int64Measure + SetsDropped *stats.Int64Measure + SetsRejected *stats.Int64Measure + QueriesDropped *stats.Int64Measure +}{ + HitRatio: stats.Float64("blockstore/cache/hit_ratio", "Hit ratio of blockstore cache", stats.UnitDimensionless), + Hits: stats.Int64("blockstore/cache/hits", "Total number of hits at blockstore cache", stats.UnitDimensionless), + Misses: stats.Int64("blockstore/cache/misses", "Total number of misses at blockstore cache", stats.UnitDimensionless), + Entries: stats.Int64("blockstore/cache/entry_count", "Total number of entries currently in the blockstore cache", stats.UnitDimensionless), + QueriesServed: stats.Int64("blockstore/cache/queries_served", "Total number of queries served by the blockstore cache", stats.UnitDimensionless), + Adds: stats.Int64("blockstore/cache/adds", 
"Total number of adds to blockstore cache", stats.UnitDimensionless), + Updates: stats.Int64("blockstore/cache/updates", "Total number of updates in blockstore cache", stats.UnitDimensionless), + Evictions: stats.Int64("blockstore/cache/evictions", "Total number of evictions from blockstore cache", stats.UnitDimensionless), + CostAdded: stats.Int64("blockstore/cache/cost_added", "Total cost (byte size) of entries added into blockstore cache", stats.UnitBytes), + CostEvicted: stats.Int64("blockstore/cache/cost_evicted", "Total cost (byte size) of entries evicted by blockstore cache", stats.UnitBytes), + SetsDropped: stats.Int64("blockstore/cache/sets_dropped", "Total number of sets dropped by blockstore cache", stats.UnitDimensionless), + SetsRejected: stats.Int64("blockstore/cache/sets_rejected", "Total number of sets rejected by blockstore cache", stats.UnitDimensionless), + QueriesDropped: stats.Int64("blockstore/cache/queries_dropped", "Total number of queries dropped by blockstore cache", stats.UnitDimensionless), +} + +// CacheViews groups all cache-related default views. 
+var CacheViews = struct { + HitRatio *view.View + Hits *view.View + Misses *view.View + Entries *view.View + QueriesServed *view.View + Adds *view.View + Updates *view.View + Evictions *view.View + CostAdded *view.View + CostEvicted *view.View + SetsDropped *view.View + SetsRejected *view.View + QueriesDropped *view.View +}{ + HitRatio: &view.View{ + Measure: CacheMeasures.HitRatio, + Aggregation: view.LastValue(), + TagKeys: []tag.Key{CacheName}, + }, + Hits: &view.View{ + Measure: CacheMeasures.Hits, + Aggregation: view.LastValue(), + TagKeys: []tag.Key{CacheName}, + }, + Misses: &view.View{ + Measure: CacheMeasures.Misses, + Aggregation: view.LastValue(), + TagKeys: []tag.Key{CacheName}, + }, + Entries: &view.View{ + Measure: CacheMeasures.Entries, + Aggregation: view.LastValue(), + TagKeys: []tag.Key{CacheName}, + }, + QueriesServed: &view.View{ + Measure: CacheMeasures.QueriesServed, + Aggregation: view.LastValue(), + TagKeys: []tag.Key{CacheName}, + }, + Adds: &view.View{ + Measure: CacheMeasures.Adds, + Aggregation: view.LastValue(), + TagKeys: []tag.Key{CacheName}, + }, + Updates: &view.View{ + Measure: CacheMeasures.Updates, + Aggregation: view.LastValue(), + TagKeys: []tag.Key{CacheName}, + }, + Evictions: &view.View{ + Measure: CacheMeasures.Evictions, + Aggregation: view.LastValue(), + TagKeys: []tag.Key{CacheName}, + }, + CostAdded: &view.View{ + Measure: CacheMeasures.CostAdded, + Aggregation: view.LastValue(), + TagKeys: []tag.Key{CacheName}, + }, + CostEvicted: &view.View{ + Measure: CacheMeasures.CostEvicted, + Aggregation: view.LastValue(), + TagKeys: []tag.Key{CacheName}, + }, + SetsDropped: &view.View{ + Measure: CacheMeasures.SetsDropped, + Aggregation: view.LastValue(), + TagKeys: []tag.Key{CacheName}, + }, + SetsRejected: &view.View{ + Measure: CacheMeasures.SetsRejected, + Aggregation: view.LastValue(), + TagKeys: []tag.Key{CacheName}, + }, + QueriesDropped: &view.View{ + Measure: CacheMeasures.QueriesDropped, + Aggregation: 
view.LastValue(), + TagKeys: []tag.Key{CacheName}, + }, +} + +// DefaultViews exports all default views for this package. +var DefaultViews = []*view.View{ + CacheViews.HitRatio, + CacheViews.Hits, + CacheViews.Misses, + CacheViews.Entries, + CacheViews.QueriesServed, + CacheViews.Adds, + CacheViews.Updates, + CacheViews.Evictions, + CacheViews.CostAdded, + CacheViews.CostEvicted, + CacheViews.SetsDropped, + CacheViews.SetsRejected, + CacheViews.QueriesDropped, +} diff --git a/blockstore/splitstore/markset.go b/blockstore/splitstore/markset.go new file mode 100644 index 00000000000..ef14a2fc668 --- /dev/null +++ b/blockstore/splitstore/markset.go @@ -0,0 +1,38 @@ +package splitstore + +import ( + "path/filepath" + + "golang.org/x/xerrors" + + cid "github.com/ipfs/go-cid" +) + +// MarkSet is a utility to keep track of seen CID, and later query for them. +// +// * If the expected dataset is large, it can be backed by a datastore (e.g. bbolt). +// * If a probabilistic result is acceptable, it can be backed by a bloom filter (default). +type MarkSet interface { + Mark(cid.Cid) error + Has(cid.Cid) (bool, error) + Close() error +} + +// markBytes is deliberately a non-nil empty byte slice for serialization. 
+var markBytes = []byte{} + +type MarkSetEnv interface { + Create(name string, sizeHint int64) (MarkSet, error) + Close() error +} + +func OpenMarkSetEnv(path string, mtype string) (MarkSetEnv, error) { + switch mtype { + case "", "bloom": + return NewBloomMarkSetEnv() + case "bolt": + return NewBoltMarkSetEnv(filepath.Join(path, "markset.bolt")) + default: + return nil, xerrors.Errorf("unknown mark set type %s", mtype) + } +} diff --git a/blockstore/splitstore/markset_bloom.go b/blockstore/splitstore/markset_bloom.go new file mode 100644 index 00000000000..c213436c898 --- /dev/null +++ b/blockstore/splitstore/markset_bloom.go @@ -0,0 +1,77 @@ +package splitstore + +import ( + "crypto/rand" + "crypto/sha256" + + "golang.org/x/xerrors" + + bbloom "github.com/ipfs/bbloom" + cid "github.com/ipfs/go-cid" +) + +const ( + BloomFilterMinSize = 10_000_000 + BloomFilterProbability = 0.01 +) + +type BloomMarkSetEnv struct{} + +var _ MarkSetEnv = (*BloomMarkSetEnv)(nil) + +type BloomMarkSet struct { + salt []byte + bf *bbloom.Bloom +} + +var _ MarkSet = (*BloomMarkSet)(nil) + +func NewBloomMarkSetEnv() (*BloomMarkSetEnv, error) { + return &BloomMarkSetEnv{}, nil +} + +func (e *BloomMarkSetEnv) Create(name string, sizeHint int64) (MarkSet, error) { + size := int64(BloomFilterMinSize) + for size < sizeHint { + size += BloomFilterMinSize + } + + salt := make([]byte, 4) + _, err := rand.Read(salt) + if err != nil { + return nil, xerrors.Errorf("error reading salt: %w", err) + } + + bf, err := bbloom.New(float64(size), BloomFilterProbability) + if err != nil { + return nil, xerrors.Errorf("error creating bloom filter: %w", err) + } + + return &BloomMarkSet{salt: salt, bf: bf}, nil +} + +func (e *BloomMarkSetEnv) Close() error { + return nil +} + +func (s *BloomMarkSet) saltedKey(cid cid.Cid) []byte { + hash := cid.Hash() + key := make([]byte, len(s.salt)+len(hash)) + n := copy(key, s.salt) + copy(key[n:], hash) + rehash := sha256.Sum256(key) + return rehash[:] +} + +func (s 
*BloomMarkSet) Mark(cid cid.Cid) error { + s.bf.Add(s.saltedKey(cid)) + return nil +} + +func (s *BloomMarkSet) Has(cid cid.Cid) (bool, error) { + return s.bf.Has(s.saltedKey(cid)), nil +} + +func (s *BloomMarkSet) Close() error { + return nil +} diff --git a/blockstore/splitstore/markset_bolt.go b/blockstore/splitstore/markset_bolt.go new file mode 100644 index 00000000000..cab0dd74af9 --- /dev/null +++ b/blockstore/splitstore/markset_bolt.go @@ -0,0 +1,81 @@ +package splitstore + +import ( + "time" + + "golang.org/x/xerrors" + + cid "github.com/ipfs/go-cid" + bolt "go.etcd.io/bbolt" +) + +type BoltMarkSetEnv struct { + db *bolt.DB +} + +var _ MarkSetEnv = (*BoltMarkSetEnv)(nil) + +type BoltMarkSet struct { + db *bolt.DB + bucketId []byte +} + +var _ MarkSet = (*BoltMarkSet)(nil) + +func NewBoltMarkSetEnv(path string) (*BoltMarkSetEnv, error) { + db, err := bolt.Open(path, 0644, + &bolt.Options{ + Timeout: 1 * time.Second, + NoSync: true, + }) + if err != nil { + return nil, err + } + + return &BoltMarkSetEnv{db: db}, nil +} + +func (e *BoltMarkSetEnv) Create(name string, hint int64) (MarkSet, error) { + bucketId := []byte(name) + err := e.db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucketIfNotExists(bucketId) + if err != nil { + return xerrors.Errorf("error creating bolt db bucket %s: %w", name, err) + } + return nil + }) + + if err != nil { + return nil, err + } + + return &BoltMarkSet{db: e.db, bucketId: bucketId}, nil +} + +func (e *BoltMarkSetEnv) Close() error { + return e.db.Close() +} + +func (s *BoltMarkSet) Mark(cid cid.Cid) error { + return s.db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket(s.bucketId) + return b.Put(cid.Hash(), markBytes) + }) +} + +func (s *BoltMarkSet) Has(cid cid.Cid) (result bool, err error) { + err = s.db.View(func(tx *bolt.Tx) error { + b := tx.Bucket(s.bucketId) + v := b.Get(cid.Hash()) + result = v != nil + return nil + }) + + return result, err +} + +func (s *BoltMarkSet) Close() error { + return 
s.db.Update(func(tx *bolt.Tx) error { + return tx.DeleteBucket(s.bucketId) + }) +} diff --git a/blockstore/splitstore/markset_test.go b/blockstore/splitstore/markset_test.go new file mode 100644 index 00000000000..367ab8d06e7 --- /dev/null +++ b/blockstore/splitstore/markset_test.go @@ -0,0 +1,138 @@ +package splitstore + +import ( + "io/ioutil" + "testing" + + cid "github.com/ipfs/go-cid" + "github.com/multiformats/go-multihash" +) + +func TestBoltMarkSet(t *testing.T) { + testMarkSet(t, "bolt") +} + +func TestBloomMarkSet(t *testing.T) { + testMarkSet(t, "bloom") +} + +func testMarkSet(t *testing.T, lsType string) { + t.Helper() + + path, err := ioutil.TempDir("", "sweep-test.*") + if err != nil { + t.Fatal(err) + } + + env, err := OpenMarkSetEnv(path, lsType) + if err != nil { + t.Fatal(err) + } + defer env.Close() //nolint:errcheck + + hotSet, err := env.Create("hot", 0) + if err != nil { + t.Fatal(err) + } + + coldSet, err := env.Create("cold", 0) + if err != nil { + t.Fatal(err) + } + + makeCid := func(key string) cid.Cid { + h, err := multihash.Sum([]byte(key), multihash.SHA2_256, -1) + if err != nil { + t.Fatal(err) + } + + return cid.NewCidV1(cid.Raw, h) + } + + mustHave := func(s MarkSet, cid cid.Cid) { + has, err := s.Has(cid) + if err != nil { + t.Fatal(err) + } + + if !has { + t.Fatal("mark not found") + } + } + + mustNotHave := func(s MarkSet, cid cid.Cid) { + has, err := s.Has(cid) + if err != nil { + t.Fatal(err) + } + + if has { + t.Fatal("unexpected mark") + } + } + + k1 := makeCid("a") + k2 := makeCid("b") + k3 := makeCid("c") + k4 := makeCid("d") + + hotSet.Mark(k1) //nolint + hotSet.Mark(k2) //nolint + coldSet.Mark(k3) //nolint + + mustHave(hotSet, k1) + mustHave(hotSet, k2) + mustNotHave(hotSet, k3) + mustNotHave(hotSet, k4) + + mustNotHave(coldSet, k1) + mustNotHave(coldSet, k2) + mustHave(coldSet, k3) + mustNotHave(coldSet, k4) + + // close them and reopen to redo the dance + + err = hotSet.Close() + if err != nil { + t.Fatal(err) + } + + 
err = coldSet.Close() + if err != nil { + t.Fatal(err) + } + + hotSet, err = env.Create("hot", 0) + if err != nil { + t.Fatal(err) + } + + coldSet, err = env.Create("cold", 0) + if err != nil { + t.Fatal(err) + } + + hotSet.Mark(k3) //nolint + hotSet.Mark(k4) //nolint + coldSet.Mark(k1) //nolint + + mustNotHave(hotSet, k1) + mustNotHave(hotSet, k2) + mustHave(hotSet, k3) + mustHave(hotSet, k4) + + mustHave(coldSet, k1) + mustNotHave(coldSet, k2) + mustNotHave(coldSet, k3) + mustNotHave(coldSet, k4) + + err = hotSet.Close() + if err != nil { + t.Fatal(err) + } + + err = coldSet.Close() + if err != nil { + t.Fatal(err) + } +} diff --git a/blockstore/splitstore/splitstore.go b/blockstore/splitstore/splitstore.go new file mode 100644 index 00000000000..fb3e2880315 --- /dev/null +++ b/blockstore/splitstore/splitstore.go @@ -0,0 +1,1069 @@ +package splitstore + +import ( + "context" + "encoding/binary" + "errors" + "sync" + "sync/atomic" + "time" + + "go.uber.org/multierr" + "golang.org/x/xerrors" + + blocks "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" + dstore "github.com/ipfs/go-datastore" + logging "github.com/ipfs/go-log/v2" + + "github.com/filecoin-project/go-state-types/abi" + + bstore "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/metrics" + + "go.opencensus.io/stats" +) + +var ( + // CompactionThreshold is the number of epochs that need to have elapsed + // from the previously compacted epoch to trigger a new compaction. 
+ // + // |················· CompactionThreshold ··················| + // | | + // =======‖≡≡≡≡≡≡≡‖-----------------------|------------------------» + // | | | chain --> ↑__ current epoch + // |·······| | + // ↑________ CompactionCold ↑________ CompactionBoundary + // + // === :: cold (already archived) + // ≡≡≡ :: to be archived in this compaction + // --- :: hot + CompactionThreshold = 5 * build.Finality + + // CompactionCold is the number of epochs that will be archived to the + // cold store on compaction. See diagram on CompactionThreshold for a + // better sense. + CompactionCold = build.Finality + + // CompactionBoundary is the number of epochs from the current epoch at which + // we will walk the chain for live objects + CompactionBoundary = 2 * build.Finality +) + +var ( + // baseEpochKey stores the base epoch (last compaction epoch) in the + // metadata store. + baseEpochKey = dstore.NewKey("/splitstore/baseEpoch") + + // warmupEpochKey stores whether a hot store warmup has been performed. + // On first start, the splitstore will walk the state tree and will copy + // all active blocks into the hotstore. + warmupEpochKey = dstore.NewKey("/splitstore/warmupEpoch") + + // markSetSizeKey stores the current estimate for the mark set size. + // this is first computed at warmup and updated in every compaction + markSetSizeKey = dstore.NewKey("/splitstore/markSetSize") + + log = logging.Logger("splitstore") +) + +const ( + batchSize = 16384 + + defaultColdPurgeSize = 7_000_000 + defaultDeadPurgeSize = 1_000_000 +) + +type Config struct { + // TrackingStore is the type of tracking store to use. + // + // Supported values are: "bolt" (default if omitted), "mem" (for tests and readonly access). + TrackingStoreType string + + // MarkSetType is the type of mark set to use. + // + // Supported values are: "bloom" (default if omitted), "bolt". 
+ MarkSetType string + // perform full reachability analysis (expensive) for compaction + // You should enable this option if you plan to use the splitstore without a backing coldstore + EnableFullCompaction bool + // EXPERIMENTAL enable pruning of unreachable objects. + // This has not been sufficiently tested yet; only enable if you know what you are doing. + // Only applies if you enable full compaction. + EnableGC bool + // full archival nodes should enable this if EnableFullCompaction is enabled + // do NOT enable this if you synced from a snapshot. + // Only applies if you enabled full compaction + Archival bool +} + +// ChainAccessor allows the Splitstore to access the chain. It will most likely +// be a ChainStore at runtime. +type ChainAccessor interface { + GetTipsetByHeight(context.Context, abi.ChainEpoch, *types.TipSet, bool) (*types.TipSet, error) + GetHeaviestTipSet() *types.TipSet + SubscribeHeadChanges(change func(revert []*types.TipSet, apply []*types.TipSet) error) + WalkSnapshot(context.Context, *types.TipSet, abi.ChainEpoch, bool, bool, func(cid.Cid) error) error +} + +type SplitStore struct { + compacting int32 // compaction (or warm up) in progress + critsection int32 // compaction critical section + closing int32 // the split store is closing + + fullCompaction bool + enableGC bool + skipOldMsgs bool + skipMsgReceipts bool + + baseEpoch abi.ChainEpoch + warmupEpoch abi.ChainEpoch + + coldPurgeSize int + deadPurgeSize int + + mx sync.Mutex + curTs *types.TipSet + + chain ChainAccessor + ds dstore.Datastore + hot bstore.Blockstore + cold bstore.Blockstore + tracker TrackingStore + + env MarkSetEnv + + markSetSize int64 +} + +var _ bstore.Blockstore = (*SplitStore)(nil) + +// Open opens an existing splitstore, or creates a new splitstore. The splitstore +// is backed by the provided hot and cold stores. The returned SplitStore MUST be +// attached to the ChainStore with Start in order to trigger compaction. 
+func Open(path string, ds dstore.Datastore, hot, cold bstore.Blockstore, cfg *Config) (*SplitStore, error) { + // the tracking store + tracker, err := OpenTrackingStore(path, cfg.TrackingStoreType) + if err != nil { + return nil, err + } + + // the markset env + env, err := OpenMarkSetEnv(path, cfg.MarkSetType) + if err != nil { + _ = tracker.Close() + return nil, err + } + + // and now we can make a SplitStore + ss := &SplitStore{ + ds: ds, + hot: hot, + cold: cold, + tracker: tracker, + env: env, + + fullCompaction: cfg.EnableFullCompaction, + enableGC: cfg.EnableGC, + skipOldMsgs: !(cfg.EnableFullCompaction && cfg.Archival), + skipMsgReceipts: !(cfg.EnableFullCompaction && cfg.Archival), + + coldPurgeSize: defaultColdPurgeSize, + } + + if cfg.EnableGC { + ss.deadPurgeSize = defaultDeadPurgeSize + } + + return ss, nil +} + +// Blockstore interface +func (s *SplitStore) DeleteBlock(_ cid.Cid) error { + // afaict we don't seem to be using this method, so it's not implemented + return errors.New("DeleteBlock not implemented on SplitStore; don't do this Luke!") //nolint +} + +func (s *SplitStore) DeleteMany(_ []cid.Cid) error { + // afaict we don't seem to be using this method, so it's not implemented + return errors.New("DeleteMany not implemented on SplitStore; don't do this Luke!") //nolint +} + +func (s *SplitStore) Has(cid cid.Cid) (bool, error) { + has, err := s.hot.Has(cid) + + if err != nil || has { + return has, err + } + + return s.cold.Has(cid) +} + +func (s *SplitStore) Get(cid cid.Cid) (blocks.Block, error) { + blk, err := s.hot.Get(cid) + + switch err { + case nil: + return blk, nil + + case bstore.ErrNotFound: + blk, err = s.cold.Get(cid) + if err == nil { + stats.Record(context.Background(), metrics.SplitstoreMiss.M(1)) + } + return blk, err + + default: + return nil, err + } +} + +func (s *SplitStore) GetSize(cid cid.Cid) (int, error) { + size, err := s.hot.GetSize(cid) + + switch err { + case nil: + return size, nil + + case bstore.ErrNotFound: + 
size, err = s.cold.GetSize(cid) + if err == nil { + stats.Record(context.Background(), metrics.SplitstoreMiss.M(1)) + } + return size, err + + default: + return 0, err + } +} + +func (s *SplitStore) Put(blk blocks.Block) error { + s.mx.Lock() + if s.curTs == nil { + s.mx.Unlock() + return s.cold.Put(blk) + } + + epoch := s.curTs.Height() + s.mx.Unlock() + + err := s.tracker.Put(blk.Cid(), epoch) + if err != nil { + log.Errorf("error tracking CID in hotstore: %s; falling back to coldstore", err) + return s.cold.Put(blk) + } + + return s.hot.Put(blk) +} + +func (s *SplitStore) PutMany(blks []blocks.Block) error { + s.mx.Lock() + if s.curTs == nil { + s.mx.Unlock() + return s.cold.PutMany(blks) + } + + epoch := s.curTs.Height() + s.mx.Unlock() + + batch := make([]cid.Cid, 0, len(blks)) + for _, blk := range blks { + batch = append(batch, blk.Cid()) + } + + err := s.tracker.PutBatch(batch, epoch) + if err != nil { + log.Errorf("error tracking CIDs in hotstore: %s; falling back to coldstore", err) + return s.cold.PutMany(blks) + } + + return s.hot.PutMany(blks) +} + +func (s *SplitStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + ctx, cancel := context.WithCancel(ctx) + + chHot, err := s.hot.AllKeysChan(ctx) + if err != nil { + cancel() + return nil, err + } + + chCold, err := s.cold.AllKeysChan(ctx) + if err != nil { + cancel() + return nil, err + } + + ch := make(chan cid.Cid) + go func() { + defer cancel() + defer close(ch) + + for _, in := range []<-chan cid.Cid{chHot, chCold} { + for cid := range in { + select { + case ch <- cid: + case <-ctx.Done(): + return + } + } + } + }() + + return ch, nil +} + +func (s *SplitStore) HashOnRead(enabled bool) { + s.hot.HashOnRead(enabled) + s.cold.HashOnRead(enabled) +} + +func (s *SplitStore) View(cid cid.Cid, cb func([]byte) error) error { + err := s.hot.View(cid, cb) + switch err { + case bstore.ErrNotFound: + return s.cold.View(cid, cb) + + default: + return err + } +} + +// State tracking +func (s 
*SplitStore) Start(chain ChainAccessor) error { + s.chain = chain + s.curTs = chain.GetHeaviestTipSet() + + // load base epoch from metadata ds + // if none, then use current epoch because it's a fresh start + bs, err := s.ds.Get(baseEpochKey) + switch err { + case nil: + s.baseEpoch = bytesToEpoch(bs) + + case dstore.ErrNotFound: + if s.curTs == nil { + // this can happen in some tests + break + } + + err = s.setBaseEpoch(s.curTs.Height()) + if err != nil { + return xerrors.Errorf("error saving base epoch: %w", err) + } + + default: + return xerrors.Errorf("error loading base epoch: %w", err) + } + + // load warmup epoch from metadata ds + // if none, then the splitstore will warm up the hotstore at first head change notif + // by walking the current tipset + bs, err = s.ds.Get(warmupEpochKey) + switch err { + case nil: + s.warmupEpoch = bytesToEpoch(bs) + + case dstore.ErrNotFound: + default: + return xerrors.Errorf("error loading warmup epoch: %w", err) + } + + // load markSetSize from metadata ds + // if none, the splitstore will compute it during warmup and update in every compaction + bs, err = s.ds.Get(markSetSizeKey) + switch err { + case nil: + s.markSetSize = bytesToInt64(bs) + + case dstore.ErrNotFound: + default: + return xerrors.Errorf("error loading mark set size: %w", err) + } + + log.Infow("starting splitstore", "baseEpoch", s.baseEpoch, "warmupEpoch", s.warmupEpoch) + + // watch the chain + chain.SubscribeHeadChanges(s.HeadChange) + + return nil +} + +func (s *SplitStore) Close() error { + atomic.StoreInt32(&s.closing, 1) + + if atomic.LoadInt32(&s.critsection) == 1 { + log.Warn("ongoing compaction in critical section; waiting for it to finish...") + for atomic.LoadInt32(&s.critsection) == 1 { + time.Sleep(time.Second) + } + } + + return multierr.Combine(s.tracker.Close(), s.env.Close()) +} + +func (s *SplitStore) HeadChange(_, apply []*types.TipSet) error { + s.mx.Lock() + curTs := apply[len(apply)-1] + epoch := curTs.Height() + s.curTs = curTs + 
s.mx.Unlock() + + if !atomic.CompareAndSwapInt32(&s.compacting, 0, 1) { + // we are currently compacting, do nothing and wait for the next head change + return nil + } + + if s.warmupEpoch == 0 { + // splitstore needs to warm up + go func() { + defer atomic.StoreInt32(&s.compacting, 0) + + log.Info("warming up hotstore") + start := time.Now() + + s.warmup(curTs) + + log.Infow("warm up done", "took", time.Since(start)) + }() + + return nil + } + + if epoch-s.baseEpoch > CompactionThreshold { + // it's time to compact + go func() { + defer atomic.StoreInt32(&s.compacting, 0) + + log.Info("compacting splitstore") + start := time.Now() + + s.compact(curTs) + + log.Infow("compaction done", "took", time.Since(start)) + }() + } else { + // no compaction necessary + atomic.StoreInt32(&s.compacting, 0) + } + + return nil +} + +func (s *SplitStore) warmup(curTs *types.TipSet) { + epoch := curTs.Height() + + batchHot := make([]blocks.Block, 0, batchSize) + batchSnoop := make([]cid.Cid, 0, batchSize) + + count := int64(0) + err := s.chain.WalkSnapshot(context.Background(), curTs, 1, s.skipOldMsgs, s.skipMsgReceipts, + func(cid cid.Cid) error { + count++ + + has, err := s.hot.Has(cid) + if err != nil { + return err + } + + if has { + return nil + } + + blk, err := s.cold.Get(cid) + if err != nil { + return err + } + + batchHot = append(batchHot, blk) + batchSnoop = append(batchSnoop, cid) + + if len(batchHot) == batchSize { + err = s.tracker.PutBatch(batchSnoop, epoch) + if err != nil { + return err + } + batchSnoop = batchSnoop[:0] + + err = s.hot.PutMany(batchHot) + if err != nil { + return err + } + batchHot = batchHot[:0] + } + + return nil + }) + + if err != nil { + log.Errorf("error warming up splitstore: %s", err) + return + } + + if len(batchHot) > 0 { + err = s.tracker.PutBatch(batchSnoop, epoch) + if err != nil { + log.Errorf("error warming up splitstore: %s", err) + return + } + + err = s.hot.PutMany(batchHot) + if err != nil { + log.Errorf("error warming up 
splitstore: %s", err) + return + } + } + + if count > s.markSetSize { + s.markSetSize = count + count>>2 // overestimate a bit + } + + // save the warmup epoch + s.warmupEpoch = epoch + err = s.ds.Put(warmupEpochKey, epochToBytes(epoch)) + if err != nil { + log.Errorf("error saving warmup epoch: %s", err) + } + + err = s.ds.Put(markSetSizeKey, int64ToBytes(s.markSetSize)) + if err != nil { + log.Errorf("error saving mark set size: %s", err) + } +} + +// Compaction/GC Algorithm +func (s *SplitStore) compact(curTs *types.TipSet) { + var err error + if s.markSetSize == 0 { + start := time.Now() + log.Info("estimating mark set size") + err = s.estimateMarkSetSize(curTs) + if err != nil { + log.Errorf("error estimating mark set size: %s; aborting compaction", err) + return + } + log.Infow("estimating mark set size done", "took", time.Since(start), "size", s.markSetSize) + } else { + log.Infow("current mark set size estimate", "size", s.markSetSize) + } + + start := time.Now() + if s.fullCompaction { + err = s.compactFull(curTs) + } else { + err = s.compactSimple(curTs) + } + took := time.Since(start).Milliseconds() + stats.Record(context.Background(), metrics.SplitstoreCompactionTimeSeconds.M(float64(took)/1e3)) + + if err != nil { + log.Errorf("COMPACTION ERROR: %s", err) + } +} + +func (s *SplitStore) estimateMarkSetSize(curTs *types.TipSet) error { + var count int64 + err := s.chain.WalkSnapshot(context.Background(), curTs, 1, s.skipOldMsgs, s.skipMsgReceipts, + func(cid cid.Cid) error { + count++ + return nil + }) + + if err != nil { + return err + } + + s.markSetSize = count + count>>2 // overestimate a bit + return nil +} + +func (s *SplitStore) compactSimple(curTs *types.TipSet) error { + coldEpoch := s.baseEpoch + CompactionCold + currentEpoch := curTs.Height() + boundaryEpoch := currentEpoch - CompactionBoundary + + log.Infow("running simple compaction", "currentEpoch", currentEpoch, "baseEpoch", s.baseEpoch, "coldEpoch", coldEpoch, "boundaryEpoch", 
boundaryEpoch) + + coldSet, err := s.env.Create("cold", s.markSetSize) + if err != nil { + return xerrors.Errorf("error creating mark set: %w", err) + } + defer coldSet.Close() //nolint:errcheck + + // 1. mark reachable cold objects by looking at the objects reachable only from the cold epoch + log.Infow("marking reachable cold blocks", "boundaryEpoch", boundaryEpoch) + startMark := time.Now() + + boundaryTs, err := s.chain.GetTipsetByHeight(context.Background(), boundaryEpoch, curTs, true) + if err != nil { + return xerrors.Errorf("error getting tipset at boundary epoch: %w", err) + } + + var count int64 + err = s.chain.WalkSnapshot(context.Background(), boundaryTs, 1, s.skipOldMsgs, s.skipMsgReceipts, + func(cid cid.Cid) error { + count++ + return coldSet.Mark(cid) + }) + + if err != nil { + return xerrors.Errorf("error marking cold blocks: %w", err) + } + + if count > s.markSetSize { + s.markSetSize = count + count>>2 // overestimate a bit + } + + log.Infow("marking done", "took", time.Since(startMark)) + + // 2. move cold unreachable objects to the coldstore + log.Info("collecting cold objects") + startCollect := time.Now() + + cold := make([]cid.Cid, 0, s.coldPurgeSize) + + // some stats for logging + var hotCnt, coldCnt int + + // 2.1 iterate through the tracking store and collect unreachable cold objects + err = s.tracker.ForEach(func(cid cid.Cid, writeEpoch abi.ChainEpoch) error { + // is the object still hot? 
+ if writeEpoch > coldEpoch { + // yes, stay in the hotstore + hotCnt++ + return nil + } + + // check whether it is reachable in the cold boundary + mark, err := coldSet.Has(cid) + if err != nil { + return xerrors.Errorf("error checkiing cold set for %s: %w", cid, err) + } + + if mark { + hotCnt++ + return nil + } + + // it's cold, mark it for move + cold = append(cold, cid) + coldCnt++ + return nil + }) + + if err != nil { + return xerrors.Errorf("error collecting cold objects: %w", err) + } + + if coldCnt > 0 { + s.coldPurgeSize = coldCnt + coldCnt>>2 // overestimate a bit + } + + log.Infow("collection done", "took", time.Since(startCollect)) + log.Infow("compaction stats", "hot", hotCnt, "cold", coldCnt) + stats.Record(context.Background(), metrics.SplitstoreCompactionHot.M(int64(hotCnt))) + stats.Record(context.Background(), metrics.SplitstoreCompactionCold.M(int64(coldCnt))) + + // Enter critical section + atomic.StoreInt32(&s.critsection, 1) + defer atomic.StoreInt32(&s.critsection, 0) + + // check to see if we are closing first; if that's the case just return + if atomic.LoadInt32(&s.closing) == 1 { + log.Info("splitstore is closing; aborting compaction") + return xerrors.Errorf("compaction aborted") + } + + // 2.2 copy the cold objects to the coldstore + log.Info("moving cold blocks to the coldstore") + startMove := time.Now() + err = s.moveColdBlocks(cold) + if err != nil { + return xerrors.Errorf("error moving cold blocks: %w", err) + } + log.Infow("moving done", "took", time.Since(startMove)) + + // 2.3 delete cold objects from the hotstore + log.Info("purging cold objects from the hotstore") + startPurge := time.Now() + err = s.purgeBlocks(cold) + if err != nil { + return xerrors.Errorf("error purging cold blocks: %w", err) + } + log.Infow("purging cold from hotstore done", "took", time.Since(startPurge)) + + // 2.4 remove the tracker tracking for cold objects + startPurge = time.Now() + log.Info("purging cold objects from tracker") + err = 
s.purgeTracking(cold) + if err != nil { + return xerrors.Errorf("error purging tracking for cold blocks: %w", err) + } + log.Infow("purging cold from tracker done", "took", time.Since(startPurge)) + + // we are done; do some housekeeping + err = s.tracker.Sync() + if err != nil { + return xerrors.Errorf("error syncing tracker: %w", err) + } + + s.gcHotstore() + + err = s.setBaseEpoch(coldEpoch) + if err != nil { + return xerrors.Errorf("error saving base epoch: %w", err) + } + + err = s.ds.Put(markSetSizeKey, int64ToBytes(s.markSetSize)) + if err != nil { + return xerrors.Errorf("error saving mark set size: %w", err) + } + + return nil +} + +func (s *SplitStore) moveColdBlocks(cold []cid.Cid) error { + batch := make([]blocks.Block, 0, batchSize) + + for _, cid := range cold { + blk, err := s.hot.Get(cid) + if err != nil { + if err == dstore.ErrNotFound { + // this can happen if the node is killed after we have deleted the block from the hotstore + // but before we have deleted it from the tracker; just delete the tracker. 
+ err = s.tracker.Delete(cid) + if err != nil { + return xerrors.Errorf("error deleting unreachable cid %s from tracker: %w", cid, err) + } + } else { + return xerrors.Errorf("error retrieving tracked block %s from hotstore: %w", cid, err) + } + + continue + } + + batch = append(batch, blk) + if len(batch) == batchSize { + err = s.cold.PutMany(batch) + if err != nil { + return xerrors.Errorf("error putting batch to coldstore: %w", err) + } + batch = batch[:0] + } + } + + if len(batch) > 0 { + err := s.cold.PutMany(batch) + if err != nil { + return xerrors.Errorf("error putting cold to coldstore: %w", err) + } + } + + return nil +} + +func (s *SplitStore) purgeBatch(cids []cid.Cid, deleteBatch func([]cid.Cid) error) error { + if len(cids) == 0 { + return nil + } + + // don't delete one giant batch of 7M objects, but rather do smaller batches + done := false + for i := 0; !done; i++ { + start := i * batchSize + end := start + batchSize + if end >= len(cids) { + end = len(cids) + done = true + } + + err := deleteBatch(cids[start:end]) + if err != nil { + return xerrors.Errorf("error deleting batch: %w", err) + } + } + + return nil +} + +func (s *SplitStore) purgeBlocks(cids []cid.Cid) error { + return s.purgeBatch(cids, s.hot.DeleteMany) +} + +func (s *SplitStore) purgeTracking(cids []cid.Cid) error { + return s.purgeBatch(cids, s.tracker.DeleteBatch) +} + +func (s *SplitStore) gcHotstore() { + if gc, ok := s.hot.(interface{ CollectGarbage() error }); ok { + log.Infof("garbage collecting hotstore") + startGC := time.Now() + err := gc.CollectGarbage() + if err != nil { + log.Warnf("error garbage collecting hotstore: %s", err) + } else { + log.Infow("garbage collection done", "took", time.Since(startGC)) + } + } +} + +func (s *SplitStore) compactFull(curTs *types.TipSet) error { + currentEpoch := curTs.Height() + coldEpoch := s.baseEpoch + CompactionCold + boundaryEpoch := currentEpoch - CompactionBoundary + + log.Infow("running full compaction", "currentEpoch", 
currentEpoch, "baseEpoch", s.baseEpoch, "coldEpoch", coldEpoch, "boundaryEpoch", boundaryEpoch) + + // create two mark sets, one for marking the cold finality region + // and one for marking the hot region + hotSet, err := s.env.Create("hot", s.markSetSize) + if err != nil { + return xerrors.Errorf("error creating hot mark set: %w", err) + } + defer hotSet.Close() //nolint:errcheck + + coldSet, err := s.env.Create("cold", s.markSetSize) + if err != nil { + return xerrors.Errorf("error creating cold mark set: %w", err) + } + defer coldSet.Close() //nolint:errcheck + + // Phase 1: marking + log.Info("marking live blocks") + startMark := time.Now() + + // Phase 1a: mark all reachable CIDs in the hot range + boundaryTs, err := s.chain.GetTipsetByHeight(context.Background(), boundaryEpoch, curTs, true) + if err != nil { + return xerrors.Errorf("error getting tipset at boundary epoch: %w", err) + } + + count := int64(0) + err = s.chain.WalkSnapshot(context.Background(), boundaryTs, boundaryEpoch-coldEpoch, s.skipOldMsgs, s.skipMsgReceipts, + func(cid cid.Cid) error { + count++ + return hotSet.Mark(cid) + }) + + if err != nil { + return xerrors.Errorf("error marking hot blocks: %w", err) + } + + if count > s.markSetSize { + s.markSetSize = count + count>>2 // overestimate a bit + } + + // Phase 1b: mark all reachable CIDs in the cold range + coldTs, err := s.chain.GetTipsetByHeight(context.Background(), coldEpoch, curTs, true) + if err != nil { + return xerrors.Errorf("error getting tipset at cold epoch: %w", err) + } + + count = 0 + err = s.chain.WalkSnapshot(context.Background(), coldTs, CompactionCold, s.skipOldMsgs, s.skipMsgReceipts, + func(cid cid.Cid) error { + count++ + return coldSet.Mark(cid) + }) + + if err != nil { + return xerrors.Errorf("error marking cold blocks: %w", err) + } + + if count > s.markSetSize { + s.markSetSize = count + count>>2 // overestimate a bit + } + + log.Infow("marking done", "took", time.Since(startMark)) + + // Phase 2: sweep cold 
objects: + // - If a cold object is reachable in the hot range, it stays in the hotstore. + // - If a cold object is reachable in the cold range, it is moved to the coldstore. + // - If a cold object is unreachable, it is deleted if GC is enabled, otherwise moved to the coldstore. + log.Info("collecting cold objects") + startCollect := time.Now() + + // some stats for logging + var hotCnt, coldCnt, deadCnt int + + cold := make([]cid.Cid, 0, s.coldPurgeSize) + dead := make([]cid.Cid, 0, s.deadPurgeSize) + + // 2.1 iterate through the tracker and collect cold and dead objects + err = s.tracker.ForEach(func(cid cid.Cid, wrEpoch abi.ChainEpoch) error { + // is the object still hot? + if wrEpoch > coldEpoch { + // yes, stay in the hotstore + hotCnt++ + return nil + } + + // the object is cold -- check whether it is reachable in the hot range + mark, err := hotSet.Has(cid) + if err != nil { + return xerrors.Errorf("error checking live mark for %s: %w", cid, err) + } + + if mark { + // the object is reachable in the hot range, stay in the hotstore + hotCnt++ + return nil + } + + // check whether it is reachable in the cold range + mark, err = coldSet.Has(cid) + if err != nil { + return xerrors.Errorf("error checkiing cold set for %s: %w", cid, err) + } + + if s.enableGC { + if mark { + // the object is reachable in the cold range, move it to the cold store + cold = append(cold, cid) + coldCnt++ + } else { + // the object is dead and will be deleted + dead = append(dead, cid) + deadCnt++ + } + } else { + // if GC is disabled, we move both cold and dead objects to the coldstore + cold = append(cold, cid) + if mark { + coldCnt++ + } else { + deadCnt++ + } + } + + return nil + }) + + if err != nil { + return xerrors.Errorf("error collecting cold objects: %w", err) + } + + if coldCnt > 0 { + s.coldPurgeSize = coldCnt + coldCnt>>2 // overestimate a bit + } + if deadCnt > 0 { + s.deadPurgeSize = deadCnt + deadCnt>>2 // overestimate a bit + } + + log.Infow("collection done", 
"took", time.Since(startCollect)) + log.Infow("compaction stats", "hot", hotCnt, "cold", coldCnt, "dead", deadCnt) + stats.Record(context.Background(), metrics.SplitstoreCompactionHot.M(int64(hotCnt))) + stats.Record(context.Background(), metrics.SplitstoreCompactionCold.M(int64(coldCnt))) + stats.Record(context.Background(), metrics.SplitstoreCompactionDead.M(int64(deadCnt))) + + // Enter critical section + atomic.StoreInt32(&s.critsection, 1) + defer atomic.StoreInt32(&s.critsection, 0) + + // check to see if we are closing first; if that's the case just return + if atomic.LoadInt32(&s.closing) == 1 { + log.Info("splitstore is closing; aborting compaction") + return xerrors.Errorf("compaction aborted") + } + + // 2.2 copy the cold objects to the coldstore + log.Info("moving cold objects to the coldstore") + startMove := time.Now() + err = s.moveColdBlocks(cold) + if err != nil { + return xerrors.Errorf("error moving cold blocks: %w", err) + } + log.Infow("moving done", "took", time.Since(startMove)) + + // 2.3 delete cold objects from the hotstore + log.Info("purging cold objects from the hotstore") + startPurge := time.Now() + err = s.purgeBlocks(cold) + if err != nil { + return xerrors.Errorf("error purging cold blocks: %w", err) + } + log.Infow("purging cold from hotstore done", "took", time.Since(startPurge)) + + // 2.4 remove the tracker tracking for cold objects + startPurge = time.Now() + log.Info("purging cold objects from tracker") + err = s.purgeTracking(cold) + if err != nil { + return xerrors.Errorf("error purging tracking for cold blocks: %w", err) + } + log.Infow("purging cold from tracker done", "took", time.Since(startPurge)) + + // 3. 
if we have dead objects, delete them from the hotstore and remove the tracking + if len(dead) > 0 { + log.Info("deleting dead objects") + err = s.purgeBlocks(dead) + if err != nil { + return xerrors.Errorf("error purging dead blocks: %w", err) + } + + // remove the tracker tracking + startPurge := time.Now() + log.Info("purging dead objects from tracker") + err = s.purgeTracking(dead) + if err != nil { + return xerrors.Errorf("error purging tracking for dead blocks: %w", err) + } + log.Infow("purging dead from tracker done", "took", time.Since(startPurge)) + } + + // we are done; do some housekeeping + err = s.tracker.Sync() + if err != nil { + return xerrors.Errorf("error syncing tracker: %w", err) + } + + s.gcHotstore() + + err = s.setBaseEpoch(coldEpoch) + if err != nil { + return xerrors.Errorf("error saving base epoch: %w", err) + } + + err = s.ds.Put(markSetSizeKey, int64ToBytes(s.markSetSize)) + if err != nil { + return xerrors.Errorf("error saving mark set size: %w", err) + } + + return nil +} + +func (s *SplitStore) setBaseEpoch(epoch abi.ChainEpoch) error { + s.baseEpoch = epoch + // write to datastore + return s.ds.Put(baseEpochKey, epochToBytes(epoch)) +} + +func epochToBytes(epoch abi.ChainEpoch) []byte { + return uint64ToBytes(uint64(epoch)) +} + +func bytesToEpoch(buf []byte) abi.ChainEpoch { + return abi.ChainEpoch(bytesToUint64(buf)) +} + +func int64ToBytes(i int64) []byte { + return uint64ToBytes(uint64(i)) +} + +func bytesToInt64(buf []byte) int64 { + return int64(bytesToUint64(buf)) +} + +func uint64ToBytes(i uint64) []byte { + buf := make([]byte, 16) + n := binary.PutUvarint(buf, i) + return buf[:n] +} + +func bytesToUint64(buf []byte) uint64 { + i, _ := binary.Uvarint(buf) + return i +} diff --git a/blockstore/splitstore/splitstore_test.go b/blockstore/splitstore/splitstore_test.go new file mode 100644 index 00000000000..e5314b80f3b --- /dev/null +++ b/blockstore/splitstore/splitstore_test.go @@ -0,0 +1,255 @@ +package splitstore + +import ( + 
"context" + "fmt" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/types/mock" + + cid "github.com/ipfs/go-cid" + datastore "github.com/ipfs/go-datastore" + dssync "github.com/ipfs/go-datastore/sync" + logging "github.com/ipfs/go-log/v2" +) + +func init() { + CompactionThreshold = 5 + CompactionCold = 1 + CompactionBoundary = 2 + logging.SetLogLevel("splitstore", "DEBUG") +} + +func testSplitStore(t *testing.T, cfg *Config) { + chain := &mockChain{} + // genesis + genBlock := mock.MkBlock(nil, 0, 0) + genTs := mock.TipSet(genBlock) + chain.push(genTs) + + // the myriads of stores + ds := dssync.MutexWrap(datastore.NewMapDatastore()) + hot := blockstore.NewMemorySync() + cold := blockstore.NewMemorySync() + + // put the genesis block to cold store + blk, err := genBlock.ToStorageBlock() + if err != nil { + t.Fatal(err) + } + + err = cold.Put(blk) + if err != nil { + t.Fatal(err) + } + + // open the splitstore + ss, err := Open("", ds, hot, cold, cfg) + if err != nil { + t.Fatal(err) + } + defer ss.Close() //nolint + + err = ss.Start(chain) + if err != nil { + t.Fatal(err) + } + + // make some tipsets, but not enough to cause compaction + mkBlock := func(curTs *types.TipSet, i int) *types.TipSet { + blk := mock.MkBlock(curTs, uint64(i), uint64(i)) + sblk, err := blk.ToStorageBlock() + if err != nil { + t.Fatal(err) + } + err = ss.Put(sblk) + if err != nil { + t.Fatal(err) + } + ts := mock.TipSet(blk) + chain.push(ts) + + return ts + } + + mkGarbageBlock := func(curTs *types.TipSet, i int) { + blk := mock.MkBlock(curTs, uint64(i), uint64(i)) + sblk, err := blk.ToStorageBlock() + if err != nil { + t.Fatal(err) + } + err = ss.Put(sblk) + if err != nil { + t.Fatal(err) + } + } + + waitForCompaction := func() { + for atomic.LoadInt32(&ss.compacting) == 1 { + time.Sleep(100 * 
time.Millisecond) + } + } + + curTs := genTs + for i := 1; i < 5; i++ { + curTs = mkBlock(curTs, i) + waitForCompaction() + } + + mkGarbageBlock(genTs, 1) + + // count objects in the cold and hot stores + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + countBlocks := func(bs blockstore.Blockstore) int { + count := 0 + ch, err := bs.AllKeysChan(ctx) + if err != nil { + t.Fatal(err) + } + for range ch { + count++ + } + return count + } + + coldCnt := countBlocks(cold) + hotCnt := countBlocks(hot) + + if coldCnt != 1 { + t.Errorf("expected %d blocks, but got %d", 1, coldCnt) + } + + if hotCnt != 5 { + t.Errorf("expected %d blocks, but got %d", 5, hotCnt) + } + + // trigger a compaction + for i := 5; i < 10; i++ { + curTs = mkBlock(curTs, i) + waitForCompaction() + } + + coldCnt = countBlocks(cold) + hotCnt = countBlocks(hot) + + if !cfg.EnableFullCompaction { + if coldCnt != 5 { + t.Errorf("expected %d cold blocks, but got %d", 5, coldCnt) + } + + if hotCnt != 5 { + t.Errorf("expected %d hot blocks, but got %d", 5, hotCnt) + } + } + + if cfg.EnableFullCompaction && !cfg.EnableGC { + if coldCnt != 3 { + t.Errorf("expected %d cold blocks, but got %d", 3, coldCnt) + } + + if hotCnt != 7 { + t.Errorf("expected %d hot blocks, but got %d", 7, hotCnt) + } + } + + if cfg.EnableFullCompaction && cfg.EnableGC { + if coldCnt != 2 { + t.Errorf("expected %d cold blocks, but got %d", 2, coldCnt) + } + + if hotCnt != 7 { + t.Errorf("expected %d hot blocks, but got %d", 7, hotCnt) + } + } +} + +func TestSplitStoreSimpleCompaction(t *testing.T) { + testSplitStore(t, &Config{TrackingStoreType: "mem"}) +} + +func TestSplitStoreFullCompactionWithoutGC(t *testing.T) { + testSplitStore(t, &Config{ + TrackingStoreType: "mem", + EnableFullCompaction: true, + }) +} + +func TestSplitStoreFullCompactionWithGC(t *testing.T) { + testSplitStore(t, &Config{ + TrackingStoreType: "mem", + EnableFullCompaction: true, + EnableGC: true, + }) +} + +type mockChain struct { + 
sync.Mutex + tipsets []*types.TipSet + listener func(revert []*types.TipSet, apply []*types.TipSet) error +} + +func (c *mockChain) push(ts *types.TipSet) { + c.Lock() + c.tipsets = append(c.tipsets, ts) + c.Unlock() + + if c.listener != nil { + err := c.listener(nil, []*types.TipSet{ts}) + if err != nil { + log.Errorf("mockchain: error dispatching listener: %s", err) + } + } +} + +func (c *mockChain) GetTipsetByHeight(_ context.Context, epoch abi.ChainEpoch, _ *types.TipSet, _ bool) (*types.TipSet, error) { + c.Lock() + defer c.Unlock() + + iEpoch := int(epoch) + if iEpoch > len(c.tipsets) { + return nil, fmt.Errorf("bad epoch %d", epoch) + } + + return c.tipsets[iEpoch-1], nil +} + +func (c *mockChain) GetHeaviestTipSet() *types.TipSet { + c.Lock() + defer c.Unlock() + + return c.tipsets[len(c.tipsets)-1] +} + +func (c *mockChain) SubscribeHeadChanges(change func(revert []*types.TipSet, apply []*types.TipSet) error) { + c.listener = change +} + +func (c *mockChain) WalkSnapshot(_ context.Context, ts *types.TipSet, epochs abi.ChainEpoch, _ bool, _ bool, f func(cid.Cid) error) error { + c.Lock() + defer c.Unlock() + + start := int(ts.Height()) - 1 + end := start - int(epochs) + if end < 0 { + end = -1 + } + for i := start; i > end; i-- { + ts := c.tipsets[i] + for _, cid := range ts.Cids() { + err := f(cid) + if err != nil { + return err + } + } + } + + return nil +} diff --git a/blockstore/splitstore/tracking.go b/blockstore/splitstore/tracking.go new file mode 100644 index 00000000000..d57fd45ef6a --- /dev/null +++ b/blockstore/splitstore/tracking.go @@ -0,0 +1,109 @@ +package splitstore + +import ( + "path/filepath" + "sync" + + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-state-types/abi" + cid "github.com/ipfs/go-cid" +) + +// TrackingStore is a persistent store that tracks blocks that are added +// to the hotstore, tracking the epoch at which they are written. 
+type TrackingStore interface { + Put(cid.Cid, abi.ChainEpoch) error + PutBatch([]cid.Cid, abi.ChainEpoch) error + Get(cid.Cid) (abi.ChainEpoch, error) + Delete(cid.Cid) error + DeleteBatch([]cid.Cid) error + ForEach(func(cid.Cid, abi.ChainEpoch) error) error + Sync() error + Close() error +} + +// OpenTrackingStore opens a tracking store of the specified type in the +// specified path. +func OpenTrackingStore(path string, ttype string) (TrackingStore, error) { + switch ttype { + case "", "bolt": + return OpenBoltTrackingStore(filepath.Join(path, "tracker.bolt")) + case "mem": + return NewMemTrackingStore(), nil + default: + return nil, xerrors.Errorf("unknown tracking store type %s", ttype) + } +} + +// NewMemTrackingStore creates an in-memory tracking store. +// This is only useful for test or situations where you don't want to open the +// real tracking store (eg concurrent read only access on a node's datastore) +func NewMemTrackingStore() *MemTrackingStore { + return &MemTrackingStore{tab: make(map[cid.Cid]abi.ChainEpoch)} +} + +// MemTrackingStore is a simple in-memory tracking store +type MemTrackingStore struct { + sync.Mutex + tab map[cid.Cid]abi.ChainEpoch +} + +var _ TrackingStore = (*MemTrackingStore)(nil) + +func (s *MemTrackingStore) Put(cid cid.Cid, epoch abi.ChainEpoch) error { + s.Lock() + defer s.Unlock() + s.tab[cid] = epoch + return nil +} + +func (s *MemTrackingStore) PutBatch(cids []cid.Cid, epoch abi.ChainEpoch) error { + s.Lock() + defer s.Unlock() + for _, cid := range cids { + s.tab[cid] = epoch + } + return nil +} + +func (s *MemTrackingStore) Get(cid cid.Cid) (abi.ChainEpoch, error) { + s.Lock() + defer s.Unlock() + epoch, ok := s.tab[cid] + if ok { + return epoch, nil + } + return 0, xerrors.Errorf("missing tracking epoch for %s", cid) +} + +func (s *MemTrackingStore) Delete(cid cid.Cid) error { + s.Lock() + defer s.Unlock() + delete(s.tab, cid) + return nil +} + +func (s *MemTrackingStore) DeleteBatch(cids []cid.Cid) error { + s.Lock() 
+ defer s.Unlock() + for _, cid := range cids { + delete(s.tab, cid) + } + return nil +} + +func (s *MemTrackingStore) ForEach(f func(cid.Cid, abi.ChainEpoch) error) error { + s.Lock() + defer s.Unlock() + for cid, epoch := range s.tab { + err := f(cid, epoch) + if err != nil { + return err + } + } + return nil +} + +func (s *MemTrackingStore) Sync() error { return nil } +func (s *MemTrackingStore) Close() error { return nil } diff --git a/blockstore/splitstore/tracking_bolt.go b/blockstore/splitstore/tracking_bolt.go new file mode 100644 index 00000000000..c5c451e1570 --- /dev/null +++ b/blockstore/splitstore/tracking_bolt.go @@ -0,0 +1,120 @@ +package splitstore + +import ( + "time" + + "golang.org/x/xerrors" + + cid "github.com/ipfs/go-cid" + bolt "go.etcd.io/bbolt" + + "github.com/filecoin-project/go-state-types/abi" +) + +type BoltTrackingStore struct { + db *bolt.DB + bucketId []byte +} + +var _ TrackingStore = (*BoltTrackingStore)(nil) + +func OpenBoltTrackingStore(path string) (*BoltTrackingStore, error) { + opts := &bolt.Options{ + Timeout: 1 * time.Second, + NoSync: true, + } + db, err := bolt.Open(path, 0644, opts) + if err != nil { + return nil, err + } + + bucketId := []byte("tracker") + err = db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucketIfNotExists(bucketId) + if err != nil { + return xerrors.Errorf("error creating bolt db bucket %s: %w", string(bucketId), err) + } + return nil + }) + + if err != nil { + _ = db.Close() + return nil, err + } + + return &BoltTrackingStore{db: db, bucketId: bucketId}, nil +} + +func (s *BoltTrackingStore) Put(cid cid.Cid, epoch abi.ChainEpoch) error { + val := epochToBytes(epoch) + return s.db.Batch(func(tx *bolt.Tx) error { + b := tx.Bucket(s.bucketId) + return b.Put(cid.Hash(), val) + }) +} + +func (s *BoltTrackingStore) PutBatch(cids []cid.Cid, epoch abi.ChainEpoch) error { + val := epochToBytes(epoch) + return s.db.Batch(func(tx *bolt.Tx) error { + b := tx.Bucket(s.bucketId) + for _, cid := range 
cids { + err := b.Put(cid.Hash(), val) + if err != nil { + return err + } + } + return nil + }) +} + +func (s *BoltTrackingStore) Get(cid cid.Cid) (epoch abi.ChainEpoch, err error) { + err = s.db.View(func(tx *bolt.Tx) error { + b := tx.Bucket(s.bucketId) + val := b.Get(cid.Hash()) + if val == nil { + return xerrors.Errorf("missing tracking epoch for %s", cid) + } + epoch = bytesToEpoch(val) + return nil + }) + return epoch, err +} + +func (s *BoltTrackingStore) Delete(cid cid.Cid) error { + return s.db.Batch(func(tx *bolt.Tx) error { + b := tx.Bucket(s.bucketId) + return b.Delete(cid.Hash()) + }) +} + +func (s *BoltTrackingStore) DeleteBatch(cids []cid.Cid) error { + return s.db.Batch(func(tx *bolt.Tx) error { + b := tx.Bucket(s.bucketId) + for _, cid := range cids { + err := b.Delete(cid.Hash()) + if err != nil { + return xerrors.Errorf("error deleting %s", cid) + } + } + return nil + }) +} + +func (s *BoltTrackingStore) ForEach(f func(cid.Cid, abi.ChainEpoch) error) error { + return s.db.View(func(tx *bolt.Tx) error { + b := tx.Bucket(s.bucketId) + return b.ForEach(func(k, v []byte) error { + cid := cid.NewCidV1(cid.Raw, k) + epoch := bytesToEpoch(v) + return f(cid, epoch) + }) + }) +} + +func (s *BoltTrackingStore) Sync() error { + return s.db.Sync() +} + +func (s *BoltTrackingStore) Close() error { + return s.db.Close() +} diff --git a/blockstore/splitstore/tracking_test.go b/blockstore/splitstore/tracking_test.go new file mode 100644 index 00000000000..afd475da5a5 --- /dev/null +++ b/blockstore/splitstore/tracking_test.go @@ -0,0 +1,130 @@ +package splitstore + +import ( + "io/ioutil" + "testing" + + cid "github.com/ipfs/go-cid" + "github.com/multiformats/go-multihash" + + "github.com/filecoin-project/go-state-types/abi" +) + +func TestBoltTrackingStore(t *testing.T) { + testTrackingStore(t, "bolt") +} + +func testTrackingStore(t *testing.T, tsType string) { + t.Helper() + + makeCid := func(key string) cid.Cid { + h, err := multihash.Sum([]byte(key), 
multihash.SHA2_256, -1) + if err != nil { + t.Fatal(err) + } + + return cid.NewCidV1(cid.Raw, h) + } + + mustHave := func(s TrackingStore, cid cid.Cid, epoch abi.ChainEpoch) { + val, err := s.Get(cid) + if err != nil { + t.Fatal(err) + } + + if val != epoch { + t.Fatal("epoch mismatch") + } + } + + mustNotHave := func(s TrackingStore, cid cid.Cid) { + _, err := s.Get(cid) + if err == nil { + t.Fatal("expected error") + } + } + + path, err := ioutil.TempDir("", "snoop-test.*") + if err != nil { + t.Fatal(err) + } + + s, err := OpenTrackingStore(path, tsType) + if err != nil { + t.Fatal(err) + } + + k1 := makeCid("a") + k2 := makeCid("b") + k3 := makeCid("c") + k4 := makeCid("d") + + s.Put(k1, 1) //nolint + s.Put(k2, 2) //nolint + s.Put(k3, 3) //nolint + s.Put(k4, 4) //nolint + + mustHave(s, k1, 1) + mustHave(s, k2, 2) + mustHave(s, k3, 3) + mustHave(s, k4, 4) + + s.Delete(k1) // nolint + s.Delete(k2) // nolint + + mustNotHave(s, k1) + mustNotHave(s, k2) + mustHave(s, k3, 3) + mustHave(s, k4, 4) + + s.PutBatch([]cid.Cid{k1}, 1) //nolint + s.PutBatch([]cid.Cid{k2}, 2) //nolint + + mustHave(s, k1, 1) + mustHave(s, k2, 2) + mustHave(s, k3, 3) + mustHave(s, k4, 4) + + allKeys := map[string]struct{}{ + k1.String(): {}, + k2.String(): {}, + k3.String(): {}, + k4.String(): {}, + } + + err = s.ForEach(func(k cid.Cid, _ abi.ChainEpoch) error { + _, ok := allKeys[k.String()] + if !ok { + t.Fatal("unexpected key") + } + + delete(allKeys, k.String()) + return nil + }) + + if err != nil { + t.Fatal(err) + } + + if len(allKeys) != 0 { + t.Fatal("not all keys were returned") + } + + // no close and reopen and ensure the keys still exist + err = s.Close() + if err != nil { + t.Fatal(err) + } + + s, err = OpenTrackingStore(path, tsType) + if err != nil { + t.Fatal(err) + } + + mustHave(s, k1, 1) + mustHave(s, k2, 2) + mustHave(s, k3, 3) + mustHave(s, k4, 4) + + s.Close() //nolint:errcheck +} diff --git a/blockstore/sync.go b/blockstore/sync.go new file mode 100644 index 
00000000000..848ccd19d2b --- /dev/null +++ b/blockstore/sync.go @@ -0,0 +1,81 @@ +package blockstore + +import ( + "context" + "sync" + + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" +) + +// NewMemorySync returns a thread-safe in-memory blockstore. +func NewMemorySync() *SyncBlockstore { + return &SyncBlockstore{bs: make(MemBlockstore)} +} + +// SyncBlockstore is a terminal blockstore that is a synchronized version +// of MemBlockstore. +type SyncBlockstore struct { + mu sync.RWMutex + bs MemBlockstore // specifically use a memStore to save indirection overhead. +} + +func (m *SyncBlockstore) DeleteBlock(k cid.Cid) error { + m.mu.Lock() + defer m.mu.Unlock() + return m.bs.DeleteBlock(k) +} + +func (m *SyncBlockstore) DeleteMany(ks []cid.Cid) error { + m.mu.Lock() + defer m.mu.Unlock() + return m.bs.DeleteMany(ks) +} + +func (m *SyncBlockstore) Has(k cid.Cid) (bool, error) { + m.mu.RLock() + defer m.mu.RUnlock() + return m.bs.Has(k) +} + +func (m *SyncBlockstore) View(k cid.Cid, callback func([]byte) error) error { + m.mu.RLock() + defer m.mu.RUnlock() + + return m.bs.View(k, callback) +} + +func (m *SyncBlockstore) Get(k cid.Cid) (blocks.Block, error) { + m.mu.RLock() + defer m.mu.RUnlock() + return m.bs.Get(k) +} + +func (m *SyncBlockstore) GetSize(k cid.Cid) (int, error) { + m.mu.RLock() + defer m.mu.RUnlock() + return m.bs.GetSize(k) +} + +func (m *SyncBlockstore) Put(b blocks.Block) error { + m.mu.Lock() + defer m.mu.Unlock() + return m.bs.Put(b) +} + +func (m *SyncBlockstore) PutMany(bs []blocks.Block) error { + m.mu.Lock() + defer m.mu.Unlock() + return m.bs.PutMany(bs) +} + +func (m *SyncBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + m.mu.RLock() + defer m.mu.RUnlock() + // this blockstore implementation doesn't do any async work. 
+ return m.bs.AllKeysChan(ctx) +} + +func (m *SyncBlockstore) HashOnRead(enabled bool) { + // noop +} diff --git a/lib/timedbs/timedbs.go b/blockstore/timed.go similarity index 54% rename from lib/timedbs/timedbs.go rename to blockstore/timed.go index c5c1a8fe003..ce25bb5bc02 100644 --- a/lib/timedbs/timedbs.go +++ b/blockstore/timed.go @@ -1,4 +1,4 @@ -package timedbs +package blockstore import ( "context" @@ -10,37 +10,37 @@ import ( "github.com/ipfs/go-cid" "github.com/raulk/clock" "go.uber.org/multierr" - - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/lib/blockstore" ) -// TimedCacheBS is a blockstore that keeps blocks for at least the specified -// caching interval before discarding them. Garbage collection must be started -// and stopped by calling Start/Stop. +// TimedCacheBlockstore is a blockstore that keeps blocks for at least the +// specified caching interval before discarding them. Garbage collection must +// be started and stopped by calling Start/Stop. // // Under the covers, it's implemented with an active and an inactive blockstore // that are rotated every cache time interval. This means all blocks will be // stored at most 2x the cache interval. -type TimedCacheBS struct { +// +// Create a new instance by calling the NewTimedCacheBlockstore constructor. 
+type TimedCacheBlockstore struct { mu sync.RWMutex - active, inactive blockstore.MemStore + active, inactive MemBlockstore clock clock.Clock interval time.Duration closeCh chan struct{} doneRotatingCh chan struct{} } -func NewTimedCacheBS(cacheTime time.Duration) *TimedCacheBS { - return &TimedCacheBS{ - active: blockstore.NewTemporary(), - inactive: blockstore.NewTemporary(), - interval: cacheTime, - clock: build.Clock, +func NewTimedCacheBlockstore(interval time.Duration) *TimedCacheBlockstore { + b := &TimedCacheBlockstore{ + active: NewMemory(), + inactive: NewMemory(), + interval: interval, + clock: clock.New(), } + return b } -func (t *TimedCacheBS) Start(ctx context.Context) error { +func (t *TimedCacheBlockstore) Start(_ context.Context) error { t.mu.Lock() defer t.mu.Unlock() if t.closeCh != nil { @@ -65,11 +65,11 @@ func (t *TimedCacheBS) Start(ctx context.Context) error { return nil } -func (t *TimedCacheBS) Stop(ctx context.Context) error { +func (t *TimedCacheBlockstore) Stop(_ context.Context) error { t.mu.Lock() defer t.mu.Unlock() if t.closeCh == nil { - return fmt.Errorf("not started started") + return fmt.Errorf("not started") } select { case <-t.closeCh: @@ -80,15 +80,15 @@ func (t *TimedCacheBS) Stop(ctx context.Context) error { return nil } -func (t *TimedCacheBS) rotate() { - newBs := blockstore.NewTemporary() +func (t *TimedCacheBlockstore) rotate() { + newBs := NewMemory() t.mu.Lock() t.inactive, t.active = t.active, newBs t.mu.Unlock() } -func (t *TimedCacheBS) Put(b blocks.Block) error { +func (t *TimedCacheBlockstore) Put(b blocks.Block) error { // Don't check the inactive set here. We want to keep this block for at // least one interval. 
t.mu.Lock() @@ -96,33 +96,43 @@ func (t *TimedCacheBS) Put(b blocks.Block) error { return t.active.Put(b) } -func (t *TimedCacheBS) PutMany(bs []blocks.Block) error { +func (t *TimedCacheBlockstore) PutMany(bs []blocks.Block) error { t.mu.Lock() defer t.mu.Unlock() return t.active.PutMany(bs) } -func (t *TimedCacheBS) Get(k cid.Cid) (blocks.Block, error) { +func (t *TimedCacheBlockstore) View(k cid.Cid, callback func([]byte) error) error { + t.mu.RLock() + defer t.mu.RUnlock() + err := t.active.View(k, callback) + if err == ErrNotFound { + err = t.inactive.View(k, callback) + } + return err +} + +func (t *TimedCacheBlockstore) Get(k cid.Cid) (blocks.Block, error) { t.mu.RLock() defer t.mu.RUnlock() b, err := t.active.Get(k) - if err == blockstore.ErrNotFound { + if err == ErrNotFound { b, err = t.inactive.Get(k) } return b, err } -func (t *TimedCacheBS) GetSize(k cid.Cid) (int, error) { +func (t *TimedCacheBlockstore) GetSize(k cid.Cid) (int, error) { t.mu.RLock() defer t.mu.RUnlock() size, err := t.active.GetSize(k) - if err == blockstore.ErrNotFound { + if err == ErrNotFound { size, err = t.inactive.GetSize(k) } return size, err } -func (t *TimedCacheBS) Has(k cid.Cid) (bool, error) { +func (t *TimedCacheBlockstore) Has(k cid.Cid) (bool, error) { t.mu.RLock() defer t.mu.RUnlock() if has, err := t.active.Has(k); err != nil { @@ -133,17 +143,23 @@ func (t *TimedCacheBS) Has(k cid.Cid) (bool, error) { return t.inactive.Has(k) } -func (t *TimedCacheBS) HashOnRead(_ bool) { +func (t *TimedCacheBlockstore) HashOnRead(_ bool) { // no-op } -func (t *TimedCacheBS) DeleteBlock(k cid.Cid) error { +func (t *TimedCacheBlockstore) DeleteBlock(k cid.Cid) error { t.mu.Lock() defer t.mu.Unlock() return multierr.Combine(t.active.DeleteBlock(k), t.inactive.DeleteBlock(k)) } -func (t *TimedCacheBS) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { +func (t *TimedCacheBlockstore) DeleteMany(ks []cid.Cid) error { + t.mu.Lock() + defer t.mu.Unlock() + return 
multierr.Combine(t.active.DeleteMany(ks), t.inactive.DeleteMany(ks)) +} + +func (t *TimedCacheBlockstore) AllKeysChan(_ context.Context) (<-chan cid.Cid, error) { t.mu.RLock() defer t.mu.RUnlock() diff --git a/lib/timedbs/timedbs_test.go b/blockstore/timed_test.go similarity index 93% rename from lib/timedbs/timedbs_test.go rename to blockstore/timed_test.go index e01215bbdb0..d5fefff9461 100644 --- a/lib/timedbs/timedbs_test.go +++ b/blockstore/timed_test.go @@ -1,4 +1,4 @@ -package timedbs +package blockstore import ( "context" @@ -12,8 +12,8 @@ import ( "github.com/ipfs/go-cid" ) -func TestTimedBSSimple(t *testing.T) { - tc := NewTimedCacheBS(10 * time.Millisecond) +func TestTimedCacheBlockstoreSimple(t *testing.T) { + tc := NewTimedCacheBlockstore(10 * time.Millisecond) mClock := clock.NewMock() mClock.Set(time.Now()) tc.clock = mClock diff --git a/blockstore/union.go b/blockstore/union.go new file mode 100644 index 00000000000..a99ba259133 --- /dev/null +++ b/blockstore/union.go @@ -0,0 +1,119 @@ +package blockstore + +import ( + "context" + + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" +) + +type unionBlockstore []Blockstore + +// Union returns an unioned blockstore. +// +// * Reads return from the first blockstore that has the value, querying in the +// supplied order. +// * Writes (puts and deltes) are broadcast to all stores. 
+// +func Union(stores ...Blockstore) Blockstore { + return unionBlockstore(stores) +} + +func (m unionBlockstore) Has(cid cid.Cid) (has bool, err error) { + for _, bs := range m { + if has, err = bs.Has(cid); has || err != nil { + break + } + } + return has, err +} + +func (m unionBlockstore) Get(cid cid.Cid) (blk blocks.Block, err error) { + for _, bs := range m { + if blk, err = bs.Get(cid); err == nil || err != ErrNotFound { + break + } + } + return blk, err +} + +func (m unionBlockstore) View(cid cid.Cid, callback func([]byte) error) (err error) { + for _, bs := range m { + if err = bs.View(cid, callback); err == nil || err != ErrNotFound { + break + } + } + return err +} + +func (m unionBlockstore) GetSize(cid cid.Cid) (size int, err error) { + for _, bs := range m { + if size, err = bs.GetSize(cid); err == nil || err != ErrNotFound { + break + } + } + return size, err +} + +func (m unionBlockstore) Put(block blocks.Block) (err error) { + for _, bs := range m { + if err = bs.Put(block); err != nil { + break + } + } + return err +} + +func (m unionBlockstore) PutMany(blks []blocks.Block) (err error) { + for _, bs := range m { + if err = bs.PutMany(blks); err != nil { + break + } + } + return err +} + +func (m unionBlockstore) DeleteBlock(cid cid.Cid) (err error) { + for _, bs := range m { + if err = bs.DeleteBlock(cid); err != nil { + break + } + } + return err +} + +func (m unionBlockstore) DeleteMany(cids []cid.Cid) (err error) { + for _, bs := range m { + if err = bs.DeleteMany(cids); err != nil { + break + } + } + return err +} + +func (m unionBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + // this does not deduplicate; this interface needs to be revisited. 
+ outCh := make(chan cid.Cid) + + go func() { + defer close(outCh) + + for _, bs := range m { + ch, err := bs.AllKeysChan(ctx) + if err != nil { + return + } + for cid := range ch { + outCh <- cid + } + } + }() + + return outCh, nil +} + +func (m unionBlockstore) HashOnRead(enabled bool) { + for _, bs := range m { + bs.HashOnRead(enabled) + } +} diff --git a/blockstore/union_test.go b/blockstore/union_test.go new file mode 100644 index 00000000000..b6202689227 --- /dev/null +++ b/blockstore/union_test.go @@ -0,0 +1,102 @@ +package blockstore + +import ( + "context" + "testing" + + blocks "github.com/ipfs/go-block-format" + "github.com/stretchr/testify/require" +) + +var ( + b0 = blocks.NewBlock([]byte("abc")) + b1 = blocks.NewBlock([]byte("foo")) + b2 = blocks.NewBlock([]byte("bar")) +) + +func TestUnionBlockstore_Get(t *testing.T) { + m1 := NewMemory() + m2 := NewMemory() + + _ = m1.Put(b1) + _ = m2.Put(b2) + + u := Union(m1, m2) + + v1, err := u.Get(b1.Cid()) + require.NoError(t, err) + require.Equal(t, b1.RawData(), v1.RawData()) + + v2, err := u.Get(b2.Cid()) + require.NoError(t, err) + require.Equal(t, b2.RawData(), v2.RawData()) +} + +func TestUnionBlockstore_Put_PutMany_Delete_AllKeysChan(t *testing.T) { + m1 := NewMemory() + m2 := NewMemory() + + u := Union(m1, m2) + + err := u.Put(b0) + require.NoError(t, err) + + var has bool + + // write was broadcasted to all stores. + has, _ = m1.Has(b0.Cid()) + require.True(t, has) + + has, _ = m2.Has(b0.Cid()) + require.True(t, has) + + has, _ = u.Has(b0.Cid()) + require.True(t, has) + + // put many. + err = u.PutMany([]blocks.Block{b1, b2}) + require.NoError(t, err) + + // write was broadcasted to all stores. + has, _ = m1.Has(b1.Cid()) + require.True(t, has) + + has, _ = m1.Has(b2.Cid()) + require.True(t, has) + + has, _ = m2.Has(b1.Cid()) + require.True(t, has) + + has, _ = m2.Has(b2.Cid()) + require.True(t, has) + + // also in the union store. 
+ has, _ = u.Has(b1.Cid()) + require.True(t, has) + + has, _ = u.Has(b2.Cid()) + require.True(t, has) + + // deleted from all stores. + err = u.DeleteBlock(b1.Cid()) + require.NoError(t, err) + + has, _ = u.Has(b1.Cid()) + require.False(t, has) + + has, _ = m1.Has(b1.Cid()) + require.False(t, has) + + has, _ = m2.Has(b1.Cid()) + require.False(t, has) + + // check that AllKeysChan returns b0 and b2, twice (once per backing store) + ch, err := u.AllKeysChan(context.Background()) + require.NoError(t, err) + + var i int + for range ch { + i++ + } + require.Equal(t, 4, i) +} diff --git a/build/tools.go b/build/tools.go index 7296c142c01..ad45397bb37 100644 --- a/build/tools.go +++ b/build/tools.go @@ -4,5 +4,6 @@ package build import ( _ "github.com/GeertJohan/go.rice/rice" + _ "github.com/golang/mock/mockgen" _ "github.com/whyrusleeping/bencher" ) diff --git a/build/version.go b/build/version.go index 55b8d6b90eb..7ca51a9f5d3 100644 --- a/build/version.go +++ b/build/version.go @@ -1,11 +1,5 @@ package build -import ( - "fmt" - - "golang.org/x/xerrors" -) - var CurrentCommit string var BuildType int @@ -40,67 +34,3 @@ const BuildVersion = "1.5.0" func UserVersion() string { return BuildVersion + buildType() + CurrentCommit } - -type Version uint32 - -func newVer(major, minor, patch uint8) Version { - return Version(uint32(major)<<16 | uint32(minor)<<8 | uint32(patch)) -} - -// Ints returns (major, minor, patch) versions -func (ve Version) Ints() (uint32, uint32, uint32) { - v := uint32(ve) - return (v & majorOnlyMask) >> 16, (v & minorOnlyMask) >> 8, v & patchOnlyMask -} - -func (ve Version) String() string { - vmj, vmi, vp := ve.Ints() - return fmt.Sprintf("%d.%d.%d", vmj, vmi, vp) -} - -func (ve Version) EqMajorMinor(v2 Version) bool { - return ve&minorMask == v2&minorMask -} - -type NodeType int - -const ( - NodeUnknown NodeType = iota - - NodeFull - NodeMiner - NodeWorker -) - -var RunningNodeType NodeType - -func VersionForType(nodeType NodeType) (Version, error) 
{ - switch nodeType { - case NodeFull: - return FullAPIVersion, nil - case NodeMiner: - return MinerAPIVersion, nil - case NodeWorker: - return WorkerAPIVersion, nil - default: - return Version(0), xerrors.Errorf("unknown node type %d", nodeType) - } -} - -// semver versions of the rpc api exposed -var ( - FullAPIVersion = newVer(1, 1, 0) - MinerAPIVersion = newVer(1, 0, 1) - WorkerAPIVersion = newVer(1, 0, 0) -) - -//nolint:varcheck,deadcode -const ( - majorMask = 0xff0000 - minorMask = 0xffff00 - patchMask = 0xffffff - - majorOnlyMask = 0xff0000 - minorOnlyMask = 0x00ff00 - patchOnlyMask = 0x0000ff -) diff --git a/chain/actors/adt/diff_adt_test.go b/chain/actors/adt/diff_adt_test.go index a187c9f3568..b0e01b78d31 100644 --- a/chain/actors/adt/diff_adt_test.go +++ b/chain/actors/adt/diff_adt_test.go @@ -16,7 +16,7 @@ import ( builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" adt2 "github.com/filecoin-project/specs-actors/v2/actors/util/adt" - bstore "github.com/filecoin-project/lotus/lib/blockstore" + bstore "github.com/filecoin-project/lotus/blockstore" ) func TestDiffAdtArray(t *testing.T) { @@ -295,7 +295,7 @@ func (t *TestDiffArray) Remove(key uint64, val *typegen.Deferred) error { func newContextStore() Store { ctx := context.Background() - bs := bstore.NewTemporarySync() + bs := bstore.NewMemorySync() store := cbornode.NewCborStore(bs) return WrapStore(ctx, store) } diff --git a/chain/events/state/mock/api.go b/chain/events/state/mock/api.go index 4e8bcc94db6..2ed48dc39c3 100644 --- a/chain/events/state/mock/api.go +++ b/chain/events/state/mock/api.go @@ -5,8 +5,8 @@ import ( "sync" "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/lib/blockstore" "github.com/ipfs/go-cid" "golang.org/x/xerrors" ) diff --git a/chain/events/state/predicates.go b/chain/events/state/predicates.go index 551b776c221..33f49628978 100644 
--- a/chain/events/state/predicates.go +++ b/chain/events/state/predicates.go @@ -3,6 +3,7 @@ package state import ( "context" + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/go-address" @@ -10,7 +11,7 @@ import ( "github.com/filecoin-project/go-state-types/big" cbor "github.com/ipfs/go-ipld-cbor" - "github.com/filecoin-project/lotus/api/apibstore" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/actors/adt" init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init" "github.com/filecoin-project/lotus/chain/actors/builtin/market" @@ -23,7 +24,7 @@ type UserData interface{} // ChainAPI abstracts out calls made by this class to external APIs type ChainAPI interface { - apibstore.ChainIO + api.ChainIO StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) } @@ -36,7 +37,7 @@ type StatePredicates struct { func NewStatePredicates(api ChainAPI) *StatePredicates { return &StatePredicates{ api: api, - cst: cbor.NewCborStore(apibstore.NewAPIBlockstore(api)), + cst: cbor.NewCborStore(blockstore.NewAPIBlockstore(api)), } } diff --git a/chain/events/state/predicates_test.go b/chain/events/state/predicates_test.go index 8fc93d9cd2e..8af3bb6a0b9 100644 --- a/chain/events/state/predicates_test.go +++ b/chain/events/state/predicates_test.go @@ -23,9 +23,9 @@ import ( adt2 "github.com/filecoin-project/specs-actors/v2/actors/util/adt" tutils "github.com/filecoin-project/specs-actors/v2/support/testing" + bstore "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/actors/builtin/market" "github.com/filecoin-project/lotus/chain/types" - bstore "github.com/filecoin-project/lotus/lib/blockstore" ) var dummyCid cid.Cid @@ -36,7 +36,7 @@ func init() { func TestMarketPredicates(t *testing.T) { ctx := context.Background() - bs := bstore.NewTemporarySync() + bs := 
bstore.NewMemorySync() store := adt2.WrapStore(ctx, cbornode.NewCborStore(bs)) oldDeal1 := &market2.DealState{ @@ -334,7 +334,7 @@ func TestMarketPredicates(t *testing.T) { func TestMinerSectorChange(t *testing.T) { ctx := context.Background() - bs := bstore.NewTemporarySync() + bs := bstore.NewMemorySync() store := adt2.WrapStore(ctx, cbornode.NewCborStore(bs)) nextID := uint64(0) diff --git a/chain/gen/gen.go b/chain/gen/gen.go index 9332c880878..d06c755fa34 100644 --- a/chain/gen/gen.go +++ b/chain/gen/gen.go @@ -27,6 +27,7 @@ import ( proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/beacon" @@ -40,7 +41,6 @@ import ( "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/genesis" "github.com/filecoin-project/lotus/journal" - "github.com/filecoin-project/lotus/lib/blockstore" "github.com/filecoin-project/lotus/lib/sigs" "github.com/filecoin-project/lotus/node/repo" ) @@ -125,7 +125,7 @@ func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) { return nil, xerrors.Errorf("failed to get metadata datastore: %w", err) } - bs, err := lr.Blockstore(context.TODO(), repo.BlockstoreChain) + bs, err := lr.Blockstore(context.TODO(), repo.UniversalBlockstore) if err != nil { return nil, err } diff --git a/chain/gen/genesis/f00_system.go b/chain/gen/genesis/f00_system.go index 6e6cc976aba..015dfac4a99 100644 --- a/chain/gen/genesis/f00_system.go +++ b/chain/gen/genesis/f00_system.go @@ -8,8 +8,8 @@ import ( "github.com/filecoin-project/specs-actors/actors/builtin" cbor "github.com/ipfs/go-ipld-cbor" + bstore "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/types" - bstore "github.com/filecoin-project/lotus/lib/blockstore" ) func 
SetupSystemActor(bs bstore.Blockstore) (*types.Actor, error) { diff --git a/chain/gen/genesis/f01_init.go b/chain/gen/genesis/f01_init.go index 24f06f2b61e..9fc6cfb9e5c 100644 --- a/chain/gen/genesis/f01_init.go +++ b/chain/gen/genesis/f01_init.go @@ -16,9 +16,9 @@ import ( cbg "github.com/whyrusleeping/cbor-gen" "golang.org/x/xerrors" + bstore "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/genesis" - bstore "github.com/filecoin-project/lotus/lib/blockstore" ) func SetupInitActor(bs bstore.Blockstore, netname string, initialActors []genesis.Actor, rootVerifier genesis.Actor, remainder genesis.Actor) (int64, *types.Actor, map[address.Address]address.Address, error) { diff --git a/chain/gen/genesis/f02_reward.go b/chain/gen/genesis/f02_reward.go index 92531051b14..e218da6fe53 100644 --- a/chain/gen/genesis/f02_reward.go +++ b/chain/gen/genesis/f02_reward.go @@ -9,9 +9,9 @@ import ( reward0 "github.com/filecoin-project/specs-actors/actors/builtin/reward" cbor "github.com/ipfs/go-ipld-cbor" + bstore "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/types" - bstore "github.com/filecoin-project/lotus/lib/blockstore" ) func SetupRewardActor(bs bstore.Blockstore, qaPower big.Int) (*types.Actor, error) { diff --git a/chain/gen/genesis/f03_cron.go b/chain/gen/genesis/f03_cron.go index cf2c0d7a741..dd43a59a4ec 100644 --- a/chain/gen/genesis/f03_cron.go +++ b/chain/gen/genesis/f03_cron.go @@ -7,8 +7,8 @@ import ( "github.com/filecoin-project/specs-actors/actors/builtin/cron" cbor "github.com/ipfs/go-ipld-cbor" + bstore "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/types" - bstore "github.com/filecoin-project/lotus/lib/blockstore" ) func SetupCronActor(bs bstore.Blockstore) (*types.Actor, error) { diff --git a/chain/gen/genesis/f04_power.go b/chain/gen/genesis/f04_power.go 
index 2f1303ba46c..ed349c18bc6 100644 --- a/chain/gen/genesis/f04_power.go +++ b/chain/gen/genesis/f04_power.go @@ -9,8 +9,8 @@ import ( power0 "github.com/filecoin-project/specs-actors/actors/builtin/power" cbor "github.com/ipfs/go-ipld-cbor" + bstore "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/types" - bstore "github.com/filecoin-project/lotus/lib/blockstore" ) func SetupStoragePowerActor(bs bstore.Blockstore) (*types.Actor, error) { diff --git a/chain/gen/genesis/f05_market.go b/chain/gen/genesis/f05_market.go index 615e8370ba5..f7ac26f434f 100644 --- a/chain/gen/genesis/f05_market.go +++ b/chain/gen/genesis/f05_market.go @@ -8,8 +8,8 @@ import ( "github.com/filecoin-project/specs-actors/actors/util/adt" cbor "github.com/ipfs/go-ipld-cbor" + bstore "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/types" - bstore "github.com/filecoin-project/lotus/lib/blockstore" ) func SetupStorageMarketActor(bs bstore.Blockstore) (*types.Actor, error) { diff --git a/chain/gen/genesis/f06_vreg.go b/chain/gen/genesis/f06_vreg.go index 1709b205f1f..1ba8abede57 100644 --- a/chain/gen/genesis/f06_vreg.go +++ b/chain/gen/genesis/f06_vreg.go @@ -10,8 +10,8 @@ import ( verifreg0 "github.com/filecoin-project/specs-actors/actors/builtin/verifreg" "github.com/filecoin-project/specs-actors/actors/util/adt" + bstore "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/types" - bstore "github.com/filecoin-project/lotus/lib/blockstore" ) var RootVerifierID address.Address diff --git a/chain/gen/genesis/genesis.go b/chain/gen/genesis/genesis.go index ef81410bba7..d382e4d0dff 100644 --- a/chain/gen/genesis/genesis.go +++ b/chain/gen/genesis/genesis.go @@ -7,6 +7,7 @@ import ( "fmt" "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/journal" "github.com/ipfs/go-cid" @@ -26,13 +27,13 @@ import ( verifreg0 
"github.com/filecoin-project/specs-actors/actors/builtin/verifreg" adt0 "github.com/filecoin-project/specs-actors/actors/util/adt" + bstore "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" "github.com/filecoin-project/lotus/genesis" - bstore "github.com/filecoin-project/lotus/lib/blockstore" "github.com/filecoin-project/lotus/lib/sigs" ) @@ -233,13 +234,36 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge } - vregroot, err := address.NewIDAddress(80) - if err != nil { - return nil, nil, err - } + switch template.VerifregRootKey.Type { + case genesis.TAccount: + var ainfo genesis.AccountMeta + if err := json.Unmarshal(template.VerifregRootKey.Meta, &ainfo); err != nil { + return nil, nil, xerrors.Errorf("unmarshaling account meta: %w", err) + } + st, err := cst.Put(ctx, &account0.State{Address: ainfo.Owner}) + if err != nil { + return nil, nil, err + } - if err = createMultisigAccount(ctx, bs, cst, state, vregroot, template.VerifregRootKey, keyIDs); err != nil { - return nil, nil, xerrors.Errorf("failed to set up verified registry signer: %w", err) + _, ok := keyIDs[ainfo.Owner] + if ok { + return nil, nil, fmt.Errorf("rootkey account has already been declared, cannot be assigned 80: %s", ainfo.Owner) + } + + err = state.SetActor(builtin.RootVerifierAddress, &types.Actor{ + Code: builtin0.AccountActorCodeID, + Balance: template.VerifregRootKey.Balance, + Head: st, + }) + if err != nil { + return nil, nil, xerrors.Errorf("setting verifreg rootkey account: %w", err) + } + case genesis.TMultisig: + if err = createMultisigAccount(ctx, bs, cst, state, builtin.RootVerifierAddress, template.VerifregRootKey, keyIDs); err != nil { + return nil, nil, xerrors.Errorf("failed to set up verified registry signer: %w", err) 
+ } + default: + return nil, nil, xerrors.Errorf("unknown account type for verifreg rootkey: %w", err) } // Setup the first verifier as ID-address 81 @@ -300,8 +324,36 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge template.RemainderAccount.Balance = remainingFil - if err := createMultisigAccount(ctx, bs, cst, state, builtin.ReserveAddress, template.RemainderAccount, keyIDs); err != nil { - return nil, nil, xerrors.Errorf("failed to set up remainder account: %w", err) + switch template.RemainderAccount.Type { + case genesis.TAccount: + var ainfo genesis.AccountMeta + if err := json.Unmarshal(template.RemainderAccount.Meta, &ainfo); err != nil { + return nil, nil, xerrors.Errorf("unmarshaling account meta: %w", err) + } + st, err := cst.Put(ctx, &account0.State{Address: ainfo.Owner}) + if err != nil { + return nil, nil, err + } + + _, ok := keyIDs[ainfo.Owner] + if ok { + return nil, nil, fmt.Errorf("remainder account has already been declared, cannot be assigned 90: %s", ainfo.Owner) + } + + err = state.SetActor(builtin.ReserveAddress, &types.Actor{ + Code: builtin0.AccountActorCodeID, + Balance: template.RemainderAccount.Balance, + Head: st, + }) + if err != nil { + return nil, nil, xerrors.Errorf("setting remainder account: %w", err) + } + case genesis.TMultisig: + if err = createMultisigAccount(ctx, bs, cst, state, builtin.ReserveAddress, template.RemainderAccount, keyIDs); err != nil { + return nil, nil, xerrors.Errorf("failed to set up remainder: %w", err) + } + default: + return nil, nil, xerrors.Errorf("unknown account type for remainder: %w", err) } return state, keyIDs, nil @@ -406,7 +458,7 @@ func VerifyPreSealedData(ctx context.Context, cs *store.ChainStore, stateroot ci StateBase: stateroot, Epoch: 0, Rand: &fakeRand{}, - Bstore: cs.Blockstore(), + Bstore: cs.StateBlockstore(), Syscalls: mkFakedSigSyscalls(cs.VMSys()), CircSupplyCalc: nil, NtwkVersion: genesisNetworkVersion, diff --git a/chain/gen/genesis/miners.go 
b/chain/gen/genesis/miners.go index 850c2f39ff0..297543886dd 100644 --- a/chain/gen/genesis/miners.go +++ b/chain/gen/genesis/miners.go @@ -70,7 +70,7 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid StateBase: sroot, Epoch: 0, Rand: &fakeRand{}, - Bstore: cs.Blockstore(), + Bstore: cs.StateBlockstore(), Syscalls: mkFakedSigSyscalls(cs.VMSys()), CircSupplyCalc: csc, NtwkVersion: genesisNetworkVersion, diff --git a/chain/gen/mining.go b/chain/gen/mining.go index 5de0fec0ed0..3c6a8987362 100644 --- a/chain/gen/mining.go +++ b/chain/gen/mining.go @@ -79,7 +79,7 @@ func MinerCreateBlock(ctx context.Context, sm *stmgr.StateManager, w api.WalletA } } - store := sm.ChainStore().Store(ctx) + store := sm.ChainStore().ActorStore(ctx) blsmsgroot, err := toArray(store, blsMsgCids) if err != nil { return nil, xerrors.Errorf("building bls amt: %w", err) diff --git a/chain/stmgr/call.go b/chain/stmgr/call.go index bb0f0e5ecd1..89f91b0b7ba 100644 --- a/chain/stmgr/call.go +++ b/chain/stmgr/call.go @@ -59,7 +59,7 @@ func (sm *StateManager) Call(ctx context.Context, msg *types.Message, ts *types. 
StateBase: bstate, Epoch: bheight, Rand: store.NewChainRand(sm.cs, ts.Cids()), - Bstore: sm.cs.Blockstore(), + Bstore: sm.cs.StateBlockstore(), Syscalls: sm.cs.VMSys(), CircSupplyCalc: sm.GetVMCirculatingSupply, NtwkVersion: sm.GetNtwkVersion, @@ -174,7 +174,7 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri StateBase: state, Epoch: ts.Height() + 1, Rand: r, - Bstore: sm.cs.Blockstore(), + Bstore: sm.cs.StateBlockstore(), Syscalls: sm.cs.VMSys(), CircSupplyCalc: sm.GetVMCirculatingSupply, NtwkVersion: sm.GetNtwkVersion, diff --git a/chain/stmgr/forks.go b/chain/stmgr/forks.go index 3d5a0761096..899397940d5 100644 --- a/chain/stmgr/forks.go +++ b/chain/stmgr/forks.go @@ -15,6 +15,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin" @@ -24,8 +25,6 @@ import ( "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" - bstore "github.com/filecoin-project/lotus/lib/blockstore" - "github.com/filecoin-project/lotus/lib/bufbstore" builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" multisig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig" @@ -505,7 +504,7 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ Migratio } case builtin0.StorageMinerActorCodeID: var st miner0.State - if err := sm.ChainStore().Store(ctx).Get(ctx, act.Head, &st); err != nil { + if err := sm.ChainStore().ActorStore(ctx).Get(ctx, act.Head, &st); err != nil { return xerrors.Errorf("failed to load miner state: %w", err) } @@ -549,7 +548,7 @@ func 
UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ Migratio return cid.Undef, xerrors.Errorf("failed to load power actor: %w", err) } - cst := cbor.NewCborStore(sm.ChainStore().Blockstore()) + cst := cbor.NewCborStore(sm.ChainStore().StateBlockstore()) if err := cst.Get(ctx, powAct.Head, &ps); err != nil { return cid.Undef, xerrors.Errorf("failed to get power actor state: %w", err) } @@ -583,7 +582,7 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ Migratio } case builtin0.StorageMinerActorCodeID: var st miner0.State - if err := sm.ChainStore().Store(ctx).Get(ctx, act.Head, &st); err != nil { + if err := sm.ChainStore().ActorStore(ctx).Get(ctx, act.Head, &st); err != nil { return xerrors.Errorf("failed to load miner state: %w", err) } @@ -592,7 +591,7 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ Migratio return xerrors.Errorf("failed to get miner info: %w", err) } - sectorsArr, err := adt0.AsArray(sm.ChainStore().Store(ctx), st.Sectors) + sectorsArr, err := adt0.AsArray(sm.ChainStore().ActorStore(ctx), st.Sectors) if err != nil { return xerrors.Errorf("failed to load sectors array: %w", err) } @@ -612,11 +611,11 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ Migratio lbact, err := lbtree.GetActor(addr) if err == nil { var lbst miner0.State - if err := sm.ChainStore().Store(ctx).Get(ctx, lbact.Head, &lbst); err != nil { + if err := sm.ChainStore().ActorStore(ctx).Get(ctx, lbact.Head, &lbst); err != nil { return xerrors.Errorf("failed to load miner state: %w", err) } - lbsectors, err := adt0.AsArray(sm.ChainStore().Store(ctx), lbst.Sectors) + lbsectors, err := adt0.AsArray(sm.ChainStore().ActorStore(ctx), lbst.Sectors) if err != nil { return xerrors.Errorf("failed to load lb sectors array: %w", err) } @@ -712,7 +711,7 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ Migratio } func UpgradeIgnition(ctx context.Context, sm *StateManager, _ 
MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { - store := sm.cs.Store(ctx) + store := sm.cs.ActorStore(ctx) if build.UpgradeLiftoffHeight <= epoch { return cid.Undef, xerrors.Errorf("liftoff height must be beyond ignition height") @@ -768,7 +767,7 @@ func UpgradeIgnition(ctx context.Context, sm *StateManager, _ MigrationCache, cb func UpgradeRefuel(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { - store := sm.cs.Store(ctx) + store := sm.cs.ActorStore(ctx) tree, err := sm.StateTree(root) if err != nil { return cid.Undef, xerrors.Errorf("getting state tree: %w", err) @@ -793,7 +792,7 @@ func UpgradeRefuel(ctx context.Context, sm *StateManager, _ MigrationCache, cb E } func UpgradeActorsV2(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { - buf := bufbstore.NewTieredBstore(sm.cs.Blockstore(), bstore.NewTemporarySync()) + buf := blockstore.NewTieredBstore(sm.cs.StateBlockstore(), blockstore.NewMemorySync()) store := store.ActorStore(ctx, buf) info, err := store.Put(ctx, new(types.StateInfo0)) @@ -844,7 +843,7 @@ func UpgradeLiftoff(ctx context.Context, sm *StateManager, _ MigrationCache, cb return cid.Undef, xerrors.Errorf("getting state tree: %w", err) } - err = setNetworkName(ctx, sm.cs.Store(ctx), tree, "mainnet") + err = setNetworkName(ctx, sm.cs.ActorStore(ctx), tree, "mainnet") if err != nil { return cid.Undef, xerrors.Errorf("setting network name: %w", err) } @@ -853,7 +852,7 @@ func UpgradeLiftoff(ctx context.Context, sm *StateManager, _ MigrationCache, cb } func UpgradeCalico(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { - store := sm.cs.Store(ctx) + store := sm.cs.ActorStore(ctx) var stateRoot types.StateRoot 
if err := store.Get(ctx, root, &stateRoot); err != nil { return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err) @@ -1010,7 +1009,7 @@ func upgradeActorsV3Common( root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet, config nv10.Config, ) (cid.Cid, error) { - buf := bufbstore.NewTieredBstore(sm.cs.Blockstore(), bstore.NewTemporarySync()) + buf := blockstore.NewTieredBstore(sm.cs.StateBlockstore(), blockstore.NewMemorySync()) store := store.ActorStore(ctx, buf) // Load the state root. @@ -1240,7 +1239,7 @@ func resetGenesisMsigs0(ctx context.Context, sm *StateManager, store adt0.Store, return xerrors.Errorf("getting genesis tipset: %w", err) } - cst := cbor.NewCborStore(sm.cs.Blockstore()) + cst := cbor.NewCborStore(sm.cs.StateBlockstore()) genesisTree, err := state.LoadStateTree(cst, gts.ParentState()) if err != nil { return xerrors.Errorf("loading state tree: %w", err) diff --git a/chain/stmgr/forks_test.go b/chain/stmgr/forks_test.go index 95e7ef69900..e456dc436de 100644 --- a/chain/stmgr/forks_test.go +++ b/chain/stmgr/forks_test.go @@ -125,7 +125,7 @@ func TestForkHeightTriggers(t *testing.T) { Height: testForkHeight, Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecCallback, root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { - cst := ipldcbor.NewCborStore(sm.ChainStore().Blockstore()) + cst := ipldcbor.NewCborStore(sm.ChainStore().StateBlockstore()) st, err := sm.StateTree(root) if err != nil { diff --git a/chain/stmgr/read.go b/chain/stmgr/read.go index 9a9b8026576..3c7fb5d91e8 100644 --- a/chain/stmgr/read.go +++ b/chain/stmgr/read.go @@ -22,7 +22,7 @@ func (sm *StateManager) ParentStateTsk(tsk types.TipSetKey) (*state.StateTree, e } func (sm *StateManager) ParentState(ts *types.TipSet) (*state.StateTree, error) { - cst := cbor.NewCborStore(sm.cs.Blockstore()) + cst := cbor.NewCborStore(sm.cs.StateBlockstore()) state, err := state.LoadStateTree(cst, sm.parentState(ts)) if err != nil { 
return nil, xerrors.Errorf("load state tree: %w", err) @@ -32,7 +32,7 @@ func (sm *StateManager) ParentState(ts *types.TipSet) (*state.StateTree, error) } func (sm *StateManager) StateTree(st cid.Cid) (*state.StateTree, error) { - cst := cbor.NewCborStore(sm.cs.Blockstore()) + cst := cbor.NewCborStore(sm.cs.StateBlockstore()) state, err := state.LoadStateTree(cst, st) if err != nil { return nil, xerrors.Errorf("load state tree: %w", err) diff --git a/chain/stmgr/stmgr.go b/chain/stmgr/stmgr.go index 73088ba2a8d..ffbe08474ff 100644 --- a/chain/stmgr/stmgr.go +++ b/chain/stmgr/stmgr.go @@ -286,7 +286,7 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp StateBase: base, Epoch: epoch, Rand: r, - Bstore: sm.cs.Blockstore(), + Bstore: sm.cs.StateBlockstore(), Syscalls: sm.cs.VMSys(), CircSupplyCalc: sm.GetVMCirculatingSupply, NtwkVersion: sm.GetNtwkVersion, @@ -430,7 +430,7 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp return cid.Cid{}, cid.Cid{}, err } - rectarr := blockadt.MakeEmptyArray(sm.cs.Store(ctx)) + rectarr := blockadt.MakeEmptyArray(sm.cs.ActorStore(ctx)) for i, receipt := range receipts { if err := rectarr.Set(uint64(i), receipt); err != nil { return cid.Undef, cid.Undef, xerrors.Errorf("failed to build receipts amt: %w", err) @@ -515,7 +515,7 @@ func (sm *StateManager) ResolveToKeyAddress(ctx context.Context, addr address.Ad ts = sm.cs.GetHeaviestTipSet() } - cst := cbor.NewCborStore(sm.cs.Blockstore()) + cst := cbor.NewCborStore(sm.cs.StateBlockstore()) // First try to resolve the actor in the parent state, so we don't have to compute anything. 
tree, err := state.LoadStateTree(cst, ts.ParentState()) @@ -556,7 +556,7 @@ func (sm *StateManager) GetBlsPublicKey(ctx context.Context, addr address.Addres } func (sm *StateManager) LookupID(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) { - cst := cbor.NewCborStore(sm.cs.Blockstore()) + cst := cbor.NewCborStore(sm.cs.StateBlockstore()) state, err := state.LoadStateTree(cst, sm.parentState(ts)) if err != nil { return address.Undef, xerrors.Errorf("load state tree: %w", err) @@ -882,7 +882,7 @@ func (sm *StateManager) MarketBalance(ctx context.Context, addr address.Address, return api.MarketBalance{}, err } - mstate, err := market.Load(sm.cs.Store(ctx), act) + mstate, err := market.Load(sm.cs.ActorStore(ctx), act) if err != nil { return api.MarketBalance{}, err } @@ -966,7 +966,7 @@ func (sm *StateManager) setupGenesisVestingSchedule(ctx context.Context) error { return xerrors.Errorf("getting genesis tipset state: %w", err) } - cst := cbor.NewCborStore(sm.cs.Blockstore()) + cst := cbor.NewCborStore(sm.cs.StateBlockstore()) sTree, err := state.LoadStateTree(cst, st) if err != nil { return xerrors.Errorf("loading state tree: %w", err) @@ -1325,7 +1325,7 @@ func (sm *StateManager) GetCirculatingSupply(ctx context.Context, height abi.Cha unCirc = big.Add(unCirc, actor.Balance) case a == market.Address: - mst, err := market.Load(sm.cs.Store(ctx), actor) + mst, err := market.Load(sm.cs.ActorStore(ctx), actor) if err != nil { return err } @@ -1342,7 +1342,7 @@ func (sm *StateManager) GetCirculatingSupply(ctx context.Context, height abi.Cha circ = big.Add(circ, actor.Balance) case builtin.IsStorageMinerActor(actor.Code): - mst, err := miner.Load(sm.cs.Store(ctx), actor) + mst, err := miner.Load(sm.cs.ActorStore(ctx), actor) if err != nil { return err } @@ -1359,7 +1359,7 @@ func (sm *StateManager) GetCirculatingSupply(ctx context.Context, height abi.Cha } case builtin.IsMultisigActor(actor.Code): - mst, err := 
multisig.Load(sm.cs.Store(ctx), actor) + mst, err := multisig.Load(sm.cs.ActorStore(ctx), actor) if err != nil { return err } @@ -1413,7 +1413,7 @@ func (sm *StateManager) GetPaychState(ctx context.Context, addr address.Address, return nil, nil, err } - actState, err := paych.Load(sm.cs.Store(ctx), act) + actState, err := paych.Load(sm.cs.ActorStore(ctx), act) if err != nil { return nil, nil, err } @@ -1431,7 +1431,7 @@ func (sm *StateManager) GetMarketState(ctx context.Context, ts *types.TipSet) (m return nil, err } - actState, err := market.Load(sm.cs.Store(ctx), act) + actState, err := market.Load(sm.cs.ActorStore(ctx), act) if err != nil { return nil, err } diff --git a/chain/stmgr/utils.go b/chain/stmgr/utils.go index 86bb3a6e093..947310c7569 100644 --- a/chain/stmgr/utils.go +++ b/chain/stmgr/utils.go @@ -48,7 +48,7 @@ func GetNetworkName(ctx context.Context, sm *StateManager, st cid.Cid) (dtypes.N if err != nil { return "", err } - ias, err := init_.Load(sm.cs.Store(ctx), act) + ias, err := init_.Load(sm.cs.ActorStore(ctx), act) if err != nil { return "", err } @@ -65,7 +65,7 @@ func GetMinerWorkerRaw(ctx context.Context, sm *StateManager, st cid.Cid, maddr if err != nil { return address.Undef, xerrors.Errorf("(get sset) failed to load miner actor: %w", err) } - mas, err := miner.Load(sm.cs.Store(ctx), act) + mas, err := miner.Load(sm.cs.ActorStore(ctx), act) if err != nil { return address.Undef, xerrors.Errorf("(get sset) failed to load miner actor state: %w", err) } @@ -75,7 +75,7 @@ func GetMinerWorkerRaw(ctx context.Context, sm *StateManager, st cid.Cid, maddr return address.Undef, xerrors.Errorf("failed to load actor info: %w", err) } - return vm.ResolveToKeyAddr(state, sm.cs.Store(ctx), info.Worker) + return vm.ResolveToKeyAddr(state, sm.cs.ActorStore(ctx), info.Worker) } func GetPower(ctx context.Context, sm *StateManager, ts *types.TipSet, maddr address.Address) (power.Claim, power.Claim, bool, error) { @@ -88,7 +88,7 @@ func GetPowerRaw(ctx 
context.Context, sm *StateManager, st cid.Cid, maddr addres return power.Claim{}, power.Claim{}, false, xerrors.Errorf("(get sset) failed to load power actor state: %w", err) } - pas, err := power.Load(sm.cs.Store(ctx), act) + pas, err := power.Load(sm.cs.ActorStore(ctx), act) if err != nil { return power.Claim{}, power.Claim{}, false, err } @@ -123,7 +123,7 @@ func PreCommitInfo(ctx context.Context, sm *StateManager, maddr address.Address, return nil, xerrors.Errorf("(get sset) failed to load miner actor: %w", err) } - mas, err := miner.Load(sm.cs.Store(ctx), act) + mas, err := miner.Load(sm.cs.ActorStore(ctx), act) if err != nil { return nil, xerrors.Errorf("(get sset) failed to load miner actor state: %w", err) } @@ -137,7 +137,7 @@ func MinerSectorInfo(ctx context.Context, sm *StateManager, maddr address.Addres return nil, xerrors.Errorf("(get sset) failed to load miner actor: %w", err) } - mas, err := miner.Load(sm.cs.Store(ctx), act) + mas, err := miner.Load(sm.cs.ActorStore(ctx), act) if err != nil { return nil, xerrors.Errorf("(get sset) failed to load miner actor state: %w", err) } @@ -151,7 +151,7 @@ func GetSectorsForWinningPoSt(ctx context.Context, nv network.Version, pv ffiwra return nil, xerrors.Errorf("failed to load miner actor: %w", err) } - mas, err := miner.Load(sm.cs.Store(ctx), act) + mas, err := miner.Load(sm.cs.ActorStore(ctx), act) if err != nil { return nil, xerrors.Errorf("failed to load miner actor state: %w", err) } @@ -249,7 +249,7 @@ func GetMinerSlashed(ctx context.Context, sm *StateManager, ts *types.TipSet, ma return false, xerrors.Errorf("failed to load power actor: %w", err) } - spas, err := power.Load(sm.cs.Store(ctx), act) + spas, err := power.Load(sm.cs.ActorStore(ctx), act) if err != nil { return false, xerrors.Errorf("failed to load power actor state: %w", err) } @@ -272,7 +272,7 @@ func GetStorageDeal(ctx context.Context, sm *StateManager, dealID abi.DealID, ts return nil, xerrors.Errorf("failed to load market actor: %w", 
err) } - state, err := market.Load(sm.cs.Store(ctx), act) + state, err := market.Load(sm.cs.ActorStore(ctx), act) if err != nil { return nil, xerrors.Errorf("failed to load market actor state: %w", err) } @@ -320,7 +320,7 @@ func ListMinerActors(ctx context.Context, sm *StateManager, ts *types.TipSet) ([ return nil, xerrors.Errorf("failed to load power actor: %w", err) } - powState, err := power.Load(sm.cs.Store(ctx), act) + powState, err := power.Load(sm.cs.ActorStore(ctx), act) if err != nil { return nil, xerrors.Errorf("failed to load power actor state: %w", err) } @@ -353,7 +353,7 @@ func ComputeState(ctx context.Context, sm *StateManager, height abi.ChainEpoch, StateBase: base, Epoch: height, Rand: r, - Bstore: sm.cs.Blockstore(), + Bstore: sm.cs.StateBlockstore(), Syscalls: sm.cs.VMSys(), CircSupplyCalc: sm.GetVMCirculatingSupply, NtwkVersion: sm.GetNtwkVersion, @@ -474,7 +474,7 @@ func MinerGetBaseInfo(ctx context.Context, sm *StateManager, bcs beacon.Schedule return nil, xerrors.Errorf("failed to load miner actor: %w", err) } - mas, err := miner.Load(sm.cs.Store(ctx), act) + mas, err := miner.Load(sm.cs.ActorStore(ctx), act) if err != nil { return nil, xerrors.Errorf("failed to load miner actor state: %w", err) } @@ -623,7 +623,7 @@ func minerHasMinPower(ctx context.Context, sm *StateManager, addr address.Addres return false, xerrors.Errorf("loading power actor state: %w", err) } - ps, err := power.Load(sm.cs.Store(ctx), pact) + ps, err := power.Load(sm.cs.ActorStore(ctx), pact) if err != nil { return false, err } @@ -654,7 +654,7 @@ func MinerEligibleToMine(ctx context.Context, sm *StateManager, addr address.Add return false, xerrors.Errorf("loading power actor state: %w", err) } - pstate, err := power.Load(sm.cs.Store(ctx), pact) + pstate, err := power.Load(sm.cs.ActorStore(ctx), pact) if err != nil { return false, err } @@ -664,7 +664,7 @@ func MinerEligibleToMine(ctx context.Context, sm *StateManager, addr address.Add return false, 
xerrors.Errorf("loading miner actor state: %w", err) } - mstate, err := miner.Load(sm.cs.Store(ctx), mact) + mstate, err := miner.Load(sm.cs.ActorStore(ctx), mact) if err != nil { return false, err } @@ -696,7 +696,7 @@ func MinerEligibleToMine(ctx context.Context, sm *StateManager, addr address.Add } func CheckTotalFIL(ctx context.Context, sm *StateManager, ts *types.TipSet) (abi.TokenAmount, error) { - str, err := state.LoadStateTree(sm.ChainStore().Store(ctx), ts.ParentState()) + str, err := state.LoadStateTree(sm.ChainStore().ActorStore(ctx), ts.ParentState()) if err != nil { return abi.TokenAmount{}, err } diff --git a/chain/store/index_test.go b/chain/store/index_test.go index 89756a252df..4470719016c 100644 --- a/chain/store/index_test.go +++ b/chain/store/index_test.go @@ -6,10 +6,10 @@ import ( "testing" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/gen" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types/mock" - "github.com/filecoin-project/lotus/lib/blockstore" datastore "github.com/ipfs/go-datastore" syncds "github.com/ipfs/go-datastore/sync" "github.com/stretchr/testify/assert" @@ -30,7 +30,7 @@ func TestIndexSeeks(t *testing.T) { ctx := context.TODO() - nbs := blockstore.NewTemporarySync() + nbs := blockstore.NewMemorySync() cs := store.NewChainStore(nbs, nbs, syncds.MutexWrap(datastore.NewMapDatastore()), nil, nil) defer cs.Close() //nolint:errcheck diff --git a/chain/store/store.go b/chain/store/store.go index ec7714734b5..e0d71f0304b 100644 --- a/chain/store/store.go +++ b/chain/store/store.go @@ -23,12 +23,12 @@ import ( blockadt "github.com/filecoin-project/specs-actors/actors/util/adt" "github.com/filecoin-project/lotus/api" + bstore "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/adt" 
"github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/vm" "github.com/filecoin-project/lotus/journal" - bstore "github.com/filecoin-project/lotus/lib/blockstore" "github.com/filecoin-project/lotus/metrics" "go.opencensus.io/stats" @@ -81,7 +81,7 @@ func init() { } // ReorgNotifee represents a callback that gets called upon reorgs. -type ReorgNotifee func(rev, app []*types.TipSet) error +type ReorgNotifee = func(rev, app []*types.TipSet) error // Journal event types. const ( @@ -107,11 +107,11 @@ type HeadChangeEvt struct { // 1. a tipset cache // 2. a block => messages references cache. type ChainStore struct { - bs bstore.Blockstore - localbs bstore.Blockstore - ds dstore.Batching + chainBlockstore bstore.Blockstore + stateBlockstore bstore.Blockstore + metadataDs dstore.Batching - localviewer bstore.Viewer + chainLocalBlockstore bstore.Blockstore heaviestLk sync.Mutex heaviest *types.TipSet @@ -139,30 +139,29 @@ type ChainStore struct { wg sync.WaitGroup } -// localbs is guaranteed to fail Get* if requested block isn't stored locally -func NewChainStore(bs bstore.Blockstore, localbs bstore.Blockstore, ds dstore.Batching, vmcalls vm.SyscallBuilder, j journal.Journal) *ChainStore { - mmCache, _ := lru.NewARC(DefaultMsgMetaCacheSize) - tsCache, _ := lru.NewARC(DefaultTipSetCacheSize) +func NewChainStore(chainBs bstore.Blockstore, stateBs bstore.Blockstore, ds dstore.Batching, vmcalls vm.SyscallBuilder, j journal.Journal) *ChainStore { + c, _ := lru.NewARC(DefaultMsgMetaCacheSize) + tsc, _ := lru.NewARC(DefaultTipSetCacheSize) if j == nil { j = journal.NilJournal() } ctx, cancel := context.WithCancel(context.Background()) + // unwraps the fallback store in case one is configured. + // some methods _need_ to operate on a local blockstore only. 
+ localbs, _ := bstore.UnwrapFallbackStore(chainBs) cs := &ChainStore{ - bs: bs, - localbs: localbs, - ds: ds, - bestTips: pubsub.New(64), - tipsets: make(map[abi.ChainEpoch][]cid.Cid), - mmCache: mmCache, - tsCache: tsCache, - vmcalls: vmcalls, - cancelFn: cancel, - journal: j, - } - - if v, ok := localbs.(bstore.Viewer); ok { - cs.localviewer = v + chainBlockstore: chainBs, + stateBlockstore: stateBs, + chainLocalBlockstore: localbs, + metadataDs: ds, + bestTips: pubsub.New(64), + tipsets: make(map[abi.ChainEpoch][]cid.Cid), + mmCache: c, + tsCache: tsc, + vmcalls: vmcalls, + cancelFn: cancel, + journal: j, } cs.evtTypes = [1]journal.EventType{ @@ -216,7 +215,7 @@ func (cs *ChainStore) Close() error { } func (cs *ChainStore) Load() error { - head, err := cs.ds.Get(chainHeadKey) + head, err := cs.metadataDs.Get(chainHeadKey) if err == dstore.ErrNotFound { log.Warn("no previous chain state found") return nil @@ -246,7 +245,7 @@ func (cs *ChainStore) writeHead(ts *types.TipSet) error { return xerrors.Errorf("failed to marshal tipset: %w", err) } - if err := cs.ds.Put(chainHeadKey, data); err != nil { + if err := cs.metadataDs.Put(chainHeadKey, data); err != nil { return xerrors.Errorf("failed to write chain head to datastore: %w", err) } @@ -306,13 +305,13 @@ func (cs *ChainStore) SubscribeHeadChanges(f ReorgNotifee) { func (cs *ChainStore) IsBlockValidated(ctx context.Context, blkid cid.Cid) (bool, error) { key := blockValidationCacheKeyPrefix.Instance(blkid.String()) - return cs.ds.Has(key) + return cs.metadataDs.Has(key) } func (cs *ChainStore) MarkBlockAsValidated(ctx context.Context, blkid cid.Cid) error { key := blockValidationCacheKeyPrefix.Instance(blkid.String()) - if err := cs.ds.Put(key, []byte{0}); err != nil { + if err := cs.metadataDs.Put(key, []byte{0}); err != nil { return xerrors.Errorf("cache block validation: %w", err) } @@ -322,7 +321,7 @@ func (cs *ChainStore) MarkBlockAsValidated(ctx context.Context, blkid cid.Cid) e func (cs *ChainStore) 
UnmarkBlockAsValidated(ctx context.Context, blkid cid.Cid) error { key := blockValidationCacheKeyPrefix.Instance(blkid.String()) - if err := cs.ds.Delete(key); err != nil { + if err := cs.metadataDs.Delete(key); err != nil { return xerrors.Errorf("removing from valid block cache: %w", err) } @@ -339,7 +338,7 @@ func (cs *ChainStore) SetGenesis(b *types.BlockHeader) error { return err } - return cs.ds.Put(dstore.NewKey("0"), b.Cid().Bytes()) + return cs.metadataDs.Put(dstore.NewKey("0"), b.Cid().Bytes()) } func (cs *ChainStore) PutTipSet(ctx context.Context, ts *types.TipSet) error { @@ -594,7 +593,7 @@ func (cs *ChainStore) takeHeaviestTipSet(ctx context.Context, ts *types.TipSet) // FlushValidationCache removes all results of block validation from the // chain metadata store. Usually the first step after a new chain import. func (cs *ChainStore) FlushValidationCache() error { - return FlushValidationCache(cs.ds) + return FlushValidationCache(cs.metadataDs) } func FlushValidationCache(ds datastore.Batching) error { @@ -653,7 +652,7 @@ func (cs *ChainStore) SetHead(ts *types.TipSet) error { // Contains returns whether our BlockStore has all blocks in the supplied TipSet. func (cs *ChainStore) Contains(ts *types.TipSet) (bool, error) { for _, c := range ts.Cids() { - has, err := cs.bs.Has(c) + has, err := cs.chainBlockstore.Has(c) if err != nil { return false, err } @@ -668,16 +667,8 @@ func (cs *ChainStore) Contains(ts *types.TipSet) (bool, error) { // GetBlock fetches a BlockHeader with the supplied CID. It returns // blockstore.ErrNotFound if the block was not found in the BlockStore. 
func (cs *ChainStore) GetBlock(c cid.Cid) (*types.BlockHeader, error) { - if cs.localviewer == nil { - sb, err := cs.localbs.Get(c) - if err != nil { - return nil, err - } - return types.DecodeBlock(sb.RawData()) - } - var blk *types.BlockHeader - err := cs.localviewer.View(c, func(b []byte) (err error) { + err := cs.chainLocalBlockstore.View(c, func(b []byte) (err error) { blk, err = types.DecodeBlock(b) return err }) @@ -851,7 +842,7 @@ func (cs *ChainStore) PersistBlockHeaders(b ...*types.BlockHeader) error { end = len(b) } - err = multierr.Append(err, cs.bs.PutMany(sbs[start:end])) + err = multierr.Append(err, cs.chainLocalBlockstore.PutMany(sbs[start:end])) } return err @@ -875,7 +866,7 @@ func PutMessage(bs bstore.Blockstore, m storable) (cid.Cid, error) { } func (cs *ChainStore) PutMessage(m storable) (cid.Cid, error) { - return PutMessage(cs.bs, m) + return PutMessage(cs.chainBlockstore, m) } func (cs *ChainStore) expandTipset(b *types.BlockHeader) (*types.TipSet, error) { @@ -936,7 +927,7 @@ func (cs *ChainStore) AddBlock(ctx context.Context, b *types.BlockHeader) error } func (cs *ChainStore) GetGenesis() (*types.BlockHeader, error) { - data, err := cs.ds.Get(dstore.NewKey("0")) + data, err := cs.metadataDs.Get(dstore.NewKey("0")) if err != nil { return nil, err } @@ -962,17 +953,8 @@ func (cs *ChainStore) GetCMessage(c cid.Cid) (types.ChainMsg, error) { } func (cs *ChainStore) GetMessage(c cid.Cid) (*types.Message, error) { - if cs.localviewer == nil { - sb, err := cs.localbs.Get(c) - if err != nil { - log.Errorf("get message get failed: %s: %s", c, err) - return nil, err - } - return types.DecodeMessage(sb.RawData()) - } - var msg *types.Message - err := cs.localviewer.View(c, func(b []byte) (err error) { + err := cs.chainLocalBlockstore.View(c, func(b []byte) (err error) { msg, err = types.DecodeMessage(b) return err }) @@ -980,17 +962,8 @@ func (cs *ChainStore) GetMessage(c cid.Cid) (*types.Message, error) { } func (cs *ChainStore) GetSignedMessage(c 
cid.Cid) (*types.SignedMessage, error) { - if cs.localviewer == nil { - sb, err := cs.localbs.Get(c) - if err != nil { - log.Errorf("get message get failed: %s: %s", c, err) - return nil, err - } - return types.DecodeSignedMessage(sb.RawData()) - } - var msg *types.SignedMessage - err := cs.localviewer.View(c, func(b []byte) (err error) { + err := cs.chainLocalBlockstore.View(c, func(b []byte) (err error) { msg, err = types.DecodeSignedMessage(b) return err }) @@ -1000,7 +973,7 @@ func (cs *ChainStore) GetSignedMessage(c cid.Cid) (*types.SignedMessage, error) func (cs *ChainStore) readAMTCids(root cid.Cid) ([]cid.Cid, error) { ctx := context.TODO() // block headers use adt0, for now. - a, err := blockadt.AsArray(cs.Store(ctx), root) + a, err := blockadt.AsArray(cs.ActorStore(ctx), root) if err != nil { return nil, xerrors.Errorf("amt load: %w", err) } @@ -1124,7 +1097,7 @@ func (cs *ChainStore) ReadMsgMetaCids(mmc cid.Cid) ([]cid.Cid, []cid.Cid, error) return mmcids.bls, mmcids.secpk, nil } - cst := cbor.NewCborStore(cs.localbs) + cst := cbor.NewCborStore(cs.chainLocalBlockstore) var msgmeta types.MsgMeta if err := cst.Get(context.TODO(), mmc, &msgmeta); err != nil { return nil, nil, xerrors.Errorf("failed to load msgmeta (%s): %w", mmc, err) @@ -1194,7 +1167,7 @@ func (cs *ChainStore) MessagesForBlock(b *types.BlockHeader) ([]*types.Message, func (cs *ChainStore) GetParentReceipt(b *types.BlockHeader, i int) (*types.MessageReceipt, error) { ctx := context.TODO() // block headers use adt0, for now. - a, err := blockadt.AsArray(cs.Store(ctx), b.ParentMessageReceipts) + a, err := blockadt.AsArray(cs.ActorStore(ctx), b.ParentMessageReceipts) if err != nil { return nil, xerrors.Errorf("amt load: %w", err) } @@ -1237,16 +1210,26 @@ func (cs *ChainStore) LoadSignedMessagesFromCids(cids []cid.Cid) ([]*types.Signe return msgs, nil } -func (cs *ChainStore) Blockstore() bstore.Blockstore { - return cs.bs +// ChainBlockstore returns the chain blockstore. 
Currently the chain and state +// stores are both backed by the same physical store, albeit with different +// caching policies, but in the future they will segregate. +func (cs *ChainStore) ChainBlockstore() bstore.Blockstore { + return cs.chainBlockstore +} + +// StateBlockstore returns the state blockstore. Currently the chain and state +// stores are both backed by the same physical store, albeit with different +// caching policies, but in the future they will segregate. +func (cs *ChainStore) StateBlockstore() bstore.Blockstore { + return cs.stateBlockstore } func ActorStore(ctx context.Context, bs bstore.Blockstore) adt.Store { return adt.WrapStore(ctx, cbor.NewCborStore(bs)) } -func (cs *ChainStore) Store(ctx context.Context) adt.Store { - return ActorStore(ctx, cs.bs) +func (cs *ChainStore) ActorStore(ctx context.Context) adt.Store { + return ActorStore(ctx, cs.stateBlockstore) } func (cs *ChainStore) VMSys() vm.SyscallBuilder { @@ -1444,8 +1427,9 @@ func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, inclRecentRo return xerrors.Errorf("failed to write car header: %s", err) } - return cs.WalkSnapshot(ctx, ts, inclRecentRoots, skipOldMsgs, func(c cid.Cid) error { - blk, err := cs.bs.Get(c) + unionBs := bstore.Union(cs.stateBlockstore, cs.chainBlockstore) + return cs.WalkSnapshot(ctx, ts, inclRecentRoots, skipOldMsgs, true, func(c cid.Cid) error { + blk, err := unionBs.Get(c) if err != nil { return xerrors.Errorf("writing object to car, bs.Get: %w", err) } @@ -1458,7 +1442,7 @@ func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, inclRecentRo }) } -func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRecentRoots abi.ChainEpoch, skipOldMsgs bool, cb func(cid.Cid) error) error { +func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRecentRoots abi.ChainEpoch, skipOldMsgs, skipMsgReceipts bool, cb func(cid.Cid) error) error { if ts == nil { ts = cs.GetHeaviestTipSet() } @@
-1478,7 +1462,7 @@ func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRe return err } - data, err := cs.bs.Get(blk) + data, err := cs.chainBlockstore.Get(blk) if err != nil { return xerrors.Errorf("getting block: %w", err) } @@ -1498,7 +1482,7 @@ func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRe var cids []cid.Cid if !skipOldMsgs || b.Height > ts.Height()-inclRecentRoots { if walked.Visit(b.Messages) { - mcids, err := recurseLinks(cs.bs, walked, b.Messages, []cid.Cid{b.Messages}) + mcids, err := recurseLinks(cs.chainBlockstore, walked, b.Messages, []cid.Cid{b.Messages}) if err != nil { return xerrors.Errorf("recursing messages failed: %w", err) } @@ -1519,13 +1503,17 @@ func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRe if b.Height == 0 || b.Height > ts.Height()-inclRecentRoots { if walked.Visit(b.ParentStateRoot) { - cids, err := recurseLinks(cs.bs, walked, b.ParentStateRoot, []cid.Cid{b.ParentStateRoot}) + cids, err := recurseLinks(cs.stateBlockstore, walked, b.ParentStateRoot, []cid.Cid{b.ParentStateRoot}) if err != nil { return xerrors.Errorf("recursing genesis state failed: %w", err) } out = append(out, cids...) } + + if !skipMsgReceipts && walked.Visit(b.ParentMessageReceipts) { + out = append(out, b.ParentMessageReceipts) + } } for _, c := range out { @@ -1561,7 +1549,12 @@ func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRe } func (cs *ChainStore) Import(r io.Reader) (*types.TipSet, error) { - header, err := car.LoadCar(cs.Blockstore(), r) + // TODO: writing only to the state blockstore is incorrect. + // At this time, both the state and chain blockstores are backed by the + // universal store. When we physically segregate the stores, we will need + // to route state objects to the state blockstore, and chain objects to + // the chain blockstore. 
+ header, err := car.LoadCar(cs.StateBlockstore(), r) if err != nil { return nil, xerrors.Errorf("loadcar failed: %w", err) } diff --git a/chain/store/store_test.go b/chain/store/store_test.go index 5723b1380e4..51e2e08d0c9 100644 --- a/chain/store/store_test.go +++ b/chain/store/store_test.go @@ -11,12 +11,12 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/gen" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/lib/blockstore" "github.com/filecoin-project/lotus/node/repo" ) @@ -52,7 +52,7 @@ func BenchmarkGetRandomness(b *testing.B) { b.Fatal(err) } - bs, err := lr.Blockstore(context.TODO(), repo.BlockstoreChain) + bs, err := lr.Blockstore(context.TODO(), repo.UniversalBlockstore) if err != nil { b.Fatal(err) } @@ -104,7 +104,7 @@ func TestChainExportImport(t *testing.T) { t.Fatal(err) } - nbs := blockstore.NewTemporary() + nbs := blockstore.NewMemory() cs := store.NewChainStore(nbs, nbs, datastore.NewMapDatastore(), nil, nil) defer cs.Close() //nolint:errcheck @@ -139,7 +139,7 @@ func TestChainExportImportFull(t *testing.T) { t.Fatal(err) } - nbs := blockstore.NewTemporary() + nbs := blockstore.NewMemory() cs := store.NewChainStore(nbs, nbs, datastore.NewMapDatastore(), nil, nil) defer cs.Close() //nolint:errcheck diff --git a/chain/store/weight.go b/chain/store/weight.go index 9100df31547..42546d5e3d9 100644 --- a/chain/store/weight.go +++ b/chain/store/weight.go @@ -28,7 +28,7 @@ func (cs *ChainStore) Weight(ctx context.Context, ts *types.TipSet) (types.BigIn tpow := big2.Zero() { - cst := cbor.NewCborStore(cs.Blockstore()) + cst := cbor.NewCborStore(cs.StateBlockstore()) state, err := state.LoadStateTree(cst, 
ts.ParentState()) if err != nil { return types.NewInt(0), xerrors.Errorf("load state tree: %w", err) @@ -39,7 +39,7 @@ func (cs *ChainStore) Weight(ctx context.Context, ts *types.TipSet) (types.BigIn return types.NewInt(0), xerrors.Errorf("get power actor: %w", err) } - powState, err := power.Load(cs.Store(ctx), act) + powState, err := power.Load(cs.ActorStore(ctx), act) if err != nil { return types.NewInt(0), xerrors.Errorf("failed to load power actor state: %w", err) } diff --git a/chain/sub/incoming.go b/chain/sub/incoming.go index eeaa9af72f5..d1c6414a12d 100644 --- a/chain/sub/incoming.go +++ b/chain/sub/incoming.go @@ -7,13 +7,13 @@ import ( "time" address "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain" "github.com/filecoin-project/lotus/chain/messagepool" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/lib/blockstore" "github.com/filecoin-project/lotus/lib/sigs" "github.com/filecoin-project/lotus/metrics" "github.com/filecoin-project/lotus/node/impl/client" @@ -101,7 +101,7 @@ func HandleIncomingBlocks(ctx context.Context, bsub *pubsub.Subscription, s *cha []tag.Mutator{tag.Insert(metrics.MinerID, blk.Header.Miner.String())}, metrics.BlockDelay.M(delay), ) - log.Warnf("Received block with large delay %d from miner %s", delay, blk.Header.Miner) + log.Warnw("received block with large delay from miner", "block", blk.Cid(), "delay", delay, "miner", blk.Header.Miner) } if s.InformNewBlock(msg.ReceivedFrom, &types.FullBlock{ @@ -392,7 +392,7 @@ func (bv *BlockValidator) isChainNearSynced() bool { func (bv *BlockValidator) validateMsgMeta(ctx context.Context, msg *types.BlockMsg) error { // TODO there has to be a simpler way to do this without the blockstore dance // block headers use adt0 - store := 
blockadt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewTemporary())) + store := blockadt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewMemory())) bmArr := blockadt.MakeEmptyArray(store) smArr := blockadt.MakeEmptyArray(store) diff --git a/chain/sync.go b/chain/sync.go index 7d9c24d2646..88237eb5ab1 100644 --- a/chain/sync.go +++ b/chain/sync.go @@ -44,6 +44,7 @@ import ( proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" "github.com/filecoin-project/lotus/api" + bstore "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/builtin/power" "github.com/filecoin-project/lotus/chain/beacon" @@ -54,7 +55,6 @@ import ( "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" - bstore "github.com/filecoin-project/lotus/lib/blockstore" "github.com/filecoin-project/lotus/lib/sigs" "github.com/filecoin-project/lotus/metrics" ) @@ -321,7 +321,7 @@ func (syncer *Syncer) ValidateMsgMeta(fblk *types.FullBlock) error { // We use a temporary bstore here to avoid writing intermediate pieces // into the blockstore. - blockstore := bstore.NewTemporary() + blockstore := bstore.NewMemory() cst := cbor.NewCborStore(blockstore) var bcids, scids []cid.Cid @@ -354,7 +354,7 @@ func (syncer *Syncer) ValidateMsgMeta(fblk *types.FullBlock) error { } // Finally, flush. 
- return vm.Copy(context.TODO(), blockstore, syncer.store.Blockstore(), smroot) + return vm.Copy(context.TODO(), blockstore, syncer.store.ChainBlockstore(), smroot) } func (syncer *Syncer) LocalPeer() peer.ID { @@ -640,7 +640,7 @@ func (syncer *Syncer) minerIsValid(ctx context.Context, maddr address.Address, b return xerrors.Errorf("failed to load power actor: %w", err) } - powState, err := power.Load(syncer.store.Store(ctx), act) + powState, err := power.Load(syncer.store.ActorStore(ctx), act) if err != nil { return xerrors.Errorf("failed to load power actor state: %w", err) } @@ -1055,7 +1055,7 @@ func (syncer *Syncer) checkBlockMessages(ctx context.Context, b *types.FullBlock return err } - st, err := state.LoadStateTree(syncer.store.Store(ctx), stateroot) + st, err := state.LoadStateTree(syncer.store.ActorStore(ctx), stateroot) if err != nil { return xerrors.Errorf("failed to load base state tree: %w", err) } @@ -1102,7 +1102,7 @@ func (syncer *Syncer) checkBlockMessages(ctx context.Context, b *types.FullBlock } // Validate message arrays in a temporary blockstore. - tmpbs := bstore.NewTemporary() + tmpbs := bstore.NewMemory() tmpstore := blockadt.WrapStore(ctx, cbor.NewCborStore(tmpbs)) bmArr := blockadt.MakeEmptyArray(tmpstore) @@ -1172,7 +1172,7 @@ func (syncer *Syncer) checkBlockMessages(ctx context.Context, b *types.FullBlock } // Finally, flush. 
- return vm.Copy(ctx, tmpbs, syncer.store.Blockstore(), mrcid) + return vm.Copy(ctx, tmpbs, syncer.store.ChainBlockstore(), mrcid) } func (syncer *Syncer) verifyBlsAggregate(ctx context.Context, sig *crypto.Signature, msgs []cid.Cid, pubks [][]byte) error { @@ -1553,7 +1553,7 @@ func (syncer *Syncer) iterFullTipsets(ctx context.Context, headers []*types.TipS for bsi := 0; bsi < len(bstout); bsi++ { // temp storage so we don't persist data we dont want to - bs := bstore.NewTemporary() + bs := bstore.NewMemory() blks := cbor.NewCborStore(bs) this := headers[i-bsi] @@ -1574,7 +1574,7 @@ func (syncer *Syncer) iterFullTipsets(ctx context.Context, headers []*types.TipS return err } - if err := copyBlockstore(ctx, bs, syncer.store.Blockstore()); err != nil { + if err := copyBlockstore(ctx, bs, syncer.store.ChainBlockstore()); err != nil { return xerrors.Errorf("message processing failed: %w", err) } } diff --git a/chain/vm/vm.go b/chain/vm/vm.go index 522bc22988b..afc74e744f1 100644 --- a/chain/vm/vm.go +++ b/chain/vm/vm.go @@ -28,6 +28,7 @@ import ( "github.com/filecoin-project/go-state-types/exitcode" "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/aerrors" @@ -36,9 +37,6 @@ import ( "github.com/filecoin-project/lotus/chain/actors/builtin/reward" "github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/lib/blockstore" - bstore "github.com/filecoin-project/lotus/lib/blockstore" - "github.com/filecoin-project/lotus/lib/bufbstore" ) const MaxCallDepth = 4096 @@ -208,7 +206,7 @@ type VM struct { cstate *state.StateTree base cid.Cid cst *cbor.BasicIpldStore - buf *bufbstore.BufferedBS + buf *blockstore.BufferedBlockstore blockHeight abi.ChainEpoch areg *ActorRegistry rand Rand @@ -224,7 +222,7 @@ type 
VMOpts struct { StateBase cid.Cid Epoch abi.ChainEpoch Rand Rand - Bstore bstore.Blockstore + Bstore blockstore.Blockstore Syscalls SyscallBuilder CircSupplyCalc CircSupplyCalculator NtwkVersion NtwkVersionGetter // TODO: stebalien: In what cases do we actually need this? It seems like even when creating new networks we want to use the 'global'/build-default version getter @@ -233,7 +231,7 @@ type VMOpts struct { } func NewVM(ctx context.Context, opts *VMOpts) (*VM, error) { - buf := bufbstore.NewBufferedBstore(opts.Bstore) + buf := blockstore.NewBuffered(opts.Bstore) cst := cbor.NewCborStore(buf) state, err := state.LoadStateTree(cst, opts.StateBase) if err != nil { diff --git a/cli/auth.go b/cli/auth.go index ba20b2bccf6..2f41b38d15f 100644 --- a/cli/auth.go +++ b/cli/auth.go @@ -9,6 +9,7 @@ import ( "github.com/filecoin-project/go-jsonrpc/auth" "github.com/filecoin-project/lotus/api/apistruct" + cliutil "github.com/filecoin-project/lotus/cli/util" "github.com/filecoin-project/lotus/node/repo" ) @@ -127,7 +128,7 @@ var authApiInfoToken = &cli.Command{ // TODO: Log in audit log when it is implemented - fmt.Printf("%s=%s:%s\n", envForRepo(t), string(token), ainfo.Addr) + fmt.Printf("%s=%s:%s\n", cliutil.EnvForRepo(t), string(token), ainfo.Addr) return nil }, } diff --git a/cli/backup.go b/cli/backup.go index 1ee4157278c..856e098dd36 100644 --- a/cli/backup.go +++ b/cli/backup.go @@ -51,7 +51,10 @@ func BackupCmd(repoFlag string, rt repo.RepoType, getApi BackupApiFn) *cli.Comma return xerrors.Errorf("getting metadata datastore: %w", err) } - bds := backupds.Wrap(mds) + bds, err := backupds.Wrap(mds, backupds.NoLogdir) + if err != nil { + return err + } fpath, err := homedir.Expand(cctx.Args().First()) if err != nil { diff --git a/cli/chain.go b/cli/chain.go index 539ad1a7968..e86cac9b339 100644 --- a/cli/chain.go +++ b/cli/chain.go @@ -642,7 +642,10 @@ var chainListCmd = &cli.Command{ gasUsed += r.GasUsed } - fmt.Printf("\ttipset: \t%d msgs, %d / %d (%0.2f%%)\n", 
len(msgs), gasUsed, limitSum, 100*float64(gasUsed)/float64(limitSum)) + gasEfficiency := 100 * float64(gasUsed) / float64(limitSum) + gasCapacity := 100 * float64(limitSum) / float64(build.BlockGasLimit) + + fmt.Printf("\ttipset: \t%d msgs, %d (%0.2f%%) / %d (%0.2f%%)\n", len(msgs), gasUsed, gasEfficiency, limitSum, gasCapacity) } fmt.Println() } diff --git a/cli/client.go b/cli/client.go index 60729f2e5a4..98f4b022927 100644 --- a/cli/client.go +++ b/cli/client.go @@ -1195,6 +1195,11 @@ var clientListAsksCmd = &cli.Command{ &cli.BoolFlag{ Name: "by-ping", }, + &cli.StringFlag{ + Name: "output-format", + Value: "text", + Usage: "Either 'text' or 'csv'", + }, }, Action: func(cctx *cli.Context) error { api, closer, err := GetFullNodeAPI(cctx) @@ -1214,11 +1219,16 @@ var clientListAsksCmd = &cli.Command{ return asks[i].Ping < asks[j].Ping }) } + pfmt := "%s: min:%s max:%s price:%s/GiB/Epoch verifiedPrice:%s/GiB/Epoch ping:%s\n" + if cctx.String("output-format") == "csv" { + fmt.Printf("Miner,Min,Max,Price,VerifiedPrice,Ping\n") + pfmt = "%s,%s,%s,%s,%s,%s\n" + } for _, a := range asks { ask := a.Ask - fmt.Printf("%s: min:%s max:%s price:%s/GiB/Epoch verifiedPrice:%s/GiB/Epoch ping:%s\n", ask.Miner, + fmt.Printf(pfmt, ask.Miner, types.SizeStr(types.NewInt(uint64(ask.MinPieceSize))), types.SizeStr(types.NewInt(uint64(ask.MaxPieceSize))), types.FIL(ask.Price), @@ -1237,7 +1247,13 @@ type QueriedAsk struct { } func GetAsks(ctx context.Context, api lapi.FullNode) ([]QueriedAsk, error) { - color.Blue(".. getting miner list") + isTTY := true + if fileInfo, _ := os.Stdout.Stat(); (fileInfo.Mode() & os.ModeCharDevice) == 0 { + isTTY = false + } + if isTTY { + color.Blue(".. 
getting miner list") + } miners, err := api.StateListMiners(ctx, types.EmptyTSK) if err != nil { return nil, xerrors.Errorf("getting miner list: %w", err) @@ -1282,14 +1298,18 @@ loop: for { select { case <-time.After(150 * time.Millisecond): - fmt.Printf("\r* Found %d miners with power", atomic.LoadInt64(&found)) + if isTTY { + fmt.Printf("\r* Found %d miners with power", atomic.LoadInt64(&found)) + } case <-done: break loop } } - fmt.Printf("\r* Found %d miners with power\n", atomic.LoadInt64(&found)) + if isTTY { + fmt.Printf("\r* Found %d miners with power\n", atomic.LoadInt64(&found)) - color.Blue(".. querying asks") + color.Blue(".. querying asks") + } var asks []QueriedAsk var queried, got int64 @@ -1349,12 +1369,16 @@ loop2: for { select { case <-time.After(150 * time.Millisecond): - fmt.Printf("\r* Queried %d asks, got %d responses", atomic.LoadInt64(&queried), atomic.LoadInt64(&got)) + if isTTY { + fmt.Printf("\r* Queried %d asks, got %d responses", atomic.LoadInt64(&queried), atomic.LoadInt64(&got)) + } case <-done: break loop2 } } - fmt.Printf("\r* Queried %d asks, got %d responses\n", atomic.LoadInt64(&queried), atomic.LoadInt64(&got)) + if isTTY { + fmt.Printf("\r* Queried %d asks, got %d responses\n", atomic.LoadInt64(&queried), atomic.LoadInt64(&got)) + } sort.Slice(asks, func(i, j int) bool { return asks[i].Ask.Price.LessThan(asks[j].Ask.Price) diff --git a/cli/cmd.go b/cli/cmd.go index 12ea768554f..19006702f01 100644 --- a/cli/cmd.go +++ b/cli/cmd.go @@ -1,34 +1,17 @@ package cli import ( - "context" - "fmt" - "net/http" - "net/url" - "os" - "os/signal" "strings" - "syscall" logging "github.com/ipfs/go-log/v2" - "github.com/mitchellh/go-homedir" "github.com/urfave/cli/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-jsonrpc" "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/client" cliutil "github.com/filecoin-project/lotus/cli/util" - "github.com/filecoin-project/lotus/node/repo" ) var log = 
logging.Logger("cli") -const ( - metadataTraceContext = "traceContext" -) - // custom CLI error type ErrCmdFailed struct { @@ -46,253 +29,31 @@ func NewCliError(s string) error { // ApiConnector returns API instance type ApiConnector func() api.FullNode -// The flag passed on the command line with the listen address of the API -// server (only used by the tests) -func flagForAPI(t repo.RepoType) string { - switch t { - case repo.FullNode: - return "api-url" - case repo.StorageMiner: - return "miner-api-url" - case repo.Worker: - return "worker-api-url" - default: - panic(fmt.Sprintf("Unknown repo type: %v", t)) - } -} - -func flagForRepo(t repo.RepoType) string { - switch t { - case repo.FullNode: - return "repo" - case repo.StorageMiner: - return "miner-repo" - case repo.Worker: - return "worker-repo" - default: - panic(fmt.Sprintf("Unknown repo type: %v", t)) - } -} - -func envForRepo(t repo.RepoType) string { - switch t { - case repo.FullNode: - return "FULLNODE_API_INFO" - case repo.StorageMiner: - return "MINER_API_INFO" - case repo.Worker: - return "WORKER_API_INFO" - default: - panic(fmt.Sprintf("Unknown repo type: %v", t)) - } -} - -// TODO remove after deprecation period -func envForRepoDeprecation(t repo.RepoType) string { - switch t { - case repo.FullNode: - return "FULLNODE_API_INFO" - case repo.StorageMiner: - return "STORAGE_API_INFO" - case repo.Worker: - return "WORKER_API_INFO" - default: - panic(fmt.Sprintf("Unknown repo type: %v", t)) - } -} - -func GetAPIInfo(ctx *cli.Context, t repo.RepoType) (cliutil.APIInfo, error) { - // Check if there was a flag passed with the listen address of the API - // server (only used by the tests) - apiFlag := flagForAPI(t) - if ctx.IsSet(apiFlag) { - strma := ctx.String(apiFlag) - strma = strings.TrimSpace(strma) - - return cliutil.APIInfo{Addr: strma}, nil - } - - envKey := envForRepo(t) - env, ok := os.LookupEnv(envKey) - if !ok { - // TODO remove after deprecation period - envKey = envForRepoDeprecation(t) - 
env, ok = os.LookupEnv(envKey) - if ok { - log.Warnf("Use deprecation env(%s) value, please use env(%s) instead.", envKey, envForRepo(t)) - } - } - if ok { - return cliutil.ParseApiInfo(env), nil - } - - repoFlag := flagForRepo(t) - - p, err := homedir.Expand(ctx.String(repoFlag)) - if err != nil { - return cliutil.APIInfo{}, xerrors.Errorf("could not expand home dir (%s): %w", repoFlag, err) - } - - r, err := repo.NewFS(p) - if err != nil { - return cliutil.APIInfo{}, xerrors.Errorf("could not open repo at path: %s; %w", p, err) - } - - ma, err := r.APIEndpoint() - if err != nil { - return cliutil.APIInfo{}, xerrors.Errorf("could not get api endpoint: %w", err) - } - - token, err := r.APIToken() - if err != nil { - log.Warnf("Couldn't load CLI token, capabilities may be limited: %v", err) - } - - return cliutil.APIInfo{ - Addr: ma.String(), - Token: token, - }, nil -} - -func GetRawAPI(ctx *cli.Context, t repo.RepoType) (string, http.Header, error) { - ainfo, err := GetAPIInfo(ctx, t) - if err != nil { - return "", nil, xerrors.Errorf("could not get API info: %w", err) +func GetFullNodeServices(ctx *cli.Context) (ServicesAPI, error) { + if tn, ok := ctx.App.Metadata["test-services"]; ok { + return tn.(ServicesAPI), nil } - addr, err := ainfo.DialArgs() + api, c, err := GetFullNodeAPI(ctx) if err != nil { - return "", nil, xerrors.Errorf("could not get DialArgs: %w", err) + return nil, err } - return addr, ainfo.AuthHeader(), nil + return &ServicesImpl{api: api, closer: c}, nil } -func GetAPI(ctx *cli.Context) (api.Common, jsonrpc.ClientCloser, error) { - ti, ok := ctx.App.Metadata["repoType"] - if !ok { - log.Errorf("unknown repo type, are you sure you want to use GetAPI?") - ti = repo.FullNode - } - t, ok := ti.(repo.RepoType) - if !ok { - log.Errorf("repoType type does not match the type of repo.RepoType") - } +var GetAPIInfo = cliutil.GetAPIInfo +var GetRawAPI = cliutil.GetRawAPI +var GetAPI = cliutil.GetAPI - if tn, ok := ctx.App.Metadata["testnode-storage"]; 
ok { - return tn.(api.StorageMiner), func() {}, nil - } - if tn, ok := ctx.App.Metadata["testnode-full"]; ok { - return tn.(api.FullNode), func() {}, nil - } +var DaemonContext = cliutil.DaemonContext +var ReqContext = cliutil.ReqContext - addr, headers, err := GetRawAPI(ctx, t) - if err != nil { - return nil, nil, err - } +var GetFullNodeAPI = cliutil.GetFullNodeAPI +var GetGatewayAPI = cliutil.GetGatewayAPI - return client.NewCommonRPC(ctx.Context, addr, headers) -} - -func GetFullNodeAPI(ctx *cli.Context) (api.FullNode, jsonrpc.ClientCloser, error) { - if tn, ok := ctx.App.Metadata["testnode-full"]; ok { - return tn.(api.FullNode), func() {}, nil - } - - addr, headers, err := GetRawAPI(ctx, repo.FullNode) - if err != nil { - return nil, nil, err - } - - return client.NewFullNodeRPC(ctx.Context, addr, headers) -} - -type GetStorageMinerOptions struct { - PreferHttp bool -} - -type GetStorageMinerOption func(*GetStorageMinerOptions) - -func StorageMinerUseHttp(opts *GetStorageMinerOptions) { - opts.PreferHttp = true -} - -func GetStorageMinerAPI(ctx *cli.Context, opts ...GetStorageMinerOption) (api.StorageMiner, jsonrpc.ClientCloser, error) { - var options GetStorageMinerOptions - for _, opt := range opts { - opt(&options) - } - - if tn, ok := ctx.App.Metadata["testnode-storage"]; ok { - return tn.(api.StorageMiner), func() {}, nil - } - - addr, headers, err := GetRawAPI(ctx, repo.StorageMiner) - if err != nil { - return nil, nil, err - } - - if options.PreferHttp { - u, err := url.Parse(addr) - if err != nil { - return nil, nil, xerrors.Errorf("parsing miner api URL: %w", err) - } - - switch u.Scheme { - case "ws": - u.Scheme = "http" - case "wss": - u.Scheme = "https" - } - - addr = u.String() - } - - return client.NewStorageMinerRPC(ctx.Context, addr, headers) -} - -func GetWorkerAPI(ctx *cli.Context) (api.WorkerAPI, jsonrpc.ClientCloser, error) { - addr, headers, err := GetRawAPI(ctx, repo.Worker) - if err != nil { - return nil, nil, err - } - - return 
client.NewWorkerRPC(ctx.Context, addr, headers) -} - -func GetGatewayAPI(ctx *cli.Context) (api.GatewayAPI, jsonrpc.ClientCloser, error) { - addr, headers, err := GetRawAPI(ctx, repo.FullNode) - if err != nil { - return nil, nil, err - } - - return client.NewGatewayRPC(ctx.Context, addr, headers) -} - -func DaemonContext(cctx *cli.Context) context.Context { - if mtCtx, ok := cctx.App.Metadata[metadataTraceContext]; ok { - return mtCtx.(context.Context) - } - - return context.Background() -} - -// ReqContext returns context for cli execution. Calling it for the first time -// installs SIGTERM handler that will close returned context. -// Not safe for concurrent execution. -func ReqContext(cctx *cli.Context) context.Context { - tCtx := DaemonContext(cctx) - - ctx, done := context.WithCancel(tCtx) - sigChan := make(chan os.Signal, 2) - go func() { - <-sigChan - done() - }() - signal.Notify(sigChan, syscall.SIGTERM, syscall.SIGINT, syscall.SIGHUP) - - return ctx -} +var GetStorageMinerAPI = cliutil.GetStorageMinerAPI +var GetWorkerAPI = cliutil.GetWorkerAPI var CommonCommands = []*cli.Command{ netCmd, diff --git a/cli/multisig.go b/cli/multisig.go index c3a062ed49b..f6caa6ee034 100644 --- a/cli/multisig.go +++ b/cli/multisig.go @@ -29,7 +29,7 @@ import ( init2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/init" msig2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/multisig" - "github.com/filecoin-project/lotus/api/apibstore" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin/multisig" @@ -202,7 +202,7 @@ var msigInspectCmd = &cli.Command{ defer closer() ctx := ReqContext(cctx) - store := adt.WrapStore(ctx, cbor.NewCborStore(apibstore.NewAPIBlockstore(api))) + store := adt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewAPIBlockstore(api))) maddr, err := address.NewFromString(cctx.Args().First()) if 
err != nil { @@ -1275,7 +1275,7 @@ var msigLockApproveCmd = &cli.Command{ params, actErr := actors.SerializeParams(&msig2.LockBalanceParams{ StartEpoch: abi.ChainEpoch(start), UnlockDuration: abi.ChainEpoch(duration), - Amount: abi.NewTokenAmount(amount.Int64()), + Amount: big.Int(amount), }) if actErr != nil { @@ -1367,7 +1367,7 @@ var msigLockCancelCmd = &cli.Command{ params, actErr := actors.SerializeParams(&msig2.LockBalanceParams{ StartEpoch: abi.ChainEpoch(start), UnlockDuration: abi.ChainEpoch(duration), - Amount: abi.NewTokenAmount(amount.Int64()), + Amount: big.Int(amount), }) if actErr != nil { diff --git a/cli/net.go b/cli/net.go index 56f0bf5f98f..de9dbd76706 100644 --- a/cli/net.go +++ b/cli/net.go @@ -48,6 +48,11 @@ var NetPeers = &cli.Command{ Aliases: []string{"a"}, Usage: "Print agent name", }, + &cli.BoolFlag{ + Name: "extended", + Aliases: []string{"x"}, + Usage: "Print extended peer information in json", + }, }, Action: func(cctx *cli.Context) error { api, closer, err := GetAPI(cctx) @@ -65,18 +70,42 @@ var NetPeers = &cli.Command{ return strings.Compare(string(peers[i].ID), string(peers[j].ID)) > 0 }) - for _, peer := range peers { - var agent string - if cctx.Bool("agent") { - agent, err = api.NetAgentVersion(ctx, peer.ID) + if cctx.Bool("extended") { + // deduplicate + seen := make(map[peer.ID]struct{}) + + for _, peer := range peers { + _, dup := seen[peer.ID] + if dup { + continue + } + seen[peer.ID] = struct{}{} + + info, err := api.NetPeerInfo(ctx, peer.ID) if err != nil { - log.Warnf("getting agent version: %s", err) + log.Warnf("error getting extended peer info: %s", err) } else { - agent = ", " + agent + bytes, err := json.Marshal(&info) + if err != nil { + log.Warnf("error marshalling extended peer info: %s", err) + } else { + fmt.Println(string(bytes)) + } } } - - fmt.Printf("%s, %s%s\n", peer.ID, peer.Addrs, agent) + } else { + for _, peer := range peers { + var agent string + if cctx.Bool("agent") { + agent, err = 
api.NetAgentVersion(ctx, peer.ID) + if err != nil { + log.Warnf("getting agent version: %s", err) + } else { + agent = ", " + agent + } + } + fmt.Printf("%s, %s%s\n", peer.ID, peer.Addrs, agent) + } } return nil @@ -88,8 +117,9 @@ var netScores = &cli.Command{ Usage: "Print peers' pubsub scores", Flags: []cli.Flag{ &cli.BoolFlag{ - Name: "extended", - Usage: "print extended peer scores in json", + Name: "extended", + Aliases: []string{"x"}, + Usage: "print extended peer scores in json", }, }, Action: func(cctx *cli.Context) error { diff --git a/cli/paych_test.go b/cli/paych_test.go index dac8411c5df..44d0a41e7a0 100644 --- a/cli/paych_test.go +++ b/cli/paych_test.go @@ -20,8 +20,8 @@ import ( cbor "github.com/ipfs/go-ipld-cbor" "github.com/stretchr/testify/require" - "github.com/filecoin-project/lotus/api/apibstore" "github.com/filecoin-project/lotus/api/test" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/events" "github.com/filecoin-project/lotus/chain/types" @@ -400,7 +400,7 @@ func getPaychState(ctx context.Context, t *testing.T, node test.TestNode, chAddr act, err := node.StateGetActor(ctx, chAddr, types.EmptyTSK) require.NoError(t, err) - store := cbor.NewCborStore(apibstore.NewAPIBlockstore(node)) + store := cbor.NewCborStore(blockstore.NewAPIBlockstore(node)) chState, err := paych.Load(adt.WrapStore(ctx, store), act) require.NoError(t, err) diff --git a/cli/send.go b/cli/send.go index d15dd5fb226..daf73ccad1b 100644 --- a/cli/send.go +++ b/cli/send.go @@ -1,22 +1,17 @@ package cli import ( - "bytes" - "context" "encoding/hex" - "encoding/json" + "errors" "fmt" - "reflect" "github.com/urfave/cli/v2" - cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/actors/builtin" - 
"github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/types" ) @@ -72,15 +67,16 @@ var sendCmd = &cli.Command{ return ShowHelp(cctx, fmt.Errorf("'send' expects two arguments, target and amount")) } - api, closer, err := GetFullNodeAPI(cctx) + srv, err := GetFullNodeServices(cctx) if err != nil { return err } - defer closer() + defer srv.Close() //nolint:errcheck ctx := ReqContext(cctx) + var params SendParams - toAddr, err := address.NewFromString(cctx.Args().Get(0)) + params.To, err = address.NewFromString(cctx.Args().Get(0)) if err != nil { return ShowHelp(cctx, fmt.Errorf("failed to parse target address: %w", err)) } @@ -89,123 +85,75 @@ var sendCmd = &cli.Command{ if err != nil { return ShowHelp(cctx, fmt.Errorf("failed to parse amount: %w", err)) } + params.Val = abi.TokenAmount(val) - var fromAddr address.Address - if from := cctx.String("from"); from == "" { - defaddr, err := api.WalletDefaultAddress(ctx) + if from := cctx.String("from"); from != "" { + addr, err := address.NewFromString(from) if err != nil { return err } - fromAddr = defaddr - } else { - addr, err := address.NewFromString(from) + params.From = addr + } + + if cctx.IsSet("gas-premium") { + gp, err := types.BigFromString(cctx.String("gas-premium")) if err != nil { return err } - - fromAddr = addr + params.GasPremium = &gp } - gp, err := types.BigFromString(cctx.String("gas-premium")) - if err != nil { - return err + if cctx.IsSet("gas-feecap") { + gfc, err := types.BigFromString(cctx.String("gas-feecap")) + if err != nil { + return err + } + params.GasFeeCap = &gfc } - gfc, err := types.BigFromString(cctx.String("gas-feecap")) - if err != nil { - return err + + if cctx.IsSet("gas-limit") { + limit := cctx.Int64("gas-limit") + params.GasLimit = &limit } - method := abi.MethodNum(cctx.Uint64("method")) + params.Method = abi.MethodNum(cctx.Uint64("method")) - var params []byte if cctx.IsSet("params-json") { - decparams, err := decodeTypedParams(ctx, api, 
toAddr, method, cctx.String("params-json")) + decparams, err := srv.DecodeTypedParamsFromJSON(ctx, params.To, params.Method, cctx.String("params-json")) if err != nil { return fmt.Errorf("failed to decode json params: %w", err) } - params = decparams + params.Params = decparams } if cctx.IsSet("params-hex") { - if params != nil { + if params.Params != nil { return fmt.Errorf("can only specify one of 'params-json' and 'params-hex'") } decparams, err := hex.DecodeString(cctx.String("params-hex")) if err != nil { return fmt.Errorf("failed to decode hex params: %w", err) } - params = decparams + params.Params = decparams } - msg := &types.Message{ - From: fromAddr, - To: toAddr, - Value: types.BigInt(val), - GasPremium: gp, - GasFeeCap: gfc, - GasLimit: cctx.Int64("gas-limit"), - Method: method, - Params: params, - } - - if !cctx.Bool("force") { - // Funds insufficient check - fromBalance, err := api.WalletBalance(ctx, msg.From) - if err != nil { - return err - } - totalCost := types.BigAdd(types.BigMul(msg.GasFeeCap, types.NewInt(uint64(msg.GasLimit))), msg.Value) + params.Force = cctx.Bool("force") - if fromBalance.LessThan(totalCost) { - fmt.Printf("WARNING: From balance %s less than total cost %s\n", types.FIL(fromBalance), types.FIL(totalCost)) - return fmt.Errorf("--force must be specified for this action to have an effect; you have been warned") - } + if cctx.IsSet("nonce") { + n := cctx.Uint64("nonce") + params.Nonce = &n } - if cctx.IsSet("nonce") { - msg.Nonce = cctx.Uint64("nonce") - sm, err := api.WalletSignMessage(ctx, fromAddr, msg) - if err != nil { - return err - } + msgCid, err := srv.Send(ctx, params) - _, err = api.MpoolPush(ctx, sm) - if err != nil { - return err - } - fmt.Println(sm.Cid()) - } else { - sm, err := api.MpoolPushMessage(ctx, msg, nil) - if err != nil { - return err + if err != nil { + if errors.Is(err, ErrSendBalanceTooLow) { + return fmt.Errorf("--force must be specified for this action to have an effect; you have been warned: %w", 
err) } - fmt.Println(sm.Cid()) + return xerrors.Errorf("executing send: %w", err) } + fmt.Fprintf(cctx.App.Writer, "%s\n", msgCid) return nil }, } - -func decodeTypedParams(ctx context.Context, fapi api.FullNode, to address.Address, method abi.MethodNum, paramstr string) ([]byte, error) { - act, err := fapi.StateGetActor(ctx, to, types.EmptyTSK) - if err != nil { - return nil, err - } - - methodMeta, found := stmgr.MethodsMap[act.Code][method] - if !found { - return nil, fmt.Errorf("method %d not found on actor %s", method, act.Code) - } - - p := reflect.New(methodMeta.Params.Elem()).Interface().(cbg.CBORMarshaler) - - if err := json.Unmarshal([]byte(paramstr), p); err != nil { - return nil, fmt.Errorf("unmarshaling input into params type: %w", err) - } - - buf := new(bytes.Buffer) - if err := p.MarshalCBOR(buf); err != nil { - return nil, err - } - return buf.Bytes(), nil -} diff --git a/cli/send_test.go b/cli/send_test.go new file mode 100644 index 00000000000..ff258346aab --- /dev/null +++ b/cli/send_test.go @@ -0,0 +1,128 @@ +package cli + +import ( + "bytes" + "errors" + "testing" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + types "github.com/filecoin-project/lotus/chain/types" + gomock "github.com/golang/mock/gomock" + cid "github.com/ipfs/go-cid" + "github.com/stretchr/testify/assert" + ucli "github.com/urfave/cli/v2" +) + +var arbtCid = (&types.Message{ + From: mustAddr(address.NewIDAddress(2)), + To: mustAddr(address.NewIDAddress(1)), + Value: types.NewInt(1000), +}).Cid() + +func mustAddr(a address.Address, err error) address.Address { + if err != nil { + panic(err) + } + return a +} + +func newMockApp(t *testing.T, cmd *ucli.Command) (*ucli.App, *MockServicesAPI, *bytes.Buffer, func()) { + app := ucli.NewApp() + app.Commands = ucli.Commands{cmd} + app.Setup() + + mockCtrl := gomock.NewController(t) + mockSrvcs := NewMockServicesAPI(mockCtrl) + app.Metadata["test-services"] = mockSrvcs + + buf := 
&bytes.Buffer{} + app.Writer = buf + + return app, mockSrvcs, buf, mockCtrl.Finish +} + +func TestSendCLI(t *testing.T) { + oneFil := abi.TokenAmount(types.MustParseFIL("1")) + + t.Run("simple", func(t *testing.T) { + app, mockSrvcs, buf, done := newMockApp(t, sendCmd) + defer done() + + gomock.InOrder( + mockSrvcs.EXPECT().Send(gomock.Any(), SendParams{ + To: mustAddr(address.NewIDAddress(1)), + Val: oneFil, + }).Return(arbtCid, nil), + mockSrvcs.EXPECT().Close(), + ) + err := app.Run([]string{"lotus", "send", "t01", "1"}) + assert.NoError(t, err) + assert.EqualValues(t, arbtCid.String()+"\n", buf.String()) + }) + t.Run("ErrSendBalanceTooLow", func(t *testing.T) { + app, mockSrvcs, _, done := newMockApp(t, sendCmd) + defer done() + + gomock.InOrder( + mockSrvcs.EXPECT().Send(gomock.Any(), SendParams{ + To: mustAddr(address.NewIDAddress(1)), + Val: oneFil, + }).Return(cid.Undef, ErrSendBalanceTooLow), + mockSrvcs.EXPECT().Close(), + ) + err := app.Run([]string{"lotus", "send", "t01", "1"}) + assert.ErrorIs(t, err, ErrSendBalanceTooLow) + }) + t.Run("generic-err-is-forwarded", func(t *testing.T) { + app, mockSrvcs, _, done := newMockApp(t, sendCmd) + defer done() + + errMark := errors.New("something") + gomock.InOrder( + mockSrvcs.EXPECT().Send(gomock.Any(), SendParams{ + To: mustAddr(address.NewIDAddress(1)), + Val: oneFil, + }).Return(cid.Undef, errMark), + mockSrvcs.EXPECT().Close(), + ) + err := app.Run([]string{"lotus", "send", "t01", "1"}) + assert.ErrorIs(t, err, errMark) + }) + + t.Run("from-specific", func(t *testing.T) { + app, mockSrvcs, buf, done := newMockApp(t, sendCmd) + defer done() + + gomock.InOrder( + mockSrvcs.EXPECT().Send(gomock.Any(), SendParams{ + To: mustAddr(address.NewIDAddress(1)), + From: mustAddr(address.NewIDAddress(2)), + Val: oneFil, + }).Return(arbtCid, nil), + mockSrvcs.EXPECT().Close(), + ) + err := app.Run([]string{"lotus", "send", "--from=t02", "t01", "1"}) + assert.NoError(t, err) + assert.EqualValues(t, arbtCid.String()+"\n", 
buf.String()) + }) + + t.Run("nonce-specific", func(t *testing.T) { + app, mockSrvcs, buf, done := newMockApp(t, sendCmd) + defer done() + zero := uint64(0) + + gomock.InOrder( + mockSrvcs.EXPECT().Send(gomock.Any(), SendParams{ + To: mustAddr(address.NewIDAddress(1)), + Nonce: &zero, + Val: oneFil, + }).Return(arbtCid, nil), + mockSrvcs.EXPECT().Close(), + ) + err := app.Run([]string{"lotus", "send", "--nonce=0", "t01", "1"}) + assert.NoError(t, err) + assert.EqualValues(t, arbtCid.String()+"\n", buf.String()) + }) + +} diff --git a/cli/services.go b/cli/services.go new file mode 100644 index 00000000000..069bed81159 --- /dev/null +++ b/cli/services.go @@ -0,0 +1,166 @@ +package cli + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "reflect" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-jsonrpc" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/stmgr" + types "github.com/filecoin-project/lotus/chain/types" + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" +) + +//go:generate go run github.com/golang/mock/mockgen -destination=servicesmock_test.go -package=cli -self_package github.com/filecoin-project/lotus/cli . 
ServicesAPI + +type ServicesAPI interface { + // Sends executes a send given SendParams + Send(ctx context.Context, params SendParams) (cid.Cid, error) + // DecodeTypedParamsFromJSON takes in information needed to identify a method and converts JSON + // parameters to bytes of their CBOR encoding + DecodeTypedParamsFromJSON(ctx context.Context, to address.Address, method abi.MethodNum, paramstr string) ([]byte, error) + + // Close ends the session of services and disconnects from RPC, using Services after Close is called + // most likely will result in an error + // Should not be called concurrently + Close() error +} + +type ServicesImpl struct { + api api.FullNode + closer jsonrpc.ClientCloser +} + +func (s *ServicesImpl) Close() error { + if s.closer == nil { + return xerrors.Errorf("Services already closed") + } + s.closer() + s.closer = nil + return nil +} + +func (s *ServicesImpl) DecodeTypedParamsFromJSON(ctx context.Context, to address.Address, method abi.MethodNum, paramstr string) ([]byte, error) { + act, err := s.api.StateGetActor(ctx, to, types.EmptyTSK) + if err != nil { + return nil, err + } + + methodMeta, found := stmgr.MethodsMap[act.Code][method] + if !found { + return nil, fmt.Errorf("method %d not found on actor %s", method, act.Code) + } + + p := reflect.New(methodMeta.Params.Elem()).Interface().(cbg.CBORMarshaler) + + if err := json.Unmarshal([]byte(paramstr), p); err != nil { + return nil, fmt.Errorf("unmarshaling input into params type: %w", err) + } + + buf := new(bytes.Buffer) + if err := p.MarshalCBOR(buf); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +type SendParams struct { + To address.Address + From address.Address + Val abi.TokenAmount + + GasPremium *abi.TokenAmount + GasFeeCap *abi.TokenAmount + GasLimit *int64 + + Nonce *uint64 + Method abi.MethodNum + Params []byte + + Force bool +} + +// This is specialised Send for Send command +// There might be room for generic Send that other commands can use to send 
their messages +// We will see + +var ErrSendBalanceTooLow = errors.New("balance too low") + +func (s *ServicesImpl) Send(ctx context.Context, params SendParams) (cid.Cid, error) { + if params.From == address.Undef { + defaddr, err := s.api.WalletDefaultAddress(ctx) + if err != nil { + return cid.Undef, err + } + params.From = defaddr + } + + msg := &types.Message{ + From: params.From, + To: params.To, + Value: params.Val, + + Method: params.Method, + Params: params.Params, + } + + if params.GasPremium != nil { + msg.GasPremium = *params.GasPremium + } else { + msg.GasPremium = types.NewInt(0) + } + if params.GasFeeCap != nil { + msg.GasFeeCap = *params.GasFeeCap + } else { + msg.GasFeeCap = types.NewInt(0) + } + if params.GasLimit != nil { + msg.GasLimit = *params.GasLimit + } else { + msg.GasLimit = 0 + } + + if !params.Force { + // Funds insufficient check + fromBalance, err := s.api.WalletBalance(ctx, msg.From) + if err != nil { + return cid.Undef, err + } + totalCost := types.BigAdd(types.BigMul(msg.GasFeeCap, types.NewInt(uint64(msg.GasLimit))), msg.Value) + + if fromBalance.LessThan(totalCost) { + return cid.Undef, xerrors.Errorf("From balance %s less than total cost %s: %w", types.FIL(fromBalance), types.FIL(totalCost), ErrSendBalanceTooLow) + + } + } + + if params.Nonce != nil { + msg.Nonce = *params.Nonce + sm, err := s.api.WalletSignMessage(ctx, params.From, msg) + if err != nil { + return cid.Undef, err + } + + _, err = s.api.MpoolPush(ctx, sm) + if err != nil { + return cid.Undef, err + } + + return sm.Cid(), nil + } + + sm, err := s.api.MpoolPushMessage(ctx, msg, nil) + if err != nil { + return cid.Undef, err + } + + return sm.Cid(), nil +} diff --git a/cli/services_send_test.go b/cli/services_send_test.go new file mode 100644 index 00000000000..9dfc3b38a5f --- /dev/null +++ b/cli/services_send_test.go @@ -0,0 +1,266 @@ +package cli + +import ( + "context" + "fmt" + "testing" + + "github.com/filecoin-project/go-address" + 
"github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/mocks" + types "github.com/filecoin-project/lotus/chain/types" + gomock "github.com/golang/mock/gomock" + cid "github.com/ipfs/go-cid" + "github.com/stretchr/testify/assert" +) + +type markerKeyType struct{} + +var markerKey = markerKeyType{} + +type contextMatcher struct { + marker *int +} + +// Matches returns whether x is a match. +func (cm contextMatcher) Matches(x interface{}) bool { + ctx, ok := x.(context.Context) + if !ok { + return false + } + maybeMarker, ok := ctx.Value(markerKey).(*int) + if !ok { + return false + } + + return cm.marker == maybeMarker +} + +func (cm contextMatcher) String() string { + return fmt.Sprintf("Context with Value(%v/%T, %p)", markerKey, markerKey, cm.marker) +} + +func ContextWithMarker(ctx context.Context) (context.Context, gomock.Matcher) { + marker := new(int) + outCtx := context.WithValue(ctx, markerKey, marker) + return outCtx, contextMatcher{marker: marker} + +} + +func setupMockSrvcs(t *testing.T) (*ServicesImpl, *mocks.MockFullNode) { + mockCtrl := gomock.NewController(t) + + mockApi := mocks.NewMockFullNode(mockCtrl) + + srvcs := &ServicesImpl{ + api: mockApi, + closer: mockCtrl.Finish, + } + return srvcs, mockApi +} + +func fakeSign(msg *types.Message) *types.SignedMessage { + return &types.SignedMessage{ + Message: *msg, + Signature: crypto.Signature{Type: crypto.SigTypeSecp256k1, Data: make([]byte, 32)}, + } +} + +func makeMessageSigner() (*cid.Cid, interface{}) { + smCid := cid.Undef + return &smCid, + func(_ context.Context, msg *types.Message, _ *api.MessageSendSpec) (*types.SignedMessage, error) { + sm := fakeSign(msg) + smCid = sm.Cid() + return sm, nil + } +} + +type MessageMatcher SendParams + +var _ gomock.Matcher = MessageMatcher{} + +// Matches returns whether x is a match. 
+func (mm MessageMatcher) Matches(x interface{}) bool { + m, ok := x.(*types.Message) + if !ok { + return false + } + + if mm.From != address.Undef && mm.From != m.From { + return false + } + if mm.To != address.Undef && mm.To != m.To { + return false + } + + if types.BigCmp(mm.Val, m.Value) != 0 { + return false + } + + if mm.Nonce != nil && *mm.Nonce != m.Nonce { + return false + } + + if mm.GasPremium != nil && big.Cmp(*mm.GasPremium, m.GasPremium) != 0 { + return false + } + if mm.GasPremium == nil && m.GasPremium.Sign() != 0 { + return false + } + + if mm.GasFeeCap != nil && big.Cmp(*mm.GasFeeCap, m.GasFeeCap) != 0 { + return false + } + if mm.GasFeeCap == nil && m.GasFeeCap.Sign() != 0 { + return false + } + + if mm.GasLimit != nil && *mm.GasLimit != m.GasLimit { + return false + } + + if mm.GasLimit == nil && m.GasLimit != 0 { + return false + } + // handle rest of options + return true +} + +// String describes what the matcher matches. +func (mm MessageMatcher) String() string { + return fmt.Sprintf("%#v", SendParams(mm)) +} + +func TestSendService(t *testing.T) { + addrGen := address.NewForTestGetter() + a1 := addrGen() + a2 := addrGen() + + const balance = 10000 + + params := SendParams{ + From: a1, + To: a2, + Val: types.NewInt(balance - 100), + } + + ctx, ctxM := ContextWithMarker(context.Background()) + + t.Run("happy", func(t *testing.T) { + params := params + srvcs, mockApi := setupMockSrvcs(t) + defer srvcs.Close() //nolint:errcheck + msgCid, sign := makeMessageSigner() + gomock.InOrder( + mockApi.EXPECT().WalletBalance(ctxM, params.From).Return(types.NewInt(balance), nil), + mockApi.EXPECT().MpoolPushMessage(ctxM, MessageMatcher(params), nil).DoAndReturn(sign), + ) + + c, err := srvcs.Send(ctx, params) + assert.NoError(t, err) + assert.Equal(t, *msgCid, c) + }) + + t.Run("balance-too-low", func(t *testing.T) { + params := params + srvcs, mockApi := setupMockSrvcs(t) + defer srvcs.Close() //nolint:errcheck + gomock.InOrder( + 
mockApi.EXPECT().WalletBalance(ctxM, a1).Return(types.NewInt(balance-200), nil), + // no MpoolPushMessage + ) + + c, err := srvcs.Send(ctx, params) + assert.Equal(t, c, cid.Undef) + assert.ErrorIs(t, err, ErrSendBalanceTooLow) + }) + + t.Run("force", func(t *testing.T) { + params := params + params.Force = true + srvcs, mockApi := setupMockSrvcs(t) + defer srvcs.Close() //nolint:errcheck + msgCid, sign := makeMessageSigner() + gomock.InOrder( + mockApi.EXPECT().WalletBalance(ctxM, a1).Return(types.NewInt(balance-200), nil).AnyTimes(), + mockApi.EXPECT().MpoolPushMessage(ctxM, MessageMatcher(params), nil).DoAndReturn(sign), + ) + + c, err := srvcs.Send(ctx, params) + assert.NoError(t, err) + assert.Equal(t, *msgCid, c) + }) + + t.Run("default-from", func(t *testing.T) { + params := params + params.From = address.Undef + mm := MessageMatcher(params) + mm.From = a1 + + srvcs, mockApi := setupMockSrvcs(t) + defer srvcs.Close() //nolint:errcheck + msgCid, sign := makeMessageSigner() + gomock.InOrder( + mockApi.EXPECT().WalletDefaultAddress(ctxM).Return(a1, nil), + mockApi.EXPECT().WalletBalance(ctxM, a1).Return(types.NewInt(balance), nil), + mockApi.EXPECT().MpoolPushMessage(ctxM, mm, nil).DoAndReturn(sign), + ) + + c, err := srvcs.Send(ctx, params) + assert.NoError(t, err) + assert.Equal(t, *msgCid, c) + }) + + t.Run("set-nonce", func(t *testing.T) { + params := params + n := uint64(5) + params.Nonce = &n + mm := MessageMatcher(params) + + srvcs, mockApi := setupMockSrvcs(t) + defer srvcs.Close() //nolint:errcheck + _, _ = mm, mockApi + + var sm *types.SignedMessage + gomock.InOrder( + mockApi.EXPECT().WalletBalance(ctxM, a1).Return(types.NewInt(balance), nil), + mockApi.EXPECT().WalletSignMessage(ctxM, a1, mm).DoAndReturn( + func(_ context.Context, _ address.Address, msg *types.Message) (*types.SignedMessage, error) { + sm = fakeSign(msg) + + // now we expect MpoolPush with that SignedMessage + mockApi.EXPECT().MpoolPush(ctxM, sm).Return(sm.Cid(), nil) + return sm, 
nil + }), + ) + + c, err := srvcs.Send(ctx, params) + assert.NoError(t, err) + assert.Equal(t, sm.Cid(), c) + }) + + t.Run("gas-params", func(t *testing.T) { + params := params + limit := int64(1) + params.GasLimit = &limit + gfc := big.NewInt(100) + params.GasFeeCap = &gfc + gp := big.NewInt(10) + params.GasPremium = &gp + + srvcs, mockApi := setupMockSrvcs(t) + defer srvcs.Close() //nolint:errcheck + msgCid, sign := makeMessageSigner() + gomock.InOrder( + mockApi.EXPECT().WalletBalance(ctxM, params.From).Return(types.NewInt(balance), nil), + mockApi.EXPECT().MpoolPushMessage(ctxM, MessageMatcher(params), nil).DoAndReturn(sign), + ) + + c, err := srvcs.Send(ctx, params) + assert.NoError(t, err) + assert.Equal(t, *msgCid, c) + }) +} diff --git a/cli/servicesmock_test.go b/cli/servicesmock_test.go new file mode 100644 index 00000000000..48f1a95ec19 --- /dev/null +++ b/cli/servicesmock_test.go @@ -0,0 +1,81 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/filecoin-project/lotus/cli (interfaces: ServicesAPI) + +// Package cli is a generated GoMock package. 
+package cli + +import ( + context "context" + go_address "github.com/filecoin-project/go-address" + abi "github.com/filecoin-project/go-state-types/abi" + gomock "github.com/golang/mock/gomock" + go_cid "github.com/ipfs/go-cid" + reflect "reflect" +) + +// MockServicesAPI is a mock of ServicesAPI interface +type MockServicesAPI struct { + ctrl *gomock.Controller + recorder *MockServicesAPIMockRecorder +} + +// MockServicesAPIMockRecorder is the mock recorder for MockServicesAPI +type MockServicesAPIMockRecorder struct { + mock *MockServicesAPI +} + +// NewMockServicesAPI creates a new mock instance +func NewMockServicesAPI(ctrl *gomock.Controller) *MockServicesAPI { + mock := &MockServicesAPI{ctrl: ctrl} + mock.recorder = &MockServicesAPIMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockServicesAPI) EXPECT() *MockServicesAPIMockRecorder { + return m.recorder +} + +// Close mocks base method +func (m *MockServicesAPI) Close() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Close") + ret0, _ := ret[0].(error) + return ret0 +} + +// Close indicates an expected call of Close +func (mr *MockServicesAPIMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockServicesAPI)(nil).Close)) +} + +// DecodeTypedParamsFromJSON mocks base method +func (m *MockServicesAPI) DecodeTypedParamsFromJSON(arg0 context.Context, arg1 go_address.Address, arg2 abi.MethodNum, arg3 string) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DecodeTypedParamsFromJSON", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DecodeTypedParamsFromJSON indicates an expected call of DecodeTypedParamsFromJSON +func (mr *MockServicesAPIMockRecorder) DecodeTypedParamsFromJSON(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DecodeTypedParamsFromJSON", reflect.TypeOf((*MockServicesAPI)(nil).DecodeTypedParamsFromJSON), arg0, arg1, arg2, arg3) +} + +// Send mocks base method +func (m *MockServicesAPI) Send(arg0 context.Context, arg1 SendParams) (go_cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Send", arg0, arg1) + ret0, _ := ret[0].(go_cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Send indicates an expected call of Send +func (mr *MockServicesAPIMockRecorder) Send(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockServicesAPI)(nil).Send), arg0, arg1) +} diff --git a/cli/state.go b/cli/state.go index 47ce53a3cd9..df64c7ddf88 100644 --- a/cli/state.go +++ b/cli/state.go @@ -34,7 +34,7 @@ import ( "github.com/filecoin-project/lotus/api" lapi "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/apibstore" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/stmgr" @@ -1010,7 +1010,7 @@ var stateComputeStateCmd = &cli.Command{ } if cctx.Bool("html") { - st, err := state.LoadStateTree(cbor.NewCborStore(apibstore.NewAPIBlockstore(api)), stout.Root) + st, err := state.LoadStateTree(cbor.NewCborStore(blockstore.NewAPIBlockstore(api)), stout.Root) if err != nil { return xerrors.Errorf("loading state tree: %w", err) } diff --git a/cli/util/api.go b/cli/util/api.go new file mode 100644 index 00000000000..6a4982894f5 --- /dev/null +++ b/cli/util/api.go @@ -0,0 +1,274 @@ +package cliutil + +import ( + "context" + "fmt" + "net/http" + "net/url" + "os" + "os/signal" + "strings" + "syscall" + + "github.com/mitchellh/go-homedir" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-jsonrpc" + + "github.com/filecoin-project/lotus/api" + 
"github.com/filecoin-project/lotus/api/client" + "github.com/filecoin-project/lotus/node/repo" +) + +const ( + metadataTraceContext = "traceContext" +) + +// The flag passed on the command line with the listen address of the API +// server (only used by the tests) +func flagForAPI(t repo.RepoType) string { + switch t { + case repo.FullNode: + return "api-url" + case repo.StorageMiner: + return "miner-api-url" + case repo.Worker: + return "worker-api-url" + default: + panic(fmt.Sprintf("Unknown repo type: %v", t)) + } +} + +func flagForRepo(t repo.RepoType) string { + switch t { + case repo.FullNode: + return "repo" + case repo.StorageMiner: + return "miner-repo" + case repo.Worker: + return "worker-repo" + default: + panic(fmt.Sprintf("Unknown repo type: %v", t)) + } +} + +func EnvForRepo(t repo.RepoType) string { + switch t { + case repo.FullNode: + return "FULLNODE_API_INFO" + case repo.StorageMiner: + return "MINER_API_INFO" + case repo.Worker: + return "WORKER_API_INFO" + default: + panic(fmt.Sprintf("Unknown repo type: %v", t)) + } +} + +// TODO remove after deprecation period +func envForRepoDeprecation(t repo.RepoType) string { + switch t { + case repo.FullNode: + return "FULLNODE_API_INFO" + case repo.StorageMiner: + return "STORAGE_API_INFO" + case repo.Worker: + return "WORKER_API_INFO" + default: + panic(fmt.Sprintf("Unknown repo type: %v", t)) + } +} + +func GetAPIInfo(ctx *cli.Context, t repo.RepoType) (APIInfo, error) { + // Check if there was a flag passed with the listen address of the API + // server (only used by the tests) + apiFlag := flagForAPI(t) + if ctx.IsSet(apiFlag) { + strma := ctx.String(apiFlag) + strma = strings.TrimSpace(strma) + + return APIInfo{Addr: strma}, nil + } + + envKey := EnvForRepo(t) + env, ok := os.LookupEnv(envKey) + if !ok { + // TODO remove after deprecation period + envKey = envForRepoDeprecation(t) + env, ok = os.LookupEnv(envKey) + if ok { + log.Warnf("Use deprecation env(%s) value, please use env(%s) instead.", 
envKey, EnvForRepo(t)) + } + } + if ok { + return ParseApiInfo(env), nil + } + + repoFlag := flagForRepo(t) + + p, err := homedir.Expand(ctx.String(repoFlag)) + if err != nil { + return APIInfo{}, xerrors.Errorf("could not expand home dir (%s): %w", repoFlag, err) + } + + r, err := repo.NewFS(p) + if err != nil { + return APIInfo{}, xerrors.Errorf("could not open repo at path: %s; %w", p, err) + } + + ma, err := r.APIEndpoint() + if err != nil { + return APIInfo{}, xerrors.Errorf("could not get api endpoint: %w", err) + } + + token, err := r.APIToken() + if err != nil { + log.Warnf("Couldn't load CLI token, capabilities may be limited: %v", err) + } + + return APIInfo{ + Addr: ma.String(), + Token: token, + }, nil +} + +func GetRawAPI(ctx *cli.Context, t repo.RepoType) (string, http.Header, error) { + ainfo, err := GetAPIInfo(ctx, t) + if err != nil { + return "", nil, xerrors.Errorf("could not get API info: %w", err) + } + + addr, err := ainfo.DialArgs() + if err != nil { + return "", nil, xerrors.Errorf("could not get DialArgs: %w", err) + } + + return addr, ainfo.AuthHeader(), nil +} + +func GetAPI(ctx *cli.Context) (api.Common, jsonrpc.ClientCloser, error) { + ti, ok := ctx.App.Metadata["repoType"] + if !ok { + log.Errorf("unknown repo type, are you sure you want to use GetAPI?") + ti = repo.FullNode + } + t, ok := ti.(repo.RepoType) + if !ok { + log.Errorf("repoType type does not match the type of repo.RepoType") + } + + if tn, ok := ctx.App.Metadata["testnode-storage"]; ok { + return tn.(api.StorageMiner), func() {}, nil + } + if tn, ok := ctx.App.Metadata["testnode-full"]; ok { + return tn.(api.FullNode), func() {}, nil + } + + addr, headers, err := GetRawAPI(ctx, t) + if err != nil { + return nil, nil, err + } + + return client.NewCommonRPC(ctx.Context, addr, headers) +} + +func GetFullNodeAPI(ctx *cli.Context) (api.FullNode, jsonrpc.ClientCloser, error) { + if tn, ok := ctx.App.Metadata["testnode-full"]; ok { + return tn.(api.FullNode), func() {}, nil + } 
+ + addr, headers, err := GetRawAPI(ctx, repo.FullNode) + if err != nil { + return nil, nil, err + } + + return client.NewFullNodeRPC(ctx.Context, addr, headers) +} + +type GetStorageMinerOptions struct { + PreferHttp bool +} + +type GetStorageMinerOption func(*GetStorageMinerOptions) + +func StorageMinerUseHttp(opts *GetStorageMinerOptions) { + opts.PreferHttp = true +} + +func GetStorageMinerAPI(ctx *cli.Context, opts ...GetStorageMinerOption) (api.StorageMiner, jsonrpc.ClientCloser, error) { + var options GetStorageMinerOptions + for _, opt := range opts { + opt(&options) + } + + if tn, ok := ctx.App.Metadata["testnode-storage"]; ok { + return tn.(api.StorageMiner), func() {}, nil + } + + addr, headers, err := GetRawAPI(ctx, repo.StorageMiner) + if err != nil { + return nil, nil, err + } + + if options.PreferHttp { + u, err := url.Parse(addr) + if err != nil { + return nil, nil, xerrors.Errorf("parsing miner api URL: %w", err) + } + + switch u.Scheme { + case "ws": + u.Scheme = "http" + case "wss": + u.Scheme = "https" + } + + addr = u.String() + } + + return client.NewStorageMinerRPC(ctx.Context, addr, headers) +} + +func GetWorkerAPI(ctx *cli.Context) (api.WorkerAPI, jsonrpc.ClientCloser, error) { + addr, headers, err := GetRawAPI(ctx, repo.Worker) + if err != nil { + return nil, nil, err + } + + return client.NewWorkerRPC(ctx.Context, addr, headers) +} + +func GetGatewayAPI(ctx *cli.Context) (api.GatewayAPI, jsonrpc.ClientCloser, error) { + addr, headers, err := GetRawAPI(ctx, repo.FullNode) + if err != nil { + return nil, nil, err + } + + return client.NewGatewayRPC(ctx.Context, addr, headers) +} + +func DaemonContext(cctx *cli.Context) context.Context { + if mtCtx, ok := cctx.App.Metadata[metadataTraceContext]; ok { + return mtCtx.(context.Context) + } + + return context.Background() +} + +// ReqContext returns context for cli execution. Calling it for the first time +// installs SIGTERM handler that will close returned context. 
+// Not safe for concurrent execution. +func ReqContext(cctx *cli.Context) context.Context { + tCtx := DaemonContext(cctx) + + ctx, done := context.WithCancel(tCtx) + sigChan := make(chan os.Signal, 2) + go func() { + <-sigChan + done() + }() + signal.Notify(sigChan, syscall.SIGTERM, syscall.SIGINT, syscall.SIGHUP) + + return ctx +} diff --git a/cmd/lotus-bench/import.go b/cmd/lotus-bench/import.go index 9fa6731aa53..4b464bebeb1 100644 --- a/cmd/lotus-bench/import.go +++ b/cmd/lotus-bench/import.go @@ -20,18 +20,17 @@ import ( "github.com/cockroachdb/pebble" "github.com/cockroachdb/pebble/bloom" "github.com/ipfs/go-cid" - metricsi "github.com/ipfs/go-metrics-interface" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/blockstore" + badgerbs "github.com/filecoin-project/lotus/blockstore/badger" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" lcli "github.com/filecoin-project/lotus/cli" - "github.com/filecoin-project/lotus/lib/blockstore" - badgerbs "github.com/filecoin-project/lotus/lib/blockstore/badger" _ "github.com/filecoin-project/lotus/lib/sigs/bls" _ "github.com/filecoin-project/lotus/lib/sigs/secp" "github.com/filecoin-project/lotus/node/repo" @@ -204,7 +203,7 @@ var importBenchCmd = &cli.Command{ case cctx.Bool("use-native-badger"): log.Info("using native badger") var opts badgerbs.Options - if opts, err = repo.BadgerBlockstoreOptions(repo.BlockstoreChain, tdir, false); err != nil { + if opts, err = repo.BadgerBlockstoreOptions(repo.UniversalBlockstore, tdir, false); err != nil { return err } opts.SyncWrites = false @@ -229,21 +228,13 @@ var importBenchCmd = &cli.Command{ if ds != nil { ds = measure.New("dsbench", ds) defer ds.Close() //nolint:errcheck - bs = blockstore.NewBlockstore(ds) 
+ bs = blockstore.FromDatastore(ds) } if c, ok := bs.(io.Closer); ok { defer c.Close() //nolint:errcheck } - ctx := metricsi.CtxScope(context.Background(), "lotus") - cacheOpts := blockstore.DefaultCacheOpts() - cacheOpts.HasBloomFilterSize = 0 - bs, err = blockstore.CachedBlockstore(ctx, bs, cacheOpts) - if err != nil { - return err - } - var verifier ffiwrapper.Verifier = ffiwrapper.ProofVerifier if cctx.IsSet("syscall-cache") { scds, err := badger.NewDatastore(cctx.String("syscall-cache"), &badger.DefaultOptions) @@ -267,6 +258,15 @@ var importBenchCmd = &cli.Command{ stm := stmgr.NewStateManager(cs) + var carFile *os.File + // open the CAR file if one is provided. + if path := cctx.String("car"); path != "" { + var err error + if carFile, err = os.Open(path); err != nil { + return xerrors.Errorf("failed to open provided CAR file: %w", err) + } + } + startTime := time.Now() // register a gauge that reports how long since the measurable @@ -308,18 +308,7 @@ var importBenchCmd = &cli.Command{ writeProfile("allocs") }() - var carFile *os.File - - // open the CAR file if one is provided. 
- if path := cctx.String("car"); path != "" { - var err error - if carFile, err = os.Open(path); err != nil { - return xerrors.Errorf("failed to open provided CAR file: %w", err) - } - } - var head *types.TipSet - // --- IMPORT --- if !cctx.Bool("no-import") { if cctx.Bool("global-profile") { diff --git a/cmd/lotus-chainwatch/processor/miner.go b/cmd/lotus-chainwatch/processor/miner.go index 3a37a82f800..5f2ef55dd19 100644 --- a/cmd/lotus-chainwatch/processor/miner.go +++ b/cmd/lotus-chainwatch/processor/miner.go @@ -15,7 +15,7 @@ import ( "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/apibstore" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/builtin/power" "github.com/filecoin-project/lotus/chain/events/state" @@ -202,7 +202,7 @@ func (p *Processor) processMiners(ctx context.Context, minerTips map[types.TipSe log.Debugw("Processed Miners", "duration", time.Since(start).String()) }() - stor := store.ActorStore(ctx, apibstore.NewAPIBlockstore(p.node)) + stor := store.ActorStore(ctx, blockstore.NewAPIBlockstore(p.node)) var out []minerActorInfo // TODO add parallel calls if this becomes slow @@ -649,7 +649,7 @@ func (p *Processor) getMinerStateAt(ctx context.Context, maddr address.Address, if err != nil { return nil, err } - return miner.Load(store.ActorStore(ctx, apibstore.NewAPIBlockstore(p.node)), prevActor) + return miner.Load(store.ActorStore(ctx, blockstore.NewAPIBlockstore(p.node)), prevActor) } func (p *Processor) getMinerPreCommitChanges(ctx context.Context, m minerActorInfo) (*miner.PreCommitChanges, error) { diff --git a/cmd/lotus-gateway/api.go b/cmd/lotus-gateway/api.go index 11e78a606e4..6f6cf27e673 100644 --- a/cmd/lotus-gateway/api.go +++ b/cmd/lotus-gateway/api.go @@ -34,7 +34,7 @@ var ( // gatewayDepsAPI defines the API methods that the GatewayAPI depends 
on // (to make it easy to mock for tests) type gatewayDepsAPI interface { - Version(context.Context) (api.Version, error) + Version(context.Context) (api.APIVersion, error) ChainGetBlockMessages(context.Context, cid.Cid) (*api.BlockMessages, error) ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error) ChainGetNode(ctx context.Context, p string) (*api.IpldObject, error) @@ -130,7 +130,7 @@ func (a *GatewayAPI) checkTimestamp(at time.Time) error { return nil } -func (a *GatewayAPI) Version(ctx context.Context) (api.Version, error) { +func (a *GatewayAPI) Version(ctx context.Context) (api.APIVersion, error) { return a.api.Version(ctx) } diff --git a/cmd/lotus-gateway/main.go b/cmd/lotus-gateway/main.go index 5190ea798da..23b743d7392 100644 --- a/cmd/lotus-gateway/main.go +++ b/cmd/lotus-gateway/main.go @@ -77,7 +77,7 @@ var runCmd = &cli.Command{ // Register all metric views if err := view.Register( - metrics.DefaultViews..., + metrics.ChainNodeViews..., ); err != nil { log.Fatalf("Cannot register the view: %v", err) } diff --git a/cmd/lotus-seal-worker/main.go b/cmd/lotus-seal-worker/main.go index 8a17a10a3c6..24918e52a39 100644 --- a/cmd/lotus-seal-worker/main.go +++ b/cmd/lotus-seal-worker/main.go @@ -31,6 +31,7 @@ import ( "github.com/filecoin-project/lotus/api/apistruct" "github.com/filecoin-project/lotus/build" lcli "github.com/filecoin-project/lotus/cli" + cliutil "github.com/filecoin-project/lotus/cli/util" sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" "github.com/filecoin-project/lotus/extern/sector-storage/stores" @@ -49,7 +50,7 @@ const FlagWorkerRepo = "worker-repo" const FlagWorkerRepoDeprecation = "workerrepo" func main() { - build.RunningNodeType = build.NodeWorker + api.RunningNodeType = api.NodeWorker lotuslog.SetupLogLevels() @@ -183,7 +184,7 @@ var runCmd = &cli.Command{ var closer func() var err error for { - nodeApi, closer, err = 
lcli.GetStorageMinerAPI(cctx, lcli.StorageMinerUseHttp) + nodeApi, closer, err = lcli.GetStorageMinerAPI(cctx, cliutil.StorageMinerUseHttp) if err == nil { _, err = nodeApi.Version(ctx) if err == nil { @@ -210,8 +211,8 @@ var runCmd = &cli.Command{ if err != nil { return err } - if v.APIVersion != build.MinerAPIVersion { - return xerrors.Errorf("lotus-miner API version doesn't match: expected: %s", api.Version{APIVersion: build.MinerAPIVersion}) + if v.APIVersion != api.MinerAPIVersion { + return xerrors.Errorf("lotus-miner API version doesn't match: expected: %s", api.APIVersion{APIVersion: api.MinerAPIVersion}) } log.Infof("Remote version %s", v) diff --git a/cmd/lotus-seal-worker/rpc.go b/cmd/lotus-seal-worker/rpc.go index f4e8494d07d..f69129c5022 100644 --- a/cmd/lotus-seal-worker/rpc.go +++ b/cmd/lotus-seal-worker/rpc.go @@ -8,7 +8,7 @@ import ( "github.com/mitchellh/go-homedir" "golang.org/x/xerrors" - "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/api" sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" "github.com/filecoin-project/lotus/extern/sector-storage/stores" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" @@ -23,8 +23,8 @@ type worker struct { disabled int64 } -func (w *worker) Version(context.Context) (build.Version, error) { - return build.WorkerAPIVersion, nil +func (w *worker) Version(context.Context) (api.Version, error) { + return api.WorkerAPIVersion, nil } func (w *worker) StorageAddLocal(ctx context.Context, path string) error { diff --git a/cmd/lotus-seal-worker/storage.go b/cmd/lotus-seal-worker/storage.go index 39cd3ad5afb..afb566166c0 100644 --- a/cmd/lotus-seal-worker/storage.go +++ b/cmd/lotus-seal-worker/storage.go @@ -6,6 +6,7 @@ import ( "os" "path/filepath" + "github.com/docker/go-units" "github.com/google/uuid" "github.com/mitchellh/go-homedir" "github.com/urfave/cli/v2" @@ -46,6 +47,10 @@ var storageAttachCmd = &cli.Command{ Name: "store", Usage: "(for init) 
use path for long-term storage", }, + &cli.StringFlag{ + Name: "max-storage", + Usage: "(for init) limit storage space for sectors (expensive for very large paths!)", + }, }, Action: func(cctx *cli.Context) error { nodeApi, closer, err := lcli.GetWorkerAPI(cctx) @@ -79,11 +84,20 @@ var storageAttachCmd = &cli.Command{ return err } + var maxStor int64 + if cctx.IsSet("max-storage") { + maxStor, err = units.RAMInBytes(cctx.String("max-storage")) + if err != nil { + return xerrors.Errorf("parsing max-storage: %w", err) + } + } + cfg := &stores.LocalStorageMeta{ - ID: stores.ID(uuid.New().String()), - Weight: cctx.Uint64("weight"), - CanSeal: cctx.Bool("seal"), - CanStore: cctx.Bool("store"), + ID: stores.ID(uuid.New().String()), + Weight: cctx.Uint64("weight"), + CanSeal: cctx.Bool("seal"), + CanStore: cctx.Bool("store"), + MaxStorage: uint64(maxStor), } if !(cfg.CanStore || cfg.CanSeal) { diff --git a/cmd/lotus-seed/genesis.go b/cmd/lotus-seed/genesis.go index c688678916a..6f2c2214732 100644 --- a/cmd/lotus-seed/genesis.go +++ b/cmd/lotus-seed/genesis.go @@ -9,10 +9,10 @@ import ( "strconv" "strings" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/vm" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/journal" - "github.com/filecoin-project/lotus/lib/blockstore" "github.com/filecoin-project/lotus/node/modules/testing" "github.com/google/uuid" "github.com/mitchellh/go-homedir" @@ -37,6 +37,8 @@ var genesisCmd = &cli.Command{ genesisNewCmd, genesisAddMinerCmd, genesisAddMsigsCmd, + genesisSetVRKCmd, + genesisSetRemainderCmd, genesisCarCmd, }, } @@ -309,6 +311,200 @@ func parseMultisigCsv(csvf string) ([]GenAccountEntry, error) { return entries, nil } +var genesisSetVRKCmd = &cli.Command{ + Name: "set-vrk", + Usage: "Set the verified registry's root key", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "multisig", + Usage: "CSV file to parse the multisig that will be set as 
the root key", + }, + &cli.StringFlag{ + Name: "account", + Usage: "pubkey address that will be set as the root key (must NOT be declared anywhere else, since it must be given ID 80)", + }, + }, + Action: func(cctx *cli.Context) error { + if cctx.Args().Len() != 1 { + return fmt.Errorf("must specify template file") + } + + genf, err := homedir.Expand(cctx.Args().First()) + if err != nil { + return err + } + + csvf, err := homedir.Expand(cctx.Args().Get(1)) + if err != nil { + return err + } + + var template genesis.Template + b, err := ioutil.ReadFile(genf) + if err != nil { + return xerrors.Errorf("read genesis template: %w", err) + } + + if err := json.Unmarshal(b, &template); err != nil { + return xerrors.Errorf("unmarshal genesis template: %w", err) + } + + if cctx.IsSet("account") { + addr, err := address.NewFromString(cctx.String("account")) + if err != nil { + return err + } + + am := genesis.AccountMeta{Owner: addr} + + template.VerifregRootKey = genesis.Actor{ + Type: genesis.TAccount, + Balance: big.Zero(), + Meta: am.ActorMeta(), + } + } else if cctx.IsSet("multisig") { + + entries, err := parseMultisigCsv(csvf) + if err != nil { + return xerrors.Errorf("parsing multisig csv file: %w", err) + } + + if len(entries) == 0 { + return xerrors.Errorf("no msig entries in csv file: %w", err) + } + + e := entries[0] + if len(e.Addresses) != e.N { + return fmt.Errorf("entry had mismatch between 'N' and number of addresses") + } + + msig := &genesis.MultisigMeta{ + Signers: e.Addresses, + Threshold: e.M, + VestingDuration: monthsToBlocks(e.VestingMonths), + VestingStart: 0, + } + + act := genesis.Actor{ + Type: genesis.TMultisig, + Balance: abi.TokenAmount(e.Amount), + Meta: msig.ActorMeta(), + } + + template.VerifregRootKey = act + } else { + return xerrors.Errorf("must include either --account or --multisig flag") + } + + b, err = json.MarshalIndent(&template, "", " ") + if err != nil { + return err + } + + if err := ioutil.WriteFile(genf, b, 0644); err != nil { 
+ return err + } + return nil + }, +} + +var genesisSetRemainderCmd = &cli.Command{ + Name: "set-remainder", + Usage: "Set the remainder actor", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "multisig", + Usage: "CSV file to parse the multisig that will be set as the remainder actor", + }, + &cli.StringFlag{ + Name: "account", + Usage: "pubkey address that will be set as the remainder key (must NOT be declared anywhere else, since it must be given ID 90)", + }, + }, + Action: func(cctx *cli.Context) error { + if cctx.Args().Len() != 1 { + return fmt.Errorf("must specify template file") + } + + genf, err := homedir.Expand(cctx.Args().First()) + if err != nil { + return err + } + + csvf, err := homedir.Expand(cctx.Args().Get(1)) + if err != nil { + return err + } + + var template genesis.Template + b, err := ioutil.ReadFile(genf) + if err != nil { + return xerrors.Errorf("read genesis template: %w", err) + } + + if err := json.Unmarshal(b, &template); err != nil { + return xerrors.Errorf("unmarshal genesis template: %w", err) + } + + if cctx.IsSet("account") { + addr, err := address.NewFromString(cctx.String("account")) + if err != nil { + return err + } + + am := genesis.AccountMeta{Owner: addr} + + template.RemainderAccount = genesis.Actor{ + Type: genesis.TAccount, + Balance: big.Zero(), + Meta: am.ActorMeta(), + } + } else if cctx.IsSet("multisig") { + + entries, err := parseMultisigCsv(csvf) + if err != nil { + return xerrors.Errorf("parsing multisig csv file: %w", err) + } + + if len(entries) == 0 { + return xerrors.Errorf("no msig entries in csv file: %w", err) + } + + e := entries[0] + if len(e.Addresses) != e.N { + return fmt.Errorf("entry had mismatch between 'N' and number of addresses") + } + + msig := &genesis.MultisigMeta{ + Signers: e.Addresses, + Threshold: e.M, + VestingDuration: monthsToBlocks(e.VestingMonths), + VestingStart: 0, + } + + act := genesis.Actor{ + Type: genesis.TMultisig, + Balance: abi.TokenAmount(e.Amount), + Meta: 
msig.ActorMeta(), + } + + template.RemainderAccount = act + } else { + return xerrors.Errorf("must include either --account or --multisig flag") + } + + b, err = json.MarshalIndent(&template, "", " ") + if err != nil { + return err + } + + if err := ioutil.WriteFile(genf, b, 0644); err != nil { + return err + } + return nil + }, +} + var genesisCarCmd = &cli.Command{ Name: "car", Description: "write genesis car file", @@ -327,7 +523,7 @@ var genesisCarCmd = &cli.Command{ } ofile := c.String("out") jrnl := journal.NilJournal() - bstor := blockstore.NewTemporarySync() + bstor := blockstore.NewMemorySync() sbldr := vm.Syscalls(ffiwrapper.ProofVerifier) _, err := testing.MakeGenesis(ofile, c.Args().First())(bstor, sbldr, jrnl)() return err diff --git a/cmd/lotus-shed/balances.go b/cmd/lotus-shed/balances.go index 140effb3d83..8c5bfefb8d6 100644 --- a/cmd/lotus-shed/balances.go +++ b/cmd/lotus-shed/balances.go @@ -175,7 +175,7 @@ var chainBalanceStateCmd = &cli.Command{ defer lkrepo.Close() //nolint:errcheck - bs, err := lkrepo.Blockstore(ctx, repo.BlockstoreChain) + bs, err := lkrepo.Blockstore(ctx, repo.UniversalBlockstore) if err != nil { return fmt.Errorf("failed to open blockstore: %w", err) } @@ -396,7 +396,7 @@ var chainPledgeCmd = &cli.Command{ defer lkrepo.Close() //nolint:errcheck - bs, err := lkrepo.Blockstore(ctx, repo.BlockstoreChain) + bs, err := lkrepo.Blockstore(ctx, repo.UniversalBlockstore) if err != nil { return xerrors.Errorf("failed to open blockstore: %w", err) } diff --git a/cmd/lotus-shed/consensus.go b/cmd/lotus-shed/consensus.go index 1fe7756c1fa..c78c9c00f03 100644 --- a/cmd/lotus-shed/consensus.go +++ b/cmd/lotus-shed/consensus.go @@ -36,7 +36,7 @@ type consensusItem struct { targetTipset *types.TipSet headTipset *types.TipSet peerID peer.ID - version api.Version + version api.APIVersion api api.FullNode } diff --git a/cmd/lotus-shed/datastore.go b/cmd/lotus-shed/datastore.go index 1189b5a3a35..1086e8260bc 100644 --- 
a/cmd/lotus-shed/datastore.go +++ b/cmd/lotus-shed/datastore.go @@ -319,7 +319,7 @@ var datastoreRewriteCmd = &cli.Command{ ) // open the destination (to) store. - opts, err := repo.BadgerBlockstoreOptions(repo.BlockstoreChain, toPath, false) + opts, err := repo.BadgerBlockstoreOptions(repo.UniversalBlockstore, toPath, false) if err != nil { return xerrors.Errorf("failed to get badger options: %w", err) } @@ -329,7 +329,7 @@ var datastoreRewriteCmd = &cli.Command{ } // open the source (from) store. - opts, err = repo.BadgerBlockstoreOptions(repo.BlockstoreChain, fromPath, true) + opts, err = repo.BadgerBlockstoreOptions(repo.UniversalBlockstore, fromPath, true) if err != nil { return xerrors.Errorf("failed to get badger options: %w", err) } diff --git a/cmd/lotus-shed/export.go b/cmd/lotus-shed/export.go index 4820381b5c6..e711ba2bb05 100644 --- a/cmd/lotus-shed/export.go +++ b/cmd/lotus-shed/export.go @@ -72,7 +72,7 @@ var exportChainCmd = &cli.Command{ defer fi.Close() //nolint:errcheck - bs, err := lr.Blockstore(ctx, repo.BlockstoreChain) + bs, err := lr.Blockstore(ctx, repo.UniversalBlockstore) if err != nil { return fmt.Errorf("failed to open blockstore: %w", err) } diff --git a/cmd/lotus-shed/genesis-verify.go b/cmd/lotus-shed/genesis-verify.go index 20561eb5a25..32e4e14ad0b 100644 --- a/cmd/lotus-shed/genesis-verify.go +++ b/cmd/lotus-shed/genesis-verify.go @@ -17,6 +17,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin/account" @@ -26,7 +27,6 @@ import ( "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/lib/blockstore" ) type addrInfo struct { @@ -50,7 +50,7 @@ var 
genesisVerifyCmd = &cli.Command{ if !cctx.Args().Present() { return fmt.Errorf("must pass genesis car file") } - bs := blockstore.NewBlockstore(datastore.NewMapDatastore()) + bs := blockstore.FromDatastore(datastore.NewMapDatastore()) cs := store.NewChainStore(bs, bs, datastore.NewMapDatastore(), nil, nil) defer cs.Close() //nolint:errcheck diff --git a/cmd/lotus-shed/import-car.go b/cmd/lotus-shed/import-car.go index 7f3fa7c8953..4e465029f2d 100644 --- a/cmd/lotus-shed/import-car.go +++ b/cmd/lotus-shed/import-car.go @@ -47,7 +47,7 @@ var importCarCmd = &cli.Command{ return xerrors.Errorf("opening the car file: %w", err) } - bs, err := lr.Blockstore(ctx, repo.BlockstoreChain) + bs, err := lr.Blockstore(ctx, repo.UniversalBlockstore) if err != nil { return err } @@ -118,7 +118,7 @@ var importObjectCmd = &cli.Command{ } defer lr.Close() //nolint:errcheck - bs, err := lr.Blockstore(ctx, repo.BlockstoreChain) + bs, err := lr.Blockstore(ctx, repo.UniversalBlockstore) if err != nil { return fmt.Errorf("failed to open blockstore: %w", err) } diff --git a/cmd/lotus-shed/main.go b/cmd/lotus-shed/main.go index 10b2b4d89f9..b2a676a8d66 100644 --- a/cmd/lotus-shed/main.go +++ b/cmd/lotus-shed/main.go @@ -40,7 +40,7 @@ func main() { mpoolStatsCmd, exportChainCmd, consensusCmd, - rollupDealStatsCmd, + storageStatsCmd, syncCmd, stateTreePruneCmd, datastoreCmd, diff --git a/cmd/lotus-shed/pruning.go b/cmd/lotus-shed/pruning.go index c7fc97c30da..1afe76c4d38 100644 --- a/cmd/lotus-shed/pruning.go +++ b/cmd/lotus-shed/pruning.go @@ -11,10 +11,10 @@ import ( "github.com/urfave/cli/v2" "golang.org/x/xerrors" + badgerbs "github.com/filecoin-project/lotus/blockstore/badger" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/vm" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" - badgerbs "github.com/filecoin-project/lotus/lib/blockstore/badger" "github.com/filecoin-project/lotus/node/repo" ) @@ -131,7 +131,7 @@ var 
stateTreePruneCmd = &cli.Command{ defer lkrepo.Close() //nolint:errcheck - bs, err := lkrepo.Blockstore(ctx, repo.BlockstoreChain) + bs, err := lkrepo.Blockstore(ctx, repo.UniversalBlockstore) if err != nil { return fmt.Errorf("failed to open blockstore: %w", err) } @@ -191,7 +191,7 @@ var stateTreePruneCmd = &cli.Command{ rrLb := abi.ChainEpoch(cctx.Int64("keep-from-lookback")) - if err := cs.WalkSnapshot(ctx, ts, rrLb, true, func(c cid.Cid) error { + if err := cs.WalkSnapshot(ctx, ts, rrLb, true, true, func(c cid.Cid) error { if goodSet.Len()%20 == 0 { fmt.Printf("\renumerating keep set: %d ", goodSet.Len()) } diff --git a/cmd/lotus-shed/sr2-dealstats-rollup.go b/cmd/lotus-shed/sr2-dealstats-rollup.go deleted file mode 100644 index a2c6d03d936..00000000000 --- a/cmd/lotus-shed/sr2-dealstats-rollup.go +++ /dev/null @@ -1,455 +0,0 @@ -package main - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "os" - "sort" - "strings" - - "github.com/Jeffail/gabs" - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - lcli "github.com/filecoin-project/lotus/cli" - "github.com/ipfs/go-cid" - "github.com/urfave/cli/v2" - "golang.org/x/xerrors" -) - -// Requested by @jbenet -// How many epochs back to look at for dealstats -var epochLookback = abi.ChainEpoch(10) - -var resolvedWallets = map[address.Address]address.Address{} -var knownAddrMap = map[address.Address]string{} - -// -// contents of basic_stats.json -type competitionTotalOutput struct { - Epoch int64 `json:"epoch"` - Endpoint string `json:"endpoint"` - Payload competitionTotal `json:"payload"` -} -type competitionTotal struct { - UniqueCids int `json:"total_unique_cids"` - UniqueProviders int `json:"total_unique_providers"` - UniqueProjects int `json:"total_unique_projects"` - UniqueClients int `json:"total_unique_clients"` - TotalDeals int `json:"total_num_deals"` - TotalBytes int64 `json:"total_stored_data_size"` - FilplusTotalDeals int 
`json:"filplus_total_num_deals"` - FilplusTotalBytes int64 `json:"filplus_total_stored_data_size"` - - seenProject map[string]bool - seenClient map[address.Address]bool - seenProvider map[address.Address]bool - seenPieceCid map[cid.Cid]bool -} - -// -// contents of client_stats.json -type projectAggregateStatsOutput struct { - Epoch int64 `json:"epoch"` - Endpoint string `json:"endpoint"` - Payload map[string]*projectAggregateStats `json:"payload"` -} -type projectAggregateStats struct { - ProjectID string `json:"project_id"` - DataSizeMaxProvider int64 `json:"max_data_size_stored_with_single_provider"` - HighestCidDealCount int `json:"max_same_cid_deals"` - DataSize int64 `json:"total_data_size"` - NumCids int `json:"total_num_cids"` - NumDeals int `json:"total_num_deals"` - NumProviders int `json:"total_num_providers"` - ClientStats map[string]*clientAggregateStats `json:"clients"` - - dataPerProvider map[address.Address]int64 - cidDeals map[cid.Cid]int -} -type clientAggregateStats struct { - Client string `json:"client"` - DataSize int64 `json:"total_data_size"` - NumCids int `json:"total_num_cids"` - NumDeals int `json:"total_num_deals"` - NumProviders int `json:"total_num_providers"` - - providers map[address.Address]bool - cids map[cid.Cid]bool -} - -// -// contents of deals_list_{{projid}}.json -type dealListOutput struct { - Epoch int64 `json:"epoch"` - Endpoint string `json:"endpoint"` - Payload []*individualDeal `json:"payload"` -} -type individualDeal struct { - ProjectID string `json:"project_id"` - Client string `json:"client"` - DealID string `json:"deal_id"` - DealStartEpoch int64 `json:"deal_start_epoch"` - MinerID string `json:"miner_id"` - PayloadCID string `json:"payload_cid"` - PaddedSize int64 `json:"data_size"` -} - -var rollupDealStatsCmd = &cli.Command{ - Name: "rollup-deal-stats", - Flags: []cli.Flag{}, - Action: func(cctx *cli.Context) error { - - if cctx.Args().Len() != 2 || cctx.Args().Get(0) == "" || cctx.Args().Get(1) == "" { - return 
errors.New("must supply 2 arguments: a nonexistent target directory to write results to and a source of currently active projects") - } - - outDirName := cctx.Args().Get(0) - if _, err := os.Stat(outDirName); err == nil { - return fmt.Errorf("unable to proceed: supplied stat target '%s' already exists", outDirName) - } - - if err := os.MkdirAll(outDirName, 0755); err != nil { - return fmt.Errorf("creation of destination '%s' failed: %s", outDirName, err) - } - - ctx := lcli.ReqContext(cctx) - - projListName := cctx.Args().Get(1) - var projListFh *os.File - - { - // Parses JSON input in the form: - // { - // "payload": [ - // { - // "project": "5fb5f5b3ad3275e236287ce3", - // "address": "f3w3r2c6iukyh3u6f6kx62s5g6n2gf54aqp33ukqrqhje2y6xhf7k55przg4xqgahpcdal6laljz6zonma5pka" - // }, - // { - // "project": "5fb608c4ad3275e236287ced", - // "address": "f3rs2khurnubol6ent27lpggidxxujqo2lg5aap5d5bmtam6yjb5wfla5cxxdgj45tqoaawgpzt5lofc3vpzfq" - // }, - // ... - // ] - // } - if strings.HasPrefix(projListName, "http://") || strings.HasPrefix(projListName, "https://") { - req, err := http.NewRequestWithContext(ctx, "GET", projListName, nil) - if err != nil { - return err - } - resp, err := http.DefaultClient.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() //nolint:errcheck - - if resp.StatusCode != http.StatusOK { - return xerrors.Errorf("non-200 response: %d", resp.StatusCode) - } - - projListFh, err = os.Create(outDirName + "/client_list.json") - if err != nil { - return err - } - - _, err = io.Copy(projListFh, resp.Body) - if err != nil { - return err - } - } else { - return errors.New("file inputs not yet supported") - } - - if _, err := projListFh.Seek(0, 0); err != nil { - return err - } - defer projListFh.Close() //nolint:errcheck - - projList, err := gabs.ParseJSONBuffer(projListFh) - if err != nil { - return err - } - proj, err := projList.Search("payload").Children() - if err != nil { - return err - } - for _, p := range proj { - a, err := 
address.NewFromString(p.S("address").Data().(string)) - if err != nil { - return err - } - - knownAddrMap[a] = p.S("project").Data().(string) - } - - if len(knownAddrMap) == 0 { - return fmt.Errorf("no active projects/clients found in '%s': unable to continue", projListName) - } - } - - outClientStatsFd, err := os.Create(outDirName + "/client_stats.json") - if err != nil { - return err - } - defer outClientStatsFd.Close() //nolint:errcheck - - outBasicStatsFd, err := os.Create(outDirName + "/basic_stats.json") - if err != nil { - return err - } - defer outBasicStatsFd.Close() //nolint:errcheck - - outUnfilteredStatsFd, err := os.Create(outDirName + "/unfiltered_basic_stats.json") - if err != nil { - return err - } - defer outUnfilteredStatsFd.Close() //nolint:errcheck - - api, apiCloser, err := lcli.GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer apiCloser() - - head, err := api.ChainHead(ctx) - if err != nil { - return err - } - - head, err = api.ChainGetTipSetByHeight(ctx, head.Height()-epochLookback, head.Key()) - if err != nil { - return err - } - - grandTotals := competitionTotal{ - seenProject: make(map[string]bool), - seenClient: make(map[address.Address]bool), - seenProvider: make(map[address.Address]bool), - seenPieceCid: make(map[cid.Cid]bool), - } - - unfilteredGrandTotals := competitionTotal{ - seenClient: make(map[address.Address]bool), - seenProvider: make(map[address.Address]bool), - seenPieceCid: make(map[cid.Cid]bool), - } - - projStats := make(map[string]*projectAggregateStats) - projDealLists := make(map[string][]*individualDeal) - - deals, err := api.StateMarketDeals(ctx, head.Key()) - if err != nil { - return err - } - - for dealID, dealInfo := range deals { - - // Only count deals that have properly started, not past/future ones - // https://github.com/filecoin-project/specs-actors/blob/v0.9.9/actors/builtin/market/deal.go#L81-L85 - // Bail on 0 as well in case SectorStartEpoch is uninitialized due to some bug - if 
dealInfo.State.SectorStartEpoch <= 0 || - dealInfo.State.SectorStartEpoch > head.Height() { - continue - } - - clientAddr, found := resolvedWallets[dealInfo.Proposal.Client] - if !found { - var err error - clientAddr, err = api.StateAccountKey(ctx, dealInfo.Proposal.Client, head.Key()) - if err != nil { - log.Warnf("failed to resolve id '%s' to wallet address: %s", dealInfo.Proposal.Client, err) - continue - } - - resolvedWallets[dealInfo.Proposal.Client] = clientAddr - } - - unfilteredGrandTotals.seenClient[clientAddr] = true - unfilteredGrandTotals.TotalBytes += int64(dealInfo.Proposal.PieceSize) - unfilteredGrandTotals.seenProvider[dealInfo.Proposal.Provider] = true - unfilteredGrandTotals.seenPieceCid[dealInfo.Proposal.PieceCID] = true - unfilteredGrandTotals.TotalDeals++ - - if dealInfo.Proposal.VerifiedDeal { - unfilteredGrandTotals.FilplusTotalDeals++ - unfilteredGrandTotals.FilplusTotalBytes += int64(dealInfo.Proposal.PieceSize) - } - - // perl -E 'say scalar gmtime ( 166560 * 30 + 1598306400 )' - // Wed Oct 21 18:00:00 2020 - if dealInfo.Proposal.StartEpoch <= 166560 { - continue - } - - projID, projKnown := knownAddrMap[clientAddr] - if !projKnown { - continue - } - - grandTotals.seenProject[projID] = true - projStatEntry, ok := projStats[projID] - if !ok { - projStatEntry = &projectAggregateStats{ - ProjectID: projID, - ClientStats: make(map[string]*clientAggregateStats), - cidDeals: make(map[cid.Cid]int), - dataPerProvider: make(map[address.Address]int64), - } - projStats[projID] = projStatEntry - } - - if projStatEntry.cidDeals[dealInfo.Proposal.PieceCID] >= 10 { - continue - } - - grandTotals.seenClient[clientAddr] = true - clientStatEntry, ok := projStatEntry.ClientStats[clientAddr.String()] - if !ok { - clientStatEntry = &clientAggregateStats{ - Client: clientAddr.String(), - cids: make(map[cid.Cid]bool), - providers: make(map[address.Address]bool), - } - projStatEntry.ClientStats[clientAddr.String()] = clientStatEntry - } - - grandTotals.TotalBytes 
+= int64(dealInfo.Proposal.PieceSize) - projStatEntry.DataSize += int64(dealInfo.Proposal.PieceSize) - clientStatEntry.DataSize += int64(dealInfo.Proposal.PieceSize) - - grandTotals.seenProvider[dealInfo.Proposal.Provider] = true - projStatEntry.dataPerProvider[dealInfo.Proposal.Provider] += int64(dealInfo.Proposal.PieceSize) - clientStatEntry.providers[dealInfo.Proposal.Provider] = true - - grandTotals.seenPieceCid[dealInfo.Proposal.PieceCID] = true - projStatEntry.cidDeals[dealInfo.Proposal.PieceCID]++ - clientStatEntry.cids[dealInfo.Proposal.PieceCID] = true - - grandTotals.TotalDeals++ - projStatEntry.NumDeals++ - clientStatEntry.NumDeals++ - - if dealInfo.Proposal.VerifiedDeal { - grandTotals.FilplusTotalDeals++ - grandTotals.FilplusTotalBytes += int64(dealInfo.Proposal.PieceSize) - } - - payloadCid := "unknown" - if c, err := cid.Parse(dealInfo.Proposal.Label); err == nil { - payloadCid = c.String() - } - - projDealLists[projID] = append(projDealLists[projID], &individualDeal{ - DealID: dealID, - ProjectID: projID, - Client: clientAddr.String(), - MinerID: dealInfo.Proposal.Provider.String(), - PayloadCID: payloadCid, - PaddedSize: int64(dealInfo.Proposal.PieceSize), - DealStartEpoch: int64(dealInfo.State.SectorStartEpoch), - }) - } - - // - // Write out per-project deal lists - for proj, dl := range projDealLists { - err := func() error { - outListFd, err := os.Create(fmt.Sprintf(outDirName+"/deals_list_%s.json", proj)) - if err != nil { - return err - } - - defer outListFd.Close() //nolint:errcheck - - ridiculousLintMandatedRebind := dl - sort.Slice(dl, func(i, j int) bool { - return ridiculousLintMandatedRebind[j].PaddedSize < ridiculousLintMandatedRebind[i].PaddedSize - }) - - if err := json.NewEncoder(outListFd).Encode( - dealListOutput{ - Epoch: int64(head.Height()), - Endpoint: "DEAL_LIST", - Payload: dl, - }, - ); err != nil { - return err - } - - return nil - }() - - if err != nil { - return err - } - } - - // - // write out basic_stats.json and 
unfiltered_basic_stats.json - for _, st := range []*competitionTotal{&grandTotals, &unfilteredGrandTotals} { - st.UniqueCids = len(st.seenPieceCid) - st.UniqueClients = len(st.seenClient) - st.UniqueProviders = len(st.seenProvider) - if st.seenProject != nil { - st.UniqueProjects = len(st.seenProject) - } - } - - if err := json.NewEncoder(outBasicStatsFd).Encode( - competitionTotalOutput{ - Epoch: int64(head.Height()), - Endpoint: "COMPETITION_TOTALS", - Payload: grandTotals, - }, - ); err != nil { - return err - } - - if err := json.NewEncoder(outUnfilteredStatsFd).Encode( - competitionTotalOutput{ - Epoch: int64(head.Height()), - Endpoint: "NETWORK_WIDE_TOTALS", - Payload: unfilteredGrandTotals, - }, - ); err != nil { - return err - } - - // - // write out client_stats.json - for _, ps := range projStats { - ps.NumCids = len(ps.cidDeals) - ps.NumProviders = len(ps.dataPerProvider) - for _, dealsForCid := range ps.cidDeals { - if ps.HighestCidDealCount < dealsForCid { - ps.HighestCidDealCount = dealsForCid - } - } - for _, dataForProvider := range ps.dataPerProvider { - if ps.DataSizeMaxProvider < dataForProvider { - ps.DataSizeMaxProvider = dataForProvider - } - } - - for _, cs := range ps.ClientStats { - cs.NumCids = len(cs.cids) - cs.NumProviders = len(cs.providers) - } - } - - if err := json.NewEncoder(outClientStatsFd).Encode( - projectAggregateStatsOutput{ - Epoch: int64(head.Height()), - Endpoint: "PROJECT_DEAL_STATS", - Payload: projStats, - }, - ); err != nil { - return err - } - - return nil - }, -} diff --git a/cmd/lotus-shed/storage-stats.go b/cmd/lotus-shed/storage-stats.go new file mode 100644 index 00000000000..a40f082be5e --- /dev/null +++ b/cmd/lotus-shed/storage-stats.go @@ -0,0 +1,114 @@ +package main + +import ( + "encoding/json" + "os" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + lcli "github.com/filecoin-project/lotus/cli" + "github.com/ipfs/go-cid" + "github.com/urfave/cli/v2" +) + +// How 
many epochs back to look at for dealstats +var defaultEpochLookback = abi.ChainEpoch(10) + +type networkTotalsOutput struct { + Epoch int64 `json:"epoch"` + Endpoint string `json:"endpoint"` + Payload networkTotals `json:"payload"` +} + +type networkTotals struct { + UniqueCids int `json:"total_unique_cids"` + UniqueProviders int `json:"total_unique_providers"` + UniqueClients int `json:"total_unique_clients"` + TotalDeals int `json:"total_num_deals"` + TotalBytes int64 `json:"total_stored_data_size"` + FilplusTotalDeals int `json:"filplus_total_num_deals"` + FilplusTotalBytes int64 `json:"filplus_total_stored_data_size"` + + seenClient map[address.Address]bool + seenProvider map[address.Address]bool + seenPieceCid map[cid.Cid]bool +} + +var storageStatsCmd = &cli.Command{ + Name: "storage-stats", + Usage: "Translates current lotus state into a json summary suitable for driving https://storage.filecoin.io/", + Flags: []cli.Flag{ + &cli.Int64Flag{ + Name: "height", + }, + }, + Action: func(cctx *cli.Context) error { + ctx := lcli.ReqContext(cctx) + + api, apiCloser, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer apiCloser() + + head, err := api.ChainHead(ctx) + if err != nil { + return err + } + + requestedHeight := cctx.Int64("height") + if requestedHeight > 0 { + head, err = api.ChainGetTipSetByHeight(ctx, abi.ChainEpoch(requestedHeight), head.Key()) + } else { + head, err = api.ChainGetTipSetByHeight(ctx, head.Height()-defaultEpochLookback, head.Key()) + } + if err != nil { + return err + } + + netTotals := networkTotals{ + seenClient: make(map[address.Address]bool), + seenProvider: make(map[address.Address]bool), + seenPieceCid: make(map[cid.Cid]bool), + } + + deals, err := api.StateMarketDeals(ctx, head.Key()) + if err != nil { + return err + } + + for _, dealInfo := range deals { + + // Only count deals that have properly started, not past/future ones + // 
https://github.com/filecoin-project/specs-actors/blob/v0.9.9/actors/builtin/market/deal.go#L81-L85 + // Bail on 0 as well in case SectorStartEpoch is uninitialized due to some bug + if dealInfo.State.SectorStartEpoch <= 0 || + dealInfo.State.SectorStartEpoch > head.Height() { + continue + } + + netTotals.seenClient[dealInfo.Proposal.Client] = true + netTotals.TotalBytes += int64(dealInfo.Proposal.PieceSize) + netTotals.seenProvider[dealInfo.Proposal.Provider] = true + netTotals.seenPieceCid[dealInfo.Proposal.PieceCID] = true + netTotals.TotalDeals++ + + if dealInfo.Proposal.VerifiedDeal { + netTotals.FilplusTotalDeals++ + netTotals.FilplusTotalBytes += int64(dealInfo.Proposal.PieceSize) + } + } + + netTotals.UniqueCids = len(netTotals.seenPieceCid) + netTotals.UniqueClients = len(netTotals.seenClient) + netTotals.UniqueProviders = len(netTotals.seenProvider) + + return json.NewEncoder(os.Stdout).Encode( + networkTotalsOutput{ + Epoch: int64(head.Height()), + Endpoint: "NETWORK_WIDE_TOTALS", + Payload: netTotals, + }, + ) + }, +} diff --git a/cmd/lotus-shed/verifreg.go b/cmd/lotus-shed/verifreg.go index df1f0d99012..426827ad265 100644 --- a/cmd/lotus-shed/verifreg.go +++ b/cmd/lotus-shed/verifreg.go @@ -13,7 +13,7 @@ import ( verifreg2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/verifreg" - "github.com/filecoin-project/lotus/api/apibstore" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" @@ -190,7 +190,7 @@ var verifRegListVerifiersCmd = &cli.Command{ return err } - apibs := apibstore.NewAPIBlockstore(api) + apibs := blockstore.NewAPIBlockstore(api) store := adt.WrapStore(ctx, cbor.NewCborStore(apibs)) st, err := verifreg.Load(store, act) @@ -220,7 +220,7 @@ var verifRegListClientsCmd = &cli.Command{ return err } - apibs := apibstore.NewAPIBlockstore(api) + apibs := blockstore.NewAPIBlockstore(api) store := 
adt.WrapStore(ctx, cbor.NewCborStore(apibs)) st, err := verifreg.Load(store, act) @@ -303,7 +303,7 @@ var verifRegCheckVerifierCmd = &cli.Command{ return err } - apibs := apibstore.NewAPIBlockstore(api) + apibs := blockstore.NewAPIBlockstore(api) store := adt.WrapStore(ctx, cbor.NewCborStore(apibs)) st, err := verifreg.Load(store, act) diff --git a/cmd/lotus-storage-miner/actor.go b/cmd/lotus-storage-miner/actor.go index bcd29ea60b2..8c2ab95bd71 100644 --- a/cmd/lotus-storage-miner/actor.go +++ b/cmd/lotus-storage-miner/actor.go @@ -19,7 +19,7 @@ import ( miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" - "github.com/filecoin-project/lotus/api/apibstore" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" @@ -307,7 +307,7 @@ var actorRepayDebtCmd = &cli.Command{ return err } - store := adt.WrapStore(ctx, cbor.NewCborStore(apibstore.NewAPIBlockstore(api))) + store := adt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewAPIBlockstore(api))) mst, err := miner.Load(store, mact) if err != nil { @@ -420,6 +420,7 @@ var actorControlList = &cli.Command{ commit := map[address.Address]struct{}{} precommit := map[address.Address]struct{}{} + terminate := map[address.Address]struct{}{} post := map[address.Address]struct{}{} for _, ca := range mi.ControlAddresses { @@ -446,6 +447,16 @@ var actorControlList = &cli.Command{ commit[ca] = struct{}{} } + for _, ca := range ac.TerminateControl { + ca, err := api.StateLookupID(ctx, ca, types.EmptyTSK) + if err != nil { + return err + } + + delete(post, ca) + terminate[ca] = struct{}{} + } + printKey := func(name string, a address.Address) { b, err := api.WalletBalance(ctx, a) if err != nil { @@ -487,6 +498,9 @@ var actorControlList = &cli.Command{ if _, ok := commit[a]; ok { uses = append(uses, color.BlueString("commit")) } + if _, ok := terminate[a]; ok { + uses = 
append(uses, color.YellowString("terminate")) + } tw.Write(map[string]interface{}{ "name": name, diff --git a/cmd/lotus-storage-miner/actor_test.go b/cmd/lotus-storage-miner/actor_test.go index 1816c1eab93..02b41202cb1 100644 --- a/cmd/lotus-storage-miner/actor_test.go +++ b/cmd/lotus-storage-miner/actor_test.go @@ -17,6 +17,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api/test" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/policy" @@ -70,7 +71,7 @@ func TestWorkerKeyChange(t *testing.T) { "testnode-storage": sn[0], } app.Writer = output - build.RunningNodeType = build.NodeMiner + api.RunningNodeType = api.NodeMiner fs := flag.NewFlagSet("", flag.ContinueOnError) for _, f := range cmd.Flags { diff --git a/cmd/lotus-storage-miner/allinfo_test.go b/cmd/lotus-storage-miner/allinfo_test.go index 51aba14a913..6fa3136d330 100644 --- a/cmd/lotus-storage-miner/allinfo_test.go +++ b/cmd/lotus-storage-miner/allinfo_test.go @@ -11,8 +11,8 @@ import ( "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api/test" - "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/lib/lotuslog" "github.com/filecoin-project/lotus/node/repo" @@ -55,7 +55,7 @@ func TestMinerAllInfo(t *testing.T) { "testnode-full": n[0], "testnode-storage": sn[0], } - build.RunningNodeType = build.NodeMiner + api.RunningNodeType = api.NodeMiner cctx := cli.NewContext(app, flag.NewFlagSet("", flag.ContinueOnError), nil) diff --git a/cmd/lotus-storage-miner/info.go b/cmd/lotus-storage-miner/info.go index 30c2924f2de..cf39e5516da 100644 --- a/cmd/lotus-storage-miner/info.go +++ b/cmd/lotus-storage-miner/info.go @@ -18,14 +18,12 @@ import ( sealing "github.com/filecoin-project/lotus/extern/storage-sealing" 
"github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/apibstore" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" lcli "github.com/filecoin-project/lotus/cli" - "github.com/filecoin-project/lotus/lib/blockstore" - "github.com/filecoin-project/lotus/lib/bufbstore" ) var infoCmd = &cli.Command{ @@ -102,7 +100,7 @@ func infoCmdAct(cctx *cli.Context) error { return err } - tbs := bufbstore.NewTieredBstore(apibstore.NewAPIBlockstore(api), blockstore.NewTemporary()) + tbs := blockstore.NewTieredBstore(blockstore.NewAPIBlockstore(api), blockstore.NewMemory()) mas, err := miner.Load(adt.WrapStore(ctx, cbor.NewCborStore(tbs)), mact) if err != nil { return err @@ -284,6 +282,7 @@ var stateList = []stateMeta{ {col: color.FgBlue, state: sealing.Empty}, {col: color.FgBlue, state: sealing.WaitDeals}, + {col: color.FgBlue, state: sealing.AddPiece}, {col: color.FgRed, state: sealing.UndefinedSectorState}, {col: color.FgYellow, state: sealing.Packing}, @@ -306,6 +305,7 @@ var stateList = []stateMeta{ {col: color.FgCyan, state: sealing.Removed}, {col: color.FgRed, state: sealing.FailedUnrecoverable}, + {col: color.FgRed, state: sealing.AddPieceFailed}, {col: color.FgRed, state: sealing.SealPreCommit1Failed}, {col: color.FgRed, state: sealing.SealPreCommit2Failed}, {col: color.FgRed, state: sealing.PreCommitFailed}, diff --git a/cmd/lotus-storage-miner/init.go b/cmd/lotus-storage-miner/init.go index 13f946d78eb..2e38dcc06ca 100644 --- a/cmd/lotus-storage-miner/init.go +++ b/cmd/lotus-storage-miner/init.go @@ -186,8 +186,8 @@ var initCmd = &cli.Command{ return err } - if !v.APIVersion.EqMajorMinor(build.FullAPIVersion) { - return xerrors.Errorf("Remote API version didn't match (expected %s, remote %s)", build.FullAPIVersion, v.APIVersion) + if 
!v.APIVersion.EqMajorMinor(lapi.FullAPIVersion) { + return xerrors.Errorf("Remote API version didn't match (expected %s, remote %s)", lapi.FullAPIVersion, v.APIVersion) } log.Info("Initializing repo") diff --git a/cmd/lotus-storage-miner/init_restore.go b/cmd/lotus-storage-miner/init_restore.go index 9591129b8d6..12358e63a75 100644 --- a/cmd/lotus-storage-miner/init_restore.go +++ b/cmd/lotus-storage-miner/init_restore.go @@ -18,6 +18,7 @@ import ( paramfetch "github.com/filecoin-project/go-paramfetch" "github.com/filecoin-project/go-state-types/big" + lapi "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/types" lcli "github.com/filecoin-project/lotus/cli" @@ -68,8 +69,8 @@ var initRestoreCmd = &cli.Command{ return err } - if !v.APIVersion.EqMajorMinor(build.FullAPIVersion) { - return xerrors.Errorf("Remote API version didn't match (expected %s, remote %s)", build.FullAPIVersion, v.APIVersion) + if !v.APIVersion.EqMajorMinor(lapi.FullAPIVersion) { + return xerrors.Errorf("Remote API version didn't match (expected %s, remote %s)", lapi.FullAPIVersion, v.APIVersion) } if !cctx.Bool("nosync") { diff --git a/cmd/lotus-storage-miner/main.go b/cmd/lotus-storage-miner/main.go index 671f75cf0fc..84654d7899b 100644 --- a/cmd/lotus-storage-miner/main.go +++ b/cmd/lotus-storage-miner/main.go @@ -26,7 +26,7 @@ const FlagMinerRepo = "miner-repo" const FlagMinerRepoDeprecation = "storagerepo" func main() { - build.RunningNodeType = build.NodeMiner + api.RunningNodeType = api.NodeMiner lotuslog.SetupLogLevels() diff --git a/cmd/lotus-storage-miner/proving.go b/cmd/lotus-storage-miner/proving.go index 3d60f4b7662..f6bc74318e8 100644 --- a/cmd/lotus-storage-miner/proving.go +++ b/cmd/lotus-storage-miner/proving.go @@ -12,7 +12,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/api/apibstore" + 
"github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" @@ -52,7 +52,7 @@ var provingFaultsCmd = &cli.Command{ ctx := lcli.ReqContext(cctx) - stor := store.ActorStore(ctx, apibstore.NewAPIBlockstore(api)) + stor := store.ActorStore(ctx, blockstore.NewAPIBlockstore(api)) maddr, err := getActorAddress(ctx, nodeApi, cctx.String("actor")) if err != nil { @@ -127,7 +127,7 @@ var provingInfoCmd = &cli.Command{ return err } - stor := store.ActorStore(ctx, apibstore.NewAPIBlockstore(api)) + stor := store.ActorStore(ctx, blockstore.NewAPIBlockstore(api)) mas, err := miner.Load(stor, mact) if err != nil { diff --git a/cmd/lotus-storage-miner/run.go b/cmd/lotus-storage-miner/run.go index 0c2fba8b387..cdcc4d88f59 100644 --- a/cmd/lotus-storage-miner/run.go +++ b/cmd/lotus-storage-miner/run.go @@ -13,6 +13,7 @@ import ( "github.com/multiformats/go-multiaddr" manet "github.com/multiformats/go-multiaddr/net" "github.com/urfave/cli/v2" + "go.opencensus.io/stats" "go.opencensus.io/stats/view" "go.opencensus.io/tag" "golang.org/x/xerrors" @@ -68,14 +69,20 @@ var runCmd = &cli.Command{ return xerrors.Errorf("getting full node api: %w", err) } defer ncloser() - ctx := lcli.DaemonContext(cctx) + ctx, _ := tag.New(lcli.DaemonContext(cctx), + tag.Insert(metrics.Version, build.BuildVersion), + tag.Insert(metrics.Commit, build.CurrentCommit), + tag.Insert(metrics.NodeType, "miner"), + ) // Register all metric views - if err := view.Register( - metrics.DefaultViews..., + if err = view.Register( + metrics.MinerNodeViews..., ); err != nil { log.Fatalf("Cannot register the view: %v", err) } + // Set the metric to one so it is published to the exporter + stats.Record(ctx, metrics.LotusInfo.M(1)) v, err := nodeApi.Version(ctx) if err != nil { @@ -88,8 +95,8 @@ var runCmd = &cli.Command{ } } - if v.APIVersion != build.FullAPIVersion { - return 
xerrors.Errorf("lotus-daemon API version doesn't match: expected: %s", api.Version{APIVersion: build.FullAPIVersion}) + if v.APIVersion != api.FullAPIVersion { + return xerrors.Errorf("lotus-daemon API version doesn't match: expected: %s", api.APIVersion{APIVersion: api.FullAPIVersion}) } log.Info("Checking full node sync status") @@ -162,6 +169,7 @@ var runCmd = &cli.Command{ mux.Handle("/rpc/v0", rpcServer) mux.PathPrefix("/remote").HandlerFunc(minerapi.(*impl.StorageMinerAPI).ServeRemote) + mux.Handle("/debug/metrics", metrics.Exporter()) mux.PathPrefix("/").Handler(http.DefaultServeMux) // pprof ah := &auth.Handler{ diff --git a/cmd/lotus-storage-miner/sectors.go b/cmd/lotus-storage-miner/sectors.go index 6bb3ca1661f..8f6fd374f42 100644 --- a/cmd/lotus-storage-miner/sectors.go +++ b/cmd/lotus-storage-miner/sectors.go @@ -13,10 +13,13 @@ import ( "github.com/urfave/cli/v2" "golang.org/x/xerrors" + "github.com/filecoin-project/go-bitfield" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" + miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/types" @@ -35,6 +38,7 @@ var sectorsCmd = &cli.Command{ sectorsRefsCmd, sectorsUpdateCmd, sectorsPledgeCmd, + sectorsExtendCmd, sectorsTerminateCmd, sectorsRemoveCmd, sectorsMarkForUpgradeCmd, @@ -55,7 +59,14 @@ var sectorsPledgeCmd = &cli.Command{ defer closer() ctx := lcli.ReqContext(cctx) - return nodeApi.PledgeSector(ctx) + id, err := nodeApi.PledgeSector(ctx) + if err != nil { + return err + } + + fmt.Println("Created CC sector: ", id.Number) + + return nil }, } @@ -403,6 +414,100 @@ var sectorsRefsCmd = &cli.Command{ }, } +var sectorsExtendCmd = &cli.Command{ + Name: "extend", + Usage: "Extend sector 
expiration", + ArgsUsage: "", + Flags: []cli.Flag{ + &cli.Int64Flag{ + Name: "new-expiration", + Usage: "new expiration epoch", + Required: true, + }, + &cli.StringFlag{}, + }, + Action: func(cctx *cli.Context) error { + nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer closer() + + api, nCloser, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer nCloser() + + ctx := lcli.ReqContext(cctx) + if !cctx.Args().Present() { + return xerrors.Errorf("must pass at least one sector number") + } + + maddr, err := nodeApi.ActorAddress(ctx) + if err != nil { + return xerrors.Errorf("getting miner actor address: %w", err) + } + + sectors := map[miner.SectorLocation][]uint64{} + + for i, s := range cctx.Args().Slice() { + id, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return xerrors.Errorf("could not parse sector %d: %w", i, err) + } + + p, err := api.StateSectorPartition(ctx, maddr, abi.SectorNumber(id), types.EmptyTSK) + if err != nil { + return xerrors.Errorf("getting sector location for sector %d: %w", id, err) + } + + if p == nil { + return xerrors.Errorf("sector %d not found in any partition", id) + } + + sectors[*p] = append(sectors[*p], id) + } + + params := &miner0.ExtendSectorExpirationParams{} + for l, numbers := range sectors { + + params.Extensions = append(params.Extensions, miner0.ExpirationExtension{ + Deadline: l.Deadline, + Partition: l.Partition, + Sectors: bitfield.NewFromSet(numbers), + NewExpiration: abi.ChainEpoch(cctx.Int64("new-expiration")), + }) + } + + sp, err := actors.SerializeParams(params) + if err != nil { + return xerrors.Errorf("serializing params: %w", err) + } + + mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("getting miner info: %w", err) + } + + smsg, err := api.MpoolPushMessage(ctx, &types.Message{ + From: mi.Worker, + To: maddr, + Method: miner.Methods.ExtendSectorExpiration, + + Value: big.Zero(), + 
Params: sp, + }, nil) + if err != nil { + return xerrors.Errorf("mpool push message: %w", err) + } + + fmt.Println(smsg.Cid()) + + return nil + }, +} + var sectorsTerminateCmd = &cli.Command{ Name: "terminate", Usage: "Terminate sector on-chain then remove (WARNING: This means losing power and collateral for the removed sector)", diff --git a/cmd/lotus-storage-miner/storage.go b/cmd/lotus-storage-miner/storage.go index e6986f8c764..2f42fd530e4 100644 --- a/cmd/lotus-storage-miner/storage.go +++ b/cmd/lotus-storage-miner/storage.go @@ -12,6 +12,7 @@ import ( "strings" "time" + "github.com/docker/go-units" "github.com/fatih/color" "github.com/google/uuid" "github.com/mitchellh/go-homedir" @@ -88,6 +89,10 @@ over time Name: "store", Usage: "(for init) use path for long-term storage", }, + &cli.StringFlag{ + Name: "max-storage", + Usage: "(for init) limit storage space for sectors (expensive for very large paths!)", + }, }, Action: func(cctx *cli.Context) error { nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) @@ -121,11 +126,20 @@ over time return err } + var maxStor int64 + if cctx.IsSet("max-storage") { + maxStor, err = units.RAMInBytes(cctx.String("max-storage")) + if err != nil { + return xerrors.Errorf("parsing max-storage: %w", err) + } + } + cfg := &stores.LocalStorageMeta{ - ID: stores.ID(uuid.New().String()), - Weight: cctx.Uint64("weight"), - CanSeal: cctx.Bool("seal"), - CanStore: cctx.Bool("store"), + ID: stores.ID(uuid.New().String()), + Weight: cctx.Uint64("weight"), + CanSeal: cctx.Bool("seal"), + CanStore: cctx.Bool("store"), + MaxStorage: uint64(maxStor), } if !(cfg.CanStore || cfg.CanSeal) { @@ -220,26 +234,66 @@ var storageListCmd = &cli.Command{ } ping := time.Now().Sub(pingStart) - usedPercent := (st.Capacity - st.Available) * 100 / st.Capacity - - percCol := color.FgGreen - switch { - case usedPercent > 98: - percCol = color.FgRed - case usedPercent > 90: - percCol = color.FgYellow + safeRepeat := func(s string, count int) string { + if 
count < 0 { + return "" + } + return strings.Repeat(s, count) } var barCols = int64(50) - set := (st.Capacity - st.Available) * barCols / st.Capacity - used := (st.Capacity - (st.Available + st.Reserved)) * barCols / st.Capacity - reserved := set - used - bar := strings.Repeat("#", int(used)) + strings.Repeat("*", int(reserved)) + strings.Repeat(" ", int(barCols-set)) - - fmt.Printf("\t[%s] %s/%s %s\n", color.New(percCol).Sprint(bar), - types.SizeStr(types.NewInt(uint64(st.Capacity-st.Available))), - types.SizeStr(types.NewInt(uint64(st.Capacity))), - color.New(percCol).Sprintf("%d%%", usedPercent)) + + // filesystem use bar + { + usedPercent := (st.Capacity - st.FSAvailable) * 100 / st.Capacity + + percCol := color.FgGreen + switch { + case usedPercent > 98: + percCol = color.FgRed + case usedPercent > 90: + percCol = color.FgYellow + } + + set := (st.Capacity - st.FSAvailable) * barCols / st.Capacity + used := (st.Capacity - (st.FSAvailable + st.Reserved)) * barCols / st.Capacity + reserved := set - used + bar := safeRepeat("#", int(used)) + safeRepeat("*", int(reserved)) + safeRepeat(" ", int(barCols-set)) + + desc := "" + if st.Max > 0 { + desc = " (filesystem)" + } + + fmt.Printf("\t[%s] %s/%s %s%s\n", color.New(percCol).Sprint(bar), + types.SizeStr(types.NewInt(uint64(st.Capacity-st.FSAvailable))), + types.SizeStr(types.NewInt(uint64(st.Capacity))), + color.New(percCol).Sprintf("%d%%", usedPercent), desc) + } + + // optional configured limit bar + if st.Max > 0 { + usedPercent := st.Used * 100 / st.Max + + percCol := color.FgGreen + switch { + case usedPercent > 98: + percCol = color.FgRed + case usedPercent > 90: + percCol = color.FgYellow + } + + set := st.Used * barCols / st.Max + used := (st.Used + st.Reserved) * barCols / st.Max + reserved := set - used + bar := safeRepeat("#", int(used)) + safeRepeat("*", int(reserved)) + safeRepeat(" ", int(barCols-set)) + + fmt.Printf("\t[%s] %s/%s %s (limit)\n", color.New(percCol).Sprint(bar), + 
types.SizeStr(types.NewInt(uint64(st.Used))), + types.SizeStr(types.NewInt(uint64(st.Max))), + color.New(percCol).Sprintf("%d%%", usedPercent)) + } + fmt.Printf("\t%s; %s; %s; Reserved: %s\n", color.YellowString("Unsealed: %d", cnt[0]), color.GreenString("Sealed: %d", cnt[1]), diff --git a/cmd/lotus-townhall/main.go b/cmd/lotus-townhall/main.go index 7e8f6df7ff3..1e0460deee1 100644 --- a/cmd/lotus-townhall/main.go +++ b/cmd/lotus-townhall/main.go @@ -15,8 +15,8 @@ import ( "github.com/libp2p/go-libp2p-core/peer" pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/lib/blockstore" ) var topic = "/fil/headnotifs/" @@ -28,7 +28,7 @@ func init() { return } - bs := blockstore.NewTemporary() + bs := blockstore.NewMemory() c, err := car.LoadCar(bs, bytes.NewReader(genBytes)) if err != nil { diff --git a/cmd/lotus/daemon.go b/cmd/lotus/daemon.go index 4226c33f775..e84fb519592 100644 --- a/cmd/lotus/daemon.go +++ b/cmd/lotus/daemon.go @@ -192,7 +192,20 @@ var DaemonCmd = &cli.Command{ return fmt.Errorf("unrecognized profile type: %q", profile) } - ctx, _ := tag.New(context.Background(), tag.Insert(metrics.Version, build.BuildVersion), tag.Insert(metrics.Commit, build.CurrentCommit)) + ctx, _ := tag.New(context.Background(), + tag.Insert(metrics.Version, build.BuildVersion), + tag.Insert(metrics.Commit, build.CurrentCommit), + tag.Insert(metrics.NodeType, "chain"), + ) + // Register all metric views + if err = view.Register( + metrics.ChainNodeViews..., + ); err != nil { + log.Fatalf("Cannot register the view: %v", err) + } + // Set the metric to one so it is published to the exporter + stats.Record(ctx, metrics.LotusInfo.M(1)) + { dir, err := homedir.Expand(cctx.String("repo")) if err != nil { @@ -300,11 +313,12 @@ var DaemonCmd = &cli.Command{ stop, err := node.New(ctx, node.FullAPI(&api, node.Lite(isLite)), - node.Override(new(dtypes.Bootstrapper), 
isBootstrapper), - node.Override(new(dtypes.ShutdownChan), shutdownChan), node.Online(), node.Repo(r), + node.Override(new(dtypes.Bootstrapper), isBootstrapper), + node.Override(new(dtypes.ShutdownChan), shutdownChan), + genesis, liteModeDeps, @@ -332,16 +346,6 @@ var DaemonCmd = &cli.Command{ } } - // Register all metric views - if err = view.Register( - metrics.DefaultViews..., - ); err != nil { - log.Fatalf("Cannot register the view: %v", err) - } - - // Set the metric to one so it is published to the exporter - stats.Record(ctx, metrics.LotusInfo.M(1)) - endpoint, err := r.APIEndpoint() if err != nil { return xerrors.Errorf("getting api endpoint: %w", err) @@ -400,7 +404,7 @@ func ImportChain(ctx context.Context, r repo.Repo, fname string, snapshot bool) defer resp.Body.Close() //nolint:errcheck if resp.StatusCode != http.StatusOK { - return xerrors.Errorf("non-200 response: %d", resp.StatusCode) + return xerrors.Errorf("fetching chain CAR failed with non-200 response: %d", resp.StatusCode) } rd = resp.Body @@ -432,7 +436,7 @@ func ImportChain(ctx context.Context, r repo.Repo, fname string, snapshot bool) } defer lr.Close() //nolint:errcheck - bs, err := lr.Blockstore(ctx, repo.BlockstoreChain) + bs, err := lr.Blockstore(ctx, repo.UniversalBlockstore) if err != nil { return xerrors.Errorf("failed to open blockstore: %w", err) } diff --git a/cmd/lotus/main.go b/cmd/lotus/main.go index eb97045eeb1..af9c567357e 100644 --- a/cmd/lotus/main.go +++ b/cmd/lotus/main.go @@ -6,6 +6,7 @@ import ( "github.com/urfave/cli/v2" "go.opencensus.io/trace" + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" lcli "github.com/filecoin-project/lotus/cli" "github.com/filecoin-project/lotus/lib/lotuslog" @@ -16,7 +17,7 @@ import ( var AdvanceBlockCmd *cli.Command func main() { - build.RunningNodeType = build.NodeFull + api.RunningNodeType = api.NodeFull lotuslog.SetupLogLevels() diff --git a/cmd/lotus/rpc.go b/cmd/lotus/rpc.go index 
82a1fb4805f..48720d83397 100644 --- a/cmd/lotus/rpc.go +++ b/cmd/lotus/rpc.go @@ -15,12 +15,9 @@ import ( logging "github.com/ipfs/go-log/v2" "github.com/multiformats/go-multiaddr" manet "github.com/multiformats/go-multiaddr/net" - promclient "github.com/prometheus/client_golang/prometheus" "go.opencensus.io/tag" "golang.org/x/xerrors" - "contrib.go.opencensus.io/exporter/prometheus" - "github.com/filecoin-project/go-jsonrpc" "github.com/filecoin-project/go-jsonrpc/auth" @@ -55,23 +52,7 @@ func serveRPC(a api.FullNode, stop node.StopFunc, addr multiaddr.Multiaddr, shut http.Handle("/rest/v0/import", importAH) - // Prometheus globals are exposed as interfaces, but the prometheus - // OpenCensus exporter expects a concrete *Registry. The concrete type of - // the globals are actually *Registry, so we downcast them, staying - // defensive in case things change under the hood. - registry, ok := promclient.DefaultRegisterer.(*promclient.Registry) - if !ok { - log.Warnf("failed to export default prometheus registry; some metrics will be unavailable; unexpected type: %T", promclient.DefaultRegisterer) - } - exporter, err := prometheus.NewExporter(prometheus.Options{ - Registry: registry, - Namespace: "lotus", - }) - if err != nil { - log.Fatalf("could not create the prometheus stats exporter: %v", err) - } - - http.Handle("/debug/metrics", exporter) + http.Handle("/debug/metrics", metrics.Exporter()) http.Handle("/debug/pprof-set/block", handleFractionOpt("BlockProfileRate", runtime.SetBlockProfileRate)) http.Handle("/debug/pprof-set/mutex", handleFractionOpt("MutexProfileFraction", func(x int) { runtime.SetMutexProfileFraction(x) }, diff --git a/cmd/tvx/exec.go b/cmd/tvx/exec.go index e2fb787fbd5..15bb543a50e 100644 --- a/cmd/tvx/exec.go +++ b/cmd/tvx/exec.go @@ -17,10 +17,10 @@ import ( "github.com/filecoin-project/test-vectors/schema" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/state" 
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/conformance" - "github.com/filecoin-project/lotus/lib/blockstore" ) var execFlags struct { diff --git a/cmd/tvx/simulate.go b/cmd/tvx/simulate.go index 7a33707dc11..da9a034e923 100644 --- a/cmd/tvx/simulate.go +++ b/cmd/tvx/simulate.go @@ -154,7 +154,7 @@ func runSimulateCmd(_ *cli.Context) error { version, err := FullAPI.Version(ctx) if err != nil { log.Printf("failed to get node version: %s; falling back to unknown", err) - version = api.Version{} + version = api.APIVersion{} } nv, err := FullAPI.StateNetworkVersion(ctx, ts.Key()) diff --git a/cmd/tvx/stores.go b/cmd/tvx/stores.go index e160929daab..66445be70b2 100644 --- a/cmd/tvx/stores.go +++ b/cmd/tvx/stores.go @@ -9,7 +9,7 @@ import ( dssync "github.com/ipfs/go-datastore/sync" "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/lib/blockstore" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/actors/adt" @@ -45,7 +45,7 @@ func NewProxyingStores(ctx context.Context, api api.FullNode) *Stores { bs := &proxyingBlockstore{ ctx: ctx, api: api, - Blockstore: blockstore.NewBlockstore(ds), + Blockstore: blockstore.FromDatastore(ds), } return NewStores(ctx, ds, bs) } diff --git a/conformance/driver.go b/conformance/driver.go index 98436cf9610..70100700e83 100644 --- a/conformance/driver.go +++ b/conformance/driver.go @@ -5,6 +5,7 @@ import ( gobig "math/big" "os" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" @@ -12,7 +13,6 @@ import ( "github.com/filecoin-project/lotus/chain/vm" "github.com/filecoin-project/lotus/conformance/chaos" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" - "github.com/filecoin-project/lotus/lib/blockstore" _ "github.com/filecoin-project/lotus/lib/sigs/bls" // enable bls signatures _ 
"github.com/filecoin-project/lotus/lib/sigs/secp" // enable secp signatures diff --git a/conformance/runner.go b/conformance/runner.go index 8ced484c9b2..1044bb329e8 100644 --- a/conformance/runner.go +++ b/conformance/runner.go @@ -26,9 +26,9 @@ import ( "github.com/filecoin-project/test-vectors/schema" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" - "github.com/filecoin-project/lotus/lib/blockstore" ) // FallbackBlockstoreGetter is a fallback blockstore to use for resolving CIDs @@ -306,7 +306,7 @@ func writeStateToTempCAR(bs blockstore.Blockstore, roots ...cid.Cid) (string, er } func LoadBlockstore(vectorCAR schema.Base64EncodedBytes) (blockstore.Blockstore, error) { - bs := blockstore.Blockstore(blockstore.NewTemporary()) + bs := blockstore.Blockstore(blockstore.NewMemory()) // Read the base64-encoded CAR from the vector, and inflate the gzip. buf := bytes.NewReader(vectorCAR) diff --git a/documentation/en/api-methods-miner.md b/documentation/en/api-methods-miner.md index 08d249a10ac..da10a8bcd5e 100644 --- a/documentation/en/api-methods-miner.md +++ b/documentation/en/api-methods-miner.md @@ -69,6 +69,7 @@ * [NetConnectedness](#NetConnectedness) * [NetDisconnect](#NetDisconnect) * [NetFindPeer](#NetFindPeer) + * [NetPeerInfo](#NetPeerInfo) * [NetPeers](#NetPeers) * [NetPubsubScores](#NetPubsubScores) * [Pieces](#Pieces) @@ -199,7 +200,9 @@ Response: { "PreCommitControl": null, "CommitControl": null, - "TerminateControl": null + "TerminateControl": null, + "DisableOwnerFallback": true, + "DisableWorkerFallback": true } ``` @@ -1045,6 +1048,38 @@ Response: } ``` +### NetPeerInfo + + +Perms: read + +Inputs: +```json +[ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" +] +``` + +Response: +```json +{ + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Agent": "string value", + "Addrs": null, + "Protocols": null, + "ConnMgrMeta": { + "FirstSeen": 
"0001-01-01T00:00:00Z", + "Value": 123, + "Tags": { + "name": 42 + }, + "Conns": { + "name": "2021-03-08T22:52:18Z" + } + } +} +``` + ### NetPeers @@ -1143,7 +1178,13 @@ Perms: write Inputs: `null` -Response: `{}` +Response: +```json +{ + "Miner": 1000, + "Number": 9 +} +``` ## Return @@ -1773,13 +1814,17 @@ Inputs: "ID": "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8", "URLs": null, "Weight": 42, + "MaxStorage": 42, "CanSeal": true, "CanStore": true }, { "Capacity": 9, "Available": 9, - "Reserved": 9 + "FSAvailable": 9, + "Reserved": 9, + "Max": 9, + "Used": 9 } ] ``` @@ -1879,6 +1924,7 @@ Response: "ID": "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8", "URLs": null, "Weight": 42, + "MaxStorage": 42, "CanSeal": true, "CanStore": true } @@ -1950,7 +1996,10 @@ Inputs: "Stat": { "Capacity": 9, "Available": 9, - "Reserved": 9 + "FSAvailable": 9, + "Reserved": 9, + "Max": 9, + "Used": 9 }, "Err": "string value" } @@ -1976,7 +2025,10 @@ Response: { "Capacity": 9, "Available": 9, - "Reserved": 9 + "FSAvailable": 9, + "Reserved": 9, + "Max": 9, + "Used": 9 } ``` diff --git a/documentation/en/api-methods.md b/documentation/en/api-methods.md index 84a86f943cd..49afeeb1a0a 100644 --- a/documentation/en/api-methods.md +++ b/documentation/en/api-methods.md @@ -121,6 +121,7 @@ * [NetConnectedness](#NetConnectedness) * [NetDisconnect](#NetDisconnect) * [NetFindPeer](#NetFindPeer) + * [NetPeerInfo](#NetPeerInfo) * [NetPeers](#NetPeers) * [NetPubsubScores](#NetPubsubScores) * [Paych](#Paych) @@ -2887,6 +2888,38 @@ Response: } ``` +### NetPeerInfo + + +Perms: read + +Inputs: +```json +[ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" +] +``` + +Response: +```json +{ + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Agent": "string value", + "Addrs": null, + "Protocols": null, + "ConnMgrMeta": { + "FirstSeen": "0001-01-01T00:00:00Z", + "Value": 123, + "Tags": { + "name": 42 + }, + "Conns": { + "name": "2021-03-08T22:52:18Z" + } + } +} +``` + ### NetPeers diff --git 
a/documentation/en/architecture/architecture.md b/documentation/en/architecture/architecture.md index 5a9eee3c275..64914d53996 100644 --- a/documentation/en/architecture/architecture.md +++ b/documentation/en/architecture/architecture.md @@ -311,7 +311,7 @@ FIXME: Maybe mention the `Batching` interface as the developer will stumble upon FIXME: IPFS blocks vs Filecoin blocks ideally happens before this / here -The [`Blockstore` interface](`github.com/filecoin-project/lotus/lib/blockstore.go`) structures the key-value pair +The [`Blockstore` interface](`github.com/filecoin-project/lotus/blockstore/blockstore.go`) structures the key-value pair into the CID format for the key and the [`Block` interface](`github.com/ipfs/go-block-format/blocks.go`) for the value. The `Block` value is just a raw string of bytes addressed by its hash, which is included in the CID key. diff --git a/extern/sector-storage/fr32/readers_test.go b/extern/sector-storage/fr32/readers_test.go index 706af5fee79..2411955529a 100644 --- a/extern/sector-storage/fr32/readers_test.go +++ b/extern/sector-storage/fr32/readers_test.go @@ -1,6 +1,7 @@ package fr32_test import ( + "bufio" "bytes" "io/ioutil" "testing" @@ -25,7 +26,8 @@ func TestUnpadReader(t *testing.T) { t.Fatal(err) } - readered, err := ioutil.ReadAll(r) + // using bufio reader to make sure reads are big enough for the padreader - it can't handle small reads right now + readered, err := ioutil.ReadAll(bufio.NewReaderSize(r, 512)) if err != nil { t.Fatal(err) } diff --git a/extern/sector-storage/fsutil/statfs.go b/extern/sector-storage/fsutil/statfs.go index 2a00ccb9aba..50ec86d463f 100644 --- a/extern/sector-storage/fsutil/statfs.go +++ b/extern/sector-storage/fsutil/statfs.go @@ -1,7 +1,12 @@ package fsutil type FsStat struct { - Capacity int64 - Available int64 // Available to use for sector storage - Reserved int64 + Capacity int64 + Available int64 // Available to use for sector storage + FSAvailable int64 // Available in the 
filesystem + Reserved int64 + + // non-zero when storage has configured MaxStorage + Max int64 + Used int64 } diff --git a/extern/sector-storage/fsutil/statfs_unix.go b/extern/sector-storage/fsutil/statfs_unix.go index 831fd8b4f10..da09c5c60fe 100644 --- a/extern/sector-storage/fsutil/statfs_unix.go +++ b/extern/sector-storage/fsutil/statfs_unix.go @@ -15,7 +15,9 @@ func Statfs(path string) (FsStat, error) { // force int64 to handle platform specific differences //nolint:unconvert return FsStat{ - Capacity: int64(stat.Blocks) * int64(stat.Bsize), - Available: int64(stat.Bavail) * int64(stat.Bsize), + Capacity: int64(stat.Blocks) * int64(stat.Bsize), + + Available: int64(stat.Bavail) * int64(stat.Bsize), + FSAvailable: int64(stat.Bavail) * int64(stat.Bsize), }, nil } diff --git a/extern/sector-storage/fsutil/statfs_windows.go b/extern/sector-storage/fsutil/statfs_windows.go index d785651826e..87ff75708d0 100644 --- a/extern/sector-storage/fsutil/statfs_windows.go +++ b/extern/sector-storage/fsutil/statfs_windows.go @@ -22,7 +22,8 @@ func Statfs(volumePath string) (FsStat, error) { uintptr(unsafe.Pointer(&availBytes))) return FsStat{ - Capacity: totalBytes, - Available: availBytes, + Capacity: totalBytes, + Available: availBytes, + FSAvailable: availBytes, }, nil } diff --git a/extern/sector-storage/manager.go b/extern/sector-storage/manager.go index a9b31f38a6a..3db7ac9ec91 100644 --- a/extern/sector-storage/manager.go +++ b/extern/sector-storage/manager.go @@ -632,47 +632,47 @@ func (m *Manager) Remove(ctx context.Context, sector storage.SectorRef) error { } func (m *Manager) ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err *storiface.CallError) error { - return m.returnResult(callID, pi, err) + return m.returnResult(ctx, callID, pi, err) } func (m *Manager) ReturnSealPreCommit1(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err *storiface.CallError) error { - return m.returnResult(callID, p1o, err) + 
return m.returnResult(ctx, callID, p1o, err) } func (m *Manager) ReturnSealPreCommit2(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err *storiface.CallError) error { - return m.returnResult(callID, sealed, err) + return m.returnResult(ctx, callID, sealed, err) } func (m *Manager) ReturnSealCommit1(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err *storiface.CallError) error { - return m.returnResult(callID, out, err) + return m.returnResult(ctx, callID, out, err) } func (m *Manager) ReturnSealCommit2(ctx context.Context, callID storiface.CallID, proof storage.Proof, err *storiface.CallError) error { - return m.returnResult(callID, proof, err) + return m.returnResult(ctx, callID, proof, err) } func (m *Manager) ReturnFinalizeSector(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error { - return m.returnResult(callID, nil, err) + return m.returnResult(ctx, callID, nil, err) } func (m *Manager) ReturnReleaseUnsealed(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error { - return m.returnResult(callID, nil, err) + return m.returnResult(ctx, callID, nil, err) } func (m *Manager) ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error { - return m.returnResult(callID, nil, err) + return m.returnResult(ctx, callID, nil, err) } func (m *Manager) ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error { - return m.returnResult(callID, nil, err) + return m.returnResult(ctx, callID, nil, err) } func (m *Manager) ReturnReadPiece(ctx context.Context, callID storiface.CallID, ok bool, err *storiface.CallError) error { - return m.returnResult(callID, ok, err) + return m.returnResult(ctx, callID, ok, err) } func (m *Manager) ReturnFetch(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error { - return m.returnResult(callID, nil, err) + return m.returnResult(ctx, callID, nil, err) 
} func (m *Manager) StorageLocal(ctx context.Context) (map[stores.ID]string, error) { diff --git a/extern/sector-storage/manager_calltracker.go b/extern/sector-storage/manager_calltracker.go index c3b2e3190dc..332a08817bb 100644 --- a/extern/sector-storage/manager_calltracker.go +++ b/extern/sector-storage/manager_calltracker.go @@ -349,7 +349,7 @@ func (m *Manager) waitCall(ctx context.Context, callID storiface.CallID) (interf } } -func (m *Manager) returnResult(callID storiface.CallID, r interface{}, cerr *storiface.CallError) error { +func (m *Manager) returnResult(ctx context.Context, callID storiface.CallID, r interface{}, cerr *storiface.CallError) error { res := result{ r: r, } @@ -357,7 +357,7 @@ func (m *Manager) returnResult(callID storiface.CallID, r interface{}, cerr *sto res.err = cerr } - m.sched.workTracker.onDone(callID) + m.sched.workTracker.onDone(ctx, callID) m.workLk.Lock() defer m.workLk.Unlock() @@ -413,5 +413,5 @@ func (m *Manager) returnResult(callID storiface.CallID, r interface{}, cerr *sto func (m *Manager) Abort(ctx context.Context, call storiface.CallID) error { // TODO: Allow temp error - return m.returnResult(call, nil, storiface.Err(storiface.ErrUnknown, xerrors.New("task aborted"))) + return m.returnResult(ctx, call, nil, storiface.Err(storiface.ErrUnknown, xerrors.New("task aborted"))) } diff --git a/extern/sector-storage/sched_test.go b/extern/sector-storage/sched_test.go index a87d403b7cf..63f3de64d00 100644 --- a/extern/sector-storage/sched_test.go +++ b/extern/sector-storage/sched_test.go @@ -154,9 +154,10 @@ func addTestWorker(t *testing.T, sched *scheduler, index *stores.Index, name str CanSeal: path.CanSeal, CanStore: path.CanStore, }, fsutil.FsStat{ - Capacity: 1 << 40, - Available: 1 << 40, - Reserved: 3, + Capacity: 1 << 40, + Available: 1 << 40, + FSAvailable: 1 << 40, + Reserved: 3, }) require.NoError(t, err) } diff --git a/extern/sector-storage/sched_worker.go b/extern/sector-storage/sched_worker.go index 
40cf2fcf4ee..4e18e5c6f2b 100644 --- a/extern/sector-storage/sched_worker.go +++ b/extern/sector-storage/sched_worker.go @@ -398,7 +398,7 @@ func (sw *schedWorker) startProcessingTask(taskDone chan struct{}, req *workerRe go func() { // first run the prepare step (e.g. fetching sector data from other worker) - err := req.prepare(req.ctx, sh.workTracker.worker(sw.wid, w.workerRpc)) + err := req.prepare(req.ctx, sh.workTracker.worker(sw.wid, w.info, w.workerRpc)) sh.workersLk.Lock() if err != nil { @@ -437,7 +437,7 @@ func (sw *schedWorker) startProcessingTask(taskDone chan struct{}, req *workerRe } // Do the work! - err = req.work(req.ctx, sh.workTracker.worker(sw.wid, w.workerRpc)) + err = req.work(req.ctx, sh.workTracker.worker(sw.wid, w.info, w.workerRpc)) select { case req.ret <- workerResponse{err: err}: diff --git a/extern/sector-storage/stores/index.go b/extern/sector-storage/stores/index.go index 4f3af552bfd..7f35418dd8f 100644 --- a/extern/sector-storage/stores/index.go +++ b/extern/sector-storage/stores/index.go @@ -26,9 +26,10 @@ var SkippedHeartbeatThresh = HeartbeatInterval * 5 type ID string type StorageInfo struct { - ID ID - URLs []string // TODO: Support non-http transports - Weight uint64 + ID ID + URLs []string // TODO: Support non-http transports + Weight uint64 + MaxStorage uint64 CanSeal bool CanStore bool @@ -156,6 +157,7 @@ func (i *Index) StorageAttach(ctx context.Context, si StorageInfo, st fsutil.FsS } i.stores[si.ID].info.Weight = si.Weight + i.stores[si.ID].info.MaxStorage = si.MaxStorage i.stores[si.ID].info.CanSeal = si.CanSeal i.stores[si.ID].info.CanStore = si.CanStore diff --git a/extern/sector-storage/stores/local.go b/extern/sector-storage/stores/local.go index c39e76f18c3..a7df5ae21f4 100644 --- a/extern/sector-storage/stores/local.go +++ b/extern/sector-storage/stores/local.go @@ -42,6 +42,10 @@ type LocalStorageMeta struct { // Finalized sectors that will be proved over time will be stored here CanStore bool + + // MaxStorage 
specifies the maximum number of bytes to use for sector storage + // (0 = unlimited) + MaxStorage uint64 } // StorageConfig .lotusstorage/storage.json @@ -77,7 +81,8 @@ type Local struct { } type path struct { - local string // absolute local path + local string // absolute local path + maxStorage uint64 reserved int64 reservations map[abi.SectorID]storiface.SectorFileType @@ -127,6 +132,25 @@ func (p *path) stat(ls LocalStorage) (fsutil.FsStat, error) { stat.Available = 0 } + if p.maxStorage > 0 { + used, err := ls.DiskUsage(p.local) + if err != nil { + return fsutil.FsStat{}, err + } + + stat.Max = int64(p.maxStorage) + stat.Used = used + + avail := int64(p.maxStorage) - used + if uint64(used) > p.maxStorage { + avail = 0 + } + + if avail < stat.Available { + stat.Available = avail + } + } + return stat, err } @@ -164,6 +188,7 @@ func (st *Local) OpenPath(ctx context.Context, p string) error { out := &path{ local: p, + maxStorage: meta.MaxStorage, reserved: 0, reservations: map[abi.SectorID]storiface.SectorFileType{}, } @@ -174,11 +199,12 @@ func (st *Local) OpenPath(ctx context.Context, p string) error { } err = st.index.StorageAttach(ctx, StorageInfo{ - ID: meta.ID, - URLs: st.urls, - Weight: meta.Weight, - CanSeal: meta.CanSeal, - CanStore: meta.CanStore, + ID: meta.ID, + URLs: st.urls, + Weight: meta.Weight, + MaxStorage: meta.MaxStorage, + CanSeal: meta.CanSeal, + CanStore: meta.CanStore, }, fst) if err != nil { return xerrors.Errorf("declaring storage in index: %w", err) @@ -237,11 +263,12 @@ func (st *Local) Redeclare(ctx context.Context) error { } err = st.index.StorageAttach(ctx, StorageInfo{ - ID: id, - URLs: st.urls, - Weight: meta.Weight, - CanSeal: meta.CanSeal, - CanStore: meta.CanStore, + ID: id, + URLs: st.urls, + Weight: meta.Weight, + MaxStorage: meta.MaxStorage, + CanSeal: meta.CanSeal, + CanStore: meta.CanStore, }, fst) if err != nil { return xerrors.Errorf("redeclaring storage in index: %w", err) diff --git 
a/extern/sector-storage/stores/local_test.go b/extern/sector-storage/stores/local_test.go index 1c31e8c0952..ac5f6f3413f 100644 --- a/extern/sector-storage/stores/local_test.go +++ b/extern/sector-storage/stores/local_test.go @@ -36,8 +36,9 @@ func (t *TestingLocalStorage) SetStorage(f func(*StorageConfig)) error { func (t *TestingLocalStorage) Stat(path string) (fsutil.FsStat, error) { return fsutil.FsStat{ - Capacity: pathSize, - Available: pathSize, + Capacity: pathSize, + Available: pathSize, + FSAvailable: pathSize, }, nil } diff --git a/extern/sector-storage/worker_tracked.go b/extern/sector-storage/worker_tracked.go index febb190c5b9..aeb3eea748a 100644 --- a/extern/sector-storage/worker_tracked.go +++ b/extern/sector-storage/worker_tracked.go @@ -7,17 +7,21 @@ import ( "time" "github.com/ipfs/go-cid" + "go.opencensus.io/stats" + "go.opencensus.io/tag" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" + "github.com/filecoin-project/lotus/metrics" ) type trackedWork struct { - job storiface.WorkerJob - worker WorkerID + job storiface.WorkerJob + worker WorkerID + workerHostname string } type workTracker struct { @@ -29,20 +33,31 @@ type workTracker struct { // TODO: done, aggregate stats, queue stats, scheduler feedback } -func (wt *workTracker) onDone(callID storiface.CallID) { +func (wt *workTracker) onDone(ctx context.Context, callID storiface.CallID) { wt.lk.Lock() defer wt.lk.Unlock() - _, ok := wt.running[callID] + t, ok := wt.running[callID] if !ok { wt.done[callID] = struct{}{} + + stats.Record(ctx, metrics.WorkerUntrackedCallsReturned.M(1)) return } + took := metrics.SinceInMilliseconds(t.job.Start) + + ctx, _ = tag.New( + ctx, + tag.Upsert(metrics.TaskType, string(t.job.Task)), + tag.Upsert(metrics.WorkerHostname, t.workerHostname), + ) + stats.Record(ctx, 
metrics.WorkerCallsReturnedCount.M(1), metrics.WorkerCallsReturnedDuration.M(took)) + delete(wt.running, callID) } -func (wt *workTracker) track(wid WorkerID, sid storage.SectorRef, task sealtasks.TaskType) func(storiface.CallID, error) (storiface.CallID, error) { +func (wt *workTracker) track(ctx context.Context, wid WorkerID, wi storiface.WorkerInfo, sid storage.SectorRef, task sealtasks.TaskType) func(storiface.CallID, error) (storiface.CallID, error) { return func(callID storiface.CallID, err error) (storiface.CallID, error) { if err != nil { return callID, err @@ -64,17 +79,26 @@ func (wt *workTracker) track(wid WorkerID, sid storage.SectorRef, task sealtasks Task: task, Start: time.Now(), }, - worker: wid, + worker: wid, + workerHostname: wi.Hostname, } + ctx, _ = tag.New( + ctx, + tag.Upsert(metrics.TaskType, string(task)), + tag.Upsert(metrics.WorkerHostname, wi.Hostname), + ) + stats.Record(ctx, metrics.WorkerCallsStarted.M(1)) + return callID, err } } -func (wt *workTracker) worker(wid WorkerID, w Worker) Worker { +func (wt *workTracker) worker(wid WorkerID, wi storiface.WorkerInfo, w Worker) Worker { return &trackedWorker{ - Worker: w, - wid: wid, + Worker: w, + wid: wid, + workerInfo: wi, tracker: wt, } @@ -94,45 +118,46 @@ func (wt *workTracker) Running() []trackedWork { type trackedWorker struct { Worker - wid WorkerID + wid WorkerID + workerInfo storiface.WorkerInfo tracker *workTracker } func (t *trackedWorker) SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) { - return t.tracker.track(t.wid, sector, sealtasks.TTPreCommit1)(t.Worker.SealPreCommit1(ctx, sector, ticket, pieces)) + return t.tracker.track(ctx, t.wid, t.workerInfo, sector, sealtasks.TTPreCommit1)(t.Worker.SealPreCommit1(ctx, sector, ticket, pieces)) } func (t *trackedWorker) SealPreCommit2(ctx context.Context, sector storage.SectorRef, pc1o storage.PreCommit1Out) (storiface.CallID, error) { - return 
t.tracker.track(t.wid, sector, sealtasks.TTPreCommit2)(t.Worker.SealPreCommit2(ctx, sector, pc1o)) + return t.tracker.track(ctx, t.wid, t.workerInfo, sector, sealtasks.TTPreCommit2)(t.Worker.SealPreCommit2(ctx, sector, pc1o)) } func (t *trackedWorker) SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) { - return t.tracker.track(t.wid, sector, sealtasks.TTCommit1)(t.Worker.SealCommit1(ctx, sector, ticket, seed, pieces, cids)) + return t.tracker.track(ctx, t.wid, t.workerInfo, sector, sealtasks.TTCommit1)(t.Worker.SealCommit1(ctx, sector, ticket, seed, pieces, cids)) } func (t *trackedWorker) SealCommit2(ctx context.Context, sector storage.SectorRef, c1o storage.Commit1Out) (storiface.CallID, error) { - return t.tracker.track(t.wid, sector, sealtasks.TTCommit2)(t.Worker.SealCommit2(ctx, sector, c1o)) + return t.tracker.track(ctx, t.wid, t.workerInfo, sector, sealtasks.TTCommit2)(t.Worker.SealCommit2(ctx, sector, c1o)) } func (t *trackedWorker) FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) { - return t.tracker.track(t.wid, sector, sealtasks.TTFinalize)(t.Worker.FinalizeSector(ctx, sector, keepUnsealed)) + return t.tracker.track(ctx, t.wid, t.workerInfo, sector, sealtasks.TTFinalize)(t.Worker.FinalizeSector(ctx, sector, keepUnsealed)) } func (t *trackedWorker) AddPiece(ctx context.Context, sector storage.SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) { - return t.tracker.track(t.wid, sector, sealtasks.TTAddPiece)(t.Worker.AddPiece(ctx, sector, pieceSizes, newPieceSize, pieceData)) + return t.tracker.track(ctx, t.wid, t.workerInfo, sector, sealtasks.TTAddPiece)(t.Worker.AddPiece(ctx, sector, pieceSizes, newPieceSize, pieceData)) } func (t *trackedWorker) Fetch(ctx 
context.Context, s storage.SectorRef, ft storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) { - return t.tracker.track(t.wid, s, sealtasks.TTFetch)(t.Worker.Fetch(ctx, s, ft, ptype, am)) + return t.tracker.track(ctx, t.wid, t.workerInfo, s, sealtasks.TTFetch)(t.Worker.Fetch(ctx, s, ft, ptype, am)) } func (t *trackedWorker) UnsealPiece(ctx context.Context, id storage.SectorRef, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) (storiface.CallID, error) { - return t.tracker.track(t.wid, id, sealtasks.TTUnseal)(t.Worker.UnsealPiece(ctx, id, index, size, randomness, cid)) + return t.tracker.track(ctx, t.wid, t.workerInfo, id, sealtasks.TTUnseal)(t.Worker.UnsealPiece(ctx, id, index, size, randomness, cid)) } func (t *trackedWorker) ReadPiece(ctx context.Context, writer io.Writer, id storage.SectorRef, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (storiface.CallID, error) { - return t.tracker.track(t.wid, id, sealtasks.TTReadUnsealed)(t.Worker.ReadPiece(ctx, writer, id, index, size)) + return t.tracker.track(ctx, t.wid, t.workerInfo, id, sealtasks.TTReadUnsealed)(t.Worker.ReadPiece(ctx, writer, id, index, size)) } var _ Worker = &trackedWorker{} diff --git a/extern/storage-sealing/cbor_gen.go b/extern/storage-sealing/cbor_gen.go index 02dcd8c8d9a..9e12b8649e9 100644 --- a/extern/storage-sealing/cbor_gen.go +++ b/extern/storage-sealing/cbor_gen.go @@ -519,7 +519,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { _, err := w.Write(cbg.CborNull) return err } - if _, err := w.Write([]byte{184, 25}); err != nil { + if _, err := w.Write([]byte{184, 26}); err != nil { return err } @@ -586,6 +586,28 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } } + // t.CreationTime (int64) (int64) + if len("CreationTime") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"CreationTime\" was too long") + } + + if err := 
cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("CreationTime"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("CreationTime")); err != nil { + return err + } + + if t.CreationTime >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.CreationTime)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.CreationTime-1)); err != nil { + return err + } + } + // t.Pieces ([]sealing.Piece) (slice) if len("Pieces") > cbg.MaxLength { return xerrors.Errorf("Value in field \"Pieces\" was too long") @@ -1151,6 +1173,32 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) error { t.SectorType = abi.RegisteredSealProof(extraI) } + // t.CreationTime (int64) (int64) + case "CreationTime": + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.CreationTime = int64(extraI) + } // t.Pieces ([]sealing.Piece) (slice) case "Pieces": diff --git a/extern/storage-sealing/fsm.go b/extern/storage-sealing/fsm.go index c989d02967b..c38101e6cf3 100644 --- a/extern/storage-sealing/fsm.go +++ b/extern/storage-sealing/fsm.go @@ -37,14 +37,22 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto // Sealing UndefinedSectorState: planOne( - on(SectorStart{}, Empty), + on(SectorStart{}, WaitDeals), on(SectorStartCC{}, Packing), ), - Empty: planOne(on(SectorAddPiece{}, WaitDeals)), + Empty: planOne( // deprecated + on(SectorAddPiece{}, AddPiece), + on(SectorStartPacking{}, Packing), + ), WaitDeals: planOne( - 
on(SectorAddPiece{}, WaitDeals), + on(SectorAddPiece{}, AddPiece), on(SectorStartPacking{}, Packing), ), + AddPiece: planOne( + on(SectorPieceAdded{}, WaitDeals), + apply(SectorStartPacking{}), + on(SectorAddPieceFailed{}, AddPieceFailed), + ), Packing: planOne(on(SectorPacked{}, GetTicket)), GetTicket: planOne( on(SectorTicket{}, PreCommit1), @@ -97,6 +105,7 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto // Sealing errors + AddPieceFailed: planOne(), SealPreCommit1Failed: planOne( on(SectorRetrySealPreCommit1{}, PreCommit1), ), @@ -238,12 +247,11 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta /* - * Empty <- incoming deals - | | - | v - *<- WaitDeals <- incoming deals - | | - | v + UndefinedSectorState (start) + v | + *<- WaitDeals <-> AddPiece | + | | /--------------------/ + | v v *<- Packing <- incoming committed capacity | | | v @@ -282,10 +290,6 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta v FailedUnrecoverable - UndefinedSectorState <- ¯\_(ツ)_/¯ - | ^ - *---------------------/ - */ m.stats.updateSector(m.minerSectorID(state.SectorNumber), state.State) @@ -295,7 +299,9 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta case Empty: fallthrough case WaitDeals: - log.Infof("Waiting for deals %d", state.SectorNumber) + return m.handleWaitDeals, processed, nil + case AddPiece: + return m.handleAddPiece, processed, nil case Packing: return m.handlePacking, processed, nil case GetTicket: @@ -418,60 +424,10 @@ func (m *Sealing) restartSectors(ctx context.Context) error { log.Errorf("loading sector list: %+v", err) } - cfg, err := m.getConfig() - if err != nil { - return xerrors.Errorf("getting the sealing delay: %w", err) - } - - spt, err := m.currentSealProof(ctx) - if err != nil { - return xerrors.Errorf("getting current seal proof: %w", err) - } - ssize, err := spt.SectorSize() - if err != nil { - return err - } - - // 
m.unsealedInfoMap.lk.Lock() taken early in .New to prevent races - defer m.unsealedInfoMap.lk.Unlock() - for _, sector := range trackedSectors { if err := m.sectors.Send(uint64(sector.SectorNumber), SectorRestart{}); err != nil { log.Errorf("restarting sector %d: %+v", sector.SectorNumber, err) } - - if sector.State == WaitDeals { - - // put the sector in the unsealedInfoMap - if _, ok := m.unsealedInfoMap.infos[sector.SectorNumber]; ok { - // something's funky here, but probably safe to move on - log.Warnf("sector %v was already in the unsealedInfoMap when restarting", sector.SectorNumber) - } else { - ui := UnsealedSectorInfo{ - ssize: ssize, - } - for _, p := range sector.Pieces { - if p.DealInfo != nil { - ui.numDeals++ - } - ui.stored += p.Piece.Size - ui.pieceSizes = append(ui.pieceSizes, p.Piece.Size.Unpadded()) - } - - m.unsealedInfoMap.infos[sector.SectorNumber] = ui - } - - // start a fresh timer for the sector - if cfg.WaitDealsDelay > 0 { - timer := time.NewTimer(cfg.WaitDealsDelay) - go func() { - <-timer.C - if err := m.StartPacking(sector.SectorNumber); err != nil { - log.Errorf("starting sector %d: %+v", sector.SectorNumber, err) - } - }() - } - } } // TODO: Grab on-chain sector set and diff with trackedSectors @@ -494,56 +450,72 @@ func final(events []statemachine.Event, state *SectorInfo) (uint64, error) { return 0, xerrors.Errorf("didn't expect any events in state %s, got %+v", state.State, events) } -func on(mut mutator, next SectorState) func() (mutator, func(*SectorInfo) error) { - return func() (mutator, func(*SectorInfo) error) { - return mut, func(state *SectorInfo) error { +func on(mut mutator, next SectorState) func() (mutator, func(*SectorInfo) (bool, error)) { + return func() (mutator, func(*SectorInfo) (bool, error)) { + return mut, func(state *SectorInfo) (bool, error) { state.State = next - return nil + return false, nil + } + } +} + +// like `on`, but doesn't change state +func apply(mut mutator) func() (mutator, func(*SectorInfo) 
(bool, error)) { + return func() (mutator, func(*SectorInfo) (bool, error)) { + return mut, func(state *SectorInfo) (bool, error) { + return true, nil } } } -func onReturning(mut mutator) func() (mutator, func(*SectorInfo) error) { - return func() (mutator, func(*SectorInfo) error) { - return mut, func(state *SectorInfo) error { +func onReturning(mut mutator) func() (mutator, func(*SectorInfo) (bool, error)) { + return func() (mutator, func(*SectorInfo) (bool, error)) { + return mut, func(state *SectorInfo) (bool, error) { if state.Return == "" { - return xerrors.Errorf("return state not set") + return false, xerrors.Errorf("return state not set") } state.State = SectorState(state.Return) state.Return = "" - return nil + return false, nil } } } -func planOne(ts ...func() (mut mutator, next func(*SectorInfo) error)) func(events []statemachine.Event, state *SectorInfo) (uint64, error) { +func planOne(ts ...func() (mut mutator, next func(*SectorInfo) (more bool, err error))) func(events []statemachine.Event, state *SectorInfo) (uint64, error) { return func(events []statemachine.Event, state *SectorInfo) (uint64, error) { - if gm, ok := events[0].User.(globalMutator); ok { - gm.applyGlobal(state) - return 1, nil - } + for i, event := range events { + if gm, ok := event.User.(globalMutator); ok { + gm.applyGlobal(state) + return uint64(i + 1), nil + } - for _, t := range ts { - mut, next := t() + for _, t := range ts { + mut, next := t() - if reflect.TypeOf(events[0].User) != reflect.TypeOf(mut) { - continue - } + if reflect.TypeOf(event.User) != reflect.TypeOf(mut) { + continue + } + + if err, iserr := event.User.(error); iserr { + log.Warnf("sector %d got error event %T: %+v", state.SectorNumber, event.User, err) + } - if err, iserr := events[0].User.(error); iserr { - log.Warnf("sector %d got error event %T: %+v", state.SectorNumber, events[0].User, err) + event.User.(mutator).apply(state) + more, err := next(state) + if err != nil || !more { + return uint64(i + 1), 
err + } } - events[0].User.(mutator).apply(state) - return 1, next(state) - } + _, ok := event.User.(Ignorable) + if ok { + continue + } - _, ok := events[0].User.(Ignorable) - if ok { - return 1, nil + return uint64(i + 1), xerrors.Errorf("planner for state %s received unexpected event %T (%+v)", state.State, event.User, event) } - return 0, xerrors.Errorf("planner for state %s received unexpected event %T (%+v)", state.State, events[0].User, events[0]) + return uint64(len(events)), nil } } diff --git a/extern/storage-sealing/fsm_events.go b/extern/storage-sealing/fsm_events.go index e2836672197..8d11b248b35 100644 --- a/extern/storage-sealing/fsm_events.go +++ b/extern/storage-sealing/fsm_events.go @@ -1,13 +1,16 @@ package sealing import ( - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "time" + "github.com/ipfs/go-cid" "golang.org/x/xerrors" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/specs-storage/storage" + + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" ) type mutator interface { @@ -67,23 +70,34 @@ func (evt SectorStart) apply(state *SectorInfo) { type SectorStartCC struct { ID abi.SectorNumber SectorType abi.RegisteredSealProof - Pieces []Piece } func (evt SectorStartCC) apply(state *SectorInfo) { state.SectorNumber = evt.ID - state.Pieces = evt.Pieces state.SectorType = evt.SectorType } -type SectorAddPiece struct { - NewPiece Piece -} +type SectorAddPiece struct{} func (evt SectorAddPiece) apply(state *SectorInfo) { - state.Pieces = append(state.Pieces, evt.NewPiece) + if state.CreationTime == 0 { + state.CreationTime = time.Now().Unix() + } } +type SectorPieceAdded struct { + NewPieces []Piece +} + +func (evt SectorPieceAdded) apply(state *SectorInfo) { + state.Pieces = append(state.Pieces, evt.NewPieces...) 
+} + +type SectorAddPieceFailed struct{ error } + +func (evt SectorAddPieceFailed) FormatError(xerrors.Printer) (next error) { return evt.error } +func (evt SectorAddPieceFailed) apply(si *SectorInfo) {} + type SectorStartPacking struct{} func (evt SectorStartPacking) apply(*SectorInfo) {} diff --git a/extern/storage-sealing/garbage.go b/extern/storage-sealing/garbage.go index 185bebe3560..398040e6ed0 100644 --- a/extern/storage-sealing/garbage.go +++ b/extern/storage-sealing/garbage.go @@ -5,91 +5,42 @@ import ( "golang.org/x/xerrors" - "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/specs-storage/storage" ) -func (m *Sealing) pledgeSector(ctx context.Context, sectorID storage.SectorRef, existingPieceSizes []abi.UnpaddedPieceSize, sizes ...abi.UnpaddedPieceSize) ([]abi.PieceInfo, error) { - if len(sizes) == 0 { - return nil, nil - } - - log.Infof("Pledge %d, contains %+v", sectorID, existingPieceSizes) - - out := make([]abi.PieceInfo, len(sizes)) - for i, size := range sizes { - ppi, err := m.sealer.AddPiece(ctx, sectorID, existingPieceSizes, size, NewNullReader(size)) - if err != nil { - return nil, xerrors.Errorf("add piece: %w", err) - } - - existingPieceSizes = append(existingPieceSizes, size) - - out[i] = ppi - } +func (m *Sealing) PledgeSector(ctx context.Context) (storage.SectorRef, error) { + m.inputLk.Lock() + defer m.inputLk.Unlock() - return out, nil -} - -func (m *Sealing) PledgeSector() error { cfg, err := m.getConfig() if err != nil { - return xerrors.Errorf("getting config: %w", err) + return storage.SectorRef{}, xerrors.Errorf("getting config: %w", err) } if cfg.MaxSealingSectors > 0 { if m.stats.curSealing() >= cfg.MaxSealingSectors { - return xerrors.Errorf("too many sectors sealing (curSealing: %d, max: %d)", m.stats.curSealing(), cfg.MaxSealingSectors) + return storage.SectorRef{}, xerrors.Errorf("too many sectors sealing (curSealing: %d, max: %d)", m.stats.curSealing(), cfg.MaxSealingSectors) } } - go func() { - 
ctx := context.TODO() // we can't use the context from command which invokes - // this, as we run everything here async, and it's cancelled when the - // command exits - - spt, err := m.currentSealProof(ctx) - if err != nil { - log.Errorf("%+v", err) - return - } - - size, err := spt.SectorSize() - if err != nil { - log.Errorf("%+v", err) - return - } - - sid, err := m.sc.Next() - if err != nil { - log.Errorf("%+v", err) - return - } - sectorID := m.minerSector(spt, sid) - err = m.sealer.NewSector(ctx, sectorID) - if err != nil { - log.Errorf("%+v", err) - return - } - - pieces, err := m.pledgeSector(ctx, sectorID, []abi.UnpaddedPieceSize{}, abi.PaddedPieceSize(size).Unpadded()) - if err != nil { - log.Errorf("%+v", err) - return - } + spt, err := m.currentSealProof(ctx) + if err != nil { + return storage.SectorRef{}, xerrors.Errorf("getting seal proof type: %w", err) + } - ps := make([]Piece, len(pieces)) - for idx := range ps { - ps[idx] = Piece{ - Piece: pieces[idx], - DealInfo: nil, - } - } + sid, err := m.sc.Next() + if err != nil { + return storage.SectorRef{}, xerrors.Errorf("generating sector number: %w", err) + } + sectorID := m.minerSector(spt, sid) + err = m.sealer.NewSector(ctx, sectorID) + if err != nil { + return storage.SectorRef{}, xerrors.Errorf("notifying sealer of the new sector: %w", err) + } - if err := m.newSectorCC(ctx, sid, ps); err != nil { - log.Errorf("%+v", err) - return - } - }() - return nil + log.Infof("Creating CC sector %d", sid) + return sectorID, m.sectors.Send(uint64(sid), SectorStartCC{ + ID: sid, + SectorType: spt, + }) } diff --git a/extern/storage-sealing/input.go b/extern/storage-sealing/input.go new file mode 100644 index 00000000000..ae1d6f0ddc4 --- /dev/null +++ b/extern/storage-sealing/input.go @@ -0,0 +1,424 @@ +package sealing + +import ( + "context" + "sort" + "time" + + "golang.org/x/xerrors" + + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-padreader" + "github.com/filecoin-project/go-state-types/abi" 
+ "github.com/filecoin-project/go-statemachine" + "github.com/filecoin-project/specs-storage/storage" + + sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" + "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" +) + +func (m *Sealing) handleWaitDeals(ctx statemachine.Context, sector SectorInfo) error { + var used abi.UnpaddedPieceSize + for _, piece := range sector.Pieces { + used += piece.Piece.Size.Unpadded() + } + + m.inputLk.Lock() + + started, err := m.maybeStartSealing(ctx, sector, used) + if err != nil || started { + delete(m.openSectors, m.minerSectorID(sector.SectorNumber)) + + m.inputLk.Unlock() + + return err + } + + m.openSectors[m.minerSectorID(sector.SectorNumber)] = &openSector{ + used: used, + maybeAccept: func(cid cid.Cid) error { + // todo check deal start deadline (configurable) + + sid := m.minerSectorID(sector.SectorNumber) + m.assignedPieces[sid] = append(m.assignedPieces[sid], cid) + + return ctx.Send(SectorAddPiece{}) + }, + } + + go func() { + defer m.inputLk.Unlock() + if err := m.updateInput(ctx.Context(), sector.SectorType); err != nil { + log.Errorf("%+v", err) + } + }() + + return nil +} + +func (m *Sealing) maybeStartSealing(ctx statemachine.Context, sector SectorInfo, used abi.UnpaddedPieceSize) (bool, error) { + now := time.Now() + st := m.sectorTimers[m.minerSectorID(sector.SectorNumber)] + if st != nil { + if !st.Stop() { // timer expired, SectorStartPacking was/is being sent + // we send another SectorStartPacking in case one was sent in the handleAddPiece state + log.Infow("starting to seal deal sector", "sector", sector.SectorNumber, "trigger", "wait-timeout") + return true, ctx.Send(SectorStartPacking{}) + } + } + + ssize, err := sector.SectorType.SectorSize() + if err != nil { + return false, xerrors.Errorf("getting sector size") + } + + maxDeals, err := getDealPerSectorLimit(ssize) + if err != nil { + return false, xerrors.Errorf("getting per-sector deal limit: %w", err) + } + + if 
len(sector.dealIDs()) >= maxDeals { + // can't accept more deals + log.Infow("starting to seal deal sector", "sector", sector.SectorNumber, "trigger", "maxdeals") + return true, ctx.Send(SectorStartPacking{}) + } + + if used.Padded() == abi.PaddedPieceSize(ssize) { + // sector full + log.Infow("starting to seal deal sector", "sector", sector.SectorNumber, "trigger", "filled") + return true, ctx.Send(SectorStartPacking{}) + } + + if sector.CreationTime != 0 { + cfg, err := m.getConfig() + if err != nil { + m.inputLk.Unlock() + return false, xerrors.Errorf("getting storage config: %w", err) + } + + // todo check deal age, start sealing if any deal has less than X (configurable) to start deadline + sealTime := time.Unix(sector.CreationTime, 0).Add(cfg.WaitDealsDelay) + + if now.After(sealTime) { + m.inputLk.Unlock() + log.Infow("starting to seal deal sector", "sector", sector.SectorNumber, "trigger", "wait-timeout") + return true, ctx.Send(SectorStartPacking{}) + } + + m.sectorTimers[m.minerSectorID(sector.SectorNumber)] = time.AfterFunc(sealTime.Sub(now), func() { + log.Infow("starting to seal deal sector", "sector", sector.SectorNumber, "trigger", "wait-timer") + + if err := ctx.Send(SectorStartPacking{}); err != nil { + log.Errorw("sending SectorStartPacking event failed", "sector", sector.SectorNumber, "error", err) + } + }) + } + + return false, nil +} + +func (m *Sealing) handleAddPiece(ctx statemachine.Context, sector SectorInfo) error { + ssize, err := sector.SectorType.SectorSize() + if err != nil { + return err + } + + res := SectorPieceAdded{} + + m.inputLk.Lock() + + pending, ok := m.assignedPieces[m.minerSectorID(sector.SectorNumber)] + if ok { + delete(m.assignedPieces, m.minerSectorID(sector.SectorNumber)) + } + m.inputLk.Unlock() + if !ok { + // nothing to do here (might happen after a restart in AddPiece) + return ctx.Send(res) + } + + var offset abi.UnpaddedPieceSize + pieceSizes := make([]abi.UnpaddedPieceSize, len(sector.Pieces)) + for i, p := 
range sector.Pieces { + pieceSizes[i] = p.Piece.Size.Unpadded() + offset += p.Piece.Size.Unpadded() + } + + maxDeals, err := getDealPerSectorLimit(ssize) + if err != nil { + return xerrors.Errorf("getting per-sector deal limit: %w", err) + } + + for i, piece := range pending { + m.inputLk.Lock() + deal, ok := m.pendingPieces[piece] + m.inputLk.Unlock() + if !ok { + return xerrors.Errorf("piece %s assigned to sector %d not found", piece, sector.SectorNumber) + } + + if len(sector.dealIDs())+(i+1) > maxDeals { + // todo: this is rather unlikely to happen, but in case it does, return the deal to waiting queue instead of failing it + deal.accepted(sector.SectorNumber, offset, xerrors.Errorf("too many deals assigned to sector %d, dropping deal", sector.SectorNumber)) + continue + } + + pads, padLength := ffiwrapper.GetRequiredPadding(offset.Padded(), deal.size.Padded()) + + if offset.Padded()+padLength+deal.size.Padded() > abi.PaddedPieceSize(ssize) { + // todo: this is rather unlikely to happen, but in case it does, return the deal to waiting queue instead of failing it + deal.accepted(sector.SectorNumber, offset, xerrors.Errorf("piece %s assigned to sector %d with not enough space", piece, sector.SectorNumber)) + continue + } + + offset += padLength.Unpadded() + + for _, p := range pads { + ppi, err := m.sealer.AddPiece(sectorstorage.WithPriority(ctx.Context(), DealSectorPriority), + m.minerSector(sector.SectorType, sector.SectorNumber), + pieceSizes, + p.Unpadded(), + NewNullReader(p.Unpadded())) + if err != nil { + err = xerrors.Errorf("writing padding piece: %w", err) + deal.accepted(sector.SectorNumber, offset, err) + return ctx.Send(SectorAddPieceFailed{err}) + } + + pieceSizes = append(pieceSizes, p.Unpadded()) + res.NewPieces = append(res.NewPieces, Piece{ + Piece: ppi, + }) + } + + ppi, err := m.sealer.AddPiece(sectorstorage.WithPriority(ctx.Context(), DealSectorPriority), + m.minerSector(sector.SectorType, sector.SectorNumber), + pieceSizes, + deal.size, + 
deal.data) + if err != nil { + err = xerrors.Errorf("writing piece: %w", err) + deal.accepted(sector.SectorNumber, offset, err) + return ctx.Send(SectorAddPieceFailed{err}) + } + + log.Infow("deal added to a sector", "deal", deal.deal.DealID, "sector", sector.SectorNumber, "piece", ppi.PieceCID) + + deal.accepted(sector.SectorNumber, offset, nil) + + offset += deal.size + pieceSizes = append(pieceSizes, deal.size) + + res.NewPieces = append(res.NewPieces, Piece{ + Piece: ppi, + DealInfo: &deal.deal, + }) + } + + return ctx.Send(res) +} + +func (m *Sealing) handleAddPieceFailed(ctx statemachine.Context, sector SectorInfo) error { + log.Errorf("No recovery plan for AddPiece failing") + // todo: cleanup sector / just go retry (requires adding offset param to AddPiece in sector-storage for this to be safe) + return nil +} + +func (m *Sealing) AddPieceToAnySector(ctx context.Context, size abi.UnpaddedPieceSize, data storage.Data, deal DealInfo) (abi.SectorNumber, abi.PaddedPieceSize, error) { + log.Infof("Adding piece for deal %d (publish msg: %s)", deal.DealID, deal.PublishCid) + if (padreader.PaddedSize(uint64(size))) != size { + return 0, 0, xerrors.Errorf("cannot allocate unpadded piece") + } + + sp, err := m.currentSealProof(ctx) + if err != nil { + return 0, 0, xerrors.Errorf("getting current seal proof type: %w", err) + } + + ssize, err := sp.SectorSize() + if err != nil { + return 0, 0, err + } + + if size > abi.PaddedPieceSize(ssize).Unpadded() { + return 0, 0, xerrors.Errorf("piece cannot fit into a sector") + } + + if _, err := deal.DealProposal.Cid(); err != nil { + return 0, 0, xerrors.Errorf("getting proposal CID: %w", err) + } + + m.inputLk.Lock() + if _, exist := m.pendingPieces[proposalCID(deal)]; exist { + m.inputLk.Unlock() + return 0, 0, xerrors.Errorf("piece for deal %s already pending", proposalCID(deal)) + } + + resCh := make(chan struct { + sn abi.SectorNumber + offset abi.UnpaddedPieceSize + err error + }, 1) + + 
m.pendingPieces[proposalCID(deal)] = &pendingPiece{ + size: size, + deal: deal, + data: data, + assigned: false, + accepted: func(sn abi.SectorNumber, offset abi.UnpaddedPieceSize, err error) { + resCh <- struct { + sn abi.SectorNumber + offset abi.UnpaddedPieceSize + err error + }{sn: sn, offset: offset, err: err} + }, + } + + go func() { + defer m.inputLk.Unlock() + if err := m.updateInput(ctx, sp); err != nil { + log.Errorf("%+v", err) + } + }() + + res := <-resCh + + return res.sn, res.offset.Padded(), res.err +} + +// called with m.inputLk +func (m *Sealing) updateInput(ctx context.Context, sp abi.RegisteredSealProof) error { + ssize, err := sp.SectorSize() + if err != nil { + return err + } + + type match struct { + sector abi.SectorID + deal cid.Cid + + size abi.UnpaddedPieceSize + padding abi.UnpaddedPieceSize + } + + var matches []match + toAssign := map[cid.Cid]struct{}{} // used to maybe create new sectors + + // todo: this is distinctly O(n^2), may need to be optimized for tiny deals and large scale miners + // (unlikely to be a problem now) + for proposalCid, piece := range m.pendingPieces { + if piece.assigned { + continue // already assigned to a sector, skip + } + + toAssign[proposalCid] = struct{}{} + + for id, sector := range m.openSectors { + avail := abi.PaddedPieceSize(ssize).Unpadded() - sector.used + + if piece.size <= avail { // (note: if we have enough space for the piece, we also have enough space for inter-piece padding) + matches = append(matches, match{ + sector: id, + deal: proposalCid, + + size: piece.size, + padding: avail % piece.size, + }) + } + } + } + sort.Slice(matches, func(i, j int) bool { + if matches[i].padding != matches[j].padding { // less padding is better + return matches[i].padding < matches[j].padding + } + + if matches[i].size != matches[j].size { // larger pieces are better + return matches[i].size < matches[j].size + } + + return matches[i].sector.Number < matches[j].sector.Number // prefer older sectors + }) + + 
var assigned int + for _, mt := range matches { + if m.pendingPieces[mt.deal].assigned { + assigned++ + continue + } + + if _, found := m.openSectors[mt.sector]; !found { + continue + } + + err := m.openSectors[mt.sector].maybeAccept(mt.deal) + if err != nil { + m.pendingPieces[mt.deal].accepted(mt.sector.Number, 0, err) // non-error case in handleAddPiece + } + + m.pendingPieces[mt.deal].assigned = true + delete(toAssign, mt.deal) + + if err != nil { + log.Errorf("sector %d rejected deal %s: %+v", mt.sector, mt.deal, err) + continue + } + + delete(m.openSectors, mt.sector) + } + + if len(toAssign) > 0 { + if err := m.tryCreateDealSector(ctx, sp); err != nil { + log.Errorw("Failed to create a new sector for deals", "error", err) + } + } + + return nil +} + +func (m *Sealing) tryCreateDealSector(ctx context.Context, sp abi.RegisteredSealProof) error { + cfg, err := m.getConfig() + if err != nil { + return xerrors.Errorf("getting storage config: %w", err) + } + + if cfg.MaxSealingSectorsForDeals > 0 && m.stats.curSealing() >= cfg.MaxSealingSectorsForDeals { + return nil + } + + if cfg.MaxWaitDealsSectors > 0 && m.stats.curStaging() >= cfg.MaxWaitDealsSectors { + return nil + } + + // Now actually create a new sector + + sid, err := m.sc.Next() + if err != nil { + return xerrors.Errorf("getting sector number: %w", err) + } + + err = m.sealer.NewSector(ctx, m.minerSector(sp, sid)) + if err != nil { + return xerrors.Errorf("initializing sector: %w", err) + } + + log.Infow("Creating sector", "number", sid, "type", "deal", "proofType", sp) + return m.sectors.Send(uint64(sid), SectorStart{ + ID: sid, + SectorType: sp, + }) +} + +func (m *Sealing) StartPacking(sid abi.SectorNumber) error { + return m.sectors.Send(uint64(sid), SectorStartPacking{}) +} + +func proposalCID(deal DealInfo) cid.Cid { + pc, err := deal.DealProposal.Cid() + if err != nil { + log.Errorf("DealProposal.Cid error: %+v", err) + return cid.Undef + } + + return pc +} diff --git 
a/extern/storage-sealing/sealing.go b/extern/storage-sealing/sealing.go index a69f0466b1e..8feca3b7b11 100644 --- a/extern/storage-sealing/sealing.go +++ b/extern/storage-sealing/sealing.go @@ -3,8 +3,6 @@ package sealing import ( "context" "errors" - "io" - "math" "sync" "time" @@ -15,7 +13,6 @@ import ( "golang.org/x/xerrors" "github.com/filecoin-project/go-address" - padreader "github.com/filecoin-project/go-padreader" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/crypto" @@ -89,9 +86,13 @@ type Sealing struct { sectors *statemachine.StateGroup sc SectorIDCounter verif ffiwrapper.Verifier + pcp PreCommitPolicy - pcp PreCommitPolicy - unsealedInfoMap UnsealedSectorMap + inputLk sync.Mutex + openSectors map[abi.SectorID]*openSector + sectorTimers map[abi.SectorID]*time.Timer + pendingPieces map[cid.Cid]*pendingPiece + assignedPieces map[abi.SectorID][]cid.Cid upgradeLk sync.Mutex toUpgrade map[abi.SectorNumber]struct{} @@ -113,17 +114,20 @@ type FeeConfig struct { MaxTerminateGasFee abi.TokenAmount } -type UnsealedSectorMap struct { - infos map[abi.SectorNumber]UnsealedSectorInfo - lk sync.Mutex +type openSector struct { + used abi.UnpaddedPieceSize // change to bitfield/rle when AddPiece gains offset support to better fill sectors + + maybeAccept func(cid.Cid) error // called with inputLk } -type UnsealedSectorInfo struct { - numDeals uint64 - // stored should always equal sum of pieceSizes.Padded() - stored abi.PaddedPieceSize - pieceSizes []abi.UnpaddedPieceSize - ssize abi.SectorSize +type pendingPiece struct { + size abi.UnpaddedPieceSize + deal DealInfo + + data storage.Data + + assigned bool // assigned to a sector? 
+ accepted func(abi.SectorNumber, abi.UnpaddedPieceSize, error) } func New(api SealingAPI, fc FeeConfig, events Events, maddr address.Address, ds datastore.Batching, sealer sectorstorage.SectorManager, sc SectorIDCounter, verif ffiwrapper.Verifier, pcp PreCommitPolicy, gc GetSealingConfigFunc, notifee SectorStateNotifee, as AddrSel) *Sealing { @@ -137,12 +141,12 @@ func New(api SealingAPI, fc FeeConfig, events Events, maddr address.Address, ds sc: sc, verif: verif, pcp: pcp, - unsealedInfoMap: UnsealedSectorMap{ - infos: make(map[abi.SectorNumber]UnsealedSectorInfo), - lk: sync.Mutex{}, - }, - toUpgrade: map[abi.SectorNumber]struct{}{}, + openSectors: map[abi.SectorID]*openSector{}, + sectorTimers: map[abi.SectorID]*time.Timer{}, + pendingPieces: map[cid.Cid]*pendingPiece{}, + assignedPieces: map[abi.SectorID][]cid.Cid{}, + toUpgrade: map[abi.SectorNumber]struct{}{}, notifee: notifee, addrSel: as, @@ -159,8 +163,6 @@ func New(api SealingAPI, fc FeeConfig, events Events, maddr address.Address, ds s.sectors = statemachine.New(namespace.Wrap(ds, datastore.NewKey(SectorStorePrefix)), s, SectorInfo{}) - s.unsealedInfoMap.lk.Lock() // released after initialized in .Run() - return s } @@ -184,104 +186,6 @@ func (m *Sealing) Stop(ctx context.Context) error { return nil } -func (m *Sealing) AddPieceToAnySector(ctx context.Context, size abi.UnpaddedPieceSize, r io.Reader, d DealInfo) (abi.SectorNumber, abi.PaddedPieceSize, error) { - log.Infof("Adding piece for deal %d (publish msg: %s)", d.DealID, d.PublishCid) - if (padreader.PaddedSize(uint64(size))) != size { - return 0, 0, xerrors.Errorf("cannot allocate unpadded piece") - } - - sp, err := m.currentSealProof(ctx) - if err != nil { - return 0, 0, xerrors.Errorf("getting current seal proof type: %w", err) - } - - ssize, err := sp.SectorSize() - if err != nil { - return 0, 0, err - } - - if size > abi.PaddedPieceSize(ssize).Unpadded() { - return 0, 0, xerrors.Errorf("piece cannot fit into a sector") - } - - 
m.unsealedInfoMap.lk.Lock() - - sid, pads, err := m.getSectorAndPadding(ctx, size) - if err != nil { - m.unsealedInfoMap.lk.Unlock() - return 0, 0, xerrors.Errorf("getting available sector: %w", err) - } - - for _, p := range pads { - err = m.addPiece(ctx, sid, p.Unpadded(), NewNullReader(p.Unpadded()), nil) - if err != nil { - m.unsealedInfoMap.lk.Unlock() - return 0, 0, xerrors.Errorf("writing pads: %w", err) - } - } - - offset := m.unsealedInfoMap.infos[sid].stored - err = m.addPiece(ctx, sid, size, r, &d) - - if err != nil { - m.unsealedInfoMap.lk.Unlock() - return 0, 0, xerrors.Errorf("adding piece to sector: %w", err) - } - - startPacking := m.unsealedInfoMap.infos[sid].numDeals >= getDealPerSectorLimit(ssize) - - m.unsealedInfoMap.lk.Unlock() - - if startPacking { - if err := m.StartPacking(sid); err != nil { - return 0, 0, xerrors.Errorf("start packing: %w", err) - } - } - - return sid, offset, nil -} - -// Caller should hold m.unsealedInfoMap.lk -func (m *Sealing) addPiece(ctx context.Context, sectorID abi.SectorNumber, size abi.UnpaddedPieceSize, r io.Reader, di *DealInfo) error { - log.Infof("Adding piece to sector %d", sectorID) - sp, err := m.currentSealProof(ctx) - if err != nil { - return xerrors.Errorf("getting current seal proof type: %w", err) - } - ssize, err := sp.SectorSize() - if err != nil { - return err - } - - ppi, err := m.sealer.AddPiece(sectorstorage.WithPriority(ctx, DealSectorPriority), m.minerSector(sp, sectorID), m.unsealedInfoMap.infos[sectorID].pieceSizes, size, r) - if err != nil { - return xerrors.Errorf("writing piece: %w", err) - } - piece := Piece{ - Piece: ppi, - DealInfo: di, - } - - err = m.sectors.Send(uint64(sectorID), SectorAddPiece{NewPiece: piece}) - if err != nil { - return err - } - - ui := m.unsealedInfoMap.infos[sectorID] - num := m.unsealedInfoMap.infos[sectorID].numDeals - if di != nil { - num = num + 1 - } - m.unsealedInfoMap.infos[sectorID] = UnsealedSectorInfo{ - numDeals: num, - stored: ui.stored + 
piece.Piece.Size, - pieceSizes: append(ui.pieceSizes, piece.Piece.Size.Unpadded()), - ssize: ssize, - } - - return nil -} - func (m *Sealing) Remove(ctx context.Context, sid abi.SectorNumber) error { return m.sectors.Send(uint64(sid), SectorRemove{}) } @@ -298,183 +202,6 @@ func (m *Sealing) TerminatePending(ctx context.Context) ([]abi.SectorID, error) return m.terminator.Pending(ctx) } -// Caller should NOT hold m.unsealedInfoMap.lk -func (m *Sealing) StartPacking(sectorID abi.SectorNumber) error { - // locking here ensures that when the SectorStartPacking event is sent, the sector won't be picked up anywhere else - m.unsealedInfoMap.lk.Lock() - defer m.unsealedInfoMap.lk.Unlock() - - // cannot send SectorStartPacking to sectors that have already been packed, otherwise it will cause the state machine to exit - if _, ok := m.unsealedInfoMap.infos[sectorID]; !ok { - log.Warnf("call start packing, but sector %v not in unsealedInfoMap.infos, maybe have called", sectorID) - return nil - } - log.Infof("Starting packing sector %d", sectorID) - err := m.sectors.Send(uint64(sectorID), SectorStartPacking{}) - if err != nil { - return err - } - log.Infof("send Starting packing event success sector %d", sectorID) - - delete(m.unsealedInfoMap.infos, sectorID) - - return nil -} - -// Caller should hold m.unsealedInfoMap.lk -func (m *Sealing) getSectorAndPadding(ctx context.Context, size abi.UnpaddedPieceSize) (abi.SectorNumber, []abi.PaddedPieceSize, error) { - for tries := 0; tries < 100; tries++ { - for k, v := range m.unsealedInfoMap.infos { - pads, padLength := ffiwrapper.GetRequiredPadding(v.stored, size.Padded()) - - if v.stored+size.Padded()+padLength <= abi.PaddedPieceSize(v.ssize) { - return k, pads, nil - } - } - - if len(m.unsealedInfoMap.infos) > 0 { - log.Infow("tried to put a piece into an open sector, found none with enough space", "open", len(m.unsealedInfoMap.infos), "size", size, "tries", tries) - } - - ns, ssize, err := m.newDealSector(ctx) - switch err { - 
case nil: - m.unsealedInfoMap.infos[ns] = UnsealedSectorInfo{ - numDeals: 0, - stored: 0, - pieceSizes: nil, - ssize: ssize, - } - case errTooManySealing: - m.unsealedInfoMap.lk.Unlock() - - select { - case <-time.After(2 * time.Second): - case <-ctx.Done(): - m.unsealedInfoMap.lk.Lock() - return 0, nil, xerrors.Errorf("getting sector for piece: %w", ctx.Err()) - } - - m.unsealedInfoMap.lk.Lock() - continue - default: - return 0, nil, xerrors.Errorf("creating new sector: %w", err) - } - - return ns, nil, nil - } - - return 0, nil, xerrors.Errorf("failed to allocate piece to a sector") -} - -var errTooManySealing = errors.New("too many sectors sealing") - -// newDealSector creates a new sector for deal storage -func (m *Sealing) newDealSector(ctx context.Context) (abi.SectorNumber, abi.SectorSize, error) { - // First make sure we don't have too many 'open' sectors - - cfg, err := m.getConfig() - if err != nil { - return 0, 0, xerrors.Errorf("getting config: %w", err) - } - - if cfg.MaxSealingSectorsForDeals > 0 { - if m.stats.curSealing() > cfg.MaxSealingSectorsForDeals { - return 0, 0, ErrTooManySectorsSealing - } - } - - if cfg.MaxWaitDealsSectors > 0 && uint64(len(m.unsealedInfoMap.infos)) >= cfg.MaxWaitDealsSectors { - // Too many sectors are sealing in parallel. 
Start sealing one, and retry - // allocating the piece to a sector (we're dropping the lock here, so in - // case other goroutines are also trying to create a sector, we retry in - // getSectorAndPadding instead of here - otherwise if we have lots of - // parallel deals in progress, we can start creating a ton of sectors - // with just a single deal in them) - var mostStored abi.PaddedPieceSize = math.MaxUint64 - var best abi.SectorNumber = math.MaxUint64 - - for sn, info := range m.unsealedInfoMap.infos { - if info.stored+1 > mostStored+1 { // 18446744073709551615 + 1 = 0 - best = sn - } - } - - if best != math.MaxUint64 { - m.unsealedInfoMap.lk.Unlock() - err := m.StartPacking(best) - m.unsealedInfoMap.lk.Lock() - - if err != nil { - log.Errorf("newDealSector StartPacking error: %+v", err) - // let's pretend this is fine - } - } - - return 0, 0, errTooManySealing // will wait a bit and retry - } - - spt, err := m.currentSealProof(ctx) - if err != nil { - return 0, 0, xerrors.Errorf("getting current seal proof type: %w", err) - } - - // Now actually create a new sector - - sid, err := m.sc.Next() - if err != nil { - return 0, 0, xerrors.Errorf("getting sector number: %w", err) - } - - err = m.sealer.NewSector(context.TODO(), m.minerSector(spt, sid)) - if err != nil { - return 0, 0, xerrors.Errorf("initializing sector: %w", err) - } - - log.Infof("Creating sector %d", sid) - err = m.sectors.Send(uint64(sid), SectorStart{ - ID: sid, - SectorType: spt, - }) - - if err != nil { - return 0, 0, xerrors.Errorf("starting the sector fsm: %w", err) - } - - cf, err := m.getConfig() - if err != nil { - return 0, 0, xerrors.Errorf("getting the sealing delay: %w", err) - } - - if cf.WaitDealsDelay > 0 { - timer := time.NewTimer(cf.WaitDealsDelay) - go func() { - <-timer.C - if err := m.StartPacking(sid); err != nil { - log.Errorf("starting sector %d: %+v", sid, err) - } - }() - } - - ssize, err := spt.SectorSize() - return sid, ssize, err -} - -// newSectorCC accepts a slice of 
pieces with no deal (junk data) -func (m *Sealing) newSectorCC(ctx context.Context, sid abi.SectorNumber, pieces []Piece) error { - spt, err := m.currentSealProof(ctx) - if err != nil { - return xerrors.Errorf("getting current seal proof type: %w", err) - } - - log.Infof("Creating CC sector %d", sid) - return m.sectors.Send(uint64(sid), SectorStartCC{ - ID: sid, - Pieces: pieces, - SectorType: spt, - }) -} - func (m *Sealing) currentSealProof(ctx context.Context) (abi.RegisteredSealProof, error) { mi, err := m.api.StateMinerInfo(ctx, m.maddr, nil) if err != nil { @@ -512,9 +239,9 @@ func (m *Sealing) Address() address.Address { return m.maddr } -func getDealPerSectorLimit(size abi.SectorSize) uint64 { +func getDealPerSectorLimit(size abi.SectorSize) (int, error) { if size < 64<<30 { - return 256 + return 256, nil } - return 512 + return 512, nil } diff --git a/extern/storage-sealing/sector_state.go b/extern/storage-sealing/sector_state.go index 49a60795895..b636614d1e8 100644 --- a/extern/storage-sealing/sector_state.go +++ b/extern/storage-sealing/sector_state.go @@ -6,6 +6,8 @@ var ExistSectorStateList = map[SectorState]struct{}{ Empty: {}, WaitDeals: {}, Packing: {}, + AddPiece: {}, + AddPieceFailed: {}, GetTicket: {}, PreCommit1: {}, PreCommit2: {}, @@ -43,8 +45,9 @@ const ( UndefinedSectorState SectorState = "" // happy path - Empty SectorState = "Empty" + Empty SectorState = "Empty" // deprecated WaitDeals SectorState = "WaitDeals" // waiting for more pieces (deals) to be added to the sector + AddPiece SectorState = "AddPiece" // put deal data (and padding if required) into the sector Packing SectorState = "Packing" // sector not in sealStore, and not on chain GetTicket SectorState = "GetTicket" // generate ticket PreCommit1 SectorState = "PreCommit1" // do PreCommit1 @@ -59,6 +62,7 @@ const ( Proving SectorState = "Proving" // error modes FailedUnrecoverable SectorState = "FailedUnrecoverable" + AddPieceFailed SectorState = "AddPieceFailed" 
SealPreCommit1Failed SectorState = "SealPreCommit1Failed" SealPreCommit2Failed SectorState = "SealPreCommit2Failed" PreCommitFailed SectorState = "PreCommitFailed" @@ -85,7 +89,9 @@ const ( func toStatState(st SectorState) statSectorState { switch st { - case Empty, WaitDeals, Packing, GetTicket, PreCommit1, PreCommit2, PreCommitting, PreCommitWait, WaitSeed, Committing, SubmitCommit, CommitWait, FinalizeSector: + case UndefinedSectorState, Empty, WaitDeals, AddPiece: + return sstStaging + case Packing, GetTicket, PreCommit1, PreCommit2, PreCommitting, PreCommitWait, WaitSeed, Committing, SubmitCommit, CommitWait, FinalizeSector: return sstSealing case Proving, Removed, Removing, Terminating, TerminateWait, TerminateFinality, TerminateFailed: return sstProving diff --git a/extern/storage-sealing/states_failed.go b/extern/storage-sealing/states_failed.go index 24008a8039c..7bef19b92af 100644 --- a/extern/storage-sealing/states_failed.go +++ b/extern/storage-sealing/states_failed.go @@ -3,6 +3,7 @@ package sealing import ( "time" + "github.com/hashicorp/go-multierror" "golang.org/x/xerrors" "github.com/filecoin-project/lotus/chain/actors/builtin/market" @@ -352,6 +353,7 @@ func (m *Sealing) handleRecoverDealIDs(ctx statemachine.Context, sector SectorIn } var toFix []int + paddingPieces := 0 for i, p := range sector.Pieces { // if no deal is associated with the piece, ensure that we added it as @@ -361,6 +363,7 @@ func (m *Sealing) handleRecoverDealIDs(ctx statemachine.Context, sector SectorIn if !p.Piece.PieceCID.Equals(exp) { return xerrors.Errorf("sector %d piece %d had non-zero PieceCID %+v", sector.SectorNumber, i, p.Piece.PieceCID) } + paddingPieces++ continue } @@ -396,6 +399,7 @@ func (m *Sealing) handleRecoverDealIDs(ctx statemachine.Context, sector SectorIn } } + failed := map[int]error{} updates := map[int]abi.DealID{} for _, i := range toFix { p := sector.Pieces[i] @@ -414,12 +418,27 @@ func (m *Sealing) handleRecoverDealIDs(ctx statemachine.Context, 
sector SectorIn } res, err := m.dealInfo.GetCurrentDealInfo(ctx.Context(), tok, dp, *p.DealInfo.PublishCid) if err != nil { - return xerrors.Errorf("recovering deal ID for publish deal message %s (sector %d, piece %d): %w", *p.DealInfo.PublishCid, sector.SectorNumber, i, err) + failed[i] = xerrors.Errorf("getting current deal info for piece %d: %w", i, err) } updates[i] = res.DealID } + if len(failed) > 0 { + var merr error + for _, e := range failed { + merr = multierror.Append(merr, e) + } + + if len(failed)+paddingPieces == len(sector.Pieces) { + log.Errorf("removing sector %d: all deals expired or unrecoverable: %+v", sector.SectorNumber, merr) + return ctx.Send(SectorRemove{}) + } + + // todo: try to remove bad pieces (hard; see the todo above) + return xerrors.Errorf("failed to recover some deals: %w", merr) + } + // Not much to do here, we can't go back in time to commit this sector return ctx.Send(SectorUpdateDealIDs{Updates: updates}) } diff --git a/extern/storage-sealing/states_sealing.go b/extern/storage-sealing/states_sealing.go index 1aedcdfb469..f5bb4cae6e9 100644 --- a/extern/storage-sealing/states_sealing.go +++ b/extern/storage-sealing/states_sealing.go @@ -25,6 +25,24 @@ var DealSectorPriority = 1024 var MaxTicketAge = abi.ChainEpoch(builtin0.EpochsInDay * 2) func (m *Sealing) handlePacking(ctx statemachine.Context, sector SectorInfo) error { + m.inputLk.Lock() + // make sure we not accepting deals into this sector + for _, c := range m.assignedPieces[m.minerSectorID(sector.SectorNumber)] { + pp := m.pendingPieces[c] + delete(m.pendingPieces, c) + if pp == nil { + log.Errorf("nil assigned pending piece %s", c) + continue + } + + // todo: return to the sealing queue (this is extremely unlikely to happen) + pp.accepted(sector.SectorNumber, 0, xerrors.Errorf("sector entered packing state early")) + } + + delete(m.openSectors, m.minerSectorID(sector.SectorNumber)) + delete(m.assignedPieces, m.minerSectorID(sector.SectorNumber)) + m.inputLk.Unlock() + 
log.Infow("performing filling up rest of the sector...", "sector", sector.SectorNumber) var allocated abi.UnpaddedPieceSize @@ -52,7 +70,7 @@ func (m *Sealing) handlePacking(ctx statemachine.Context, sector SectorInfo) err log.Warnf("Creating %d filler pieces for sector %d", len(fillerSizes), sector.SectorNumber) } - fillerPieces, err := m.pledgeSector(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), sector.existingPieceSizes(), fillerSizes...) + fillerPieces, err := m.padSector(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), sector.existingPieceSizes(), fillerSizes...) if err != nil { return xerrors.Errorf("filling up the sector (%v): %w", fillerSizes, err) } @@ -60,6 +78,28 @@ func (m *Sealing) handlePacking(ctx statemachine.Context, sector SectorInfo) err return ctx.Send(SectorPacked{FillerPieces: fillerPieces}) } +func (m *Sealing) padSector(ctx context.Context, sectorID storage.SectorRef, existingPieceSizes []abi.UnpaddedPieceSize, sizes ...abi.UnpaddedPieceSize) ([]abi.PieceInfo, error) { + if len(sizes) == 0 { + return nil, nil + } + + log.Infof("Pledge %d, contains %+v", sectorID, existingPieceSizes) + + out := make([]abi.PieceInfo, len(sizes)) + for i, size := range sizes { + ppi, err := m.sealer.AddPiece(ctx, sectorID, existingPieceSizes, size, NewNullReader(size)) + if err != nil { + return nil, xerrors.Errorf("add piece: %w", err) + } + + existingPieceSizes = append(existingPieceSizes, size) + + out[i] = ppi + } + + return out, nil +} + func checkTicketExpired(sector SectorInfo, epoch abi.ChainEpoch) bool { return epoch-sector.TicketEpoch > MaxTicketAge // TODO: allow configuring expected seal durations } diff --git a/extern/storage-sealing/stats.go b/extern/storage-sealing/stats.go index 78630c216be..10852937572 100644 --- a/extern/storage-sealing/stats.go +++ b/extern/storage-sealing/stats.go @@ -9,7 +9,8 @@ import ( type statSectorState int const ( - sstSealing 
statSectorState = iota + sstStaging statSectorState = iota + sstSealing sstFailed sstProving nsst @@ -41,5 +42,13 @@ func (ss *SectorStats) curSealing() uint64 { ss.lk.Lock() defer ss.lk.Unlock() - return ss.totals[sstSealing] + ss.totals[sstFailed] + return ss.totals[sstStaging] + ss.totals[sstSealing] + ss.totals[sstFailed] +} + +// return the number of sectors waiting to enter the sealing pipeline +func (ss *SectorStats) curStaging() uint64 { + ss.lk.Lock() + defer ss.lk.Unlock() + + return ss.totals[sstStaging] } diff --git a/extern/storage-sealing/terminate_batch.go b/extern/storage-sealing/terminate_batch.go index 31ccef93c2c..0e96e838406 100644 --- a/extern/storage-sealing/terminate_batch.go +++ b/extern/storage-sealing/terminate_batch.go @@ -143,6 +143,18 @@ func (b *TerminateBatcher) processBatch(notif, after bool) (*cid.Cid, error) { continue } + ps, err := b.api.StateMinerPartitions(b.mctx, b.maddr, loc.Deadline, nil) + if err != nil { + log.Warnw("TerminateBatcher: getting miner partitions", "deadline", loc.Deadline, "partition", loc.Partition, "error", err) + continue + } + + toTerminate, err = bitfield.IntersectBitField(ps[loc.Partition].LiveSectors, toTerminate) + if err != nil { + log.Warnw("TerminateBatcher: intersecting liveSectors and toTerminate bitfields", "deadline", loc.Deadline, "partition", loc.Partition, "error", err) + continue + } + if total+n > uint64(miner.AddressedSectorsMax) { n = uint64(miner.AddressedSectorsMax) - total diff --git a/extern/storage-sealing/types.go b/extern/storage-sealing/types.go index fc9299499a8..58c35cf36ff 100644 --- a/extern/storage-sealing/types.go +++ b/extern/storage-sealing/types.go @@ -72,7 +72,8 @@ type SectorInfo struct { SectorType abi.RegisteredSealProof // Packing - Pieces []Piece + CreationTime int64 // unix seconds + Pieces []Piece // PreCommit1 TicketValue abi.SealRandomness diff --git a/gen/main.go b/gen/main.go index 9009172b96a..61f41beecec 100644 --- a/gen/main.go +++ b/gen/main.go @@ -4,15 
+4,15 @@ import ( "fmt" "os" - "github.com/filecoin-project/lotus/chain/market" - gen "github.com/whyrusleeping/cbor-gen" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/exchange" + "github.com/filecoin-project/lotus/chain/market" "github.com/filecoin-project/lotus/chain/types" sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" + "github.com/filecoin-project/lotus/lib/backupds" "github.com/filecoin-project/lotus/node/hello" "github.com/filecoin-project/lotus/paychmgr" ) @@ -105,4 +105,12 @@ func main() { fmt.Println(err) os.Exit(1) } + + err = gen.WriteTupleEncodersToFile("./lib/backupds/cbor_gen.go", "backupds", + backupds.Entry{}, + ) + if err != nil { + fmt.Println(err) + os.Exit(1) + } } diff --git a/go.mod b/go.mod index 93af360f8ef..5b656d4c8b9 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,6 @@ require ( github.com/BurntSushi/toml v0.3.1 github.com/GeertJohan/go.rice v1.0.0 github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee - github.com/Jeffail/gabs v1.4.0 github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d github.com/buger/goterm v0.0.0-20200322175922-2f3e71b85129 @@ -50,6 +49,7 @@ require ( github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1 github.com/go-kit/kit v0.10.0 github.com/go-ole/go-ole v1.2.4 // indirect + github.com/golang/mock v1.4.4 github.com/google/uuid v1.1.2 github.com/gorilla/mux v1.7.4 github.com/gorilla/websocket v1.4.2 @@ -71,7 +71,7 @@ require ( github.com/ipfs/go-ds-pebble v0.0.2-0.20200921225637-ce220f8ac459 github.com/ipfs/go-filestore v1.0.0 github.com/ipfs/go-fs-lock v0.0.6 - github.com/ipfs/go-graphsync v0.5.2 + github.com/ipfs/go-graphsync v0.6.0 github.com/ipfs/go-ipfs-blockstore v1.0.3 github.com/ipfs/go-ipfs-chunker v0.0.5 github.com/ipfs/go-ipfs-ds-help v1.0.0 @@ -105,7 +105,7 @@ require ( 
github.com/libp2p/go-libp2p-mplex v0.3.0 github.com/libp2p/go-libp2p-noise v0.1.2 github.com/libp2p/go-libp2p-peerstore v0.2.6 - github.com/libp2p/go-libp2p-pubsub v0.4.1 + github.com/libp2p/go-libp2p-pubsub v0.4.2-0.20210212194758-6c1addf493eb github.com/libp2p/go-libp2p-quic-transport v0.9.0 github.com/libp2p/go-libp2p-record v0.1.3 github.com/libp2p/go-libp2p-routing-helpers v0.2.3 @@ -136,6 +136,7 @@ require ( github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 github.com/whyrusleeping/pubsub v0.0.0-20190708150250-92bcb0691325 github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542 + go.etcd.io/bbolt v1.3.4 go.opencensus.io v0.22.5 go.uber.org/dig v1.10.0 // indirect go.uber.org/fx v1.9.0 @@ -143,7 +144,7 @@ require ( go.uber.org/zap v1.16.0 golang.org/x/net v0.0.0-20201021035429-f5854403a974 golang.org/x/sync v0.0.0-20201207232520-09787c993a3a - golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f + golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 golang.org/x/time v0.0.0-20191024005414-555d28b269f0 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect diff --git a/go.sum b/go.sum index 4d0ecd0e479..3c902f7980a 100644 --- a/go.sum +++ b/go.sum @@ -40,8 +40,6 @@ github.com/GeertJohan/go.rice v1.0.0 h1:KkI6O9uMaQU3VEKaj01ulavtF7o1fWT7+pk/4voi github.com/GeertJohan/go.rice v1.0.0/go.mod h1:eH6gbSOAUv07dQuZVnBmoDP8mgsM1rtixis4Tib9if0= github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee h1:8doiS7ib3zi6/K172oDhSKU0dJ/miJramo9NITOMyZQ= github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee/go.mod h1:W0GbEAA4uFNYOGG2cJpmFJ04E6SD1NLELPYZB57/7AY= -github.com/Jeffail/gabs v1.4.0 h1://5fYRRTq1edjfIrQGvdkcd22pkYUrHZ5YC/H2GJVAo= -github.com/Jeffail/gabs v1.4.0/go.mod h1:6xMvQMK4k33lb7GUUpaAPh6nKMmemQeg5d4gn7/bOXc= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= 
github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= @@ -241,6 +239,8 @@ github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20201006184820-924ee87a1349/ github.com/filecoin-project/go-amt-ipld/v3 v3.0.0 h1:Ou/q82QeHGOhpkedvaxxzpBYuqTxLCcj5OChkDNx4qc= github.com/filecoin-project/go-amt-ipld/v3 v3.0.0/go.mod h1:Qa95YNAbtoVCTSVtX38aAC1ptBnJfPma1R/zZsKmx4o= github.com/filecoin-project/go-bitfield v0.2.0/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= +github.com/filecoin-project/go-bitfield v0.2.3 h1:pedK/7maYF06Z+BYJf2OeFFqIDEh6SP6mIOlLFpYXGs= +github.com/filecoin-project/go-bitfield v0.2.3/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= github.com/filecoin-project/go-bitfield v0.2.3/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= github.com/filecoin-project/go-bitfield v0.2.4 h1:uZ7MeE+XfM5lqrHJZ93OnhQKc/rveW8p9au0C68JPgk= github.com/filecoin-project/go-bitfield v0.2.4/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= @@ -560,8 +560,9 @@ github.com/ipfs/go-fs-lock v0.0.6/go.mod h1:OTR+Rj9sHiRubJh3dRhD15Juhd/+w6VPOY28 github.com/ipfs/go-graphsync v0.1.0/go.mod h1:jMXfqIEDFukLPZHqDPp8tJMbHO9Rmeb9CEGevngQbmE= github.com/ipfs/go-graphsync v0.4.2/go.mod h1:/VmbZTUdUMTbNkgzAiCEucIIAU3BkLE2cZrDCVUhyi0= github.com/ipfs/go-graphsync v0.4.3/go.mod h1:mPOwDYv128gf8gxPFgXnz4fNrSYPsWyqisJ7ych+XDY= -github.com/ipfs/go-graphsync v0.5.2 h1:USD+daaSC+7pLHCxROThSaF6SF7WYXF03sjrta0rCfA= github.com/ipfs/go-graphsync v0.5.2/go.mod h1:e2ZxnClqBBYAtd901g9vXMJzS47labjAtOzsWtOzKNk= +github.com/ipfs/go-graphsync v0.6.0 h1:x6UvDUGA7wjaKNqx5Vbo7FGT8aJ5ryYA0dMQ5jN3dF0= +github.com/ipfs/go-graphsync v0.6.0/go.mod h1:e2ZxnClqBBYAtd901g9vXMJzS47labjAtOzsWtOzKNk= github.com/ipfs/go-hamt-ipld v0.1.1/go.mod h1:1EZCr2v0jlCnhpa+aZ0JZYp8Tt2w16+JJOAVz17YcDk= github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= 
github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw= @@ -923,8 +924,8 @@ github.com/libp2p/go-libp2p-protocol v0.0.1/go.mod h1:Af9n4PiruirSDjHycM1QuiMi/1 github.com/libp2p/go-libp2p-protocol v0.1.0/go.mod h1:KQPHpAabB57XQxGrXCNvbL6UEXfQqUgC/1adR2Xtflk= github.com/libp2p/go-libp2p-pubsub v0.1.1/go.mod h1:ZwlKzRSe1eGvSIdU5bD7+8RZN/Uzw0t1Bp9R1znpR/Q= github.com/libp2p/go-libp2p-pubsub v0.3.2-0.20200527132641-c0712c6e92cf/go.mod h1:TxPOBuo1FPdsTjFnv+FGZbNbWYsp74Culx+4ViQpato= -github.com/libp2p/go-libp2p-pubsub v0.4.1 h1:j4umIg5nyus+sqNfU+FWvb9aeYFQH/A+nDFhWj+8yy8= -github.com/libp2p/go-libp2p-pubsub v0.4.1/go.mod h1:izkeMLvz6Ht8yAISXjx60XUQZMq9ZMe5h2ih4dLIBIQ= +github.com/libp2p/go-libp2p-pubsub v0.4.2-0.20210212194758-6c1addf493eb h1:HExLcdXn8fgtXPciUw97O5NNhBn31dt6d9fVUD4cngo= +github.com/libp2p/go-libp2p-pubsub v0.4.2-0.20210212194758-6c1addf493eb/go.mod h1:izkeMLvz6Ht8yAISXjx60XUQZMq9ZMe5h2ih4dLIBIQ= github.com/libp2p/go-libp2p-quic-transport v0.1.1/go.mod h1:wqG/jzhF3Pu2NrhJEvE+IE0NTHNXslOPn9JQzyCAxzU= github.com/libp2p/go-libp2p-quic-transport v0.5.0/go.mod h1:IEcuC5MLxvZ5KuHKjRu+dr3LjCT1Be3rcD/4d8JrX8M= github.com/libp2p/go-libp2p-quic-transport v0.9.0 h1:WPuq5nV/chmIZIzvrkC2ulSdAQ0P0BDvgvAhZFOZ59E= @@ -1754,8 +1755,9 @@ golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200812155832-6a926be9bd1d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200926100807-9d91bd62050c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 h1:nxC68pudNYkKU6jWhgrqdreuFiOQWj1Fs7T3VrH4Pjw= 
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1933,6 +1935,8 @@ howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCU modernc.org/cc v1.0.0 h1:nPibNuDEx6tvYrUAtvDTTw98rx5juGsa5zuDnKwEEQQ= modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= modernc.org/fileutil v1.0.0/go.mod h1:JHsWpkrk/CnVV1H/eGlFf85BEpfkrp56ro8nojIq9Q8= +modernc.org/golex v1.0.0 h1:wWpDlbK8ejRfSyi0frMyhilD3JBvtcx2AdGDnU+JtsE= +modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= modernc.org/golex v1.0.1 h1:EYKY1a3wStt0RzHaH8mdSRNg78Ub0OHxYfCRWw35YtM= modernc.org/golex v1.0.1/go.mod h1:QCA53QtsT1NdGkaZZkF5ezFwk4IXh4BGNafAARTC254= diff --git a/lib/backupds/cbor_gen.go b/lib/backupds/cbor_gen.go new file mode 100644 index 00000000000..d6cb6f4d3b0 --- /dev/null +++ b/lib/backupds/cbor_gen.go @@ -0,0 +1,157 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
+ +package backupds + +import ( + "fmt" + "io" + "sort" + + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = sort.Sort + +var lengthBufEntry = []byte{131} + +func (t *Entry) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufEntry); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Key ([]uint8) (slice) + if len(t.Key) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.Key was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Key))); err != nil { + return err + } + + if _, err := w.Write(t.Key[:]); err != nil { + return err + } + + // t.Value ([]uint8) (slice) + if len(t.Value) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.Value was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Value))); err != nil { + return err + } + + if _, err := w.Write(t.Value[:]); err != nil { + return err + } + + // t.Timestamp (int64) (int64) + if t.Timestamp >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Timestamp)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Timestamp-1)); err != nil { + return err + } + } + return nil +} + +func (t *Entry) UnmarshalCBOR(r io.Reader) error { + *t = Entry{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Key ([]uint8) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + 
return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.Key: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.Key = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.Key[:]); err != nil { + return err + } + // t.Value ([]uint8) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.Value: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.Value = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.Value[:]); err != nil { + return err + } + // t.Timestamp (int64) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.Timestamp = int64(extraI) + } + return nil +} diff --git a/lib/backupds/datastore.go b/lib/backupds/datastore.go index 1555577f346..350988a501f 100644 --- a/lib/backupds/datastore.go +++ b/lib/backupds/datastore.go @@ -4,27 +4,50 @@ import ( "crypto/sha256" "io" "sync" + "time" - logging "github.com/ipfs/go-log/v2" - cbg "github.com/whyrusleeping/cbor-gen" + "go.uber.org/multierr" "golang.org/x/xerrors" "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/query" + logging "github.com/ipfs/go-log/v2" + cbg "github.com/whyrusleeping/cbor-gen" ) var log = logging.Logger("backupds") +const NoLogdir = "" + type Datastore struct { child datastore.Batching backupLk sync.RWMutex + + log chan Entry + closing, closed chan 
struct{} } -func Wrap(child datastore.Batching) *Datastore { - return &Datastore{ +type Entry struct { + Key, Value []byte + Timestamp int64 +} + +func Wrap(child datastore.Batching, logdir string) (*Datastore, error) { + ds := &Datastore{ child: child, } + + if logdir != NoLogdir { + ds.closing, ds.closed = make(chan struct{}), make(chan struct{}) + ds.log = make(chan Entry) + + if err := ds.startLog(logdir); err != nil { + return nil, err + } + } + + return ds, nil } // Writes a datastore dump into the provided writer as @@ -129,6 +152,14 @@ func (d *Datastore) Put(key datastore.Key, value []byte) error { d.backupLk.RLock() defer d.backupLk.RUnlock() + if d.log != nil { + d.log <- Entry{ + Key: []byte(key.String()), + Value: value, + Timestamp: time.Now().Unix(), + } + } + return d.child.Put(key, value) } @@ -146,11 +177,23 @@ func (d *Datastore) Sync(prefix datastore.Key) error { return d.child.Sync(prefix) } -func (d *Datastore) Close() error { +func (d *Datastore) CloseLog() error { d.backupLk.RLock() defer d.backupLk.RUnlock() - return d.child.Close() + if d.closing != nil { + close(d.closing) + <-d.closed + } + + return nil +} + +func (d *Datastore) Close() error { + return multierr.Combine( + d.child.Close(), + d.CloseLog(), + ) } func (d *Datastore) Batch() (datastore.Batch, error) { @@ -160,17 +203,27 @@ func (d *Datastore) Batch() (datastore.Batch, error) { } return &bbatch{ + d: d, b: b, rlk: d.backupLk.RLocker(), }, nil } type bbatch struct { + d *Datastore b datastore.Batch rlk sync.Locker } func (b *bbatch) Put(key datastore.Key, value []byte) error { + if b.d.log != nil { + b.d.log <- Entry{ + Key: []byte(key.String()), + Value: value, + Timestamp: time.Now().Unix(), + } + } + return b.b.Put(key, value) } diff --git a/lib/backupds/log.go b/lib/backupds/log.go new file mode 100644 index 00000000000..85db600ef62 --- /dev/null +++ b/lib/backupds/log.go @@ -0,0 +1,223 @@ +package backupds + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" 
+ "strconv" + "strings" + "time" + + "github.com/google/uuid" + "golang.org/x/xerrors" + + "github.com/ipfs/go-datastore" +) + +var loghead = datastore.NewKey("/backupds/log/head") // string([logfile base name];[uuid];[unix ts]) + +func (d *Datastore) startLog(logdir string) error { + if err := os.MkdirAll(logdir, 0755); err != nil && !os.IsExist(err) { + return xerrors.Errorf("mkdir logdir ('%s'): %w", logdir, err) + } + + files, err := ioutil.ReadDir(logdir) + if err != nil { + return xerrors.Errorf("read logdir ('%s'): %w", logdir, err) + } + + var latest string + var latestTs int64 + + for _, file := range files { + fn := file.Name() + if !strings.HasSuffix(fn, ".log.cbor") { + log.Warn("logfile with wrong file extension", fn) + continue + } + sec, err := strconv.ParseInt(fn[:len(".log.cbor")], 10, 64) + if err != nil { + return xerrors.Errorf("parsing logfile as a number: %w", err) + } + + if sec > latestTs { + latestTs = sec + latest = file.Name() + } + } + + var l *logfile + if latest == "" { + l, latest, err = d.createLog(logdir) + if err != nil { + return xerrors.Errorf("creating log: %w", err) + } + } else { + l, err = d.openLog(filepath.Join(logdir, latest)) + if err != nil { + return xerrors.Errorf("opening log: %w", err) + } + } + + if err := l.writeLogHead(latest, d.child); err != nil { + return xerrors.Errorf("writing new log head: %w", err) + } + + go d.runLog(l) + + return nil +} + +func (d *Datastore) runLog(l *logfile) { + defer close(d.closed) + for { + select { + case ent := <-d.log: + if err := l.writeEntry(&ent); err != nil { + log.Errorw("failed to write log entry", "error", err) + // todo try to do something, maybe start a new log file (but not when we're out of disk space) + } + + // todo: batch writes when multiple are pending; flush on a timer + if err := l.file.Sync(); err != nil { + log.Errorw("failed to sync log", "error", err) + } + case <-d.closing: + if err := l.Close(); err != nil { + log.Errorw("failed to close log", "error", 
err) + } + return + } + } +} + +type logfile struct { + file *os.File +} + +func (d *Datastore) createLog(logdir string) (*logfile, string, error) { + p := filepath.Join(logdir, strconv.FormatInt(time.Now().Unix(), 10)+".log.cbor") + log.Infow("creating log", "file", p) + + f, err := os.OpenFile(p, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0644) + if err != nil { + return nil, "", err + } + + if err := d.Backup(f); err != nil { + return nil, "", xerrors.Errorf("writing log base: %w", err) + } + if err := f.Sync(); err != nil { + return nil, "", xerrors.Errorf("sync log base: %w", err) + } + log.Infow("log opened", "file", p) + + return &logfile{ + file: f, + }, filepath.Base(p), nil +} + +func (d *Datastore) openLog(p string) (*logfile, error) { + log.Infow("opening log", "file", p) + lh, err := d.child.Get(loghead) + if err != nil { + return nil, xerrors.Errorf("checking log head (logfile '%s'): %w", p, err) + } + + lhp := strings.Split(string(lh), ";") + if len(lhp) != 3 { + return nil, xerrors.Errorf("expected loghead to have 3 parts") + } + + if lhp[0] != filepath.Base(p) { + return nil, xerrors.Errorf("loghead log file doesn't match, opening %s, expected %s", p, lhp[0]) + } + + f, err := os.OpenFile(p, os.O_RDWR, 0644) + if err != nil { + return nil, err + } + + var lastLogHead string + var openCount, logvals int64 + // check file integrity + err = ReadBackup(f, func(k datastore.Key, v []byte) error { + logvals++ + if k == loghead { + lastLogHead = string(v) + openCount++ + } + return nil + }) + if err != nil { + return nil, xerrors.Errorf("reading backup part of the logfile: %w", err) + } + if string(lh) != lastLogHead { + return nil, xerrors.Errorf("loghead didn't match, expected '%s', last in logfile '%s'", string(lh), lastLogHead) + } + + // make sure we're at the end of the file + at, err := f.Seek(0, io.SeekCurrent) + if err != nil { + return nil, xerrors.Errorf("get current logfile offset: %w", err) + } + end, err := f.Seek(0, io.SeekEnd) + if err != nil { + 
return nil, xerrors.Errorf("get current logfile offset: %w", err) + } + if at != end { + return nil, xerrors.Errorf("logfile %s validated %d bytes, but the file has %d bytes (%d more)", p, at, end, end-at) + } + + log.Infow("log opened", "file", p, "openCount", openCount, "logValues", logvals) + + // todo: maybe write a magic 'opened at' entry; pad the log to filesystem page to prevent more exotic types of corruption + + return &logfile{ + file: f, + }, nil +} + +func (l *logfile) writeLogHead(logname string, ds datastore.Batching) error { + lval := []byte(fmt.Sprintf("%s;%s;%d", logname, uuid.New(), time.Now().Unix())) + + err := l.writeEntry(&Entry{ + Key: loghead.Bytes(), + Value: lval, + Timestamp: time.Now().Unix(), + }) + if err != nil { + return xerrors.Errorf("writing loghead to the log: %w", err) + } + + if err := ds.Put(loghead, lval); err != nil { + return xerrors.Errorf("writing loghead to the datastore: %w", err) + } + + log.Infow("new log head", "loghead", string(lval)) + + return nil +} + +func (l *logfile) writeEntry(e *Entry) error { + // todo: maybe marshal to some temp buffer, then put into the file? 
+ if err := e.MarshalCBOR(l.file); err != nil { + return xerrors.Errorf("writing log entry: %w", err) + } + + return nil +} + +func (l *logfile) Close() error { + // todo: maybe write a magic 'close at' entry; pad the log to filesystem page to prevent more exotic types of corruption + + if err := l.file.Close(); err != nil { + return err + } + + l.file = nil + + return nil +} diff --git a/lib/backupds/read.go b/lib/backupds/read.go index f9a4336374c..7c8e33e740f 100644 --- a/lib/backupds/read.go +++ b/lib/backupds/read.go @@ -4,6 +4,7 @@ import ( "bytes" "crypto/sha256" "io" + "os" "github.com/ipfs/go-datastore" cbg "github.com/whyrusleeping/cbor-gen" @@ -13,6 +14,7 @@ import ( func ReadBackup(r io.Reader, cb func(key datastore.Key, value []byte) error) error { scratch := make([]byte, 9) + // read array[2]( if _, err := r.Read(scratch[:1]); err != nil { return xerrors.Errorf("reading array header: %w", err) } @@ -24,6 +26,7 @@ func ReadBackup(r io.Reader, cb func(key datastore.Key, value []byte) error) err hasher := sha256.New() hr := io.TeeReader(r, hasher) + // read array[*]( if _, err := hr.Read(scratch[:1]); err != nil { return xerrors.Errorf("reading array header: %w", err) } @@ -37,10 +40,12 @@ func ReadBackup(r io.Reader, cb func(key datastore.Key, value []byte) error) err return xerrors.Errorf("reading tuple header: %w", err) } + // close array[*] if scratch[0] == 0xff { break } + // read array[2](key:[]byte, value:[]byte) if scratch[0] != 0x82 { return xerrors.Errorf("expected array(2) header 0x82, got %x", scratch[0]) } @@ -63,6 +68,7 @@ func ReadBackup(r io.Reader, cb func(key datastore.Key, value []byte) error) err sum := hasher.Sum(nil) + // read the [32]byte checksum expSum, err := cbg.ReadByteArray(r, 32) if err != nil { return xerrors.Errorf("reading expected checksum: %w", err) @@ -72,7 +78,42 @@ func ReadBackup(r io.Reader, cb func(key datastore.Key, value []byte) error) err return xerrors.Errorf("checksum didn't match; expected %x, got %x", 
expSum, sum) } - return nil + // read the log, set of Entry-ies + + var ent Entry + bp := cbg.GetPeeker(r) + for { + _, err := bp.ReadByte() + switch err { + case io.EOF, io.ErrUnexpectedEOF: + return nil + case nil: + default: + return xerrors.Errorf("peek log: %w", err) + } + if err := bp.UnreadByte(); err != nil { + return xerrors.Errorf("unread log byte: %w", err) + } + + if err := ent.UnmarshalCBOR(bp); err != nil { + switch err { + case io.EOF, io.ErrUnexpectedEOF: + if os.Getenv("LOTUS_ALLOW_TRUNCATED_LOG") == "1" { + panic("handleme; just ignore and tell the caller about the corrupted file") // todo + } else { + return xerrors.Errorf("log entry potentially truncated, set LOTUS_ALLOW_TRUNCATED_LOG=1 to proceed: %w", err) + } + default: + return xerrors.Errorf("unmarshaling log entry: %w", err) + } + } + + key := datastore.NewKey(string(ent.Key)) + + if err := cb(key, ent.Value); err != nil { + return err + } + } } func RestoreInto(r io.Reader, dest datastore.Batching) error { diff --git a/lib/blockstore/blockstore.go b/lib/blockstore/blockstore.go deleted file mode 100644 index eb28f1bf0c2..00000000000 --- a/lib/blockstore/blockstore.go +++ /dev/null @@ -1,66 +0,0 @@ -// blockstore contains all the basic blockstore constructors used by lotus. Any -// blockstores not ultimately constructed out of the building blocks in this -// package may not work properly. -// -// * This package correctly wraps blockstores with the IdBlockstore. This blockstore: -// * Filters out all puts for blocks with CIDs using the "identity" hash function. -// * Extracts inlined blocks from CIDs using the identity hash function and -// returns them on get/has, ignoring the contents of the blockstore. -// * In the future, this package may enforce additional restrictions on block -// sizes, CID validity, etc. 
-// -// To make auditing for misuse of blockstores tractable, this package re-exports -// parts of the go-ipfs-blockstore package such that no other package needs to -// import it directly. -package blockstore - -import ( - "context" - - ds "github.com/ipfs/go-datastore" - - blockstore "github.com/ipfs/go-ipfs-blockstore" -) - -// NewTemporary returns a temporary blockstore. -func NewTemporary() MemStore { - return make(MemStore) -} - -// NewTemporarySync returns a thread-safe temporary blockstore. -func NewTemporarySync() *SyncStore { - return &SyncStore{bs: make(MemStore)} -} - -// WrapIDStore wraps the underlying blockstore in an "identity" blockstore. -func WrapIDStore(bstore blockstore.Blockstore) blockstore.Blockstore { - return blockstore.NewIdStore(bstore) -} - -// NewBlockstore creates a new blockstore wrapped by the given datastore. -func NewBlockstore(dstore ds.Batching) blockstore.Blockstore { - return WrapIDStore(blockstore.NewBlockstore(dstore)) -} - -// Alias so other packages don't have to import go-ipfs-blockstore -type Blockstore = blockstore.Blockstore -type Viewer = blockstore.Viewer -type CacheOpts = blockstore.CacheOpts - -var ErrNotFound = blockstore.ErrNotFound - -func DefaultCacheOpts() CacheOpts { - return CacheOpts{ - HasBloomFilterSize: 0, - HasBloomFilterHashes: 0, - HasARCCacheSize: 512 << 10, - } -} - -func CachedBlockstore(ctx context.Context, bs Blockstore, opts CacheOpts) (Blockstore, error) { - bs, err := blockstore.CachedBlockstore(ctx, bs, opts) - if err != nil { - return nil, err - } - return WrapIDStore(bs), nil -} diff --git a/lib/blockstore/syncstore.go b/lib/blockstore/syncstore.go deleted file mode 100644 index 86786a0c472..00000000000 --- a/lib/blockstore/syncstore.go +++ /dev/null @@ -1,79 +0,0 @@ -package blockstore - -import ( - "context" - "sync" - - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" -) - -// SyncStore is a terminal blockstore that is a synchronized version -// of MemStore. 
-type SyncStore struct { - mu sync.RWMutex - bs MemStore // specifically use a memStore to save indirection overhead. -} - -func (m *SyncStore) DeleteBlock(k cid.Cid) error { - m.mu.Lock() - defer m.mu.Unlock() - return m.bs.DeleteBlock(k) -} - -func (m *SyncStore) Has(k cid.Cid) (bool, error) { - m.mu.RLock() - defer m.mu.RUnlock() - return m.bs.Has(k) -} - -func (m *SyncStore) View(k cid.Cid, callback func([]byte) error) error { - m.mu.RLock() - defer m.mu.RUnlock() - - return m.bs.View(k, callback) -} - -func (m *SyncStore) Get(k cid.Cid) (blocks.Block, error) { - m.mu.RLock() - defer m.mu.RUnlock() - return m.bs.Get(k) -} - -// GetSize returns the CIDs mapped BlockSize -func (m *SyncStore) GetSize(k cid.Cid) (int, error) { - m.mu.RLock() - defer m.mu.RUnlock() - return m.bs.GetSize(k) -} - -// Put puts a given block to the underlying datastore -func (m *SyncStore) Put(b blocks.Block) error { - m.mu.Lock() - defer m.mu.Unlock() - return m.bs.Put(b) -} - -// PutMany puts a slice of blocks at the same time using batching -// capabilities of the underlying datastore whenever possible. -func (m *SyncStore) PutMany(bs []blocks.Block) error { - m.mu.Lock() - defer m.mu.Unlock() - return m.bs.PutMany(bs) -} - -// AllKeysChan returns a channel from which -// the CIDs in the Blockstore can be read. It should respect -// the given context, closing the channel if it becomes Done. -func (m *SyncStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { - m.mu.RLock() - defer m.mu.RUnlock() - // this blockstore implementation doesn't do any async work. - return m.bs.AllKeysChan(ctx) -} - -// HashOnRead specifies if every read block should be -// rehashed to make sure it matches its CID. 
-func (m *SyncStore) HashOnRead(enabled bool) { - // noop -} diff --git a/lib/bufbstore/buf_bstore.go b/lib/bufbstore/buf_bstore.go deleted file mode 100644 index 5b21ace5ba9..00000000000 --- a/lib/bufbstore/buf_bstore.go +++ /dev/null @@ -1,186 +0,0 @@ -package bufbstore - -import ( - "context" - "os" - - block "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log/v2" - - bstore "github.com/filecoin-project/lotus/lib/blockstore" -) - -var log = logging.Logger("bufbs") - -type BufferedBS struct { - read bstore.Blockstore - write bstore.Blockstore - - readviewer bstore.Viewer - writeviewer bstore.Viewer -} - -func NewBufferedBstore(base bstore.Blockstore) *BufferedBS { - var buf bstore.Blockstore - if os.Getenv("LOTUS_DISABLE_VM_BUF") == "iknowitsabadidea" { - log.Warn("VM BLOCKSTORE BUFFERING IS DISABLED") - buf = base - } else { - buf = bstore.NewTemporary() - } - - bs := &BufferedBS{ - read: base, - write: buf, - } - if v, ok := base.(bstore.Viewer); ok { - bs.readviewer = v - } - if v, ok := buf.(bstore.Viewer); ok { - bs.writeviewer = v - } - if (bs.writeviewer == nil) != (bs.readviewer == nil) { - log.Warnf("one of the stores is not viewable; running less efficiently") - } - return bs -} - -func NewTieredBstore(r bstore.Blockstore, w bstore.Blockstore) *BufferedBS { - return &BufferedBS{ - read: r, - write: w, - } -} - -var _ bstore.Blockstore = (*BufferedBS)(nil) -var _ bstore.Viewer = (*BufferedBS)(nil) - -func (bs *BufferedBS) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { - a, err := bs.read.AllKeysChan(ctx) - if err != nil { - return nil, err - } - - b, err := bs.write.AllKeysChan(ctx) - if err != nil { - return nil, err - } - - out := make(chan cid.Cid) - go func() { - defer close(out) - for a != nil || b != nil { - select { - case val, ok := <-a: - if !ok { - a = nil - } else { - select { - case out <- val: - case <-ctx.Done(): - return - } - } - case val, ok := <-b: - if !ok { - b = nil - } else { - 
select { - case out <- val: - case <-ctx.Done(): - return - } - } - } - } - }() - - return out, nil -} - -func (bs *BufferedBS) DeleteBlock(c cid.Cid) error { - if err := bs.read.DeleteBlock(c); err != nil { - return err - } - - return bs.write.DeleteBlock(c) -} - -func (bs *BufferedBS) View(c cid.Cid, callback func([]byte) error) error { - if bs.writeviewer == nil || bs.readviewer == nil { - // one of the stores isn't Viewer; fall back to pure Get behaviour. - blk, err := bs.Get(c) - if err != nil { - return err - } - return callback(blk.RawData()) - } - - // both stores are viewable. - if err := bs.writeviewer.View(c, callback); err == bstore.ErrNotFound { - // not found in write blockstore; fall through. - } else { - return err // propagate errors, or nil, i.e. found. - } - return bs.readviewer.View(c, callback) -} - -func (bs *BufferedBS) Get(c cid.Cid) (block.Block, error) { - if out, err := bs.write.Get(c); err != nil { - if err != bstore.ErrNotFound { - return nil, err - } - } else { - return out, nil - } - - return bs.read.Get(c) -} - -func (bs *BufferedBS) GetSize(c cid.Cid) (int, error) { - s, err := bs.read.GetSize(c) - if err == bstore.ErrNotFound || s == 0 { - return bs.write.GetSize(c) - } - - return s, err -} - -func (bs *BufferedBS) Put(blk block.Block) error { - has, err := bs.read.Has(blk.Cid()) // TODO: consider dropping this check - if err != nil { - return err - } - - if has { - return nil - } - - return bs.write.Put(blk) -} - -func (bs *BufferedBS) Has(c cid.Cid) (bool, error) { - has, err := bs.write.Has(c) - if err != nil { - return false, err - } - if has { - return true, nil - } - - return bs.read.Has(c) -} - -func (bs *BufferedBS) HashOnRead(hor bool) { - bs.read.HashOnRead(hor) - bs.write.HashOnRead(hor) -} - -func (bs *BufferedBS) PutMany(blks []block.Block) error { - return bs.write.PutMany(blks) -} - -func (bs *BufferedBS) Read() bstore.Blockstore { - return bs.read -} diff --git a/markets/storageadapter/api.go 
b/markets/storageadapter/api.go index 9d89c7aa402..c49a96f885b 100644 --- a/markets/storageadapter/api.go +++ b/markets/storageadapter/api.go @@ -10,7 +10,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/lotus/chain/actors/adt" - "github.com/filecoin-project/lotus/api/apibstore" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" ) @@ -24,7 +24,7 @@ type apiWrapper struct { } func (ca *apiWrapper) diffPreCommits(ctx context.Context, actor address.Address, pre, cur types.TipSetKey) (*miner.PreCommitChanges, error) { - store := adt.WrapStore(ctx, cbor.NewCborStore(apibstore.NewAPIBlockstore(ca.api))) + store := adt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewAPIBlockstore(ca.api))) preAct, err := ca.api.StateGetActor(ctx, actor, pre) if err != nil { diff --git a/markets/storageadapter/dealstatematcher_test.go b/markets/storageadapter/dealstatematcher_test.go index d0c5277d5e3..cb036077842 100644 --- a/markets/storageadapter/dealstatematcher_test.go +++ b/markets/storageadapter/dealstatematcher_test.go @@ -14,8 +14,8 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" + bstore "github.com/filecoin-project/lotus/blockstore" test "github.com/filecoin-project/lotus/chain/events/state/mock" - bstore "github.com/filecoin-project/lotus/lib/blockstore" builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" @@ -28,7 +28,7 @@ import ( func TestDealStateMatcher(t *testing.T) { ctx := context.Background() - bs := bstore.NewTemporarySync() + bs := bstore.NewMemorySync() store := adt2.WrapStore(ctx, cbornode.NewCborStore(bs)) deal1 := &market2.DealState{ diff --git a/markets/storageadapter/provider.go b/markets/storageadapter/provider.go index 4e361cf1cf8..dcfcdcbcf07 100644 --- 
a/markets/storageadapter/provider.go +++ b/markets/storageadapter/provider.go @@ -38,6 +38,7 @@ import ( var addPieceRetryWait = 5 * time.Minute var addPieceRetryTimeout = 6 * time.Hour +var defaultMaxProviderCollateralMultiplier = uint64(2) var log = logging.Logger("storageadapter") type ProviderNodeAdapter struct { @@ -51,12 +52,13 @@ type ProviderNodeAdapter struct { dealPublisher *DealPublisher - addBalanceSpec *api.MessageSendSpec - dsMatcher *dealStateMatcher - scMgr *SectorCommittedManager + addBalanceSpec *api.MessageSendSpec + maxDealCollateralMultiplier uint64 + dsMatcher *dealStateMatcher + scMgr *SectorCommittedManager } -func NewProviderNodeAdapter(fc *config.MinerFeeConfig) func(mctx helpers.MetricsCtx, lc fx.Lifecycle, dag dtypes.StagingDAG, secb *sectorblocks.SectorBlocks, full api.FullNode, dealPublisher *DealPublisher) storagemarket.StorageProviderNode { +func NewProviderNodeAdapter(fc *config.MinerFeeConfig, dc *config.DealmakingConfig) func(mctx helpers.MetricsCtx, lc fx.Lifecycle, dag dtypes.StagingDAG, secb *sectorblocks.SectorBlocks, full api.FullNode, dealPublisher *DealPublisher) storagemarket.StorageProviderNode { return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, dag dtypes.StagingDAG, secb *sectorblocks.SectorBlocks, full api.FullNode, dealPublisher *DealPublisher) storagemarket.StorageProviderNode { ctx := helpers.LifecycleCtx(mctx, lc) @@ -73,6 +75,10 @@ func NewProviderNodeAdapter(fc *config.MinerFeeConfig) func(mctx helpers.Metrics if fc != nil { na.addBalanceSpec = &api.MessageSendSpec{MaxFee: abi.TokenAmount(fc.MaxMarketBalanceAddFee)} } + na.maxDealCollateralMultiplier = defaultMaxProviderCollateralMultiplier + if dc != nil { + na.maxDealCollateralMultiplier = dc.MaxProviderCollateralMultiplier + } na.scMgr = NewSectorCommittedManager(ev, na, &apiWrapper{api: full}) return na @@ -257,7 +263,11 @@ func (n *ProviderNodeAdapter) DealProviderCollateralBounds(ctx context.Context, return abi.TokenAmount{}, abi.TokenAmount{}, err } - 
return bounds.Min, bounds.Max, nil + // The maximum amount of collateral that the provider will put into escrow + // for a deal is calculated as a multiple of the minimum bounded amount + max := types.BigMul(bounds.Min, types.NewInt(n.maxDealCollateralMultiplier)) + + return bounds.Min, max, nil } // TODO: Remove dealID parameter, change publishCid to be cid.Cid (instead of pointer) diff --git a/metrics/exporter.go b/metrics/exporter.go new file mode 100644 index 00000000000..92786c26b56 --- /dev/null +++ b/metrics/exporter.go @@ -0,0 +1,32 @@ +package metrics + +import ( + "net/http" + _ "net/http/pprof" + + "contrib.go.opencensus.io/exporter/prometheus" + logging "github.com/ipfs/go-log/v2" + promclient "github.com/prometheus/client_golang/prometheus" +) + +var log = logging.Logger("metrics") + +func Exporter() http.Handler { + // Prometheus globals are exposed as interfaces, but the prometheus + // OpenCensus exporter expects a concrete *Registry. The concrete type of + // the globals are actually *Registry, so we downcast them, staying + // defensive in case things change under the hood. 
+ registry, ok := promclient.DefaultRegisterer.(*promclient.Registry) + if !ok { + log.Warnf("failed to export default prometheus registry; some metrics will be unavailable; unexpected type: %T", promclient.DefaultRegisterer) + } + exporter, err := prometheus.NewExporter(prometheus.Options{ + Registry: registry, + Namespace: "lotus", + }) + if err != nil { + log.Errorf("could not create the prometheus stats exporter: %v", err) + } + + return exporter +} diff --git a/metrics/metrics.go b/metrics/metrics.go index 996fa95b90c..5428a81bcc7 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -9,18 +9,30 @@ import ( "go.opencensus.io/tag" rpcmetrics "github.com/filecoin-project/go-jsonrpc/metrics" + + "github.com/filecoin-project/lotus/blockstore" ) // Distribution -var defaultMillisecondsDistribution = view.Distribution(0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000) +var defaultMillisecondsDistribution = view.Distribution(0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 3000, 4000, 5000, 7500, 10000, 20000, 50000, 100000) +var workMillisecondsDistribution = view.Distribution( + 250, 500, 1000, 2000, 5000, 10_000, 30_000, 60_000, 2*60_000, 5*60_000, 10*60_000, 15*60_000, 30*60_000, // short sealing tasks + 40*60_000, 45*60_000, 50*60_000, 55*60_000, 60*60_000, 65*60_000, 70*60_000, 75*60_000, 80*60_000, 85*60_000, 100*60_000, 120*60_000, // PC2 / C2 range + 130*60_000, 140*60_000, 150*60_000, 160*60_000, 180*60_000, 200*60_000, 220*60_000, 260*60_000, 300*60_000, // PC1 range + 350*60_000, 400*60_000, 600*60_000, 800*60_000, 1000*60_000, 1300*60_000, 1800*60_000, 4000*60_000, 10000*60_000, // intel PC1 range +) // Global Tags var ( - Version, _ = tag.NewKey("version") - Commit, _ = tag.NewKey("commit") - PeerID, _ = 
tag.NewKey("peer_id") - MinerID, _ = tag.NewKey("miner_id") - FailureType, _ = tag.NewKey("failure_type") + // common + Version, _ = tag.NewKey("version") + Commit, _ = tag.NewKey("commit") + NodeType, _ = tag.NewKey("node_type") + PeerID, _ = tag.NewKey("peer_id") + MinerID, _ = tag.NewKey("miner_id") + FailureType, _ = tag.NewKey("failure_type") + + // chain Local, _ = tag.NewKey("local") MessageFrom, _ = tag.NewKey("message_from") MessageTo, _ = tag.NewKey("message_to") @@ -28,11 +40,20 @@ var ( ReceivedFrom, _ = tag.NewKey("received_from") Endpoint, _ = tag.NewKey("endpoint") APIInterface, _ = tag.NewKey("api") // to distinguish between gateway api and full node api endpoint calls + + // miner + TaskType, _ = tag.NewKey("task_type") + WorkerHostname, _ = tag.NewKey("worker_hostname") ) // Measures var ( - LotusInfo = stats.Int64("info", "Arbitrary counter to tag lotus info to", stats.UnitDimensionless) + // common + LotusInfo = stats.Int64("info", "Arbitrary counter to tag lotus info to", stats.UnitDimensionless) + PeerCount = stats.Int64("peer/count", "Current number of FIL peers", stats.UnitDimensionless) + APIRequestDuration = stats.Float64("api/request_duration_ms", "Duration of API requests", stats.UnitMilliseconds) + + // chain ChainNodeHeight = stats.Int64("chain/node_height", "Current Height of the node", stats.UnitDimensionless) ChainNodeHeightExpected = stats.Int64("chain/node_height_expected", "Expected Height of the node", stats.UnitDimensionless) ChainNodeWorkerHeight = stats.Int64("chain/node_worker_height", "Current Height of workers on the node", stats.UnitDimensionless) @@ -46,7 +67,6 @@ var ( BlockValidationSuccess = stats.Int64("block/success", "Counter for block validation successes", stats.UnitDimensionless) BlockValidationDurationMilliseconds = stats.Float64("block/validation_ms", "Duration for Block Validation in ms", stats.UnitMilliseconds) BlockDelay = stats.Int64("block/delay", "Delay of accepted blocks, where delay is >5s", 
stats.UnitMilliseconds) - PeerCount = stats.Int64("peer/count", "Current number of FIL peers", stats.UnitDimensionless) PubsubPublishMessage = stats.Int64("pubsub/published", "Counter for total published messages", stats.UnitDimensionless) PubsubDeliverMessage = stats.Int64("pubsub/delivered", "Counter for total delivered messages", stats.UnitDimensionless) PubsubRejectMessage = stats.Int64("pubsub/rejected", "Counter for total rejected messages", stats.UnitDimensionless) @@ -54,9 +74,21 @@ var ( PubsubRecvRPC = stats.Int64("pubsub/recv_rpc", "Counter for total received RPCs", stats.UnitDimensionless) PubsubSendRPC = stats.Int64("pubsub/send_rpc", "Counter for total sent RPCs", stats.UnitDimensionless) PubsubDropRPC = stats.Int64("pubsub/drop_rpc", "Counter for total dropped RPCs", stats.UnitDimensionless) - APIRequestDuration = stats.Float64("api/request_duration_ms", "Duration of API requests", stats.UnitMilliseconds) VMFlushCopyDuration = stats.Float64("vm/flush_copy_ms", "Time spent in VM Flush Copy", stats.UnitMilliseconds) VMFlushCopyCount = stats.Int64("vm/flush_copy_count", "Number of copied objects", stats.UnitDimensionless) + + // miner + WorkerCallsStarted = stats.Int64("sealing/worker_calls_started", "Counter of started worker tasks", stats.UnitDimensionless) + WorkerCallsReturnedCount = stats.Int64("sealing/worker_calls_returned_count", "Counter of returned worker tasks", stats.UnitDimensionless) + WorkerCallsReturnedDuration = stats.Float64("sealing/worker_calls_returned_ms", "Counter of returned worker tasks", stats.UnitMilliseconds) + WorkerUntrackedCallsReturned = stats.Int64("sealing/worker_untracked_calls_returned", "Counter of returned untracked worker tasks", stats.UnitDimensionless) + + // splitstore + SplitstoreMiss = stats.Int64("splitstore/miss", "Number of misses in hotstre access", stats.UnitDimensionless) + SplitstoreCompactionTimeSeconds = stats.Float64("splitstore/compaction_time", "Compaction time in seconds", stats.UnitSeconds) + 
SplitstoreCompactionHot = stats.Int64("splitstore/hot", "Number of hot blocks in last compaction", stats.UnitDimensionless) + SplitstoreCompactionCold = stats.Int64("splitstore/cold", "Number of cold blocks in last compaction", stats.UnitDimensionless) + SplitstoreCompactionDead = stats.Int64("splitstore/dead", "Number of dead blocks in last compaction", stats.UnitDimensionless) ) var ( @@ -176,11 +208,64 @@ var ( Measure: VMFlushCopyCount, Aggregation: view.Sum(), } + + // miner + WorkerCallsStartedView = &view.View{ + Measure: WorkerCallsStarted, + Aggregation: view.Count(), + TagKeys: []tag.Key{TaskType, WorkerHostname}, + } + WorkerCallsReturnedCountView = &view.View{ + Measure: WorkerCallsReturnedCount, + Aggregation: view.Count(), + TagKeys: []tag.Key{TaskType, WorkerHostname}, + } + WorkerUntrackedCallsReturnedView = &view.View{ + Measure: WorkerUntrackedCallsReturned, + Aggregation: view.Count(), + } + WorkerCallsReturnedDurationView = &view.View{ + Measure: WorkerCallsReturnedDuration, + Aggregation: workMillisecondsDistribution, + TagKeys: []tag.Key{TaskType, WorkerHostname}, + } + + // splitstore + SplitstoreMissView = &view.View{ + Measure: SplitstoreMiss, + Aggregation: view.Count(), + } + SplitstoreCompactionTimeSecondsView = &view.View{ + Measure: SplitstoreCompactionTimeSeconds, + Aggregation: view.LastValue(), + } + SplitstoreCompactionHotView = &view.View{ + Measure: SplitstoreCompactionHot, + Aggregation: view.LastValue(), + } + SplitstoreCompactionColdView = &view.View{ + Measure: SplitstoreCompactionCold, + Aggregation: view.Sum(), + } + SplitstoreCompactionDeadView = &view.View{ + Measure: SplitstoreCompactionDead, + Aggregation: view.Sum(), + } ) // DefaultViews is an array of OpenCensus views for metric gathering purposes -var DefaultViews = append([]*view.View{ - InfoView, +var DefaultViews = func() []*view.View { + views := []*view.View{ + InfoView, + PeerCountView, + APIRequestDurationView, + } + views = append(views, 
blockstore.DefaultViews...) + views = append(views, rpcmetrics.DefaultViews...) + return views +}() + +var ChainNodeViews = append([]*view.View{ ChainNodeHeightView, ChainNodeHeightExpectedView, ChainNodeWorkerHeightView, @@ -193,7 +278,6 @@ var DefaultViews = append([]*view.View{ MessageReceivedView, MessageValidationFailureView, MessageValidationSuccessView, - PeerCountView, PubsubPublishMessageView, PubsubDeliverMessageView, PubsubRejectMessageView, @@ -201,11 +285,21 @@ var DefaultViews = append([]*view.View{ PubsubRecvRPCView, PubsubSendRPCView, PubsubDropRPCView, - APIRequestDurationView, VMFlushCopyCountView, VMFlushCopyDurationView, -}, - rpcmetrics.DefaultViews...) + SplitstoreMissView, + SplitstoreCompactionTimeSecondsView, + SplitstoreCompactionHotView, + SplitstoreCompactionColdView, + SplitstoreCompactionDeadView, +}, DefaultViews...) + +var MinerNodeViews = append([]*view.View{ + WorkerCallsStartedView, + WorkerCallsReturnedCountView, + WorkerUntrackedCallsReturnedView, + WorkerCallsReturnedDurationView, +}, DefaultViews...) // SinceInMilliseconds returns the duration of time since the provide time as a float64. 
func SinceInMilliseconds(startTime time.Time) float64 { diff --git a/node/builder.go b/node/builder.go index 0766d934afe..ee0a66e5322 100644 --- a/node/builder.go +++ b/node/builder.go @@ -145,7 +145,7 @@ const ( HeadMetricsKey SettlePaymentChannelsKey RunPeerTaggerKey - SetupFallbackBlockstoreKey + SetupFallbackBlockstoresKey SetApiEndpointKey @@ -408,7 +408,7 @@ var MinerNode = Options( Override(new(dtypes.StorageDealFilter), modules.BasicDealFilter(nil)), Override(new(storagemarket.StorageProvider), modules.StorageProvider), Override(new(*storageadapter.DealPublisher), storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{})), - Override(new(storagemarket.StorageProviderNode), storageadapter.NewProviderNodeAdapter(nil)), + Override(new(storagemarket.StorageProviderNode), storageadapter.NewProviderNodeAdapter(nil, nil)), Override(HandleMigrateProviderFundsKey, modules.HandleMigrateProviderFunds), Override(HandleDealsKey, modules.HandleDeals), @@ -508,6 +508,7 @@ func ConfigCommon(cfg *config.Common) Option { Override(AddrsFactoryKey, lp2p.AddrsFactory( cfg.Libp2p.AnnounceAddresses, cfg.Libp2p.NoAnnounceAddresses)), + Override(new(dtypes.MetadataDS), modules.Datastore(cfg.Backup.DisableMetadataLog)), ) } @@ -567,7 +568,7 @@ func ConfigStorageMiner(c interface{}) Option { Period: time.Duration(cfg.Dealmaking.PublishMsgPeriod), MaxDealsPerMsg: cfg.Dealmaking.MaxDealsPerPublishMsg, })), - Override(new(storagemarket.StorageProviderNode), storageadapter.NewProviderNodeAdapter(&cfg.Fees)), + Override(new(storagemarket.StorageProviderNode), storageadapter.NewProviderNodeAdapter(&cfg.Fees, &cfg.Dealmaking)), Override(new(sectorstorage.SealerConfig), cfg.Storage), Override(new(*storage.AddressSelector), modules.AddressSelector(&cfg.Addresses)), @@ -586,16 +587,43 @@ func Repo(r repo.Repo) Option { return err } + var cfg *config.Chainstore + switch settings.nodeType { + case repo.FullNode: + cfgp, ok := c.(*config.FullNode) + if !ok { + return 
xerrors.Errorf("invalid config from repo, got: %T", c) + } + cfg = &cfgp.Chainstore + default: + cfg = &config.Chainstore{} + } + return Options( Override(new(repo.LockedRepo), modules.LockedRepo(lr)), // module handles closing - Override(new(dtypes.MetadataDS), modules.Datastore), - Override(new(dtypes.ChainRawBlockstore), modules.ChainRawBlockstore), - Override(new(dtypes.ChainBlockstore), From(new(dtypes.ChainRawBlockstore))), + Override(new(dtypes.UniversalBlockstore), modules.UniversalBlockstore), + + If(cfg.EnableSplitstore, + If(cfg.Splitstore.HotStoreType == "badger", + Override(new(dtypes.HotBlockstore), modules.BadgerHotBlockstore)), + Override(new(dtypes.SplitBlockstore), modules.SplitBlockstore(cfg)), + Override(new(dtypes.ChainBlockstore), modules.ChainSplitBlockstore), + Override(new(dtypes.StateBlockstore), modules.StateSplitBlockstore), + Override(new(dtypes.BaseBlockstore), From(new(dtypes.SplitBlockstore))), + Override(new(dtypes.ExposedBlockstore), From(new(dtypes.SplitBlockstore))), + ), + If(!cfg.EnableSplitstore, + Override(new(dtypes.ChainBlockstore), modules.ChainFlatBlockstore), + Override(new(dtypes.StateBlockstore), modules.StateFlatBlockstore), + Override(new(dtypes.BaseBlockstore), From(new(dtypes.UniversalBlockstore))), + Override(new(dtypes.ExposedBlockstore), From(new(dtypes.UniversalBlockstore))), + ), If(os.Getenv("LOTUS_ENABLE_CHAINSTORE_FALLBACK") == "1", Override(new(dtypes.ChainBlockstore), modules.FallbackChainBlockstore), - Override(SetupFallbackBlockstoreKey, modules.SetupFallbackBlockstore), + Override(new(dtypes.StateBlockstore), modules.FallbackStateBlockstore), + Override(SetupFallbackBlockstoresKey, modules.InitFallbackBlockstores), ), Override(new(dtypes.ClientImportMgr), modules.ClientImportMgr), diff --git a/node/config/def.go b/node/config/def.go index 3be7733679b..ec7d0ad6057 100644 --- a/node/config/def.go +++ b/node/config/def.go @@ -13,6 +13,7 @@ import ( // Common is common config between full node and miner 
type Common struct { API API + Backup Backup Libp2p Libp2p Pubsub Pubsub } @@ -20,14 +21,19 @@ // FullNode is a full node config type FullNode struct { Common - Client Client - Metrics Metrics - Wallet Wallet - Fees FeeConfig + Client Client + Metrics Metrics + Wallet Wallet + Fees FeeConfig + Chainstore Chainstore } // // Common +type Backup struct { + DisableMetadataLog bool +} + // StorageMiner is a miner config type StorageMiner struct { Common @@ -54,6 +60,9 @@ type DealmakingConfig struct { // The maximum number of deals to include in a single PublishStorageDeals // message MaxDealsPerPublishMsg uint64 + // The maximum collateral that the provider will put up against a deal, + // as a multiplier of the minimum collateral bound + MaxProviderCollateralMultiplier uint64 Filter string RetrievalFilter string @@ -72,6 +81,11 @@ type SealingConfig struct { WaitDealsDelay Duration AlwaysKeepUnsealedCopy bool + + // Keep this many sectors in sealing pipeline, start CC if needed + // todo TargetSealingSectors uint64 + + // todo TargetSectors - stop auto-pledging new sectors after this many sectors are sealed, default CC upgrade for deals sectors if above } type MinerFeeConfig struct { @@ -86,6 +100,16 @@ type MinerFeeConfig struct { type MinerAddressConfig struct { PreCommitControl []string CommitControl []string + TerminateControl []string + + // DisableOwnerFallback disables usage of the owner address for messages + // sent automatically + DisableOwnerFallback bool + // DisableWorkerFallback disables usage of the worker address for messages + // sent automatically, if control addresses are configured. + // A control address that doesn't have enough funds will still be chosen + // over the worker address if this flag is set. 
+ DisableWorkerFallback bool } // API contains configs for API endpoint @@ -109,9 +133,24 @@ type Libp2p struct { } type Pubsub struct { - Bootstrapper bool - DirectPeers []string - RemoteTracer string + Bootstrapper bool + DirectPeers []string + IPColocationWhitelist []string + RemoteTracer string +} + +type Chainstore struct { + EnableSplitstore bool + Splitstore Splitstore +} + +type Splitstore struct { + HotStoreType string + TrackingStoreType string + MarkSetType string + EnableFullCompaction bool + EnableGC bool // EXPERIMENTAL + Archival bool } // // Full Node @@ -179,6 +218,12 @@ func DefaultFullNode() *FullNode { Client: Client{ SimultaneousTransfers: DefaultSimultaneousTransfers, }, + Chainstore: Chainstore{ + EnableSplitstore: false, + Splitstore: Splitstore{ + HotStoreType: "badger", + }, + }, } } @@ -214,9 +259,10 @@ func DefaultStorageMiner() *StorageMiner { ConsiderUnverifiedStorageDeals: true, PieceCidBlocklist: []cid.Cid{}, // TODO: It'd be nice to set this based on sector size - ExpectedSealDuration: Duration(time.Hour * 24), - PublishMsgPeriod: Duration(time.Hour), - MaxDealsPerPublishMsg: 8, + ExpectedSealDuration: Duration(time.Hour * 24), + PublishMsgPeriod: Duration(time.Hour), + MaxDealsPerPublishMsg: 8, + MaxProviderCollateralMultiplier: 2, }, Fees: MinerFeeConfig{ diff --git a/node/impl/common/common.go b/node/impl/common/common.go index 94bcd5acef4..389e2fbc621 100644 --- a/node/impl/common/common.go +++ b/node/impl/common/common.go @@ -7,6 +7,9 @@ import ( "github.com/gbrlsnchs/jwt/v3" "github.com/google/uuid" + "go.uber.org/fx" + "golang.org/x/xerrors" + logging "github.com/ipfs/go-log/v2" "github.com/libp2p/go-libp2p-core/host" metrics "github.com/libp2p/go-libp2p-core/metrics" @@ -17,8 +20,6 @@ import ( basichost "github.com/libp2p/go-libp2p/p2p/host/basic" "github.com/libp2p/go-libp2p/p2p/net/conngater" ma "github.com/multiformats/go-multiaddr" - "go.uber.org/fx" - "golang.org/x/xerrors" "github.com/filecoin-project/go-jsonrpc/auth" 
@@ -99,6 +100,37 @@ func (a *CommonAPI) NetPeers(context.Context) ([]peer.AddrInfo, error) { return out, nil } +func (a *CommonAPI) NetPeerInfo(_ context.Context, p peer.ID) (*api.ExtendedPeerInfo, error) { + info := &api.ExtendedPeerInfo{ID: p} + + agent, err := a.Host.Peerstore().Get(p, "AgentVersion") + if err == nil { + info.Agent = agent.(string) + } + + for _, a := range a.Host.Peerstore().Addrs(p) { + info.Addrs = append(info.Addrs, a.String()) + } + sort.Strings(info.Addrs) + + protocols, err := a.Host.Peerstore().GetProtocols(p) + if err == nil { + sort.Strings(protocols) + info.Protocols = protocols + } + + if cm := a.Host.ConnManager().GetTagInfo(p); cm != nil { + info.ConnMgrMeta = &api.ConnMgrInfo{ + FirstSeen: cm.FirstSeen, + Value: cm.Value, + Tags: cm.Tags, + Conns: cm.Conns, + } + } + + return info, nil +} + func (a *CommonAPI) NetConnect(ctx context.Context, p peer.AddrInfo) error { if swrm, ok := a.Host.Network().(*swarm.Swarm); ok { swrm.Backoff().Clear(p.ID) @@ -179,13 +211,13 @@ func (a *CommonAPI) ID(context.Context) (peer.ID, error) { return a.Host.ID(), nil } -func (a *CommonAPI) Version(context.Context) (api.Version, error) { - v, err := build.VersionForType(build.RunningNodeType) +func (a *CommonAPI) Version(context.Context) (api.APIVersion, error) { + v, err := api.VersionForType(api.RunningNodeType) if err != nil { - return api.Version{}, err + return api.APIVersion{}, err } - return api.Version{ + return api.APIVersion{ Version: build.UserVersion(), APIVersion: v, diff --git a/node/impl/full/chain.go b/node/impl/full/chain.go index a3410b8db1d..25d366a87b0 100644 --- a/node/impl/full/chain.go +++ b/node/impl/full/chain.go @@ -31,10 +31,11 @@ import ( "github.com/filecoin-project/specs-actors/actors/util/adt" "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" - 
"github.com/filecoin-project/lotus/lib/blockstore" + "github.com/filecoin-project/lotus/node/modules/dtypes" ) var log = logging.Logger("fullnode") @@ -57,6 +58,11 @@ type ChainModule struct { fx.In Chain *store.ChainStore + + // ExposedBlockstore is the global monolith blockstore that is safe to + // expose externally. In the future, this will be segregated into two + // blockstores. + ExposedBlockstore dtypes.ExposedBlockstore } var _ ChainModuleAPI = (*ChainModule)(nil) @@ -68,6 +74,11 @@ type ChainAPI struct { ChainModuleAPI Chain *store.ChainStore + + // ExposedBlockstore is the global monolith blockstore that is safe to + // expose externally. In the future, this will be segregated into two + // blockstores. + ExposedBlockstore dtypes.ExposedBlockstore } func (m *ChainModule) ChainNotify(ctx context.Context) (<-chan []*api.HeadChange, error) { @@ -212,7 +223,7 @@ func (m *ChainModule) ChainGetTipSetByHeight(ctx context.Context, h abi.ChainEpo } func (m *ChainModule) ChainReadObj(ctx context.Context, obj cid.Cid) ([]byte, error) { - blk, err := m.Chain.Blockstore().Get(obj) + blk, err := m.ExposedBlockstore.Get(obj) if err != nil { return nil, xerrors.Errorf("blockstore get: %w", err) } @@ -221,15 +232,15 @@ func (m *ChainModule) ChainReadObj(ctx context.Context, obj cid.Cid) ([]byte, er } func (a *ChainAPI) ChainDeleteObj(ctx context.Context, obj cid.Cid) error { - return a.Chain.Blockstore().DeleteBlock(obj) + return a.ExposedBlockstore.DeleteBlock(obj) } func (m *ChainModule) ChainHasObj(ctx context.Context, obj cid.Cid) (bool, error) { - return m.Chain.Blockstore().Has(obj) + return m.ExposedBlockstore.Has(obj) } func (a *ChainAPI) ChainStatObj(ctx context.Context, obj cid.Cid, base cid.Cid) (api.ObjStat, error) { - bs := a.Chain.Blockstore() + bs := a.ExposedBlockstore bsvc := blockservice.New(bs, offline.Exchange(bs)) dag := merkledag.NewDAGService(bsvc) @@ -514,7 +525,7 @@ func (a *ChainAPI) ChainGetNode(ctx context.Context, p string) (*api.IpldObject, 
return nil, xerrors.Errorf("parsing path: %w", err) } - bs := a.Chain.Blockstore() + bs := a.ExposedBlockstore bsvc := blockservice.New(bs, offline.Exchange(bs)) dag := merkledag.NewDAGService(bsvc) diff --git a/node/impl/full/state.go b/node/impl/full/state.go index 0f5d16ab2a6..90e059f809a 100644 --- a/node/impl/full/state.go +++ b/node/impl/full/state.go @@ -97,7 +97,7 @@ func (a *StateAPI) StateMinerSectors(ctx context.Context, addr address.Address, return nil, xerrors.Errorf("failed to load miner actor: %w", err) } - mas, err := miner.Load(a.StateManager.ChainStore().Store(ctx), act) + mas, err := miner.Load(a.StateManager.ChainStore().ActorStore(ctx), act) if err != nil { return nil, xerrors.Errorf("failed to load miner actor state: %w", err) } @@ -111,7 +111,7 @@ func (a *StateAPI) StateMinerActiveSectors(ctx context.Context, maddr address.Ad return nil, xerrors.Errorf("failed to load miner actor: %w", err) } - mas, err := miner.Load(a.StateManager.ChainStore().Store(ctx), act) + mas, err := miner.Load(a.StateManager.ChainStore().ActorStore(ctx), act) if err != nil { return nil, xerrors.Errorf("failed to load miner actor state: %w", err) } @@ -135,7 +135,7 @@ func (m *StateModule) StateMinerInfo(ctx context.Context, actor address.Address, return miner.MinerInfo{}, xerrors.Errorf("failed to load miner actor: %w", err) } - mas, err := miner.Load(m.StateManager.ChainStore().Store(ctx), act) + mas, err := miner.Load(m.StateManager.ChainStore().ActorStore(ctx), act) if err != nil { return miner.MinerInfo{}, xerrors.Errorf("failed to load miner actor state: %w", err) } @@ -153,7 +153,7 @@ func (a *StateAPI) StateMinerDeadlines(ctx context.Context, m address.Address, t return nil, xerrors.Errorf("failed to load miner actor: %w", err) } - mas, err := miner.Load(a.StateManager.ChainStore().Store(ctx), act) + mas, err := miner.Load(a.StateManager.ChainStore().ActorStore(ctx), act) if err != nil { return nil, xerrors.Errorf("failed to load miner actor state: %w", err) 
} @@ -192,7 +192,7 @@ func (a *StateAPI) StateMinerPartitions(ctx context.Context, m address.Address, return nil, xerrors.Errorf("failed to load miner actor: %w", err) } - mas, err := miner.Load(a.StateManager.ChainStore().Store(ctx), act) + mas, err := miner.Load(a.StateManager.ChainStore().ActorStore(ctx), act) if err != nil { return nil, xerrors.Errorf("failed to load miner actor state: %w", err) } @@ -253,7 +253,7 @@ func (m *StateModule) StateMinerProvingDeadline(ctx context.Context, addr addres return nil, xerrors.Errorf("failed to load miner actor: %w", err) } - mas, err := miner.Load(m.StateManager.ChainStore().Store(ctx), act) + mas, err := miner.Load(m.StateManager.ChainStore().ActorStore(ctx), act) if err != nil { return nil, xerrors.Errorf("failed to load miner actor state: %w", err) } @@ -272,7 +272,7 @@ func (a *StateAPI) StateMinerFaults(ctx context.Context, addr address.Address, t return bitfield.BitField{}, xerrors.Errorf("failed to load miner actor: %w", err) } - mas, err := miner.Load(a.StateManager.ChainStore().Store(ctx), act) + mas, err := miner.Load(a.StateManager.ChainStore().ActorStore(ctx), act) if err != nil { return bitfield.BitField{}, xerrors.Errorf("failed to load miner actor state: %w", err) } @@ -329,7 +329,7 @@ func (a *StateAPI) StateMinerRecoveries(ctx context.Context, addr address.Addres return bitfield.BitField{}, xerrors.Errorf("failed to load miner actor: %w", err) } - mas, err := miner.Load(a.StateManager.ChainStore().Store(ctx), act) + mas, err := miner.Load(a.StateManager.ChainStore().ActorStore(ctx), act) if err != nil { return bitfield.BitField{}, xerrors.Errorf("failed to load miner actor state: %w", err) } @@ -461,7 +461,7 @@ func (a *StateAPI) StateReadState(ctx context.Context, actor address.Address, ts return nil, xerrors.Errorf("getting actor: %w", err) } - blk, err := a.Chain.Blockstore().Get(act.Head) + blk, err := a.Chain.StateBlockstore().Get(act.Head) if err != nil { return nil, xerrors.Errorf("getting actor 
head: %w", err) } @@ -707,7 +707,7 @@ func (m *StateModule) StateMarketStorageDeal(ctx context.Context, dealId abi.Dea } func (a *StateAPI) StateChangedActors(ctx context.Context, old cid.Cid, new cid.Cid) (map[string]types.Actor, error) { - store := a.Chain.Store(ctx) + store := a.Chain.ActorStore(ctx) oldTree, err := state.LoadStateTree(store, old) if err != nil { @@ -727,7 +727,7 @@ func (a *StateAPI) StateMinerSectorCount(ctx context.Context, addr address.Addre if err != nil { return api.MinerSectors{}, err } - mas, err := miner.Load(a.Chain.Store(ctx), act) + mas, err := miner.Load(a.Chain.ActorStore(ctx), act) if err != nil { return api.MinerSectors{}, err } @@ -792,7 +792,7 @@ func (a *StateAPI) StateSectorExpiration(ctx context.Context, maddr address.Addr if err != nil { return nil, err } - mas, err := miner.Load(a.StateManager.ChainStore().Store(ctx), act) + mas, err := miner.Load(a.StateManager.ChainStore().ActorStore(ctx), act) if err != nil { return nil, err } @@ -804,7 +804,7 @@ func (a *StateAPI) StateSectorPartition(ctx context.Context, maddr address.Addre if err != nil { return nil, err } - mas, err := miner.Load(a.StateManager.ChainStore().Store(ctx), act) + mas, err := miner.Load(a.StateManager.ChainStore().ActorStore(ctx), act) if err != nil { return nil, err } @@ -890,7 +890,7 @@ func (m *StateModule) MsigGetAvailableBalance(ctx context.Context, addr address. 
if err != nil { return types.EmptyInt, xerrors.Errorf("failed to load multisig actor: %w", err) } - msas, err := multisig.Load(m.Chain.Store(ctx), act) + msas, err := multisig.Load(m.Chain.ActorStore(ctx), act) if err != nil { return types.EmptyInt, xerrors.Errorf("failed to load multisig actor state: %w", err) } @@ -912,7 +912,7 @@ func (a *StateAPI) MsigGetVestingSchedule(ctx context.Context, addr address.Addr return api.EmptyVesting, xerrors.Errorf("failed to load multisig actor: %w", err) } - msas, err := multisig.Load(a.Chain.Store(ctx), act) + msas, err := multisig.Load(a.Chain.ActorStore(ctx), act) if err != nil { return api.EmptyVesting, xerrors.Errorf("failed to load multisig actor state: %w", err) } @@ -961,7 +961,7 @@ func (m *StateModule) MsigGetVested(ctx context.Context, addr address.Address, s return types.EmptyInt, xerrors.Errorf("failed to load multisig actor at end epoch: %w", err) } - msas, err := multisig.Load(m.Chain.Store(ctx), act) + msas, err := multisig.Load(m.Chain.ActorStore(ctx), act) if err != nil { return types.EmptyInt, xerrors.Errorf("failed to load multisig actor state: %w", err) } @@ -989,7 +989,7 @@ func (m *StateModule) MsigGetPending(ctx context.Context, addr address.Address, if err != nil { return nil, xerrors.Errorf("failed to load multisig actor: %w", err) } - msas, err := multisig.Load(m.Chain.Store(ctx), act) + msas, err := multisig.Load(m.Chain.ActorStore(ctx), act) if err != nil { return nil, xerrors.Errorf("failed to load multisig actor state: %w", err) } @@ -1032,7 +1032,7 @@ func (a *StateAPI) StateMinerPreCommitDepositForPower(ctx context.Context, maddr return types.EmptyInt, xerrors.Errorf("failed to get resolve size: %w", err) } - store := a.Chain.Store(ctx) + store := a.Chain.ActorStore(ctx) var sectorWeight abi.StoragePower if act, err := state.GetActor(market.Address); err != nil { @@ -1093,13 +1093,13 @@ func (a *StateAPI) StateMinerInitialPledgeCollateral(ctx context.Context, maddr return types.EmptyInt, 
xerrors.Errorf("failed to get resolve size: %w", err) } - store := a.Chain.Store(ctx) + store := a.Chain.ActorStore(ctx) var sectorWeight abi.StoragePower if act, err := state.GetActor(market.Address); err != nil { - return types.EmptyInt, xerrors.Errorf("loading miner actor %s: %w", maddr, err) + return types.EmptyInt, xerrors.Errorf("loading market actor: %w", err) } else if s, err := market.Load(store, act); err != nil { - return types.EmptyInt, xerrors.Errorf("loading market actor state %s: %w", maddr, err) + return types.EmptyInt, xerrors.Errorf("loading market actor state: %w", err) } else if w, vw, err := s.VerifyDealsForActivation(maddr, pci.DealIDs, ts.Height(), pci.Expiration); err != nil { return types.EmptyInt, xerrors.Errorf("verifying deals for activation: %w", err) } else { @@ -1113,7 +1113,7 @@ func (a *StateAPI) StateMinerInitialPledgeCollateral(ctx context.Context, maddr pledgeCollateral abi.TokenAmount ) if act, err := state.GetActor(power.Address); err != nil { - return types.EmptyInt, xerrors.Errorf("loading miner actor: %w", err) + return types.EmptyInt, xerrors.Errorf("loading power actor: %w", err) } else if s, err := power.Load(store, act); err != nil { return types.EmptyInt, xerrors.Errorf("loading power actor state: %w", err) } else if p, err := s.TotalPowerSmoothed(); err != nil { @@ -1127,7 +1127,7 @@ func (a *StateAPI) StateMinerInitialPledgeCollateral(ctx context.Context, maddr rewardActor, err := state.GetActor(reward.Address) if err != nil { - return types.EmptyInt, xerrors.Errorf("loading miner actor: %w", err) + return types.EmptyInt, xerrors.Errorf("loading reward actor: %w", err) } rewardState, err := reward.Load(store, rewardActor) @@ -1164,7 +1164,7 @@ func (a *StateAPI) StateMinerAvailableBalance(ctx context.Context, maddr address return types.EmptyInt, xerrors.Errorf("failed to load miner actor: %w", err) } - mas, err := miner.Load(a.StateManager.ChainStore().Store(ctx), act) + mas, err := 
miner.Load(a.StateManager.ChainStore().ActorStore(ctx), act) if err != nil { return types.EmptyInt, xerrors.Errorf("failed to load miner actor state: %w", err) } @@ -1193,7 +1193,7 @@ func (a *StateAPI) StateMinerSectorAllocated(ctx context.Context, maddr address. return false, xerrors.Errorf("failed to load miner actor: %w", err) } - mas, err := miner.Load(a.StateManager.ChainStore().Store(ctx), act) + mas, err := miner.Load(a.StateManager.ChainStore().ActorStore(ctx), act) if err != nil { return false, xerrors.Errorf("failed to load miner actor state: %w", err) } @@ -1216,7 +1216,7 @@ func (a *StateAPI) StateVerifierStatus(ctx context.Context, addr address.Address return nil, err } - vrs, err := verifreg.Load(a.StateManager.ChainStore().Store(ctx), act) + vrs, err := verifreg.Load(a.StateManager.ChainStore().ActorStore(ctx), act) if err != nil { return nil, xerrors.Errorf("failed to load verified registry state: %w", err) } @@ -1247,7 +1247,7 @@ func (m *StateModule) StateVerifiedClientStatus(ctx context.Context, addr addres return nil, err } - vrs, err := verifreg.Load(m.StateManager.ChainStore().Store(ctx), act) + vrs, err := verifreg.Load(m.StateManager.ChainStore().ActorStore(ctx), act) if err != nil { return nil, xerrors.Errorf("failed to load verified registry state: %w", err) } @@ -1269,7 +1269,7 @@ func (a *StateAPI) StateVerifiedRegistryRootKey(ctx context.Context, tsk types.T return address.Undef, err } - vst, err := verifreg.Load(a.StateManager.ChainStore().Store(ctx), vact) + vst, err := verifreg.Load(a.StateManager.ChainStore().ActorStore(ctx), vact) if err != nil { return address.Undef, err } @@ -1298,12 +1298,12 @@ func (m *StateModule) StateDealProviderCollateralBounds(ctx context.Context, siz return api.DealCollateralBounds{}, xerrors.Errorf("failed to load reward actor: %w", err) } - pst, err := power.Load(m.StateManager.ChainStore().Store(ctx), pact) + pst, err := power.Load(m.StateManager.ChainStore().ActorStore(ctx), pact) if err != nil { 
return api.DealCollateralBounds{}, xerrors.Errorf("failed to load power actor state: %w", err) } - rst, err := reward.Load(m.StateManager.ChainStore().Store(ctx), ract) + rst, err := reward.Load(m.StateManager.ChainStore().ActorStore(ctx), ract) if err != nil { return api.DealCollateralBounds{}, xerrors.Errorf("failed to load reward actor state: %w", err) } diff --git a/node/impl/storminer.go b/node/impl/storminer.go index aedf93530cb..cde168bea4f 100644 --- a/node/impl/storminer.go +++ b/node/impl/storminer.go @@ -121,8 +121,30 @@ func (sm *StorageMinerAPI) ActorSectorSize(ctx context.Context, addr address.Add return mi.SectorSize, nil } -func (sm *StorageMinerAPI) PledgeSector(ctx context.Context) error { - return sm.Miner.PledgeSector() +func (sm *StorageMinerAPI) PledgeSector(ctx context.Context) (abi.SectorID, error) { + sr, err := sm.Miner.PledgeSector(ctx) + if err != nil { + return abi.SectorID{}, err + } + + // wait for the sector to enter the Packing state + // TODO: instead of polling implement some pubsub-type thing in storagefsm + for { + info, err := sm.Miner.GetSectorInfo(sr.ID.Number) + if err != nil { + return abi.SectorID{}, xerrors.Errorf("getting pledged sector info: %w", err) + } + + if info.State != sealing.UndefinedSectorState { + return sr.ID, nil + } + + select { + case <-time.After(10 * time.Millisecond): + case <-ctx.Done(): + return abi.SectorID{}, ctx.Err() + } + } } func (sm *StorageMinerAPI) SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (api.SectorInfo, error) { @@ -219,6 +241,10 @@ func (sm *StorageMinerAPI) SectorsList(context.Context) ([]abi.SectorNumber, err out := make([]abi.SectorNumber, len(sectors)) for i, sector := range sectors { + if sector.State == sealing.UndefinedSectorState { + continue // sector ID not set yet + } + out[i] = sector.SectorNumber } return out, nil diff --git a/node/modules/blockstore.go b/node/modules/blockstore.go new file mode 100644 index 00000000000..c1c52fafe75 --- 
/dev/null +++ b/node/modules/blockstore.go @@ -0,0 +1,130 @@ +package modules + +import ( + "context" + "io" + "os" + "path/filepath" + + bstore "github.com/ipfs/go-ipfs-blockstore" + "go.uber.org/fx" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/blockstore" + badgerbs "github.com/filecoin-project/lotus/blockstore/badger" + "github.com/filecoin-project/lotus/blockstore/splitstore" + "github.com/filecoin-project/lotus/node/config" + "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/filecoin-project/lotus/node/modules/helpers" + "github.com/filecoin-project/lotus/node/repo" +) + +// UniversalBlockstore returns a single universal blockstore that stores both +// chain data and state data. It can be backed by a blockstore directly +// (e.g. Badger), or by a Splitstore. +func UniversalBlockstore(lc fx.Lifecycle, mctx helpers.MetricsCtx, r repo.LockedRepo) (dtypes.UniversalBlockstore, error) { + bs, err := r.Blockstore(helpers.LifecycleCtx(mctx, lc), repo.UniversalBlockstore) + if err != nil { + return nil, err + } + if c, ok := bs.(io.Closer); ok { + lc.Append(fx.Hook{ + OnStop: func(_ context.Context) error { + return c.Close() + }, + }) + } + return bs, err +} + +func BadgerHotBlockstore(lc fx.Lifecycle, r repo.LockedRepo) (dtypes.HotBlockstore, error) { + path, err := r.SplitstorePath() + if err != nil { + return nil, err + } + + path = filepath.Join(path, "hot.badger") + if err := os.MkdirAll(path, 0755); err != nil { + return nil, err + } + + opts, err := repo.BadgerBlockstoreOptions(repo.HotBlockstore, path, r.Readonly()) + if err != nil { + return nil, err + } + + bs, err := badgerbs.Open(opts) + if err != nil { + return nil, err + } + + lc.Append(fx.Hook{ + OnStop: func(_ context.Context) error { + return bs.Close() + }}) + + return bs, nil +} + +func SplitBlockstore(cfg *config.Chainstore) func(lc fx.Lifecycle, r repo.LockedRepo, ds dtypes.MetadataDS, cold dtypes.UniversalBlockstore, hot dtypes.HotBlockstore) 
(dtypes.SplitBlockstore, error) { + return func(lc fx.Lifecycle, r repo.LockedRepo, ds dtypes.MetadataDS, cold dtypes.UniversalBlockstore, hot dtypes.HotBlockstore) (dtypes.SplitBlockstore, error) { + path, err := r.SplitstorePath() + if err != nil { + return nil, err + } + + cfg := &splitstore.Config{ + TrackingStoreType: cfg.Splitstore.TrackingStoreType, + MarkSetType: cfg.Splitstore.MarkSetType, + EnableFullCompaction: cfg.Splitstore.EnableFullCompaction, + EnableGC: cfg.Splitstore.EnableGC, + Archival: cfg.Splitstore.Archival, + } + ss, err := splitstore.Open(path, ds, hot, cold, cfg) + if err != nil { + return nil, err + } + lc.Append(fx.Hook{ + OnStop: func(context.Context) error { + return ss.Close() + }, + }) + + return ss, err + } +} + +func StateFlatBlockstore(_ fx.Lifecycle, _ helpers.MetricsCtx, bs dtypes.UniversalBlockstore) (dtypes.StateBlockstore, error) { + return bs, nil +} + +func StateSplitBlockstore(_ fx.Lifecycle, _ helpers.MetricsCtx, bs dtypes.SplitBlockstore) (dtypes.StateBlockstore, error) { + return bs, nil +} + +func ChainFlatBlockstore(_ fx.Lifecycle, _ helpers.MetricsCtx, bs dtypes.UniversalBlockstore) (dtypes.ChainBlockstore, error) { + return bs, nil +} + +func ChainSplitBlockstore(_ fx.Lifecycle, _ helpers.MetricsCtx, bs dtypes.SplitBlockstore) (dtypes.ChainBlockstore, error) { + return bs, nil +} + +func FallbackChainBlockstore(cbs dtypes.ChainBlockstore) dtypes.ChainBlockstore { + return &blockstore.FallbackStore{Blockstore: cbs} +} + +func FallbackStateBlockstore(sbs dtypes.StateBlockstore) dtypes.StateBlockstore { + return &blockstore.FallbackStore{Blockstore: sbs} +} + +func InitFallbackBlockstores(cbs dtypes.ChainBlockstore, sbs dtypes.StateBlockstore, rem dtypes.ChainBitswap) error { + for _, bs := range []bstore.Blockstore{cbs, sbs} { + if fbs, ok := bs.(*blockstore.FallbackStore); ok { + fbs.SetFallback(rem.GetBlock) + continue + } + return xerrors.Errorf("expected a FallbackStore") + } + return nil +} diff --git 
a/node/modules/chain.go b/node/modules/chain.go index 782e0b32f02..ffdf3aa3a2d 100644 --- a/node/modules/chain.go +++ b/node/modules/chain.go @@ -1,25 +1,20 @@ package modules import ( - "bytes" "context" - "os" "time" "github.com/ipfs/go-bitswap" "github.com/ipfs/go-bitswap/network" "github.com/ipfs/go-blockservice" - "github.com/ipfs/go-datastore" - "github.com/ipld/go-car" "github.com/libp2p/go-libp2p-core/host" "github.com/libp2p/go-libp2p-core/routing" pubsub "github.com/libp2p/go-libp2p-pubsub" "go.uber.org/fx" "golang.org/x/xerrors" - "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" - "github.com/filecoin-project/lotus/journal" - + "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/blockstore/splitstore" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain" "github.com/filecoin-project/lotus/chain/beacon" @@ -28,17 +23,15 @@ import ( "github.com/filecoin-project/lotus/chain/messagepool" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" - "github.com/filecoin-project/lotus/lib/blockstore" - "github.com/filecoin-project/lotus/lib/bufbstore" - "github.com/filecoin-project/lotus/lib/timedbs" + "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" + "github.com/filecoin-project/lotus/journal" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/modules/helpers" - "github.com/filecoin-project/lotus/node/repo" ) -func ChainBitswap(mctx helpers.MetricsCtx, lc fx.Lifecycle, host host.Host, rt routing.Routing, bs dtypes.ChainBlockstore) dtypes.ChainBitswap { +// ChainBitswap uses a blockstore that bypasses all caches. 
+func ChainBitswap(mctx helpers.MetricsCtx, lc fx.Lifecycle, host host.Host, rt routing.Routing, bs dtypes.ExposedBlockstore) dtypes.ChainBitswap { // prefix protocol for chain bitswap // (so bitswap uses /chain/ipfs/bitswap/1.0.0 internally for chain sync stuff) bitswapNetwork := network.NewFromIpfsHost(host, rt, network.Prefix("/chain")) @@ -46,10 +39,10 @@ func ChainBitswap(mctx helpers.MetricsCtx, lc fx.Lifecycle, host host.Host, rt r // Write all incoming bitswap blocks into a temporary blockstore for two // block times. If they validate, they'll be persisted later. - cache := timedbs.NewTimedCacheBS(2 * time.Duration(build.BlockDelaySecs) * time.Second) + cache := blockstore.NewTimedCacheBlockstore(2 * time.Duration(build.BlockDelaySecs) * time.Second) lc.Append(fx.Hook{OnStop: cache.Stop, OnStart: cache.Start}) - bitswapBs := bufbstore.NewTieredBstore(bs, cache) + bitswapBs := blockstore.NewTieredBstore(bs, cache) // Use just exch.Close(), closing the context is not needed exch := bitswap.New(mctx, bitswapNetwork, bitswapBs, bitswapOptions...) 
@@ -62,6 +55,10 @@ func ChainBitswap(mctx helpers.MetricsCtx, lc fx.Lifecycle, host host.Host, rt r return exch } +func ChainBlockService(bs dtypes.ExposedBlockstore, rem dtypes.ChainBitswap) dtypes.ChainBlockService { + return blockservice.New(bs, rem) +} + func MessagePool(lc fx.Lifecycle, sm *stmgr.StateManager, ps *pubsub.PubSub, ds dtypes.MetadataDS, nn dtypes.NetworkName, j journal.Journal) (*messagepool.MessagePool, error) { mpp := messagepool.NewProvider(sm, ps) mp, err := messagepool.New(mpp, ds, nn, j) @@ -76,49 +73,26 @@ func MessagePool(lc fx.Lifecycle, sm *stmgr.StateManager, ps *pubsub.PubSub, ds return mp, nil } -func ChainRawBlockstore(lc fx.Lifecycle, mctx helpers.MetricsCtx, r repo.LockedRepo) (dtypes.ChainRawBlockstore, error) { - bs, err := r.Blockstore(helpers.LifecycleCtx(mctx, lc), repo.BlockstoreChain) - if err != nil { - return nil, err - } - - // TODO potentially replace this cached blockstore by a CBOR cache. - cbs, err := blockstore.CachedBlockstore(helpers.LifecycleCtx(mctx, lc), bs, blockstore.DefaultCacheOpts()) - if err != nil { - return nil, err - } - - return cbs, nil -} - -func ChainBlockService(bs dtypes.ChainRawBlockstore, rem dtypes.ChainBitswap) dtypes.ChainBlockService { - return blockservice.New(bs, rem) -} - -func FallbackChainBlockstore(rbs dtypes.ChainRawBlockstore) dtypes.ChainBlockstore { - return &blockstore.FallbackStore{ - Blockstore: rbs, - } -} - -func SetupFallbackBlockstore(cbs dtypes.ChainBlockstore, rem dtypes.ChainBitswap) error { - fbs, ok := cbs.(*blockstore.FallbackStore) - if !ok { - return xerrors.Errorf("expected a FallbackStore") - } - - fbs.SetFallback(rem.GetBlock) - return nil -} - -func ChainStore(lc fx.Lifecycle, bs dtypes.ChainBlockstore, lbs dtypes.ChainRawBlockstore, ds dtypes.MetadataDS, syscalls vm.SyscallBuilder, j journal.Journal) *store.ChainStore { - chain := store.NewChainStore(bs, lbs, ds, syscalls, j) +func ChainStore(lc fx.Lifecycle, cbs dtypes.ChainBlockstore, sbs 
dtypes.StateBlockstore, ds dtypes.MetadataDS, basebs dtypes.BaseBlockstore, syscalls vm.SyscallBuilder, j journal.Journal) *store.ChainStore { + chain := store.NewChainStore(cbs, sbs, ds, syscalls, j) if err := chain.Load(); err != nil { log.Warnf("loading chain state from disk: %s", err) } + var startHook func(context.Context) error + if ss, ok := basebs.(*splitstore.SplitStore); ok { + startHook = func(_ context.Context) error { + err := ss.Start(chain) + if err != nil { + err = xerrors.Errorf("error starting splitstore: %w", err) + } + return err + } + } + lc.Append(fx.Hook{ + OnStart: startHook, OnStop: func(_ context.Context) error { return chain.Close() }, @@ -127,65 +101,6 @@ func ChainStore(lc fx.Lifecycle, bs dtypes.ChainBlockstore, lbs dtypes.ChainRawB return chain } -func ErrorGenesis() Genesis { - return func() (header *types.BlockHeader, e error) { - return nil, xerrors.New("No genesis block provided, provide the file with 'lotus daemon --genesis=[genesis file]'") - } -} - -func LoadGenesis(genBytes []byte) func(dtypes.ChainBlockstore) Genesis { - return func(bs dtypes.ChainBlockstore) Genesis { - return func() (header *types.BlockHeader, e error) { - c, err := car.LoadCar(bs, bytes.NewReader(genBytes)) - if err != nil { - return nil, xerrors.Errorf("loading genesis car file failed: %w", err) - } - if len(c.Roots) != 1 { - return nil, xerrors.New("expected genesis file to have one root") - } - root, err := bs.Get(c.Roots[0]) - if err != nil { - return nil, err - } - - h, err := types.DecodeBlock(root.RawData()) - if err != nil { - return nil, xerrors.Errorf("decoding block failed: %w", err) - } - return h, nil - } - } -} - -func DoSetGenesis(_ dtypes.AfterGenesisSet) {} - -func SetGenesis(cs *store.ChainStore, g Genesis) (dtypes.AfterGenesisSet, error) { - genFromRepo, err := cs.GetGenesis() - if err == nil { - if os.Getenv("LOTUS_SKIP_GENESIS_CHECK") != "_yes_" { - expectedGenesis, err := g() - if err != nil { - return dtypes.AfterGenesisSet{}, 
xerrors.Errorf("getting expected genesis failed: %w", err) - } - - if genFromRepo.Cid() != expectedGenesis.Cid() { - return dtypes.AfterGenesisSet{}, xerrors.Errorf("genesis in the repo is not the one expected by this version of Lotus!") - } - } - return dtypes.AfterGenesisSet{}, nil // already set, noop - } - if err != datastore.ErrNotFound { - return dtypes.AfterGenesisSet{}, xerrors.Errorf("getting genesis block failed: %w", err) - } - - genesis, err := g() - if err != nil { - return dtypes.AfterGenesisSet{}, xerrors.Errorf("genesis func failed: %w", err) - } - - return dtypes.AfterGenesisSet{}, cs.SetGenesis(genesis) -} - func NetworkName(mctx helpers.MetricsCtx, lc fx.Lifecycle, cs *store.ChainStore, us stmgr.UpgradeSchedule, _ dtypes.AfterGenesisSet) (dtypes.NetworkName, error) { if !build.Devnet { return "testnetnet", nil diff --git a/node/modules/client.go b/node/modules/client.go index 677fe690839..da6a4cd8315 100644 --- a/node/modules/client.go +++ b/node/modules/client.go @@ -30,9 +30,9 @@ import ( "github.com/ipfs/go-datastore/namespace" "github.com/libp2p/go-libp2p-core/host" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/market" "github.com/filecoin-project/lotus/journal" - "github.com/filecoin-project/lotus/lib/blockstore" "github.com/filecoin-project/lotus/markets" marketevents "github.com/filecoin-project/lotus/markets/loggers" "github.com/filecoin-project/lotus/markets/retrievaladapter" @@ -83,7 +83,7 @@ func ClientMultiDatastore(lc fx.Lifecycle, mctx helpers.MetricsCtx, r repo.Locke ctx := helpers.LifecycleCtx(mctx, lc) ds, err := r.Datastore(ctx, "/client") if err != nil { - return nil, xerrors.Errorf("getting datastore out of reop: %w", err) + return nil, xerrors.Errorf("getting datastore out of repo: %w", err) } mds, err := multistore.NewMultiDstore(ds) diff --git a/node/modules/dtypes/storage.go b/node/modules/dtypes/storage.go index 05b8309209e..216ccc1b186 100644 --- 
a/node/modules/dtypes/storage.go +++ b/node/modules/dtypes/storage.go @@ -14,24 +14,55 @@ import ( "github.com/filecoin-project/go-fil-markets/piecestore" "github.com/filecoin-project/go-statestore" - "github.com/filecoin-project/lotus/lib/blockstore" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/node/repo/importmgr" "github.com/filecoin-project/lotus/node/repo/retrievalstoremgr" ) -// MetadataDS stores metadata -// dy default it's namespaced under /metadata in main repo datastore +// MetadataDS stores metadata. By default it's namespaced under /metadata in +// main repo datastore. type MetadataDS datastore.Batching -type ChainRawBlockstore blockstore.Blockstore -type ChainBlockstore blockstore.Blockstore // optionally bitswap backed +type ( + // UniversalBlockstore is the cold blockstore. + UniversalBlockstore blockstore.Blockstore + + // HotBlockstore is the Hot blockstore abstraction for the splitstore + HotBlockstore blockstore.Blockstore + + // SplitBlockstore is the hot/cold blockstore that sits on top of the cold (universal) blockstore. + SplitBlockstore blockstore.Blockstore + + // BaseBlockstore is the underlying blockstore, exposed as a distinct type for dependency injection. + BaseBlockstore blockstore.Blockstore + + // ChainBlockstore is a blockstore to store chain data (tipsets, blocks, + // messages). It is physically backed by the UniversalBlockstore, but it + // has a cache on top that is specially tuned for chain data access + // patterns. + ChainBlockstore blockstore.Blockstore + + // StateBlockstore is a blockstore to store state data (state tree). It is + // physically backed by the UniversalBlockstore, but it has a cache on + // top that is specially tuned for state data access patterns. + StateBlockstore blockstore.Blockstore + + // ExposedBlockstore is a blockstore that interfaces directly with the + // network or with users, from which queries are served, and where incoming + // data is deposited.
For security reasons, this store is disconnected from + // any internal caches. If blocks are added to this store in a way that + // could render caches dirty (e.g. a block is added when an existence cache + // holds a 'false' for that block), the process should signal so by calling + // blockstore.AllCaches.Dirty(cid). + ExposedBlockstore blockstore.Blockstore +) type ChainBitswap exchange.Interface type ChainBlockService bserv.BlockService type ClientMultiDstore *multistore.MultiStore type ClientImportMgr *importmgr.Mgr -type ClientBlockstore blockstore.Blockstore +type ClientBlockstore blockstore.BasicBlockstore type ClientDealStore *statestore.StateStore type ClientRequestValidator *requestvalidation.UnifiedRequestValidator type ClientDatastore datastore.Batching @@ -50,6 +81,6 @@ type ProviderRequestValidator *requestvalidation.UnifiedRequestValidator type ProviderDataTransfer datatransfer.Manager type StagingDAG format.DAGService -type StagingBlockstore blockstore.Blockstore +type StagingBlockstore blockstore.BasicBlockstore type StagingGraphsync graphsync.GraphExchange type StagingMultiDstore *multistore.MultiStore diff --git a/node/modules/genesis.go b/node/modules/genesis.go new file mode 100644 index 00000000000..43443b125a8 --- /dev/null +++ b/node/modules/genesis.go @@ -0,0 +1,73 @@ +package modules + +import ( + "bytes" + "os" + + "github.com/ipfs/go-datastore" + "github.com/ipld/go-car" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/node/modules/dtypes" +) + +func ErrorGenesis() Genesis { + return func() (header *types.BlockHeader, e error) { + return nil, xerrors.New("No genesis block provided, provide the file with 'lotus daemon --genesis=[genesis file]'") + } +} + +func LoadGenesis(genBytes []byte) func(dtypes.ChainBlockstore) Genesis { + return func(bs dtypes.ChainBlockstore) Genesis { + return func() (header *types.BlockHeader, e 
error) { + c, err := car.LoadCar(bs, bytes.NewReader(genBytes)) + if err != nil { + return nil, xerrors.Errorf("loading genesis car file failed: %w", err) + } + if len(c.Roots) != 1 { + return nil, xerrors.New("expected genesis file to have one root") + } + root, err := bs.Get(c.Roots[0]) + if err != nil { + return nil, err + } + + h, err := types.DecodeBlock(root.RawData()) + if err != nil { + return nil, xerrors.Errorf("decoding block failed: %w", err) + } + return h, nil + } + } +} + +func DoSetGenesis(_ dtypes.AfterGenesisSet) {} + +func SetGenesis(cs *store.ChainStore, g Genesis) (dtypes.AfterGenesisSet, error) { + genFromRepo, err := cs.GetGenesis() + if err == nil { + if os.Getenv("LOTUS_SKIP_GENESIS_CHECK") != "_yes_" { + expectedGenesis, err := g() + if err != nil { + return dtypes.AfterGenesisSet{}, xerrors.Errorf("getting expected genesis failed: %w", err) + } + + if genFromRepo.Cid() != expectedGenesis.Cid() { + return dtypes.AfterGenesisSet{}, xerrors.Errorf("genesis in the repo is not the one expected by this version of Lotus!") + } + } + return dtypes.AfterGenesisSet{}, nil // already set, noop + } + if err != datastore.ErrNotFound { + return dtypes.AfterGenesisSet{}, xerrors.Errorf("getting genesis block failed: %w", err) + } + + genesis, err := g() + if err != nil { + return dtypes.AfterGenesisSet{}, xerrors.Errorf("genesis func failed: %w", err) + } + + return dtypes.AfterGenesisSet{}, cs.SetGenesis(genesis) +} diff --git a/node/modules/graphsync.go b/node/modules/graphsync.go index bbb039957e1..a7f62db76ce 100644 --- a/node/modules/graphsync.go +++ b/node/modules/graphsync.go @@ -14,8 +14,8 @@ import ( ) // Graphsync creates a graphsync instance from the given loader and storer -func Graphsync(parallelTransfers uint64) func(mctx helpers.MetricsCtx, lc fx.Lifecycle, r repo.LockedRepo, clientBs dtypes.ClientBlockstore, chainBs dtypes.ChainBlockstore, h host.Host) (dtypes.Graphsync, error) { - return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, r 
repo.LockedRepo, clientBs dtypes.ClientBlockstore, chainBs dtypes.ChainBlockstore, h host.Host) (dtypes.Graphsync, error) { +func Graphsync(parallelTransfers uint64) func(mctx helpers.MetricsCtx, lc fx.Lifecycle, r repo.LockedRepo, clientBs dtypes.ClientBlockstore, chainBs dtypes.ExposedBlockstore, h host.Host) (dtypes.Graphsync, error) { + return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, r repo.LockedRepo, clientBs dtypes.ClientBlockstore, chainBs dtypes.ExposedBlockstore, h host.Host) (dtypes.Graphsync, error) { graphsyncNetwork := gsnet.NewFromLibp2pHost(h) loader := storeutil.LoaderForBlockstore(clientBs) storer := storeutil.StorerForBlockstore(clientBs) diff --git a/node/modules/ipfsclient.go b/node/modules/ipfsclient.go index a2d5de88d84..24c5c96783e 100644 --- a/node/modules/ipfsclient.go +++ b/node/modules/ipfsclient.go @@ -6,8 +6,7 @@ import ( "github.com/multiformats/go-multiaddr" - "github.com/filecoin-project/lotus/lib/blockstore" - "github.com/filecoin-project/lotus/lib/ipfsbstore" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/modules/helpers" ) @@ -19,16 +18,16 @@ import ( func IpfsClientBlockstore(ipfsMaddr string, onlineMode bool) func(helpers.MetricsCtx, fx.Lifecycle, dtypes.ClientImportMgr) (dtypes.ClientBlockstore, error) { return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, localStore dtypes.ClientImportMgr) (dtypes.ClientBlockstore, error) { var err error - var ipfsbs blockstore.Blockstore + var ipfsbs blockstore.BasicBlockstore if ipfsMaddr != "" { var ma multiaddr.Multiaddr ma, err = multiaddr.NewMultiaddr(ipfsMaddr) if err != nil { return nil, xerrors.Errorf("parsing ipfs multiaddr: %w", err) } - ipfsbs, err = ipfsbstore.NewRemoteIpfsBstore(helpers.LifecycleCtx(mctx, lc), ma, onlineMode) + ipfsbs, err = blockstore.NewRemoteIPFSBlockstore(helpers.LifecycleCtx(mctx, lc), ma, onlineMode) } else { - ipfsbs, err = 
ipfsbstore.NewIpfsBstore(helpers.LifecycleCtx(mctx, lc), onlineMode) + ipfsbs, err = blockstore.NewLocalIPFSBlockstore(helpers.LifecycleCtx(mctx, lc), onlineMode) } if err != nil { return nil, xerrors.Errorf("constructing ipfs blockstore: %w", err) diff --git a/node/modules/lp2p/pubsub.go b/node/modules/lp2p/pubsub.go index 9724eb3b4cf..748167d95f3 100644 --- a/node/modules/lp2p/pubsub.go +++ b/node/modules/lp2p/pubsub.go @@ -3,6 +3,7 @@ package lp2p import ( "context" "encoding/json" + "net" "time" host "github.com/libp2p/go-libp2p-core/host" @@ -198,6 +199,16 @@ func GossipSub(in GossipIn) (service *pubsub.PubSub, err error) { drandTopics = append(drandTopics, topic) } + // IP colocation whitelist + var ipcoloWhitelist []*net.IPNet + for _, cidr := range in.Cfg.IPColocationWhitelist { + _, ipnet, err := net.ParseCIDR(cidr) + if err != nil { + return nil, xerrors.Errorf("error parsing IPColocation subnet %s: %w", cidr, err) + } + ipcoloWhitelist = append(ipcoloWhitelist, ipnet) + } + options := []pubsub.Option{ // Gossipsubv1.1 configuration pubsub.WithFloodPublish(true), @@ -228,8 +239,7 @@ func GossipSub(in GossipIn) (service *pubsub.PubSub, err error) { // This sets the IP colocation threshold to 5 peers before we apply penalties IPColocationFactorThreshold: 5, IPColocationFactorWeight: -100, - // TODO we want to whitelist IPv6 /64s that belong to datacenters etc - // IPColocationFactorWhitelist: map[string]struct{}{}, + IPColocationFactorWhitelist: ipcoloWhitelist, // P7: behavioural penalties, decay after 1hr BehaviourPenaltyThreshold: 6, diff --git a/node/modules/rpcstatemanager.go b/node/modules/rpcstatemanager.go index 7d7b9243798..b14e1dc8008 100644 --- a/node/modules/rpcstatemanager.go +++ b/node/modules/rpcstatemanager.go @@ -7,7 +7,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/apibstore" + "github.com/filecoin-project/lotus/blockstore" 
"github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin/paych" "github.com/filecoin-project/lotus/chain/stmgr" @@ -21,7 +21,7 @@ type RPCStateManager struct { } func NewRPCStateManager(api api.GatewayAPI) *RPCStateManager { - cstore := cbor.NewCborStore(apibstore.NewAPIBlockstore(api)) + cstore := cbor.NewCborStore(blockstore.NewAPIBlockstore(api)) return &RPCStateManager{gapi: api, cstore: cstore} } diff --git a/node/modules/storage.go b/node/modules/storage.go index c0e9192f57e..cb30eb8c29d 100644 --- a/node/modules/storage.go +++ b/node/modules/storage.go @@ -2,8 +2,10 @@ package modules import ( "context" + "path/filepath" "go.uber.org/fx" + "golang.org/x/xerrors" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/lib/backupds" @@ -28,12 +30,30 @@ func KeyStore(lr repo.LockedRepo) (types.KeyStore, error) { return lr.KeyStore() } -func Datastore(lc fx.Lifecycle, mctx helpers.MetricsCtx, r repo.LockedRepo) (dtypes.MetadataDS, error) { - ctx := helpers.LifecycleCtx(mctx, lc) - mds, err := r.Datastore(ctx, "/metadata") - if err != nil { - return nil, err - } +func Datastore(disableLog bool) func(lc fx.Lifecycle, mctx helpers.MetricsCtx, r repo.LockedRepo) (dtypes.MetadataDS, error) { + return func(lc fx.Lifecycle, mctx helpers.MetricsCtx, r repo.LockedRepo) (dtypes.MetadataDS, error) { + ctx := helpers.LifecycleCtx(mctx, lc) + mds, err := r.Datastore(ctx, "/metadata") + if err != nil { + return nil, err + } + + var logdir string + if !disableLog { + logdir = filepath.Join(r.Path(), "kvlog/metadata") + } + + bds, err := backupds.Wrap(mds, logdir) + if err != nil { + return nil, xerrors.Errorf("opening backupds: %w", err) + } - return backupds.Wrap(mds), nil + lc.Append(fx.Hook{ + OnStop: func(_ context.Context) error { + return bds.CloseLog() + }, + }) + + return bds, nil + } } diff --git a/node/modules/storageminer.go b/node/modules/storageminer.go index ba38d501bdc..d89474eeeba 
100644 --- a/node/modules/storageminer.go +++ b/node/modules/storageminer.go @@ -56,6 +56,7 @@ import ( "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" lapi "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" @@ -63,7 +64,6 @@ import ( "github.com/filecoin-project/lotus/chain/gen/slashfilter" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/journal" - "github.com/filecoin-project/lotus/lib/blockstore" "github.com/filecoin-project/lotus/markets" marketevents "github.com/filecoin-project/lotus/markets/loggers" "github.com/filecoin-project/lotus/markets/retrievaladapter" @@ -157,6 +157,9 @@ func AddressSelector(addrConf *config.MinerAddressConfig) func() (*storage.Addre return as, nil } + as.DisableOwnerFallback = addrConf.DisableOwnerFallback + as.DisableWorkerFallback = addrConf.DisableWorkerFallback + for _, s := range addrConf.PreCommitControl { addr, err := address.NewFromString(s) if err != nil { @@ -175,6 +178,15 @@ func AddressSelector(addrConf *config.MinerAddressConfig) func() (*storage.Addre as.CommitControl = append(as.CommitControl, addr) } + for _, s := range addrConf.TerminateControl { + addr, err := address.NewFromString(s) + if err != nil { + return nil, xerrors.Errorf("parsing terminate control address: %w", err) + } + + as.TerminateControl = append(as.TerminateControl, addr) + } + return as, nil } } @@ -392,7 +404,7 @@ func StagingBlockstore(lc fx.Lifecycle, mctx helpers.MetricsCtx, r repo.LockedRe return nil, err } - return blockstore.NewBlockstore(stagingds), nil + return blockstore.FromDatastore(stagingds), nil } // StagingDAG is a DAGService for the StagingBlockstore diff --git a/node/repo/blockstore_opts.go b/node/repo/blockstore_opts.go index 775b41266ba..1705217d304 100644 --- 
a/node/repo/blockstore_opts.go +++ b/node/repo/blockstore_opts.go @@ -1,14 +1,10 @@ package repo -import badgerbs "github.com/filecoin-project/lotus/lib/blockstore/badger" +import badgerbs "github.com/filecoin-project/lotus/blockstore/badger" // BadgerBlockstoreOptions returns the badger options to apply for the provided // domain. func BadgerBlockstoreOptions(domain BlockstoreDomain, path string, readonly bool) (badgerbs.Options, error) { - if domain != BlockstoreChain { - return badgerbs.Options{}, ErrInvalidBlockstoreDomain - } - opts := badgerbs.DefaultOptions(path) // Due to legacy usage of blockstore.Blockstore, over a datastore, all diff --git a/node/repo/fsrepo.go b/node/repo/fsrepo.go index b57fb64afcb..d96a5e64513 100644 --- a/node/repo/fsrepo.go +++ b/node/repo/fsrepo.go @@ -13,7 +13,7 @@ import ( "sync" "github.com/BurntSushi/toml" - "github.com/filecoin-project/lotus/lib/blockstore" + "github.com/ipfs/go-datastore" fslock "github.com/ipfs/go-fs-lock" logging "github.com/ipfs/go-log/v2" @@ -22,10 +22,10 @@ import ( "github.com/multiformats/go-multiaddr" "golang.org/x/xerrors" + "github.com/filecoin-project/lotus/blockstore" + badgerbs "github.com/filecoin-project/lotus/blockstore/badger" "github.com/filecoin-project/lotus/extern/sector-storage/fsutil" "github.com/filecoin-project/lotus/extern/sector-storage/stores" - lblockstore "github.com/filecoin-project/lotus/lib/blockstore" - badgerbs "github.com/filecoin-project/lotus/lib/blockstore/badger" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/node/config" @@ -264,11 +264,18 @@ type fsLockedRepo struct { bs blockstore.Blockstore bsErr error bsOnce sync.Once + ssPath string + ssErr error + ssOnce sync.Once storageLk sync.Mutex configLk sync.Mutex } +func (fsr *fsLockedRepo) Readonly() bool { + return fsr.readonly +} + func (fsr *fsLockedRepo) Path() string { return fsr.path } @@ -301,7 +308,7 @@ func (fsr *fsLockedRepo) Close() error { // Blockstore returns a blockstore 
for the provided data domain. func (fsr *fsLockedRepo) Blockstore(ctx context.Context, domain BlockstoreDomain) (blockstore.Blockstore, error) { - if domain != BlockstoreChain { + if domain != UniversalBlockstore { return nil, ErrInvalidBlockstoreDomain } @@ -325,12 +332,27 @@ func (fsr *fsLockedRepo) Blockstore(ctx context.Context, domain BlockstoreDomain fsr.bsErr = err return } - fsr.bs = lblockstore.WrapIDStore(bs) + fsr.bs = blockstore.WrapIDStore(bs) }) return fsr.bs, fsr.bsErr } +func (fsr *fsLockedRepo) SplitstorePath() (string, error) { + fsr.ssOnce.Do(func() { + path := fsr.join(filepath.Join(fsDatastore, "splitstore")) + + if err := os.MkdirAll(path, 0755); err != nil { + fsr.ssErr = err + return + } + + fsr.ssPath = path + }) + + return fsr.ssPath, fsr.ssErr +} + // join joins path elements with fsr.path func (fsr *fsLockedRepo) join(paths ...string) string { return filepath.Join(append([]string{fsr.path}, paths...)...) diff --git a/node/repo/importmgr/mgr.go b/node/repo/importmgr/mgr.go index 31991617add..936d9b60662 100644 --- a/node/repo/importmgr/mgr.go +++ b/node/repo/importmgr/mgr.go @@ -7,7 +7,7 @@ import ( "golang.org/x/xerrors" "github.com/filecoin-project/go-multistore" - "github.com/filecoin-project/lotus/lib/blockstore" + "github.com/filecoin-project/lotus/blockstore" "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/namespace" ) @@ -16,7 +16,7 @@ type Mgr struct { mds *multistore.MultiStore ds datastore.Batching - Blockstore blockstore.Blockstore + Blockstore blockstore.BasicBlockstore } type Label string @@ -31,7 +31,7 @@ const ( func New(mds *multistore.MultiStore, ds datastore.Batching) *Mgr { return &Mgr{ mds: mds, - Blockstore: mds.MultiReadBlockstore(), + Blockstore: blockstore.Adapt(mds.MultiReadBlockstore()), ds: datastore.NewLogDatastore(namespace.Wrap(ds, datastore.NewKey("/stores")), "storess"), } diff --git a/node/repo/interface.go b/node/repo/interface.go index b58168ecfaa..b169ee5cc78 100644 --- 
a/node/repo/interface.go +++ b/node/repo/interface.go @@ -4,10 +4,10 @@ import ( "context" "errors" - "github.com/filecoin-project/lotus/lib/blockstore" "github.com/ipfs/go-datastore" "github.com/multiformats/go-multiaddr" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/extern/sector-storage/fsutil" "github.com/filecoin-project/lotus/extern/sector-storage/stores" @@ -18,11 +18,12 @@ import ( type BlockstoreDomain string const ( - // BlockstoreChain represents the blockstore domain for chain data. + // UniversalBlockstore represents the blockstore domain for all data. // Right now, this includes chain objects (tipsets, blocks, messages), as // well as state. In the future, they may get segregated into different // domains. - BlockstoreChain = BlockstoreDomain("chain") + UniversalBlockstore = BlockstoreDomain("universal") + HotBlockstore = BlockstoreDomain("hot") ) var ( @@ -63,6 +64,9 @@ type LockedRepo interface { // the lifecycle. Blockstore(ctx context.Context, domain BlockstoreDomain) (blockstore.Blockstore, error) + // SplitstorePath returns the path for the SplitStore + SplitstorePath() (string, error) + // Returns config in this repo Config() (interface{}, error) SetConfig(func(interface{})) error @@ -84,4 +88,7 @@ type LockedRepo interface { // Path returns absolute path of the repo Path() string + + // Readonly returns true if the repo is readonly + Readonly() bool } diff --git a/node/repo/memrepo.go b/node/repo/memrepo.go index a202c0b8017..00ea32b88b5 100644 --- a/node/repo/memrepo.go +++ b/node/repo/memrepo.go @@ -15,10 +15,10 @@ import ( "github.com/multiformats/go-multiaddr" "golang.org/x/xerrors" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/extern/sector-storage/fsutil" "github.com/filecoin-project/lotus/extern/sector-storage/stores" - "github.com/filecoin-project/lotus/lib/blockstore" 
"github.com/filecoin-project/lotus/node/config" ) @@ -161,7 +161,7 @@ func NewMemory(opts *MemRepoOptions) *MemRepo { return &MemRepo{ repoLock: make(chan struct{}, 1), - blockstore: blockstore.WrapIDStore(blockstore.NewTemporarySync()), + blockstore: blockstore.WrapIDStore(blockstore.NewMemorySync()), datastore: opts.Ds, configF: opts.ConfigF, keystore: opts.KeyStore, @@ -201,6 +201,10 @@ func (mem *MemRepo) Lock(t RepoType) (LockedRepo, error) { }, nil } +func (lmem *lockedMemRepo) Readonly() bool { + return false +} + func (lmem *lockedMemRepo) checkToken() error { lmem.RLock() defer lmem.RUnlock() @@ -246,12 +250,16 @@ func (lmem *lockedMemRepo) Datastore(_ context.Context, ns string) (datastore.Ba } func (lmem *lockedMemRepo) Blockstore(ctx context.Context, domain BlockstoreDomain) (blockstore.Blockstore, error) { - if domain != BlockstoreChain { + if domain != UniversalBlockstore { return nil, ErrInvalidBlockstoreDomain } return lmem.mem.blockstore, nil } +func (lmem *lockedMemRepo) SplitstorePath() (string, error) { + return ioutil.TempDir("", "splitstore.*") +} + func (lmem *lockedMemRepo) ListDatastores(ns string) ([]int64, error) { return nil, nil } diff --git a/node/repo/retrievalstoremgr/retrievalstoremgr.go b/node/repo/retrievalstoremgr/retrievalstoremgr.go index e791150d953..ba86ccee540 100644 --- a/node/repo/retrievalstoremgr/retrievalstoremgr.go +++ b/node/repo/retrievalstoremgr/retrievalstoremgr.go @@ -4,7 +4,7 @@ import ( "errors" "github.com/filecoin-project/go-multistore" - "github.com/filecoin-project/lotus/lib/blockstore" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/node/repo/importmgr" "github.com/ipfs/go-blockservice" offline "github.com/ipfs/go-ipfs-exchange-offline" @@ -73,13 +73,13 @@ func (mrs *multiStoreRetrievalStore) DAGService() ipldformat.DAGService { // BlockstoreRetrievalStoreManager manages a single blockstore as if it were multiple stores type BlockstoreRetrievalStoreManager struct { - bs 
blockstore.Blockstore + bs blockstore.BasicBlockstore } var _ RetrievalStoreManager = &BlockstoreRetrievalStoreManager{} // NewBlockstoreRetrievalStoreManager returns a new blockstore based RetrievalStoreManager -func NewBlockstoreRetrievalStoreManager(bs blockstore.Blockstore) RetrievalStoreManager { +func NewBlockstoreRetrievalStoreManager(bs blockstore.BasicBlockstore) RetrievalStoreManager { return &BlockstoreRetrievalStoreManager{ bs: bs, } diff --git a/node/repo/retrievalstoremgr/retrievalstoremgr_test.go b/node/repo/retrievalstoremgr/retrievalstoremgr_test.go index a848f62e2d9..0a44fa0729e 100644 --- a/node/repo/retrievalstoremgr/retrievalstoremgr_test.go +++ b/node/repo/retrievalstoremgr/retrievalstoremgr_test.go @@ -15,7 +15,7 @@ import ( "github.com/filecoin-project/go-multistore" - "github.com/filecoin-project/lotus/lib/blockstore" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/node/repo/importmgr" "github.com/filecoin-project/lotus/node/repo/retrievalstoremgr" ) @@ -71,7 +71,7 @@ func TestMultistoreRetrievalStoreManager(t *testing.T) { func TestBlockstoreRetrievalStoreManager(t *testing.T) { ctx := context.Background() ds := dss.MutexWrap(datastore.NewMapDatastore()) - bs := blockstore.NewBlockstore(ds) + bs := blockstore.FromDatastore(ds) retrievalStoreMgr := retrievalstoremgr.NewBlockstoreRetrievalStoreManager(bs) var stores []retrievalstoremgr.RetrievalStore var cids []cid.Cid diff --git a/storage/adapter_storage_miner.go b/storage/adapter_storage_miner.go index a648c9fcc4f..41d7461a811 100644 --- a/storage/adapter_storage_miner.go +++ b/storage/adapter_storage_miner.go @@ -18,7 +18,7 @@ import ( market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/apibstore" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors" 
"github.com/filecoin-project/lotus/chain/actors/builtin/market" @@ -188,7 +188,7 @@ func (s SealingAPIAdapter) StateSectorPreCommitInfo(ctx context.Context, maddr a return nil, xerrors.Errorf("handleSealFailed(%d): temp error: %+v", sectorNumber, err) } - stor := store.ActorStore(ctx, apibstore.NewAPIBlockstore(s.delegate)) + stor := store.ActorStore(ctx, blockstore.NewAPIBlockstore(s.delegate)) state, err := miner.Load(stor, act) if err != nil { diff --git a/storage/addresses.go b/storage/addresses.go index ad0c6d68369..a8e5e7101e2 100644 --- a/storage/addresses.go +++ b/storage/addresses.go @@ -40,7 +40,11 @@ func (as *AddressSelector) AddressFor(ctx context.Context, a addrSelectApi, mi m delete(defaultCtl, mi.Owner) delete(defaultCtl, mi.Worker) - for _, addr := range append(append([]address.Address{}, as.PreCommitControl...), as.CommitControl...) { + configCtl := append([]address.Address{}, as.PreCommitControl...) + configCtl = append(configCtl, as.CommitControl...) + configCtl = append(configCtl, as.TerminateControl...) 
+ + for _, addr := range configCtl { if addr.Protocol() != address.ID { var err error addr, err = a.StateLookupID(ctx, addr, types.EmptyTSK) @@ -57,7 +61,13 @@ func (as *AddressSelector) AddressFor(ctx context.Context, a addrSelectApi, mi m addrs = append(addrs, a) } } - addrs = append(addrs, mi.Owner, mi.Worker) + + if len(addrs) == 0 || !as.DisableWorkerFallback { + addrs = append(addrs, mi.Worker) + } + if !as.DisableOwnerFallback { + addrs = append(addrs, mi.Owner) + } return pickAddress(ctx, a, mi, goodFunds, minFunds, addrs) } @@ -91,7 +101,7 @@ func pickAddress(ctx context.Context, a addrSelectApi, mi miner.MinerInfo, goodF } } - log.Warnw("No address had enough funds to for full PoSt message Fee, selecting least bad address", "address", leastBad, "balance", types.FIL(bestAvail), "optimalFunds", types.FIL(goodFunds), "minFunds", types.FIL(minFunds)) + log.Warnw("No address had enough funds to for full message Fee, selecting least bad address", "address", leastBad, "balance", types.FIL(bestAvail), "optimalFunds", types.FIL(goodFunds), "minFunds", types.FIL(minFunds)) return leastBad, bestAvail, nil } diff --git a/storage/sealing.go b/storage/sealing.go index d07a14810db..8981c373866 100644 --- a/storage/sealing.go +++ b/storage/sealing.go @@ -8,6 +8,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-storage/storage" sealing "github.com/filecoin-project/lotus/extern/storage-sealing" ) @@ -34,8 +35,8 @@ func (m *Miner) GetSectorInfo(sid abi.SectorNumber) (sealing.SectorInfo, error) return m.sealing.GetSectorInfo(sid) } -func (m *Miner) PledgeSector() error { - return m.sealing.PledgeSector() +func (m *Miner) PledgeSector(ctx context.Context) (storage.SectorRef, error) { + return m.sealing.PledgeSector(ctx) } func (m *Miner) ForceSectorState(ctx context.Context, id abi.SectorNumber, state sealing.SectorState) error { diff --git a/testplans/lotus-soup/rfwp/chain_state.go 
b/testplans/lotus-soup/rfwp/chain_state.go index fe6d799a2bc..676dca03db6 100644 --- a/testplans/lotus-soup/rfwp/chain_state.go +++ b/testplans/lotus-soup/rfwp/chain_state.go @@ -14,7 +14,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/lotus/api/apibstore" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/api" @@ -699,7 +699,7 @@ func info(t *testkit.TestEnvironment, m *testkit.LotusMiner, maddr address.Addre i.FaultyBytes = types.BigMul(types.NewInt(nfaults), types.NewInt(uint64(mi.SectorSize))) } - stor := store.ActorStore(ctx, apibstore.NewAPIBlockstore(api)) + stor := store.ActorStore(ctx, blockstore.NewAPIBlockstore(api)) mas, err := miner.Load(stor, mact) if err != nil { return nil, err diff --git a/tools/packer/etc/motd b/tools/packer/etc/motd new file mode 100644 index 00000000000..78e97e1c6da --- /dev/null +++ b/tools/packer/etc/motd @@ -0,0 +1,57 @@ +Your lotus node is up and running! + +This image contains the two most important pieces of the lotus filecoin suite, the +daemon and the miner. The daemon is configured to download a snapshot and start +running. In fact, by the time you read this, the daemon may already be in sync. +Go ahead and make sure everything is working correctly with the following commands. + + + +To check if the daemon is running: + + systemctl status lotus-daemon + + + +To check if the daemon is in sync: + + lotus sync status + + **note: When starting lotus for the first time, it will download a chain snapshot. + This is a large download and will take several minutes to complete. During + this time, the lotus API will not be up yet. Give it time! You can see + progress by looking at the systemd journal. + + +To check if the daemon is connecting to other lotus nodes: + + lotus net peers + + + +No wallets are created by default.
You can view, create, and delete wallets with +the lotus command. On this image, lotus is running as the user `fc`. +Be careful, now. Don't delete a wallet with funds! + + sudo -E -u fc lotus wallet list + sudo -E -u fc lotus wallet new bls + + + +The lotus miner is also installed, but it's not running by default. If you have no +special disk or worker requirements, you can initialize the lotus-miner repo like this: + + sudo -E -u fc lotus-miner init -o + + + +You only need to do this once, after which, you can enable and start the miner. + + sudo systemctl enable lotus-miner + sudo systemctl start lotus-miner + + + +For more information, see https://docs.filecoin.io/ +Found a bug? let us know! https://github.com/filecoin-project/lotus +Chat with us on slack! https://filecoinproject.slack.com/archives/CEGN061C5 diff --git a/tools/packer/homedir/bashrc b/tools/packer/homedir/bashrc new file mode 100644 index 00000000000..8bda25044be --- /dev/null +++ b/tools/packer/homedir/bashrc @@ -0,0 +1,5 @@ +PS1="[\h \w] ⨎ " + +export PROMPT_DIRTRIM=1 +export LOTUS_PATH=/var/lib/lotus +export LOTUS_MINER_PATH=/var/lib/lotus-miner diff --git a/tools/packer/lotus.pkr.hcl b/tools/packer/lotus.pkr.hcl new file mode 100644 index 00000000000..b67e59f7691 --- /dev/null +++ b/tools/packer/lotus.pkr.hcl @@ -0,0 +1,100 @@ +variable "ci_workspace_bins" { + type = string + default = "./linux" +} + +variable "lotus_network" { + type = string + default = "mainnet" +} + +variable "git_tag" { + type = string + default = "" +} + +locals { + timestamp = regex_replace(timestamp(), "[- TZ:]", "") +} + +source "amazon-ebs" "lotus" { + ami_name = "lotus-${var.lotus_network}-${var.git_tag}-${local.timestamp}" + ami_regions = [ + "us-east-1", + "us-west-2", + ] + ami_groups = [ + # This causes the ami to be publicly-accessible.
+ "all", + ] + ami_description = "Lotus Filecoin AMI" + launch_block_device_mappings { + device_name = "/dev/sda1" + volume_size = 100 + delete_on_termination = true + } + + instance_type = "t2.micro" + source_ami_filter { + filters = { + name = "ubuntu-minimal/images/*ubuntu-focal-20.04-amd64-minimal*" + root-device-type = "ebs" + virtualization-type = "hvm" + } + most_recent = true + owners = ["099720109477"] + } + ssh_username = "ubuntu" +} + +source "digitalocean" "lotus" { + droplet_name = "lotus-${var.lotus_network}" + size = "s-1vcpu-1gb" + region = "nyc3" + image = "ubuntu-20-04-x64" + snapshot_name = "lotus-${var.lotus_network}-${var.git_tag}-${local.timestamp}" + ssh_username = "root" +} + +build { + sources = [ + "source.amazon-ebs.lotus", + "source.digitalocean.lotus", + ] + + # Lotus software (from CI workspace) + provisioner "file" { + source = "${var.ci_workspace_bins}/lotus" + destination = "lotus" + } + provisioner "file" { + source = "${var.ci_workspace_bins}/lotus-miner" + destination = "lotus-miner" + } + # First run script + provisioner "file" { + source = "./tools/packer/scripts/${var.lotus_network}/lotus-init.sh" + destination = "lotus-init.sh" + } + # Systemd service units. + provisioner "file" { + source = "./tools/packer/systemd/lotus-daemon.service" + destination = "lotus-daemon.service" + } + provisioner "file" { + source = "./tools/packer/systemd/lotus-miner.service" + destination = "lotus-miner.service" + } + provisioner "file" { + source = "./tools/packer/etc/motd" + destination = "motd" + } + provisioner "file" { + source = "./tools/packer/homedir/bashrc" + destination = ".bashrc" + } + # build it. 
+ provisioner "shell" { + script = "./tools/packer/setup.sh" + } +} diff --git a/tools/packer/scripts/calibrationnet/lotus-init.sh b/tools/packer/scripts/calibrationnet/lotus-init.sh new file mode 100755 index 00000000000..d68b3357cc8 --- /dev/null +++ b/tools/packer/scripts/calibrationnet/lotus-init.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + +# This script sets up an initial configuration for the lotus daemon and miner +# It will only run once. + +GATE="$LOTUS_PATH"/date_initialized + +# Don't init if already initialized. +if [ -f "$GATE" ]; then + echo lotus already initialized. + exit 0 +fi + +# Not importing snapshot on calibrationnet. +# +# echo importing minimal snapshot +# lotus daemon --import-snapshot https://fil-chain-snapshots-fallback.s3.amazonaws.com/mainnet/minimal_finality_stateroots_latest.car --halt-after-import + +# Block future inits +date > "$GATE" diff --git a/tools/packer/scripts/mainnet/lotus-init.sh b/tools/packer/scripts/mainnet/lotus-init.sh new file mode 100755 index 00000000000..a014f617e23 --- /dev/null +++ b/tools/packer/scripts/mainnet/lotus-init.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +# This script sets up an initial configuration for the lotus daemon and miner +# It will only run once. + +GATE="$LOTUS_PATH"/date_initialized + +# Don't init if already initialized. +if [ -f "$GATE" ]; then + echo lotus already initialized. + exit 0 +fi + +echo importing minimal snapshot +lotus daemon --import-snapshot https://fil-chain-snapshots-fallback.s3.amazonaws.com/mainnet/minimal_finality_stateroots_latest.car --halt-after-import + +# Block future inits +date > "$GATE" diff --git a/tools/packer/setup.sh b/tools/packer/setup.sh new file mode 100644 index 00000000000..d7d21664a88 --- /dev/null +++ b/tools/packer/setup.sh @@ -0,0 +1,57 @@ +#!/usr/bin/env bash + +# This script is executed by packer to setup the image.
+# When this script is run, packer will have already copied binaries into the home directory of +# whichever user it has access to. This script is executed from within the home directory of that +# user. Bear in mind that different cloud providers, and different images on the same cloud +# provider will have a different initial user account. + +set -x + +# Become root, if we aren't already. +# Docker images will already be root. AMIs will have an SSH user account. +UID=$(id -u) +if [ x$UID != x0 ] +then + printf -v cmd_str '%q ' "$0" "$@" + exec sudo su -c "$cmd_str" +fi + +MANAGED_BINS=( lotus lotus-miner lotus-init.sh ) +MANAGED_FILES=( + /lib/systemd/system/lotus-daemon.service + /lib/systemd/system/lotus-miner.service + /etc/motd +) + +# install libs. +apt update +apt -y install libhwloc15 ocl-icd-libopencl1 +ln -s /usr/lib/x86_64-linux-gnu/libhwloc.so.15 /usr/lib/x86_64-linux-gnu/libhwloc.so.5 + +# Create lotus user +useradd -c "lotus system account" -r fc +install -o fc -g fc -d /var/lib/lotus +install -o fc -g fc -d /var/lib/lotus-miner + +# Install software +for i in "${MANAGED_BINS[@]}" +do + install -o root -g root -m 755 -t /usr/local/bin $i + rm $i +done + +# Install systemd and other files. +# Because packer doesn't copy files with root permission, +# files are in the home directory of the ssh user. Copy +# these files into the right position.
+for i in "${MANAGED_FILES[@]}" +do + fn=$(basename $i) + install -o root -g root -m 644 $fn $i + rm $fn +done + +# Enable services +systemctl daemon-reload +systemctl enable lotus-daemon diff --git a/tools/packer/systemd/lotus-daemon.service b/tools/packer/systemd/lotus-daemon.service new file mode 100644 index 00000000000..edbc91151d0 --- /dev/null +++ b/tools/packer/systemd/lotus-daemon.service @@ -0,0 +1,17 @@ +[Unit] +Description=Lotus Daemon +After=network.target + +[Service] +User=fc +Group=fc +ExecStartPre=/usr/local/bin/lotus-init.sh +ExecStart=/usr/local/bin/lotus daemon +ExecStop=/usr/local/bin/lotus daemon stop +Environment=LOTUS_PATH=/var/lib/lotus +Restart=always +RestartSec=30 +TimeoutSec=infinity + +[Install] +WantedBy=multi-user.target diff --git a/tools/packer/systemd/lotus-miner.service b/tools/packer/systemd/lotus-miner.service new file mode 100644 index 00000000000..d7289c888b3 --- /dev/null +++ b/tools/packer/systemd/lotus-miner.service @@ -0,0 +1,15 @@ +[Unit] +Description=Lotus Miner +After=network.target + +[Service] +User=fc +Group=fc +ExecStart=/usr/local/bin/lotus-miner run +Environment=LOTUS_PATH=/var/lib/lotus +Environment=LOTUS_MINER_PATH=/var/lib/lotus-miner +Restart=always +RestartSec=30 + +[Install] +WantedBy=multi-user.target