diff --git a/Makefile b/Makefile
index 9af844b1..c70366d7 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,21 @@
+SHELL=/usr/bin/env bash
+
+CLEAN:=
+BINS:=
+
+ldflags=-X=github.com/filecoin-project/venus-sealer/constants.CurrentCommit=+git.$(subst -,.,$(shell git describe --always --match=NeVeRmAtCh --dirty 2>/dev/null || git rev-parse --short HEAD 2>/dev/null))
+
+## FFI
+
+FFI_PATH:=extern/filecoin-ffi/
+
+CLEAN+=build/.filecoin-install
+
 build:
 	go build -o venus-sealer ./app/venus-sealer
 	go build -o venus-worker ./app/venus-worker
+BINS+=venus-sealer
+BINS+=venus-worker
 
 deps:
 	git submodule update --init
@@ -9,3 +24,7 @@ deps:
 
 lint:
 	go run github.com/golangci/golangci-lint/cmd/golangci-lint run
+clean:
+	rm -rf $(CLEAN) $(BINS)
+	-$(MAKE) -C $(FFI_PATH) clean
+.PHONY: clean
diff --git a/api/impl/common.go b/api/impl/common.go
index f06e2277..ab0cbead 100644
--- a/api/impl/common.go
+++ b/api/impl/common.go
@@ -51,7 +51,7 @@ func (a *CommonAPI) AuthNew(ctx context.Context, perms []auth.Permission) ([]byt
 func (a *CommonAPI) Version(context.Context) (api.Version, error) {
 	return api.Version{
 		Version:    constants.MinerVersion.String(),
-		APIVersion: constants.MinerAPIVersion,
+		APIVersion: constants.MinerAPIVersion0,
 		BlockDelay: a.NetworkParams.BlockDelaySecs,
 	}, nil
 }
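The `MinerAPIVersion` to `MinerAPIVersion0` rename follows the lotus convention of suffixing API version constants with the RPC namespace generation (v0/v1). For readers unfamiliar with these `newVer(...)` constants, here is a minimal sketch of the packed-version scheme; the `uint32` bit layout is an assumption inferred from the `newVer`/`EqMajorMinor` helpers that appear in `constants/version.go` further down, not a verbatim copy of that package (and note that venus-worker's run command, also below, compares `APIVersion` for exact equality rather than major.minor compatibility):

```go
package main

import "fmt"

// Version packs major.minor.patch into a uint32 as 0x00MMmmpp.
// Assumed layout, for illustration only.
type Version uint32

func newVer(major, minor, patch uint8) Version {
	return Version(uint32(major)<<16 | uint32(minor)<<8 | uint32(patch))
}

// EqMajorMinor reports whether two versions agree on major.minor,
// ignoring the patch byte.
func (ve Version) EqMajorMinor(v2 Version) bool {
	return ve&^0xff == v2&^0xff
}

func main() {
	miner := newVer(1, 2, 0)                // MinerAPIVersion0 in this diff
	worker := newVer(1, 2, 3)               // hypothetical patched client
	fmt.Println(miner.EqMajorMinor(worker)) // true: patch differences tolerated
}
```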
diff --git a/app/venus-sealer/info.go b/app/venus-sealer/info.go
index 5d4d2410..ac71c9ac 100644
--- a/app/venus-sealer/info.go
+++ b/app/venus-sealer/info.go
@@ -3,8 +3,8 @@ package main
 import (
 	"context"
 	"fmt"
-	"github.com/filecoin-project/venus-sealer/constants"
-	types2 "github.com/filecoin-project/venus-sealer/types"
+	"math"
+	corebig "math/big"
 	"sort"
 	"time"
 
@@ -16,10 +16,14 @@ import (
 	"github.com/filecoin-project/go-state-types/abi"
 	"github.com/filecoin-project/go-state-types/big"
 
+	"github.com/filecoin-project/venus-sealer/api"
+	"github.com/filecoin-project/venus-sealer/constants"
 	"github.com/filecoin-project/venus-sealer/lib/blockstore"
 	"github.com/filecoin-project/venus-sealer/lib/bufbstore"
+	types2 "github.com/filecoin-project/venus-sealer/types"
 
 	"github.com/filecoin-project/venus/pkg/specactors/adt"
+	"github.com/filecoin-project/venus/pkg/specactors/builtin"
 	"github.com/filecoin-project/venus/pkg/specactors/builtin/miner"
 	"github.com/filecoin-project/venus/pkg/types"
 )
@@ -60,6 +64,7 @@ func infoCmdAct(cctx *cli.Context) error {
 	if err != nil {
 		return err
 	}
+
 	fmt.Print("Chain: ")
 
 	head, err := nodeAPI.ChainHead(ctx)
@@ -122,18 +127,23 @@ func infoCmdAct(cctx *cli.Context) error {
 		return err
 	}
 
-	rpercI := types.BigDiv(types.BigMul(pow.MinerPower.RawBytePower, types.NewInt(1000000)), pow.TotalPower.RawBytePower)
-	qpercI := types.BigDiv(types.BigMul(pow.MinerPower.QualityAdjPower, types.NewInt(1000000)), pow.TotalPower.QualityAdjPower)
-
 	fmt.Printf("Power: %s / %s (%0.4f%%)\n",
 		color.GreenString(types.DeciStr(pow.MinerPower.QualityAdjPower)),
 		types.DeciStr(pow.TotalPower.QualityAdjPower),
-		float64(qpercI.Int64())/10000)
+		types.BigDivFloat(
+			types.BigMul(pow.MinerPower.QualityAdjPower, big.NewInt(100)),
+			pow.TotalPower.QualityAdjPower,
+		),
+	)
 
 	fmt.Printf("\tRaw: %s / %s (%0.4f%%)\n",
 		color.BlueString(types.SizeStr(pow.MinerPower.RawBytePower)),
 		types.SizeStr(pow.TotalPower.RawBytePower),
-		float64(rpercI.Int64())/10000)
+		types.BigDivFloat(
+			types.BigMul(pow.MinerPower.RawBytePower, big.NewInt(100)),
+			pow.TotalPower.RawBytePower,
+		),
+	)
 
 	secCounts, err := nodeAPI.StateMinerSectorCount(ctx, maddr, types.EmptyTSK)
 	if err != nil {
@@ -148,7 +158,7 @@ func infoCmdAct(cctx *cli.Context) error {
 	} else {
 		var faultyPercentage float64
 		if secCounts.Live != 0 {
-			faultyPercentage = float64(10000*nfaults/secCounts.Live) / 100.
+			faultyPercentage = float64(100*nfaults) / float64(secCounts.Live)
 		}
 		fmt.Printf("\tProving: %s (%s Faulty, %.2f%%)\n",
 			types.SizeStr(types.BigMul(types.NewInt(proving), types.NewInt(uint64(mi.SectorSize)))),
@@ -159,16 +169,53 @@ func infoCmdAct(cctx *cli.Context) error {
 	if !pow.HasMinPower {
 		fmt.Print("Below minimum power threshold, no blocks will be won")
 	} else {
-		expWinChance := float64(types.BigMul(qpercI, types.NewInt(constants.BlocksPerEpoch)).Int64()) / 1000000
-		if expWinChance > 0 {
-			if expWinChance > 1 {
-				expWinChance = 1
-			}
-			winRate := time.Duration(float64(time.Second*time.Duration(params.BlockDelaySecs)) / expWinChance)
-			winPerDay := float64(time.Hour*24) / float64(winRate)
-			fmt.Print("Expected block win rate: ")
-			color.Blue("%.4f/day (every %s)", winPerDay, winRate.Truncate(time.Second))
+		winRatio := new(corebig.Rat).SetFrac(
+			types.BigMul(pow.MinerPower.QualityAdjPower, types.NewInt(constants.BlocksPerEpoch)).Int,
+			pow.TotalPower.QualityAdjPower.Int,
+		)
+
+		if winRatioFloat, _ := winRatio.Float64(); winRatioFloat > 0 {
+
+			// if the corresponding Poisson distribution isn't infinitely small then
+			// throw it into the mix as well, accounting for multi-wins
+			winRatioWithPoissonFloat := -math.Expm1(-winRatioFloat)
+			winRatioWithPoisson := new(corebig.Rat).SetFloat64(winRatioWithPoissonFloat)
+			if winRatioWithPoisson != nil {
+				winRatio = winRatioWithPoisson
+				winRatioFloat = winRatioWithPoissonFloat
+			}
+
+			weekly, _ := new(corebig.Rat).Mul(
+				winRatio,
+				new(corebig.Rat).SetInt64(7*builtin.EpochsInDay),
+			).Float64()
+
+			avgDuration, _ := new(corebig.Rat).Mul(
+				new(corebig.Rat).SetInt64(builtin.EpochDurationSeconds),
+				new(corebig.Rat).Inv(winRatio),
+			).Float64()
+
+			fmt.Print("Projected average block win rate: ")
+			color.Blue(
+				"%.02f/week (every %s)",
+				weekly,
+				(time.Second * time.Duration(avgDuration)).Truncate(time.Second).String(),
+			)
+
+			// Geometric distribution of P(Y < k) calculated as described in https://en.wikipedia.org/wiki/Geometric_distribution#Probability_Outcomes_Examples
+			// https://www.wolframalpha.com/input/?i=t+%3E+0%3B+p+%3E+0%3B+p+%3C+1%3B+c+%3E+0%3B+c+%3C1%3B+1-%281-p%29%5E%28t%29%3Dc%3B+solve+t
+			// t == how many dice-rolls (epochs) before win
+			// p == winRate == ( minerPower / netPower )
+			// c == target probability of win ( 99.9% in this case )
+			fmt.Print("Projected block win with ")
+			color.Green(
+				"99.9%% probability every %s",
+				(time.Second * time.Duration(
+					builtin.EpochDurationSeconds*math.Log(1-0.999)/
+						math.Log(1-winRatioFloat),
+				)).Truncate(time.Second).String(),
+			)
+			fmt.Println("(projections DO NOT account for future network and miner growth)")
 		}
 	}
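Spelled out, the new projection block computes the following, with q the miner's share of quality-adjusted power, e = `constants.BlocksPerEpoch` (the expected leaders per epoch), and the mainnet epoch constants from `specactors/builtin` (30-second epochs, 2880 epochs per day). The Poisson step matters because a miner can win more than one block in a single epoch, so p below counts epochs with at least one win:

```latex
\lambda = e \, q
    % winRatio: expected block wins per epoch
p = 1 - e^{-\lambda}
    % -math.Expm1(-winRatioFloat): P(win >= 1 block in an epoch)
W_{\text{week}} = 7 \cdot 2880 \cdot p
    % builtin.EpochsInDay = 2880
\bar{t} = \frac{30}{p} \ \text{seconds}
    % builtin.EpochDurationSeconds = 30
t_{0.999} = 30 \cdot \frac{\ln(1 - 0.999)}{\ln(1 - p)} \ \text{seconds}
    % solving 1 - (1-p)^t = 0.999 for t, per the Wolfram Alpha link in the code
```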
"github.com/filecoin-project/venus/pkg/gen/genesis" "io/ioutil" "os" "path/filepath" "strconv" + "github.com/filecoin-project/go-address" + paramfetch "github.com/filecoin-project/go-paramfetch" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" + power2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/power" + "github.com/filecoin-project/venus/fixtures/asset" + "github.com/filecoin-project/venus/pkg/gen/genesis" + "github.com/docker/go-units" "github.com/google/uuid" "github.com/libp2p/go-libp2p-core/crypto" @@ -29,17 +28,21 @@ import ( "github.com/urfave/cli/v2" "golang.org/x/xerrors" - "github.com/filecoin-project/go-address" - paramfetch "github.com/filecoin-project/go-paramfetch" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - power2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/power" - "github.com/filecoin-project/venus-sealer/sector-storage/stores" actors "github.com/filecoin-project/venus/pkg/specactors" "github.com/filecoin-project/venus/pkg/specactors/builtin/miner" "github.com/filecoin-project/venus/pkg/specactors/builtin/power" "github.com/filecoin-project/venus/pkg/specactors/policy" "github.com/filecoin-project/venus/pkg/types" + + types3 "github.com/filecoin-project/venus-messager/types" + + "github.com/filecoin-project/venus-sealer/api" + "github.com/filecoin-project/venus-sealer/config" + "github.com/filecoin-project/venus-sealer/constants" + "github.com/filecoin-project/venus-sealer/models" + "github.com/filecoin-project/venus-sealer/sector-storage/stores" + "github.com/filecoin-project/venus-sealer/service" + types2 "github.com/filecoin-project/venus-sealer/types" ) var initCmd = &cli.Command{ @@ -58,7 +61,7 @@ var initCmd = &cli.Command{ &cli.StringFlag{ Name: "worker", Aliases: []string{"w"}, - Usage: "worker key to use (overrides --create-worker-key)", + Usage: "worker key to use", }, &cli.StringFlag{ Name: "owner", @@ -101,7 +104,7 @@ var initCmd = &cli.Command{ }, &cli.StringFlag{ Name: "network", - Usage: "set network type mainnet calibration 2k", + Usage: "network type: one of mainnet,calibration,2k&nerpa", Value: "mainnet", DefaultText: "mainnet", }, @@ -160,6 +163,7 @@ var initCmd = &cli.Command{ ctx := api.ReqContext(cctx) log.Info("Checking proof parameters") + ps, err := asset.Asset("fixtures/_assets/proof-params/parameters.json") if err != nil { return err @@ -170,10 +174,11 @@ var initCmd = &cli.Command{ } if err := paramfetch.GetParams(ctx, ps, srs, uint64(ssize)); err != nil { - return xerrors.Errorf("get params: %w", err) + return xerrors.Errorf("fetching proof parameters: %w", err) } log.Info("Trying to connect to full node RPC") + setAuthToken(cctx) fullNode, closer, err := api.GetFullNodeAPIV2(cctx) // TODO: consider storing full node address in config if err != nil { @@ -187,6 +192,7 @@ var initCmd = &cli.Command{ return err } parserFlag(defaultCfg, cctx) + log.Info("Checking full node sync status") if !cctx.Bool("genesis-miner") && !cctx.Bool("nosync") { @@ -196,7 +202,8 @@ var initCmd = &cli.Command{ } log.Info("Checking if repo exists") - cfgPath := cctx.String("config") + + cfgPath := cctx.String(FlagMinerRepo) defaultCfg.ConfigPath = cfgPath exit, err := config.ConfigExist(defaultCfg.DataDir) @@ -204,10 +211,11 @@ var initCmd = &cli.Command{ return err } if exit { - return xerrors.Errorf("data has exit in %s", defaultCfg.DataDir) + 
return xerrors.Errorf("repo is already initialized at %s", defaultCfg.DataDir) } log.Info("Checking full node version") + v, err := fullNode.Version(ctx) if err != nil { return err @@ -224,6 +232,7 @@ var initCmd = &cli.Command{ defer closer() log.Info("Initializing repo") + { //write config err = config.SaveConfig(cfgPath, defaultCfg) @@ -301,9 +310,9 @@ var initCmd = &cli.Command{ func setAuthToken(cctx *cli.Context) { if cctx.IsSet("auth-token") { authToken := cctx.String("auth-token") - cctx.Set("node-token", authToken) - cctx.Set("messager-token", authToken) - cctx.Set("gateway-token", authToken) + _ = cctx.Set("node-token", authToken) + _ = cctx.Set("messager-token", authToken) + _ = cctx.Set("gateway-token", authToken) } } @@ -331,12 +340,14 @@ func parserFlag(cfg *config.StorageMiner, cctx *cli.Context) { if cctx.IsSet("messager-token") { cfg.Messager.Token = cctx.String("messager-token") } + if cctx.IsSet("gateway-token") { cfg.RegisterProof.Token = cctx.String("gateway-token") } } func storageMinerInit(ctx context.Context, cctx *cli.Context, api api.FullNode, messagerClient api.IMessager, cfg *config.StorageMiner, ssize abi.SectorSize, gasPrice types.BigInt) error { log.Info("Initializing libp2p identity") + repo, err := models.SetDataBase(config.HomeDir(cfg.DataDir), &cfg.DB) if err != nil { return err @@ -369,6 +380,7 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api api.FullNode, if err := metaDataService.SaveMinerAddress(a); err != nil { return err } + if pssb := cctx.String("pre-sealed-metadata"); pssb != "" { pssb, err := homedir.Expand(pssb) if err != nil { diff --git a/app/venus-sealer/main.go b/app/venus-sealer/main.go index 2f5ed015..04e74cbd 100644 --- a/app/venus-sealer/main.go +++ b/app/venus-sealer/main.go @@ -14,6 +14,8 @@ import ( var log = logging.Logger("main") +const FlagMinerRepo = "miner-repo" + func main() { sealer.SetupLogLevels() diff --git a/app/venus-sealer/proving.go b/app/venus-sealer/proving.go index ccfa2a41..2ba22fcb 100644 --- a/app/venus-sealer/proving.go +++ b/app/venus-sealer/proving.go @@ -186,7 +186,7 @@ var provingInfoCmd = &cli.Command{ var faultPerc float64 if proving > 0 { - faultPerc = float64(faults*10000/proving) / 100 + faultPerc = float64(faults * 100 / proving) } fmt.Printf("Current Epoch: %d\n", cd.CurrentEpoch) diff --git a/app/venus-sealer/run.go b/app/venus-sealer/run.go index a4debc8a..bcf37ac2 100644 --- a/app/venus-sealer/run.go +++ b/app/venus-sealer/run.go @@ -2,35 +2,25 @@ package main import ( "context" - "github.com/filecoin-project/venus-sealer/config" - "github.com/filecoin-project/venus-sealer/constants" - "github.com/filecoin-project/venus-sealer/types" - "github.com/mitchellh/go-homedir" - "github.com/zbiljic/go-filelock" - "net" - "net/http" + "fmt" _ "net/http/pprof" "os" - "os/signal" "path" "regexp" - "syscall" - mux "github.com/gorilla/mux" + "github.com/mitchellh/go-homedir" "github.com/multiformats/go-multiaddr" - manet "github.com/multiformats/go-multiaddr/net" "github.com/urfave/cli/v2" + "github.com/zbiljic/go-filelock" "go.opencensus.io/stats/view" - "go.opencensus.io/tag" "golang.org/x/xerrors" - "github.com/filecoin-project/go-jsonrpc" - "github.com/filecoin-project/go-jsonrpc/auth" - sealer "github.com/filecoin-project/venus-sealer" "github.com/filecoin-project/venus-sealer/api" - "github.com/filecoin-project/venus-sealer/api/impl" + "github.com/filecoin-project/venus-sealer/config" + "github.com/filecoin-project/venus-sealer/constants" 
"github.com/filecoin-project/venus-sealer/lib/ulimit" + "github.com/filecoin-project/venus-sealer/types" ) var runCmd = &cli.Command{ @@ -124,6 +114,7 @@ var runCmd = &cli.Command{ var minerapi api.StorageMiner stop, err := sealer.New(ctx, sealer.ConfigStorageAPIImpl(&minerapi), + sealer.Override(new(types.ShutdownChan), shutdownChan), sealer.Repo(cfg), sealer.Online(cfg), sealer.ApplyIf(func(s *sealer.Settings) bool { return cctx.IsSet("miner-api") }, @@ -134,7 +125,6 @@ var runCmd = &cli.Command{ return multiaddr.NewMultiaddr(cfg.API.ListenAddress) })), sealer.Override(new(api.FullNode), nodeApi), - sealer.Override(new(types.ShutdownChan), shutdownChan), ) if err != nil { return xerrors.Errorf("creating node: %w", err) @@ -145,55 +135,26 @@ var runCmd = &cli.Command{ return xerrors.Errorf("getting API endpoint: %w", err) } - lst, err := manet.Listen(endpoint) + // Instantiate the miner node handler. + handler, err := sealer.MinerHandler(minerapi, true) if err != nil { - return xerrors.Errorf("could not listen: %w", err) - } - - mux := mux.NewRouter() - - rpcServer := jsonrpc.NewServer() - rpcServer.Register("Filecoin", minerapi) - - mux.Handle("/rpc/v0", rpcServer) - mux.PathPrefix("/remote").HandlerFunc(minerapi.(*impl.StorageMinerAPI).ServeRemote) - mux.PathPrefix("/").Handler(http.DefaultServeMux) // pprof - - ah := &auth.Handler{ - Verify: minerapi.AuthVerify, - Next: mux.ServeHTTP, + return xerrors.Errorf("failed to instantiate rpc handler: %w", err) } - srv := &http.Server{ - Handler: ah, - BaseContext: func(listener net.Listener) context.Context { - key, _ := tag.NewKey("api") - ctx, _ := tag.New(context.Background(), tag.Upsert(key, "venus-sealer")) - return ctx - }, + // Serve the RPC. + rpcStopper, err := sealer.ServeRPC(handler, "venus-miner", endpoint) + if err != nil { + return fmt.Errorf("failed to start json-rpc endpoint: %s", err) } - sigChan := make(chan os.Signal, 2) - go func() { - select { - case sig := <-sigChan: - log.Warnw("received shutdown", "signal", sig) - case <-shutdownChan: - log.Warn("received shutdown") - } - - log.Warn("Shutting down...") - if err := stop(context.TODO()); err != nil { - log.Errorf("graceful shutting down failed: %s", err) - } - if err := srv.Shutdown(context.TODO()); err != nil { - log.Errorf("shutting down RPC server failed: %s", err) - } - log.Warn("Graceful shutdown successful") - }() - signal.Notify(sigChan, syscall.SIGTERM, syscall.SIGINT) + // Monitor for shutdown. + finishCh := MonitorShutdown(shutdownChan, + ShutdownHandler{Component: "rpc server", StopFunc: rpcStopper}, + ShutdownHandler{Component: "miner", StopFunc: stop}, + ) - return srv.Serve(manet.NetListener(lst)) + <-finishCh + return nil }, } diff --git a/app/venus-sealer/shutdown.go b/app/venus-sealer/shutdown.go new file mode 100644 index 00000000..8f819032 --- /dev/null +++ b/app/venus-sealer/shutdown.go @@ -0,0 +1,58 @@ +package main + +import ( + "context" + "os" + "os/signal" + "syscall" + + sealer "github.com/filecoin-project/venus-sealer" +) + +type ShutdownHandler struct { + Component string + StopFunc sealer.StopFunc +} + +// MonitorShutdown manages shutdown requests, by watching signals and invoking +// the supplied handlers in order. +// +// It watches SIGTERM and SIGINT OS signals, as well as the trigger channel. +// When any of them fire, it calls the supplied handlers in order. If any of +// them errors, it merely logs the error. +// +// Once the shutdown has completed, it closes the returned channel. 
The caller +// can watch this channel +func MonitorShutdown(triggerCh <-chan struct{}, handlers ...ShutdownHandler) <-chan struct{} { + sigCh := make(chan os.Signal, 2) + out := make(chan struct{}) + + go func() { + select { + case sig := <-sigCh: + log.Warnw("received shutdown", "signal", sig) + case <-triggerCh: + log.Warn("received shutdown") + } + + log.Warn("Shutting down...") + + // Call all the handlers, logging on failure and success. + for _, h := range handlers { + if err := h.StopFunc(context.TODO()); err != nil { + log.Errorf("shutting down %s failed: %s", h.Component, err) + continue + } + log.Infof("%s shut down successfully ", h.Component) + } + + log.Warn("Graceful shutdown successful") + + // Sync all loggers. + _ = log.Sync() //nolint:errcheck + close(out) + }() + + signal.Notify(sigCh, syscall.SIGTERM, syscall.SIGINT) + return out +} diff --git a/app/venus-worker/main.go b/app/venus-worker/main.go index 7c42f94f..6254ba2f 100644 --- a/app/venus-worker/main.go +++ b/app/venus-worker/main.go @@ -55,9 +55,10 @@ func main() { } app := &cli.App{ - Name: "lotus-worker", + Name: "venus-worker", Usage: "Remote miner worker", Version: constants.UserVersion(), + EnableBashCompletion: true, Flags: []cli.Flag{ &cli.StringFlag{ Name: "data", @@ -239,8 +240,8 @@ var runCmd = &cli.Command{ if err != nil { return err } - if v.APIVersion != constants.MinerAPIVersion { - return xerrors.Errorf("lotus-miner API version doesn't match: expected: %s", api.Version{APIVersion: constants.MinerAPIVersion}) + if v.APIVersion != constants.MinerAPIVersion0 { + return xerrors.Errorf("lotus-miner API version doesn't match: expected: %s", api.Version{APIVersion: constants.MinerAPIVersion0}) } log.Infof("Remote version %s", v) @@ -362,9 +363,10 @@ var runCmd = &cli.Command{ return err } - remote := stores.NewRemote(localStore, nodeApi, cfg.Sealer.AuthHeader(), cctx.Int("parallel-fetch-limit")) + remote := stores.NewRemote(localStore, nodeApi, cfg.Sealer.AuthHeader(), cctx.Int("parallel-fetch-limit"), + &stores.DefaultPartialFileHandler{}) - fh := &stores.FetchHandler{Local: localStore} + fh := &stores.FetchHandler{Local: localStore, PfHandler: &stores.DefaultPartialFileHandler{}} remoteHandler := func(w http.ResponseWriter, r *http.Request) { if !auth.HasPerm(r.Context(), nil, api.PermAdmin) { w.WriteHeader(401) diff --git a/app/venus-worker/rpc.go b/app/venus-worker/rpc.go index a5c710c9..6517b8b6 100644 --- a/app/venus-worker/rpc.go +++ b/app/venus-worker/rpc.go @@ -24,7 +24,7 @@ type worker struct { } func (w *worker) Version(context.Context) (constants.Version, error) { - return constants.WorkerAPIVersion, nil + return constants.WorkerAPIVersion0, nil } func (w *worker) StorageAddLocal(ctx context.Context, path string) error { diff --git a/builder.go b/builder.go index 0b761a4f..a6098c9f 100644 --- a/builder.go +++ b/builder.go @@ -129,6 +129,8 @@ func Online(cfg *config.StorageMiner) Option { Override(new(abi.RegisteredSealProof), SealProofType), Override(new(stores.LocalStorage), cfg.LocalStorage()), //todo Override(new(types.SectorIDCounter), SectorIDCounter), + Override(new(*stores.Local), LocalStorage), + Override(new(*stores.Remote), RemoteStorage), Override(new(*sectorstorage.Manager), SectorStorage), Override(new(ffiwrapper.Verifier), ffiwrapper.ProofVerifier), Override(new(ffiwrapper.Prover), ffiwrapper.ProofProver), diff --git a/constants/constants.go b/constants/constants.go index e47c209d..8a44e068 100644 --- a/constants/constants.go +++ b/constants/constants.go @@ -1,23 +1,26 @@ package 
diff --git a/go.mod b/go.mod
index d9f6aa0d..5879519d 100644
--- a/go.mod
+++ b/go.mod
@@ -15,10 +15,10 @@ require (
 	github.com/filecoin-project/go-address v0.0.5
 	github.com/filecoin-project/go-bitfield v0.2.4
 	github.com/filecoin-project/go-cbor-util v0.0.0-20201016124514-d0bbec7bfcc4
-	github.com/filecoin-project/go-commp-utils v0.1.0
-	github.com/filecoin-project/go-data-transfer v1.5.0
+	github.com/filecoin-project/go-commp-utils v0.1.1-0.20210427191551-70bf140d31c7
+	github.com/filecoin-project/go-data-transfer v1.6.0
 	github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a
-	github.com/filecoin-project/go-fil-markets v1.2.5
+	github.com/filecoin-project/go-fil-markets v1.5.0
 	github.com/filecoin-project/go-jsonrpc v0.1.4-0.20210217175800-45ea43ac2bec
 	github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20
 	github.com/filecoin-project/go-paramfetch v0.0.2-0.20210614165157-25a6c7769498
@@ -31,14 +31,15 @@ require (
 	github.com/filecoin-project/specs-actors/v3 v3.1.1
 	github.com/filecoin-project/specs-actors/v5 v5.0.1
 	github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506
-	github.com/filecoin-project/venus v1.0.2
-	github.com/filecoin-project/venus-messager v1.1.0
+	github.com/filecoin-project/venus v1.0.4-0.20210729132421-f042d91e180e
+	github.com/filecoin-project/venus-messager v1.1.1
 	github.com/gbrlsnchs/jwt/v3 v3.0.0
+	github.com/golang/mock v1.4.4
 	github.com/google/uuid v1.2.0
 	github.com/gorilla/mux v1.8.0
 	github.com/hako/durafmt v0.0.0-20200710122514-c0fb7b4da026
 	github.com/hashicorp/go-multierror v1.1.0
-	github.com/ipfs-force-community/venus-common-utils v0.0.0-20210714031758-ea0e25ff0ec4
+	github.com/ipfs-force-community/venus-common-utils v0.0.0-20210714054928-2042a9040759
 	github.com/ipfs-force-community/venus-gateway v1.1.0
 	github.com/ipfs/go-block-format v0.0.3
 	github.com/ipfs/go-cid v0.0.7
@@ -47,12 +48,11 @@ require (
 	github.com/ipfs/go-ipfs-ds-help v1.0.0
 	github.com/ipfs/go-ipfs-util v0.0.2
 	github.com/ipfs/go-ipld-cbor v0.0.5
-	github.com/ipfs/go-log v1.0.5
 	github.com/ipfs/go-log/v2 v2.3.0
github.com/ipfs/go-metrics-interface v0.0.1 github.com/kelseyhightower/envconfig v1.4.0 github.com/libp2p/go-buffer-pool v0.0.2 - github.com/libp2p/go-libp2p-core v0.7.0 + github.com/libp2p/go-libp2p-core v0.8.5 github.com/libp2p/go-libp2p-pubsub v0.4.2-0.20210212194758-6c1addf493eb github.com/minio/sha256-simd v1.0.0 // indirect github.com/mitchellh/go-homedir v1.1.0 @@ -68,12 +68,12 @@ require ( go.uber.org/fx v1.13.1 go.uber.org/multierr v1.6.0 go.uber.org/zap v1.16.0 - golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 + golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6 golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 gorm.io/driver/mysql v1.1.1 gorm.io/driver/sqlite v1.1.4 - gorm.io/gorm v1.21.11 + gorm.io/gorm v1.21.12 gotest.tools v2.2.0+incompatible ) diff --git a/go.sum b/go.sum index 41a606b7..294d795d 100644 --- a/go.sum +++ b/go.sum @@ -72,7 +72,6 @@ github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAE github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/Stebalien/go-bitfield v0.0.1 h1:X3kbSSPUaJK60wV2hjOPZwmpljr6VGCqdq4cBLhbQBo= github.com/Stebalien/go-bitfield v0.0.1/go.mod h1:GNjFpasyUVkHMsfEOk8EFLJ9syQ6SI+XWrX9Wf2XH0s= @@ -132,7 +131,6 @@ github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJm github.com/bkielbasa/cyclop v1.2.0/go.mod h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbzot9mhmeI= github.com/bluele/gcache v0.0.0-20190518031135-bc40bd653833 h1:yCfXxYaelOyqnia8F/Yng47qhmfC9nKTRIbYRrRueq4= github.com/bluele/gcache v0.0.0-20190518031135-bc40bd653833/go.mod h1:8c4/i2VlovMO2gBnHGQPN5EJw+H0lx1u/5p+cgsXtCk= -github.com/bombsimon/wsl v1.2.5/go.mod h1:43lEF/i0kpXbLCeDXL9LMT8c92HyBywXb0AsgMHYngM= github.com/bombsimon/wsl/v3 v3.2.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/briandowns/spinner v1.11.1/go.mod h1:QOuQk7x+EaDASo80FEXwlwiA+j/PPIcX3FScO+3/ZPQ= @@ -140,14 +138,18 @@ github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dm github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= github.com/btcsuite/btcd v0.0.0-20190605094302-a0d1e3e36d50/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= -github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= +github.com/btcsuite/btcd v0.21.0-beta h1:At9hIZdJW0s9E/fAz28nrz6AmcNlSVucCH796ZteX1M= +github.com/btcsuite/btcd v0.21.0-beta/go.mod h1:ZSWyehm27aAuS9bvkATT+Xte3hjHZ+MRgMY/8NJ7K94= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= github.com/btcsuite/btcutil v0.0.0-20190207003914-4c204d697803/go.mod 
h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/btcutil v1.0.2/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts= github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= +github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I= github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= @@ -159,6 +161,7 @@ github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghf github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/charithe/durationcheck v0.0.6/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg= +github.com/cheekybits/genny v1.0.0 h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE= github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= @@ -205,6 +208,7 @@ github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018/go.mod h1:rQY github.com/davidlazar/go-crypto v0.0.0-20190912175916-7055855a373f/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4= github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU= github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= +github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= github.com/deepmap/oapi-codegen v1.3.13 h1:9HKGCsdJqE4dnrQ8VerFS0/1ZOJPmAhN+g8xgp8y3K4= github.com/deepmap/oapi-codegen v1.3.13/go.mod h1:WAmG5dWY8/PYHt4vKxlt90NsbHMAOCiteYKZMiIRfOo= github.com/denis-tingajkin/go-header v0.4.2/go.mod h1:eLRHAVXzE5atsKAnNRDB90WHCFFnBUn4RN0nRcs1LJA= @@ -229,7 +233,6 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= -github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker 
v0.7.3-0.20190315170154-87d593639c77/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= @@ -272,7 +275,6 @@ github.com/fatih/color v1.10.0 h1:s36xzo75JdqLaaWoiEHk767eHiwo0598uUxyfiPkDsg= github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/filecoin-project/go-address v0.0.3/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= -github.com/filecoin-project/go-address v0.0.5-0.20201103152444-f2023ef3f5bb/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= github.com/filecoin-project/go-address v0.0.5 h1:SSaFT/5aLfPXycUlFyemoHYhRgdyXClXCyDdNJKPlDM= github.com/filecoin-project/go-address v0.0.5/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= github.com/filecoin-project/go-amt-ipld/v2 v2.1.0/go.mod h1:nfFPoGyX0CU9SkXX8EoCcSuHN1XcbN0c6KBh7yvP5fs= @@ -282,7 +284,6 @@ github.com/filecoin-project/go-amt-ipld/v3 v3.0.0/go.mod h1:Qa95YNAbtoVCTSVtX38a github.com/filecoin-project/go-amt-ipld/v3 v3.1.0 h1:ZNJ9tEG5bE72vBWYiuh5bkxJVM3ViHNOmQ7qew9n6RE= github.com/filecoin-project/go-amt-ipld/v3 v3.1.0/go.mod h1:UjM2QhDFrrjD5s1CdnkJkat4ga+LqZBZgTMniypABRo= github.com/filecoin-project/go-bitfield v0.2.0/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= -github.com/filecoin-project/go-bitfield v0.2.3-0.20201110211213-fe2c1862e816/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= github.com/filecoin-project/go-bitfield v0.2.3/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= github.com/filecoin-project/go-bitfield v0.2.4 h1:uZ7MeE+XfM5lqrHJZ93OnhQKc/rveW8p9au0C68JPgk= github.com/filecoin-project/go-bitfield v0.2.4/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= @@ -290,25 +291,25 @@ github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2/go.m github.com/filecoin-project/go-cbor-util v0.0.0-20201016124514-d0bbec7bfcc4 h1:YmE80qPn5K0txSqxnRNiCRAWyXI1LTO//I4c4H0QwbM= github.com/filecoin-project/go-cbor-util v0.0.0-20201016124514-d0bbec7bfcc4/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg= github.com/filecoin-project/go-commp-utils v0.0.0-20201119054358-b88f7a96a434/go.mod h1:6s95K91mCyHY51RPWECZieD3SGWTqIFLf1mPOes9l5U= -github.com/filecoin-project/go-commp-utils v0.1.0 h1:PaDxoXYh1TXnnz5kA/xSObpAQwcJSUs4Szb72nuaNdk= github.com/filecoin-project/go-commp-utils v0.1.0/go.mod h1:6s95K91mCyHY51RPWECZieD3SGWTqIFLf1mPOes9l5U= +github.com/filecoin-project/go-commp-utils v0.1.1-0.20210427191551-70bf140d31c7 h1:U9Z+76pHCKBmtdxFV7JFZJj7OVm12I6dEKwtMVbq5p0= +github.com/filecoin-project/go-commp-utils v0.1.1-0.20210427191551-70bf140d31c7/go.mod h1:6s95K91mCyHY51RPWECZieD3SGWTqIFLf1mPOes9l5U= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMXdBnCiXjfCYx/hLqFxccPoqsSveQFxVLvNxy9bus= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= github.com/filecoin-project/go-data-transfer v1.0.1/go.mod h1:UxvfUAY9v3ub0a21BSK9u3pB2aq30Y0KMsG+w9/ysyo= github.com/filecoin-project/go-data-transfer v1.2.7/go.mod h1:mvjZ+C3NkBX10JP4JMu27DCjUouHFjHwUGh+Xc4yvDA= -github.com/filecoin-project/go-data-transfer v1.4.3/go.mod h1:n8kbDQXWrY1c4UgfMa9KERxNCWbOTDwdNhf2MpN9dpo= -github.com/filecoin-project/go-data-transfer v1.5.0 h1:eXmcq7boRl/S3plV0/h4qdxkM6EgFIXF9y3UdOL0VXE= github.com/filecoin-project/go-data-transfer v1.5.0/go.mod h1:E3WW4mCEYwU2y65swPEajSZoFWFmfXt7uwGduoACZQc= +github.com/filecoin-project/go-data-transfer v1.6.0 
h1:DHIzEc23ydRCCBwtFet3MfgO8gMpZEnw60Y+s71oX6o= +github.com/filecoin-project/go-data-transfer v1.6.0/go.mod h1:E3WW4mCEYwU2y65swPEajSZoFWFmfXt7uwGduoACZQc= github.com/filecoin-project/go-ds-versioning v0.1.0 h1:y/X6UksYTsK8TLCI7rttCKEvl8btmWxyFMEeeWGUxIQ= github.com/filecoin-project/go-ds-versioning v0.1.0/go.mod h1:mp16rb4i2QPmxBnmanUx8i/XANp+PFCCJWiAb+VW4/s= github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a h1:hyJ+pUm/4U4RdEZBlg6k8Ma4rDiuvqyGpoICXAxwsTg= github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= github.com/filecoin-project/go-fil-markets v1.0.5-0.20201113164554-c5eba40d5335/go.mod h1:AJySOJC00JRWEZzRG2KsfUnqEf5ITXxeX09BE9N4f9c= -github.com/filecoin-project/go-fil-markets v1.1.7/go.mod h1:6oTRaAsHnCqhi3mpZqdvnWIzH6QzHQc4dbhJrI9/BfQ= github.com/filecoin-project/go-fil-markets v1.1.9/go.mod h1:0yQu5gvrjFoAIyzPSSJ+xUdCG83vjInAFbTswIB5/hk= -github.com/filecoin-project/go-fil-markets v1.2.5 h1:bQgtXbwxKyPxSEQoUI5EaTHJ0qfzyd5NosspuADCm6Y= -github.com/filecoin-project/go-fil-markets v1.2.5/go.mod h1:7JIqNBmFvOyBzk/EiPYnweVdQnWhshixb5B9b1653Ag= +github.com/filecoin-project/go-fil-markets v1.5.0 h1:3KEs01L8XFCEgujZ6ggFjr1XWjpjTQcmSSeo3I99I0k= +github.com/filecoin-project/go-fil-markets v1.5.0/go.mod h1:7be6zzFwaN8kxVeYZf/UUj/JilHC0ogPvWqE1TW8Ptk= github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM= github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24= github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 h1:b3UDemBYN2HNfk3KOXNuxgTTxlWi3xVvbQP0IT38fvM= @@ -316,7 +317,6 @@ github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0/go.mod h1:7aWZdaQ1b16BVoQUYR+ github.com/filecoin-project/go-hamt-ipld/v3 v3.0.1/go.mod h1:gXpNmr3oQx8l3o7qkGyDjJjYSRX7hp/FGOStdqrWyDI= github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0 h1:rVVNq0x6RGQIzCo1iiJlGFm9AGIZzeifggxtKMU7zmI= github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0/go.mod h1:bxmzgT8tmeVQA1/gvBwFmYdT8SOFUwB3ovSUfG1Ux0g= -github.com/filecoin-project/go-jsonrpc v0.1.2/go.mod h1:XBBpuKIMaXIIzeqzO1iucq4GvbF8CxmXRFoezRh+Cx4= github.com/filecoin-project/go-jsonrpc v0.1.3/go.mod h1:XBBpuKIMaXIIzeqzO1iucq4GvbF8CxmXRFoezRh+Cx4= github.com/filecoin-project/go-jsonrpc v0.1.4-0.20210217175800-45ea43ac2bec h1:rGI5I7fdU4viManxmDdbk5deZO7afe6L1Wc04dAmlOM= github.com/filecoin-project/go-jsonrpc v0.1.4-0.20210217175800-45ea43ac2bec/go.mod h1:XBBpuKIMaXIIzeqzO1iucq4GvbF8CxmXRFoezRh+Cx4= @@ -325,14 +325,12 @@ github.com/filecoin-project/go-multistore v0.0.3 h1:vaRBY4YiA2UZFPK57RNuewypB8u0 github.com/filecoin-project/go-multistore v0.0.3/go.mod h1:kaNqCC4IhU4B1uyr7YWFHd23TL4KM32aChS0jNkyUvQ= github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20 h1:+/4aUeUoKr6AKfPE3mBhXA5spIV6UcKdTYDPNU2Tdmg= github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20/go.mod h1:mPn+LRRd5gEKNAtc+r3ScpW2JRU/pj4NBKdADYWHiak= -github.com/filecoin-project/go-paramfetch v0.0.2-0.20200701152213-3e0f0afdc261/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc= github.com/filecoin-project/go-paramfetch v0.0.2-0.20210614165157-25a6c7769498 h1:G10ezOvpH1CLXQ19EA9VWNwyL0mg536ujSayjV0yg0k= github.com/filecoin-project/go-paramfetch v0.0.2-0.20210614165157-25a6c7769498/go.mod h1:1FH85P8U+DUEmWk1Jkw3Bw7FrwTVUNHk/95PSPG+dts= 
github.com/filecoin-project/go-state-types v0.0.0-20200903145444-247639ffa6ad/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I= github.com/filecoin-project/go-state-types v0.0.0-20200904021452-1883f36ca2f4/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I= github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= -github.com/filecoin-project/go-state-types v0.0.0-20210119062722-4adba5aaea71/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-state-types v0.1.0/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-state-types v0.1.1-0.20210506134452-99b279731c48 h1:Jc4OprDp3bRDxbsrXNHPwJabZJM3iDy+ri8/1e0ZnX4= github.com/filecoin-project/go-state-types v0.1.1-0.20210506134452-99b279731c48/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= @@ -345,11 +343,9 @@ github.com/filecoin-project/specs-actors v0.9.14 h1:68PVstg2UB3ZsMLF+DKFTAs/YKsq github.com/filecoin-project/specs-actors v0.9.14/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= github.com/filecoin-project/specs-actors/v2 v2.0.1/go.mod h1:v2NZVYinNIKA9acEMBm5wWXxqv5+frFEbekBFemYghY= github.com/filecoin-project/specs-actors/v2 v2.3.2/go.mod h1:UuJQLoTx/HPvvWeqlIFmC/ywlOLHNe8SNQ3OunFbu2Y= -github.com/filecoin-project/specs-actors/v2 v2.3.4/go.mod h1:UuJQLoTx/HPvvWeqlIFmC/ywlOLHNe8SNQ3OunFbu2Y= github.com/filecoin-project/specs-actors/v2 v2.3.5-0.20210114162132-5b58b773f4fb/go.mod h1:LljnY2Mn2homxZsmokJZCpRuhOPxfXhvcek5gWkmqAc= github.com/filecoin-project/specs-actors/v2 v2.3.5 h1:PbT4tPlSXZ8sRgajhb4D8AOEmiaaZ+jg6tc6BBv8VQc= github.com/filecoin-project/specs-actors/v2 v2.3.5/go.mod h1:LljnY2Mn2homxZsmokJZCpRuhOPxfXhvcek5gWkmqAc= -github.com/filecoin-project/specs-actors/v3 v3.0.3/go.mod h1:oMcmEed6B7H/wHabM3RQphTIhq0ibAKsbpYs+bQ/uxQ= github.com/filecoin-project/specs-actors/v3 v3.1.0/go.mod h1:mpynccOLlIRy0QnR008BwYBwT9fen+sPR13MA1VmMww= github.com/filecoin-project/specs-actors/v3 v3.1.1 h1:BE8fsns1GnEOxt1DTE5LxBK2FThXtWmCChgcJoHTg0E= github.com/filecoin-project/specs-actors/v3 v3.1.1/go.mod h1:mpynccOLlIRy0QnR008BwYBwT9fen+sPR13MA1VmMww= @@ -362,19 +358,23 @@ github.com/filecoin-project/specs-actors/v5 v5.0.1/go.mod h1:74euMDIXorusOBs/QL/ github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506 h1:Ur/l2+6qN+lQiqjozWWc5p9UDaAMDZKTlDS98oRnlIw= github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506/go.mod h1:nJRRM7Aa9XVvygr3W9k6xGF46RWzr2zxF/iGoAIfA/g= github.com/filecoin-project/test-vectors/schema v0.0.5/go.mod h1:iQ9QXLpYWL3m7warwvK1JC/pTri8mnfEmKygNDqqY6E= -github.com/filecoin-project/venus v0.9.2-0.20210603072509-1a7e6e11d39c/go.mod h1:9IY4iNo5At3GMBUNa8we3aGL7zM3cFUNxy2rMpb86C0= -github.com/filecoin-project/venus v1.0.2 h1:IoLqGWzDmY9BUzxLZxr+1PgAi2Ls2eezY96StsXPsIc= -github.com/filecoin-project/venus v1.0.2/go.mod h1:65wz9APQjP6/BmJGD/pHTFs1704kboD9PW6yS5/zOyU= +github.com/filecoin-project/venus v1.0.1-0.20210707073618-62e8cf9a7834/go.mod h1:BVcA3ztDkxMbLPoMJKUKlkcbDUwI2dktfiu4rxhzYXc= +github.com/filecoin-project/venus v1.0.4-0.20210729132421-f042d91e180e h1:kNW10kKF45EQlrf7+IqUbP/+d44Sd23FBu2eTjPCi80= +github.com/filecoin-project/venus v1.0.4-0.20210729132421-f042d91e180e/go.mod h1:j4PCXCaOhXIXD2Idxrny6ZqIYy+WYY2ab5/BwugGvyo= github.com/filecoin-project/venus-auth v1.1.0/go.mod 
h1:LXrT3H5dbcG/N+Ze5jMLGwAyGic9s1y3J/JV2/qM0sU= +github.com/filecoin-project/venus-auth v1.1.1-0.20210601064545-55f3162444fd/go.mod h1:5tTtug4j0zXhaHBt731KyPpV5r8BG4z64huN4uZHmsE= github.com/filecoin-project/venus-auth v1.2.0/go.mod h1:m12HUiZhSWOB1UBs7rcRRXdm89J88aaNBnnLDle7UkI= -github.com/filecoin-project/venus-auth v1.2.1 h1:Osv1vEtT+XlF+VCHCN2Sm5ovz8UZMENP3WlK/lz6/hQ= -github.com/filecoin-project/venus-auth v1.2.1/go.mod h1:seL0Da6grKQP91NmwJbrxlI7QX/BbhLaeLeUCUS/dmA= -github.com/filecoin-project/venus-messager v1.1.0 h1:/eWA8A+XLreVJHPoyPjIN2qqnHmAFC5/+MnLYOAg7Z0= -github.com/filecoin-project/venus-messager v1.1.0/go.mod h1:jfD2k4MF5DKn3SZMVSm7PT1wPA/OAAi6r78nHQTRJlQ= +github.com/filecoin-project/venus-auth v1.2.2-0.20210716071053-565a8999f9e5/go.mod h1:jUtjoz4kKz+VtilL4900JbWf+OQoxF7KZlnfQrvKKa8= +github.com/filecoin-project/venus-auth v1.2.2-0.20210721103851-593a379c4916 h1:5CshZxLVln0WtsZ1F0+kxzx1RL+lHqJOXRXUxBqgi0k= +github.com/filecoin-project/venus-auth v1.2.2-0.20210721103851-593a379c4916/go.mod h1:Ly135gUqZWf6dOYloHRq0L1a6CXRJfiRmxVBwN1y8zg= +github.com/filecoin-project/venus-messager v1.1.1 h1:8Ntm+LAdONePZ8+LdNhVflDV9QuJZ0pEKfHmNtzhUBw= +github.com/filecoin-project/venus-messager v1.1.1/go.mod h1:l2cnhrnnnREf/gMAbYkA74ByYFtiitZzH3OVWvnPHAI= github.com/filecoin-project/venus-wallet v1.1.0/go.mod h1:blo4MxTA+G6ed/Rkvji1JNwVjGKb1VZzqETChn5ffkQ= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= -github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6 h1:u/UEqS66A5ckRmS4yNpjmVH56sVtS/RfclBAYocb4as= github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6/go.mod h1:1i71OnUq3iUe1ma7Lr6yG6/rjvM3emb6yoL7xLFzcVQ= +github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ= +github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= +github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= @@ -395,7 +395,6 @@ github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwv github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= github.com/go-chi/chi v4.0.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= -github.com/go-critic/go-critic v0.3.5-0.20190904082202-d79a9f0c64db/go.mod h1:+sE8vrLDS2M0pZkBk0wy6+nLdKexVDrl/jBqQOTDThA= github.com/go-critic/go-critic v0.5.5/go.mod h1:eMs1Oc/oIP+CYNVN09M+XZYffIPuRHawxzlggAPN9Kk= github.com/go-delve/delve v1.5.0/go.mod h1:c6b3a1Gry6x8a4LGCe/CWzrocrfaHvkUxCj3k4bvSUQ= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= @@ -407,12 +406,10 @@ github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2 github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-lintpack/lintpack v0.5.2/go.mod h1:NwZuYi2nUHho8XEIZ6SIxihrnPoqBTDqfpXvXAN0sXM= 
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= @@ -425,9 +422,8 @@ github.com/go-playground/validator/v10 v10.4.1 h1:pH2c5ADXtd66mxoE0Zm9SUhxE20r7a github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= github.com/go-redis/redis v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= -github.com/go-redis/redis/v8 v8.3.4/go.mod h1:jszGxBCez8QA1HWSmQxJO9Y82kNibbUmeYhKWrBejTU= -github.com/go-redis/redis/v8 v8.11.0/go.mod h1:DLomh7y2e3ggQXQLd1YgmvIfecPJoFl7WU5SOQ/r06M= -github.com/go-redis/redis_rate/v9 v9.1.1/go.mod h1:jjU9YxOSZ3cz0yj1QJVAJiy5ueKmL9o4AySJHcKyTSE= +github.com/go-redis/redis/v7 v7.0.0-beta/go.mod h1:dohSoK1cSNPaisjbZhSk7RYyPhVx2k+4sAbJdPK5KPs= +github.com/go-redis/redis_rate/v7 v7.0.1/go.mod h1:IWxoSa694TQvppZ53Y5yZtqSfHKflOx+xtSw1TsSoT4= github.com/go-resty/resty/v2 v2.4.0/go.mod h1:B88+xCTEwvfD94NOuE6GS1wMlnoKNY8eEiNizfNwOwA= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= @@ -437,14 +433,10 @@ github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ= -github.com/go-toolsmith/astequal v0.0.0-20180903214952-dcb477bfacd6/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= -github.com/go-toolsmith/astfmt v0.0.0-20180903215011-8f8ee99c3086/go.mod h1:mP93XdblcopXwlyN4X4uodxXQhldPGZbcEJIimQHrkg= github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw= github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU= -github.com/go-toolsmith/astp v0.0.0-20180903215135-0af7e3c24f30/go.mod h1:SV2ur98SGypH1UjcPpCatrV5hPazG6+IfNHbkDXBRrk= github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI= -github.com/go-toolsmith/pkgload v0.0.0-20181119091011-e9e65178eee8/go.mod h1:WoMrjiy4zvdS+Bg6z9jZH82QXwkcgCBX6nOfnmdaHks= github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc= github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= @@ -452,7 +444,6 @@ github.com/go-toolsmith/typep v1.0.2/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2 github.com/go-xmlfmt/xmlfmt 
v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/godbus/dbus v0.0.0-20190402143921-271e53dc4968/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= -github.com/gofrs/flock v0.0.0-20190320160742-5135e617513b/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gofrs/flock v0.8.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= @@ -472,6 +463,7 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfU github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -482,6 +474,7 @@ github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFU github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -508,21 +501,13 @@ github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf h1:gFVkHXmVAhEbxZV github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= -github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6/go.mod h1:DbHgvLiFKX1Sh2T1w8Q/h4NAI8MHIpzCdnBUDTXU3I0= github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8= -github.com/golangci/goconst v0.0.0-20180610141641-041c5f2b40f3/go.mod h1:JXrF4TWy4tXYn62/9x8Wm/K/dm06p8tCKwFRDPZG/1o= -github.com/golangci/gocyclo v0.0.0-20180528134321-2becd97e67ee/go.mod h1:ozx7R9SIwqmqf5pRP90DhR2Oay2UIjGuKheCBCNwAYU= github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= -github.com/golangci/golangci-lint v1.21.0/go.mod h1:phxpHK52q7SE+5KpPnti4oZTdFCEsn/tKN+nFvCKXfk= github.com/golangci/golangci-lint v1.39.0/go.mod 
h1:mzMK3FGyk8LKTOxpRDcDqxwHVudnYemESTt5rpUxqCM= -github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc/go.mod h1:e5tpTHCfVze+7EpLEozzMB3eafxo2KT5veNg1k6byQU= github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y= github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= -github.com/golangci/misspell v0.0.0-20180809174111-950f5d19e770/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= github.com/golangci/misspell v0.3.5/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= -github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21/go.mod h1:tf5+bzsHdTM0bsB7+8mt0GUMvjCgwLpTapNZHU8AajI= -github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0/go.mod h1:qOQCunEYvmd/TLamH+7LlVccLvUH5kZNhbCgTHoBbp4= github.com/golangci/revgrep v0.0.0-20210208091834-cd28932614b5/go.mod h1:LK+zW4MpyytAWQRz0M4xnzEk50lSvqDQKfx304apFkY= github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -550,8 +535,9 @@ github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+u github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gopacket v1.1.17/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM= -github.com/google/gopacket v1.1.18 h1:lum7VRA9kdlvBi7/v2p7/zcbkduHaCH/SVVyurs7OpY= github.com/google/gopacket v1.1.18/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM= +github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= +github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -680,15 +666,16 @@ github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 h1:W9WBk7wlPfJLvMCdtV4zPulc4uCPrlywQOmbFOhgQNU= github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= github.com/ipfs-force-community/go-ipfs-cmds v0.6.1-0.20210521090123-4587df7fa0ab/go.mod h1:ZgYiWVnCk43ChwoH8hAmI1IRbuVtq3GSTHwtRB/Kqhk= -github.com/ipfs-force-community/metrics v0.0.0-20210705093944-918711d7932a/go.mod h1:ajvaa2Yv3kernFGYkOVugnapIKawekjQsBPoY+AXMf0= -github.com/ipfs-force-community/metrics v0.0.0-20210708103859-bb5906dadf00 h1:nS5cK3ml1THyYX0bDdyGROhhyJf4xQ0+V/SJ46LFmu0= -github.com/ipfs-force-community/metrics v0.0.0-20210708103859-bb5906dadf00/go.mod h1:ajvaa2Yv3kernFGYkOVugnapIKawekjQsBPoY+AXMf0= -github.com/ipfs-force-community/venus-common-utils v0.0.0-20210714031758-ea0e25ff0ec4 h1:WinGXFDwEGr9GJJSWwC4XXCruQ4EHV9QpnlRPDtjOkk= -github.com/ipfs-force-community/venus-common-utils v0.0.0-20210714031758-ea0e25ff0ec4/go.mod h1:AWIt7L1UiUeIzqQdjQ/quZcVcCdnr6GXabGLBgEIMzU= 
+github.com/ipfs-force-community/metrics v0.0.0-20210714124606-3d65e2d7cec1/go.mod h1:mn40SioMuKtjmRumHFy/fJ26Pn028XuDjUJE9dorjyw= +github.com/ipfs-force-community/metrics v0.0.0-20210721095706-e644662d8554/go.mod h1:mn40SioMuKtjmRumHFy/fJ26Pn028XuDjUJE9dorjyw= +github.com/ipfs-force-community/metrics v1.0.0 h1:9YTmQCVcguY+hqWq5lwpSK41dhizk5IntBFvQ6YnYNI= +github.com/ipfs-force-community/metrics v1.0.0/go.mod h1:mn40SioMuKtjmRumHFy/fJ26Pn028XuDjUJE9dorjyw= +github.com/ipfs-force-community/venus-common-utils v0.0.0-20210714051450-5b18e20bb913/go.mod h1:AWIt7L1UiUeIzqQdjQ/quZcVcCdnr6GXabGLBgEIMzU= +github.com/ipfs-force-community/venus-common-utils v0.0.0-20210714054928-2042a9040759 h1:dI1FSoq0C85B1Y+G2ZYrhf5TeIkumzmtz4tjakoqSxU= +github.com/ipfs-force-community/venus-common-utils v0.0.0-20210714054928-2042a9040759/go.mod h1:AWIt7L1UiUeIzqQdjQ/quZcVcCdnr6GXabGLBgEIMzU= github.com/ipfs-force-community/venus-gateway v0.0.0-20210528060921-460ec6185a7d/go.mod h1:m1gMbwD/mEw3jxIkaTWx/TCWQ3e4V6ccJ2Vw7q/lvVQ= github.com/ipfs-force-community/venus-gateway v1.1.0 h1:5WTA0+nlPlRHHrPu4uSmcRB4ADLJGNYu5wkSi9/+S8I= github.com/ipfs-force-community/venus-gateway v1.1.0/go.mod h1:Xwmj2l5w6qeOXER4qCeT48zDFmvHetmBN1swHEeqnOQ= -github.com/ipfs-force-community/venus-wallet v0.0.0-20210222095647-d0fab90dc77f/go.mod h1:7pzkUzgqrHOQQSewIwvCuDzahMFJgaqx3pkLt/5yyI8= github.com/ipfs/bbloom v0.0.1/go.mod h1:oqo8CVWsJFMOZqTglBG4wydCE4IQA/G2/SEofB0rjUI= github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= @@ -783,7 +770,6 @@ github.com/ipfs/go-ipfs-files v0.0.3/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjN github.com/ipfs/go-ipfs-files v0.0.4/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= github.com/ipfs/go-ipfs-files v0.0.8 h1:8o0oFJkJ8UkO/ABl8T6ac6tKF3+NIpj67aAB6ZpusRg= github.com/ipfs/go-ipfs-files v0.0.8/go.mod h1:wiN/jSG8FKyk7N0WyctKSvq3ljIa2NNTiZB55kpTdOs= -github.com/ipfs/go-ipfs-keystore v0.0.1/go.mod h1:5WjcKN1ESzCVzYKo5JvO1iYHLE0n626HL/cr3dSkqBs= github.com/ipfs/go-ipfs-posinfo v0.0.1 h1:Esoxj+1JgSjX0+ylc0hUmJCOv6V2vFoZiETLR6OtpRs= github.com/ipfs/go-ipfs-posinfo v0.0.1/go.mod h1:SwyeVP+jCwiDu0C313l/8jg6ZxM0qqtlt2a0vILTc1A= github.com/ipfs/go-ipfs-pq v0.0.1/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= @@ -937,13 +923,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/errcheck v1.6.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= -github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= -github.com/klauspost/cpuid v1.2.0 h1:NMpwD2G9JSFOE1/TJjGSo5zG7Yb2bTe7eq1jH+irmeE= -github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid/v2 v2.0.4 h1:g0I61F2K2DjRHz1cnxlkNSBIaePVoJIjjnHui8QHbiw= github.com/klauspost/cpuid/v2 v2.0.4/go.mod 
h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -955,10 +936,10 @@ github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d/go.mod h1:5Ky9EC2xfo github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= @@ -988,8 +969,9 @@ github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0 github.com/libp2p/go-conn-security v0.0.1/go.mod h1:bGmu51N0KU9IEjX7kl2PQjgZa40JQWnayTvNMgD/vyk= github.com/libp2p/go-conn-security-multistream v0.0.2/go.mod h1:nc9vud7inQ+d6SO0I/6dSWrdMnHnzZNHeyUQqrAJulE= github.com/libp2p/go-conn-security-multistream v0.1.0/go.mod h1:aw6eD7LOsHEX7+2hJkDxw1MteijaVcI+/eP2/x3J1xc= -github.com/libp2p/go-conn-security-multistream v0.2.0 h1:uNiDjS58vrvJTg9jO6bySd1rMKejieG7v45ekqHbZ1M= github.com/libp2p/go-conn-security-multistream v0.2.0/go.mod h1:hZN4MjlNetKD3Rq5Jb/P5ohUnFLNzEAR4DLSzpn2QLU= +github.com/libp2p/go-conn-security-multistream v0.2.1 h1:ft6/POSK7F+vl/2qzegnHDaXFU0iWB4yVTYrioC6Zy0= +github.com/libp2p/go-conn-security-multistream v0.2.1/go.mod h1:cR1d8gA0Hr59Fj6NhaTpFhJZrjSYuNmhpT2r25zYR70= github.com/libp2p/go-eventbus v0.1.0/go.mod h1:vROgu5cs5T7cv7POWlWxBaVLxfSegC5UGQf8A2eEmx4= github.com/libp2p/go-eventbus v0.2.1 h1:VanAdErQnpTioN2TowqNcOijf6YwhuODe4pPKSDpxGc= github.com/libp2p/go-eventbus v0.2.1/go.mod h1:jc2S4SoEVPP48H9Wpzm5aiGwUCBMfGhVhhBjyhhCJs8= @@ -1008,8 +990,9 @@ github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qD github.com/libp2p/go-libp2p v0.8.3/go.mod h1:EsH1A+8yoWK+L4iKcbPYu6MPluZ+CHWI9El8cTaefiM= github.com/libp2p/go-libp2p v0.9.2/go.mod h1:cunHNLDVus66Ct9iXXcjKRLdmHdFdHVe1TAnbubJQqQ= github.com/libp2p/go-libp2p v0.10.0/go.mod h1:yBJNpb+mGJdgrwbKAKrhPU0u3ogyNFTfjJ6bdM+Q/G8= -github.com/libp2p/go-libp2p v0.12.0 h1:+xai9RQnQ9l5elFOKvp5wRyjyWisSwEx+6nU2+onpUA= github.com/libp2p/go-libp2p v0.12.0/go.mod h1:FpHZrfC1q7nA8jitvdjKBDF31hguaC676g/nT9PgQM0= +github.com/libp2p/go-libp2p v0.14.2 h1:qs0ABtjjNjS+RIXT1uM7sMJEvIc0pq2nKR0VQxFXhHI= +github.com/libp2p/go-libp2p v0.14.2/go.mod h1:0PQMADQEjCM2l8cSMYDpTgsb8gr6Zq7i4LUgq1mlW2E= github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052 h1:BM7aaOF7RpmNn9+9g6uTjGJ0cTzWr5j9i9IKeun2M8U= github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052/go.mod h1:nRMRTab+kZuk0LnKZpxhOVH/ndsdr2Nr//Zltc/vwgo= github.com/libp2p/go-libp2p-autonat v0.0.6/go.mod h1:uZneLdOkZHro35xIhpbtTzLlgYturpu4J5+0cZK3MqE= @@ -1019,8 +1002,9 @@ github.com/libp2p/go-libp2p-autonat v0.2.0/go.mod h1:DX+9teU4pEEoZUqR1PiMlqliONQ github.com/libp2p/go-libp2p-autonat v0.2.1/go.mod 
h1:MWtAhV5Ko1l6QBsHQNSuM6b1sRkXrpk0/LqCr+vCVxI= github.com/libp2p/go-libp2p-autonat v0.2.2/go.mod h1:HsM62HkqZmHR2k1xgX34WuWDzk/nBwNHoeyyT4IWV6A= github.com/libp2p/go-libp2p-autonat v0.2.3/go.mod h1:2U6bNWCNsAG9LEbwccBDQbjzQ8Krdjge1jLTE9rdoMM= -github.com/libp2p/go-libp2p-autonat v0.4.0 h1:3y8XQbpr+ssX8QfZUHekjHCYK64sj6/4hnf/awD4+Ug= github.com/libp2p/go-libp2p-autonat v0.4.0/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk= +github.com/libp2p/go-libp2p-autonat v0.4.2 h1:YMp7StMi2dof+baaxkbxaizXjY1RPvU71CXfxExzcUU= +github.com/libp2p/go-libp2p-autonat v0.4.2/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk= github.com/libp2p/go-libp2p-blankhost v0.0.1/go.mod h1:Ibpbw/7cPPYwFb7PACIWdvxxv0t0XCCI10t7czjAjTc= github.com/libp2p/go-libp2p-blankhost v0.1.1/go.mod h1:pf2fvdLJPsC1FsVrNP3DUUvMzUts2dsLLBEpo1vW1ro= github.com/libp2p/go-libp2p-blankhost v0.1.4/go.mod h1:oJF0saYsAXQCSfDq254GMNmLNz6ZTHTOvtF4ZydUvwU= @@ -1059,8 +1043,12 @@ github.com/libp2p/go-libp2p-core v0.5.6/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX github.com/libp2p/go-libp2p-core v0.5.7/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= github.com/libp2p/go-libp2p-core v0.6.0/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= github.com/libp2p/go-libp2p-core v0.6.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= -github.com/libp2p/go-libp2p-core v0.7.0 h1:4a0TMjrWNTZlNvcqxZmrMRDi/NQWrhwO2pkTuLSQ/IQ= github.com/libp2p/go-libp2p-core v0.7.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-core v0.8.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-core v0.8.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-core v0.8.2/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-core v0.8.5 h1:aEgbIcPGsKy6zYcC+5AJivYFedhYa4sW7mIpWpUaLKw= +github.com/libp2p/go-libp2p-core v0.8.5/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= github.com/libp2p/go-libp2p-crypto v0.0.1/go.mod h1:yJkNyDmO341d5wwXxDUGO0LykUVT72ImHNUqh5D/dBE= github.com/libp2p/go-libp2p-crypto v0.0.2/go.mod h1:eETI5OUfBnvARGOHrJz2eWNyTUxEGZnBxMcbUjfIj4I= github.com/libp2p/go-libp2p-crypto v0.1.0 h1:k9MFy+o2zGDNGsaoZl0MA3iZ75qXxr9OOoAZF+sD5OQ= @@ -1091,8 +1079,10 @@ github.com/libp2p/go-libp2p-mplex v0.2.0/go.mod h1:Ejl9IyjvXJ0T9iqUTE1jpYATQ9NM3 github.com/libp2p/go-libp2p-mplex v0.2.1/go.mod h1:SC99Rxs8Vuzrf/6WhmH41kNn13TiYdAWNYHrwImKLnE= github.com/libp2p/go-libp2p-mplex v0.2.2/go.mod h1:74S9eum0tVQdAfFiKxAyKzNdSuLqw5oadDq7+L/FELo= github.com/libp2p/go-libp2p-mplex v0.2.3/go.mod h1:CK3p2+9qH9x+7ER/gWWDYJ3QW5ZxWDkm+dVvjfuG3ek= -github.com/libp2p/go-libp2p-mplex v0.3.0 h1:CZyqqKP0BSGQyPLvpRQougbfXaaaJZdGgzhCpJNuNSk= github.com/libp2p/go-libp2p-mplex v0.3.0/go.mod h1:l9QWxRbbb5/hQMECEb908GbS9Sm2UAR2KFZKUJEynEs= +github.com/libp2p/go-libp2p-mplex v0.4.0/go.mod h1:yCyWJE2sc6TBTnFpjvLuEJgTSw/u+MamvzILKdX7asw= +github.com/libp2p/go-libp2p-mplex v0.4.1 h1:/pyhkP1nLwjG3OM+VuaNJkQT/Pqq73WzB3aDN3Fx1sc= +github.com/libp2p/go-libp2p-mplex v0.4.1/go.mod h1:cmy+3GfqfM1PceHTLL7zQzAAYaryDu6iPSC+CIb094g= github.com/libp2p/go-libp2p-nat v0.0.4/go.mod h1:N9Js/zVtAXqaeT99cXgTV9e75KpnWCvVOiGzlcHmBbY= github.com/libp2p/go-libp2p-nat v0.0.5/go.mod h1:1qubaE5bTZMJE+E/uu2URroMbzdubFz1ChgiN79yKPE= github.com/libp2p/go-libp2p-nat v0.0.6 h1:wMWis3kYynCbHoyKLPBEMu4YRLltbm8Mk08HGSfvTkU= @@ -1103,8 +1093,9 @@ github.com/libp2p/go-libp2p-netutil v0.0.1/go.mod h1:GdusFvujWZI9Vt0X5BKqwWWmZFx github.com/libp2p/go-libp2p-netutil v0.1.0 
h1:zscYDNVEcGxyUpMd0JReUZTrpMfia8PmLKcKF72EAMQ= github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU= github.com/libp2p/go-libp2p-noise v0.1.1/go.mod h1:QDFLdKX7nluB7DEnlVPbz7xlLHdwHFA9HiohJRr3vwM= -github.com/libp2p/go-libp2p-noise v0.1.2 h1:IH9GRihQJTx56obm+GnpdPX4KeVIlvpXrP6xnJ0wxWk= github.com/libp2p/go-libp2p-noise v0.1.2/go.mod h1:9B10b7ueo7TIxZHHcjcDCo5Hd6kfKT2m77by82SFRfE= +github.com/libp2p/go-libp2p-noise v0.2.0 h1:wmk5nhB9a2w2RxMOyvsoKjizgJOEaJdfAakr0jN8gds= +github.com/libp2p/go-libp2p-noise v0.2.0/go.mod h1:IEbYhBBzGyvdLBoxxULL/SGbJARhUeqlO8lVSREYu2Q= github.com/libp2p/go-libp2p-peer v0.0.1/go.mod h1:nXQvOBbwVqoP+T5Y5nCjeH4sP9IX/J0AMzcDUVruVoo= github.com/libp2p/go-libp2p-peer v0.1.1/go.mod h1:jkF12jGB4Gk/IOo+yomm+7oLWxF278F7UnrYUQ1Q8es= github.com/libp2p/go-libp2p-peer v0.2.0 h1:EQ8kMjaCUwt/Y5uLgjT8iY2qg0mGUT0N1zUjer50DsY= @@ -1119,17 +1110,19 @@ github.com/libp2p/go-libp2p-peerstore v0.2.1/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRj github.com/libp2p/go-libp2p-peerstore v0.2.2/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA= github.com/libp2p/go-libp2p-peerstore v0.2.3/go.mod h1:K8ljLdFn590GMttg/luh4caB/3g0vKuY01psze0upRw= github.com/libp2p/go-libp2p-peerstore v0.2.4/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= -github.com/libp2p/go-libp2p-peerstore v0.2.6 h1:2ACefBX23iMdJU9Ke+dcXt3w86MIryes9v7In4+Qq3U= github.com/libp2p/go-libp2p-peerstore v0.2.6/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= +github.com/libp2p/go-libp2p-peerstore v0.2.7 h1:83JoLxyR9OYTnNfB5vvFqvMUv/xDNa6NoPHnENhBsGw= +github.com/libp2p/go-libp2p-peerstore v0.2.7/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= github.com/libp2p/go-libp2p-pnet v0.2.0 h1:J6htxttBipJujEjz1y0a5+eYoiPcFHhSYHH6na5f0/k= github.com/libp2p/go-libp2p-pnet v0.2.0/go.mod h1:Qqvq6JH/oMZGwqs3N1Fqhv8NVhrdYcO0BW4wssv21LA= github.com/libp2p/go-libp2p-protocol v0.0.1/go.mod h1:Af9n4PiruirSDjHycM1QuiMi/1VZNHYcK8cLgFJLZ4s= github.com/libp2p/go-libp2p-protocol v0.1.0/go.mod h1:KQPHpAabB57XQxGrXCNvbL6UEXfQqUgC/1adR2Xtflk= github.com/libp2p/go-libp2p-pubsub v0.3.2-0.20200527132641-c0712c6e92cf/go.mod h1:TxPOBuo1FPdsTjFnv+FGZbNbWYsp74Culx+4ViQpato= -github.com/libp2p/go-libp2p-pubsub v0.4.1/go.mod h1:izkeMLvz6Ht8yAISXjx60XUQZMq9ZMe5h2ih4dLIBIQ= github.com/libp2p/go-libp2p-pubsub v0.4.2-0.20210212194758-6c1addf493eb h1:HExLcdXn8fgtXPciUw97O5NNhBn31dt6d9fVUD4cngo= github.com/libp2p/go-libp2p-pubsub v0.4.2-0.20210212194758-6c1addf493eb/go.mod h1:izkeMLvz6Ht8yAISXjx60XUQZMq9ZMe5h2ih4dLIBIQ= github.com/libp2p/go-libp2p-quic-transport v0.5.0/go.mod h1:IEcuC5MLxvZ5KuHKjRu+dr3LjCT1Be3rcD/4d8JrX8M= +github.com/libp2p/go-libp2p-quic-transport v0.10.0 h1:koDCbWD9CCHwcHZL3/WEvP2A+e/o5/W5L3QS/2SPMA0= +github.com/libp2p/go-libp2p-quic-transport v0.10.0/go.mod h1:RfJbZ8IqXIhxBRm5hqUEJqjiiY8xmEuq3HUDS993MkA= github.com/libp2p/go-libp2p-record v0.0.1/go.mod h1:grzqg263Rug/sRex85QrDOLntdFAymLDLm7lxMgU79Q= github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q= github.com/libp2p/go-libp2p-record v0.1.1/go.mod h1:VRgKajOyMVgP/F0L5g3kH7SVskp17vFi2xheb5uMJtg= @@ -1151,8 +1144,9 @@ github.com/libp2p/go-libp2p-swarm v0.2.4/go.mod h1:/xIpHFPPh3wmSthtxdGbkHZ0OET1h github.com/libp2p/go-libp2p-swarm v0.2.7/go.mod h1:ZSJ0Q+oq/B1JgfPHJAT2HTall+xYRNYp1xs4S2FBWKA= github.com/libp2p/go-libp2p-swarm v0.2.8/go.mod h1:JQKMGSth4SMqonruY0a8yjlPVIkb0mdNSwckW7OYziM= github.com/libp2p/go-libp2p-swarm v0.3.0/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk= 
-github.com/libp2p/go-libp2p-swarm v0.3.1 h1:UTobu+oQHGdXTOGpZ4RefuVqYoJXcT0EBtSR74m2LkI= github.com/libp2p/go-libp2p-swarm v0.3.1/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk= +github.com/libp2p/go-libp2p-swarm v0.5.0 h1:HIK0z3Eqoo8ugmN8YqWAhD2RORgR+3iNXYG4U2PFd1E= +github.com/libp2p/go-libp2p-swarm v0.5.0/go.mod h1:sU9i6BoHE0Ve5SKz3y9WfKrh8dUat6JknzUehFx8xW4= github.com/libp2p/go-libp2p-testing v0.0.1/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= @@ -1160,8 +1154,9 @@ github.com/libp2p/go-libp2p-testing v0.0.4/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MB github.com/libp2p/go-libp2p-testing v0.1.0/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= github.com/libp2p/go-libp2p-testing v0.1.1/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= github.com/libp2p/go-libp2p-testing v0.1.2-0.20200422005655-8775583591d8/go.mod h1:Qy8sAncLKpwXtS2dSnDOP8ktexIAHKu+J+pnZOFZLTc= -github.com/libp2p/go-libp2p-testing v0.3.0 h1:ZiBYstPamsi7y6NJZebRudUzsYmVkt998hltyLqf8+g= github.com/libp2p/go-libp2p-testing v0.3.0/go.mod h1:efZkql4UZ7OVsEfaxNHZPzIehtsBXMrXnCfJIgDti5g= +github.com/libp2p/go-libp2p-testing v0.4.0 h1:PrwHRi0IGqOwVQWR3xzgigSlhlLfxgfXgkHxr77EghQ= +github.com/libp2p/go-libp2p-testing v0.4.0/go.mod h1:Q+PFXYoiYFN5CAEG2w3gLPEzotlKsNSbKQ/lImlOWF0= github.com/libp2p/go-libp2p-tls v0.1.3 h1:twKMhMu44jQO+HgQK9X8NHO5HkeJu2QbhLzLJpa8oNM= github.com/libp2p/go-libp2p-tls v0.1.3/go.mod h1:wZfuewxOndz5RTnCAxFliGjvYSDA40sKitV4c50uI1M= github.com/libp2p/go-libp2p-transport v0.0.1/go.mod h1:UzbUs9X+PHOSw7S3ZmeOxfnwaQY5vGDzZmKPod3N3tk= @@ -1169,8 +1164,9 @@ github.com/libp2p/go-libp2p-transport v0.0.5/go.mod h1:StoY3sx6IqsP6XKoabsPnHCwq github.com/libp2p/go-libp2p-transport-upgrader v0.0.4/go.mod h1:RGq+tupk+oj7PzL2kn/m1w6YXxcIAYJYeI90h6BGgUc= github.com/libp2p/go-libp2p-transport-upgrader v0.1.1/go.mod h1:IEtA6or8JUbsV07qPW4r01GnTenLW4oi3lOPbUMGJJA= github.com/libp2p/go-libp2p-transport-upgrader v0.2.0/go.mod h1:mQcrHj4asu6ArfSoMuyojOdjx73Q47cYD7s5+gZOlns= -github.com/libp2p/go-libp2p-transport-upgrader v0.3.0 h1:q3ULhsknEQ34eVDhv4YwKS8iet69ffs9+Fir6a7weN4= github.com/libp2p/go-libp2p-transport-upgrader v0.3.0/go.mod h1:i+SKzbRnvXdVbU3D1dwydnTmKRPXiAR/fyvi1dXuL4o= +github.com/libp2p/go-libp2p-transport-upgrader v0.4.2 h1:4JsnbfJzgZeRS9AWN7B9dPqn/LY/HoQTlO9gtdJTIYM= +github.com/libp2p/go-libp2p-transport-upgrader v0.4.2/go.mod h1:NR8ne1VwfreD5VIWIU62Agt/J18ekORFU/j1i2y8zvk= github.com/libp2p/go-libp2p-yamux v0.1.2/go.mod h1:xUoV/RmYkg6BW/qGxA9XJyg+HzXFYkeXbnhjmnYzKp8= github.com/libp2p/go-libp2p-yamux v0.1.3/go.mod h1:VGSQVrqkh6y4nm0189qqxMtvyBft44MOYYPpYKXiVt4= github.com/libp2p/go-libp2p-yamux v0.2.0/go.mod h1:Db2gU+XfLpm6E4rG5uGCFX6uXA8MEXOxFcRoXUODaK8= @@ -1180,19 +1176,23 @@ github.com/libp2p/go-libp2p-yamux v0.2.5/go.mod h1:Zpgj6arbyQrmZ3wxSZxfBmbdnWtbZ github.com/libp2p/go-libp2p-yamux v0.2.7/go.mod h1:X28ENrBMU/nm4I3Nx4sZ4dgjZ6VhLEn0XhIoZ5viCwU= github.com/libp2p/go-libp2p-yamux v0.2.8/go.mod h1:/t6tDqeuZf0INZMTgd0WxIRbtK2EzI2h7HbFm9eAKI4= github.com/libp2p/go-libp2p-yamux v0.4.0/go.mod h1:+DWDjtFMzoAwYLVkNZftoucn7PelNoy5nm3tZ3/Zw30= -github.com/libp2p/go-libp2p-yamux v0.4.1 h1:TJxRVPY9SjH7TNrNC80l1OJMBiWhs1qpKmeB+1Ug3xU= github.com/libp2p/go-libp2p-yamux v0.4.1/go.mod h1:FA/NjRYRVNjqOzpGuGqcruH7jAU2mYIjtKBicVOL3dc= +github.com/libp2p/go-libp2p-yamux v0.5.0/go.mod 
h1:AyR8k5EzyM2QN9Bbdg6X1SkVVuqLwTGf0L4DFq9g6po= +github.com/libp2p/go-libp2p-yamux v0.5.4 h1:/UOPtT/6DHPtr3TtKXBHa6g0Le0szYuI33Xc/Xpd7fQ= +github.com/libp2p/go-libp2p-yamux v0.5.4/go.mod h1:tfrXbyaTqqSU654GTvK3ocnSZL3BuHoeTSqhcel1wsE= github.com/libp2p/go-maddr-filter v0.0.1/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= github.com/libp2p/go-maddr-filter v0.0.4/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= github.com/libp2p/go-maddr-filter v0.0.5/go.mod h1:Jk+36PMfIqCJhAnaASRH83bdAvfDRp/w6ENFaC9bG+M= +github.com/libp2p/go-maddr-filter v0.1.0 h1:4ACqZKw8AqiuJfwFGq1CYDFugfXTOos+qQ3DETkhtCE= github.com/libp2p/go-maddr-filter v0.1.0/go.mod h1:VzZhTXkMucEGGEOSKddrwGiOv0tUhgnKqNEmIAz/bPU= github.com/libp2p/go-mplex v0.0.3/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTWe7l4Yd0= github.com/libp2p/go-mplex v0.0.4/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTWe7l4Yd0= github.com/libp2p/go-mplex v0.1.0/go.mod h1:SXgmdki2kwCUlCCbfGLEgHjC4pFqhTp0ZoV6aiKgxDU= github.com/libp2p/go-mplex v0.1.1/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk= github.com/libp2p/go-mplex v0.1.2/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk= -github.com/libp2p/go-mplex v0.2.0 h1:Ov/D+8oBlbRkjBs1R1Iua8hJ8cUfbdiW8EOdZuxcgaI= github.com/libp2p/go-mplex v0.2.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ= +github.com/libp2p/go-mplex v0.3.0 h1:U1T+vmCYJaEoDJPV1aq31N56hS+lJgb397GsylNSgrU= +github.com/libp2p/go-mplex v0.3.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ= github.com/libp2p/go-msgio v0.0.2/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= github.com/libp2p/go-msgio v0.0.3/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= @@ -1203,8 +1203,9 @@ github.com/libp2p/go-nat v0.0.4/go.mod h1:Nmw50VAvKuk38jUBcmNh6p9lUJLoODbJRvYAa/ github.com/libp2p/go-nat v0.0.5 h1:qxnwkco8RLKqVh1NmjQ+tJ8p8khNLFxuElYG/TwqW4Q= github.com/libp2p/go-nat v0.0.5/go.mod h1:B7NxsVNPZmRLvMOwiEO1scOSyjA56zxYAGv1yQgRkEU= github.com/libp2p/go-netroute v0.1.2/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= -github.com/libp2p/go-netroute v0.1.3 h1:1ngWRx61us/EpaKkdqkMjKk/ufr/JlIFYQAxV2XX8Ig= github.com/libp2p/go-netroute v0.1.3/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= +github.com/libp2p/go-netroute v0.1.6 h1:ruPJStbYyXVYGQ81uzEDzuvbYRLKRrLvTYd33yomC38= +github.com/libp2p/go-netroute v0.1.6/go.mod h1:AqhkMh0VuWmfgtxKPp3Oc1LdU5QSWS7wl0QLhSZqXxQ= github.com/libp2p/go-openssl v0.0.2/go.mod h1:v8Zw2ijCSWBQi8Pq5GAixw6DbFfa9u6VIYDXnvOXkc0= github.com/libp2p/go-openssl v0.0.3/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= @@ -1219,8 +1220,9 @@ github.com/libp2p/go-reuseport-transport v0.0.3/go.mod h1:Spv+MPft1exxARzP2Sruj2 github.com/libp2p/go-reuseport-transport v0.0.4 h1:OZGz0RB620QDGpv300n1zaOcKGGAoGVf8h9txtt/1uM= github.com/libp2p/go-reuseport-transport v0.0.4/go.mod h1:trPa7r/7TJK/d+0hdBLOCGvpQQVOU74OXbNCIMkufGw= github.com/libp2p/go-sockaddr v0.0.2/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= -github.com/libp2p/go-sockaddr v0.1.0 h1:Y4s3/jNoryVRKEBrkJ576F17CPOaMIzUeCsg7dlTDj0= github.com/libp2p/go-sockaddr v0.1.0/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= +github.com/libp2p/go-sockaddr v0.1.1 h1:yD80l2ZOdGksnOyHrhxDdTDFrf7Oy+v3FMVArIRgZxQ= +github.com/libp2p/go-sockaddr v0.1.1/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= github.com/libp2p/go-stream-muxer 
v0.0.1/go.mod h1:bAo8x7YkSpadMTbtTaxGVHWUQsR/l5MEaHbKaliuT14= github.com/libp2p/go-stream-muxer v0.1.0/go.mod h1:8JAVsjeRBCWwPoZeH0W1imLOcriqXJyFvB0mR4A04sQ= github.com/libp2p/go-stream-muxer-multistream v0.1.1/go.mod h1:zmGdfkQ1AzOECIAcccoL8L//laqawOsO03zX8Sa+eGw= @@ -1239,8 +1241,9 @@ github.com/libp2p/go-ws-transport v0.0.5/go.mod h1:Qbl4BxPfXXhhd/o0wcrgoaItHqA9t github.com/libp2p/go-ws-transport v0.1.0/go.mod h1:rjw1MG1LU9YDC6gzmwObkPd/Sqwhw7yT74kj3raBFuo= github.com/libp2p/go-ws-transport v0.2.0/go.mod h1:9BHJz/4Q5A9ludYWKoGCFC5gUElzlHoKzu0yY9p/klM= github.com/libp2p/go-ws-transport v0.3.0/go.mod h1:bpgTJmRZAvVHrgHybCVyqoBmyLQ1fiZuEaBYusP5zsk= -github.com/libp2p/go-ws-transport v0.3.1 h1:ZX5rWB8nhRRJVaPO6tmkGI/Xx8XNboYX20PW5hXIscw= github.com/libp2p/go-ws-transport v0.3.1/go.mod h1:bpgTJmRZAvVHrgHybCVyqoBmyLQ1fiZuEaBYusP5zsk= +github.com/libp2p/go-ws-transport v0.4.0 h1:9tvtQ9xbws6cA5LvqdE6Ne3vcmGB4f1z9SByggk4s0k= +github.com/libp2p/go-ws-transport v0.4.0/go.mod h1:EcIEKqf/7GDjth6ksuS/6p7R49V4CBY6/E7R/iyhYUA= github.com/libp2p/go-yamux v1.2.1/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= github.com/libp2p/go-yamux v1.2.2/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= github.com/libp2p/go-yamux v1.2.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= @@ -1252,10 +1255,14 @@ github.com/libp2p/go-yamux v1.3.7/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/h github.com/libp2p/go-yamux v1.4.0/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= github.com/libp2p/go-yamux v1.4.1 h1:P1Fe9vF4th5JOxxgQvfbOHkrGqIZniTLf+ddhZp8YTI= github.com/libp2p/go-yamux v1.4.1/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= +github.com/libp2p/go-yamux/v2 v2.2.0 h1:RwtpYZ2/wVviZ5+3pjC8qdQ4TKnrak0/E01N1UWoAFU= +github.com/libp2p/go-yamux/v2 v2.2.0/go.mod h1:3So6P6TV6r75R9jiBpiIKgU/66lOarCZjqROGxzPpPQ= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= github.com/lucas-clemente/quic-go v0.16.0/go.mod h1:I0+fcNTdb9eS1ZcjQZbDVPGchJ86chcIxPALn9lEJqE= +github.com/lucas-clemente/quic-go v0.19.3 h1:eCDQqvGBB+kCTkA0XrAFtNe81FMa0/fn4QSoeAbmiF4= +github.com/lucas-clemente/quic-go v0.19.3/go.mod h1:ADXpNbTQjq1hIzCpB+y/k5iz4n4z4IwqoLb94Kh5Hu8= github.com/lufia/iostat v1.1.0/go.mod h1:rEPNA0xXgjHQjuI5Cy05sLlS2oRcSlWHRLrvh/AQ+Pg= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= @@ -1269,8 +1276,12 @@ github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/maratori/testpackage v1.0.1/go.mod h1:ddKdw+XG0Phzhx8BFDTKgpWP4i7MpApTE5fXSKAqwDU= github.com/marten-seemann/qpack v0.1.0/go.mod h1:LFt1NU/Ptjip0C2CPkhimBz5CGE3WGDAUWqna+CNTrI= +github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc= github.com/marten-seemann/qtls v0.9.1/go.mod h1:T1MmAdDPyISzxlK6kjRr0pcZFBVd1OZbBb/j3cvzHhk= -github.com/matoous/godox v0.0.0-20190911065817-5d6d842e92eb/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= +github.com/marten-seemann/qtls v0.10.0 
h1:ECsuYUKalRL240rRD4Ri33ISb7kAQ3qGDlrrl55b2pc= +github.com/marten-seemann/qtls v0.10.0/go.mod h1:UvMd1oaYDACI99/oZUYLzMCkBXQVT0aGm99sJhbT8hs= +github.com/marten-seemann/qtls-go1-15 v0.1.1 h1:LIH6K34bPVttyXnUWixk0bzH6/N07VxbSabxn5A5gZQ= +github.com/marten-seemann/qtls-go1-15 v0.1.1/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= github.com/matoous/godox v0.0.0-20210227103229-6504466cf951/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= github.com/mattn/go-colorable v0.0.0-20170327083344-ded68f7a9561/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= @@ -1278,7 +1289,6 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= @@ -1295,7 +1305,6 @@ github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzp github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-runewidth v0.0.8/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= @@ -1322,6 +1331,8 @@ github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3N github.com/miekg/dns v1.1.28/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/miekg/dns v1.1.41 h1:WMszZWJG0XmzbK9FEmzH2TVcqYzFesusSIB41b8KHxY= +github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= @@ -1338,7 +1349,6 @@ github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFW github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-ps v0.0.0-20190716172923-621e5597135b/go.mod h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk= github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= 
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= @@ -1360,7 +1370,6 @@ github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwd github.com/moricho/tparallel v0.2.1/go.mod h1:fXEIZxG2vdfl0ZF8b42f5a78EhjjD5mX8qUplsoSU4k= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mozilla/scribe v0.0.0-20180711195314-fb71baf557c1/go.mod h1:FIczTrinKo8VaLxe6PWTPEXRXDIHz2QAwiaBaP5/4a8= -github.com/mozilla/tls-observatory v0.0.0-20190404164649-a3c1b6cfecfd/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= github.com/mozilla/tls-observatory v0.0.0-20210209181001-cf43108d6880/go.mod h1:FUqVoUPHSEdDR0MnFM3Dh8AU0pZHLXUD127SAJGER/s= github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= github.com/mr-tron/base58 v1.1.1/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= @@ -1386,8 +1395,9 @@ github.com/multiformats/go-multiaddr v0.3.3 h1:vo2OTSAqnENB2rLk79pLtr+uhj+VAzSe3 github.com/multiformats/go-multiaddr v0.3.3/go.mod h1:lCKNGP1EQ1eZ35Za2wlqnabm9xQkib3fyB+nZXHLag0= github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= -github.com/multiformats/go-multiaddr-dns v0.2.0 h1:YWJoIDwLePniH7OU5hBnDZV6SWuvJqJ0YtN6pLeH9zA= github.com/multiformats/go-multiaddr-dns v0.2.0/go.mod h1:TJ5pr5bBO7Y1B18djPuRsVkduhQH2YqYSbxWJzYGdK0= +github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A= +github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk= github.com/multiformats/go-multiaddr-fmt v0.0.1/go.mod h1:aBYjqL4T/7j4Qx+R73XSv/8JsgnRFlf0w2KGLCmXl3Q= github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= @@ -1416,8 +1426,10 @@ github.com/multiformats/go-multistream v0.0.1/go.mod h1:fJTiDfXJVmItycydCnNx4+wS github.com/multiformats/go-multistream v0.0.4/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= github.com/multiformats/go-multistream v0.1.1/go.mod h1:KmHZ40hzVxiaiwlj3MEbYgK9JFk2/9UktWZAF54Du38= -github.com/multiformats/go-multistream v0.2.0 h1:6AuNmQVKUkRnddw2YiDjt5Elit40SFxMJkVnhmETXtU= github.com/multiformats/go-multistream v0.2.0/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= +github.com/multiformats/go-multistream v0.2.1/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= +github.com/multiformats/go-multistream v0.2.2 h1:TCYu1BHTDr1F/Qm75qwYISQdzGcRdC21nFgQW7l7GBo= +github.com/multiformats/go-multistream v0.2.2/go.mod h1:UIcnm7Zuo8HKG+HkWgfQsGL+/MIEhyTqbODbIUwSXKs= github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.2/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= @@ -1435,11 +1447,9 @@ github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzE github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nkeys v0.1.3/go.mod 
h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= -github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nikkolasg/hexjson v0.0.0-20181101101858-78e39397e00c h1:5bFTChQxSKNwy8ALwOebjekYExl9HTT9urdawqC95tA= github.com/nikkolasg/hexjson v0.0.0-20181101101858-78e39397e00c/go.mod h1:7qN3Y0BvzRUf4LofcoJplQL10lsFDb4PYlePTVwrP28= @@ -1458,22 +1468,19 @@ github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6 github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg= +github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= @@ -1537,6 +1544,7 @@ github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3O github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.9.0/go.mod 
h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU= +github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU= github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= @@ -1556,6 +1564,7 @@ github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt2 github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.25.0/go.mod h1:H6QK/N6XVT42whUeIdI3dp36w49c+/iMDk7UAI2qm7Q= github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= @@ -1610,7 +1619,6 @@ github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/sanposhiho/wastedassign v0.2.0/go.mod h1:LGpq5Hsv74QaqM47WtIsRSF/ik9kqk07kchgv66tLVE= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/securego/gosec v0.0.0-20191002120514-e680875ea14d/go.mod h1:w5+eXa0mYznDkHaMCXA4XYffjlH+cy1oyKbfzJXa2Do= github.com/securego/gosec/v2 v2.7.0/go.mod h1:xNbGArrGUspJLuz3LS5XCY1EBW/0vABAl/LWfSklmiM= github.com/sercand/kuberesolver v2.1.0+incompatible/go.mod h1:lWF3GL0xptCB/vCiJPl/ZshwPsX/n4Y7u0CW9E7aQIQ= github.com/sercand/kuberesolver v2.4.0+incompatible h1:WE2OlRf6wjLxHwNkkFLQGaZcVLEXjMjBPjjEU5vksH8= @@ -1618,9 +1626,7 @@ github.com/sercand/kuberesolver v2.4.0+incompatible/go.mod h1:lWF3GL0xptCB/vCiJP github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= -github.com/shirou/gopsutil v0.0.0-20190901111213-e4ec7b275ada/go.mod h1:WWnYX4lzhCH5h/3YBfyVA3VbLYjlMZZAQcW9ojMexNc= github.com/shirou/gopsutil/v3 v3.21.2/go.mod h1:ghfMypLDrFSWN2c9cDYFLHyynQ+QUht0cv/18ZqVczw= -github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= @@ -1667,7 +1673,6 @@ github.com/sonatard/noctx v0.0.1/go.mod h1:9D2D/EoULe8Yy2joDHJj7bv3sZoq9AaSb8B4l github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/soundcloud/go-runit v0.0.0-20150630195641-06ad41a06c4a/go.mod h1:LeFCbQYJ3KJlPs/FvPz2dy1tkpxyeNESVyCNNzRXFR0= github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod 
h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= -github.com/sourcegraph/go-diff v0.5.1/go.mod h1:j2dHj3m8aZgQO8lMTcTnBcXkRRRqi34cd2MNlA9u1mE= github.com/sourcegraph/go-diff v0.6.1/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a/go.mod h1:7AyxJNCJ7SBZ1MfVQCWD6Uqo2oubI2Eq2y2eqf+A5r0= @@ -1695,7 +1700,6 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spf13/viper v1.7.1 h1:pM5oEahlgWv/WnHXpgbKz7iLIxRf65tye2Ci+XFK5sk= github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= @@ -1725,7 +1729,6 @@ github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cb github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= github.com/tetafro/godot v1.4.4/go.mod h1:FVDd4JuKliW3UgjswZfJfHq4vAx0bD/Jd5brJjGeaz4= github.com/texttheater/golang-levenshtein v0.0.0-20180516184445-d188e65d659e/go.mod h1:XDKHRm5ThF8YJjx001LtgelzsoaEcvnA7lVWz9EeX3g= -github.com/timakin/bodyclose v0.0.0-20190930140734-f7f2e9bca95e/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= github.com/tj/go-spin v1.1.0 h1:lhdWZsvImxvZ3q1C5OIB7d72DuOwP4O2NdBg9PyzNds= github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4= @@ -1745,7 +1748,6 @@ github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMW github.com/uber/jaeger-lib v1.5.1-0.20181102163054-1fc5c315e03c/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/uber/jaeger-lib v2.2.0+incompatible h1:MxZXOiR2JuoANZ3J6DE/U0kSFv/eJ/GfSYVCjK7dyaw= github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= -github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go v1.2.4 h1:cTciPbZ/VSOzCLKclmssnfQ/jyoVyOcJ3aoJyUV1Urc= github.com/ugorji/go v1.2.4/go.mod h1:EuaSCk8iZMdIspsu6HXH7X2UGKw1ezO4wCfGszGmmo4= @@ -1753,7 +1755,6 @@ github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljT github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/ugorji/go/codec v1.2.4 h1:C5VurWRRCKjuENsbM6GYVw8W++WVW9rSxoACKIvxzz8= github.com/ugorji/go/codec v1.2.4/go.mod h1:bWBu1+kIRWcF8uMklKaJrR6fTWQOwAlrIzX22pHwryA= -github.com/ultraware/funlen v0.0.2/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= @@ -1763,14 +1764,11 @@ github.com/urfave/cli/v2 v2.0.0/go.mod 
h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2 github.com/urfave/cli/v2 v2.2.0/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= github.com/urfave/cli/v2 v2.3.0 h1:qph92Y649prgesehzOrQjdWyxFOp/QVM+6imKHad91M= github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= -github.com/uudashr/gocognit v0.0.0-20190926065955-1655d0de0517/go.mod h1:j44Ayx2KW4+oB6SWMv8KsmHzZrOInQav7D3cQMJ5JUM= github.com/uudashr/gocognit v1.0.1/go.mod h1:j44Ayx2KW4+oB6SWMv8KsmHzZrOInQav7D3cQMJ5JUM= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasthttp v1.2.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s= github.com/valyala/fasthttp v1.16.0/go.mod h1:YOKImeEosDdBPnxc0gy7INqi3m1zK6A+xl6TwOBhHCA= github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= github.com/valyala/fasttemplate v1.1.0/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= -github.com/valyala/quicktemplate v1.2.0/go.mod h1:EH+4AkTd43SvgIbQHYu59/cJyxDoOVRUAfrukLPuGJ4= github.com/valyala/quicktemplate v1.6.3/go.mod h1:fwPzK2fHuYEODzJ9pkw0ipCPNHZ2tD5KW4lOuSdPKzY= github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= @@ -1865,7 +1863,6 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/otel v0.13.0/go.mod h1:dlSNewoRYikTkotEnxdmuBHgzT+k/idJSfDv/FxEnOY= go.starlark.net v0.0.0-20190702223751-32f345186213/go.mod h1:c1/X6cHgvdXj6pUlmWKMkuqRnW4K8x2vwt6JAaaircg= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -1920,7 +1917,6 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -1982,10 +1978,10 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -2032,15 +2028,16 @@ golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 h1:4nGaVu0QrbjT/AK2PRLuQfQuh6DJve+pELhqTdAj3x0= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6 h1:0PC75Fz/kyMGhL0e1QnypqK2kQMqKt9csD1GnMJR+Zk= +golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -2099,7 +2096,6 @@ golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190902133755-9109b7679e13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2144,8 +2140,12 @@ golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod 
h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210217105451-b926d437f341/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210228012217-479acdf4ea46/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40 h1:JWgyZ1qgdTaF3N3oxC+MdTV7qvEEgHo3otj+HB5CM7Q= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2156,8 +2156,9 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2169,7 +2170,6 @@ golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181117154741-2ddaf7f79a09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181130052023-1c3d964395ce/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -2184,18 +2184,15 @@ golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools 
v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190521203540-521d6ed310dd/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190719005602-e377ae9d6386/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190916130336-e45ffcd953cc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190930201159-7c411dea38b0/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191010075000-0337d82405ff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -2385,8 +2382,9 @@ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLks gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/cheggaaa/pb.v1 v1.0.28 h1:n1tBJnnK2r7g9OW2btFH91V92STTUevLXYFb8gy9EMk= gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= @@ -2423,8 +2421,8 @@ gorm.io/gorm v1.20.7/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw= gorm.io/gorm v1.20.12/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw= gorm.io/gorm v1.21.3/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw= gorm.io/gorm v1.21.9/go.mod h1:F+OptMscr0P2F2qU97WT1WimdH9GaQPoDW7AYd5i2Y0= -gorm.io/gorm v1.21.11 h1:CxkXW6Cc+VIBlL8yJEHq+Co4RYXdSLiMKNvgoZPjLK4= -gorm.io/gorm v1.21.11/go.mod h1:F+OptMscr0P2F2qU97WT1WimdH9GaQPoDW7AYd5i2Y0= +gorm.io/gorm v1.21.12 h1:3fQM0Eiz7jcJEhPggHEpoYnsGZqynMzverL77DV40RM= +gorm.io/gorm v1.21.12/go.mod h1:F+OptMscr0P2F2qU97WT1WimdH9GaQPoDW7AYd5i2Y0= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools 
v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= @@ -2457,7 +2455,6 @@ modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= mvdan.cc/gofumpt v0.1.1/go.mod h1:yXG1r1WqZVKWbVRtBWKWX9+CxGYfA51nSomhM0woR48= mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= -mvdan.cc/unparam v0.0.0-20190720180237-d51796306d8f/go.mod h1:4G1h5nDURzA3bwVMZIVpwbkw+04kSxk3rAtzlimaUJw= mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7/go.mod h1:hBpJkZE8H/sb+VRFvw2+rBpHNsTBcvSpk61hr8mzXZE= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= diff --git a/journal/types.go b/journal/types.go index 4c5ba54d..2f141a3e 100644 --- a/journal/types.go +++ b/journal/types.go @@ -5,7 +5,7 @@ import ( "strings" "time" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" ) var log = logging.Logger("journal") diff --git a/lib/blockstore/fallbackstore.go b/lib/blockstore/fallbackstore.go index 0ce397d4..edb008b4 100644 --- a/lib/blockstore/fallbackstore.go +++ b/lib/blockstore/fallbackstore.go @@ -10,7 +10,7 @@ import ( blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" blockstore "github.com/ipfs/go-ipfs-blockstore" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" ) var log = logging.Logger("blockstore") diff --git a/lib/ulimit/ulimit.go b/lib/ulimit/ulimit.go index f9999cf6..16bd4c9c 100644 --- a/lib/ulimit/ulimit.go +++ b/lib/ulimit/ulimit.go @@ -8,7 +8,7 @@ import ( "strconv" "syscall" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" ) var log = logging.Logger("ulimit") diff --git a/models/sqlite/worker_call.go b/models/sqlite/worker_call.go index 15fbce68..616d8866 100644 --- a/models/sqlite/worker_call.go +++ b/models/sqlite/worker_call.go @@ -10,7 +10,7 @@ import ( type workerCall struct { Id string `gorm:"column:id;type:varchar(36);primary_key;" json:"id"` // primary key - //storiface.CallID + //types.CallID WorkId string `gorm:"uniqueIndex:call_id;column:work_id;type:varchar(36);" json:"work_id"` MinerID uint64 `gorm:"uniqueIndex:call_id;column:miner_id;type:unsigned bigint;" json:"miner_id"` SectorId uint64 `gorm:"uniqueIndex:call_id;column:sector_id;type:unsigned bigint;" json:"sector_id"` diff --git a/modules.go b/modules.go index 8ed84c21..8a1b2b72 100644 --- a/modules.go +++ b/modules.go @@ -143,13 +143,22 @@ func SectorIDCounter(metaDataService *service.MetadataService) types2.SectorIDCo var WorkerCallsPrefix = datastore.NewKey("/worker/calls") var ManagerWorkPrefix = datastore.NewKey("/stmgr/calls") -func SectorStorage(mctx MetricsCtx, lc fx.Lifecycle, ls stores.LocalStorage, si stores.SectorIndex, sc sectorstorage.SealerConfig, urls sectorstorage.URLs, sa sectorstorage.StorageAuth, repo repo.Repo) (*sectorstorage.Manager, error) { +func LocalStorage(mctx MetricsCtx, lc fx.Lifecycle, ls stores.LocalStorage, si stores.SectorIndex, urls sectorstorage.URLs) (*stores.Local, error) { + ctx := LifecycleCtx(mctx, lc) + return stores.NewLocal(ctx, ls, si, urls) +} + +func RemoteStorage(lstor *stores.Local, si stores.SectorIndex, sa sectorstorage.StorageAuth, sc sectorstorage.SealerConfig) *stores.Remote { + return stores.NewRemote(lstor,
si, http.Header(sa), sc.ParallelFetchLimit, &stores.DefaultPartialFileHandler{}) +} + +func SectorStorage(mctx MetricsCtx, lc fx.Lifecycle, lstor *stores.Local, stor *stores.Remote, ls stores.LocalStorage, si stores.SectorIndex, sc sectorstorage.SealerConfig, repo repo.Repo) (*sectorstorage.Manager, error) { ctx := LifecycleCtx(mctx, lc) wsts := service.NewWorkCallService(repo, "sealer") smsts := service.NewWorkStateService(repo) - sst, err := sectorstorage.New(ctx, ls, si, sc, urls, sa, wsts, smsts) + sst, err := sectorstorage.New(ctx, lstor, stor, ls, si, sc, wsts, smsts) if err != nil { return nil, err } diff --git a/rpc.go b/rpc.go new file mode 100644 index 00000000..cc949cba --- /dev/null +++ b/rpc.go @@ -0,0 +1,80 @@ +package venus_sealer + +import ( + "context" + "net" + "net/http" + _ "net/http/pprof" + + mux "github.com/gorilla/mux" + "github.com/multiformats/go-multiaddr" + manet "github.com/multiformats/go-multiaddr/net" + "go.opencensus.io/tag" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-jsonrpc" + "github.com/filecoin-project/go-jsonrpc/auth" + + "github.com/filecoin-project/venus-sealer/api" + "github.com/filecoin-project/venus-sealer/api/impl" +) + +// ServeRPC serves an HTTP handler over the supplied listen multiaddr. +// +// This function spawns a goroutine to run the server, and returns immediately. +// It returns the stop function to be called to terminate the endpoint. +// +// The supplied ID is used in tracing, by inserting a tag in the context. +func ServeRPC(h http.Handler, id string, addr multiaddr.Multiaddr) (StopFunc, error) { + // Start listening to the addr; if invalid or occupied, we will fail early. + lst, err := manet.Listen(addr) + if err != nil { + return nil, xerrors.Errorf("could not listen: %w", err) + } + + // Instantiate the server and start listening. 
+ srv := &http.Server{ + Handler: h, + BaseContext: func(listener net.Listener) context.Context { + key, _ := tag.NewKey("api") + ctx, _ := tag.New(context.Background(), tag.Upsert(key, id)) + return ctx + }, + } + + go func() { + err = srv.Serve(manet.NetListener(lst)) + if err != http.ErrServerClosed { + log.Warnf("rpc server failed: %s", err) + } + }() + + return srv.Shutdown, err +} + +func MinerHandler(mapi api.StorageMiner, permissioned bool) (http.Handler, error) { + m := mux.NewRouter() + + rpcServer := jsonrpc.NewServer() + rpcServer.Register("Filecoin", mapi) + + m.Handle("/rpc/v0", rpcServer) + m.PathPrefix("/remote").HandlerFunc(mapi.(*impl.StorageMinerAPI).ServeRemote) + + // debugging + // m.Handle("/debug/metrics", metrics.Exporter()) + m.PathPrefix("/").Handler(http.DefaultServeMux) // pprof + + if !permissioned { + return rpcServer, nil + } + + ah := &auth.Handler{ + Verify: mapi.AuthVerify, + Next: m.ServeHTTP, + } + + return ah, nil +} + diff --git a/sector-storage/ffiwrapper/sealer_cgo.go b/sector-storage/ffiwrapper/sealer_cgo.go index 84c69caa..4806edf2 100644 --- a/sector-storage/ffiwrapper/sealer_cgo.go +++ b/sector-storage/ffiwrapper/sealer_cgo.go @@ -16,13 +16,14 @@ import ( ffi "github.com/filecoin-project/filecoin-ffi" rlepluslazy "github.com/filecoin-project/go-bitfield/rle" + commpffi "github.com/filecoin-project/go-commp-utils/ffiwrapper" + "github.com/filecoin-project/go-commp-utils/zerocomm" commcid "github.com/filecoin-project/go-fil-commcid" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/specs-storage/storage" - commpffi "github.com/filecoin-project/go-commp-utils/ffiwrapper" - "github.com/filecoin-project/go-commp-utils/zerocomm" "github.com/filecoin-project/venus-sealer/sector-storage/fr32" + "github.com/filecoin-project/venus-sealer/sector-storage/partialfile" "github.com/filecoin-project/venus-sealer/sector-storage/storiface" ) @@ -66,7 +67,7 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector storage.SectorRef, existi } var done func() - var stagedFile *partialFile + var stagedFile *partialfile.PartialFile defer func() { if done != nil { @@ -87,7 +88,7 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector storage.SectorRef, existi return abi.PieceInfo{}, xerrors.Errorf("acquire unsealed sector: %w", err) } - stagedFile, err = createPartialFile(maxPieceSize, stagedPath.Unsealed) + stagedFile, err = partialfile.CreatePartialFile(maxPieceSize, stagedPath.Unsealed) if err != nil { return abi.PieceInfo{}, xerrors.Errorf("creating unsealed sector file: %w", err) } @@ -97,7 +98,7 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector storage.SectorRef, existi return abi.PieceInfo{}, xerrors.Errorf("acquire unsealed sector: %w", err) } - stagedFile, err = openPartialFile(maxPieceSize, stagedPath.Unsealed) + stagedFile, err = partialfile.OpenPartialFile(maxPieceSize, stagedPath.Unsealed) if err != nil { return abi.PieceInfo{}, xerrors.Errorf("opening unsealed sector file: %w", err) } @@ -195,12 +196,16 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector storage.SectorRef, existi return piecePromises[0]() } + var payloadRoundedBytes abi.PaddedPieceSize pieceCids := make([]abi.PieceInfo, len(piecePromises)) for i, promise := range piecePromises { - pieceCids[i], err = promise() + pinfo, err := promise() if err != nil { return abi.PieceInfo{}, err } + + pieceCids[i] = pinfo + payloadRoundedBytes += pinfo.Size } pieceCID, err := ffi.GenerateUnsealedCID(sector.ProofType, pieceCids) @@ 
-213,6 +218,15 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector storage.SectorRef, existi return abi.PieceInfo{}, err } + if payloadRoundedBytes < pieceSize.Padded() { + paddedCid, err := commpffi.ZeroPadPieceCommitment(pieceCID, payloadRoundedBytes.Unpadded(), pieceSize) + if err != nil { + return abi.PieceInfo{}, xerrors.Errorf("failed to pad data: %w", err) + } + + pieceCID = paddedCid + } + return abi.PieceInfo{ Size: pieceSize.Padded(), PieceCID: pieceCID, @@ -244,7 +258,7 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector storage.SectorRef, off // try finding existing unsealedPath, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed, storiface.FTNone, storiface.PathStorage) - var pf *partialFile + var pf *partialfile.PartialFile switch { case xerrors.Is(err, storiface.ErrSectorNotFound): @@ -254,7 +268,7 @@ } defer done() - pf, err = createPartialFile(maxPieceSize, unsealedPath.Unsealed) + pf, err = partialfile.CreatePartialFile(maxPieceSize, unsealedPath.Unsealed) if err != nil { return xerrors.Errorf("create unsealed file: %w", err) } @@ -262,7 +276,7 @@ case err == nil: defer done() - pf, err = openPartialFile(maxPieceSize, unsealedPath.Unsealed) + pf, err = partialfile.OpenPartialFile(maxPieceSize, unsealedPath.Unsealed) if err != nil { return xerrors.Errorf("opening partial file: %w", err) } @@ -414,7 +428,7 @@ func (sb *Sealer) ReadPiece(ctx context.Context, writer io.Writer, sector storag } maxPieceSize := abi.PaddedPieceSize(ssize) - pf, err := openPartialFile(maxPieceSize, path.Unsealed) + pf, err := partialfile.OpenPartialFile(maxPieceSize, path.Unsealed) if err != nil { if xerrors.Is(err, os.ErrNotExist) { return false, nil @@ -576,7 +590,7 @@ func (sb *Sealer) FinalizeSector(ctx context.Context, sector storage.SectorRef, if len(keepUnsealed) > 0 { - sr := pieceRun(0, maxPieceSize) + sr := partialfile.PieceRun(0, maxPieceSize) for _, s := range keepUnsealed { si := &rlepluslazy.RunSliceIterator{} @@ -598,7 +612,7 @@ } defer done() - pf, err := openPartialFile(maxPieceSize, paths.Unsealed) + pf, err := partialfile.OpenPartialFile(maxPieceSize, paths.Unsealed) if err == nil { var at uint64 for sr.HasNext() { diff --git a/sector-storage/ffiwrapper/sealer_test.go b/sector-storage/ffiwrapper/sealer_test.go index 32795224..7e223ec8 100644 --- a/sector-storage/ffiwrapper/sealer_test.go +++ b/sector-storage/ffiwrapper/sealer_test.go @@ -31,6 +31,7 @@ import ( "github.com/filecoin-project/specs-storage/storage" ffi "github.com/filecoin-project/filecoin-ffi" + "github.com/filecoin-project/filecoin-ffi/generated" "github.com/filecoin-project/venus/pkg/specactors/policy" @@ -253,7 +254,7 @@ func getGrothParamFileAndVerifyingKeys(s abi.SectorSize) { // go test -run=^TestDownloadParams // func TestDownloadParams(t *testing.T) { - defer requireFDsClosed(t, openFDs(t)) + // defer requireFDsClosed(t, openFDs(t)) flaky, likely because of how go-embed works with param files getGrothParamFileAndVerifyingKeys(sectorSize) } @@ -812,3 +813,130 @@ func BenchmarkAddPiece512M(b *testing.B) { fmt.Println(c) } } + +func TestAddPiece512MPadded(t *testing.T) { + sz := abi.PaddedPieceSize(512 << 20).Unpadded() + + cdir, err := ioutil.TempDir("", "sbtest-c-") + if err != nil { + t.Fatal(err) + } + miner := abi.ActorID(123) + + sp 
:= &basicfs.Provider{ + Root: cdir, + } + sb, err := New(sp) + if err != nil { + t.Fatalf("%+v", err) + } + cleanup := func() { + if t.Failed() { + fmt.Printf("not removing %s\n", cdir) + return + } + if err := os.RemoveAll(cdir); err != nil { + t.Error(err) + } + } + t.Cleanup(cleanup) + + r := rand.New(rand.NewSource(0x7e5)) + + c, err := sb.AddPiece(context.TODO(), storage.SectorRef{ + ID: abi.SectorID{ + Miner: miner, + Number: 0, + }, + ProofType: abi.RegisteredSealProof_StackedDrg512MiBV1_1, + }, nil, sz, io.LimitReader(r, int64(sz/4))) + if err != nil { + t.Fatalf("add piece failed: %s", err) + } + + require.Equal(t, "baga6ea4seaqonenxyku4o7hr5xkzbqsceipf6xgli3on54beqbk6k246sbooobq", c.PieceCID.String()) +} + +func setupLogger(t *testing.T) *bytes.Buffer { + _ = os.Setenv("RUST_LOG", "info") + + var bb bytes.Buffer + r, w, err := os.Pipe() + if err != nil { + t.Fatal(err) + } + + go func() { + _, _ = io.Copy(&bb, r) + runtime.KeepAlive(w) + }() + + resp := generated.FilInitLogFd(int32(w.Fd())) + resp.Deref() + + defer generated.FilDestroyInitLogFdResponse(resp) + + if resp.StatusCode != generated.FCPResponseStatusFCPNoError { + t.Fatal(generated.RawString(resp.ErrorMsg).Copy()) + } + + return &bb +} + +func TestMulticoreSDR(t *testing.T) { + if os.Getenv("TEST_RUSTPROOFS_LOGS") != "1" { + t.Skip("skipping test without TEST_RUSTPROOFS_LOGS=1") + } + + rustLogger := setupLogger(t) + + getGrothParamFileAndVerifyingKeys(sectorSize) + + dir, err := ioutil.TempDir("", "sbtest") + if err != nil { + t.Fatal(err) + } + + miner := abi.ActorID(123) + + sp := &basicfs.Provider{ + Root: dir, + } + sb, err := New(sp) + if err != nil { + t.Fatalf("%+v", err) + } + + cleanup := func() { + if t.Failed() { + fmt.Printf("not removing %s\n", dir) + return + } + if err := os.RemoveAll(dir); err != nil { + t.Error(err) + } + } + defer cleanup() + + si := storage.SectorRef{ + ID: abi.SectorID{Miner: miner, Number: 1}, + ProofType: sealProofType, + } + + s := seal{ref: si} + + // check multicore + _ = os.Setenv("FIL_PROOFS_USE_MULTICORE_SDR", "1") + rustLogger.Reset() + s.precommit(t, sb, si, func() {}) + + ok := false + for _, s := range strings.Split(rustLogger.String(), "\n") { + if strings.Contains(s, "create_label::multi") { + ok = true + break + } + } + + require.True(t, ok) +} diff --git a/sector-storage/ffiwrapper/unseal_ranges.go b/sector-storage/ffiwrapper/unseal_ranges.go index 2c73787e..17028ccc 100644 --- a/sector-storage/ffiwrapper/unseal_ranges.go +++ b/sector-storage/ffiwrapper/unseal_ranges.go @@ -7,6 +7,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/venus-sealer/sector-storage/partialfile" "github.com/filecoin-project/venus-sealer/sector-storage/storiface" ) @@ -17,7 +18,7 @@ const mergeGaps = 32 << 20 // TODO const expandRuns = 16 << 20 // unseal more than requested for future requests func computeUnsealRanges(unsealed rlepluslazy.RunIterator, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (rlepluslazy.RunIterator, error) { - todo := pieceRun(offset.Padded(), size.Padded()) + todo := partialfile.PieceRun(offset.Padded(), size.Padded()) todo, err := rlepluslazy.Subtract(todo, unsealed) if err != nil { return nil, xerrors.Errorf("compute todo-unsealed: %w", err) diff --git a/sector-storage/fr32/readers.go b/sector-storage/fr32/readers.go index 20f3e9b3..f14d5bf1 100644 --- a/sector-storage/fr32/readers.go +++ b/sector-storage/fr32/readers.go @@ -51,13 +51,12 @@ func (r *unpadReader) Read(out []byte) (int, error) { r.left -= 
uint64(todo) - n, err := r.src.Read(r.work[:todo]) + n, err := io.ReadAtLeast(r.src, r.work[:todo], int(todo)) if err != nil && err != io.EOF { return n, err } - - if n != int(todo) { - return 0, xerrors.Errorf("didn't read enough: %w", err) + if n < int(todo) { + return 0, xerrors.Errorf("didn't read enough: %d / %d, left %d, out %d", n, todo, r.left, len(out)) } Unpad(r.work[:todo], out[:todo.Unpadded()]) diff --git a/sector-storage/manager.go b/sector-storage/manager.go index 09f81312..21b5c710 100644 --- a/sector-storage/manager.go +++ b/sector-storage/manager.go @@ -47,8 +47,6 @@ type Worker interface { } type SectorManager interface { - ReadPiece(context.Context, io.Writer, storage.SectorRef, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) error - ffiwrapper.StorageSealer storage.Prover storiface.WorkerReturn @@ -89,6 +87,20 @@ type result struct { err error } +// ResourceFilteringStrategy is an enum indicating the kinds of resource +// filtering strategies that can be configured for workers. +type ResourceFilteringStrategy string + +const ( + // ResourceFilteringHardware specifies that available hardware resources + // should be evaluated when scheduling a task against the worker. + ResourceFilteringHardware = ResourceFilteringStrategy("hardware") + + // ResourceFilteringDisabled disables resource filtering against this + // worker. The scheduler may assign any task to this worker. + ResourceFilteringDisabled = ResourceFilteringStrategy("disabled") +) + type SealerConfig struct { ParallelFetchLimit int @@ -98,6 +110,11 @@ type SealerConfig struct { AllowPreCommit2 bool AllowCommit bool AllowUnseal bool + + // ResourceFiltering instructs the system which resource filtering strategy + // to use when evaluating tasks against this worker. An empty value defaults + // to "hardware". 
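+ // + // For illustration, this could be set in the sealer's config file roughly + // as follows (the section name is illustrative, not confirmed): + // + // [Storage] + // ResourceFiltering = "disabled"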
+ ResourceFiltering ResourceFilteringStrategy } type StorageAuth http.Header @@ -105,24 +122,17 @@ type StorageAuth http.Header type WorkerStateStore statestore.StateStore type ManagerStateStore statestore.StateStore -func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, sc SealerConfig, urls URLs, sa StorageAuth, wss WorkerStateStore, mss ManagerStateStore) (*Manager, error) { - lstor, err := stores.NewLocal(ctx, ls, si, urls) - if err != nil { - return nil, err - } - +func New(ctx context.Context, lstor *stores.Local, stor *stores.Remote, ls stores.LocalStorage, si stores.SectorIndex, sc SealerConfig, wss WorkerStateStore, mss ManagerStateStore) (*Manager, error) { prover, err := ffiwrapper.New(&readonlyProvider{stor: lstor, index: si}) if err != nil { return nil, xerrors.Errorf("creating prover instance: %w", err) } - stor := stores.NewRemote(lstor, si, http.Header(sa), sc.ParallelFetchLimit) - m := &Manager{ ls: ls, storage: stor, localStore: lstor, - remoteHnd: &stores.FetchHandler{Local: lstor}, + remoteHnd: &stores.FetchHandler{Local: lstor, PfHandler: &stores.DefaultPartialFileHandler{}}, index: si, sched: newScheduler(), @@ -141,7 +151,7 @@ func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, sc go m.sched.runSched() localTasks := []types.TaskType{ - types.TTCommit1, types.TTFinalize, types.TTFetch, types.TTReadUnsealed, + types.TTCommit1, types.TTFinalize, types.TTFetch, } if sc.AllowAddPiece { localTasks = append(localTasks, types.TTAddPiece) @@ -159,9 +169,12 @@ func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, sc localTasks = append(localTasks, types.TTUnseal) } - err = m.AddWorker(ctx, NewLocalWorker(WorkerConfig{ - TaskTypes: localTasks, - }, stor, lstor, si, m, wss)) + wcfg := WorkerConfig{ + IgnoreResourceFiltering: sc.ResourceFiltering == ResourceFilteringDisabled, + TaskTypes: localTasks, + } + worker := NewLocalWorker(wcfg, stor, lstor, si, m, wss) + err = m.AddWorker(ctx, worker) if err != nil { return nil, xerrors.Errorf("adding local worker: %w", err) } @@ -206,71 +219,11 @@ func (m *Manager) schedFetch(sector storage.SectorRef, ft storiface.SectorFileTy } } -func (m *Manager) readPiece(sink io.Writer, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, rok *bool) func(ctx context.Context, w Worker) error { - return func(ctx context.Context, w Worker) error { - log.Debugf("read piece data from sector %d, offset %d, size %d", sector.ID, offset, size) - r, err := m.waitSimpleCall(ctx)(w.ReadPiece(ctx, sink, sector, offset, size)) - if err != nil { - return err - } - if r != nil { - *rok = r.(bool) - } - log.Debugf("completed read piece data from sector %d, offset %d, size %d: read ok? 
%t", sector.ID, offset, size, *rok) - return nil - } -} - -func (m *Manager) tryReadUnsealedPiece(ctx context.Context, sink io.Writer, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (foundUnsealed bool, readOk bool, selector WorkerSelector, returnErr error) { - - // acquire a lock purely for reading unsealed sectors - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - log.Debugf("acquire read sector lock for sector %d", sector.ID) - if err := m.index.StorageLock(ctx, sector.ID, storiface.FTUnsealed, storiface.FTNone); err != nil { - returnErr = xerrors.Errorf("acquiring read sector lock: %w", err) - return - } - - log.Debugf("find unsealed sector %d", sector.ID) - // passing 0 spt because we only need it when allowFetch is true - best, err := m.index.StorageFindSector(ctx, sector.ID, storiface.FTUnsealed, 0, false) - if err != nil { - returnErr = xerrors.Errorf("read piece: checking for already existing unsealed sector: %w", err) - return - } - - foundUnsealed = len(best) > 0 - if foundUnsealed { // append to existing - // There is unsealed sector, see if we can read from it - log.Debugf("found unsealed sector %d", sector.ID) - - selector = newExistingSelector(m.index, sector.ID, storiface.FTUnsealed, false) - - log.Debugf("scheduling read of unsealed sector %d", sector.ID) - err = m.sched.Schedule(ctx, sector, types.TTReadUnsealed, selector, m.schedFetch(sector, storiface.FTUnsealed, storiface.PathSealing, storiface.AcquireMove), - m.readPiece(sink, sector, offset, size, &readOk)) - if err != nil { - returnErr = xerrors.Errorf("reading piece from sealed sector: %w", err) - } - } else { - log.Debugf("did not find unsealed sector %d", sector.ID) - selector = newAllocSelector(m.index, storiface.FTUnsealed, storiface.PathSealing) - } - return -} - -func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) error { - log.Debugf("fetch and read piece in sector %d, offset %d, size %d", sector.ID, offset, size) - foundUnsealed, readOk, selector, err := m.tryReadUnsealedPiece(ctx, sink, sector, offset, size) - if err != nil { - return err - } - if readOk { - log.Debugf("completed read of unsealed piece in sector %d, offset %d, size %d", sector.ID, offset, size) - return nil - } +// SectorsUnsealPiece will Unseal the Sealed sector file for the given sector. +// It will schedule the Unsealing task on a worker that either already has the sealed sector files or has space in +// one of it's sealing scratch spaces to store them after fetching them from another worker. +// If the chosen worker already has the Unsealed sector file, we will NOT Unseal the sealed sector file again. +func (m *Manager) SectorsUnsealPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed *cid.Cid) error { ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -279,22 +232,18 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector storage. return xerrors.Errorf("acquiring unseal sector lock: %w", err) } - unsealFetch := func(ctx context.Context, worker Worker) error { + // if the selected worker does NOT have the sealed files for the sector, instruct it to fetch it from a worker that has them and + // put it in the sealing scratch space. 
+ sealFetch := func(ctx context.Context, worker Worker) error { log.Debugf("copy sealed/cache sector data for sector %d", sector.ID) if _, err := m.waitSimpleCall(ctx)(worker.Fetch(ctx, sector, storiface.FTSealed|storiface.FTCache, storiface.PathSealing, storiface.AcquireCopy)); err != nil { return xerrors.Errorf("copy sealed/cache sector data: %w", err) } - if foundUnsealed { - log.Debugf("copy unsealed sector data for sector %d", sector.ID) - if _, err := m.waitSimpleCall(ctx)(worker.Fetch(ctx, sector, storiface.FTUnsealed, storiface.PathSealing, storiface.AcquireMove)); err != nil { - return xerrors.Errorf("copy unsealed sector data: %w", err) - } - } return nil } - if unsealed == cid.Undef { + if unsealed == nil { return xerrors.Errorf("cannot unseal piece (sector: %d, offset: %d size: %d) - unsealed cid is undefined", sector, offset, size) } @@ -303,36 +252,28 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector storage. return xerrors.Errorf("getting sector size: %w", err) } - log.Debugf("schedule unseal for sector %d", sector.ID) - err = m.sched.Schedule(ctx, sector, types.TTUnseal, selector, unsealFetch, func(ctx context.Context, w Worker) error { + // selector will schedule the Unseal task on a worker that either already has the sealed sector files or has space in + // one of its sealing scratch spaces to store them after fetching them from another worker. + selector := newExistingSelector(m.index, sector.ID, storiface.FTSealed|storiface.FTCache, true) + + log.Debugf("will schedule unseal for sector %d", sector.ID) + err = m.sched.Schedule(ctx, sector, types.TTUnseal, selector, sealFetch, func(ctx context.Context, w Worker) error { // TODO: make restartable // NOTE: we're unsealing the whole sector here as with SDR we can't really // unseal the sector partially. Requesting the whole sector here can // save us some work in case another piece is requested from here - log.Debugf("unseal sector %d", sector.ID) - _, err := m.waitSimpleCall(ctx)(w.UnsealPiece(ctx, sector, 0, abi.PaddedPieceSize(ssize).Unpadded(), ticket, unsealed)) + log.Debugf("calling unseal sector on worker, sectorID=%d", sector.ID) + + // Note: This unseal piece call will essentially become a no-op if the worker already has an Unsealed sector file for the given sector. 
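+ // Both the ticket (the sealing randomness) and the unsealed CID (CommD, + // dereferenced below) are needed by the proofs library to regenerate the + // unsealed data from the sealed replica.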
+ _, err := m.waitSimpleCall(ctx)(w.UnsealPiece(ctx, sector, 0, abi.PaddedPieceSize(ssize).Unpadded(), ticket, *unsealed)) log.Debugf("completed unseal sector %d", sector.ID) return err }) if err != nil { - return err - } - - selector = newExistingSelector(m.index, sector.ID, storiface.FTUnsealed, false) - - log.Debugf("schedule read piece for sector %d, offset %d, size %d", sector.ID, offset, size) - err = m.sched.Schedule(ctx, sector, types.TTReadUnsealed, selector, m.schedFetch(sector, storiface.FTUnsealed, storiface.PathSealing, storiface.AcquireMove), - m.readPiece(sink, sector, offset, size, &readOk)) - if err != nil { - return xerrors.Errorf("reading piece from sealed sector: %w", err) - } - - if !readOk { - return xerrors.Errorf("failed to read unsealed piece") + return xerrors.Errorf("worker UnsealPiece call: %s", err) } - log.Debugf("completed read of piece in sector %d, offset %d, size %d", sector.ID, offset, size) return nil } @@ -782,4 +723,5 @@ func (m *Manager) Close(ctx context.Context) error { return m.sched.Close(ctx) } +var _ Unsealer = &Manager{} var _ SectorManager = &Manager{} diff --git a/sector-storage/manager_calltracker.go b/sector-storage/manager_calltracker.go index 591e91ad..3c68a612 100644 --- a/sector-storage/manager_calltracker.go +++ b/sector-storage/manager_calltracker.go @@ -39,7 +39,7 @@ type WorkState struct { Status WorkStatus - WorkerCall storiface.CallID // Set when entering wsRunning + WorkerCall types.CallID // Set when entering wsRunning WorkError string // Status = wsDone, set when failed to start work WorkerHostname string // hostname of last worker handling this job diff --git a/sector-storage/manager_test.go b/sector-storage/manager_test.go index 8a08a23f..1c3d26ea 100644 --- a/sector-storage/manager_test.go +++ b/sector-storage/manager_test.go @@ -97,7 +97,7 @@ func newTestMgr(ctx context.Context, t *testing.T, ds datastore.Datastore) (*Man prover, err := ffiwrapper.New(&readonlyProvider{stor: lstor, index: si}) require.NoError(t, err) - stor := stores.NewRemote(lstor, si, nil, 6000) + stor := stores.NewRemote(lstor, si, nil, 6000, &stores.DefaultPartialFileHandler{}) m := &Manager{ ls: st, diff --git a/sector-storage/mock/mock.go b/sector-storage/mock/mock.go index e542dc12..15bc5865 100644 --- a/sector-storage/mock/mock.go +++ b/sector-storage/mock/mock.go @@ -6,6 +6,7 @@ import ( "crypto/sha256" "fmt" "io" + "io/ioutil" "math/rand" "sync" @@ -34,7 +35,9 @@ type SectorMgr struct { lk sync.Mutex } -type mockVerifProver struct{} +type mockVerifProver struct { + aggregates map[string]proof5.AggregateSealVerifyProofAndInfos // used for logging bad verifies +} func NewMockSectorMgr(genesisSectors []abi.SectorID) *SectorMgr { sectors := make(map[abi.SectorID]*sectorState) @@ -116,6 +119,10 @@ func (mgr *SectorMgr) AcquireSectorNumber() (abi.SectorNumber, error) { return id, nil } +func (mgr *SectorMgr) IsUnsealed(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) { + return false, nil +} + func (mgr *SectorMgr) ForceState(sid storage.SectorRef, st int) error { mgr.lk.Lock() ss, ok := mgr.sectors[sid.ID] @@ -373,13 +380,12 @@ func generateFakePoSt(sectorInfo []proof5.SectorInfo, rpt func(abi.RegisteredSea } } -func (mgr *SectorMgr) ReadPiece(ctx context.Context, w io.Writer, sectorID storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, c cid.Cid) error { +func (mgr *SectorMgr) ReadPiece(ctx context.Context, sector 
storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) (io.ReadCloser, bool, error) { if offset != 0 { panic("implme") } - _, err := io.CopyN(w, bytes.NewReader(mgr.pieces[mgr.sectors[sectorID.ID].pieces[0]]), int64(size)) - return err + return ioutil.NopCloser(bytes.NewReader(mgr.pieces[mgr.sectors[sector.ID].pieces[0]][:size])), false, nil } func (mgr *SectorMgr) StageFakeData(mid abi.ActorID, spt abi.RegisteredSealProof) (storage.SectorRef, []abi.PieceInfo, error) { @@ -446,50 +452,54 @@ func (mgr *SectorMgr) CheckProvable(ctx context.Context, pp abi.RegisteredPoStPr return bad, nil } -func (mgr *SectorMgr) ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err *storiface.CallError) error { +func (mgr *SectorMgr) ReturnAddPiece(ctx context.Context, callID types.CallID, pi abi.PieceInfo, err *storiface.CallError) error { panic("not supported") } -func (mgr *SectorMgr) ReturnSealPreCommit1(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err *storiface.CallError) error { +func (mgr *SectorMgr) ReturnSealPreCommit1(ctx context.Context, callID types.CallID, p1o storage.PreCommit1Out, err *storiface.CallError) error { panic("not supported") } -func (mgr *SectorMgr) ReturnSealPreCommit2(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err *storiface.CallError) error { +func (mgr *SectorMgr) ReturnSealPreCommit2(ctx context.Context, callID types.CallID, sealed storage.SectorCids, err *storiface.CallError) error { panic("not supported") } -func (mgr *SectorMgr) ReturnSealCommit1(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err *storiface.CallError) error { +func (mgr *SectorMgr) ReturnSealCommit1(ctx context.Context, callID types.CallID, out storage.Commit1Out, err *storiface.CallError) error { panic("not supported") } -func (mgr *SectorMgr) ReturnSealCommit2(ctx context.Context, callID storiface.CallID, proof storage.Proof, err *storiface.CallError) error { +func (mgr *SectorMgr) ReturnSealCommit2(ctx context.Context, callID types.CallID, proof storage.Proof, err *storiface.CallError) error { panic("not supported") } -func (mgr *SectorMgr) ReturnFinalizeSector(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error { +func (mgr *SectorMgr) ReturnFinalizeSector(ctx context.Context, callID types.CallID, err *storiface.CallError) error { panic("not supported") } -func (mgr *SectorMgr) ReturnReleaseUnsealed(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error { +func (mgr *SectorMgr) ReturnReleaseUnsealed(ctx context.Context, callID types.CallID, err *storiface.CallError) error { panic("not supported") } -func (mgr *SectorMgr) ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error { +func (mgr *SectorMgr) ReturnMoveStorage(ctx context.Context, callID types.CallID, err *storiface.CallError) error { panic("not supported") } -func (mgr *SectorMgr) ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error { +func (mgr *SectorMgr) ReturnUnsealPiece(ctx context.Context, callID types.CallID, err *storiface.CallError) error { panic("not supported") } -func (mgr *SectorMgr) ReturnReadPiece(ctx context.Context, callID storiface.CallID, ok bool, err *storiface.CallError) error { +func (mgr *SectorMgr) ReturnReadPiece(ctx context.Context, callID types.CallID, ok bool, err *storiface.CallError) error { panic("not 
supported") } -func (mgr *SectorMgr) ReturnFetch(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error { +func (mgr *SectorMgr) ReturnFetch(ctx context.Context, callID types.CallID, err *storiface.CallError) error { panic("not supported") } +func (mgr *SectorMgr) SectorsUnsealPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd *cid.Cid) error { + return nil +} + func (m mockVerifProver) VerifySeal(svi proof5.SealVerifyInfo) (bool, error) { plen, err := svi.SealProof.ProofSize() if err != nil { @@ -522,7 +532,19 @@ func (m mockVerifProver) VerifyAggregateSeals(aggregate proof5.AggregateSealVeri } } - return bytes.Equal(aggregate.Proof, out), nil + ok := bytes.Equal(aggregate.Proof, out) + if !ok { + genInfo, found := m.aggregates[string(aggregate.Proof)] + if !found { + log.Errorf("BAD AGGREGATE: saved generate inputs not found; agg.Proof: %x; expected: %x", aggregate.Proof, out) + } else { + log.Errorf("BAD AGGREGATE (1): agg.Proof: %x; expected: %x", aggregate.Proof, out) + log.Errorf("BAD AGGREGATE (2): Verify Infos: %+v", aggregate.Infos) + log.Errorf("BAD AGGREGATE (3): Generate Infos: %+v", genInfo.Infos) + } + } + + return ok, nil } func (m mockVerifProver) AggregateSealProofs(aggregateInfo proof5.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error) { @@ -533,6 +555,8 @@ func (m mockVerifProver) AggregateSealProofs(aggregateInfo proof5.AggregateSealV } } + m.aggregates[string(out)] = aggregateInfo + return out, nil } @@ -592,8 +616,11 @@ func (m mockVerifProver) GenerateWinningPoStSectorChallenge(ctx context.Context, return []uint64{0}, nil } -var MockVerifier = mockVerifProver{} -var MockProver = mockVerifProver{} +var MockVerifier = mockVerifProver{ + aggregates: map[string]proof5.AggregateSealVerifyProofAndInfos{}, +} + +var MockProver = MockVerifier var _ storage.Sealer = &SectorMgr{} var _ ffiwrapper.Verifier = MockVerifier diff --git a/sector-storage/ffiwrapper/partialfile.go b/sector-storage/partialfile/partialfile.go similarity index 85% rename from sector-storage/ffiwrapper/partialfile.go rename to sector-storage/partialfile/partialfile.go index d68ec2dd..60c990f0 100644 --- a/sector-storage/ffiwrapper/partialfile.go +++ b/sector-storage/partialfile/partialfile.go @@ -1,4 +1,4 @@ -package ffiwrapper +package partialfile import ( "encoding/binary" @@ -7,6 +7,7 @@ import ( "syscall" "github.com/detailyang/go-fallocate" + logging "github.com/ipfs/go-log/v2" "golang.org/x/xerrors" rlepluslazy "github.com/filecoin-project/go-bitfield/rle" @@ -16,6 +17,8 @@ import ( "github.com/filecoin-project/venus-sealer/sector-storage/storiface" ) +var log = logging.Logger("partialfile") + const veryLargeRle = 1 << 20 // Sectors can be partially unsealed. 
We support this by appending a small @@ -25,7 +28,7 @@ const veryLargeRle = 1 << 20 // unsealed sector files internally have this structure // [unpadded (raw) data][rle+][4B LE length fo the rle+ field] -type partialFile struct { +type PartialFile struct { maxPiece abi.PaddedPieceSize path string @@ -57,7 +60,7 @@ func writeTrailer(maxPieceSize int64, w *os.File, r rlepluslazy.RunIterator) err return w.Truncate(maxPieceSize + int64(rb) + 4) } -func createPartialFile(maxPieceSize abi.PaddedPieceSize, path string) (*partialFile, error) { +func CreatePartialFile(maxPieceSize abi.PaddedPieceSize, path string) (*PartialFile, error) { f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644) // nolint if err != nil { return nil, xerrors.Errorf("openning partial file '%s': %w", path, err) @@ -89,10 +92,10 @@ func createPartialFile(maxPieceSize abi.PaddedPieceSize, path string) (*partialF return nil, xerrors.Errorf("close empty partial file: %w", err) } - return openPartialFile(maxPieceSize, path) + return OpenPartialFile(maxPieceSize, path) } -func openPartialFile(maxPieceSize abi.PaddedPieceSize, path string) (*partialFile, error) { +func OpenPartialFile(maxPieceSize abi.PaddedPieceSize, path string) (*PartialFile, error) { f, err := os.OpenFile(path, os.O_RDWR, 0644) // nolint if err != nil { return nil, xerrors.Errorf("openning partial file '%s': %w", path, err) @@ -165,7 +168,7 @@ func openPartialFile(maxPieceSize abi.PaddedPieceSize, path string) (*partialFil return nil, err } - return &partialFile{ + return &PartialFile{ maxPiece: maxPieceSize, path: path, allocated: rle, @@ -173,11 +176,11 @@ func openPartialFile(maxPieceSize abi.PaddedPieceSize, path string) (*partialFil }, nil } -func (pf *partialFile) Close() error { +func (pf *PartialFile) Close() error { return pf.file.Close() } -func (pf *partialFile) Writer(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) (io.Writer, error) { +func (pf *PartialFile) Writer(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) (io.Writer, error) { if _, err := pf.file.Seek(int64(offset), io.SeekStart); err != nil { return nil, xerrors.Errorf("seek piece start: %w", err) } @@ -188,7 +191,7 @@ func (pf *partialFile) Writer(offset storiface.PaddedByteIndex, size abi.PaddedP return nil, err } - and, err := rlepluslazy.And(have, pieceRun(offset, size)) + and, err := rlepluslazy.And(have, PieceRun(offset, size)) if err != nil { return nil, err } @@ -206,13 +209,13 @@ func (pf *partialFile) Writer(offset storiface.PaddedByteIndex, size abi.PaddedP return pf.file, nil } -func (pf *partialFile) MarkAllocated(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) error { +func (pf *PartialFile) MarkAllocated(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) error { have, err := pf.allocated.RunIterator() if err != nil { return err } - ored, err := rlepluslazy.Or(have, pieceRun(offset, size)) + ored, err := rlepluslazy.Or(have, PieceRun(offset, size)) if err != nil { return err } @@ -224,7 +227,7 @@ func (pf *partialFile) MarkAllocated(offset storiface.PaddedByteIndex, size abi. 
return nil } -func (pf *partialFile) Free(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) error { +func (pf *PartialFile) Free(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) error { have, err := pf.allocated.RunIterator() if err != nil { return err @@ -234,7 +237,7 @@ func (pf *partialFile) Free(offset storiface.PaddedByteIndex, size abi.PaddedPie return xerrors.Errorf("deallocating: %w", err) } - s, err := rlepluslazy.Subtract(have, pieceRun(offset, size)) + s, err := rlepluslazy.Subtract(have, PieceRun(offset, size)) if err != nil { return err } @@ -246,7 +249,7 @@ func (pf *partialFile) Free(offset storiface.PaddedByteIndex, size abi.PaddedPie return nil } -func (pf *partialFile) Reader(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) (*os.File, error) { +func (pf *PartialFile) Reader(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) (*os.File, error) { if _, err := pf.file.Seek(int64(offset), io.SeekStart); err != nil { return nil, xerrors.Errorf("seek piece start: %w", err) } @@ -257,7 +260,7 @@ func (pf *partialFile) Reader(offset storiface.PaddedByteIndex, size abi.PaddedP return nil, err } - and, err := rlepluslazy.And(have, pieceRun(offset, size)) + and, err := rlepluslazy.And(have, PieceRun(offset, size)) if err != nil { return nil, err } @@ -275,17 +278,17 @@ func (pf *partialFile) Reader(offset storiface.PaddedByteIndex, size abi.PaddedP return pf.file, nil } -func (pf *partialFile) Allocated() (rlepluslazy.RunIterator, error) { +func (pf *PartialFile) Allocated() (rlepluslazy.RunIterator, error) { return pf.allocated.RunIterator() } -func (pf *partialFile) HasAllocated(offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) { +func (pf *PartialFile) HasAllocated(offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) { have, err := pf.Allocated() if err != nil { return false, err } - u, err := rlepluslazy.And(have, pieceRun(offset.Padded(), size.Padded())) + u, err := rlepluslazy.And(have, PieceRun(offset.Padded(), size.Padded())) if err != nil { return false, err } @@ -298,7 +301,7 @@ func (pf *partialFile) HasAllocated(offset storiface.UnpaddedByteIndex, size abi return abi.PaddedPieceSize(uc) == size.Padded(), nil } -func pieceRun(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) rlepluslazy.RunIterator { +func PieceRun(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) rlepluslazy.RunIterator { var runs []rlepluslazy.Run if offset > 0 { runs = append(runs, rlepluslazy.Run{ diff --git a/sector-storage/piece_provider.go b/sector-storage/piece_provider.go new file mode 100644 index 00000000..f40d8f7e --- /dev/null +++ b/sector-storage/piece_provider.go @@ -0,0 +1,176 @@ +package sectorstorage + +import ( + "bufio" + "context" + "io" + + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-storage/storage" + + "github.com/filecoin-project/venus-sealer/sector-storage/fr32" + "github.com/filecoin-project/venus-sealer/sector-storage/stores" + "github.com/filecoin-project/venus-sealer/sector-storage/storiface" +) + +type Unsealer interface { + // SectorsUnsealPiece will Unseal a Sealed sector file for the given sector. 
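+ // The Manager in this package implements this interface; note that it + // rejects a nil commd, so callers must know the unsealed CID up front.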
+ SectorsUnsealPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd *cid.Cid) error +} + +type PieceProvider interface { + // ReadPiece is used to read an Unsealed piece at the given offset and of the given size from a Sector + ReadPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) (io.ReadCloser, bool, error) + IsUnsealed(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) +} + +var _ PieceProvider = &pieceProvider{} + +type pieceProvider struct { + storage *stores.Remote + index stores.SectorIndex + uns Unsealer +} + +func NewPieceProvider(storage *stores.Remote, index stores.SectorIndex, uns Unsealer) PieceProvider { + return &pieceProvider{ + storage: storage, + index: index, + uns: uns, + } +} + +// IsUnsealed checks if we have the unsealed piece at the given offset in an already +// existing unsealed file either locally or on any of the workers. +func (p *pieceProvider) IsUnsealed(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) { + if err := offset.Valid(); err != nil { + return false, xerrors.Errorf("offset is not valid: %w", err) + } + if err := size.Validate(); err != nil { + return false, xerrors.Errorf("size is not a valid piece size: %w", err) + } + + ctxLock, cancel := context.WithCancel(ctx) + defer cancel() + + if err := p.index.StorageLock(ctxLock, sector.ID, storiface.FTUnsealed, storiface.FTNone); err != nil { + return false, xerrors.Errorf("acquiring read sector lock: %w", err) + } + + return p.storage.CheckIsUnsealed(ctxLock, sector, abi.PaddedPieceSize(offset.Padded()), size.Padded()) +} + +// tryReadUnsealedPiece will try to read the unsealed piece from an existing unsealed sector file for the given sector from any worker that has it. +// It will NOT try to schedule an Unseal of a sealed sector file for the read. +// +// Returns a nil reader if the piece does NOT exist in any unsealed file or there is no unsealed file for the given sector on any of the workers. +func (p *pieceProvider) tryReadUnsealedPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (io.ReadCloser, context.CancelFunc, error) { + // acquire a lock purely for reading unsealed sectors + ctx, cancel := context.WithCancel(ctx) + if err := p.index.StorageLock(ctx, sector.ID, storiface.FTUnsealed, storiface.FTNone); err != nil { + cancel() + return nil, nil, xerrors.Errorf("acquiring read sector lock: %w", err) + } + + // Reader returns a reader for an unsealed piece at the given offset in the given sector. + // The returned reader will be nil if none of the workers has an unsealed sector file containing + // the unsealed piece. + r, err := p.storage.Reader(ctx, sector, abi.PaddedPieceSize(offset.Padded()), size.Padded()) + if err != nil { + log.Debugf("did not get storage reader;sector=%+v, err:%s", sector.ID, err) + cancel() + return nil, nil, err + } + if r == nil { + cancel() + } + + return r, cancel, nil +} + +// ReadPiece is used to read an Unsealed piece at the given offset and of the given size from a Sector +// If an Unsealed sector file exists with the Piece Unsealed in it, we'll use that for the read. 
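+ // That fast path is served by tryReadUnsealedPiece above and performs no sealing work.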
+// Otherwise, we will Unseal a Sealed sector file for the given sector and read the Unsealed piece from it. +// If we do NOT have an existing unsealed file containing the given piece thus causing us to schedule an Unseal, +// the returned boolean parameter will be set to true. +// If we have an existing unsealed file containing the given piece, the returned boolean will be set to false. +func (p *pieceProvider) ReadPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) (io.ReadCloser, bool, error) { + if err := offset.Valid(); err != nil { + return nil, false, xerrors.Errorf("offset is not valid: %w", err) + } + if err := size.Validate(); err != nil { + return nil, false, xerrors.Errorf("size is not a valid piece size: %w", err) + } + + r, unlock, err := p.tryReadUnsealedPiece(ctx, sector, offset, size) + + log.Debugf("result of first tryReadUnsealedPiece: r=%+v, err=%s", r, err) + + if xerrors.Is(err, storiface.ErrSectorNotFound) { + log.Debugf("no unsealed sector file with unsealed piece, sector=%+v, offset=%d, size=%d", sector, offset, size) + err = nil + } + if err != nil { + log.Errorf("returning error from ReadPiece:%s", err) + return nil, false, err + } + + var uns bool + + if r == nil { + // a nil reader means that none of the workers has an unsealed sector file + // containing the unsealed piece. + // we now need to unseal a sealed sector file for the given sector to read the unsealed piece from it. + uns = true + commd := &unsealed + if unsealed == cid.Undef { + commd = nil + } + if err := p.uns.SectorsUnsealPiece(ctx, sector, offset, size, ticket, commd); err != nil { + log.Errorf("failed to SectorsUnsealPiece: %s", err) + return nil, false, xerrors.Errorf("unsealing piece: %w", err) + } + + log.Debugf("unsealed a sector file to read the piece, sector=%+v, offset=%d, size=%d", sector, offset, size) + + r, unlock, err = p.tryReadUnsealedPiece(ctx, sector, offset, size) + if err != nil { + log.Errorf("failed to tryReadUnsealedPiece after SectorsUnsealPiece: %s", err) + return nil, true, xerrors.Errorf("read after unsealing: %w", err) + } + if r == nil { + log.Errorf("got no reader after unsealing piece") + return nil, true, xerrors.Errorf("got no reader after unsealing piece") + } + log.Debugf("got a reader to read unsealed piece, sector=%+v, offset=%d, size=%d", sector, offset, size) + } else { + log.Debugf("unsealed piece already exists, no need to unseal, sector=%+v, offset=%d, size=%d", sector, offset, size) + } + + upr, err := fr32.NewUnpadReader(r, size.Padded()) + if err != nil { + unlock() + return nil, uns, xerrors.Errorf("creating unpadded reader: %w", err) + } + + log.Debugf("returning reader to read unsealed piece, sector=%+v, offset=%d, size=%d", sector, offset, size) + + return &funcCloser{ + Reader: bufio.NewReaderSize(upr, 127), + close: func() error { + err = r.Close() + unlock() + return err + }, + }, uns, nil +} + +type funcCloser struct { + io.Reader + close func() error +} + +func (fc *funcCloser) Close() error { return fc.close() } diff --git a/sector-storage/piece_provider_test.go b/sector-storage/piece_provider_test.go new file mode 100644 index 00000000..9dbe8447 --- /dev/null +++ b/sector-storage/piece_provider_test.go @@ -0,0 +1,362 @@ +package sectorstorage + +import ( + "bytes" + "context" + "io/ioutil" + "math/rand" + "net" + "net/http" + "testing" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-statestore" 
+ specstorage "github.com/filecoin-project/specs-storage/storage" + "github.com/gorilla/mux" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/namespace" + ds_sync "github.com/ipfs/go-datastore/sync" + logging "github.com/ipfs/go-log/v2" + "github.com/stretchr/testify/require" + + + "github.com/filecoin-project/venus-sealer/sector-storage/stores" + "github.com/filecoin-project/venus-sealer/sector-storage/storiface" + "github.com/filecoin-project/venus-sealer/types" +) + +// TestPieceProviderReadPiece verifies that the ReadPiece method works correctly +// only uses miner and does NOT use any remote worker. +func TestPieceProviderSimpleNoRemoteWorker(t *testing.T) { + // Set up sector storage manager + sealerCfg := SealerConfig{ + ParallelFetchLimit: 10, + AllowAddPiece: true, + AllowPreCommit1: true, + AllowPreCommit2: true, + AllowCommit: true, + AllowUnseal: true, + } + + ppt := newPieceProviderTestHarness(t, sealerCfg, abi.RegisteredSealProof_StackedDrg8MiBV1) + defer ppt.shutdown(t) + + // Create some padded data that aligns with the piece boundaries. + pieceData := generatePieceData(8 * 127 * 1024 * 8) + size := abi.UnpaddedPieceSize(len(pieceData)) + ppt.addPiece(t, pieceData) + + // read piece + ppt.readPiece(t, storiface.UnpaddedByteIndex(0), size, + false, pieceData) + + // pre-commit 1 + preCommit1 := ppt.preCommit1(t) + + // check if IsUnsealed -> true + require.True(t, ppt.isUnsealed(t, storiface.UnpaddedByteIndex(0), size)) + // read piece + ppt.readPiece(t, storiface.UnpaddedByteIndex(0), size, + false, pieceData) + + // pre-commit 2 + ppt.preCommit2(t, preCommit1) + + // check if IsUnsealed -> true + require.True(t, ppt.isUnsealed(t, storiface.UnpaddedByteIndex(0), size)) + // read piece + ppt.readPiece(t, storiface.UnpaddedByteIndex(0), size, + false, pieceData) + + // finalize -> nil here will remove unsealed file + ppt.finalizeSector(t, nil) + + // check if IsUnsealed -> false + require.False(t, ppt.isUnsealed(t, storiface.UnpaddedByteIndex(0), size)) + // Read the piece -> will have to unseal + ppt.readPiece(t, storiface.UnpaddedByteIndex(0), size, + true, pieceData) + + // check if IsUnsealed -> true + require.True(t, ppt.isUnsealed(t, storiface.UnpaddedByteIndex(0), size)) + // read the piece -> will not have to unseal + ppt.readPiece(t, storiface.UnpaddedByteIndex(0), size, + false, pieceData) + +} +func TestReadPieceRemoteWorkers(t *testing.T) { + logging.SetAllLoggers(logging.LevelDebug) + + // miner's worker can only add pieces to an unsealed sector. + sealerCfg := SealerConfig{ + ParallelFetchLimit: 10, + AllowAddPiece: true, + AllowPreCommit1: false, + AllowPreCommit2: false, + AllowCommit: false, + AllowUnseal: false, + } + + // test harness for an 8M sector. + ppt := newPieceProviderTestHarness(t, sealerCfg, abi.RegisteredSealProof_StackedDrg8MiBV1) + defer ppt.shutdown(t) + + // worker 2 will ONLY help with the sealing by first fetching + // the unsealed file from the miner. + ppt.addRemoteWorker(t, []types.TaskType{ + types.TTPreCommit1, types.TTPreCommit2, types.TTCommit1, + types.TTFetch, types.TTFinalize, + }) + + // create a worker that can ONLY unseal and fetch + ppt.addRemoteWorker(t, []types.TaskType{ + types.TTUnseal, types.TTFetch, + }) + + // run the test + + // add one piece that aligns with the padding/piece boundaries. 
+ pd1 := generatePieceData(8 * 127 * 4 * 1024) + pi1 := ppt.addPiece(t, pd1) + pd1size := pi1.Size.Unpadded() + + pd2 := generatePieceData(8 * 127 * 4 * 1024) + pi2 := ppt.addPiece(t, pd2) + pd2size := pi2.Size.Unpadded() + + // pre-commit 1 + pC1 := ppt.preCommit1(t) + + // check if IsUnsealed -> true + require.True(t, ppt.isUnsealed(t, storiface.UnpaddedByteIndex(0), pd1size)) + // Read the piece -> no need to unseal + ppt.readPiece(t, storiface.UnpaddedByteIndex(0), pd1size, + false, pd1) + + // pre-commit 2 + ppt.preCommit2(t, pC1) + + // check if IsUnsealed -> true + require.True(t, ppt.isUnsealed(t, storiface.UnpaddedByteIndex(0), pd1size)) + // Read the piece -> no need to unseal + ppt.readPiece(t, storiface.UnpaddedByteIndex(0), pd1size, + false, pd1) + + // finalize the sector so we declare to the index we have the sealed file + // so the unsealing worker can later look it up and fetch it if needed + // sending nil here will remove all unsealed files after sector is finalized. + ppt.finalizeSector(t, nil) + + // check if IsUnsealed -> false + require.False(t, ppt.isUnsealed(t, storiface.UnpaddedByteIndex(0), pd1size)) + // Read the piece -> have to unseal since we removed the file. + ppt.readPiece(t, storiface.UnpaddedByteIndex(0), pd1size, + true, pd1) + + // Read the same piece again -> will NOT have to unseal. + ppt.readPiece(t, storiface.UnpaddedByteIndex(0), pd1size, false, pd1) + + // remove the unsealed file and read again -> will have to unseal. + ppt.removeAllUnsealedSectorFiles(t) + // check if IsUnsealed -> false + require.False(t, ppt.isUnsealed(t, storiface.UnpaddedByteIndex(0), pd1size)) + ppt.readPiece(t, storiface.UnpaddedByteIndex(0), pd1size, + true, pd1) + + // check if IsUnsealed -> true + require.True(t, ppt.isUnsealed(t, storiface.UnpaddedByteIndex(pd1size), pd2size)) + // Read Piece 2 -> no unsealing as it got unsealed above. + ppt.readPiece(t, storiface.UnpaddedByteIndex(pd1size), pd2size, false, pd2) + + // remove all unseal files -> Read Piece 2 -> will have to Unseal. + ppt.removeAllUnsealedSectorFiles(t) + + // check if IsUnsealed -> false + require.False(t, ppt.isUnsealed(t, storiface.UnpaddedByteIndex(pd1size), pd2size)) + ppt.readPiece(t, storiface.UnpaddedByteIndex(pd1size), pd2size, true, pd2) +} + +type pieceProviderTestHarness struct { + ctx context.Context + index *stores.Index + pp PieceProvider + sector specstorage.SectorRef + mgr *Manager + ticket abi.SealRandomness + commD cid.Cid + localStores []*stores.Local + + servers []*http.Server + + addedPieces []abi.PieceInfo +} + +func generatePieceData(size uint64) []byte { + bz := make([]byte, size) + rand.Read(bz) + return bz +} + +func newPieceProviderTestHarness(t *testing.T, mgrConfig SealerConfig, sectorProofType abi.RegisteredSealProof) *pieceProviderTestHarness { + ctx := context.Background() + // listen on tcp socket to create an http server later + address := "0.0.0.0:0" + nl, err := net.Listen("tcp", address) + require.NoError(t, err) + + // create index, storage, local store & remote store. + index := stores.NewIndex() + storage := newTestStorage(t) + localStore, err := stores.NewLocal(ctx, storage, index, []string{"http://" + nl.Addr().String() + "/remote"}) + require.NoError(t, err) + remoteStore := stores.NewRemote(localStore, index, nil, 6000, &stores.DefaultPartialFileHandler{}) + + // data stores for state tracking. 
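+ // wsts tracks in-flight worker calls and smsts tracks manager work state; + // both share one in-memory datastore under distinct namespaces, mirroring + // the /worker/calls and /stmgr/calls keys the sealer itself uses.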
+ dstore := ds_sync.MutexWrap(datastore.NewMapDatastore()) + wsts := statestore.NewDsStateStore(namespace.Wrap(dstore, datastore.NewKey("/worker/calls"))) + smsts := statestore.NewDsStateStore(namespace.Wrap(dstore, datastore.NewKey("/stmgr/calls"))) + + mgr, err := New(ctx, localStore, remoteStore, storage, index, mgrConfig, wsts, smsts) + require.NoError(t, err) + + // start a http server on the manager to serve sector file requests. + svc := &http.Server{ + Addr: nl.Addr().String(), + Handler: mgr, + } + go func() { + _ = svc.Serve(nl) + }() + + pp := NewPieceProvider(remoteStore, index, mgr) + + sector := specstorage.SectorRef{ + ID: abi.SectorID{ + Miner: 100, + Number: 10, + }, + ProofType: sectorProofType, + } + + ticket := abi.SealRandomness{9, 9, 9, 9, 9, 9, 9, 9} + + ppt := &pieceProviderTestHarness{ + ctx: ctx, + index: index, + pp: pp, + sector: sector, + mgr: mgr, + ticket: ticket, + } + ppt.servers = append(ppt.servers, svc) + ppt.localStores = append(ppt.localStores, localStore) + return ppt +} + +func (p *pieceProviderTestHarness) addRemoteWorker(t *testing.T, tasks []types.TaskType) { + // start an http Server + address := "0.0.0.0:0" + nl, err := net.Listen("tcp", address) + require.NoError(t, err) + + localStore, err := stores.NewLocal(p.ctx, newTestStorage(t), p.index, []string{"http://" + nl.Addr().String() + "/remote"}) + require.NoError(t, err) + + fh := &stores.FetchHandler{ + Local: localStore, + PfHandler: &stores.DefaultPartialFileHandler{}, + } + + mux := mux.NewRouter() + mux.PathPrefix("/remote").HandlerFunc(fh.ServeHTTP) + svc := &http.Server{ + Addr: nl.Addr().String(), + Handler: mux, + } + + go func() { + _ = svc.Serve(nl) + }() + + remote := stores.NewRemote(localStore, p.index, nil, 1000, + &stores.DefaultPartialFileHandler{}) + + dstore := ds_sync.MutexWrap(datastore.NewMapDatastore()) + csts := statestore.NewDsStateStore(namespace.Wrap(dstore, datastore.NewKey("/stmgr/calls"))) + + worker := newLocalWorker(nil, WorkerConfig{ + TaskTypes: tasks, + }, remote, localStore, p.index, p.mgr, csts) + + p.servers = append(p.servers, svc) + p.localStores = append(p.localStores, localStore) + + // register self with manager + require.NoError(t, p.mgr.AddWorker(p.ctx, worker)) +} + +func (p *pieceProviderTestHarness) removeAllUnsealedSectorFiles(t *testing.T) { + for i := range p.localStores { + ls := p.localStores[i] + require.NoError(t, ls.Remove(p.ctx, p.sector.ID, storiface.FTUnsealed, false)) + } +} + +func (p *pieceProviderTestHarness) addPiece(t *testing.T, pieceData []byte) abi.PieceInfo { + var existing []abi.UnpaddedPieceSize + for _, pi := range p.addedPieces { + existing = append(existing, pi.Size.Unpadded()) + } + + size := abi.UnpaddedPieceSize(len(pieceData)) + pieceInfo, err := p.mgr.AddPiece(p.ctx, p.sector, existing, size, bytes.NewReader(pieceData)) + require.NoError(t, err) + + p.addedPieces = append(p.addedPieces, pieceInfo) + return pieceInfo +} + +func (p *pieceProviderTestHarness) preCommit1(t *testing.T) specstorage.PreCommit1Out { + preCommit1, err := p.mgr.SealPreCommit1(p.ctx, p.sector, p.ticket, p.addedPieces) + require.NoError(t, err) + return preCommit1 +} + +func (p *pieceProviderTestHarness) preCommit2(t *testing.T, pc1 specstorage.PreCommit1Out) { + sectorCids, err := p.mgr.SealPreCommit2(p.ctx, p.sector, pc1) + require.NoError(t, err) + commD := sectorCids.Unsealed + p.commD = commD +} + +func (p *pieceProviderTestHarness) isUnsealed(t *testing.T, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) bool { + b, err := 
p.pp.IsUnsealed(p.ctx, p.sector, offset, size) + require.NoError(t, err) + return b +} + +func (p *pieceProviderTestHarness) readPiece(t *testing.T, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, + expectedHadToUnseal bool, expectedBytes []byte) { + rd, isUnsealed, err := p.pp.ReadPiece(p.ctx, p.sector, offset, size, p.ticket, p.commD) + require.NoError(t, err) + require.NotNil(t, rd) + require.Equal(t, expectedHadToUnseal, isUnsealed) + defer func() { _ = rd.Close() }() + + // Make sure the input matches the output + readData, err := ioutil.ReadAll(rd) + require.NoError(t, err) + require.Equal(t, expectedBytes, readData) +} + +func (p *pieceProviderTestHarness) finalizeSector(t *testing.T, keepUnseal []specstorage.Range) { + require.NoError(t, p.mgr.FinalizeSector(p.ctx, p.sector, keepUnseal)) +} + +func (p *pieceProviderTestHarness) shutdown(t *testing.T) { + for _, svc := range p.servers { + s := svc + require.NoError(t, s.Shutdown(p.ctx)) + } +} diff --git a/sector-storage/resources.go b/sector-storage/resources.go index c32b0d16..e673ada5 100644 --- a/sector-storage/resources.go +++ b/sector-storage/resources.go @@ -312,7 +312,6 @@ var ResourceTable = map[types.TaskType]map[abi.RegisteredSealProof]Resources{ func init() { ResourceTable[types.TTUnseal] = ResourceTable[types.TTPreCommit1] // TODO: measure accurately - ResourceTable[types.TTReadUnsealed] = ResourceTable[types.TTFetch] // V1_1 is the same as V1 for _, m := range ResourceTable { diff --git a/sector-storage/sched.go b/sector-storage/sched.go index 9da36df2..3587639a 100644 --- a/sector-storage/sched.go +++ b/sector-storage/sched.go @@ -349,24 +349,24 @@ func (sh *scheduler) trySched() { defer sh.workersLk.RUnlock() windowsLen := len(sh.openWindows) - queuneLen := sh.schedQueue.Len() + queueLen := sh.schedQueue.Len() - log.Debugf("SCHED %d queued; %d open windows", queuneLen, windowsLen) + log.Debugf("SCHED %d queued; %d open windows", queueLen, windowsLen) - if windowsLen == 0 || queuneLen == 0 { + if windowsLen == 0 || queueLen == 0 { // nothing to schedule on return } windows := make([]schedWindow, windowsLen) - acceptableWindows := make([][]int, queuneLen) + acceptableWindows := make([][]int, queueLen) // Step 1 throttle := make(chan struct{}, windowsLen) var wg sync.WaitGroup - wg.Add(queuneLen) - for i := 0; i < queuneLen; i++ { + wg.Add(queueLen) + for i := 0; i < queueLen; i++ { throttle <- struct{}{} go func(sqi int) { @@ -393,7 +393,7 @@ func (sh *scheduler) trySched() { } // TODO: allow bigger windows - if !windows[wnd].allocated.canHandleRequest(needRes, windowRequest.worker, "schedAcceptable", worker.info.Resources) { + if !windows[wnd].allocated.canHandleRequest(needRes, windowRequest.worker, "schedAcceptable", worker.info) { continue } @@ -451,27 +451,27 @@ func (sh *scheduler) trySched() { // Step 2 scheduled := 0 - rmQueue := make([]int, 0, queuneLen) + rmQueue := make([]int, 0, queueLen) - for sqi := 0; sqi < queuneLen; sqi++ { + for sqi := 0; sqi < queueLen; sqi++ { task := (*sh.schedQueue)[sqi] needRes := ResourceTable[task.taskType][task.sector.ProofType] selectedWindow := -1 for _, wnd := range acceptableWindows[task.indexHeap] { wid := sh.openWindows[wnd].worker - wr := sh.workers[wid].info.Resources + info := sh.workers[wid].info log.Debugf("SCHED try assign sqi:%d sector %d to window %d", sqi, task.sector.ID.Number, wnd) // TODO: allow bigger windows - if !windows[wnd].allocated.canHandleRequest(needRes, wid, "schedAssign", wr) { + if !windows[wnd].allocated.canHandleRequest(needRes, 
wid, "schedAssign", info) {
 				continue
 			}
 
 			log.Debugf("SCHED ASSIGNED sqi:%d sector %d task %s to window %d", sqi, task.sector.ID.Number, task.taskType, wnd)
 
-			windows[wnd].allocated.add(wr, needRes)
+			windows[wnd].allocated.add(info.Resources, needRes)
 			// TODO: We probably want to re-sort acceptableWindows here based on new
 			// workerHandle.utilization + windows[wnd].allocated.utilization (workerHandle.utilization is used in all
 			// task selectors, but not in the same way, so need to figure out how to do that in a non-O(n^2) way, and
diff --git a/sector-storage/sched_resources.go b/sector-storage/sched_resources.go
index 6bb84fa3..7d9712ba 100644
--- a/sector-storage/sched_resources.go
+++ b/sector-storage/sched_resources.go
@@ -6,7 +6,7 @@ import (
 	"github.com/filecoin-project/venus-sealer/sector-storage/storiface"
 )
 
-func (a *activeResources) withResources(id WorkerID, wr storiface.WorkerResources, r Resources, locker sync.Locker, cb func() error) error {
+func (a *activeResources) withResources(id WorkerID, wr storiface.WorkerInfo, r Resources, locker sync.Locker, cb func() error) error {
 	for !a.canHandleRequest(r, id, "withResources", wr) {
 		if a.cond == nil {
 			a.cond = sync.NewCond(locker)
@@ -14,11 +14,11 @@ func (a *activeResources) withResources(id WorkerID, wr storiface.WorkerResource
 		a.cond.Wait()
 	}
 
-	a.add(wr, r)
+	a.add(wr.Resources, r)
 
 	err := cb()
 
-	a.free(wr, r)
+	a.free(wr.Resources, r)
 	if a.cond != nil {
 		a.cond.Broadcast()
 	}
@@ -44,8 +44,15 @@ func (a *activeResources) free(wr storiface.WorkerResources, r Resources) {
 	a.memUsedMax -= r.MaxMemory
 }
 
-func (a *activeResources) canHandleRequest(needRes Resources, wid WorkerID, caller string, res storiface.WorkerResources) bool {
+// canHandleRequest evaluates if the worker has enough available resources to
+// handle the request.
+func (a *activeResources) canHandleRequest(needRes Resources, wid WorkerID, caller string, info storiface.WorkerInfo) bool {
+	if info.IgnoreResources {
+		// short-circuit: if this worker is ignoring resource constraints, it can always handle the request.
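+		// For instance (illustrative values only, not taken from this patch), a
+		// worker advertising
+		//
+		//	storiface.WorkerInfo{Hostname: "w0", IgnoreResources: true}
+		//
+		// is admitted here even with zero-valued WorkerResources, because none of
+		// the memory/CPU checks below are evaluated for it.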
+ return true + } + res := info.Resources // TODO: dedupe needRes.BaseMinMemory per task type (don't add if that task is already running) minNeedMem := res.MemReserved + a.memUsedMin + needRes.MinMemory + needRes.BaseMinMemory if minNeedMem > res.MemPhysical { diff --git a/sector-storage/sched_test.go b/sector-storage/sched_test.go index e7f5fcab..2000bf72 100644 --- a/sector-storage/sched_test.go +++ b/sector-storage/sched_test.go @@ -38,6 +38,20 @@ func TestWithPriority(t *testing.T) { require.Equal(t, 2222, types.GetPriority(ctx)) } +var decentWorkerResources = storiface.WorkerResources{ + MemPhysical: 128 << 30, + MemSwap: 200 << 30, + MemReserved: 2 << 30, + CPUs: 32, + GPUs: []string{"a GPU"}, +} + +var constrainedWorkerResources = storiface.WorkerResources{ + MemPhysical: 1 << 30, + MemReserved: 2 << 30, + CPUs: 1, +} + type schedTestWorker struct { name string taskTypes map[types.TaskType]struct{} @@ -45,6 +59,9 @@ type schedTestWorker struct { closed bool session uuid.UUID + + resources storiface.WorkerResources + ignoreResources bool } func (s *schedTestWorker) SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (types.CallID, error) { @@ -107,18 +124,11 @@ func (s *schedTestWorker) Paths(ctx context.Context) ([]stores.StoragePath, erro return s.paths, nil } -var decentWorkerResources = storiface.WorkerResources{ - MemPhysical: 128 << 30, - MemSwap: 200 << 30, - MemReserved: 2 << 30, - CPUs: 32, - GPUs: []string{"a GPU"}, -} - func (s *schedTestWorker) Info(ctx context.Context) (storiface.WorkerInfo, error) { return storiface.WorkerInfo{ - Hostname: s.name, - Resources: decentWorkerResources, + Hostname: s.name, + IgnoreResources: s.ignoreResources, + Resources: s.resources, }, nil } @@ -137,13 +147,16 @@ func (s *schedTestWorker) Close() error { var _ Worker = &schedTestWorker{} -func addTestWorker(t *testing.T, sched *scheduler, index *stores.Index, name string, taskTypes map[types.TaskType]struct{}) { +func addTestWorker(t *testing.T, sched *scheduler, index *stores.Index, name string, taskTypes map[types.TaskType]struct{}, resources storiface.WorkerResources, ignoreResources bool) { w := &schedTestWorker{ name: name, taskTypes: taskTypes, paths: []stores.StoragePath{{ID: "bb-8", Weight: 2, LocalPath: "food", CanSeal: true, CanStore: true}}, session: uuid.New(), + + resources: resources, + ignoreResources: ignoreResources, } for _, path := range w.paths { @@ -154,9 +167,10 @@ func addTestWorker(t *testing.T, sched *scheduler, index *stores.Index, name str CanSeal: path.CanSeal, CanStore: path.CanStore, }, fsutil.FsStat{ - Capacity: 1 << 40, - Available: 1 << 40, - Reserved: 3, + Capacity: 1 << 40, + Available: 1 << 40, + FSAvailable: 1 << 40, + Reserved: 3, }) require.NoError(t, err) } @@ -168,7 +182,7 @@ func TestSchedStartStop(t *testing.T) { sched := newScheduler() go sched.runSched() - addTestWorker(t, sched, stores.NewIndex(), "fred", nil) + addTestWorker(t, sched, stores.NewIndex(), "fred", nil, decentWorkerResources, false) require.NoError(t, sched.Close(context.TODO())) } @@ -182,6 +196,9 @@ func TestSched(t *testing.T) { type workerSpec struct { name string taskTypes map[types.TaskType]struct{} + + resources storiface.WorkerResources + ignoreResources bool } noopAction := func(ctx context.Context, w Worker) error { @@ -294,7 +311,7 @@ func TestSched(t *testing.T) { go sched.runSched() for _, worker := range workers { - addTestWorker(t, sched, index, worker.name, worker.taskTypes) + addTestWorker(t, sched, 
index, worker.name, worker.taskTypes, worker.resources, worker.ignoreResources) } rm := runMeta{ @@ -321,31 +338,42 @@ func TestSched(t *testing.T) { } } + // checks behaviour with workers with constrained resources + // the first one is not ignoring resource constraints, so we assign to the second worker, who is + t.Run("constrained-resources", testFunc([]workerSpec{ + {name: "fred1", resources: constrainedWorkerResources, taskTypes: map[types.TaskType]struct{}{types.TTPreCommit1: {}}}, + {name: "fred2", resources: constrainedWorkerResources, ignoreResources: true, taskTypes: map[types.TaskType]struct{}{types.TTPreCommit1: {}}}, + }, []task{ + sched("pc1-1", "fred2", 8, types.TTPreCommit1), + taskStarted("pc1-1"), + taskDone("pc1-1"), + })) + t.Run("one-pc1", testFunc([]workerSpec{ - {name: "fred", taskTypes: map[types.TaskType]struct{}{types.TTPreCommit1: {}}}, + {name: "fred", resources: decentWorkerResources, taskTypes: map[types.TaskType]struct{}{types.TTPreCommit1: {}}}, }, []task{ sched("pc1-1", "fred", 8, types.TTPreCommit1), taskDone("pc1-1"), })) t.Run("pc1-2workers-1", testFunc([]workerSpec{ - {name: "fred2", taskTypes: map[types.TaskType]struct{}{types.TTPreCommit2: {}}}, - {name: "fred1", taskTypes: map[types.TaskType]struct{}{types.TTPreCommit1: {}}}, + {name: "fred2", resources: decentWorkerResources, taskTypes: map[types.TaskType]struct{}{types.TTPreCommit2: {}}}, + {name: "fred1", resources: decentWorkerResources, taskTypes: map[types.TaskType]struct{}{types.TTPreCommit1: {}}}, }, []task{ sched("pc1-1", "fred1", 8, types.TTPreCommit1), taskDone("pc1-1"), })) t.Run("pc1-2workers-2", testFunc([]workerSpec{ - {name: "fred1", taskTypes: map[types.TaskType]struct{}{types.TTPreCommit1: {}}}, - {name: "fred2", taskTypes: map[types.TaskType]struct{}{types.TTPreCommit2: {}}}, + {name: "fred1", resources: decentWorkerResources, taskTypes: map[types.TaskType]struct{}{types.TTPreCommit1: {}}}, + {name: "fred2", resources: decentWorkerResources, taskTypes: map[types.TaskType]struct{}{types.TTPreCommit2: {}}}, }, []task{ sched("pc1-1", "fred1", 8, types.TTPreCommit1), taskDone("pc1-1"), })) t.Run("pc1-block-pc2", testFunc([]workerSpec{ - {name: "fred", taskTypes: map[types.TaskType]struct{}{types.TTPreCommit1: {}, types.TTPreCommit2: {}}}, + {name: "fred", resources: decentWorkerResources, taskTypes: map[types.TaskType]struct{}{types.TTPreCommit1: {}, types.TTPreCommit2: {}}}, }, []task{ sched("pc1", "fred", 8, types.TTPreCommit1), taskStarted("pc1"), @@ -358,7 +386,7 @@ func TestSched(t *testing.T) { })) t.Run("pc2-block-pc1", testFunc([]workerSpec{ - {name: "fred", taskTypes: map[types.TaskType]struct{}{types.TTPreCommit1: {}, types.TTPreCommit2: {}}}, + {name: "fred", resources: decentWorkerResources, taskTypes: map[types.TaskType]struct{}{types.TTPreCommit1: {}, types.TTPreCommit2: {}}}, }, []task{ sched("pc2", "fred", 8, types.TTPreCommit2), taskStarted("pc2"), @@ -371,7 +399,7 @@ func TestSched(t *testing.T) { })) t.Run("pc1-batching", testFunc([]workerSpec{ - {name: "fred", taskTypes: map[types.TaskType]struct{}{types.TTPreCommit1: {}}}, + {name: "fred", resources: decentWorkerResources, taskTypes: map[types.TaskType]struct{}{types.TTPreCommit1: {}}}, }, []task{ sched("t1", "fred", 8, types.TTPreCommit1), taskStarted("t1"), @@ -458,7 +486,7 @@ func TestSched(t *testing.T) { // run this one a bunch of times, it had a very annoying tendency to fail randomly for i := 0; i < 40; i++ { t.Run("pc1-pc2-prio", testFunc([]workerSpec{ - {name: "fred", taskTypes: 
map[types.TaskType]struct{}{types.TTPreCommit1: {}, types.TTPreCommit2: {}}}, + {name: "fred", resources: decentWorkerResources, taskTypes: map[types.TaskType]struct{}{types.TTPreCommit1: {}, types.TTPreCommit2: {}}}, }, []task{ // fill queues twoPC1("w0", 0, taskStarted), diff --git a/sector-storage/sched_worker.go b/sector-storage/sched_worker.go index c910cc0a..2915b6c0 100644 --- a/sector-storage/sched_worker.go +++ b/sector-storage/sched_worker.go @@ -296,7 +296,7 @@ func (sw *schedWorker) workerCompactWindows() { for ti, todo := range window.todo { needRes := ResourceTable[todo.taskType][todo.sector.ProofType] - if !lower.allocated.canHandleRequest(needRes, sw.wid, "compactWindows", worker.info.Resources) { + if !lower.allocated.canHandleRequest(needRes, sw.wid, "compactWindows", worker.info) { continue } @@ -352,7 +352,7 @@ assignLoop: worker.lk.Lock() for t, todo := range firstWindow.todo { needRes := ResourceTable[todo.taskType][todo.sector.ProofType] - if worker.preparing.canHandleRequest(needRes, sw.wid, "startPreparing", worker.info.Resources) { + if worker.preparing.canHandleRequest(needRes, sw.wid, "startPreparing", worker.info) { tidx = t break } @@ -424,7 +424,7 @@ func (sw *schedWorker) startProcessingTask(taskDone chan struct{}, req *workerRe } // wait (if needed) for resources in the 'active' window - err = w.active.withResources(sw.wid, w.info.Resources, needRes, &sh.workersLk, func() error { + err = w.active.withResources(sw.wid, w.info, needRes, &sh.workersLk, func() error { w.lk.Lock() w.preparing.free(w.info.Resources, needRes) w.lk.Unlock() diff --git a/sector-storage/sealtasks/task.go b/sector-storage/sealtasks/task.go deleted file mode 100644 index 8dd14ca3..00000000 --- a/sector-storage/sealtasks/task.go +++ /dev/null @@ -1,63 +0,0 @@ -package sealtasks - -type TaskType string - -const ( - TTAddPiece TaskType = "seal/v0/addpiece" - TTPreCommit1 TaskType = "seal/v0/precommit/1" - TTPreCommit2 TaskType = "seal/v0/precommit/2" - TTCommit1 TaskType = "seal/v0/commit/1" // NOTE: We use this to transfer the sector into miner-local storage for now; Don't use on workers! 
-	TTCommit2 TaskType = "seal/v0/commit/2"
-
-	TTFinalize TaskType = "seal/v0/finalize"
-
-	TTFetch        TaskType = "seal/v0/fetch"
-	TTUnseal       TaskType = "seal/v0/unseal"
-	TTReadUnsealed TaskType = "seal/v0/unsealread"
-)
-
-var order = map[TaskType]int{
-	TTAddPiece:     6, // least priority
-	TTPreCommit1:   5,
-	TTPreCommit2:   4,
-	TTCommit2:      3,
-	TTCommit1:      2,
-	TTUnseal:       1,
-	TTFetch:        -1,
-	TTReadUnsealed: -1,
-	TTFinalize:     -2, // most priority
-}
-
-var shortNames = map[TaskType]string{
-	TTAddPiece: "AP",
-
-	TTPreCommit1: "PC1",
-	TTPreCommit2: "PC2",
-	TTCommit1:    "C1",
-	TTCommit2:    "C2",
-
-	TTFinalize: "FIN",
-
-	TTFetch:        "GET",
-	TTUnseal:       "UNS",
-	TTReadUnsealed: "RD",
-}
-
-func (a TaskType) MuchLess(b TaskType) (bool, bool) {
-	oa, ob := order[a], order[b]
-	oneNegative := oa^ob < 0
-	return oneNegative, oa < ob
-}
-
-func (a TaskType) Less(b TaskType) bool {
-	return order[a] < order[b]
-}
-
-func (a TaskType) Short() string {
-	n, ok := shortNames[a]
-	if !ok {
-		return "UNK"
-	}
-
-	return n
-}
diff --git a/sector-storage/stores/http_handler.go b/sector-storage/stores/http_handler.go
index 2042e169..989c805b 100644
--- a/sector-storage/stores/http_handler.go
+++ b/sector-storage/stores/http_handler.go
@@ -5,11 +5,15 @@ import (
 	"io"
 	"net/http"
 	"os"
+	"strconv"
 
 	"github.com/gorilla/mux"
 	logging "github.com/ipfs/go-log/v2"
 	"golang.org/x/xerrors"
 
+	"github.com/filecoin-project/go-state-types/abi"
+
+	"github.com/filecoin-project/venus-sealer/sector-storage/partialfile"
 	"github.com/filecoin-project/venus-sealer/sector-storage/storiface"
 	"github.com/filecoin-project/venus-sealer/sector-storage/tarutil"
 
@@ -18,8 +22,32 @@ import (
 
 var log = logging.Logger("stores")
 
+var _ partialFileHandler = &DefaultPartialFileHandler{}
+
+// DefaultPartialFileHandler is the default implementation of the partialFileHandler interface.
+// This is probably the only implementation we'll ever use, because the purpose of the
+// interface is to mock out partial-file related functionality during testing.
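+// In production wiring it would typically be passed alongside a concrete Store
+// when constructing the fetch handler (a minimal sketch; the variable name
+// localStore is assumed for illustration):
+//
+//	handler := &FetchHandler{Local: localStore, PfHandler: &DefaultPartialFileHandler{}}
+//
+// while tests substitute a gomock-generated implementation instead.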
+type DefaultPartialFileHandler struct{}
+
+func (d *DefaultPartialFileHandler) OpenPartialFile(maxPieceSize abi.PaddedPieceSize, path string) (*partialfile.PartialFile, error) {
+	return partialfile.OpenPartialFile(maxPieceSize, path)
+}
+func (d *DefaultPartialFileHandler) HasAllocated(pf *partialfile.PartialFile, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) {
+	return pf.HasAllocated(offset, size)
+}
+
+func (d *DefaultPartialFileHandler) Reader(pf *partialfile.PartialFile, offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) (*os.File, error) {
+	return pf.Reader(offset, size)
+}
+
+// Close closes the partial file
+func (d *DefaultPartialFileHandler) Close(pf *partialfile.PartialFile) error {
+	return pf.Close()
+}
+
 type FetchHandler struct {
-	*Local
+	Local     Store
+	PfHandler partialFileHandler
 }
 
 func (handler *FetchHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // /remote/
@@ -29,6 +57,8 @@ func (handler *FetchHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	mux.HandleFunc("/remote/{type}/{id}", handler.remoteGetSector).Methods("GET")
 	mux.HandleFunc("/remote/{type}/{id}", handler.remoteDeleteSector).Methods("DELETE")
 
+	mux.HandleFunc("/remote/{type}/{id}/{spt}/allocated/{offset}/{size}", handler.remoteGetAllocated).Methods("GET")
+
 	mux.ServeHTTP(w, r)
 }
 
@@ -54,6 +84,8 @@ func (handler *FetchHandler) remoteStatFs(w http.ResponseWriter, r *http.Request
 	}
 }
 
+// remoteGetSector returns the sector file / tarred directory byte stream for the sectorID and sector file type sent in the request.
+// It returns an error if it does NOT have the required sector file/dir.
 func (handler *FetchHandler) remoteGetSector(w http.ResponseWriter, r *http.Request) {
 	log.Infof("SERVE GET %s", r.URL)
 	vars := mux.Vars(r)
@@ -82,7 +114,7 @@ func (handler *FetchHandler) remoteGetSector(w http.ResponseWriter, r *http.Requ
 	paths, _, err := handler.Local.AcquireSector(r.Context(), si, ft, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove)
 	if err != nil {
-		log.Errorf("%+v", err)
+		log.Errorf("AcquireSector: %+v", err)
 		w.WriteHeader(500)
 		return
 	}
@@ -98,62 +130,170 @@ func (handler *FetchHandler) remoteGetSector(w http.ResponseWriter, r *http.Requ
 	stat, err := os.Stat(path)
 	if err != nil {
-		log.Errorf("%+v", err)
+		log.Errorf("os.Stat: %+v", err)
 		w.WriteHeader(500)
 		return
 	}
 
-	var rd io.Reader
 	if stat.IsDir() {
-		rd, err = tarutil.TarDirectory(path)
+		if _, has := r.Header["Range"]; has {
+			log.Error("Range not supported on directories")
+			w.WriteHeader(500)
+			return
+		}
+
+		rd, err := tarutil.TarDirectory(path)
+		if err != nil {
+			log.Errorf("%+v", err)
+			w.WriteHeader(500)
+			return
+		}
+
+		w.Header().Set("Content-Type", "application/x-tar")
+		w.WriteHeader(200)
+		if _, err := io.CopyBuffer(w, rd, make([]byte, CopyBuf)); err != nil {
+			log.Errorf("%+v", err)
+			return
+		}
 	} else {
-		rd, err = os.OpenFile(path, os.O_RDONLY, 0644) // nolint
 		w.Header().Set("Content-Type", "application/octet-stream")
+		// will do a ranged read over the file at the given path if the caller has asked for one in the request headers.
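+		// For example, a hypothetical client fetching only the first MiB of the
+		// file would send
+		//
+		//	GET /remote/unsealed/s-t01000-0
+		//	Range: bytes=0-1048575
+		//
+		// and http.ServeFile would answer 206 Partial Content with a matching
+		// Content-Range header; without a Range header the whole file is
+		// streamed with status 200.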
+ http.ServeFile(w, r, path) } + + log.Debugf("served sector file/dir, sectorID=%+v, fileType=%s, path=%s", id, ft, path) +} + +func (handler *FetchHandler) remoteDeleteSector(w http.ResponseWriter, r *http.Request) { + log.Infof("SERVE DELETE %s", r.URL) + vars := mux.Vars(r) + + id, err := storiface.ParseSectorID(vars["id"]) if err != nil { log.Errorf("%+v", err) w.WriteHeader(500) return } - if !stat.IsDir() { - defer func() { - if err := rd.(*os.File).Close(); err != nil { - log.Errorf("closing source file: %+v", err) - } - }() + + ft, err := ftFromString(vars["type"]) + if err != nil { + log.Errorf("%+v", err) + w.WriteHeader(500) + return } - w.WriteHeader(200) - if _, err := io.CopyBuffer(w, rd, make([]byte, CopyBuf)); err != nil { + if err := handler.Local.Remove(r.Context(), id, ft, false); err != nil { log.Errorf("%+v", err) + w.WriteHeader(500) return } } -func (handler *FetchHandler) remoteDeleteSector(w http.ResponseWriter, r *http.Request) { - log.Infof("SERVE DELETE %s", r.URL) +// remoteGetAllocated returns `http.StatusOK` if the worker already has an Unsealed sector file +// containing the Unsealed piece sent in the request. +// returns `http.StatusRequestedRangeNotSatisfiable` otherwise. +func (handler *FetchHandler) remoteGetAllocated(w http.ResponseWriter, r *http.Request) { + log.Infof("SERVE Alloc check %s", r.URL) vars := mux.Vars(r) id, err := storiface.ParseSectorID(vars["id"]) if err != nil { - log.Errorf("%+v", err) + log.Errorf("parsing sectorID: %+v", err) w.WriteHeader(500) return } ft, err := ftFromString(vars["type"]) if err != nil { - log.Errorf("%+v", err) + log.Errorf("ftFromString: %+v", err) + w.WriteHeader(500) + return + } + if ft != storiface.FTUnsealed { + log.Errorf("/allocated only supports unsealed sector files") w.WriteHeader(500) return } - if err := handler.Remove(r.Context(), id, ft, false); err != nil { - log.Errorf("%+v", err) + spti, err := strconv.ParseInt(vars["spt"], 10, 64) + if err != nil { + log.Errorf("parsing spt: %+v", err) w.WriteHeader(500) return } + spt := abi.RegisteredSealProof(spti) + ssize, err := spt.SectorSize() + if err != nil { + log.Errorf("spt.SectorSize(): %+v", err) + w.WriteHeader(500) + return + } + + offi, err := strconv.ParseInt(vars["offset"], 10, 64) + if err != nil { + log.Errorf("parsing offset: %+v", err) + w.WriteHeader(500) + return + } + szi, err := strconv.ParseInt(vars["size"], 10, 64) + if err != nil { + log.Errorf("parsing size: %+v", err) + w.WriteHeader(500) + return + } + + // The caller has a lock on this sector already, no need to get one here + + // passing 0 spt because we don't allocate anything + si := storage.SectorRef{ + ID: id, + ProofType: 0, + } + + // get the path of the local Unsealed file for the given sector. + // return error if we do NOT have it. + paths, _, err := handler.Local.AcquireSector(r.Context(), si, ft, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove) + if err != nil { + log.Errorf("AcquireSector: %+v", err) + w.WriteHeader(500) + return + } + + path := storiface.PathByType(paths, ft) + if path == "" { + log.Error("acquired path was empty") + w.WriteHeader(500) + return + } + + // open the Unsealed file and check if it has the Unsealed sector for the piece at the given offset and size. 
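+	// Note that the offset/size in the URL are *unpadded* values, while the
+	// partial file itself is sized by the *padded* sector size: OpenPartialFile
+	// takes the padded size, and HasAllocated below takes the unpadded
+	// coordinates.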
+ pf, err := handler.PfHandler.OpenPartialFile(abi.PaddedPieceSize(ssize), path) + if err != nil { + log.Error("opening partial file: ", err) + w.WriteHeader(500) + return + } + defer func() { + if err := pf.Close(); err != nil { + log.Error("closing partial file: ", err) + } + }() + + has, err := handler.PfHandler.HasAllocated(pf, storiface.UnpaddedByteIndex(offi), abi.UnpaddedPieceSize(szi)) + if err != nil { + log.Error("has allocated: ", err) + w.WriteHeader(500) + return + } + + if has { + log.Debugf("returning ok: worker has unsealed file with unsealed piece, sector:%+v, offset:%d, size:%d", id, offi, szi) + w.WriteHeader(http.StatusOK) + return + } + + log.Debugf("returning StatusRequestedRangeNotSatisfiable: worker does NOT have unsealed file with unsealed piece, sector:%+v, offset:%d, size:%d", id, offi, szi) + w.WriteHeader(http.StatusRequestedRangeNotSatisfiable) } func ftFromString(t string) (storiface.SectorFileType, error) { diff --git a/sector-storage/stores/http_handler_test.go b/sector-storage/stores/http_handler_test.go new file mode 100644 index 00000000..1aaadc2a --- /dev/null +++ b/sector-storage/stores/http_handler_test.go @@ -0,0 +1,458 @@ +package stores_test + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "testing" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-storage/storage" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + "golang.org/x/xerrors" + + "github.com/filecoin-project/venus-sealer/sector-storage/partialfile" + "github.com/filecoin-project/venus-sealer/sector-storage/stores" + "github.com/filecoin-project/venus-sealer/sector-storage/stores/mocks" + "github.com/filecoin-project/venus-sealer/sector-storage/storiface" +) + +func TestRemoteGetAllocated(t *testing.T) { + + emptyPartialFile := &partialfile.PartialFile{} + pfPath := "path" + expectedSectorRef := storage.SectorRef{ + ID: abi.SectorID{ + Miner: 123, + Number: 123, + }, + ProofType: 0, + } + + validSectorName := fmt.Sprintf("s-t0%d-%d", 123, 123) + validSectorFileType := storiface.FTUnsealed.String() + validSectorType := "1" + sectorSize := abi.SealProofInfos[1].SectorSize + + validOffset := "100" + validOffsetInt := 100 + + validSize := "1000" + validSizeInt := 1000 + + type pieceInfo struct { + sectorName string + fileType string + sectorType string + + // piece info + offset string + size string + } + validPieceInfo := pieceInfo{ + sectorName: validSectorName, + fileType: validSectorFileType, + sectorType: validSectorType, + offset: validOffset, + size: validSize, + } + + tcs := map[string]struct { + piFnc func(pi *pieceInfo) + storeFnc func(s *mocks.MockStore) + pfFunc func(s *mocks.MockpartialFileHandler) + + // expectation + expectedStatusCode int + }{ + "fails when sector name is invalid": { + piFnc: func(pi *pieceInfo) { + pi.sectorName = "invalid" + }, + expectedStatusCode: http.StatusInternalServerError, + }, + "fails when file type is invalid": { + piFnc: func(pi *pieceInfo) { + pi.fileType = "invalid" + }, + expectedStatusCode: http.StatusInternalServerError, + }, + "fails when sector proof type is invalid": { + piFnc: func(pi *pieceInfo) { + pi.sectorType = "invalid" + }, + expectedStatusCode: http.StatusInternalServerError, + }, + "fails when offset is invalid": { + piFnc: func(pi *pieceInfo) { + pi.offset = "invalid" + }, + expectedStatusCode: http.StatusInternalServerError, + }, + "fails when size is invalid": { + piFnc: func(pi *pieceInfo) { + pi.size = "invalid" 
+ }, + expectedStatusCode: http.StatusInternalServerError, + }, + "fails when errors out during acquiring unsealed sector file": { + expectedStatusCode: http.StatusInternalServerError, + storeFnc: func(l *mocks.MockStore) { + + l.EXPECT().AcquireSector(gomock.Any(), expectedSectorRef, storiface.FTUnsealed, + storiface.FTNone, storiface.PathStorage, storiface.AcquireMove).Return(storiface.SectorPaths{ + Unsealed: "path", + }, + storiface.SectorPaths{}, xerrors.New("some error")).Times(1) + }, + }, + "fails when unsealed sector file is not found locally": { + expectedStatusCode: http.StatusInternalServerError, + storeFnc: func(l *mocks.MockStore) { + + l.EXPECT().AcquireSector(gomock.Any(), expectedSectorRef, storiface.FTUnsealed, + storiface.FTNone, storiface.PathStorage, storiface.AcquireMove).Return(storiface.SectorPaths{}, + storiface.SectorPaths{}, nil).Times(1) + }, + }, + "fails when error while opening partial file": { + expectedStatusCode: http.StatusInternalServerError, + storeFnc: func(l *mocks.MockStore) { + + l.EXPECT().AcquireSector(gomock.Any(), expectedSectorRef, storiface.FTUnsealed, + storiface.FTNone, storiface.PathStorage, storiface.AcquireMove).Return(storiface.SectorPaths{ + Unsealed: pfPath, + }, + storiface.SectorPaths{}, nil).Times(1) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + pf.EXPECT().OpenPartialFile(abi.PaddedPieceSize(sectorSize), pfPath).Return(&partialfile.PartialFile{}, + xerrors.New("some error")).Times(1) + }, + }, + + "fails when determining partial file allocation returns an error": { + expectedStatusCode: http.StatusInternalServerError, + storeFnc: func(l *mocks.MockStore) { + + l.EXPECT().AcquireSector(gomock.Any(), expectedSectorRef, storiface.FTUnsealed, + storiface.FTNone, storiface.PathStorage, storiface.AcquireMove).Return(storiface.SectorPaths{ + Unsealed: pfPath, + }, + storiface.SectorPaths{}, nil).Times(1) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + pf.EXPECT().OpenPartialFile(abi.PaddedPieceSize(sectorSize), pfPath).Return(emptyPartialFile, + nil).Times(1) + + pf.EXPECT().HasAllocated(emptyPartialFile, storiface.UnpaddedByteIndex(validOffsetInt), + abi.UnpaddedPieceSize(validSizeInt)).Return(true, xerrors.New("some error")).Times(1) + }, + }, + "StatusRequestedRangeNotSatisfiable when piece is NOT allocated in partial file": { + expectedStatusCode: http.StatusRequestedRangeNotSatisfiable, + storeFnc: func(l *mocks.MockStore) { + + l.EXPECT().AcquireSector(gomock.Any(), expectedSectorRef, storiface.FTUnsealed, + storiface.FTNone, storiface.PathStorage, storiface.AcquireMove).Return(storiface.SectorPaths{ + Unsealed: pfPath, + }, + storiface.SectorPaths{}, nil).Times(1) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + pf.EXPECT().OpenPartialFile(abi.PaddedPieceSize(sectorSize), pfPath).Return(emptyPartialFile, + nil).Times(1) + + pf.EXPECT().HasAllocated(emptyPartialFile, storiface.UnpaddedByteIndex(validOffsetInt), + abi.UnpaddedPieceSize(validSizeInt)).Return(false, nil).Times(1) + }, + }, + "OK when piece is allocated in partial file": { + expectedStatusCode: http.StatusOK, + storeFnc: func(l *mocks.MockStore) { + + l.EXPECT().AcquireSector(gomock.Any(), expectedSectorRef, storiface.FTUnsealed, + storiface.FTNone, storiface.PathStorage, storiface.AcquireMove).Return(storiface.SectorPaths{ + Unsealed: pfPath, + }, + storiface.SectorPaths{}, nil).Times(1) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + pf.EXPECT().OpenPartialFile(abi.PaddedPieceSize(sectorSize), 
pfPath).Return(emptyPartialFile, + nil).Times(1) + + pf.EXPECT().HasAllocated(emptyPartialFile, storiface.UnpaddedByteIndex(validOffsetInt), + abi.UnpaddedPieceSize(validSizeInt)).Return(true, nil).Times(1) + }, + }, + } + + for name, tc := range tcs { + tc := tc + t.Run(name, func(t *testing.T) { + // create go mock controller here + mockCtrl := gomock.NewController(t) + // when test is done, assert expectations on all mock objects. + defer mockCtrl.Finish() + + lstore := mocks.NewMockStore(mockCtrl) + pfhandler := mocks.NewMockpartialFileHandler(mockCtrl) + + handler := &stores.FetchHandler{ + lstore, + pfhandler, + } + + // run http server + ts := httptest.NewServer(handler) + defer ts.Close() + + pi := validPieceInfo + + if tc.piFnc != nil { + tc.piFnc(&pi) + } + + if tc.storeFnc != nil { + tc.storeFnc(lstore) + } + if tc.pfFunc != nil { + tc.pfFunc(pfhandler) + } + + // call remoteGetAllocated + url := fmt.Sprintf("%s/remote/%s/%s/%s/allocated/%s/%s", + ts.URL, + pi.fileType, + pi.sectorName, + pi.sectorType, + pi.offset, + pi.size) + resp, err := http.Get(url) + require.NoError(t, err) + defer func() { + _ = resp.Body.Close() + }() + + // assert expected status code + require.Equal(t, tc.expectedStatusCode, resp.StatusCode) + }) + } +} + +func TestRemoteGetSector(t *testing.T) { + str := "hello-world" + fileBytes := []byte(str) + + validSectorName := fmt.Sprintf("s-t0%d-%d", 123, 123) + validSectorFileType := storiface.FTUnsealed.String() + expectedSectorRef := storage.SectorRef{ + ID: abi.SectorID{ + Miner: 123, + Number: 123, + }, + ProofType: 0, + } + + type sectorInfo struct { + sectorName string + fileType string + } + validSectorInfo := sectorInfo{ + sectorName: validSectorName, + fileType: validSectorFileType, + } + + tcs := map[string]struct { + siFnc func(pi *sectorInfo) + storeFnc func(s *mocks.MockStore, path string) + + // reading a file or a dir + isDir bool + + // expectation + noResponseBytes bool + expectedContentType string + expectedStatusCode int + expectedResponseBytes []byte + }{ + "fails when sector name is invalid": { + siFnc: func(si *sectorInfo) { + si.sectorName = "invalid" + }, + expectedStatusCode: http.StatusInternalServerError, + noResponseBytes: true, + }, + "fails when file type is invalid": { + siFnc: func(si *sectorInfo) { + si.fileType = "invalid" + }, + expectedStatusCode: http.StatusInternalServerError, + noResponseBytes: true, + }, + "fails when error while acquiring sector file": { + storeFnc: func(l *mocks.MockStore, _ string) { + + l.EXPECT().AcquireSector(gomock.Any(), expectedSectorRef, storiface.FTUnsealed, + storiface.FTNone, storiface.PathStorage, storiface.AcquireMove).Return(storiface.SectorPaths{ + Unsealed: "path", + }, + storiface.SectorPaths{}, xerrors.New("some error")).Times(1) + }, + expectedStatusCode: http.StatusInternalServerError, + noResponseBytes: true, + }, + "fails when acquired sector file path is empty": { + expectedStatusCode: http.StatusInternalServerError, + storeFnc: func(l *mocks.MockStore, _ string) { + + l.EXPECT().AcquireSector(gomock.Any(), expectedSectorRef, storiface.FTUnsealed, + storiface.FTNone, storiface.PathStorage, storiface.AcquireMove).Return(storiface.SectorPaths{}, + storiface.SectorPaths{}, nil).Times(1) + }, + noResponseBytes: true, + }, + "fails when acquired file does not exist": { + expectedStatusCode: http.StatusInternalServerError, + storeFnc: func(l *mocks.MockStore, _ string) { + + l.EXPECT().AcquireSector(gomock.Any(), expectedSectorRef, storiface.FTUnsealed, + storiface.FTNone, 
storiface.PathStorage, storiface.AcquireMove).Return(storiface.SectorPaths{
+					Unsealed: "path",
+				},
+					storiface.SectorPaths{}, nil)
+			},
+			noResponseBytes: true,
+		},
+		"successfully read a sector file": {
+			storeFnc: func(l *mocks.MockStore, path string) {
+
+				l.EXPECT().AcquireSector(gomock.Any(), expectedSectorRef, storiface.FTUnsealed,
+					storiface.FTNone, storiface.PathStorage, storiface.AcquireMove).Return(storiface.SectorPaths{
+					Unsealed: path,
+				},
+					storiface.SectorPaths{}, nil)
+			},
+
+			noResponseBytes:       false,
+			expectedContentType:   "application/octet-stream",
+			expectedStatusCode:    200,
+			expectedResponseBytes: fileBytes,
+		},
+		"successfully read a sector dir": {
+			storeFnc: func(l *mocks.MockStore, path string) {
+
+				l.EXPECT().AcquireSector(gomock.Any(), expectedSectorRef, storiface.FTUnsealed,
+					storiface.FTNone, storiface.PathStorage, storiface.AcquireMove).Return(storiface.SectorPaths{
+					Unsealed: path,
+				},
+					storiface.SectorPaths{}, nil)
+			},
+
+			isDir:                 true,
+			noResponseBytes:       false,
+			expectedContentType:   "application/x-tar",
+			expectedStatusCode:    200,
+			expectedResponseBytes: fileBytes,
+		},
+	}
+
+	for name, tc := range tcs {
+		tc := tc
+		t.Run(name, func(t *testing.T) {
+			mockCtrl := gomock.NewController(t)
+			// when test is done, assert expectations on all mock objects.
+			defer mockCtrl.Finish()
+			lstore := mocks.NewMockStore(mockCtrl)
+			pfhandler := mocks.NewMockpartialFileHandler(mockCtrl)
+
+			var path string
+
+			if !tc.isDir {
+				// create file
+				tempFile, err := ioutil.TempFile("", "TestRemoteGetSector-")
+				require.NoError(t, err)
+
+				defer func() {
+					_ = os.Remove(tempFile.Name())
+				}()
+
+				_, err = tempFile.Write(fileBytes)
+				require.NoError(t, err)
+				path = tempFile.Name()
+			} else {
+				// create dir with a file
+				tempFile2, err := ioutil.TempFile("", "TestRemoteGetSector-")
+				require.NoError(t, err)
+				defer func() {
+					_ = os.Remove(tempFile2.Name())
+				}()
+
+				stat, err := os.Stat(tempFile2.Name())
+				require.NoError(t, err)
+				tempDir, err := ioutil.TempDir("", "TestRemoteGetSector-")
+				require.NoError(t, err)
+
+				defer func() {
+					_ = os.RemoveAll(tempDir)
+				}()
+
+				require.NoError(t, os.Rename(tempFile2.Name(), filepath.Join(tempDir, stat.Name())))
+
+				path = tempDir
+			}
+
+			handler := &stores.FetchHandler{
+				lstore,
+				pfhandler,
+			}
+
+			// run http server
+			ts := httptest.NewServer(handler)
+			defer ts.Close()
+
+			si := validSectorInfo
+			if tc.siFnc != nil {
+				tc.siFnc(&si)
+			}
+
+			if tc.storeFnc != nil {
+				tc.storeFnc(lstore, path)
+			}
+
+			// call remoteGetSector
+			url := fmt.Sprintf("%s/remote/%s/%s",
+				ts.URL,
+				si.fileType,
+				si.sectorName,
+			)
+			resp, err := http.Get(url)
+			require.NoError(t, err)
+			defer func() {
+				_ = resp.Body.Close()
+			}()
+
+			bz, err := ioutil.ReadAll(resp.Body)
+			require.NoError(t, err)
+
+			// assert expected status code
+			require.Equal(t, tc.expectedStatusCode, resp.StatusCode)
+
+			if !tc.noResponseBytes {
+				if !tc.isDir {
+					require.EqualValues(t, tc.expectedResponseBytes, bz)
+				}
+			}
+
+			require.Equal(t, tc.expectedContentType, resp.Header.Get("Content-Type"))
+		})
+	}
+}
diff --git a/sector-storage/stores/index.go b/sector-storage/stores/index.go
index 28e09f16..5da5fa07 100644
--- a/sector-storage/stores/index.go
+++ b/sector-storage/stores/index.go
@@ -3,6 +3,7 @@ package stores
 import (
 	"context"
 	"errors"
+	"fmt"
 	"net/url"
 	gopath "path"
 	"sort"
@@ -383,7 +384,16 @@ func (i *Index) StorageBestAlloc(ctx context.Context, allocate storiface.SectorF
 
 	var candidates []storageEntry
 
-	spaceReq, err := allocate.SealSpaceUse(ssize)
+
var err error + var spaceReq uint64 + switch pathType { + case storiface.PathSealing: + spaceReq, err = allocate.SealSpaceUse(ssize) + case storiface.PathStorage: + spaceReq, err = allocate.StoreSpaceUse(ssize) + default: + panic(fmt.Sprintf("unexpected pathType: %s", pathType)) + } if err != nil { return nil, xerrors.Errorf("estimating required space: %w", err) } diff --git a/sector-storage/stores/interface.go b/sector-storage/stores/interface.go index 8a1d0fe9..b4990e11 100644 --- a/sector-storage/stores/interface.go +++ b/sector-storage/stores/interface.go @@ -2,15 +2,34 @@ package stores import ( "context" + "os" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/venus-sealer/sector-storage/fsutil" + "github.com/filecoin-project/venus-sealer/sector-storage/partialfile" "github.com/filecoin-project/venus-sealer/sector-storage/storiface" ) +// PartialFileHandler helps mock out the partial file functionality during testing. +type partialFileHandler interface { + // OpenPartialFile opens and returns a partial file at the given path and also verifies it has the given + // size + OpenPartialFile(maxPieceSize abi.PaddedPieceSize, path string) (*partialfile.PartialFile, error) + + // HasAllocated returns true if the given partial file has an unsealed piece starting at the given offset with the given size. + // returns false otherwise. + HasAllocated(pf *partialfile.PartialFile, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) + + // Reader returns a file from which we can read the unsealed piece in the partial file. + Reader(pf *partialfile.PartialFile, offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) (*os.File, error) + + // Close closes the partial file + Close(pf *partialfile.PartialFile) error +} + type Store interface { AcquireSector(ctx context.Context, s storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, sealing storiface.PathType, op storiface.AcquireMode) (paths storiface.SectorPaths, stores storiface.SectorPaths, err error) Remove(ctx context.Context, s abi.SectorID, types storiface.SectorFileType, force bool) error @@ -23,4 +42,6 @@ type Store interface { MoveStorage(ctx context.Context, s storage.SectorRef, types storiface.SectorFileType) error FsStat(ctx context.Context, id ID) (fsutil.FsStat, error) + + Reserve(ctx context.Context, sid storage.SectorRef, ft storiface.SectorFileType, storageIDs storiface.SectorPaths, overheadTab map[storiface.SectorFileType]int) (func(), error) } diff --git a/sector-storage/stores/local.go b/sector-storage/stores/local.go index 1acce9a0..31e7b5c0 100644 --- a/sector-storage/stores/local.go +++ b/sector-storage/stores/local.go @@ -114,7 +114,7 @@ func (p *path) stat(ls LocalStorage) (fsutil.FsStat, error) { used, err = ls.DiskUsage(p) } if err != nil { - log.Errorf("getting disk usage of '%s': %+v", p.sectorPath(id, fileType), err) + log.Warnf("getting disk usage of '%s': %+v", p.sectorPath(id, fileType), err) continue } @@ -392,8 +392,10 @@ func (st *Local) Reserve(ctx context.Context, sid storage.SectorRef, ft storifac } p.reserved += overhead + p.reservations[sid.ID] |= fileType prevDone := done + saveFileType := fileType done = func() { prevDone() @@ -401,6 +403,10 @@ func (st *Local) Reserve(ctx context.Context, sid storage.SectorRef, ft storifac defer st.localLk.Unlock() p.reserved -= overhead + p.reservations[sid.ID] ^= saveFileType + if p.reservations[sid.ID] == storiface.FTNone { + 
delete(p.reservations, sid.ID) + } } } diff --git a/sector-storage/stores/mocks/index.go b/sector-storage/stores/mocks/index.go new file mode 100644 index 00000000..b33ffc64 --- /dev/null +++ b/sector-storage/stores/mocks/index.go @@ -0,0 +1,169 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: index.go + +// Package mock_stores is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + abi "github.com/filecoin-project/go-state-types/abi" + fsutil "github.com/filecoin-project/venus-sealer/sector-storage/fsutil" + stores "github.com/filecoin-project/venus-sealer/sector-storage/stores" + storiface "github.com/filecoin-project/venus-sealer/sector-storage/storiface" + gomock "github.com/golang/mock/gomock" +) + +// MockSectorIndex is a mock of SectorIndex interface. +type MockSectorIndex struct { + ctrl *gomock.Controller + recorder *MockSectorIndexMockRecorder +} + +// MockSectorIndexMockRecorder is the mock recorder for MockSectorIndex. +type MockSectorIndexMockRecorder struct { + mock *MockSectorIndex +} + +// NewMockSectorIndex creates a new mock instance. +func NewMockSectorIndex(ctrl *gomock.Controller) *MockSectorIndex { + mock := &MockSectorIndex{ctrl: ctrl} + mock.recorder = &MockSectorIndexMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockSectorIndex) EXPECT() *MockSectorIndexMockRecorder { + return m.recorder +} + +// StorageAttach mocks base method. +func (m *MockSectorIndex) StorageAttach(arg0 context.Context, arg1 stores.StorageInfo, arg2 fsutil.FsStat) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StorageAttach", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// StorageAttach indicates an expected call of StorageAttach. +func (mr *MockSectorIndexMockRecorder) StorageAttach(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageAttach", reflect.TypeOf((*MockSectorIndex)(nil).StorageAttach), arg0, arg1, arg2) +} + +// StorageBestAlloc mocks base method. +func (m *MockSectorIndex) StorageBestAlloc(ctx context.Context, allocate storiface.SectorFileType, ssize abi.SectorSize, pathType storiface.PathType) ([]stores.StorageInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StorageBestAlloc", ctx, allocate, ssize, pathType) + ret0, _ := ret[0].([]stores.StorageInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StorageBestAlloc indicates an expected call of StorageBestAlloc. +func (mr *MockSectorIndexMockRecorder) StorageBestAlloc(ctx, allocate, ssize, pathType interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageBestAlloc", reflect.TypeOf((*MockSectorIndex)(nil).StorageBestAlloc), ctx, allocate, ssize, pathType) +} + +// StorageDeclareSector mocks base method. +func (m *MockSectorIndex) StorageDeclareSector(ctx context.Context, storageID stores.ID, s abi.SectorID, ft storiface.SectorFileType, primary bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StorageDeclareSector", ctx, storageID, s, ft, primary) + ret0, _ := ret[0].(error) + return ret0 +} + +// StorageDeclareSector indicates an expected call of StorageDeclareSector. 
+func (mr *MockSectorIndexMockRecorder) StorageDeclareSector(ctx, storageID, s, ft, primary interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageDeclareSector", reflect.TypeOf((*MockSectorIndex)(nil).StorageDeclareSector), ctx, storageID, s, ft, primary) +} + +// StorageDropSector mocks base method. +func (m *MockSectorIndex) StorageDropSector(ctx context.Context, storageID stores.ID, s abi.SectorID, ft storiface.SectorFileType) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StorageDropSector", ctx, storageID, s, ft) + ret0, _ := ret[0].(error) + return ret0 +} + +// StorageDropSector indicates an expected call of StorageDropSector. +func (mr *MockSectorIndexMockRecorder) StorageDropSector(ctx, storageID, s, ft interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageDropSector", reflect.TypeOf((*MockSectorIndex)(nil).StorageDropSector), ctx, storageID, s, ft) +} + +// StorageFindSector mocks base method. +func (m *MockSectorIndex) StorageFindSector(ctx context.Context, sector abi.SectorID, ft storiface.SectorFileType, ssize abi.SectorSize, allowFetch bool) ([]stores.SectorStorageInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StorageFindSector", ctx, sector, ft, ssize, allowFetch) + ret0, _ := ret[0].([]stores.SectorStorageInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StorageFindSector indicates an expected call of StorageFindSector. +func (mr *MockSectorIndexMockRecorder) StorageFindSector(ctx, sector, ft, ssize, allowFetch interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageFindSector", reflect.TypeOf((*MockSectorIndex)(nil).StorageFindSector), ctx, sector, ft, ssize, allowFetch) +} + +// StorageInfo mocks base method. +func (m *MockSectorIndex) StorageInfo(arg0 context.Context, arg1 stores.ID) (stores.StorageInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StorageInfo", arg0, arg1) + ret0, _ := ret[0].(stores.StorageInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StorageInfo indicates an expected call of StorageInfo. +func (mr *MockSectorIndexMockRecorder) StorageInfo(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageInfo", reflect.TypeOf((*MockSectorIndex)(nil).StorageInfo), arg0, arg1) +} + +// StorageLock mocks base method. +func (m *MockSectorIndex) StorageLock(ctx context.Context, sector abi.SectorID, read, write storiface.SectorFileType) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StorageLock", ctx, sector, read, write) + ret0, _ := ret[0].(error) + return ret0 +} + +// StorageLock indicates an expected call of StorageLock. +func (mr *MockSectorIndexMockRecorder) StorageLock(ctx, sector, read, write interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageLock", reflect.TypeOf((*MockSectorIndex)(nil).StorageLock), ctx, sector, read, write) +} + +// StorageReportHealth mocks base method. +func (m *MockSectorIndex) StorageReportHealth(arg0 context.Context, arg1 stores.ID, arg2 stores.HealthReport) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StorageReportHealth", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// StorageReportHealth indicates an expected call of StorageReportHealth. 
+func (mr *MockSectorIndexMockRecorder) StorageReportHealth(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageReportHealth", reflect.TypeOf((*MockSectorIndex)(nil).StorageReportHealth), arg0, arg1, arg2) +} + +// StorageTryLock mocks base method. +func (m *MockSectorIndex) StorageTryLock(ctx context.Context, sector abi.SectorID, read, write storiface.SectorFileType) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StorageTryLock", ctx, sector, read, write) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StorageTryLock indicates an expected call of StorageTryLock. +func (mr *MockSectorIndexMockRecorder) StorageTryLock(ctx, sector, read, write interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageTryLock", reflect.TypeOf((*MockSectorIndex)(nil).StorageTryLock), ctx, sector, read, write) +} diff --git a/sector-storage/stores/mocks/stores.go b/sector-storage/stores/mocks/stores.go new file mode 100644 index 00000000..78992efd --- /dev/null +++ b/sector-storage/stores/mocks/stores.go @@ -0,0 +1,212 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: interface.go + +// Package mock_stores is a generated GoMock package. +package mocks + +import ( + context "context" + os "os" + reflect "reflect" + + abi "github.com/filecoin-project/go-state-types/abi" + fsutil "github.com/filecoin-project/venus-sealer/sector-storage/fsutil" + partialfile "github.com/filecoin-project/venus-sealer/sector-storage/partialfile" + stores "github.com/filecoin-project/venus-sealer/sector-storage/stores" + storiface "github.com/filecoin-project/venus-sealer/sector-storage/storiface" + storage "github.com/filecoin-project/specs-storage/storage" + gomock "github.com/golang/mock/gomock" +) + +// MockpartialFileHandler is a mock of partialFileHandler interface. +type MockpartialFileHandler struct { + ctrl *gomock.Controller + recorder *MockpartialFileHandlerMockRecorder +} + +// MockpartialFileHandlerMockRecorder is the mock recorder for MockpartialFileHandler. +type MockpartialFileHandlerMockRecorder struct { + mock *MockpartialFileHandler +} + +// NewMockpartialFileHandler creates a new mock instance. +func NewMockpartialFileHandler(ctrl *gomock.Controller) *MockpartialFileHandler { + mock := &MockpartialFileHandler{ctrl: ctrl} + mock.recorder = &MockpartialFileHandlerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockpartialFileHandler) EXPECT() *MockpartialFileHandlerMockRecorder { + return m.recorder +} + +// Close mocks base method. +func (m *MockpartialFileHandler) Close(pf *partialfile.PartialFile) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Close", pf) + ret0, _ := ret[0].(error) + return ret0 +} + +// Close indicates an expected call of Close. +func (mr *MockpartialFileHandlerMockRecorder) Close(pf interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockpartialFileHandler)(nil).Close), pf) +} + +// HasAllocated mocks base method. 
+func (m *MockpartialFileHandler) HasAllocated(pf *partialfile.PartialFile, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HasAllocated", pf, offset, size) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// HasAllocated indicates an expected call of HasAllocated. +func (mr *MockpartialFileHandlerMockRecorder) HasAllocated(pf, offset, size interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasAllocated", reflect.TypeOf((*MockpartialFileHandler)(nil).HasAllocated), pf, offset, size) +} + +// OpenPartialFile mocks base method. +func (m *MockpartialFileHandler) OpenPartialFile(maxPieceSize abi.PaddedPieceSize, path string) (*partialfile.PartialFile, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "OpenPartialFile", maxPieceSize, path) + ret0, _ := ret[0].(*partialfile.PartialFile) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// OpenPartialFile indicates an expected call of OpenPartialFile. +func (mr *MockpartialFileHandlerMockRecorder) OpenPartialFile(maxPieceSize, path interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OpenPartialFile", reflect.TypeOf((*MockpartialFileHandler)(nil).OpenPartialFile), maxPieceSize, path) +} + +// Reader mocks base method. +func (m *MockpartialFileHandler) Reader(pf *partialfile.PartialFile, offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) (*os.File, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Reader", pf, offset, size) + ret0, _ := ret[0].(*os.File) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Reader indicates an expected call of Reader. +func (mr *MockpartialFileHandlerMockRecorder) Reader(pf, offset, size interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reader", reflect.TypeOf((*MockpartialFileHandler)(nil).Reader), pf, offset, size) +} + +// MockStore is a mock of Store interface. +type MockStore struct { + ctrl *gomock.Controller + recorder *MockStoreMockRecorder +} + +// MockStoreMockRecorder is the mock recorder for MockStore. +type MockStoreMockRecorder struct { + mock *MockStore +} + +// NewMockStore creates a new mock instance. +func NewMockStore(ctrl *gomock.Controller) *MockStore { + mock := &MockStore{ctrl: ctrl} + mock.recorder = &MockStoreMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockStore) EXPECT() *MockStoreMockRecorder { + return m.recorder +} + +// AcquireSector mocks base method. +func (m *MockStore) AcquireSector(ctx context.Context, s storage.SectorRef, existing, allocate storiface.SectorFileType, sealing storiface.PathType, op storiface.AcquireMode) (storiface.SectorPaths, storiface.SectorPaths, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AcquireSector", ctx, s, existing, allocate, sealing, op) + ret0, _ := ret[0].(storiface.SectorPaths) + ret1, _ := ret[1].(storiface.SectorPaths) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// AcquireSector indicates an expected call of AcquireSector. 
+func (mr *MockStoreMockRecorder) AcquireSector(ctx, s, existing, allocate, sealing, op interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AcquireSector", reflect.TypeOf((*MockStore)(nil).AcquireSector), ctx, s, existing, allocate, sealing, op) +} + +// FsStat mocks base method. +func (m *MockStore) FsStat(ctx context.Context, id stores.ID) (fsutil.FsStat, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FsStat", ctx, id) + ret0, _ := ret[0].(fsutil.FsStat) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FsStat indicates an expected call of FsStat. +func (mr *MockStoreMockRecorder) FsStat(ctx, id interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FsStat", reflect.TypeOf((*MockStore)(nil).FsStat), ctx, id) +} + +// MoveStorage mocks base method. +func (m *MockStore) MoveStorage(ctx context.Context, s storage.SectorRef, types storiface.SectorFileType) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MoveStorage", ctx, s, types) + ret0, _ := ret[0].(error) + return ret0 +} + +// MoveStorage indicates an expected call of MoveStorage. +func (mr *MockStoreMockRecorder) MoveStorage(ctx, s, types interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MoveStorage", reflect.TypeOf((*MockStore)(nil).MoveStorage), ctx, s, types) +} + +// Remove mocks base method. +func (m *MockStore) Remove(ctx context.Context, s abi.SectorID, types storiface.SectorFileType, force bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Remove", ctx, s, types, force) + ret0, _ := ret[0].(error) + return ret0 +} + +// Remove indicates an expected call of Remove. +func (mr *MockStoreMockRecorder) Remove(ctx, s, types, force interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Remove", reflect.TypeOf((*MockStore)(nil).Remove), ctx, s, types, force) +} + +// RemoveCopies mocks base method. +func (m *MockStore) RemoveCopies(ctx context.Context, s abi.SectorID, types storiface.SectorFileType) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RemoveCopies", ctx, s, types) + ret0, _ := ret[0].(error) + return ret0 +} + +// RemoveCopies indicates an expected call of RemoveCopies. +func (mr *MockStoreMockRecorder) RemoveCopies(ctx, s, types interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveCopies", reflect.TypeOf((*MockStore)(nil).RemoveCopies), ctx, s, types) +} + +// Reserve mocks base method. +func (m *MockStore) Reserve(ctx context.Context, sid storage.SectorRef, ft storiface.SectorFileType, storageIDs storiface.SectorPaths, overheadTab map[storiface.SectorFileType]int) (func(), error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Reserve", ctx, sid, ft, storageIDs, overheadTab) + ret0, _ := ret[0].(func()) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Reserve indicates an expected call of Reserve. 
+func (mr *MockStoreMockRecorder) Reserve(ctx, sid, ft, storageIDs, overheadTab interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reserve", reflect.TypeOf((*MockStore)(nil).Reserve), ctx, sid, ft, storageIDs, overheadTab) +} diff --git a/sector-storage/stores/remote.go b/sector-storage/stores/remote.go index 3772f628..d5f4b7b1 100644 --- a/sector-storage/stores/remote.go +++ b/sector-storage/stores/remote.go @@ -3,6 +3,7 @@ package stores import ( "context" "encoding/json" + "fmt" "io" "io/ioutil" "math/bits" @@ -31,7 +32,7 @@ var FetchTempSubdir = "fetching" var CopyBuf = 1 << 20 type Remote struct { - local *Local + local Store index SectorIndex auth http.Header @@ -39,6 +40,8 @@ type Remote struct { fetchLk sync.Mutex fetching map[abi.SectorID]chan struct{} + + pfHandler partialFileHandler } func (r *Remote) RemoveCopies(ctx context.Context, s abi.SectorID, types storiface.SectorFileType) error { @@ -49,7 +52,7 @@ func (r *Remote) RemoveCopies(ctx context.Context, s abi.SectorID, types storifa return r.local.RemoveCopies(ctx, s, types) } -func NewRemote(local *Local, index SectorIndex, auth http.Header, fetchLimit int) *Remote { +func NewRemote(local Store, index SectorIndex, auth http.Header, fetchLimit int, pfHandler partialFileHandler) *Remote { return &Remote{ local: local, index: index, @@ -57,7 +60,8 @@ func NewRemote(local *Local, index SectorIndex, auth http.Header, fetchLimit int limit: make(chan struct{}, fetchLimit), - fetching: map[abi.SectorID]chan struct{}{}, + fetching: map[abi.SectorID]chan struct{}{}, + pfHandler: pfHandler, } } @@ -415,4 +419,265 @@ func (r *Remote) FsStat(ctx context.Context, id ID) (fsutil.FsStat, error) { return out, nil } +func (r *Remote) checkAllocated(ctx context.Context, url string, spt abi.RegisteredSealProof, offset, size abi.PaddedPieceSize) (bool, error) { + url = fmt.Sprintf("%s/%d/allocated/%d/%d", url, spt, offset.Unpadded(), size.Unpadded()) + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return false, xerrors.Errorf("request: %w", err) + } + req.Header = r.auth.Clone() + req = req.WithContext(ctx) + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return false, xerrors.Errorf("do request: %w", err) + } + defer resp.Body.Close() // nolint + + switch resp.StatusCode { + case http.StatusOK: + return true, nil + case http.StatusRequestedRangeNotSatisfiable: + return false, nil + default: + return false, xerrors.Errorf("unexpected http response: %d", resp.StatusCode) + } +} + +func (r *Remote) readRemote(ctx context.Context, url string, offset, size abi.PaddedPieceSize) (io.ReadCloser, error) { + if len(r.limit) >= cap(r.limit) { + log.Infof("Throttling remote read, %d already running", len(r.limit)) + } + + // TODO: Smarter throttling + // * Priority (just going sequentially is still pretty good) + // * Per interface + // * Aware of remote load + select { + case r.limit <- struct{}{}: + defer func() { <-r.limit }() + case <-ctx.Done(): + return nil, xerrors.Errorf("context error while waiting for fetch limiter: %w", ctx.Err()) + } + + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, xerrors.Errorf("request: %w", err) + } + + if r.auth != nil { + req.Header = r.auth.Clone() + } + req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+size-1)) + req = req.WithContext(ctx) + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, xerrors.Errorf("do request: %w", err) + } + + if resp.StatusCode 
!= http.StatusOK && resp.StatusCode != http.StatusPartialContent { + resp.Body.Close() // nolint + return nil, xerrors.Errorf("non-200 code: %d", resp.StatusCode) + } + + return resp.Body, nil +} + +// CheckIsUnsealed checks if we have an unsealed piece at the given offset in an already unsealed sector file for the given piece +// either locally or on any of the workers. +// Returns true if we have the unsealed piece, false otherwise. +func (r *Remote) CheckIsUnsealed(ctx context.Context, s storage.SectorRef, offset, size abi.PaddedPieceSize) (bool, error) { + ft := storiface.FTUnsealed + + paths, _, err := r.local.AcquireSector(ctx, s, ft, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove) + if err != nil { + return false, xerrors.Errorf("acquire local: %w", err) + } + + path := storiface.PathByType(paths, ft) + if path != "" { + // if we have the unsealed file locally, check if it has the unsealed piece. + log.Infof("Read local %s (+%d,%d)", path, offset, size) + ssize, err := s.ProofType.SectorSize() + if err != nil { + return false, err + } + + // open the unsealed sector file for the given sector size located at the given path. + pf, err := r.pfHandler.OpenPartialFile(abi.PaddedPieceSize(ssize), path) + if err != nil { + return false, xerrors.Errorf("opening partial file: %w", err) + } + log.Debugf("local partial file opened %s (+%d,%d)", path, offset, size) + + // even though we have an unsealed file for the given sector, we still need to determine if we have the unsealed piece + // in the unsealed sector file. That is what `HasAllocated` checks for. + has, err := r.pfHandler.HasAllocated(pf, storiface.UnpaddedByteIndex(offset.Unpadded()), size.Unpadded()) + if err != nil { + return false, xerrors.Errorf("has allocated: %w", err) + } + + // close the local unsealed file. + if err := r.pfHandler.Close(pf); err != nil { + return false, xerrors.Errorf("failed to close partial file: %s", err) + } + log.Debugf("checked if local partial file has the piece %s (+%d,%d), returning answer=%t", path, offset, size, has) + + // Sector files can technically not have a piece unsealed locally, but have it unsealed in remote storage, so we probably + // want to return only if has is true + if has { + return has, nil + } + } + + // --- We don't have the unsealed piece in an unsealed sector file locally + // Check if we have it in a remote cluster. + + si, err := r.index.StorageFindSector(ctx, s.ID, ft, 0, false) + if err != nil { + return false, xerrors.Errorf("StorageFindSector: %s", err) + } + + if len(si) == 0 { + return false, nil + } + + sort.Slice(si, func(i, j int) bool { + return si[i].Weight < si[j].Weight + }) + + for _, info := range si { + for _, url := range info.URLs { + ok, err := r.checkAllocated(ctx, url, s.ProofType, offset, size) + if err != nil { + log.Warnw("check if remote has piece", "url", url, "error", err) + continue + } + if !ok { + continue + } + + return true, nil + } + } + + return false, nil +} + +// Reader returns a reader for an unsealed piece at the given offset in the given sector. +// If the Miner has the unsealed piece locally, it will return a reader that reads from the local copy. +// If the Miner does NOT have the unsealed piece locally, it will query all workers that have the unsealed sector file +// to know if they have the unsealed piece and will then read the unsealed piece data from a worker that has it. +// +// Returns a nil reader if : +// 1. no worker(local worker included) has an unsealed file for the given sector OR +// 2. 
no worker(local worker included) has the unsealed piece in their unsealed sector file. +// Will return a nil reader and a nil error in such a case. +func (r *Remote) Reader(ctx context.Context, s storage.SectorRef, offset, size abi.PaddedPieceSize) (io.ReadCloser, error) { + ft := storiface.FTUnsealed + + // check if we have the unsealed sector file locally + paths, _, err := r.local.AcquireSector(ctx, s, ft, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove) + if err != nil { + return nil, xerrors.Errorf("acquire local: %w", err) + } + + path := storiface.PathByType(paths, ft) + + if path != "" { + // if we have the unsealed file locally, return a reader that can be used to read the contents of the + // unsealed piece. + log.Debugf("Check local %s (+%d,%d)", path, offset, size) + ssize, err := s.ProofType.SectorSize() + if err != nil { + return nil, err + } + log.Debugf("fetched sector size %s (+%d,%d)", path, offset, size) + + // open the unsealed sector file for the given sector size located at the given path. + pf, err := r.pfHandler.OpenPartialFile(abi.PaddedPieceSize(ssize), path) + if err != nil { + return nil, xerrors.Errorf("opening partial file: %w", err) + } + log.Debugf("local partial file opened %s (+%d,%d)", path, offset, size) + + // even though we have an unsealed file for the given sector, we still need to determine if we have the unsealed piece + // in the unsealed sector file. That is what `HasAllocated` checks for. + has, err := r.pfHandler.HasAllocated(pf, storiface.UnpaddedByteIndex(offset.Unpadded()), size.Unpadded()) + if err != nil { + return nil, xerrors.Errorf("has allocated: %w", err) + } + log.Debugf("check if partial file is allocated %s (+%d,%d)", path, offset, size) + + if has { + log.Infof("returning piece reader for local unsealed piece sector=%+v, (offset=%d, size=%d)", s.ID, offset, size) + return r.pfHandler.Reader(pf, storiface.PaddedByteIndex(offset), size) + } + + log.Debugf("miner has unsealed file but not unseal piece, %s (+%d,%d)", path, offset, size) + if err := r.pfHandler.Close(pf); err != nil { + return nil, xerrors.Errorf("close partial file: %w", err) + } + } + + // --- We don't have the unsealed piece in an unsealed sector file locally + + // if we don't have the unsealed sector file locally, we'll first lookup the Miner Sector Store Index + // to determine which workers have the unsealed file and then query those workers to know + // if they have the unsealed piece in the unsealed sector file. + si, err := r.index.StorageFindSector(ctx, s.ID, ft, 0, false) + if err != nil { + log.Debugf("Reader, did not find unsealed file on any of the workers %s (+%d,%d)", path, offset, size) + return nil, err + } + + if len(si) == 0 { + return nil, xerrors.Errorf("failed to read sector %v from remote(%d): %w", s, ft, storiface.ErrSectorNotFound) + } + + sort.Slice(si, func(i, j int) bool { + return si[i].Weight > si[j].Weight + }) + + var lastErr error + for _, info := range si { + for _, url := range info.URLs { + // checkAllocated makes a JSON RPC query to a remote worker to determine if it has + // unsealed piece in their unsealed sector file. + ok, err := r.checkAllocated(ctx, url, s.ProofType, offset, size) + if err != nil { + log.Warnw("check if remote has piece", "url", url, "error", err) + lastErr = err + continue + } + if !ok { + continue + } + + // readRemote fetches a reader that we can use to read the unsealed piece from the remote worker. 
+ // It uses a ranged HTTP query to ensure we ONLY read the unsealed piece and not the entire unsealed file. + rd, err := r.readRemote(ctx, url, offset, size) + if err != nil { + log.Warnw("reading from remote", "url", url, "error", err) + lastErr = err + continue + } + log.Infof("Read remote %s (+%d,%d)", url, offset, size) + return rd, nil + } + } + + // we couldn't find a unsealed file with the unsealed piece, will return a nil reader. + log.Debugf("returning nil reader, did not find unsealed piece for %+v (+%d,%d), last error=%s", s, offset, size, lastErr) + return nil, nil +} + +func (r *Remote) Reserve(ctx context.Context, sid storage.SectorRef, ft storiface.SectorFileType, storageIDs storiface.SectorPaths, overheadTab map[storiface.SectorFileType]int) (func(), error) { + log.Warnf("reserve called on remote store, sectorID: %v", sid.ID) + return func() { + + }, nil +} + var _ Store = &Remote{} diff --git a/sector-storage/stores/remote_test.go b/sector-storage/stores/remote_test.go new file mode 100644 index 00000000..067782e2 --- /dev/null +++ b/sector-storage/stores/remote_test.go @@ -0,0 +1,742 @@ +package stores_test + +import ( + "context" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-storage/storage" + "github.com/golang/mock/gomock" + "github.com/gorilla/mux" + logging "github.com/ipfs/go-log/v2" + "github.com/stretchr/testify/require" + "golang.org/x/xerrors" + + "github.com/filecoin-project/venus-sealer/sector-storage/partialfile" + "github.com/filecoin-project/venus-sealer/sector-storage/stores" + "github.com/filecoin-project/venus-sealer/sector-storage/stores/mocks" + "github.com/filecoin-project/venus-sealer/sector-storage/storiface" +) + +func TestReader(t *testing.T) { + logging.SetAllLoggers(logging.LevelDebug) + bz := []byte("Hello World") + + pfPath := "path" + emptyPartialFile := &partialfile.PartialFile{} + sectorSize := abi.SealProofInfos[1].SectorSize + + ft := storiface.FTUnsealed + + sectorRef := storage.SectorRef{ + ID: abi.SectorID{ + Miner: 123, + Number: 123, + }, + ProofType: 1, + } + + offset := abi.PaddedPieceSize(100) + size := abi.PaddedPieceSize(1000) + ctx := context.Background() + + tcs := map[string]struct { + storeFnc func(s *mocks.MockStore) + pfFunc func(s *mocks.MockpartialFileHandler) + indexFnc func(s *mocks.MockSectorIndex, serverURL string) + + needHttpServer bool + + getAllocatedReturnCode int + getSectorReturnCode int + + serverUrl string + + // expectation + errStr string + expectedNonNilReader bool + expectedSectorBytes []byte + }{ + + // -------- have the unsealed file locally + "fails when error while acquiring unsealed file": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, pfPath, xerrors.New("acquire error")) + }, + + errStr: "acquire error", + }, + + "fails when error while opening local partial (unsealed) file": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, pfPath, nil) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + mockPartialFileOpen(pf, sectorSize, pfPath, xerrors.New("pf open error")) + }, + errStr: "pf open error", + }, + + "fails when error while checking if local unsealed file has piece": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, pfPath, nil) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + mockPartialFileOpen(pf, sectorSize, pfPath, nil) + mockCheckAllocation(pf, offset, size, emptyPartialFile, 
+ true, xerrors.New("piece check error")) + }, + + errStr: "piece check error", + }, + + "fails when error while closing local unsealed file that does not have the piece": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, pfPath, nil) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + mockPartialFileOpen(pf, sectorSize, pfPath, nil) + mockCheckAllocation(pf, offset, size, emptyPartialFile, + false, nil) + pf.EXPECT().Close(emptyPartialFile).Return(xerrors.New("close error")).Times(1) + }, + errStr: "close error", + }, + + "fails when error while fetching reader for the local unsealed file that has the unsealed piece": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, pfPath, nil) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + mockPartialFileOpen(pf, sectorSize, pfPath, nil) + mockCheckAllocation(pf, offset, size, emptyPartialFile, + true, nil) + mockPfReader(pf, emptyPartialFile, offset, size, nil, xerrors.New("reader error")) + + }, + errStr: "reader error", + }, + + // ------------------- don't have the unsealed file locally + + "fails when error while finding sector": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, "", nil) + }, + + indexFnc: func(in *mocks.MockSectorIndex, _ string) { + in.EXPECT().StorageFindSector(gomock.Any(), sectorRef.ID, storiface.FTUnsealed, gomock.Any(), + false).Return(nil, xerrors.New("find sector error")) + }, + errStr: "find sector error", + }, + + "fails when no worker has unsealed file": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, "", nil) + }, + + indexFnc: func(in *mocks.MockSectorIndex, _ string) { + in.EXPECT().StorageFindSector(gomock.Any(), sectorRef.ID, storiface.FTUnsealed, gomock.Any(), + false).Return(nil, nil) + }, + errStr: storiface.ErrSectorNotFound.Error(), + }, + + // --- nil reader when local unsealed file does NOT have unsealed piece + "nil reader when local unsealed file does not have the unsealed piece and remote sector also dosen't have the unsealed piece": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, pfPath, nil) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + mockPartialFileOpen(pf, sectorSize, pfPath, nil) + mockCheckAllocation(pf, offset, size, emptyPartialFile, + false, nil) + + pf.EXPECT().Close(emptyPartialFile).Return(nil).Times(1) + + }, + + indexFnc: func(in *mocks.MockSectorIndex, url string) { + si := stores.SectorStorageInfo{ + URLs: []string{url}, + } + + in.EXPECT().StorageFindSector(gomock.Any(), sectorRef.ID, storiface.FTUnsealed, gomock.Any(), + false).Return([]stores.SectorStorageInfo{si}, nil).Times(1) + }, + + needHttpServer: true, + getAllocatedReturnCode: 500, + }, + + // ---- nil reader when none of the remote unsealed file has unsealed piece + "nil reader when none of the worker has the unsealed piece": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, "", nil) + }, + + indexFnc: func(in *mocks.MockSectorIndex, url string) { + si := stores.SectorStorageInfo{ + URLs: []string{url}, + } + + in.EXPECT().StorageFindSector(gomock.Any(), sectorRef.ID, storiface.FTUnsealed, gomock.Any(), + false).Return([]stores.SectorStorageInfo{si}, nil).Times(1) + }, + + needHttpServer: true, + getAllocatedReturnCode: 500, + }, + + "nil reader when none of the worker is able to serve the unsealed piece even though they have it": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, "", nil) + }, + + indexFnc: func(in 
*mocks.MockSectorIndex, url string) { + si := stores.SectorStorageInfo{ + URLs: []string{url}, + } + + in.EXPECT().StorageFindSector(gomock.Any(), sectorRef.ID, storiface.FTUnsealed, gomock.Any(), + false).Return([]stores.SectorStorageInfo{si}, nil).Times(1) + }, + + needHttpServer: true, + getSectorReturnCode: 500, + getAllocatedReturnCode: 200, + }, + + // ---- Success for local unsealed file + "successfully fetches reader for piece from local unsealed file": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, pfPath, nil) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + mockPartialFileOpen(pf, sectorSize, pfPath, nil) + mockCheckAllocation(pf, offset, size, emptyPartialFile, + true, nil) + + f, err := ioutil.TempFile("", "TestReader-") + require.NoError(t, err) + _, err = f.Write(bz) + require.NoError(t, err) + require.NoError(t, f.Close()) + f, err = os.Open(f.Name()) + require.NoError(t, err) + + mockPfReader(pf, emptyPartialFile, offset, size, f, nil) + + }, + + expectedNonNilReader: true, + expectedSectorBytes: bz, + }, + + // --- Success for remote unsealed file + // --- Success for remote unsealed file + "successfully fetches reader from remote unsealed piece when local unsealed file does NOT have the unsealed Piece": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, pfPath, nil) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + mockPartialFileOpen(pf, sectorSize, pfPath, nil) + mockCheckAllocation(pf, offset, size, emptyPartialFile, + false, nil) + + pf.EXPECT().Close(emptyPartialFile).Return(nil).Times(1) + + }, + + indexFnc: func(in *mocks.MockSectorIndex, url string) { + si := stores.SectorStorageInfo{ + URLs: []string{url}, + } + + in.EXPECT().StorageFindSector(gomock.Any(), sectorRef.ID, storiface.FTUnsealed, gomock.Any(), + false).Return([]stores.SectorStorageInfo{si}, nil).Times(1) + }, + + needHttpServer: true, + getSectorReturnCode: 200, + getAllocatedReturnCode: 200, + expectedSectorBytes: bz, + expectedNonNilReader: true, + }, + + "successfully fetches reader for piece from remote unsealed piece": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, "", nil) + }, + + indexFnc: func(in *mocks.MockSectorIndex, url string) { + si := stores.SectorStorageInfo{ + URLs: []string{url}, + } + + in.EXPECT().StorageFindSector(gomock.Any(), sectorRef.ID, storiface.FTUnsealed, gomock.Any(), + false).Return([]stores.SectorStorageInfo{si}, nil).Times(1) + }, + + needHttpServer: true, + getSectorReturnCode: 200, + getAllocatedReturnCode: 200, + expectedSectorBytes: bz, + expectedNonNilReader: true, + }, + } + + for name, tc := range tcs { + tc := tc + t.Run(name, func(t *testing.T) { + // create go mock controller here + mockCtrl := gomock.NewController(t) + // when test is done, assert expectations on all mock objects. 
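+ // Finish reports any EXPECT()ed call that was never observed;
+ // unexpected calls already fail the test the moment they happen.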
+ defer mockCtrl.Finish() + + // create them mocks + lstore := mocks.NewMockStore(mockCtrl) + pfhandler := mocks.NewMockpartialFileHandler(mockCtrl) + index := mocks.NewMockSectorIndex(mockCtrl) + + if tc.storeFnc != nil { + tc.storeFnc(lstore) + } + if tc.pfFunc != nil { + tc.pfFunc(pfhandler) + } + + if tc.needHttpServer { + // run http server + ts := httptest.NewServer(&mockHttpServer{ + expectedSectorName: storiface.SectorName(sectorRef.ID), + expectedFileType: ft.String(), + expectedOffset: fmt.Sprintf("%d", offset.Unpadded()), + expectedSize: fmt.Sprintf("%d", size.Unpadded()), + expectedSectorType: fmt.Sprintf("%d", sectorRef.ProofType), + + getAllocatedReturnCode: tc.getAllocatedReturnCode, + getSectorReturnCode: tc.getSectorReturnCode, + getSectorBytes: tc.expectedSectorBytes, + }) + defer ts.Close() + tc.serverUrl = fmt.Sprintf("%s/remote/%s/%s", ts.URL, ft.String(), storiface.SectorName(sectorRef.ID)) + } + if tc.indexFnc != nil { + tc.indexFnc(index, tc.serverUrl) + } + + remoteStore := stores.NewRemote(lstore, index, nil, 6000, pfhandler) + + rd, err := remoteStore.Reader(ctx, sectorRef, offset, size) + + if tc.errStr != "" { + require.Error(t, err) + require.Nil(t, rd) + require.Contains(t, err.Error(), tc.errStr) + } else { + require.NoError(t, err) + } + + if !tc.expectedNonNilReader { + require.Nil(t, rd) + } else { + require.NotNil(t, rd) + defer func() { + require.NoError(t, rd.Close()) + }() + + if f, ok := rd.(*os.File); ok { + require.NoError(t, os.Remove(f.Name())) + } + + bz, err := ioutil.ReadAll(rd) + require.NoError(t, err) + require.Equal(t, tc.expectedSectorBytes, bz) + } + + }) + } +} + +func TestCheckIsUnsealed(t *testing.T) { + logging.SetAllLoggers(logging.LevelDebug) + + pfPath := "path" + ft := storiface.FTUnsealed + emptyPartialFile := &partialfile.PartialFile{} + + sectorRef := storage.SectorRef{ + ID: abi.SectorID{ + Miner: 123, + Number: 123, + }, + ProofType: 1, + } + sectorSize := abi.SealProofInfos[1].SectorSize + + offset := abi.PaddedPieceSize(100) + size := abi.PaddedPieceSize(1000) + ctx := context.Background() + + tcs := map[string]struct { + storeFnc func(s *mocks.MockStore) + pfFunc func(s *mocks.MockpartialFileHandler) + indexFnc func(s *mocks.MockSectorIndex, serverURL string) + + needHttpServer bool + + getAllocatedReturnCode int + + serverUrl string + + // expectation + errStr string + expectedIsUnealed bool + }{ + + // -------- have the unsealed file locally + "fails when error while acquiring unsealed file": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, pfPath, xerrors.New("acquire error")) + }, + + errStr: "acquire error", + }, + + "fails when error while opening local partial (unsealed) file": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, pfPath, nil) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + mockPartialFileOpen(pf, sectorSize, pfPath, xerrors.New("pf open error")) + }, + errStr: "pf open error", + }, + + "fails when error while checking if local unsealed file has piece": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, pfPath, nil) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + mockPartialFileOpen(pf, sectorSize, pfPath, nil) + mockCheckAllocation(pf, offset, size, emptyPartialFile, + true, xerrors.New("piece check error")) + }, + + errStr: "piece check error", + }, + + "fails when error while closing local unsealed file": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, pfPath, nil) + }, + + 
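+ // (CheckIsUnsealed must surface the Close error registered below.)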
pfFunc: func(pf *mocks.MockpartialFileHandler) { + mockPartialFileOpen(pf, sectorSize, pfPath, nil) + + mockCheckAllocation(pf, offset, size, emptyPartialFile, + false, nil) + + pf.EXPECT().Close(emptyPartialFile).Return(xerrors.New("close error")).Times(1) + }, + errStr: "close error", + }, + + // ------------------- don't have the unsealed file locally + + "fails when error while finding sector": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, "", nil) + }, + + indexFnc: func(in *mocks.MockSectorIndex, _ string) { + in.EXPECT().StorageFindSector(gomock.Any(), sectorRef.ID, storiface.FTUnsealed, gomock.Any(), + false).Return(nil, xerrors.New("find sector error")) + }, + errStr: "find sector error", + }, + + "false when no worker has unsealed file": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, "", nil) + }, + + indexFnc: func(in *mocks.MockSectorIndex, _ string) { + in.EXPECT().StorageFindSector(gomock.Any(), sectorRef.ID, storiface.FTUnsealed, gomock.Any(), + false).Return(nil, nil) + }, + }, + + // false when local unsealed file does NOT have unsealed piece + "false when local unsealed file does not have the piece and remote sector too dosen't have the piece": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, pfPath, nil) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + mockPartialFileOpen(pf, sectorSize, pfPath, nil) + mockCheckAllocation(pf, offset, size, emptyPartialFile, + false, nil) + + pf.EXPECT().Close(emptyPartialFile).Return(nil).Times(1) + }, + + indexFnc: func(in *mocks.MockSectorIndex, url string) { + si := stores.SectorStorageInfo{ + URLs: []string{url}, + } + + in.EXPECT().StorageFindSector(gomock.Any(), sectorRef.ID, storiface.FTUnsealed, gomock.Any(), + false).Return([]stores.SectorStorageInfo{si}, nil).Times(1) + }, + + needHttpServer: true, + getAllocatedReturnCode: 500, + }, + + "false when none of the worker has the unsealed piece": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, "", nil) + }, + + indexFnc: func(in *mocks.MockSectorIndex, url string) { + si := stores.SectorStorageInfo{ + URLs: []string{url}, + } + + in.EXPECT().StorageFindSector(gomock.Any(), sectorRef.ID, storiface.FTUnsealed, gomock.Any(), + false).Return([]stores.SectorStorageInfo{si}, nil).Times(1) + }, + + needHttpServer: true, + getAllocatedReturnCode: 500, + }, + + // ---- Success for local unsealed file + "true when local unsealed file has the piece": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, pfPath, nil) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + mockPartialFileOpen(pf, sectorSize, pfPath, nil) + mockCheckAllocation(pf, offset, size, emptyPartialFile, + true, nil) + pf.EXPECT().Close(emptyPartialFile).Return(nil).Times(1) + + }, + + expectedIsUnealed: true, + }, + + // --- Success for remote unsealed file + "true if we have a remote unsealed piece": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, "", nil) + }, + + indexFnc: func(in *mocks.MockSectorIndex, url string) { + si := stores.SectorStorageInfo{ + URLs: []string{url}, + } + + in.EXPECT().StorageFindSector(gomock.Any(), sectorRef.ID, storiface.FTUnsealed, gomock.Any(), + false).Return([]stores.SectorStorageInfo{si}, nil).Times(1) + }, + + needHttpServer: true, + getAllocatedReturnCode: 200, + expectedIsUnealed: true, + }, + + "true when local unsealed file does NOT have the unsealed Piece but remote sector has the unsealed piece": { + 
storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, pfPath, nil) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + mockPartialFileOpen(pf, sectorSize, pfPath, nil) + mockCheckAllocation(pf, offset, size, emptyPartialFile, + false, nil) + + pf.EXPECT().Close(emptyPartialFile).Return(nil).Times(1) + }, + + indexFnc: func(in *mocks.MockSectorIndex, url string) { + si := stores.SectorStorageInfo{ + URLs: []string{url}, + } + + in.EXPECT().StorageFindSector(gomock.Any(), sectorRef.ID, storiface.FTUnsealed, gomock.Any(), + false).Return([]stores.SectorStorageInfo{si}, nil).Times(1) + }, + + needHttpServer: true, + getAllocatedReturnCode: 200, + expectedIsUnealed: true, + }, + } + + for name, tc := range tcs { + tc := tc + t.Run(name, func(t *testing.T) { + // create go mock controller here + mockCtrl := gomock.NewController(t) + // when test is done, assert expectations on all mock objects. + defer mockCtrl.Finish() + + // create them mocks + lstore := mocks.NewMockStore(mockCtrl) + pfhandler := mocks.NewMockpartialFileHandler(mockCtrl) + index := mocks.NewMockSectorIndex(mockCtrl) + + if tc.storeFnc != nil { + tc.storeFnc(lstore) + } + if tc.pfFunc != nil { + tc.pfFunc(pfhandler) + } + + if tc.needHttpServer { + // run http server + ts := httptest.NewServer(&mockHttpServer{ + expectedSectorName: storiface.SectorName(sectorRef.ID), + expectedFileType: ft.String(), + expectedOffset: fmt.Sprintf("%d", offset.Unpadded()), + expectedSize: fmt.Sprintf("%d", size.Unpadded()), + expectedSectorType: fmt.Sprintf("%d", sectorRef.ProofType), + + getAllocatedReturnCode: tc.getAllocatedReturnCode, + }) + defer ts.Close() + tc.serverUrl = fmt.Sprintf("%s/remote/%s/%s", ts.URL, ft.String(), storiface.SectorName(sectorRef.ID)) + } + if tc.indexFnc != nil { + tc.indexFnc(index, tc.serverUrl) + } + + remoteStore := stores.NewRemote(lstore, index, nil, 6000, pfhandler) + + isUnsealed, err := remoteStore.CheckIsUnsealed(ctx, sectorRef, offset, size) + + if tc.errStr != "" { + require.Error(t, err) + require.False(t, isUnsealed) + require.Contains(t, err.Error(), tc.errStr) + } else { + require.NoError(t, err) + } + + require.Equal(t, tc.expectedIsUnealed, isUnsealed) + + }) + } +} + +func mockSectorAcquire(l *mocks.MockStore, sectorRef storage.SectorRef, pfPath string, err error) { + l.EXPECT().AcquireSector(gomock.Any(), sectorRef, storiface.FTUnsealed, + storiface.FTNone, storiface.PathStorage, storiface.AcquireMove).Return(storiface.SectorPaths{ + Unsealed: pfPath, + }, + storiface.SectorPaths{}, err).Times(1) +} + +func mockPartialFileOpen(pf *mocks.MockpartialFileHandler, sectorSize abi.SectorSize, pfPath string, err error) { + pf.EXPECT().OpenPartialFile(abi.PaddedPieceSize(sectorSize), pfPath).Return(&partialfile.PartialFile{}, + err).Times(1) +} + +func mockCheckAllocation(pf *mocks.MockpartialFileHandler, offset, size abi.PaddedPieceSize, file *partialfile.PartialFile, + out bool, err error) { + pf.EXPECT().HasAllocated(file, storiface.UnpaddedByteIndex(offset.Unpadded()), + size.Unpadded()).Return(out, err).Times(1) +} + +func mockPfReader(pf *mocks.MockpartialFileHandler, file *partialfile.PartialFile, offset, size abi.PaddedPieceSize, + outFile *os.File, err error) { + pf.EXPECT().Reader(file, storiface.PaddedByteIndex(offset), size).Return(outFile, err) +} + +type mockHttpServer struct { + expectedSectorName string + expectedFileType string + expectedOffset string + expectedSize string + expectedSectorType string + + getAllocatedReturnCode int + + getSectorReturnCode int + 
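+ // payload written by the stub getSector handler on a 200 response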
getSectorBytes []byte +} + +func (m *mockHttpServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { + mux := mux.NewRouter() + mux.HandleFunc("/remote/{type}/{id}", m.getSector).Methods("GET") + mux.HandleFunc("/remote/{type}/{id}/{spt}/allocated/{offset}/{size}", m.getAllocated).Methods("GET") + mux.ServeHTTP(w, r) +} + +func (m *mockHttpServer) getAllocated(w http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + + if vars["id"] != m.expectedSectorName { + w.WriteHeader(http.StatusBadRequest) + return + } + + if vars["type"] != m.expectedFileType { + w.WriteHeader(http.StatusBadRequest) + return + } + + if vars["spt"] != m.expectedSectorType { + w.WriteHeader(http.StatusBadRequest) + return + } + + if vars["offset"] != m.expectedOffset { + w.WriteHeader(http.StatusBadRequest) + return + } + + if vars["size"] != m.expectedSize { + w.WriteHeader(http.StatusBadRequest) + return + } + + w.WriteHeader(m.getAllocatedReturnCode) +} + +func (m *mockHttpServer) getSector(w http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + + if vars["id"] != m.expectedSectorName { + w.WriteHeader(http.StatusBadRequest) + return + } + + if vars["type"] != m.expectedFileType { + w.WriteHeader(http.StatusBadRequest) + return + } + + w.WriteHeader(m.getSectorReturnCode) + _, _ = w.Write(m.getSectorBytes) +} diff --git a/sector-storage/stores/util_unix.go b/sector-storage/stores/util_unix.go index 2b057468..943681b4 100644 --- a/sector-storage/stores/util_unix.go +++ b/sector-storage/stores/util_unix.go @@ -2,8 +2,10 @@ package stores import ( "bytes" + "os" "os/exec" "path/filepath" + "runtime" "strings" "github.com/mitchellh/go-homedir" @@ -33,7 +35,18 @@ func move(from, to string) error { // can do better var errOut bytes.Buffer - cmd := exec.Command("/usr/bin/env", "mv", "-t", toDir, from) // nolint + + var cmd *exec.Cmd + if runtime.GOOS == "darwin" { + if err := os.MkdirAll(toDir, 0777); err != nil { + return xerrors.Errorf("failed exec MkdirAll: %s", err) + } + + cmd = exec.Command("/usr/bin/env", "mv", from, toDir) // nolint + } else { + cmd = exec.Command("/usr/bin/env", "mv", "-t", toDir, from) // nolint + } + cmd.Stderr = &errOut if err := cmd.Run(); err != nil { return xerrors.Errorf("exec mv (stderr: %s): %w", strings.TrimSpace(errOut.String()), err) diff --git a/sector-storage/storiface/callid.go b/sector-storage/storiface/callid.go new file mode 100644 index 00000000..a5592d0a --- /dev/null +++ b/sector-storage/storiface/callid.go @@ -0,0 +1,20 @@ +package storiface + +import ( + "fmt" + "github.com/filecoin-project/go-state-types/abi" + "github.com/google/uuid" +) + +type CallID struct { + Sector abi.SectorID + ID uuid.UUID +} + +func (c CallID) String() string { + return fmt.Sprintf("%d-%d-%s", c.Sector.Miner, c.Sector.Number, c.ID) +} + +var _ fmt.Stringer = &CallID{} + +var UndefCall CallID diff --git a/sector-storage/storiface/ffi.go b/sector-storage/storiface/ffi.go index f6b2cbdd..2b6df667 100644 --- a/sector-storage/storiface/ffi.go +++ b/sector-storage/storiface/ffi.go @@ -5,6 +5,7 @@ import ( "errors" "github.com/ipfs/go-cid" + "golang.org/x/xerrors" "github.com/filecoin-project/go-state-types/abi" ) @@ -17,6 +18,14 @@ func (i UnpaddedByteIndex) Padded() PaddedByteIndex { return PaddedByteIndex(abi.UnpaddedPieceSize(i).Padded()) } +func (i UnpaddedByteIndex) Valid() error { + if i%127 != 0 { + return xerrors.Errorf("unpadded byte index must be a multiple of 127") + } + + return nil +} + type PaddedByteIndex uint64 type RGetter func(ctx context.Context, id 
abi.SectorID) (cid.Cid, error) diff --git a/sector-storage/storiface/filetype.go b/sector-storage/storiface/filetype.go index 3f7c7455..2e099902 100644 --- a/sector-storage/storiface/filetype.go +++ b/sector-storage/storiface/filetype.go @@ -73,6 +73,24 @@ func (t SectorFileType) SealSpaceUse(ssize abi.SectorSize) (uint64, error) { return need, nil } +func (t SectorFileType) StoreSpaceUse(ssize abi.SectorSize) (uint64, error) { + var need uint64 + for _, pathType := range PathTypes { + if !t.Has(pathType) { + continue + } + + oh, ok := FsOverheadFinalized[pathType] + if !ok { + return 0, xerrors.Errorf("no finalized overhead info for %s", pathType) + } + + need += uint64(oh) * uint64(ssize) / FSOverheadDen + } + + return need, nil +} + func (t SectorFileType) All() [FileTypes]bool { var out [FileTypes]bool diff --git a/sector-storage/storiface/worker.go b/sector-storage/storiface/worker.go index d4c3e2f4..7e67e653 100644 --- a/sector-storage/storiface/worker.go +++ b/sector-storage/storiface/worker.go @@ -5,10 +5,8 @@ import ( "errors" "fmt" "github.com/filecoin-project/venus-sealer/types" - "io" "time" - "github.com/google/uuid" "github.com/ipfs/go-cid" "github.com/filecoin-project/go-state-types/abi" @@ -18,7 +16,12 @@ import ( type WorkerInfo struct { Hostname string - Resources WorkerResources + // IgnoreResources indicates whether the worker's available resources should + // be used ignored (true) or used (false) for the purposes of scheduling and + // task assignment. Only supported on local workers. Used for testing. + // Default should be false (zero value, i.e. resources taken into account). + IgnoreResources bool + Resources WorkerResources } type WorkerResources struct { @@ -63,19 +66,6 @@ type WorkerJob struct { Hostname string `json:",omitempty"` // optional, set for ret-wait jobs } -type CallID struct { - Sector abi.SectorID - ID uuid.UUID -} - -func (c CallID) String() string { - return fmt.Sprintf("%d-%d-%s", c.Sector.Miner, c.Sector.Number, c.ID) -} - -var _ fmt.Stringer = &CallID{} - -var UndefCall CallID - type WorkerCalls interface { AddPiece(ctx context.Context, sector storage.SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (types.CallID, error) SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (types.CallID, error) @@ -86,7 +76,6 @@ type WorkerCalls interface { ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) (types.CallID, error) MoveStorage(ctx context.Context, sector storage.SectorRef, types SectorFileType) (types.CallID, error) UnsealPiece(context.Context, storage.SectorRef, UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (types.CallID, error) - ReadPiece(context.Context, io.Writer, storage.SectorRef, UnpaddedByteIndex, abi.UnpaddedPieceSize) (types.CallID, error) Fetch(context.Context, storage.SectorRef, SectorFileType, PathType, AcquireMode) (types.CallID, error) } diff --git a/sector-storage/worker_local.go b/sector-storage/worker_local.go index 3679aa81..18527d1e 100644 --- a/sector-storage/worker_local.go +++ b/sector-storage/worker_local.go @@ -20,7 +20,7 @@ import ( ffi "github.com/filecoin-project/filecoin-ffi" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-statestore" - storage "github.com/filecoin-project/specs-storage/storage" + "github.com/filecoin-project/specs-storage/storage" 
"github.com/filecoin-project/venus-sealer/sector-storage/ffiwrapper" "github.com/filecoin-project/venus-sealer/sector-storage/stores" @@ -33,6 +33,11 @@ var pathTypes = []storiface.SectorFileType{storiface.FTUnsealed, storiface.FTSea type WorkerConfig struct { TaskTypes []types.TaskType NoSwap bool + + // IgnoreResourceFiltering enables task distribution to happen on this + // worker regardless of its currently available resources. Used in testing + // with the local worker. + IgnoreResourceFiltering bool } // used do provide custom proofs impl (mostly used in testing) @@ -46,6 +51,9 @@ type LocalWorker struct { executor ExecutorFunc noSwap bool + // see equivalent field on WorkerConfig. + ignoreResources bool + ct *workerCallTracker acceptTasks map[types.TaskType]struct{} running sync.WaitGroup @@ -71,12 +79,12 @@ func newLocalWorker(executor ExecutorFunc, wcfg WorkerConfig, store stores.Store ct: &workerCallTracker{ st: cst, }, - acceptTasks: acceptTasks, - executor: executor, - noSwap: wcfg.NoSwap, - - session: uuid.New(), - closing: make(chan struct{}), + acceptTasks: acceptTasks, + executor: executor, + noSwap: wcfg.NoSwap, + ignoreResources: wcfg.IgnoreResourceFiltering, + session: uuid.New(), + closing: make(chan struct{}), } if w.executor == nil { @@ -193,7 +201,6 @@ var returnFunc = map[types.ReturnType]func(context.Context, types.CallID, storif types.ReturnReleaseUnsealed: rfunc(storiface.WorkerReturn.ReturnReleaseUnsealed), types.ReturnMoveStorage: rfunc(storiface.WorkerReturn.ReturnMoveStorage), types.ReturnUnsealPiece: rfunc(storiface.WorkerReturn.ReturnUnsealPiece), - types.ReturnReadPiece: rfunc(storiface.WorkerReturn.ReturnReadPiece), types.ReturnFetch: rfunc(storiface.WorkerReturn.ReturnFetch), } @@ -414,6 +421,7 @@ func (l *LocalWorker) UnsealPiece(ctx context.Context, sector storage.SectorRef, } return l.asyncCall(ctx, sector, types.ReturnUnsealPiece, func(ctx context.Context, ci types.CallID) (interface{}, error) { + log.Debugf("worker will unseal piece now, sector=%+v", sector.ID) if err = sb.UnsealPiece(ctx, sector, index, size, randomness, cid); err != nil { return nil, xerrors.Errorf("unsealing sector: %w", err) } @@ -426,18 +434,9 @@ func (l *LocalWorker) UnsealPiece(ctx context.Context, sector storage.SectorRef, return nil, xerrors.Errorf("removing source data: %w", err) } - return nil, nil - }) -} - -func (l *LocalWorker) ReadPiece(ctx context.Context, writer io.Writer, sector storage.SectorRef, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (types.CallID, error) { - sb, err := l.executor() - if err != nil { - return types.UndefCall, err - } + log.Debugf("worker has unsealed piece, sector=%+v", sector.ID) - return l.asyncCall(ctx, sector, types.ReturnReadPiece, func(ctx context.Context, ci types.CallID) (interface{}, error) { - return sb.ReadPiece(ctx, writer, sector, index, size) + return nil, nil }) } @@ -495,7 +494,8 @@ func (l *LocalWorker) Info(context.Context) (storiface.WorkerInfo, error) { } return storiface.WorkerInfo{ - Hostname: hostname, + Hostname: hostname, + IgnoreResources: l.ignoreResources, Resources: storiface.WorkerResources{ MemPhysical: mem.Total, MemSwap: memSwap, diff --git a/sector-storage/worker_tracked.go b/sector-storage/worker_tracked.go index 5f40c5cf..6e61b399 100644 --- a/sector-storage/worker_tracked.go +++ b/sector-storage/worker_tracked.go @@ -2,7 +2,6 @@ package sectorstorage import ( "context" - "io" "sync" "time" @@ -135,8 +134,4 @@ func (t *trackedWorker) UnsealPiece(ctx context.Context, id storage.SectorRef, 
i return t.tracker.track(ctx, t.wid, t.workerInfo, id, types.TTUnseal)(t.Worker.UnsealPiece(ctx, id, index, size, randomness, cid)) } -func (t *trackedWorker) ReadPiece(ctx context.Context, writer io.Writer, id storage.SectorRef, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (types.CallID, error) { - return t.tracker.track(ctx, t.wid, t.workerInfo, id, types.TTReadUnsealed)(t.Worker.ReadPiece(ctx, writer, id, index, size)) -} - var _ Worker = &trackedWorker{} diff --git a/storage-sealing/commit_batch.go b/storage-sealing/commit_batch.go index 80fe3a4d..ca5bd772 100644 --- a/storage-sealing/commit_batch.go +++ b/storage-sealing/commit_batch.go @@ -33,6 +33,8 @@ const arp = abi.RegisteredAggregationProof_SnarkPackV1 var aggFeeNum = big.NewInt(110) var aggFeeDen = big.NewInt(100) +//go:generate go run github.com/golang/mock/mockgen -destination=mocks/mock_commit_batcher.go -package=mocks . CommitBatcherApi + type CommitBatcherApi interface { //for messager MessagerSendMsg(ctx context.Context, from, to address.Address, method abi.MethodNum, value, maxFee abi.TokenAmount, params []byte) (string, error) @@ -47,9 +49,9 @@ type CommitBatcherApi interface { } type AggregateInput struct { - spt abi.RegisteredSealProof - info proof5.AggregateSealVerifyInfo - proof []byte + Spt abi.RegisteredSealProof + Info proof5.AggregateSealVerifyInfo + Proof []byte } type CommitBatcher struct { @@ -291,7 +293,7 @@ func (b *CommitBatcher) processBatch(cfg sealiface.Config) ([]sealiface.CommitBa collateral = big.Add(collateral, sc) params.SectorNumbers.Set(uint64(id)) - infos = append(infos, p.info) + infos = append(infos, p.Info) } sort.Slice(infos, func(i, j int) bool { @@ -299,7 +301,7 @@ func (b *CommitBatcher) processBatch(cfg sealiface.Config) ([]sealiface.CommitBa }) for _, info := range infos { - proofs = append(proofs, b.todo[info.Number].proof) + proofs = append(proofs, b.todo[info.Number].Proof) } mid, err := address.IDFromAddress(b.maddr) @@ -309,7 +311,7 @@ func (b *CommitBatcher) processBatch(cfg sealiface.Config) ([]sealiface.CommitBa params.AggregateProof, err = b.prover.AggregateSealProofs(proof5.AggregateSealVerifyProofAndInfos{ Miner: abi.ActorID(mid), - SealProof: b.todo[infos[0].Number].spt, + SealProof: b.todo[infos[0].Number].Spt, AggregateProof: arp, Infos: infos, }, proofs) @@ -400,7 +402,7 @@ func (b *CommitBatcher) processSingle(mi miner.MinerInfo, sn abi.SectorNumber, i enc := new(bytes.Buffer) params := &miner.ProveCommitSectorParams{ SectorNumber: sn, - Proof: info.proof, + Proof: info.Proof, } if err := params.MarshalCBOR(enc); err != nil { @@ -485,7 +487,7 @@ func (b *CommitBatcher) Pending(ctx context.Context) ([]abi.SectorID, error) { for _, s := range b.todo { res = append(res, abi.SectorID{ Miner: abi.ActorID(mid), - Number: s.info.Number, + Number: s.Info.Number, }) } diff --git a/storage-sealing/commit_batch_test.go b/storage-sealing/commit_batch_test.go new file mode 100644 index 00000000..edb367c0 --- /dev/null +++ b/storage-sealing/commit_batch_test.go @@ -0,0 +1,392 @@ +package sealing_test + +import ( + "bytes" + "context" + "github.com/filecoin-project/venus/pkg/specactors/policy" + "sort" + "sync" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/network" + miner5 
"github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" + + "github.com/filecoin-project/venus/pkg/types" + + "github.com/filecoin-project/venus-sealer/api" + "github.com/filecoin-project/venus-sealer/config" + "github.com/filecoin-project/venus-sealer/sector-storage/ffiwrapper" + sealing "github.com/filecoin-project/venus-sealer/storage-sealing" + "github.com/filecoin-project/venus-sealer/storage-sealing/mocks" + "github.com/filecoin-project/venus-sealer/storage-sealing/sealiface" + types2 "github.com/filecoin-project/venus-sealer/types" + "github.com/filecoin-project/venus/pkg/specactors/builtin/miner" +) + +func TestCommitBatcher(t *testing.T) { + t0123, err := address.NewFromString("t0123") + require.NoError(t, err) + + ctx := context.Background() + + as := func(ctx context.Context, mi miner.MinerInfo, use api.AddrUse, goodFunds, minFunds abi.TokenAmount) (address.Address, abi.TokenAmount, error) { + return t0123, big.Zero(), nil + } + + maxBatch := miner5.MaxAggregatedSectors + minBatch := miner5.MinAggregatedSectors + + cfg := func() (sealiface.Config, error) { + return sealiface.Config{ + MaxWaitDealsSectors: 2, + MaxSealingSectors: 0, + MaxSealingSectorsForDeals: 0, + WaitDealsDelay: time.Hour * 6, + AlwaysKeepUnsealedCopy: true, + + BatchPreCommits: true, + MaxPreCommitBatch: miner5.PreCommitSectorBatchMaxSize, + PreCommitBatchWait: 24 * time.Hour, + PreCommitBatchSlack: 3 * time.Hour, + + AggregateCommits: true, + MinCommitBatch: minBatch, + MaxCommitBatch: maxBatch, + CommitBatchWait: 24 * time.Hour, + CommitBatchSlack: 1 * time.Hour, + + AggregateAboveBaseFee: types.BigMul(types.PicoFil, types.NewInt(150)), // 0.15 nFIL + + TerminateBatchMin: 1, + TerminateBatchMax: 100, + TerminateBatchWait: 5 * time.Minute, + }, nil + } + + type promise func(t *testing.T) + type action func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb *sealing.CommitBatcher) promise + + actions := func(as ...action) action { + return func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb *sealing.CommitBatcher) promise { + var ps []promise + for _, a := range as { + p := a(t, s, pcb) + if p != nil { + ps = append(ps, p) + } + } + + if len(ps) > 0 { + return func(t *testing.T) { + for _, p := range ps { + p(t) + } + } + } + return nil + } + } + + addSector := func(sn abi.SectorNumber) action { + return func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb *sealing.CommitBatcher) promise { + var pcres sealiface.CommitBatchRes + var pcerr error + done := sync.Mutex{} + done.Lock() + + si := types2.SectorInfo{ + SectorNumber: sn, + } + + s.EXPECT().ChainHead(gomock.Any()).Return(nil, abi.ChainEpoch(1), nil) + s.EXPECT().StateNetworkVersion(gomock.Any(), gomock.Any()).Return(network.Version13, nil) + s.EXPECT().StateSectorPreCommitInfo(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(&miner.SectorPreCommitOnChainInfo{ + PreCommitDeposit: big.Zero(), + }, nil) + + go func() { + defer done.Unlock() + pcres, pcerr = pcb.AddCommit(ctx, si, sealing.AggregateInput{ + Info: proof5.AggregateSealVerifyInfo{ + Number: sn, + }, + }) + }() + + return func(t *testing.T) { + done.Lock() + require.NoError(t, pcerr) + require.Empty(t, pcres.Error) + require.Contains(t, pcres.Sectors, si.SectorNumber) + } + } + } + + addSectors := func(sectors []abi.SectorNumber) action { + as := make([]action, len(sectors)) + for i, sector := range sectors { + as[i] = addSector(sector) + } + return actions(as...) 
+ } + + waitPending := func(n int) action { + return func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb *sealing.CommitBatcher) promise { + require.Eventually(t, func() bool { + p, err := pcb.Pending(ctx) + require.NoError(t, err) + return len(p) == n + }, time.Second*5, 10*time.Millisecond) + + return nil + } + } + + expectSend := func(expect []abi.SectorNumber, aboveBalancer, failOnePCI bool) action { + return func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb *sealing.CommitBatcher) promise { + s.EXPECT().StateMinerInfo(gomock.Any(), gomock.Any(), gomock.Any()).Return(miner.MinerInfo{Owner: t0123, Worker: t0123}, nil) + + ti := len(expect) + batch := false + if ti >= minBatch { + batch = true + ti = 1 + } + + basefee := types.PicoFil + if aboveBalancer { + basefee = types.NanoFil + } + + if batch { + s.EXPECT().ChainHead(gomock.Any()).Return(nil, abi.ChainEpoch(1), nil) + s.EXPECT().ChainBaseFee(gomock.Any(), gomock.Any()).Return(basefee, nil) + } + + if !aboveBalancer { + batch = false + ti = len(expect) + } + + s.EXPECT().ChainHead(gomock.Any()).Return(nil, abi.ChainEpoch(1), nil) + + pciC := len(expect) + if failOnePCI { + s.EXPECT().StateSectorPreCommitInfo(gomock.Any(), gomock.Any(), abi.SectorNumber(1), gomock.Any()).Return(nil, nil).Times(1) // not found + pciC = len(expect) - 1 + if !batch { + ti-- + } + } + s.EXPECT().StateSectorPreCommitInfo(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(&miner.SectorPreCommitOnChainInfo{ + PreCommitDeposit: big.Zero(), + }, nil).Times(pciC) + s.EXPECT().StateMinerInitialPledgeCollateral(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(big.Zero(), nil).Times(pciC) + + if batch { + s.EXPECT().StateNetworkVersion(gomock.Any(), gomock.Any()).Return(network.Version13, nil) + s.EXPECT().ChainBaseFee(gomock.Any(), gomock.Any()).Return(basefee, nil) + } + + s.EXPECT().SendMsg(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), funMatcher(func(i interface{}) bool { + b := i.([]byte) + if batch { + var params miner5.ProveCommitAggregateParams + require.NoError(t, params.UnmarshalCBOR(bytes.NewReader(b))) + for _, number := range expect { + set, err := params.SectorNumbers.IsSet(uint64(number)) + require.NoError(t, err) + require.True(t, set) + } + } else { + var params miner5.ProveCommitSectorParams + require.NoError(t, params.UnmarshalCBOR(bytes.NewReader(b))) + } + return true + })).Times(ti) + return nil + } + } + + flush := func(expect []abi.SectorNumber, aboveBalancer, failOnePCI bool) action { + return func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb *sealing.CommitBatcher) promise { + _ = expectSend(expect, aboveBalancer, failOnePCI)(t, s, pcb) + + batch := len(expect) >= minBatch && aboveBalancer + + r, err := pcb.Flush(ctx) + require.NoError(t, err) + if batch { + require.Len(t, r, 1) + require.Empty(t, r[0].Error) + sort.Slice(r[0].Sectors, func(i, j int) bool { + return r[0].Sectors[i] < r[0].Sectors[j] + }) + require.Equal(t, expect, r[0].Sectors) + if !failOnePCI { + require.Len(t, r[0].FailedSectors, 0) + } else { + require.Len(t, r[0].FailedSectors, 1) + _, found := r[0].FailedSectors[1] + require.True(t, found) + } + } else { + require.Len(t, r, len(expect)) + for _, res := range r { + require.Len(t, res.Sectors, 1) + require.Empty(t, res.Error) + } + sort.Slice(r, func(i, j int) bool { + return r[i].Sectors[0] < r[j].Sectors[0] + }) + for i, res := range r { + require.Equal(t, abi.SectorNumber(i), res.Sectors[0]) + if failOnePCI && res.Sectors[0] == 1 { + 
require.Len(t, res.FailedSectors, 1) + _, found := res.FailedSectors[1] + require.True(t, found) + } else { + require.Empty(t, res.FailedSectors) + } + } + } + + return nil + } + } + + getSectors := func(n int) []abi.SectorNumber { + out := make([]abi.SectorNumber, n) + for i := range out { + out[i] = abi.SectorNumber(i) + } + return out + } + + tcs := map[string]struct { + actions []action + }{ + "addSingle-aboveBalancer": { + actions: []action{ + addSector(0), + waitPending(1), + flush([]abi.SectorNumber{0}, true, false), + }, + }, + "addTwo-aboveBalancer": { + actions: []action{ + addSectors(getSectors(2)), + waitPending(2), + flush(getSectors(2), true, false), + }, + }, + "addAte-aboveBalancer": { + actions: []action{ + addSectors(getSectors(8)), + waitPending(8), + flush(getSectors(8), true, false), + }, + }, + "addMax-aboveBalancer": { + actions: []action{ + expectSend(getSectors(maxBatch), true, false), + addSectors(getSectors(maxBatch)), + }, + }, + "addSingle-belowBalancer": { + actions: []action{ + addSector(0), + waitPending(1), + flush([]abi.SectorNumber{0}, false, false), + }, + }, + "addTwo-belowBalancer": { + actions: []action{ + addSectors(getSectors(2)), + waitPending(2), + flush(getSectors(2), false, false), + }, + }, + "addAte-belowBalancer": { + actions: []action{ + addSectors(getSectors(8)), + waitPending(8), + flush(getSectors(8), false, false), + }, + }, + "addMax-belowBalancer": { + actions: []action{ + expectSend(getSectors(maxBatch), false, false), + addSectors(getSectors(maxBatch)), + }, + }, + + "addAte-aboveBalancer-failOne": { + actions: []action{ + addSectors(getSectors(8)), + waitPending(8), + flush(getSectors(8), true, true), + }, + }, + "addAte-belowBalancer-failOne": { + actions: []action{ + addSectors(getSectors(8)), + waitPending(8), + flush(getSectors(8), false, true), + }, + }, + } + + for name, tc := range tcs { + tc := tc + + t.Run(name, func(t *testing.T) { + // create go mock controller here + mockCtrl := gomock.NewController(t) + // when test is done, assert expectations on all mock objects. 
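+ // The three mocks created below mirror Remote's collaborators: the
+ // local Store, the partial-file handler used to inspect unsealed
+ // files, and the SectorIndex used to locate remote copies.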
+ defer mockCtrl.Finish() + + // create them mocks + pcapi := mocks.NewMockCommitBatcherApi(mockCtrl) + + pcb := sealing.NewCommitBatcher(ctx, &config.NetParamsConfig{ + UpgradeIgnitionHeight: 94000, + ForkLengthThreshold: policy.ChainFinality, + InsecurePoStValidation: false, + BlockDelaySecs: 30, + }, t0123, pcapi, as, fc, cfg, &fakeProver{}) + + var promises []promise + + for _, a := range tc.actions { + p := a(t, pcapi, pcb) + if p != nil { + promises = append(promises, p) + } + } + + for _, p := range promises { + p(t) + } + + err := pcb.Stop(ctx) + require.NoError(t, err) + }) + } +} + +type fakeProver struct{} + +func (f fakeProver) AggregateSealProofs(aggregateInfo proof5.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error) { + return []byte("Trust me, I'm a proof"), nil +} + +var _ ffiwrapper.Prover = &fakeProver{} diff --git a/storage-sealing/fsm.go b/storage-sealing/fsm.go index ee531e2b..373e96b6 100644 --- a/storage-sealing/fsm.go +++ b/storage-sealing/fsm.go @@ -159,7 +159,7 @@ var fsmPlanners = map[types.SectorState]func(events []statemachine.Event, state on(SectorSealPreCommit1Failed{}, types.SealPreCommit1Failed), ), types.CommitFinalizeFailed: planOne( - on(SectorRetryFinalize{}, types.CommitFinalizeFailed), + on(SectorRetryFinalize{}, types.CommitFinalize), ), types.CommitFailed: planOne( on(SectorSealPreCommit1Failed{}, types.SealPreCommit1Failed), @@ -478,15 +478,16 @@ func (m *Sealing) onUpdateSector(ctx context.Context, state *types.SectorInfo) e if err != nil { return xerrors.Errorf("getting config: %w", err) } - sp, err := m.currentSealProof(ctx) - if err != nil { - return xerrors.Errorf("getting seal proof type: %w", err) - } shouldUpdateInput := m.stats.UpdateSector(cfg, m.minerSectorID(state.SectorNumber), state.State) // trigger more input processing when we've dipped below max sealing limits if shouldUpdateInput { + sp, err := m.currentSealProof(ctx) + if err != nil { + return xerrors.Errorf("getting seal proof type: %w", err) + } + go func() { m.inputLk.Lock() defer m.inputLk.Unlock() diff --git a/storage-sealing/fsm_test.go b/storage-sealing/fsm_test.go index 5c6a4fd2..64a30d19 100644 --- a/storage-sealing/fsm_test.go +++ b/storage-sealing/fsm_test.go @@ -1,7 +1,7 @@ package sealing import ( - "github.com/filecoin-project/venus-sealer/types" + "testing" "github.com/filecoin-project/go-address" @@ -10,6 +10,8 @@ import ( "github.com/stretchr/testify/require" "github.com/filecoin-project/go-statemachine" + + "github.com/filecoin-project/venus-sealer/types" ) func init() { @@ -155,6 +157,45 @@ func TestHappyPathFinalizeEarly(t *testing.T) { } } +func TestCommitFinalizeFailed(t *testing.T) { + var notif []struct{ before, after types.SectorInfo } + ma, _ := address.NewIDAddress(55151) + m := test{ + s: &Sealing{ + maddr: ma, + stats: types.SectorStats{ + BySector: map[abi.SectorID]types.StatSectorState{}, + }, + notifee: func(before, after types.SectorInfo) { + notif = append(notif, struct{ before, after types.SectorInfo }{before, after}) + }, + }, + t: t, + state: &types.SectorInfo{State: types.Committing}, + } + + m.planSingle(SectorProofReady{}) + require.Equal(m.t, m.state.State, types.CommitFinalize) + + m.planSingle(SectorFinalizeFailed{}) + require.Equal(m.t, m.state.State, types.CommitFinalizeFailed) + + m.planSingle(SectorRetryFinalize{}) + require.Equal(m.t, m.state.State, types.CommitFinalize) + + m.planSingle(SectorFinalized{}) + require.Equal(m.t, m.state.State, types.SubmitCommit) + + expected := []types.SectorState{types.Committing, 
types.CommitFinalize, types.CommitFinalizeFailed, types.CommitFinalize, types.SubmitCommit} + for i, n := range notif { + if n.before.State != expected[i] { + t.Fatalf("expected before state: %s, got: %s", expected[i], n.before.State) + } + if n.after.State != expected[i+1] { + t.Fatalf("expected after state: %s, got: %s", expected[i+1], n.after.State) + } + } +} func TestSeedRevert(t *testing.T) { ma, _ := address.NewIDAddress(55151) m := test{ @@ -278,3 +319,43 @@ func TestBrokenState(t *testing.T) { } } } + +func TestTicketExpired(t *testing.T) { + var notif []struct{ before, after types.SectorInfo } + ma, _ := address.NewIDAddress(55151) + m := test{ + s: &Sealing{ + maddr: ma, + stats: types.SectorStats{ + BySector: map[abi.SectorID]types.StatSectorState{}, + }, + notifee: func(before, after types.SectorInfo) { + notif = append(notif, struct{ before, after types.SectorInfo }{before, after}) + }, + }, + t: t, + state: &types.SectorInfo{State: types.Packing}, + } + + m.planSingle(SectorPacked{}) + require.Equal(m.t, m.state.State, types.GetTicket) + + m.planSingle(SectorTicket{}) + require.Equal(m.t, m.state.State, types.PreCommit1) + + expired := checkTicketExpired(0, types.MaxTicketAge+1) + require.True(t, expired) + + m.planSingle(SectorOldTicket{}) + require.Equal(m.t, m.state.State, types.GetTicket) + + expected := []types.SectorState{types.Packing, types.GetTicket, types.PreCommit1, types.GetTicket} + for i, n := range notif { + if n.before.State != expected[i] { + t.Fatalf("expected before state: %s, got: %s", expected[i], n.before.State) + } + if n.after.State != expected[i+1] { + t.Fatalf("expected after state: %s, got: %s", expected[i+1], n.after.State) + } + } +} diff --git a/storage-sealing/mocks/mock_commit_batcher.go b/storage-sealing/mocks/mock_commit_batcher.go new file mode 100644 index 00000000..630a3e53 --- /dev/null +++ b/storage-sealing/mocks/mock_commit_batcher.go @@ -0,0 +1,164 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/filecoin-project/lotus/extern/storage-sealing (interfaces: CommitBatcherApi) + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + address "github.com/filecoin-project/go-address" + abi "github.com/filecoin-project/go-state-types/abi" + big "github.com/filecoin-project/go-state-types/big" + network "github.com/filecoin-project/go-state-types/network" + miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" + gomock "github.com/golang/mock/gomock" + cid "github.com/ipfs/go-cid" + + miner "github.com/filecoin-project/venus/pkg/specactors/builtin/miner" + sealing "github.com/filecoin-project/venus-sealer/storage-sealing" +) + +// MockCommitBatcherApi is a mock of CommitBatcherApi interface. +type MockCommitBatcherApi struct { + ctrl *gomock.Controller + recorder *MockCommitBatcherApiMockRecorder +} + +// MockCommitBatcherApiMockRecorder is the mock recorder for MockCommitBatcherApi. +type MockCommitBatcherApiMockRecorder struct { + mock *MockCommitBatcherApi +} + +// NewMockCommitBatcherApi creates a new mock instance. +func NewMockCommitBatcherApi(ctrl *gomock.Controller) *MockCommitBatcherApi { + mock := &MockCommitBatcherApi{ctrl: ctrl} + mock.recorder = &MockCommitBatcherApiMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockCommitBatcherApi) EXPECT() *MockCommitBatcherApiMockRecorder { + return m.recorder +} + +// ChainBaseFee mocks base method. 
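+//
+// A typical expectation, as used by the batcher tests (values illustrative):
+//
+//	s.EXPECT().
+//		ChainBaseFee(gomock.Any(), gomock.Any()).
+//		Return(types.NanoFil, nil)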
+func (m *MockCommitBatcherApi) ChainBaseFee(arg0 context.Context, arg1 sealing.TipSetToken) (big.Int, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "ChainBaseFee", arg0, arg1)
+	ret0, _ := ret[0].(big.Int)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// ChainBaseFee indicates an expected call of ChainBaseFee.
+func (mr *MockCommitBatcherApiMockRecorder) ChainBaseFee(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainBaseFee", reflect.TypeOf((*MockCommitBatcherApi)(nil).ChainBaseFee), arg0, arg1)
+}
+
+// ChainHead mocks base method.
+func (m *MockCommitBatcherApi) ChainHead(arg0 context.Context) (sealing.TipSetToken, abi.ChainEpoch, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "ChainHead", arg0)
+	ret0, _ := ret[0].(sealing.TipSetToken)
+	ret1, _ := ret[1].(abi.ChainEpoch)
+	ret2, _ := ret[2].(error)
+	return ret0, ret1, ret2
+}
+
+// ChainHead indicates an expected call of ChainHead.
+func (mr *MockCommitBatcherApiMockRecorder) ChainHead(arg0 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainHead", reflect.TypeOf((*MockCommitBatcherApi)(nil).ChainHead), arg0)
+}
+
+// SendMsg mocks base method.
+func (m *MockCommitBatcherApi) SendMsg(arg0 context.Context, arg1, arg2 address.Address, arg3 abi.MethodNum, arg4, arg5 big.Int, arg6 []byte) (cid.Cid, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "SendMsg", arg0, arg1, arg2, arg3, arg4, arg5, arg6)
+	ret0, _ := ret[0].(cid.Cid)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// SendMsg indicates an expected call of SendMsg.
+func (mr *MockCommitBatcherApiMockRecorder) SendMsg(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockCommitBatcherApi)(nil).SendMsg), arg0, arg1, arg2, arg3, arg4, arg5, arg6)
+}
+
+// MessagerSendMsg mocks base method.
+func (m *MockCommitBatcherApi) MessagerSendMsg(arg0 context.Context, arg1, arg2 address.Address, arg3 abi.MethodNum, arg4, arg5 abi.TokenAmount, arg6 []byte) (string, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "MessagerSendMsg", arg0, arg1, arg2, arg3, arg4, arg5, arg6)
+	ret0, _ := ret[0].(string)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// MessagerSendMsg indicates an expected call of MessagerSendMsg.
+func (mr *MockCommitBatcherApiMockRecorder) MessagerSendMsg(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MessagerSendMsg", reflect.TypeOf((*MockCommitBatcherApi)(nil).MessagerSendMsg), arg0, arg1, arg2, arg3, arg4, arg5, arg6)
+}
+
+// StateMinerInfo mocks base method.
+func (m *MockCommitBatcherApi) StateMinerInfo(arg0 context.Context, arg1 address.Address, arg2 sealing.TipSetToken) (miner.MinerInfo, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "StateMinerInfo", arg0, arg1, arg2)
+	ret0, _ := ret[0].(miner.MinerInfo)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// StateMinerInfo indicates an expected call of StateMinerInfo.
+func (mr *MockCommitBatcherApiMockRecorder) StateMinerInfo(arg0, arg1, arg2 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerInfo", reflect.TypeOf((*MockCommitBatcherApi)(nil).StateMinerInfo), arg0, arg1, arg2)
+}
+
+// StateMinerInitialPledgeCollateral mocks base method.
+func (m *MockCommitBatcherApi) StateMinerInitialPledgeCollateral(arg0 context.Context, arg1 address.Address, arg2 miner0.SectorPreCommitInfo, arg3 sealing.TipSetToken) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerInitialPledgeCollateral", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerInitialPledgeCollateral indicates an expected call of StateMinerInitialPledgeCollateral. +func (mr *MockCommitBatcherApiMockRecorder) StateMinerInitialPledgeCollateral(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerInitialPledgeCollateral", reflect.TypeOf((*MockCommitBatcherApi)(nil).StateMinerInitialPledgeCollateral), arg0, arg1, arg2, arg3) +} + +// StateNetworkVersion mocks base method. +func (m *MockCommitBatcherApi) StateNetworkVersion(arg0 context.Context, arg1 sealing.TipSetToken) (network.Version, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateNetworkVersion", arg0, arg1) + ret0, _ := ret[0].(network.Version) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateNetworkVersion indicates an expected call of StateNetworkVersion. +func (mr *MockCommitBatcherApiMockRecorder) StateNetworkVersion(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateNetworkVersion", reflect.TypeOf((*MockCommitBatcherApi)(nil).StateNetworkVersion), arg0, arg1) +} + +// StateSectorPreCommitInfo mocks base method. +func (m *MockCommitBatcherApi) StateSectorPreCommitInfo(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 sealing.TipSetToken) (*miner.SectorPreCommitOnChainInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateSectorPreCommitInfo", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*miner.SectorPreCommitOnChainInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateSectorPreCommitInfo indicates an expected call of StateSectorPreCommitInfo. +func (mr *MockCommitBatcherApiMockRecorder) StateSectorPreCommitInfo(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSectorPreCommitInfo", reflect.TypeOf((*MockCommitBatcherApi)(nil).StateSectorPreCommitInfo), arg0, arg1, arg2, arg3) +} diff --git a/storage-sealing/mocks/mock_precommit_batcher.go b/storage-sealing/mocks/mock_precommit_batcher.go new file mode 100644 index 00000000..71ff8337 --- /dev/null +++ b/storage-sealing/mocks/mock_precommit_batcher.go @@ -0,0 +1,102 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/filecoin-project/lotus/extern/storage-sealing (interfaces: PreCommitBatcherApi) + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + address "github.com/filecoin-project/go-address" + abi "github.com/filecoin-project/go-state-types/abi" + big "github.com/filecoin-project/go-state-types/big" + gomock "github.com/golang/mock/gomock" + cid "github.com/ipfs/go-cid" + + miner "github.com/filecoin-project/venus/pkg/specactors/builtin/miner" + sealing "github.com/filecoin-project/venus-sealer/storage-sealing" +) + +// MockPreCommitBatcherApi is a mock of PreCommitBatcherApi interface. 
+type MockPreCommitBatcherApi struct {
+	ctrl     *gomock.Controller
+	recorder *MockPreCommitBatcherApiMockRecorder
+}
+
+// MockPreCommitBatcherApiMockRecorder is the mock recorder for MockPreCommitBatcherApi.
+type MockPreCommitBatcherApiMockRecorder struct {
+	mock *MockPreCommitBatcherApi
+}
+
+// NewMockPreCommitBatcherApi creates a new mock instance.
+func NewMockPreCommitBatcherApi(ctrl *gomock.Controller) *MockPreCommitBatcherApi {
+	mock := &MockPreCommitBatcherApi{ctrl: ctrl}
+	mock.recorder = &MockPreCommitBatcherApiMockRecorder{mock}
+	return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockPreCommitBatcherApi) EXPECT() *MockPreCommitBatcherApiMockRecorder {
+	return m.recorder
+}
+
+// ChainHead mocks base method.
+func (m *MockPreCommitBatcherApi) ChainHead(arg0 context.Context) (sealing.TipSetToken, abi.ChainEpoch, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "ChainHead", arg0)
+	ret0, _ := ret[0].(sealing.TipSetToken)
+	ret1, _ := ret[1].(abi.ChainEpoch)
+	ret2, _ := ret[2].(error)
+	return ret0, ret1, ret2
+}
+
+// ChainHead indicates an expected call of ChainHead.
+func (mr *MockPreCommitBatcherApiMockRecorder) ChainHead(arg0 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainHead", reflect.TypeOf((*MockPreCommitBatcherApi)(nil).ChainHead), arg0)
+}
+
+// SendMsg mocks base method.
+func (m *MockPreCommitBatcherApi) SendMsg(arg0 context.Context, arg1, arg2 address.Address, arg3 abi.MethodNum, arg4, arg5 big.Int, arg6 []byte) (cid.Cid, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "SendMsg", arg0, arg1, arg2, arg3, arg4, arg5, arg6)
+	ret0, _ := ret[0].(cid.Cid)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// SendMsg indicates an expected call of SendMsg.
+func (mr *MockPreCommitBatcherApiMockRecorder) SendMsg(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockPreCommitBatcherApi)(nil).SendMsg), arg0, arg1, arg2, arg3, arg4, arg5, arg6)
+}
+
+// MessagerSendMsg mocks base method.
+func (m *MockPreCommitBatcherApi) MessagerSendMsg(arg0 context.Context, arg1, arg2 address.Address, arg3 abi.MethodNum, arg4, arg5 abi.TokenAmount, arg6 []byte) (string, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "MessagerSendMsg", arg0, arg1, arg2, arg3, arg4, arg5, arg6)
+	ret0, _ := ret[0].(string)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// MessagerSendMsg indicates an expected call of MessagerSendMsg.
+func (mr *MockPreCommitBatcherApiMockRecorder) MessagerSendMsg(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MessagerSendMsg", reflect.TypeOf((*MockPreCommitBatcherApi)(nil).MessagerSendMsg), arg0, arg1, arg2, arg3, arg4, arg5, arg6)
+}
+
+// StateMinerInfo mocks base method.
+func (m *MockPreCommitBatcherApi) StateMinerInfo(arg0 context.Context, arg1 address.Address, arg2 sealing.TipSetToken) (miner.MinerInfo, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "StateMinerInfo", arg0, arg1, arg2)
+	ret0, _ := ret[0].(miner.MinerInfo)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// StateMinerInfo indicates an expected call of StateMinerInfo.
+func (mr *MockPreCommitBatcherApiMockRecorder) StateMinerInfo(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerInfo", reflect.TypeOf((*MockPreCommitBatcherApi)(nil).StateMinerInfo), arg0, arg1, arg2) +} diff --git a/storage-sealing/precommit_batch.go b/storage-sealing/precommit_batch.go index 88803d47..6d924c1e 100644 --- a/storage-sealing/precommit_batch.go +++ b/storage-sealing/precommit_batch.go @@ -24,6 +24,8 @@ import ( "github.com/filecoin-project/venus-sealer/types" ) +//go:generate go run github.com/golang/mock/mockgen -destination=mocks/mock_precommit_batcher.go -package=mocks . PreCommitBatcherApi + type PreCommitBatcherApi interface { //for messager MessagerSendMsg(ctx context.Context, from, to address.Address, method abi.MethodNum, value, maxFee abi.TokenAmount, params []byte) (string, error) @@ -254,7 +256,7 @@ func (b *PreCommitBatcher) processBatch(cfg sealiface.Config) ([]sealiface.PreCo res.Msg = uid - log.Infow("Sent ProveCommitAggregate message", "uid", uid, "from", from, "sectors", len(b.todo)) + log.Infow("Sent PreCommitSectorBatch message", "uid", uid, "from", from, "sectors", len(b.todo)) return []sealiface.PreCommitBatchRes{res}, nil } diff --git a/storage-sealing/precommit_batch_test.go b/storage-sealing/precommit_batch_test.go new file mode 100644 index 00000000..0f55b2a5 --- /dev/null +++ b/storage-sealing/precommit_batch_test.go @@ -0,0 +1,265 @@ +package sealing_test + +import ( + "bytes" + "context" + "github.com/filecoin-project/venus/pkg/specactors/policy" + "sort" + "sync" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" + miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" + + "github.com/filecoin-project/venus/pkg/types" + + "github.com/filecoin-project/venus-sealer/api" + "github.com/filecoin-project/venus-sealer/config" + sealing "github.com/filecoin-project/venus-sealer/storage-sealing" + "github.com/filecoin-project/venus-sealer/storage-sealing/mocks" + "github.com/filecoin-project/venus-sealer/storage-sealing/sealiface" + types2 "github.com/filecoin-project/venus-sealer/types" + "github.com/filecoin-project/venus/pkg/specactors/builtin/miner" +) + +var fc = config.MinerFeeConfig{ + MaxPreCommitGasFee: types.FIL(types.FromFil(1)), + MaxCommitGasFee: types.FIL(types.FromFil(1)), + MaxTerminateGasFee: types.FIL(types.FromFil(1)), + MaxPreCommitBatchGasFee: config.BatchFeeConfig{Base: types.FIL(types.FromFil(3)), PerSector: types.FIL(types.FromFil(1))}, + MaxCommitBatchGasFee: config.BatchFeeConfig{Base: types.FIL(types.FromFil(3)), PerSector: types.FIL(types.FromFil(1))}, +} + +func TestPrecommitBatcher(t *testing.T) { + t0123, err := address.NewFromString("t0123") + require.NoError(t, err) + + ctx := context.Background() + + as := func(ctx context.Context, mi miner.MinerInfo, use api.AddrUse, goodFunds, minFunds abi.TokenAmount) (address.Address, abi.TokenAmount, error) { + return t0123, big.Zero(), nil + } + + maxBatch := miner5.PreCommitSectorBatchMaxSize + + cfg := func() (sealiface.Config, error) { + return sealiface.Config{ + MaxWaitDealsSectors: 2, + MaxSealingSectors: 0, + MaxSealingSectorsForDeals: 0, + WaitDealsDelay: time.Hour * 6, + AlwaysKeepUnsealedCopy: true, + + 
BatchPreCommits: true, + MaxPreCommitBatch: maxBatch, + PreCommitBatchWait: 24 * time.Hour, + PreCommitBatchSlack: 3 * time.Hour, + + AggregateCommits: true, + MinCommitBatch: miner5.MinAggregatedSectors, + MaxCommitBatch: miner5.MaxAggregatedSectors, + CommitBatchWait: 24 * time.Hour, + CommitBatchSlack: 1 * time.Hour, + + TerminateBatchMin: 1, + TerminateBatchMax: 100, + TerminateBatchWait: 5 * time.Minute, + }, nil + } + + type promise func(t *testing.T) + type action func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *sealing.PreCommitBatcher) promise + + actions := func(as ...action) action { + return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *sealing.PreCommitBatcher) promise { + var ps []promise + for _, a := range as { + p := a(t, s, pcb) + if p != nil { + ps = append(ps, p) + } + } + + if len(ps) > 0 { + return func(t *testing.T) { + for _, p := range ps { + p(t) + } + } + } + return nil + } + } + + addSector := func(sn abi.SectorNumber) action { + return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *sealing.PreCommitBatcher) promise { + var pcres sealiface.PreCommitBatchRes + var pcerr error + done := sync.Mutex{} + done.Lock() + + si := types2.SectorInfo{ + SectorNumber: sn, + } + + s.EXPECT().ChainHead(gomock.Any()).Return(nil, abi.ChainEpoch(1), nil) + + go func() { + defer done.Unlock() + pcres, pcerr = pcb.AddPreCommit(ctx, si, big.Zero(), &miner0.SectorPreCommitInfo{ + SectorNumber: si.SectorNumber, + SealedCID: fakePieceCid(t), + DealIDs: nil, + Expiration: 0, + }) + }() + + return func(t *testing.T) { + done.Lock() + require.NoError(t, pcerr) + require.Empty(t, pcres.Error) + require.Contains(t, pcres.Sectors, si.SectorNumber) + } + } + } + + addSectors := func(sectors []abi.SectorNumber) action { + as := make([]action, len(sectors)) + for i, sector := range sectors { + as[i] = addSector(sector) + } + return actions(as...) 
+	}
+
+	waitPending := func(n int) action {
+		return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *sealing.PreCommitBatcher) promise {
+			require.Eventually(t, func() bool {
+				p, err := pcb.Pending(ctx)
+				require.NoError(t, err)
+				return len(p) == n
+			}, time.Second*5, 10*time.Millisecond)
+
+			return nil
+		}
+	}
+
+	expectSend := func(expect []abi.SectorNumber) action {
+		return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *sealing.PreCommitBatcher) promise {
+			s.EXPECT().StateMinerInfo(gomock.Any(), gomock.Any(), gomock.Any()).Return(miner.MinerInfo{Owner: t0123, Worker: t0123}, nil)
+			s.EXPECT().SendMsg(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), funMatcher(func(i interface{}) bool {
+				b := i.([]byte)
+				var params miner5.PreCommitSectorBatchParams
+				require.NoError(t, params.UnmarshalCBOR(bytes.NewReader(b)))
+				for s, number := range expect {
+					require.Equal(t, number, params.Sectors[s].SectorNumber)
+				}
+				return true
+			}))
+			return nil
+		}
+	}
+
+	flush := func(expect []abi.SectorNumber) action {
+		return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *sealing.PreCommitBatcher) promise {
+			_ = expectSend(expect)(t, s, pcb)
+
+			r, err := pcb.Flush(ctx)
+			require.NoError(t, err)
+			require.Len(t, r, 1)
+			require.Empty(t, r[0].Error)
+			sort.Slice(r[0].Sectors, func(i, j int) bool {
+				return r[0].Sectors[i] < r[0].Sectors[j]
+			})
+			require.Equal(t, expect, r[0].Sectors)
+
+			return nil
+		}
+	}
+
+	getSectors := func(n int) []abi.SectorNumber {
+		out := make([]abi.SectorNumber, n)
+		for i := range out {
+			out[i] = abi.SectorNumber(i)
+		}
+		return out
+	}
+
+	tcs := map[string]struct {
+		actions []action
+	}{
+		"addSingle": {
+			actions: []action{
+				addSector(0),
+				waitPending(1),
+				flush([]abi.SectorNumber{0}),
+			},
+		},
+		"addTwo": {
+			actions: []action{
+				addSectors(getSectors(2)),
+				waitPending(2),
+				flush(getSectors(2)),
+			},
+		},
+		"addMax": {
+			actions: []action{
+				expectSend(getSectors(maxBatch)),
+				addSectors(getSectors(maxBatch)),
+			},
+		},
+	}
+
+	for name, tc := range tcs {
+		tc := tc
+
+		t.Run(name, func(t *testing.T) {
+			// create go mock controller here
+			mockCtrl := gomock.NewController(t)
+			// when test is done, assert expectations on all mock objects.
+			defer mockCtrl.Finish()
+
+			// create them mocks
+			pcapi := mocks.NewMockPreCommitBatcherApi(mockCtrl)
+
+			pcb := sealing.NewPreCommitBatcher(ctx, &config.NetParamsConfig{
+				UpgradeIgnitionHeight:  94000,
+				ForkLengthThreshold:    policy.ChainFinality,
+				InsecurePoStValidation: false,
+				BlockDelaySecs:         30,
+			}, t0123, pcapi, as, fc, cfg)
+
+			var promises []promise
+
+			for _, a := range tc.actions {
+				p := a(t, pcapi, pcb)
+				if p != nil {
+					promises = append(promises, p)
+				}
+			}
+
+			for _, p := range promises {
+				p(t)
+			}
+
+			err := pcb.Stop(ctx)
+			require.NoError(t, err)
+		})
+	}
+}
+
+type funMatcher func(interface{}) bool
+
+// Matches invokes the wrapped predicate, so the assertions inside it
+// actually run against the recorded call's argument.
+func (fm funMatcher) Matches(v interface{}) bool {
+	return fm(v)
+}
+
+func (fm funMatcher) String() string {
+	return "fun"
+}
diff --git a/storage-sealing/precommit_policy.go b/storage-sealing/precommit_policy.go
index e0e7f34d..67bbbea6 100644
--- a/storage-sealing/precommit_policy.go
+++ b/storage-sealing/precommit_policy.go
@@ -41,7 +41,10 @@ type BasicPreCommitPolicy struct {
 	duration abi.ChainEpoch
 }
 
-// NewBasicPreCommitPolicy produces a BasicPreCommitPolicy
+// NewBasicPreCommitPolicy produces a BasicPreCommitPolicy.
+//
+// The provided duration is used as the default sector expiry when the sector
+// contains no deals.
The proving boundary is used to adjust/align the sector's expiration. func NewBasicPreCommitPolicy(api Chain, duration abi.ChainEpoch, provingBoundary abi.ChainEpoch) BasicPreCommitPolicy { return BasicPreCommitPolicy{ api: api, diff --git a/storage-sealing/states_sealing.go b/storage-sealing/states_sealing.go index 9f838c81..33d88bac 100644 --- a/storage-sealing/states_sealing.go +++ b/storage-sealing/states_sealing.go @@ -101,8 +101,8 @@ func (m *Sealing) padSector(ctx context.Context, sectorID storage.SectorRef, exi return out, nil } -func checkTicketExpired(sector types.SectorInfo, epoch abi.ChainEpoch) bool { - return epoch-sector.TicketEpoch > types.MaxTicketAge // TODO: allow configuring expected seal durations +func checkTicketExpired(ticket, head abi.ChainEpoch) bool { + return head-ticket > types.MaxTicketAge // TODO: allow configuring expected seal durations } func checkProveCommitExpired(preCommitEpoch, msd abi.ChainEpoch, currEpoch abi.ChainEpoch) bool { @@ -205,7 +205,7 @@ func (m *Sealing) handlePreCommit1(ctx statemachine.Context, sector types.Sector return nil } - if checkTicketExpired(sector, height) { + if checkTicketExpired(sector.TicketEpoch, height) { pci, err := m.api.StateSectorPreCommitInfo(ctx.Context(), m.maddr, sector.SectorNumber, tok) if err != nil { log.Errorf("handlePreCommit1: StateSectorPreCommitInfo: api error, not proceeding: %+v", err) @@ -312,7 +312,7 @@ func (m *Sealing) preCommitParams(ctx statemachine.Context, sector types.SectorI msd := policy.GetMaxProveCommitDuration(actors.VersionForNetwork(nv), sector.SectorType) - if minExpiration := height + msd + miner.MinSectorExpiration + 10; expiration < minExpiration { + if minExpiration := sector.TicketEpoch + policy.MaxPreCommitRandomnessLookback + msd + miner.MinSectorExpiration; expiration < minExpiration { expiration = minExpiration } // TODO: enforce a reasonable _maximum_ sector lifetime? @@ -655,15 +655,15 @@ func (m *Sealing) handleSubmitCommitAggregate(ctx statemachine.Context, sector t } res, err := m.commiter.AddCommit(ctx.Context(), sector, AggregateInput{ - info: proof.AggregateSealVerifyInfo{ + Info: proof.AggregateSealVerifyInfo{ Number: sector.SectorNumber, Randomness: sector.TicketValue, InteractiveRandomness: sector.SeedValue, SealedCID: *sector.CommR, UnsealedCID: *sector.CommD, }, - proof: sector.Proof, // todo: this correct?? - spt: sector.SectorType, + Proof: sector.Proof, // todo: this correct?? + Spt: sector.SectorType, }) if err != nil { return ctx.Send(SectorRetrySubmitCommit{}) diff --git a/storage/adapter_storage_miner.go b/storage/adapter_storage_miner.go index 12c45a94..82b745ec 100644 --- a/storage/adapter_storage_miner.go +++ b/storage/adapter_storage_miner.go @@ -37,11 +37,11 @@ import ( var _ sealing.SealingAPI = new(SealingAPIAdapter) type SealingAPIAdapter struct { - delegate storageMinerApi + delegate fullNodeFilteredAPI messager api.IMessager } -func NewSealingAPIAdapter(api storageMinerApi, messager api.IMessager) SealingAPIAdapter { +func NewSealingAPIAdapter(api fullNodeFilteredAPI, messager api.IMessager) SealingAPIAdapter { return SealingAPIAdapter{delegate: api, messager: messager} } diff --git a/storage/miner.go b/storage/miner.go index 3cab9a84..9b8f3e23 100644 --- a/storage/miner.go +++ b/storage/miner.go @@ -40,6 +40,14 @@ import ( var log = logging.Logger("storageminer") +// Miner is the central miner entrypoint object inside Lotus. It is +// instantiated in the node builder, along with the WindowPoStScheduler. 
+// +// This object is the owner of the sealing pipeline. Most of the actual logic +// lives in the storage-sealing module (sealing.Sealing), and the Miner object +// exposes it to the rest of the system by proxying calls. +// +// Miner#Run starts the sealing FSM. type Miner struct { messager api.IMessager metadataService *service.MetadataService @@ -47,7 +55,7 @@ type Miner struct { logService *service.LogService networkParams *config.NetParamsConfig - api storageMinerApi + api fullNodeFilteredAPI feeCfg config.MinerFeeConfig sealer sectorstorage.SectorManager @@ -75,7 +83,9 @@ type SealingStateEvt struct { Error string } -type storageMinerApi interface { +// fullNodeFilteredAPI is the subset of the full node API the Miner needs from +// a Lotus full node. +type fullNodeFilteredAPI interface { // Call a read only method on actors (no interaction with the chain required) StateCall(context.Context, *types.Message, types.TipSetKey) (*apitypes.InvocResult, error) StateMinerSectors(context.Context, address.Address, *bitfield.BitField, types.TipSetKey) ([]*miner.SectorOnChainInfo, error) @@ -123,7 +133,21 @@ type storageMinerApi interface { WalletHas(context.Context, address.Address) (bool, error) } -func NewMiner(api storageMinerApi, messager api.IMessager, maddr address.Address, metaService *service.MetadataService, sectorInfoService *service.SectorInfoService, logService *service.LogService, sealer sectorstorage.SectorManager, sc types2.SectorIDCounter, verif ffiwrapper.Verifier, prover ffiwrapper.Prover, gsd types2.GetSealingConfigFunc, feeCfg config.MinerFeeConfig, journal journal.Journal, as *AddressSelector, networkParams *config.NetParamsConfig) (*Miner, error) { +func NewMiner(api fullNodeFilteredAPI, + messager api.IMessager, + maddr address.Address, + metaService *service.MetadataService, + sectorInfoService *service.SectorInfoService, + logService *service.LogService, + sealer sectorstorage.SectorManager, + sc types2.SectorIDCounter, + verif ffiwrapper.Verifier, + prover ffiwrapper.Prover, + gsd types2.GetSealingConfigFunc, + feeCfg config.MinerFeeConfig, + journal journal.Journal, + as *AddressSelector, + networkParams *config.NetParamsConfig) (*Miner, error) { m := &Miner{ api: api, messager: messager, @@ -146,6 +170,7 @@ func NewMiner(api storageMinerApi, messager api.IMessager, maddr address.Address return m, nil } +// Run starts the sealing FSM in the background, running preliminary checks first. func (m *Miner) Run(ctx context.Context) error { if err := m.runPreflightChecks(ctx); err != nil { return xerrors.Errorf("miner preflight checks failed: %w", err) @@ -156,18 +181,38 @@ func (m *Miner) Run(ctx context.Context) error { return xerrors.Errorf("getting miner info: %w", err) } - evts := events.NewEvents(ctx, m.api) - adaptedAPI := NewSealingAPIAdapter(m.api, m.messager) - // TODO: Maybe we update this policy after actor upgrades? - pcp := sealing.NewBasicPreCommitPolicy(adaptedAPI, policy.GetMaxSectorExpirationExtension()-(md.WPoStProvingPeriod*2), md.PeriodStart%md.WPoStProvingPeriod) + var ( + // consumer of chain head changes. + evts = events.NewEvents(ctx, m.api) + evtsAdapter = NewEventsAdapter(evts) - as := func(ctx context.Context, mi miner.MinerInfo, use api.AddrUse, goodFunds, minFunds abi.TokenAmount) (address.Address, abi.TokenAmount, error) { - return m.addrSel.AddressFor(ctx, m.api, m.messager, mi, use, goodFunds, minFunds) - } + // Create a shim to glue the API required by the sealing component + // with the API that Lotus is capable of providing. 
+ // The shim translates between "tipset tokens" and tipset keys, and + // provides extra methods. + adaptedAPI = NewSealingAPIAdapter(m.api, m.messager) + + // Instantiate a precommit policy. + defaultDuration = policy.GetMaxSectorExpirationExtension() - (md.WPoStProvingPeriod * 2) + provingBoundary = md.PeriodStart % md.WPoStProvingPeriod + + // TODO: Maybe we update this policy after actor upgrades? + pcp = sealing.NewBasicPreCommitPolicy(adaptedAPI, defaultDuration, provingBoundary) + + // address selector. + as = func(ctx context.Context, mi miner.MinerInfo, use api.AddrUse, goodFunds, minFunds abi.TokenAmount) (address.Address, abi.TokenAmount, error) { + return m.addrSel.AddressFor(ctx, m.api, m.messager, mi, use, goodFunds, minFunds) + } + + // sealing configuration. + cfg = types2.GetSealingConfigFunc(m.getSealConfig) + ) - m.sealing = sealing.New(ctx, adaptedAPI, m.feeCfg, NewEventsAdapter(evts), m.maddr, m.metadataService, m.sectorInfoService, m.logService, m.sealer, m.sc, m.verif, m.prover, - &pcp, types2.GetSealingConfigFunc(m.getSealConfig), m.handleSealingNotifications, as, m.networkParams) + // Instantiate the sealing FSM. + m.sealing = sealing.New(ctx, adaptedAPI, m.feeCfg, evtsAdapter, m.maddr, m.metadataService, m.sectorInfoService, m.logService, m.sealer, m.sc, m.verif, m.prover, + &pcp, cfg, m.handleSealingNotifications, as, m.networkParams) + // Run the sealing FSM. go m.sealing.Run(ctx) //nolint:errcheck // logged intside the function return nil @@ -189,6 +234,7 @@ func (m *Miner) Stop(ctx context.Context) error { return m.sealing.Stop(ctx) } +// runPreflightChecks verifies that preconditions to run the miner are satisfied. func (m *Miner) runPreflightChecks(ctx context.Context) error { mi, err := m.api.StateMinerInfo(ctx, m.maddr, types.EmptyTSK) if err != nil { diff --git a/storage/sealing.go b/storage/miner_sealing.go similarity index 100% rename from storage/sealing.go rename to storage/miner_sealing.go diff --git a/storage/wdpost_changehandler.go b/storage/wdpost_changehandler.go index 2de87e62..3d84196f 100644 --- a/storage/wdpost_changehandler.go +++ b/storage/wdpost_changehandler.go @@ -21,22 +21,24 @@ const ( type CompleteGeneratePoSTCb func(posts []miner.SubmitWindowedPoStParams, err error) type CompleteSubmitPoSTCb func(err error) -type changeHandlerAPI interface { +// wdPoStCommands is the subset of the WindowPoStScheduler + full node APIs used +// by the changeHandler to execute actions and query state. 
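+//
+// The scheduler itself provides the start/abort/record callbacks, while the
+// full node provides the state queries, so WindowPoStScheduler.Run satisfies
+// this interface with a small embedded union (see wdpost_sched.go), roughly:
+//
+//	callbacks := struct {
+//		fullNodeFilteredAPI
+//		*WindowPoStScheduler
+//	}{s.api, s}
+//	s.ch = newChangeHandler(callbacks, s.actor)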
+type wdPoStCommands interface { StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*dline.Info, error) startGeneratePoST(ctx context.Context, ts *types.TipSet, deadline *dline.Info, onComplete CompleteGeneratePoSTCb) context.CancelFunc startSubmitPoST(ctx context.Context, ts *types.TipSet, deadline *dline.Info, posts []miner.SubmitWindowedPoStParams, onComplete CompleteSubmitPoSTCb) context.CancelFunc onAbort(ts *types.TipSet, deadline *dline.Info) - failPost(err error, ts *types.TipSet, deadline *dline.Info) + recordPoStFailure(err error, ts *types.TipSet, deadline *dline.Info) } type changeHandler struct { - api changeHandlerAPI + api wdPoStCommands actor address.Address proveHdlr *proveHandler submitHdlr *submitHandler } -func newChangeHandler(api changeHandlerAPI, actor address.Address) *changeHandler { +func newChangeHandler(api wdPoStCommands, actor address.Address) *changeHandler { posts := newPostsCache() p := newProver(api, posts) s := newSubmitter(api, posts) @@ -146,7 +148,7 @@ type postResult struct { // proveHandler generates proofs type proveHandler struct { - api changeHandlerAPI + api wdPoStCommands posts *postsCache postResults chan *postResult @@ -163,7 +165,7 @@ type proveHandler struct { } func newProver( - api changeHandlerAPI, + api wdPoStCommands, posts *postsCache, ) *proveHandler { ctx, cancel := context.WithCancel(context.Background()) @@ -248,7 +250,7 @@ func (p *proveHandler) processPostResult(res *postResult) { di := res.currPost.di if res.err != nil { // Proving failed so inform the API - p.api.failPost(res.err, res.ts, di) + p.api.recordPoStFailure(res.err, res.ts, di) log.Warnf("Aborted window post Proving (Deadline: %+v)", di) p.api.onAbort(res.ts, di) @@ -295,7 +297,7 @@ type postInfo struct { // submitHandler submits proofs on-chain type submitHandler struct { - api changeHandlerAPI + api wdPoStCommands posts *postsCache submitResults chan *submitResult @@ -319,7 +321,7 @@ type submitHandler struct { } func newSubmitter( - api changeHandlerAPI, + api wdPoStCommands, posts *postsCache, ) *submitHandler { ctx, cancel := context.WithCancel(context.Background()) @@ -488,7 +490,7 @@ func (s *submitHandler) submitIfReady(ctx context.Context, advance *types.TipSet func (s *submitHandler) processSubmitResult(res *submitResult) { if res.err != nil { // Submit failed so inform the API and go back to the start state - s.api.failPost(res.err, res.pw.ts, res.pw.di) + s.api.recordPoStFailure(res.err, res.pw.ts, res.pw.di) log.Warnf("Aborted window post Submitting (Deadline: %+v)", res.pw.di) s.api.onAbort(res.pw.ts, res.pw.di) diff --git a/storage/wdpost_changehandler_test.go b/storage/wdpost_changehandler_test.go index 65558113..3e854bbb 100644 --- a/storage/wdpost_changehandler_test.go +++ b/storage/wdpost_changehandler_test.go @@ -191,7 +191,7 @@ func (m *mockAPI) wasAbortCalled() bool { return m.abortCalled } -func (m *mockAPI) failPost(err error, ts *types.TipSet, deadline *dline.Info) { +func (m *mockAPI) recordPoStFailure(err error, ts *types.TipSet, deadline *dline.Info) { } func (m *mockAPI) setChangeHandler(ch *changeHandler) { diff --git a/storage/wdpost_run.go b/storage/wdpost_run.go index a0e3326a..5c1c2500 100644 --- a/storage/wdpost_run.go +++ b/storage/wdpost_run.go @@ -33,7 +33,8 @@ import ( "github.com/filecoin-project/venus/pkg/types" ) -func (s *WindowPoStScheduler) failPost(err error, ts *types.TipSet, deadline *dline.Info) { +// recordPoStFailure records a failure in the journal. 
+func (s *WindowPoStScheduler) recordPoStFailure(err error, ts *types.TipSet, deadline *dline.Info) {
 	s.journal.RecordEvent(s.evtTypes[evtTypeWdPoStScheduler], func() interface{} {
 		c := evtCommon{Error: err}
 		if ts != nil {
@@ -101,9 +102,9 @@ func (s *WindowPoStScheduler) runGeneratePoST(
 	ctx, span := trace.StartSpan(ctx, "WindowPoStScheduler.generatePoST")
 	defer span.End()
 
-	posts, err := s.runPost(ctx, *deadline, ts)
+	posts, err := s.runPoStCycle(ctx, *deadline, ts)
 	if err != nil {
-		log.Errorf("runPost failed: %+v", err)
+		log.Errorf("runPoStCycle failed: %+v", err)
 		return nil, err
 	}
@@ -169,7 +170,7 @@ func (s *WindowPoStScheduler) runSubmitPoST(
 	commRand, err := s.api.ChainGetRandomnessFromTickets(ctx, ts.Key(), crypto.DomainSeparationTag_PoStChainCommit, commEpoch, nil)
 	if err != nil {
 		err = xerrors.Errorf("failed to get chain randomness from tickets for windowPost (ts=%d; deadline=%d): %w", ts.Height(), commEpoch, err)
-		log.Errorf("submitPost failed: %+v", err)
+		log.Errorf("submitPoStMessage failed: %+v", err)
 		return err
 	}
@@ -182,7 +183,7 @@ func (s *WindowPoStScheduler) runSubmitPoST(
 	post.ChainCommitRand = commRand
 
 	// Submit PoST
-	uid, submitErr := s.submitPost(ctx, post)
+	uid, submitErr := s.submitPoStMessage(ctx, post)
 	if submitErr != nil {
 		log.Errorf("submit window post failed: %+v", submitErr)
 	} else {
@@ -235,8 +236,25 @@ func (s *WindowPoStScheduler) checkSectors(ctx context.Context, check bitfield.B
 	return sbf, nil
 }
 
-func (s *WindowPoStScheduler) checkNextRecoveries(ctx context.Context, dlIdx uint64, partitions []apitypes.Partition, tsk types.TipSetKey) ([]miner.RecoveryDeclaration, *types3.MessageWithUID, error) {
-	ctx, span := trace.StartSpan(ctx, "storage.checkNextRecoveries")
+// declareRecoveries identifies sectors that were previously marked as faulty
+// for our miner, but are now recovered (i.e. are now provable again) and
+// still not reported as such.
+//
+// It then reports the recovery on chain via a `DeclareFaultsRecovered`
+// message to our miner actor.
+//
+// This is always invoked ahead of time, before the deadline for the evaluated
+// sectors arrives. That way, recoveries are declared in preparation for those
+// sectors to be proven.
+//
+// If a declaration is made, it waits for build.MessageConfidence confirmations
+// on chain before returning.
+//
+// TODO: the waiting should happen in the background. Right now this
+// is blocking/delaying the actual generation and submission of WindowPoSts in
+// this deadline!
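+//
+// A rough sketch of the per-partition selection (illustrative only; the
+// bitfield helper usage below is an assumption, not code from this change):
+//
+//	// faulty sectors that are not already marked as recovering
+//	unrecovered, _ := bitfield.SubtractBitField(partition.FaultySectors, partition.RecoveringSectors)
+//	// of those, keep only the ones that are provable again
+//	recovered, _ := s.checkSectors(ctx, unrecovered, tsk)
+//	// `recovered` then backs the RecoveryDeclaration sent on chain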
+func (s *WindowPoStScheduler) declareRecoveries(ctx context.Context, dlIdx uint64, partitions []apitypes.Partition, tsk types.TipSetKey) ([]miner.RecoveryDeclaration, *types3.MessageWithUID, error) {
+	ctx, span := trace.StartSpan(ctx, "storage.declareRecoveries")
 	defer span.End()
 
 	faulty := uint64(0)
@@ -304,7 +322,7 @@ func (s *WindowPoStScheduler) checkNextRecoveries(ctx context.Context, dlIdx uin
 		Value:  types.NewInt(0),
 	}
 	spec := &types.MessageSendSpec{MaxFee: abi.TokenAmount(s.feeCfg.MaxWindowPoStGasFee)}
-	if err := s.setSender(ctx, msg, spec); err != nil {
+	if err := s.prepareMessage(ctx, msg, spec); err != nil {
 		return recoveries, nil, err
 	}
@@ -331,8 +349,21 @@ func (s *WindowPoStScheduler) checkNextRecoveries(ctx context.Context, dlIdx uin
 	return recoveries, sm, nil
 }
 
-func (s *WindowPoStScheduler) checkNextFaults(ctx context.Context, dlIdx uint64, partitions []apitypes.Partition, tsk types.TipSetKey) ([]miner.FaultDeclaration, *types3.MessageWithUID, error) {
-	ctx, span := trace.StartSpan(ctx, "storage.checkNextFaults")
+// declareFaults identifies the sectors on the specified proving deadline that
+// are faulty, and reports the faults on chain via the `DeclareFaults` message
+// to our miner actor.
+//
+// This is always invoked ahead of time, before the deadline for the evaluated
+// sectors arrives. That way, faults are declared before a penalty is accrued.
+//
+// If a declaration is made, it waits for build.MessageConfidence confirmations
+// on chain before returning.
+//
+// TODO: the waiting should happen in the background. Right now this
+// is blocking/delaying the actual generation and submission of WindowPoSts in
+// this deadline!
+func (s *WindowPoStScheduler) declareFaults(ctx context.Context, dlIdx uint64, partitions []apitypes.Partition, tsk types.TipSetKey) ([]miner.FaultDeclaration, *types3.MessageWithUID, error) {
+	ctx, span := trace.StartSpan(ctx, "storage.declareFaults")
 	defer span.End()
 
 	bad := uint64(0)
@@ -393,7 +424,7 @@ func (s *WindowPoStScheduler) checkNextFaults(ctx context.Context, dlIdx uint64,
 		Value:  types.NewInt(0), // TODO: Is there a fee?
 	}
 	spec := &types.MessageSendSpec{MaxFee: abi.TokenAmount(s.feeCfg.MaxWindowPoStGasFee)}
-	if err := s.setSender(ctx, msg, spec); err != nil {
+	if err := s.prepareMessage(ctx, msg, spec); err != nil {
 		return faults, nil, err
 	}
@@ -419,12 +450,18 @@ func (s *WindowPoStScheduler) checkNextFaults(ctx context.Context, dlIdx uint64,
 	return faults, sm, nil
 }
 
-func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *types.TipSet) ([]miner.SubmitWindowedPoStParams, error) {
-	ctx, span := trace.StartSpan(ctx, "storage.runPost")
+// runPoStCycle runs a full cycle of the PoSt process:
+//
+//  1. performs recovery declarations for the next deadline.
+//  2. performs fault declarations for the next deadline.
+//  3. computes and submits proofs, batching partitions and making sure they
+//     don't exceed message capacity.
+func (s *WindowPoStScheduler) runPoStCycle(ctx context.Context, di dline.Info, ts *types.TipSet) ([]miner.SubmitWindowedPoStParams, error) {
+	ctx, span := trace.StartSpan(ctx, "storage.runPoStCycle")
 	defer span.End()
 
 	go func() {
-		// TODO: extract from runPost, run on fault cutoff boundaries
+		// TODO: extract from runPoStCycle, run on fault cutoff boundaries
 
 		// check faults / recoveries for the *next* deadline. It's already too
 		// late to declare them for this deadline
@@ -452,7 +489,7 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *ty
 			}
 		)
 
-		if recoveries, uidMsg, err = s.checkNextRecoveries(context.TODO(), declDeadline, partitions, ts.Key()); err != nil {
+		if recoveries, uidMsg, err = s.declareRecoveries(context.TODO(), declDeadline, partitions, ts.Key()); err != nil {
 			// TODO: This is potentially quite bad, but not even trying to post when this fails is objectively worse
 			log.Errorf("checking sector recoveries: %v", err)
 		}
@@ -471,7 +508,7 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *ty
 			return // FORK: declaring faults after ignition upgrade makes no sense
 		}
 
-		if faults, uidMsg, err = s.checkNextFaults(context.TODO(), declDeadline, partitions, ts.Key()); err != nil {
+		if faults, uidMsg, err = s.declareFaults(context.TODO(), declDeadline, partitions, ts.Key()); err != nil {
 			// TODO: This is also potentially really bad, but we try to post anyways
 			log.Errorf("checking sector faults: %v", err)
 		}
@@ -775,8 +812,10 @@ func (s *WindowPoStScheduler) sectorsForProof(ctx context.Context, goodSectors,
 }
 
 const RetrySubmitPoStCounts = 30
-
-func (s *WindowPoStScheduler) submitPost(ctx context.Context, proof *miner.SubmitWindowedPoStParams) (string, error) {
+
+// submitPoStMessage builds a SubmitWindowedPoSt message and submits it to
+// the mpool. It doesn't synchronously block on confirmations, but it does
+// monitor in the background simply for the purposes of logging.
+func (s *WindowPoStScheduler) submitPoStMessage(ctx context.Context, proof *miner.SubmitWindowedPoStParams) (string, error) {
 	ctx, span := trace.StartSpan(ctx, "storage.commitPost")
 	defer span.End()
@@ -799,7 +838,7 @@
 	for idx := 0; idx < RetrySubmitPoStCounts; idx++ {
-		if err = s.setSender(ctx, msg, spec); err != nil {
+		if err = s.prepareMessage(ctx, msg, spec); err != nil {
 			log.Errorf("[%d] submitPoSt set sender failed: %v", idx+1, err)
 			time.Sleep(10 * time.Second)
 			continue
@@ -840,14 +879,20 @@
 	return uid, nil
 }
 
-func (s *WindowPoStScheduler) setSender(ctx context.Context, msg *types.Message, spec *types.MessageSendSpec) error {
+// prepareMessage prepares a message before sending it, setting:
+//
+// * the sender (from the AddressSelector, falling back to the worker address if none set)
+// * the right gas parameters
+func (s *WindowPoStScheduler) prepareMessage(ctx context.Context, msg *types.Message, spec *types.MessageSendSpec) error {
 	mi, err := s.api.StateMinerInfo(ctx, s.actor, types.EmptyTSK)
 	if err != nil {
 		return xerrors.Errorf("error getting miner info: %w", err)
 	}
 
-	// use the worker as a fallback
+	// set the worker as a fallback
 	msg.From = mi.Worker
 
+	// (optimal) initial estimation with some overestimation that guarantees
+	// block inclusion within the next 20 tipsets.
 	gm, err := s.api.GasEstimateMessageGas(ctx, msg, spec, types.EmptyTSK)
 	if err != nil {
 		log.Errorw("estimating gas", "error", err)
@@ -855,10 +900,12 @@
 	}
 	*msg = *gm
 
-	// estimate
+	// calculate a more frugal estimation; premium is estimated to guarantee
+	// inclusion within 5 tipsets, and fee cap is estimated for inclusion
+	// within 4 tipsets.
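+	//
+	// Illustrative numbers (assumed, not from this change): if the optimal
+	// estimate prices the message's gas at 2 FIL and the frugal one at
+	// 1.2 FIL, then
+	//
+	//	goodFunds = 2.0 FIL + msg.Value
+	//	minFunds  = min(1.2 FIL + msg.Value, goodFunds)
+	//
+	// and the address selector can then pick a sender that holds at least
+	// minFunds, preferring one that can cover goodFunds.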
 	minGasFeeMsg := *msg
-	minGasFeeMsg.GasPremium, err = s.api.GasEstimateGasPremium(ctx, 5, msg.From, msg.GasLimit, types.TipSetKey{})
+	minGasFeeMsg.GasPremium, err = s.api.GasEstimateGasPremium(ctx, 5, msg.From, msg.GasLimit, types.EmptyTSK)
 	if err != nil {
 		log.Errorf("failed to estimate minimum gas premium: %+v", err)
 		minGasFeeMsg.GasPremium = msg.GasPremium
@@ -870,6 +917,8 @@ func (s *WindowPoStScheduler) setSender(ctx context.Context, msg *types.Message,
 		minGasFeeMsg.GasFeeCap = msg.GasFeeCap
 	}
 
+	// goodFunds = funds needed for optimal inclusion probability.
+	// minFunds = funds needed for more speculative inclusion probability.
 	goodFunds := big.Add(msg.RequiredFunds(), msg.Value)
 	minFunds := big.Min(big.Add(minGasFeeMsg.RequiredFunds(), minGasFeeMsg.Value), goodFunds)
diff --git a/storage/wdpost_run_test.go b/storage/wdpost_run_test.go
index 3a1aad78..f9a0d4b2 100644
--- a/storage/wdpost_run_test.go
+++ b/storage/wdpost_run_test.go
@@ -42,7 +42,7 @@ import (
 type mockStorageMinerAPI struct {
 	partitions     []apitypes.Partition
 	pushedMessages chan *types.Message
-	storageMinerApi
+	fullNodeFilteredAPI
 }
 
 func newMockStorageMinerAPI() *mockStorageMinerAPI {
@@ -403,4 +403,4 @@ func (m *mockStorageMinerAPI) WalletHas(ctx context.Context, address address.Add
 	return true, nil
 }
 
-var _ storageMinerApi = &mockStorageMinerAPI{}
+var _ fullNodeFilteredAPI = &mockStorageMinerAPI{}
diff --git a/storage/wdpost_sched.go b/storage/wdpost_sched.go
index 7bc81dd4..c18565af 100644
--- a/storage/wdpost_sched.go
+++ b/storage/wdpost_sched.go
@@ -23,11 +23,17 @@ import (
 	"github.com/filecoin-project/venus/pkg/types"
 )
 
+// WindowPoStScheduler is the coordinator for WindowPoSt submissions, fault
+// declarations, and recovery declarations. It watches the chain for reverts
+// and applies, and schedules/runs those processes as partition deadlines
+// arrive.
+//
+// WindowPoStScheduler watches the chain through the changeHandler, which in
+// turn calls the scheduler when the time arrives to do work.
 type WindowPoStScheduler struct {
 	Messager      api.IMessager
 	networkParams *config.NetParamsConfig
 
-	api       storageMinerApi
+	api       fullNodeFilteredAPI
 	feeCfg    config.MinerFeeConfig
 	addrSel   *AddressSelector
 	prover    storage.Prover
@@ -46,7 +52,17 @@ type WindowPoStScheduler struct {
 	// failLk sync.Mutex
 }
 
-func NewWindowedPoStScheduler(api storageMinerApi, messager api.IMessager, fc config.MinerFeeConfig, as *AddressSelector, sb storage.Prover, verif ffiwrapper.Verifier, ft sectorstorage.FaultTracker, j journal.Journal, actor address.Address, networkParams *config.NetParamsConfig) (*WindowPoStScheduler, error) {
+// NewWindowedPoStScheduler creates a new WindowPoSt scheduler.
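+//
+// Hypothetical wiring at a call site (the argument names are placeholders,
+// not part of this change):
+//
+//	sched, err := NewWindowedPoStScheduler(api, messager, feeCfg, addrSel,
+//		prover, verif, faultTracker, jrnl, maddr, netParams)
+//	if err != nil {
+//		return err
+//	}
+//	go sched.Run(ctx)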
+func NewWindowedPoStScheduler(api fullNodeFilteredAPI, + messager api.IMessager, + fc config.MinerFeeConfig, + as *AddressSelector, + sp storage.Prover, + verif ffiwrapper.Verifier, + ft sectorstorage.FaultTracker, + j journal.Journal, + actor address.Address, + networkParams *config.NetParamsConfig) (*WindowPoStScheduler, error) { mi, err := api.StateMinerInfo(context.TODO(), actor, types.EmptyTSK) if err != nil { return nil, xerrors.Errorf("getting sector size: %w", err) @@ -59,7 +75,7 @@ func NewWindowedPoStScheduler(api storageMinerApi, messager api.IMessager, fc co api: api, feeCfg: fc, addrSel: as, - prover: sb, + prover: sp, verifier: verif, faultTracker: ft, proofType: mi.WindowPoStProofType, @@ -76,21 +92,24 @@ func NewWindowedPoStScheduler(api storageMinerApi, messager api.IMessager, fc co }, nil } -type changeHandlerAPIImpl struct { - storageMinerApi - *WindowPoStScheduler -} - func (s *WindowPoStScheduler) Run(ctx context.Context) { - // Initialize change handler - chImpl := &changeHandlerAPIImpl{storageMinerApi: s.api, WindowPoStScheduler: s} - s.ch = newChangeHandler(chImpl, s.actor) + // Initialize change handler. + + // callbacks is a union of the fullNodeFilteredAPI and ourselves. + callbacks := struct { + fullNodeFilteredAPI + *WindowPoStScheduler + }{s.api, s} + + s.ch = newChangeHandler(callbacks, s.actor) defer s.ch.shutdown() s.ch.start() - var notifs <-chan []*chain.HeadChange - var err error - var gotCur bool + var ( + notifs <-chan []*chain.HeadChange + err error + gotCur bool + ) // not fine to panic after this point for { diff --git a/types/sector_state.go b/types/sector_state.go index 6cff0907..cab916d5 100644 --- a/types/sector_state.go +++ b/types/sector_state.go @@ -118,3 +118,4 @@ func toStatState(st SectorState) StatSectorState { return SstFailed } + diff --git a/types/task.go b/types/task.go index 75b8163b..8536e162 100644 --- a/types/task.go +++ b/types/task.go @@ -11,21 +11,19 @@ const ( TTFinalize TaskType = "seal/v0/finalize" - TTFetch TaskType = "seal/v0/fetch" - TTUnseal TaskType = "seal/v0/unseal" - TTReadUnsealed TaskType = "seal/v0/unsealread" + TTFetch TaskType = "seal/v0/fetch" + TTUnseal TaskType = "seal/v0/unseal" ) var order = map[TaskType]int{ - TTAddPiece: 6, // least priority - TTPreCommit1: 5, - TTPreCommit2: 4, - TTCommit2: 3, - TTCommit1: 2, - TTUnseal: 1, - TTFetch: -1, - TTReadUnsealed: -1, - TTFinalize: -2, // most priority + TTAddPiece: 6, // least priority + TTPreCommit1: 5, + TTPreCommit2: 4, + TTCommit2: 3, + TTCommit1: 2, + TTUnseal: 1, + TTFetch: -1, + TTFinalize: -2, // most priority } var shortNames = map[TaskType]string{ @@ -38,9 +36,8 @@ var shortNames = map[TaskType]string{ TTFinalize: "FIN", - TTFetch: "GET", - TTUnseal: "UNS", - TTReadUnsealed: "RD", + TTFetch: "GET", + TTUnseal: "UNS", } func (a TaskType) MuchLess(b TaskType) (bool, bool) {
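Addendum: the reworked priority table in types/task.go above is easiest to read with a small, self-contained sketch. Everything below is illustrative: the "seal/v0/addpiece" and "seal/v0/precommit/1" identifiers are assumptions (only the finalize, fetch and unseal strings appear in the diff), and the plain sort stands in for whatever the scheduler actually does with MuchLess. Lower weights in the order map mean higher priority, so finalize work is scheduled ahead of everything else.

package main

import (
	"fmt"
	"sort"
)

type TaskType string

const (
	TTAddPiece   TaskType = "seal/v0/addpiece"    // assumed identifier
	TTPreCommit1 TaskType = "seal/v0/precommit/1" // assumed identifier
	TTUnseal     TaskType = "seal/v0/unseal"
	TTFetch      TaskType = "seal/v0/fetch"
	TTFinalize   TaskType = "seal/v0/finalize"
)

// Mirrors the table above: a larger weight means less priority.
var order = map[TaskType]int{
	TTAddPiece:   6, // least priority
	TTPreCommit1: 5,
	TTUnseal:     1,
	TTFetch:      -1,
	TTFinalize:   -2, // most priority
}

func main() {
	pending := []TaskType{TTAddPiece, TTFinalize, TTPreCommit1, TTFetch, TTUnseal}

	// Schedule most-priority first: ascending weight.
	sort.Slice(pending, func(i, j int) bool {
		return order[pending[i]] < order[pending[j]]
	})

	fmt.Println(pending)
	// [seal/v0/finalize seal/v0/fetch seal/v0/unseal seal/v0/precommit/1 seal/v0/addpiece]
}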