diff --git a/.circleci/config.yml b/.circleci/config.yml index d151026f0f7..4d24e25b1f8 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -735,6 +735,45 @@ jobs: - packer/build: template: tools/packer/lotus.pkr.hcl args: "-var ci_workspace_bins=./linux-nerpanet -var lotus_network=nerpanet -var git_tag=$CIRCLE_TAG" + publish-dockerhub: + description: publish to dockerhub + machine: + image: ubuntu-2004:202010-01 + parameters: + tag: + type: string + default: latest + steps: + - checkout + - run: + name: dockerhub login + command: echo $DOCKERHUB_PASSWORD | docker login --username $DOCKERHUB_USERNAME --password-stdin + - run: + name: docker build + command: | + docker build --target lotus -t filecoin/lotus:<< parameters.tag >> -f Dockerfile.lotus . + docker build --target lotus-all-in-one -t filecoin/lotus-all-in-one:<< parameters.tag >> -f Dockerfile.lotus . + if [[ ! -z $CIRCLE_SHA1 ]]; then + docker build --target lotus -t filecoin/lotus:$CIRCLE_SHA1 -f Dockerfile.lotus . + docker build --target lotus-all-in-one -t filecoin/lotus-all-in-one:$CIRCLE_SHA1 -f Dockerfile.lotus . + fi + if [[ ! -z $CIRCLE_TAG ]]; then + docker build --target lotus -t filecoin/lotus:$CIRCLE_TAG -f Dockerfile.lotus . + docker build --target lotus-all-in-one -t filecoin/lotus-all-in-one:$CIRCLE_TAG -f Dockerfile.lotus . + fi + - run: + name: docker push + command: | + docker push filecoin/lotus:<< parameters.tag >> + docker push filecoin/lotus-all-in-one:<< parameters.tag >> + if [[ ! -z $CIRCLE_SHA1 ]]; then + docker push filecoin/lotus:$CIRCLE_SHA1 + docker push filecoin/lotus-all-in-one:$CIRCLE_SHA1 + fi + if [[ ! 
-z $CIRCLE_TAG ]]; then + docker push filecoin/lotus:$CIRCLE_TAG + docker push filecoin/lotus-all-in-one:$CIRCLE_TAG + fi workflows: version: 2.1 @@ -781,6 +820,11 @@ workflows: suite: itest-deals_offline target: "./itests/deals_offline_test.go" + - test: + name: test-itest-deals_padding + suite: itest-deals_padding + target: "./itests/deals_padding_test.go" + - test: name: test-itest-deals_power suite: itest-deals_power @@ -806,11 +850,21 @@ workflows: suite: itest-gateway target: "./itests/gateway_test.go" + - test: + name: test-itest-get_messages_in_ts + suite: itest-get_messages_in_ts + target: "./itests/get_messages_in_ts_test.go" + - test: name: test-itest-multisig suite: itest-multisig target: "./itests/multisig_test.go" + - test: + name: test-itest-nonce + suite: itest-nonce + target: "./itests/nonce_test.go" + - test: name: test-itest-paych_api suite: itest-paych_api @@ -831,6 +885,11 @@ workflows: suite: itest-sector_finalize_early target: "./itests/sector_finalize_early_test.go" + - test: + name: test-itest-sector_miner_collateral + suite: itest-sector_miner_collateral + target: "./itests/sector_miner_collateral_test.go" + - test: name: test-itest-sector_pledge suite: itest-sector_pledge @@ -1002,6 +1061,16 @@ workflows: tags: only: - /^v\d+\.\d+\.\d+(-rc\d+)?$/ + - publish-dockerhub: + name: publish-dockerhub + tag: stable + filters: + branches: + ignore: + - /.*/ + tags: + only: + - /^v\d+\.\d+\.\d+(-rc\d+)?$/ nightly: triggers: @@ -1015,3 +1084,6 @@ workflows: - publish-snapcraft: name: publish-snapcraft-nightly channel: edge + - publish-dockerhub: + name: publish-dockerhub-nightly + tag: nightly diff --git a/.circleci/template.yml b/.circleci/template.yml index fb59f23eafe..27036ab26bd 100644 --- a/.circleci/template.yml +++ b/.circleci/template.yml @@ -735,6 +735,45 @@ jobs: - packer/build: template: tools/packer/lotus.pkr.hcl args: "-var ci_workspace_bins=./linux-nerpanet -var lotus_network=nerpanet -var git_tag=$CIRCLE_TAG" + publish-dockerhub: + 
description: publish to dockerhub + machine: + image: ubuntu-2004:202010-01 + parameters: + tag: + type: string + default: latest + steps: + - checkout + - run: + name: dockerhub login + command: echo $DOCKERHUB_PASSWORD | docker login --username $DOCKERHUB_USERNAME --password-stdin + - run: + name: docker build + command: | + docker build --target lotus -t filecoin/lotus:<< parameters.tag >> -f Dockerfile.lotus . + docker build --target lotus-all-in-one -t filecoin/lotus-all-in-one:<< parameters.tag >> -f Dockerfile.lotus . + if [["[[ ! -z $CIRCLE_SHA1 ]]"]]; then + docker build --target lotus -t filecoin/lotus:$CIRCLE_SHA1 -f Dockerfile.lotus . + docker build --target lotus-all-in-one -t filecoin/lotus-all-in-one:$CIRCLE_SHA1 -f Dockerfile.lotus . + fi + if [["[[ ! -z $CIRCLE_TAG ]]"]]; then + docker build --target lotus -t filecoin/lotus:$CIRCLE_TAG -f Dockerfile.lotus . + docker build --target lotus-all-in-one -t filecoin/lotus-all-in-one:$CIRCLE_TAG -f Dockerfile.lotus . + fi + - run: + name: docker push + command: | + docker push filecoin/lotus:<< parameters.tag >> + docker push filecoin/lotus-all-in-one:<< parameters.tag >> + if [["[[ ! -z $CIRCLE_SHA1 ]]"]]; then + docker push filecoin/lotus:$CIRCLE_SHA1 + docker push filecoin/lotus-all-in-one:$CIRCLE_SHA1 + fi + if [["[[ ! 
-z $CIRCLE_TAG ]]"]]; then + docker push filecoin/lotus:$CIRCLE_TAG + docker push filecoin/lotus-all-in-one:$CIRCLE_TAG + fi workflows: version: 2.1 @@ -887,6 +926,16 @@ workflows: tags: only: - /^v\d+\.\d+\.\d+(-rc\d+)?$/ + - publish-dockerhub: + name: publish-dockerhub + tag: stable + filters: + branches: + ignore: + - /.*/ + tags: + only: + - /^v\d+\.\d+\.\d+(-rc\d+)?$/ nightly: triggers: @@ -900,3 +949,6 @@ workflows: - publish-snapcraft: name: publish-snapcraft-nightly channel: edge + - publish-dockerhub: + name: publish-dockerhub-nightly + tag: nightly diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 00000000000..b8ec66f00ea --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,6 @@ +# Reference +# https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/creating-a-repository-on-github/about-code-owners + +# Global owners +# Ensure maintainers team is a requested reviewer for non-draft PRs +* @filecoin-project/lotus-maintainers diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md deleted file mode 100644 index 23c7640b782..00000000000 --- a/.github/ISSUE_TEMPLATE/bug-report.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -name: Bug Report -about: Create a report to help us improve -title: "[BUG] " -labels: hint/needs-triaging, kind/bug -assignees: '' - ---- - -> Note: For security-related bugs/issues, please follow the [security policy](https://github.com/filecoin-project/lotus/security/policy). - -**Describe the bug** -A clear and concise description of what the bug is. -(If you are not sure what the bug is, try to figure it out via a [discussion](https://github.com/filecoin-project/lotus/discussions/new) first! - -**Version (run `lotus version`):** - -**To Reproduce** -Steps to reproduce the behavior: -1. Run '...' -2. See error - -**Expected behavior** -A clear and concise description of what you expected to happen. 
- -**Logs** -Provide daemon/miner/worker logs, and goroutines(if available) for troubleshooting. - -**Screenshots** -If applicable, add screenshots to help explain your problem. - -**Additional context** -Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml new file mode 100644 index 00000000000..7876715e2fe --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -0,0 +1,92 @@ +name: "Bug Report" +description: "File a bug report to help us improve" +labels: [need/triage, kind/bug] +body: +- type: checkboxes + attributes: + label: Checklist + description: Please check off the following boxes before continuing to file a bug report! + options: + - label: This is **not** a security-related bug/issue. If it is, please follow the [security policy](https://github.com/filecoin-project/lotus/security/policy). + required: true + - label: This is **not** a question or a support request. If you have any lotus related questions, please ask in the [lotus forum](https://github.com/filecoin-project/lotus/discussions). + required: true + - label: This is **not** a new feature request. If it is, please file a [feature request](https://github.com/filecoin-project/lotus/issues/new?assignees=&labels=need%2Ftriage%2Ckind%2Ffeature&template=feature_request.yml) instead. + required: true + - label: This is **not** an enhancement request. If it is, please file an [improvement suggestion](https://github.com/filecoin-project/lotus/issues/new?assignees=&labels=need%2Ftriage%2Ckind%2Fenhancement&template=enhancement.yml) instead. + required: true + - label: I **have** searched on the [issue tracker](https://github.com/filecoin-project/lotus/issues) and the [lotus forum](https://github.com/filecoin-project/lotus/discussions), and there is no existing related issue or discussion. 
+ required: true + - label: I am running the [`Latest release`](https://github.com/filecoin-project/lotus/releases), or the most recent RC(release candidate) for the upcoming release or the dev branch(master), or have an issue updating to any of these. + required: true + - label: I did not make any code changes to lotus. + required: false +- type: dropdown + id: component-and-area + validations: + required: true + attributes: + label: Lotus component + description: Please select the lotus component you are filing a bug for + options: + - lotus daemon - chain sync + - lotus miner - mining and block production + - lotus miner/worker - sealing + - lotus miner - proving(WindowPoSt) + - lotus miner/market - storage deal + - lotus miner/market - retrieval deal + - lotus client + - lotus JSON-RPC API + - lotus message management (mpool) + - Other +- type: textarea + id: version + attributes: + label: Lotus Version + description: Enter the output of `lotus version` and `lotus-miner version` if applicable. + placeholder: | + e.g. + Daemon:1.11.0-rc2+debug+git.0519cd371.dirty+api1.3.0 + Local: lotus version 1.11.0-rc2+debug+git.0519cd371.dirty + validations: + required: true +- type: textarea + id: Description + attributes: + label: Describe the Bug + description: | + This is where you get to tell us what went wrong, when doing so, please try to provide a clear and concise description of the bug with all related information: + * What you were doing when you experienced the bug? + * Any *error* messages you saw, *where* you saw them, and what you believe may have caused them (if you have any ideas). + * What is the expected behaviour? + * For sealing issues, include the output of `lotus-miner sectors status --log ` for the failed sector(s). + * For proving issues, include the output of `lotus-miner proving` info. 
+ * For deal making issues, include the output of `lotus client list-deals -v` and/or `lotus-miner storage-deals|retrieval-deals|data-transfers list [-v]` commands for the deal(s) in question. + render: bash + validations: + required: true +- type: textarea + id: extraInfo + attributes: + label: Logging Information + description: | + Please provide debug logs of the problem, remember you can get set log level control for: + * lotus: use `lotus log list` to get all log systems available and set level by `lotus log set-level`. An example can be found [here](https://docs.filecoin.io/get-started/lotus/configuration-and-advanced-usage/#log-level-control). + * lotus-miner:`lotus-miner log list` to get all log systems available and set level by `lotus-miner log set-level + If you don't provide detailed logs when you raise the issue it will almost certainly be the first request I make before further diagnosing the problem. + render: bash + validations: + required: true +- type: textarea + id: RepoSteps + attributes: + label: Repo Steps + description: "Steps to reproduce the behavior" + value: | + 1. Run '...' + 2. Do '...' + 3. See error '...' + ... + render: bash + validations: + required: false \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/deal-making-issues.md b/.github/ISSUE_TEMPLATE/deal-making-issues.md deleted file mode 100644 index bec800cb7ce..00000000000 --- a/.github/ISSUE_TEMPLATE/deal-making-issues.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -name: Deal Making Issues -about: Create a report for help with deal making failures. -title: "[Deal Making Issue]" -labels: hint/needs-triaging, area/markets -assignees: '' - ---- - -> Note: For security-related bugs/issues, please follow the [security policy](https://github.com/filecoin-project/lotus/security/policy). - -Please provide all the information requested here to help us troubleshoot "deal making failed" issues. 
-If the information requested is missing, we will probably have to just ask you to provide it anyway, -before we can help debug. - -**Basic Information** -Including information like, Are you the client or the miner? Is this a storage deal or a retrieval deal? Is it an offline deal? - -**Describe the problem** - -A brief description of the problem you encountered while trying to make a deal. - -**Version** - -The output of `lotus --version`. - -**Setup** - -You miner(if applicable) and daemon setup, i.e: What hardware do you use, how much ram and etc. - -**To Reproduce** - Steps to reproduce the behavior: - 1. Run '...' - 2. See error - -**Deal status** - -The output of `lotus client list-deals -v` and/or `lotus-miner storage-deals|retrieval-deals|data-transfers list [-v]` commands for the deal(s) in question. - -**Lotus daemon and miner logs** - -Please go through the logs of your daemon and miner(if applicable), and include screenshots of any error/warning-like messages you find. - -Alternatively please upload full log files and share a link here - -** Code modifications ** - -If you have modified parts of lotus, please describe which areas were modified, -and the scope of those modifications diff --git a/.github/ISSUE_TEMPLATE/enhancement.yml b/.github/ISSUE_TEMPLATE/enhancement.yml new file mode 100644 index 00000000000..7320fa5c542 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/enhancement.yml @@ -0,0 +1,44 @@ +name: Enhancement +description: Suggest an improvement to an existing lotus feature. +labels: [need/triage, kind/enhancement] +body: +- type: checkboxes + attributes: + label: Checklist + description: Please check off the following boxes before continuing to create an improvement suggestion! + options: + - label: This is **not** a new feature or an enhancement to the Filecoin protocol. If it is, please open an [FIP issue](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0001.md). + required: true + - label: This is **not** a new feature request. 
If it is, please file a [feature request](https://github.com/filecoin-project/lotus/issues/new?assignees=&labels=need%2Ftriage%2Ckind%2Ffeature&template=feature_request.yml) instead. + required: true + - label: This is **not** brainstorming ideas. If you have an idea you'd like to discuss, please open a new discussion on [the lotus forum](https://github.com/filecoin-project/lotus/discussions/categories/ideas) and select the category as `Ideas`. + required: true + - label: I **have** a specific, actionable, and well motivated improvement to propose. + required: true +- type: dropdown + id: component + validations: + required: true + attributes: + label: Lotus component + description: Please select the lotus component you are propoing improvement for + options: + - lotus daemon - chain sync + - lotus miner - mining and block production + - lotus miner/worker - sealing + - lotus miner - proving(WindowPoSt) + - lotus miner/market - storage deal + - lotus miner/market - retrieval deal + - lotus client + - lotus JSON-RPC API + - lotus message management (mpool) + - Other +- type: textarea + id: request + attributes: + label: Improvement Suggestion + description: A clear and concise description of what the motivation or the current problem is and what is the suggested improvement? + placeholder: Ex. Currently lotus... However, as a storage provider, I'd like... + validations: + required: true + diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md deleted file mode 100644 index 0803a6db827..00000000000 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -name: Feature request -about: Suggest an idea for this project -title: "[Feature Request]" -labels: hint/needs-triaging -assignees: '' - ---- - -**Is your feature request related to a problem? Please describe.** -A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 
- -**Describe the solution you'd like** -A clear and concise description of what you want to happen. - -**Describe alternatives you've considered** -A clear and concise description of any alternative solutions or features you've considered. - -**Additional context** -Add any other context or screenshots about the feature request here. diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml new file mode 100644 index 00000000000..5cb39b0d5a0 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -0,0 +1,63 @@ +name: Feature request +description: Suggest an idea for lotus +labels: [need/triage, kind/feature] +body: +- type: checkboxes + attributes: + label: Checklist + description: Please check off the following boxes before continuing to create a new feature request! + options: + - label: This is **not** a new feature or an enhancement to the Filecoin protocol. If it is, please open an [FIP issue](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0001.md). + required: true + - label: This is **not** brainstorming ideas. If you have an idea you'd like to discuss, please open a new discussion on [the lotus forum](https://github.com/filecoin-project/lotus/discussions/categories/ideas) and select the category as `Ideas`. + required: true + - label: I **have** a specific, actionable, and well motivated feature request to propose. 
+ required: true +- type: dropdown + id: component + validations: + required: true + attributes: + label: Lotus component + description: Please select the lotus component you are requesting a new feature for + options: + - lotus daemon - chain sync + - lotus miner - mining and block production + - lotus miner/worker - sealing + - lotus miner - proving(WindowPoSt) + - lotus miner/market - storage deal + - lotus miner/market - retrieval deal + - lotus client + - lotus JSON-RPC API + - lotus message management (mpool) + - Other +- type: textarea + id: request + attributes: + label: What is the motivation behind this feature request? Is your feature request related to a problem? Please describe. + description: A clear and concise description of what the motivation or the problem is. + placeholder: Ex. I'm always frustrated when [...] + validations: + required: true +- type: textarea + id: solution + attributes: + label: Describe the solution you'd like + description: A clear and concise description of what you want to happen. + validations: + required: true +- type: textarea + id: alternates + attributes: + label: Describe alternatives you've considered + description: A clear and concise description of any alternative solutions or features you've considered. + validations: + required: false +- type: textarea + id: extra + attributes: + label: Additional context + description: Add any other context, design docs or screenshots about the feature request here. 
+ validations: + required: false + diff --git a/.github/ISSUE_TEMPLATE/m1_bug_report_deal.yml b/.github/ISSUE_TEMPLATE/m1_bug_report_deal.yml new file mode 100644 index 00000000000..4402e97da73 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/m1_bug_report_deal.yml @@ -0,0 +1,91 @@ +name: "M1 Bug Report For Deal Making" +description: "File a bug report around deal making for the M1 releases" +labels: [need/triage, kind/bug, M1-release] +body: +- type: checkboxes + id: checklist + attributes: + label: Checklist + description: Please check off the following boxes before continuing to file a bug report! + options: + - label: This is **not** a question or a support request. If you have any lotus related questions, please ask in the [lotus forum](https://github.com/filecoin-project/lotus/discussions). + required: true + - label: I **am** reporting a bug w.r.t one of the [M1 tags](https://github.com/filecoin-project/lotus/discussions/6852#discussioncomment-1043951). If not, choose another issue option [here](https://github.com/filecoin-project/lotus/issues/new/choose). + required: true + - label: I **am** reporting a bug around deal making. If not, create a [M1 Bug Report For Non Deal Making Issue](https://github.com/filecoin-project/lotus/issues/new?assignees=&labels=need%2Ftriage%2Ckind%2Fbug%2CM1-release&template=m1_bug_report_non_deal.yml). + required: true + - label: I have my log level set as instructed [here](https://github.com/filecoin-project/lotus/discussions/6852#discussioncomment-1043678) and have logs available for troubleshooting. + required: true + - label: The deal is coming from one of the M1 clients(communitcated in the coordination slack channel). + required: true + - label: I **have** searched on the [issue tracker](https://github.com/filecoin-project/lotus/issues) and the [lotus forum](https://github.com/filecoin-project/lotus/discussions), and there is no existing related issue or discussion. 
+ required: true +- type: dropdown + id: lotus-componets + validations: + required: true + attributes: + label: Lotus Component + description: Please select the lotus component you are filing a bug for + options: + - lotus miner market subsystem - storage deal + - lotus miner market subsystem - retrieval deal + - lotus miner - storage deal + - lotus miner - retrieval deal +- type: textarea + id: version + attributes: + label: Lotus Tag and Version + description: Enter the lotus tag, output of `lotus version` and `lotus-miner version`. + validations: + required: true +- type: textarea + id: Description + attributes: + label: Describe the Bug + description: | + This is where you get to tell us what went wrong, when doing so, please try to provide a clear and concise description of the bug with all related information: + * What you were doing when you experienced the bug? + * Any *error* messages you saw, *where* you saw them, and what you believe may have caused them (if you have any ideas). + * What is the expected behaviour? + render: bash + validations: + required: true +- type: textarea + id: deal-status + attributes: + label: Deal Status + description: What's the status of the deal? + placeholder: | + Please share the output of `lotus-miner storage-deals|retrieval-deals list [-v]` commands for the deal(s) in question. + validations: + required: true +- type: textarea + id: data-transfer-status + attributes: + label: Data Transfer Status + description: What's the status of the data transfer? + placeholder: | + Please share the output of `lotus-miner data-transfers list -v` commands for the deal(s) in question. + validations: + required: true +- type: textarea + id: logging + attributes: + label: Logging Information + description: Please link to the whole of the miner logs on your side of the transaction. You can upload the logs to a [gist](https://gist.github.com). 
+ validations: + required: true +- type: textarea + id: RepoSteps + attributes: + label: Repo Steps (optional) + description: "Steps to reproduce the behavior" + value: | + 1. Run '...' + 2. Do '...' + 3. See error '...' + ... + render: bash + validations: + required: false \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/m1_bug_report_non_deal.yml b/.github/ISSUE_TEMPLATE/m1_bug_report_non_deal.yml new file mode 100644 index 00000000000..ede3593e548 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/m1_bug_report_non_deal.yml @@ -0,0 +1,81 @@ +name: "M1 Bug Report For Non Deal Making Issue" +description: "File a bug report around non deal making issue for the M1 releases" +labels: [need/triage, kind/bug, M1-release] +body: +- type: checkboxes + id: checklist + attributes: + label: Checklist + description: Please check off the following boxes before continuing to file a bug report! + options: + - label: This is **not** a question or a support request. If you have any lotus related questions, please ask in the [lotus forum](https://github.com/filecoin-project/lotus/discussions). + required: true + - label: I **am** reporting a bug w.r.t one of the [M1 tags](https://github.com/filecoin-project/lotus/discussions/6852#discussioncomment-1043951). If not, choose another issue option [here](https://github.com/filecoin-project/lotus/issues/new/choose). + required: true + - label: I am **not** reporting a bug around deal making. If yes, create a [M1 Bug Report For Deal Making](https://github.com/filecoin-project/lotus/issues/new?assignees=&labels=need%2Ftriage%2Ckind%2Fbug%2CM1-release&template=m1_bug_report_deal.yml). + required: true + - label: I **have** searched on the [issue tracker](https://github.com/filecoin-project/lotus/issues) and the [lotus forum](https://github.com/filecoin-project/lotus/discussions), and there is no existing related issue or discussion. 
+ required: true +- type: dropdown + id: component-and-area + validations: + required: true + attributes: + label: Lotus component + description: Please select the lotus component you are filing a bug for + options: + - lotus daemon - chain sync **with** splitstore enabled + - lotus daemon - chain sync **without** splitstore enabled + - lotus miner - mining and block production + - lotus miner/worker - sealing + - lotus miner - proving(WindowPoSt) + - lotus client + - lotus JSON-RPC API + - lotus message management (mpool) + - Other +- type: textarea + id: version + attributes: + label: Lotus Tag and Version + description: Enter the lotus tag, output of `lotus version` and `lotus-miner version`. + validations: + required: true +- type: textarea + id: Description + attributes: + label: Describe the Bug + description: | + This is where you get to tell us what went wrong, when doing so, please try to provide a clear and concise description of the bug with all related information: + * What you were doing when you experienced the bug? + * Any *error* messages you saw, *where* you saw them, and what you believe may have caused them (if you have any ideas). + * What is the expected behaviour? + * For sealing issues, include the output of `lotus-miner sectors status --log ` for the failed sector(s). + * For proving issues, include the output of `lotus-miner proving` info. + render: bash + validations: + required: true +- type: textarea + id: extraInfo + attributes: + label: Logging Information + description: | + Please provide debug logs of the problem, remember you can get set log level control for: + * lotus: use `lotus log list` to get all log systems available and set level by `lotus log set-level`. An example can be found [here](https://docs.filecoin.io/get-started/lotus/configuration-and-advanced-usage/#log-level-control). 
+ * lotus-miner:`lotus-miner log list` to get all log systems available and set level by `lotus-miner log set-level + If you don't provide detailed logs when you raise the issue it will almost certainly be the first request I make before further diagnosing the problem. + render: bash + validations: + required: true +- type: textarea + id: RepoSteps + attributes: + label: Repo Steps + description: "Steps to reproduce the behavior" + value: | + 1. Run '...' + 2. Do '...' + 3. See error '...' + ... + render: bash + validations: + required: false \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/mining-issues.md b/.github/ISSUE_TEMPLATE/mining-issues.md deleted file mode 100644 index 434e160d411..00000000000 --- a/.github/ISSUE_TEMPLATE/mining-issues.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -name: Mining Issues -about: Create a report for help with mining failures. -title: "[Mining Issue]" -labels: hint/needs-triaging, area/mining -assignees: '' - ---- - -> Note: For security-related bugs/issues, please follow the [security policy](https://github.com/filecoin-project/lotus/security/policy). - -Please provide all the information requested here to help us troubleshoot "mining/WinningPoSt failed" issues. -If the information requested is missing, you may be asked you to provide it. - -**Describe the problem** -A brief description of the problem you encountered while mining new blocks. - -**Version** - -The output of `lotus --version`. - -**Setup** - -You miner and daemon setup, including what hardware do you use, your environment variable settings, how do you run your miner and worker, do you use GPU and etc. - -**Lotus daemon and miner logs** - -Please go through the logs of your daemon and miner, and include screenshots of any error/warning-like messages you find, highlighting the one has "winning post" in it. 
- -Alternatively please upload full log files and share a link here - -** Code modifications ** - -If you have modified parts of lotus, please describe which areas were modified, -and the scope of those modifications diff --git a/.github/ISSUE_TEMPLATE/proving-issues.md b/.github/ISSUE_TEMPLATE/proving-issues.md deleted file mode 100644 index 6187d546ee0..00000000000 --- a/.github/ISSUE_TEMPLATE/proving-issues.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -name: Proving Issues -about: Create a report for help with proving failures. -title: "[Proving Issue]" -labels: area/proving, hint/needs-triaging -assignees: '' - ---- - -> Note: For security-related bugs/issues, please follow the [security policy](https://github.com/filecoin-project/lotus/security/policy). - -Please provide all the information requested here to help us troubleshoot "proving/window PoSt failed" issues. -If the information requested is missing, we will probably have to just ask you to provide it anyway, -before we can help debug. - -**Describe the problem** -A brief description of the problem you encountered while proving the storage. - -**Version** - -The output of `lotus --version`. - -**Setup** - -You miner and daemon setup, including what hardware do you use, your environment variable settings, how do you run your miner and worker, do you use GPU and etc. - -**Proving status** - -The output of `lotus-miner proving` info. - -**Lotus miner logs** - -Please go through the logs of your miner, and include screenshots of any error-like messages you find, highlighting the one has "window post" in it. 
- -Alternatively please upload full log files and share a link here - -**Lotus miner diagnostic info** - -Please collect the following diagnostic information, and share a link here - -* lotus-miner diagnostic info `lotus-miner info all > allinfo.txt` - -** Code modifications ** - -If you have modified parts of lotus, please describe which areas were modified, -and the scope of those modifications diff --git a/.github/ISSUE_TEMPLATE/sealing-issues.md b/.github/ISSUE_TEMPLATE/sealing-issues.md deleted file mode 100644 index 7511849d3db..00000000000 --- a/.github/ISSUE_TEMPLATE/sealing-issues.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -name: Sealing Issues -about: Create a report for help with sealing (commit) failures. -title: "[Sealing Issue]" -labels: hint/needs-triaging, area/sealing -assignees: '' - ---- - -> Note: For security-related bugs/issues, please follow the [security policy](https://github.com/filecoin-project/lotus/security/policy). - -Please provide all the information requested here to help us troubleshoot "commit failed" issues. -If the information requested is missing, we will probably have to just ask you to provide it anyway, -before we can help debug. - -**Describe the problem** -A brief description of the problem you encountered while sealing a sector. - -**Version** - -The output of `lotus --version`. - -**Setup** - -You miner and daemon setup, including what hardware do you use, your environment variable settings, how do you run your miner and worker, do you use GPU and etc. - -**Commands** - -Commands you ran. - -**Sectors status** - -The output of `lotus-miner sectors status --log ` for the failed sector(s). - -**Lotus miner logs** - -Please go through the logs of your miner, and include screenshots of any error-like messages you find. 
- -Alternatively please upload full log files and share a link here - -**Lotus miner diagnostic info** - -Please collect the following diagnostic information, and share a link here - -* lotus-miner diagnostic info `lotus-miner info all > allinfo` - -** Code modifications ** - -If you have modified parts of lotus, please describe which areas were modified, -and the scope of those modifications diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml new file mode 100644 index 00000000000..16a9feebeac --- /dev/null +++ b/.github/workflows/stale.yml @@ -0,0 +1,33 @@ +name: Close and mark stale issue + +on: + schedule: + - cron: '0 12 * * *' + +jobs: + stale: + + runs-on: ubuntu-latest + permissions: + issues: write + pull-requests: write + + steps: + - uses: actions/stale@v3 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + stale-issue-message: 'Oops, seems like we needed more information for this issue, please comment with more details or this issue will be closed in 24 hours.' + close-issue-message: 'This issue was closed because it is missing author input.' + stale-pr-message: 'Thank you for submitting the PR and contributing to lotus! Lotus maintainers need more of your input before merging it, please address the suggested changes or reply to the comments or this PR will be closed in 48 hours. You are always more than welcome to reopen the PR later as well!' + close-pr-message: 'This PR was closed because it is missing author input. Please feel free to reopen the PR when you get to it! Thank you for your interest in contributing to lotus!' 
+ stale-issue-label: 'kind/stale' + stale-pr-label: 'kind/stale' + any-of-labels: 'need/author-input' + days-before-issue-stale: 3 + days-before-issue-close: 1 + days-before-pr-stale: 5 + days-before-pr-close: 2 + remove-stale-when-updated: true + enable-statistics: true + + diff --git a/CHANGELOG.md b/CHANGELOG.md index 2aca7c841f4..457b0e1c0e3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,181 @@ # Lotus changelog +# 1.11.1 / 2021-08-16 + +> Note: for discussion about this release, please comment [here](https://github.com/filecoin-project/lotus/discussions/6904) + +This is a **highly recommended** but optional Lotus v1.11.1 release that introduces many deal making and datastore improvements and new features along with other bug fixes. + +## Highlights +- ⭐️⭐️⭐️[**lotus-miner market subsystem**](https://docs.filecoin.io/mine/lotus/split-markets-miners/#frontmatter-title) is introduced in this release! It is **highly recommended** for storage providers to run markets processes on a separate machine! Doing so, only this machine needs to expose public ports for deal making. This also means that the other miner operations can now be completely isolated from the deal making processes and storage providers can stop and restart the markets process without affecting an ongoing Winning/Window PoSt! + - More details on the concepts, architecture and how to split the market process can be found [here](https://docs.filecoin.io/mine/lotus/split-markets-miners/#concepts). + - Based on your system setup (running on separate machines, same machine and so on), please see the suggested practice by community members [here](https://github.com/filecoin-project/lotus/discussions/7047#discussion-3515335). + - Note: if you are running lotus-worker on a different machine, you will need to set `MARKETS_API_INFO` for certain CLI to work properly. This will be improved by #7072.
+ - Huge thanks to MinerX fellows for [helping test the implementation, reporting the issues so they were fixed by now and providing feedback](https://github.com/filecoin-project/lotus/discussions/6861) to user docs in the past three weeks! +- Config for collateral from miner available balance ([filecoin-project/lotus#6629](https://github.com/filecoin-project/lotus/pull/6629)) + - Better control your sector collateral payment by setting `CollateralFromMinerBalance`, `AvailableBalanceBuffer` and `DisableCollateralFallback`. + - `CollateralFromMinerBalance`: whether to use available miner balance for sector collateral instead of sending it with each message, default is `false`. + - `AvailableBalanceBuffer`: minimum available balance to keep in the miner actor before sending it with messages, default is 0FIL. + - `DisableCollateralFallback`: whether to send collateral with messages even if there is no available balance in the miner actor, default is `false`. +- Config for deal publishing control addresses ([filecoin-project/lotus#6697](https://github.com/filecoin-project/lotus/pull/6697)) + - Set `DealPublishControl` to set the wallet used for sending `PublishStorageDeals` messages, instructions [here](https://docs.filecoin.io/mine/lotus/miner-addresses/#control-addresses). +- Config UX improvements ([filecoin-project/lotus#6848](https://github.com/filecoin-project/lotus/pull/6848)) + - You can now preview the default and updated node config by running `lotus/lotus-miner config default/updated` + +## New Features + - ⭐️⭐️⭐️ Support standalone miner-market process ([filecoin-project/lotus#6356](https://github.com/filecoin-project/lotus/pull/6356)) + - **⭐️⭐️ Experimental** [Splitstore](https://github.com/filecoin-project/lotus/blob/master/blockstore/splitstore/README.md) (more details coming in v1.11.2! Stay tuned! Join the discussion [here](https://github.com/filecoin-project/lotus/discussions/5788) if you have questions!)
: + - Improve splitstore warmup ([filecoin-project/lotus#6867](https://github.com/filecoin-project/lotus/pull/6867)) + - Moving GC for badger ([filecoin-project/lotus#6854](https://github.com/filecoin-project/lotus/pull/6854)) + - splitstore shed utils ([filecoin-project/lotus#6811](https://github.com/filecoin-project/lotus/pull/6811)) + - fix warmup by decoupling state from message receipt walk ([filecoin-project/lotus#6841](https://github.com/filecoin-project/lotus/pull/6841)) + - Splitstore: support on-disk marksets using badger ([filecoin-project/lotus#6833](https://github.com/filecoin-project/lotus/pull/6833)) + - cache loaded block messages ([filecoin-project/lotus#6760](https://github.com/filecoin-project/lotus/pull/6760)) + - Splitstore: add retention policy option for keeping messages in the hotstore ([filecoin-project/lotus#6775](https://github.com/filecoin-project/lotus/pull/6775)) + - Introduce the LOTUS_CHAIN_BADGERSTORE_DISABLE_FSYNC envvar ([filecoin-project/lotus#6817](https://github.com/filecoin-project/lotus/pull/6817)) + - Splitstore: add support for protecting out of chain references in the blockstore ([filecoin-project/lotus#6777](https://github.com/filecoin-project/lotus/pull/6777)) + - Implement exposed splitstore ([filecoin-project/lotus#6762](https://github.com/filecoin-project/lotus/pull/6762)) + - Splitstore code reorg ([filecoin-project/lotus#6756](https://github.com/filecoin-project/lotus/pull/6756)) + - Splitstore: Some small fixes ([filecoin-project/lotus#6754](https://github.com/filecoin-project/lotus/pull/6754)) + - Splitstore Enhanchements ([filecoin-project/lotus#6474](https://github.com/filecoin-project/lotus/pull/6474)) + - lotus-shed: initial export cmd for markets related metadata ([filecoin-project/lotus#6840](https://github.com/filecoin-project/lotus/pull/6840)) + - add a very verbose -vv flag to lotus and lotus-miner. 
([filecoin-project/lotus#6888](https://github.com/filecoin-project/lotus/pull/6888)) + - Add allocated sectorid vis ([filecoin-project/lotus#4638](https://github.com/filecoin-project/lotus/pull/4638)) + - add a command for compacting sector numbers bitfield ([filecoin-project/lotus#4640](https://github.com/filecoin-project/lotus/pull/4640)) + - Run `lotus-miner actor compact-allocated` to compact sector number allocations to reduce the size of the allocated sector number bitfield. + - Add ChainGetMessagesInTipset API ([filecoin-project/lotus#6642](https://github.com/filecoin-project/lotus/pull/6642)) + - Handle the --color flag via proper global state ([filecoin-project/lotus#6743](https://github.com/filecoin-project/lotus/pull/6743)) + - Enable color by default only if os.Stdout is a TTY ([filecoin-project/lotus#6696](https://github.com/filecoin-project/lotus/pull/6696)) + - Stop outputing ANSI color on non-TTY ([filecoin-project/lotus#6694](https://github.com/filecoin-project/lotus/pull/6694)) + - Envvar to disable slash filter ([filecoin-project/lotus#6620](https://github.com/filecoin-project/lotus/pull/6620)) + - commit batch: AggregateAboveBaseFee config ([filecoin-project/lotus#6650](https://github.com/filecoin-project/lotus/pull/6650)) + - shed tool to estimate aggregate network fees ([filecoin-project/lotus#6631](https://github.com/filecoin-project/lotus/pull/6631)) + +## Bug Fixes + - Fix padding of deals, which only partially shipped in #5988 ([filecoin-project/lotus#6683](https://github.com/filecoin-project/lotus/pull/6683)) + - fix deal concurrency test failures by upgrading graphsync and others ([filecoin-project/lotus#6724](https://github.com/filecoin-project/lotus/pull/6724)) + - fix: on randomness change, use new rand ([filecoin-project/lotus#6805](https://github.com/filecoin-project/lotus/pull/6805)) - fix: always check if StateSearchMessage returns nil ([filecoin-project/lotus#6802](https://github.com/filecoin-project/lotus/pull/6802)) + - test: 
fix flaky window post tests ([filecoin-project/lotus#6804](https://github.com/filecoin-project/lotus/pull/6804)) + - wrap close(wait) with sync.Once to avoid panic ([filecoin-project/lotus#6800](https://github.com/filecoin-project/lotus/pull/6800)) + - fixes #6786 segfault ([filecoin-project/lotus#6787](https://github.com/filecoin-project/lotus/pull/6787)) + - ClientRetrieve stops on cancel([filecoin-project/lotus#6739](https://github.com/filecoin-project/lotus/pull/6739)) + - Fix bugs in sectors extend --v1-sectors ([filecoin-project/lotus#6066](https://github.com/filecoin-project/lotus/pull/6066)) + - fix "lotus-seed genesis car" error "merkledag: not found" ([filecoin-project/lotus#6688](https://github.com/filecoin-project/lotus/pull/6688)) + - Get retrieval pricing input should not error out on a deal state fetch ([filecoin-project/lotus#6679](https://github.com/filecoin-project/lotus/pull/6679)) + - Fix more CID double-encoding as hex ([filecoin-project/lotus#6680](https://github.com/filecoin-project/lotus/pull/6680)) + - storage: Fix FinalizeSector with sectors in stoage paths ([filecoin-project/lotus#6653](https://github.com/filecoin-project/lotus/pull/6653)) + - Fix tiny error in check-client-datacap ([filecoin-project/lotus#6664](https://github.com/filecoin-project/lotus/pull/6664)) + - Fix: precommit_batch method used the wrong cfg.CommitBatchWait ([filecoin-project/lotus#6658](https://github.com/filecoin-project/lotus/pull/6658)) + - fix ticket expiration check ([filecoin-project/lotus#6635](https://github.com/filecoin-project/lotus/pull/6635)) + - remove precommit check in handleCommitFailed ([filecoin-project/lotus#6634](https://github.com/filecoin-project/lotus/pull/6634)) + - fix prove commit aggregate send token amount ([filecoin-project/lotus#6625](https://github.com/filecoin-project/lotus/pull/6625)) + +## Improvements + - Eliminate inefficiency in markets logging ([filecoin-project/lotus#6895](https://github.com/filecoin-project/lotus/pull/6895)) 
+ - rename `cmd/lotus{-storage=>}-miner` to match binary. ([filecoin-project/lotus#6886](https://github.com/filecoin-project/lotus/pull/6886)) + - fix racy TestSimultanenousTransferLimit. ([filecoin-project/lotus#6862](https://github.com/filecoin-project/lotus/pull/6862)) + - ValidateBlock: Assert that block header height's are greater than parents ([filecoin-project/lotus#6872](https://github.com/filecoin-project/lotus/pull/6872)) + - feat: Don't panic when api impl is nil ([filecoin-project/lotus#6857](https://github.com/filecoin-project/lotus/pull/6857)) + - add docker-compose file ([filecoin-project/lotus#6544](https://github.com/filecoin-project/lotus/pull/6544)) + - easy way to make install app ([filecoin-project/lotus#5183](https://github.com/filecoin-project/lotus/pull/5183)) + - api: Separate the Net interface from Common ([filecoin-project/lotus#6627](https://github.com/filecoin-project/lotus/pull/6627)) - add StateReadState to gateway api ([filecoin-project/lotus#6818](https://github.com/filecoin-project/lotus/pull/6818)) + - add SealProof in SectorBuilder ([filecoin-project/lotus#6815](https://github.com/filecoin-project/lotus/pull/6815)) + - sealing: Handle preCommitParams errors more correctly ([filecoin-project/lotus#6763](https://github.com/filecoin-project/lotus/pull/6763)) + - ClientFindData: always fetch peer id from chain ([filecoin-project/lotus#6807](https://github.com/filecoin-project/lotus/pull/6807)) + - test: handle null blocks in TestForkRefuseCall ([filecoin-project/lotus#6758](https://github.com/filecoin-project/lotus/pull/6758)) + - Add more deal details to lotus-miner info ([filecoin-project/lotus#6708](https://github.com/filecoin-project/lotus/pull/6708)) + - add election backtest ([filecoin-project/lotus#5950](https://github.com/filecoin-project/lotus/pull/5950)) + - add dollar sign ([filecoin-project/lotus#6690](https://github.com/filecoin-project/lotus/pull/6690)) + - get-actor cli spelling fix 
([filecoin-project/lotus#6681](https://github.com/filecoin-project/lotus/pull/6681)) + - polish(statetree): accept a context in statetree diff for timeouts ([filecoin-project/lotus#6639](https://github.com/filecoin-project/lotus/pull/6639)) + - Add helptext to lotus chain export ([filecoin-project/lotus#6672](https://github.com/filecoin-project/lotus/pull/6672)) + - add an incremental nonce itest. ([filecoin-project/lotus#6663](https://github.com/filecoin-project/lotus/pull/6663)) + - commit batch: Initialize the FailedSectors map ([filecoin-project/lotus#6647](https://github.com/filecoin-project/lotus/pull/6647)) + - Fast-path retry submitting commit aggregate if commit is still valid ([filecoin-project/lotus#6638](https://github.com/filecoin-project/lotus/pull/6638)) + - Reuse timers in sealing batch logic ([filecoin-project/lotus#6636](https://github.com/filecoin-project/lotus/pull/6636)) + +## Dependency Updates + - Update to proof v8.0.3 ([filecoin-project/lotus#6890](https://github.com/filecoin-project/lotus/pull/6890)) + - update to go-fil-market v1.6.0 ([filecoin-project/lotus#6885](https://github.com/filecoin-project/lotus/pull/6885)) + - Bump go-multihash, adjust test for supported version ([filecoin-project/lotus#6674](https://github.com/filecoin-project/lotus/pull/6674)) + - github.com/filecoin-project/go-data-transfer (v1.6.0 -> v1.7.2): + - github.com/filecoin-project/go-fil-markets (v1.5.0 -> v1.6.2): + - github.com/filecoin-project/go-padreader (v0.0.0-20200903213702-ed5fae088b20 -> v0.0.0-20210723183308-812a16dc01b1) + - github.com/filecoin-project/go-state-types (v0.1.1-0.20210506134452-99b279731c48 -> v0.1.1-0.20210810190654-139e0e79e69e) + - github.com/filecoin-project/go-statemachine (v0.0.0-20200925024713-05bd7c71fbfe -> v1.0.1) + - update go-libp2p-pubsub to v0.5.0 ([filecoin-project/lotus#6764](https://github.com/filecoin-project/lotus/pull/6764)) + +## Others + - Master->v1.11.1 
([filecoin-project/lotus#7051](https://github.com/filecoin-project/lotus/pull/7051)) + - v1.11.1-rc2 ([filecoin-project/lotus#6966](https://github.com/filecoin-project/lotus/pull/6966)) + - Backport master -> v1.11.1 ([filecoin-project/lotus#6965](https://github.com/filecoin-project/lotus/pull/6965)) + - Fixes in master -> release ([filecoin-project/lotus#6933](https://github.com/filecoin-project/lotus/pull/6933)) + - Add changelog for v1.11.1-rc1 and bump the version ([filecoin-project/lotus#6900](https://github.com/filecoin-project/lotus/pull/6900)) + - Fix merge release -> v1.11.1 ([filecoin-project/lotus#6897](https://github.com/filecoin-project/lotus/pull/6897)) + - Update RELEASE_ISSUE_TEMPLATE.md ([filecoin-project/lotus#6880](https://github.com/filecoin-project/lotus/pull/6880)) + - Add github actions for staled pr ([filecoin-project/lotus#6879](https://github.com/filecoin-project/lotus/pull/6879)) + - Update issue templates and add templates for M1 ([filecoin-project/lotus#6856](https://github.com/filecoin-project/lotus/pull/6856)) + - Fix links in issue templates + - Update issue templates to forms ([filecoin-project/lotus#6798](https://github.com/filecoin-project/lotus/pull/6798)) + - Nerpa v13 upgrade ([filecoin-project/lotus#6837](https://github.com/filecoin-project/lotus/pull/6837)) + - add docker-compose file ([filecoin-project/lotus#6544](https://github.com/filecoin-project/lotus/pull/6544)) + - release -> master ([filecoin-project/lotus#6828](https://github.com/filecoin-project/lotus/pull/6828)) + - Resurrect CODEOWNERS, but for maintainers group ([filecoin-project/lotus#6773](https://github.com/filecoin-project/lotus/pull/6773)) + - Master disclaimer ([filecoin-project/lotus#6757](https://github.com/filecoin-project/lotus/pull/6757)) + - Create stale.yml ([filecoin-project/lotus#6747](https://github.com/filecoin-project/lotus/pull/6747)) + - Release template: Update all testnet infra at once
([filecoin-project/lotus#6710](https://github.com/filecoin-project/lotus/pull/6710)) + - Release Template: remove binary validation step ([filecoin-project/lotus#6709](https://github.com/filecoin-project/lotus/pull/6709)) + - Reset of the interop network ([filecoin-project/lotus#6689](https://github.com/filecoin-project/lotus/pull/6689)) + - Update version.go to 1.11.1 ([filecoin-project/lotus#6621](https://github.com/filecoin-project/lotus/pull/6621)) + +## Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| @vyzo | 313 | +8928/-6010 | 415 | +| @nonsense | 103 | +6041/-4041 | 304 | +| @magik6k | 37 | +3851/-1611 | 146 | +| @ZenGround0 | 24 | +1693/-1394 | 95 | +| @placer14 | 1 | +2310/-578 | 8 | +| @dirkmc | 7 | +1154/-726 | 29 | +| @raulk | 44 | +969/-616 | 141 | +| @jennijuju | 15 | +682/-354 | 47 | +| @ribasushi | 18 | +469/-273 | 64 | +| @coryschwartz | 5 | +576/-135 | 14 | +| @hunjixin | 7 | +404/-82 | 19 | +| @dirkmc | 17 | +348/-47 | 17 | +| @tchardin | 2 | +262/-34 | 5 | +| @aarshkshah1992 | 9 | +233/-63 | 44 | +| @Kubuxu | 4 | +254/-16 | 4 | +| @hannahhoward | 6 | +163/-75 | 8 | +| @whyrusleeping | 4 | +157/-16 | 9 | +| @Whyrusleeping | 2 | +87/-66 | 10 | +| @arajasek | 10 | +81/-53 | 13 | +| @zgfzgf | 2 | +104/-4 | 2 | +| @aarshkshah1992 | 6 | +85/-19 | 10 | +| @llifezou | 4 | +59/-20 | 4 | +| @Stebalien | 7 | +47/-17 | 9 | +| @johnli-helloworld | 3 | +46/-15 | 5 | +| @frrist | 1 | +28/-23 | 2 | +| @hannahhoward | 4 | +46/-5 | 11 | +| @Jennifer | 4 | +31/-2 | 4 | +| @wangchao | 1 | +1/-27 | 1 | +| @jennijuju | 2 | +7/-21 | 2 | +| @chadwick2143 | 1 | +15/-1 | 1 | +| @Jerry | 2 | +9/-4 | 2 | +| Steve Loeppky | 2 | +12/-0 | 2 | +| David Dias | 1 | +9/-0 | 1 | +| dependabot[bot] | 1 | +3/-3 | 1 | +| zhoutian527 | 1 | +2/-2 | 1 | +| xloem | 1 | +4/-0 | 1 | +| @travisperson| 2 | +2/-2 | 3 | +| Liviu Damian | 2 | +2/-2 | 2 | +| @jimpick | 2 | +2/-2 | 2 | +| Frank | 1 | +3/-0 | 1 | +| 
turuslan | 1 | +1/-1 | 1 | +| Kirk Baird | 1 | +0/-0 | 1 | + + # 1.11.0 / 2021-07-22 This is a **highly recommended** release of Lotus that have many bug fixes, improvements and new features. @@ -221,8 +397,6 @@ Contributors | @zhoutian527 | 1 | +2/-2 | 1 | | @ribasushi| 1 | +1/-1 | 1 | -||||||| 764fa9dae -======= # 1.10.1 / 2021-07-05 This is an optional but **highly recommended** release of Lotus for lotus miners that has many bug fixes and improvements based on the feedback we got from the community since HyperDrive. @@ -256,6 +430,11 @@ Contributors | @ribasushi| 1 | +1/-1 | 1 | +<<<<<<< HEAD +||||||| merged common ancestors +>>>>>>>>> Temporary merge branch 2 +======= +>>>>>>> releases >>>>>>> releases # 1.10.0 / 2021-06-23 diff --git a/Dockerfile.lotus b/Dockerfile.lotus index 0b43ef8063e..72c60930592 100644 --- a/Dockerfile.lotus +++ b/Dockerfile.lotus @@ -36,7 +36,7 @@ WORKDIR /opt/filecoin ARG RUSTFLAGS="" ARG GOFLAGS="" -RUN make deps lotus lotus-miner lotus-worker lotus-shed lotus-chainwatch lotus-stats +RUN make lotus lotus-miner lotus-worker lotus-shed lotus-wallet lotus-gateway FROM ubuntu:20.04 AS base @@ -56,19 +56,173 @@ COPY --from=builder /usr/lib/x86_64-linux-gnu/libOpenCL.so.1 /lib/ RUN useradd -r -u 532 -U fc +### FROM base AS lotus MAINTAINER Lotus Development Team -COPY --from=builder /opt/filecoin/lotus /usr/local/bin/ +COPY --from=builder /opt/filecoin/lotus /usr/local/bin/ COPY --from=builder /opt/filecoin/lotus-shed /usr/local/bin/ +COPY scripts/docker-lotus-entrypoint.sh / ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters ENV LOTUS_PATH /var/lib/lotus +ENV LOTUS_JAEGER_AGENT_HOST 127.0.0.1 +ENV LOTUS_JAEGER_AGENT_PORT 6831 +ENV DOCKER_LOTUS_IMPORT_SNAPSHOT https://fil-chain-snapshots-fallback.s3.amazonaws.com/mainnet/minimal_finality_stateroots_latest.car +ENV DOCKER_LOTUS_IMPORT_WALLET "" + +RUN mkdir /var/lib/lotus /var/tmp/filecoin-proof-parameters +RUN chown fc: /var/lib/lotus /var/tmp/filecoin-proof-parameters + 
+VOLUME /var/lib/lotus +VOLUME /var/tmp/filecoin-proof-parameters + +USER fc + +EXPOSE 1234 + +ENTRYPOINT ["/docker-lotus-entrypoint.sh"] + +CMD ["-help"] + +### +FROM base AS lotus-wallet +MAINTAINER Lotus Development Team + +COPY --from=builder /opt/filecoin/lotus-wallet /usr/local/bin/ -RUN mkdir /var/lib/lotus /var/tmp/filecoin-proof-parameters && chown fc /var/lib/lotus /var/tmp/filecoin-proof-parameters +ENV WALLET_PATH /var/lib/lotus-wallet +ENV LOTUS_JAEGER_AGENT_HOST 127.0.0.1 +ENV LOTUS_JAEGER_AGENT_PORT 6831 + +RUN mkdir /var/lib/lotus-wallet +RUN chown fc: /var/lib/lotus-wallet + +VOLUME /var/lib/lotus-wallet USER fc -ENTRYPOINT ["/usr/local/bin/lotus"] +EXPOSE 1777 + +ENTRYPOINT ["/usr/local/bin/lotus-wallet"] CMD ["-help"] + +### +FROM base AS lotus-gateway +MAINTAINER Lotus Development Team + +COPY --from=builder /opt/filecoin/lotus-gateway /usr/local/bin/ + +ENV LOTUS_JAEGER_AGENT_HOST 127.0.0.1 +ENV LOTUS_JAEGER_AGENT_PORT 6831 +ENV FULLNODE_API_INFO /ip4/127.0.0.1/tcp/1234/http + +USER fc + +EXPOSE 1234 + +ENTRYPOINT ["/usr/local/bin/lotus-gateway"] + +CMD ["-help"] + + +### +FROM base AS lotus-miner +MAINTAINER Lotus Development Team + +COPY --from=builder /opt/filecoin/lotus-miner /usr/local/bin/ +COPY scripts/docker-lotus-miner-entrypoint.sh / + +ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters +ENV FULLNODE_API_INFO /ip4/127.0.0.1/tcp/1234/http +ENV LOTUS_MINER_PATH /var/lib/lotus-miner +ENV LOTUS_JAEGER_AGENT_HOST 127.0.0.1 +ENV LOTUS_JAEGER_AGENT_PORT 6831 +ENV DOCKER_LOTUS_MINER_INIT true + +RUN mkdir /var/lib/lotus-miner /var/tmp/filecoin-proof-parameters +RUN chown fc: /var/lib/lotus-miner /var/tmp/filecoin-proof-parameters + +VOLUME /var/lib/lotus-miner +VOLUME /var/tmp/filecoin-proof-parameters + +USER fc + +EXPOSE 2345 + +ENTRYPOINT ["/docker-lotus-miner-entrypoint.sh"] + +CMD ["-help"] + + +### +FROM base AS lotus-worker +MAINTAINER Lotus Development Team + +COPY --from=builder /opt/filecoin/lotus-worker /usr/local/bin/ 
+ +ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters +ENV MINER_API_INFO /ip4/127.0.0.1/tcp/2345/http +ENV LOTUS_WORKER_PATH /var/lib/lotus-worker +ENV LOTUS_JAEGER_AGENT_HOST 127.0.0.1 +ENV LOTUS_JAEGER_AGENT_PORT 6831 + +RUN mkdir /var/lib/lotus-worker +RUN chown fc: /var/lib/lotus-worker + +VOLUME /var/lib/lotus-worker + +USER fc + +EXPOSE 3456 + +ENTRYPOINT ["/usr/local/bin/lotus-worker"] + +CMD ["-help"] + + +### +from base as lotus-all-in-one + +ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters +ENV FULLNODE_API_INFO /ip4/127.0.0.1/tcp/1234/http +ENV LOTUS_JAEGER_AGENT_HOST 127.0.0.1 +ENV LOTUS_JAEGER_AGENT_PORT 6831 +ENV LOTUS_MINER_PATH /var/lib/lotus-miner +ENV LOTUS_PATH /var/lib/lotus +ENV LOTUS_WORKER_PATH /var/lib/lotus-worker +ENV MINER_API_INFO /ip4/127.0.0.1/tcp/2345/http +ENV WALLET_PATH /var/lib/lotus-wallet +ENV DOCKER_LOTUS_IMPORT_SNAPSHOT https://fil-chain-snapshots-fallback.s3.amazonaws.com/mainnet/minimal_finality_stateroots_latest.car +ENV DOCKER_LOTUS_MINER_INIT true + +COPY --from=builder /opt/filecoin/lotus /usr/local/bin/ +COPY --from=builder /opt/filecoin/lotus-shed /usr/local/bin/ +COPY --from=builder /opt/filecoin/lotus-wallet /usr/local/bin/ +COPY --from=builder /opt/filecoin/lotus-gateway /usr/local/bin/ +COPY --from=builder /opt/filecoin/lotus-miner /usr/local/bin/ +COPY --from=builder /opt/filecoin/lotus-worker /usr/local/bin/ + +RUN mkdir /var/tmp/filecoin-proof-parameters +RUN mkdir /var/lib/lotus +RUN mkdir /var/lib/lotus-miner +RUN mkdir /var/lib/lotus-worker +RUN mkdir /var/lib/lotus-wallet +RUN chown fc: /var/tmp/filecoin-proof-parameters +RUN chown fc: /var/lib/lotus +RUN chown fc: /var/lib/lotus-miner +RUN chown fc: /var/lib/lotus-worker +RUN chown fc: /var/lib/lotus-wallet + + +VOLUME /var/tmp/filecoin-proof-parameters +VOLUME /var/lib/lotus +VOLUME /var/lib/lotus-miner +VOLUME /var/lib/lotus-worker +VOLUME /var/lib/lotus-wallet + +EXPOSE 1234 +EXPOSE 2345 +EXPOSE 3456 +EXPOSE 1777 diff --git 
a/Makefile b/Makefile index 4e03d1b6a1c..77fd38b9ee7 100644 --- a/Makefile +++ b/Makefile @@ -92,7 +92,7 @@ BINS+=lotus lotus-miner: $(BUILD_DEPS) rm -f lotus-miner - go build $(GOFLAGS) -o lotus-miner ./cmd/lotus-storage-miner + go build $(GOFLAGS) -o lotus-miner ./cmd/lotus-miner .PHONY: lotus-miner BINS+=lotus-miner @@ -131,6 +131,9 @@ install-miner: install-worker: install -C ./lotus-worker /usr/local/bin/lotus-worker +install-app: + install -C ./$(APP) /usr/local/bin/$(APP) + # TOOLS lotus-seed: $(BUILD_DEPS) @@ -333,6 +336,9 @@ api-gen: goimports -w api .PHONY: api-gen +cfgdoc-gen: + go run ./node/config/cfgdocgen > ./node/config/doc_gen.go + appimage: lotus rm -rf appimage-builder-cache || true rm AppDir/io.filecoin.lotus.desktop || true @@ -370,7 +376,7 @@ docsgen-openrpc-worker: docsgen-openrpc-bin .PHONY: docsgen docsgen-md-bin docsgen-openrpc-bin -gen: actors-gen type-gen method-gen docsgen api-gen circleci +gen: actors-gen type-gen method-gen cfgdoc-gen docsgen api-gen circleci @echo ">>> IF YOU'VE MODIFIED THE CLI, REMEMBER TO ALSO MAKE docsgen-cli" .PHONY: gen diff --git a/README.md b/README.md index 0218e87e952..a44c690066c 100644 --- a/README.md +++ b/README.md @@ -18,6 +18,8 @@ Lotus is an implementation of the Filecoin Distributed Storage Network. For more ## Building & Documentation +> Note: The default `master` branch is the dev branch, please use with caution. For the latest stable version, checkout the most recent [`Latest release`](https://github.com/filecoin-project/lotus/releases). + For complete instructions on how to build, install and setup lotus, please visit [https://docs.filecoin.io/get-started/lotus](https://docs.filecoin.io/get-started/lotus/). Basic build instructions can be found further down in this readme. 
## Reporting a Vulnerability diff --git a/api/api_common.go b/api/api_common.go index 2f27eb95f57..629299db3b6 100644 --- a/api/api_common.go +++ b/api/api_common.go @@ -4,15 +4,11 @@ import ( "context" "fmt" + apitypes "github.com/filecoin-project/lotus/api/types" + "github.com/google/uuid" "github.com/filecoin-project/go-jsonrpc/auth" - metrics "github.com/libp2p/go-libp2p-core/metrics" - "github.com/libp2p/go-libp2p-core/network" - "github.com/libp2p/go-libp2p-core/peer" - protocol "github.com/libp2p/go-libp2p-core/protocol" - - apitypes "github.com/filecoin-project/lotus/api/types" ) // MODIFYING THE API INTERFACE @@ -27,55 +23,23 @@ import ( // * Generate openrpc blobs type Common interface { - // MethodGroup: Auth AuthVerify(ctx context.Context, token string) ([]auth.Permission, error) //perm:read AuthNew(ctx context.Context, perms []auth.Permission) ([]byte, error) //perm:admin - // MethodGroup: Net - - NetConnectedness(context.Context, peer.ID) (network.Connectedness, error) //perm:read - NetPeers(context.Context) ([]peer.AddrInfo, error) //perm:read - NetConnect(context.Context, peer.AddrInfo) error //perm:write - NetAddrsListen(context.Context) (peer.AddrInfo, error) //perm:read - NetDisconnect(context.Context, peer.ID) error //perm:write - NetFindPeer(context.Context, peer.ID) (peer.AddrInfo, error) //perm:read - NetPubsubScores(context.Context) ([]PubsubScore, error) //perm:read - NetAutoNatStatus(context.Context) (NatInfo, error) //perm:read - NetAgentVersion(ctx context.Context, p peer.ID) (string, error) //perm:read - NetPeerInfo(context.Context, peer.ID) (*ExtendedPeerInfo, error) //perm:read + // MethodGroup: Log - // NetBandwidthStats returns statistics about the nodes total bandwidth - // usage and current rate across all peers and protocols. 
- NetBandwidthStats(ctx context.Context) (metrics.Stats, error) //perm:read - - // NetBandwidthStatsByPeer returns statistics about the nodes bandwidth - // usage and current rate per peer - NetBandwidthStatsByPeer(ctx context.Context) (map[string]metrics.Stats, error) //perm:read - - // NetBandwidthStatsByProtocol returns statistics about the nodes bandwidth - // usage and current rate per protocol - NetBandwidthStatsByProtocol(ctx context.Context) (map[protocol.ID]metrics.Stats, error) //perm:read - - // ConnectionGater API - NetBlockAdd(ctx context.Context, acl NetBlockList) error //perm:admin - NetBlockRemove(ctx context.Context, acl NetBlockList) error //perm:admin - NetBlockList(ctx context.Context) (NetBlockList, error) //perm:read + LogList(context.Context) ([]string, error) //perm:write + LogSetLevel(context.Context, string, string) error //perm:write // MethodGroup: Common - // Discover returns an OpenRPC document describing an RPC API. - Discover(ctx context.Context) (apitypes.OpenRPCDocument, error) //perm:read - - // ID returns peerID of libp2p node backing this API - ID(context.Context) (peer.ID, error) //perm:read - // Version provides information about API provider Version(context.Context) (APIVersion, error) //perm:read - LogList(context.Context) ([]string, error) //perm:write - LogSetLevel(context.Context, string, string) error //perm:write + // Discover returns an OpenRPC document describing an RPC API. 
+ Discover(ctx context.Context) (apitypes.OpenRPCDocument, error) //perm:read // trigger graceful shutdown Shutdown(context.Context) error //perm:admin @@ -105,8 +69,3 @@ type APIVersion struct { func (v APIVersion) String() string { return fmt.Sprintf("%s+api%s", v.Version, v.APIVersion.String()) } - -type NatInfo struct { - Reachability network.Reachability - PublicAddr string -} diff --git a/api/api_full.go b/api/api_full.go index 3dc503f46e1..412e223cd42 100644 --- a/api/api_full.go +++ b/api/api_full.go @@ -58,6 +58,7 @@ const LookbackNoLimit = abi.ChainEpoch(-1) // FullNode API is a low-level interface to the Filecoin network full node type FullNode interface { Common + Net // MethodGroup: Chain // The Chain method group contains methods for interacting with the @@ -104,6 +105,9 @@ type FullNode interface { // specified block. ChainGetParentMessages(ctx context.Context, blockCid cid.Cid) ([]Message, error) //perm:read + // ChainGetMessagesInTipset returns message stores in current tipset + ChainGetMessagesInTipset(ctx context.Context, tsk types.TipSetKey) ([]Message, error) //perm:read + // ChainGetTipSetByHeight looks back for a tipset at the specified epoch. // If there are no blocks at the specified epoch, a tipset at an earlier epoch // will be returned. @@ -160,6 +164,13 @@ type FullNode interface { // If oldmsgskip is set, messages from before the requested roots are also not included. ChainExport(ctx context.Context, nroots abi.ChainEpoch, oldmsgskip bool, tsk types.TipSetKey) (<-chan []byte, error) //perm:read + // ChainCheckBlockstore performs an (asynchronous) health check on the chain/state blockstore + // if supported by the underlying implementation. 
+ ChainCheckBlockstore(context.Context) error //perm:admin + + // ChainBlockstoreInfo returns some basic information about the blockstore + ChainBlockstoreInfo(context.Context) (map[string]interface{}, error) //perm:read + // MethodGroup: Beacon // The Beacon method group contains methods for interacting with the random beacon (DRAND) diff --git a/api/api_gateway.go b/api/api_gateway.go index 0ee66ac179d..6db1c8e45a1 100644 --- a/api/api_gateway.go +++ b/api/api_gateway.go @@ -45,6 +45,7 @@ type Gateway interface { StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (DealCollateralBounds, error) StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error) + StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*ActorState, error) //perm:read StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (MarketBalance, error) diff --git a/api/api_net.go b/api/api_net.go new file mode 100644 index 00000000000..4cf9ca336a3 --- /dev/null +++ b/api/api_net.go @@ -0,0 +1,66 @@ +package api + +import ( + "context" + + metrics "github.com/libp2p/go-libp2p-core/metrics" + "github.com/libp2p/go-libp2p-core/network" + "github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p-core/protocol" +) + +// MODIFYING THE API INTERFACE +// +// When adding / changing methods in this file: +// * Do the change here +// * Adjust implementation in `node/impl/` +// * Run `make gen` - this will: +// * Generate proxy structs +// * Generate mocks +// * Generate markdown docs +// * Generate openrpc blobs + +type Net interface { + // MethodGroup: 
Net + + NetConnectedness(context.Context, peer.ID) (network.Connectedness, error) //perm:read + NetPeers(context.Context) ([]peer.AddrInfo, error) //perm:read + NetConnect(context.Context, peer.AddrInfo) error //perm:write + NetAddrsListen(context.Context) (peer.AddrInfo, error) //perm:read + NetDisconnect(context.Context, peer.ID) error //perm:write + NetFindPeer(context.Context, peer.ID) (peer.AddrInfo, error) //perm:read + NetPubsubScores(context.Context) ([]PubsubScore, error) //perm:read + NetAutoNatStatus(context.Context) (NatInfo, error) //perm:read + NetAgentVersion(ctx context.Context, p peer.ID) (string, error) //perm:read + NetPeerInfo(context.Context, peer.ID) (*ExtendedPeerInfo, error) //perm:read + + // NetBandwidthStats returns statistics about the nodes total bandwidth + // usage and current rate across all peers and protocols. + NetBandwidthStats(ctx context.Context) (metrics.Stats, error) //perm:read + + // NetBandwidthStatsByPeer returns statistics about the nodes bandwidth + // usage and current rate per peer + NetBandwidthStatsByPeer(ctx context.Context) (map[string]metrics.Stats, error) //perm:read + + // NetBandwidthStatsByProtocol returns statistics about the nodes bandwidth + // usage and current rate per protocol + NetBandwidthStatsByProtocol(ctx context.Context) (map[protocol.ID]metrics.Stats, error) //perm:read + + // ConnectionGater API + NetBlockAdd(ctx context.Context, acl NetBlockList) error //perm:admin + NetBlockRemove(ctx context.Context, acl NetBlockList) error //perm:admin + NetBlockList(ctx context.Context) (NetBlockList, error) //perm:read + + // ID returns peerID of libp2p node backing this API + ID(context.Context) (peer.ID, error) //perm:read +} + +type CommonNet interface { + Common + Net +} + +type NatInfo struct { + Reachability network.Reachability + PublicAddr string +} diff --git a/api/api_storage.go b/api/api_storage.go index 0ccfbd88f6f..c391149290b 100644 --- a/api/api_storage.go +++ b/api/api_storage.go @@ -41,6 
+41,7 @@ import ( // StorageMiner is a low-level interface to the Filecoin network storage miner node type StorageMiner interface { Common + Net ActorAddress(context.Context) (address.Address, error) //perm:read @@ -55,6 +56,13 @@ type StorageMiner interface { // Get the status of a given sector by ID SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (SectorInfo, error) //perm:read + // Add piece to an open sector. If no sectors with enough space are open, + // either a new sector will be created, or this call will block until more + // sectors can be created. + SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storage.Data, d PieceDealInfo) (SectorOffset, error) //perm:admin + + SectorsUnsealPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd *cid.Cid) error //perm:admin + // List all staged sectors SectorsList(context.Context) ([]abi.SectorNumber, error) //perm:read @@ -135,8 +143,8 @@ type StorageMiner interface { StorageBestAlloc(ctx context.Context, allocate storiface.SectorFileType, ssize abi.SectorSize, pathType storiface.PathType) ([]stores.StorageInfo, error) //perm:admin StorageLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) error //perm:admin StorageTryLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) //perm:admin + StorageList(ctx context.Context) (map[stores.ID][]stores.Decl, error) //perm:admin - StorageList(ctx context.Context) (map[stores.ID][]stores.Decl, error) //perm:admin StorageLocal(ctx context.Context) (map[stores.ID]string, error) //perm:admin StorageStat(ctx context.Context, id stores.ID) (fsutil.FsStat, error) //perm:admin @@ -158,6 +166,10 @@ type StorageMiner interface { MarketPendingDeals(ctx context.Context) (PendingDealInfo, error) //perm:write 
MarketPublishPendingDeals(ctx context.Context) error //perm:admin + // RuntimeSubsystems returns the subsystems that are enabled + // in this instance. + RuntimeSubsystems(ctx context.Context) (MinerSubsystems, error) //perm:read + DealsImportData(ctx context.Context, dealPropCid cid.Cid, file string) error //perm:admin DealsList(ctx context.Context) ([]MarketDeal, error) //perm:admin DealsConsiderOnlineStorageDeals(context.Context) (bool, error) //perm:admin @@ -279,15 +291,17 @@ type AddrUse int const ( PreCommitAddr AddrUse = iota CommitAddr + DealPublishAddr PoStAddr TerminateSectorsAddr ) type AddressConfig struct { - PreCommitControl []address.Address - CommitControl []address.Address - TerminateControl []address.Address + PreCommitControl []address.Address + CommitControl []address.Address + TerminateControl []address.Address + DealPublishControl []address.Address DisableOwnerFallback bool DisableWorkerFallback bool @@ -300,3 +314,25 @@ type PendingDealInfo struct { PublishPeriodStart time.Time PublishPeriod time.Duration } + +type SectorOffset struct { + Sector abi.SectorNumber + Offset abi.PaddedPieceSize +} + +// DealInfo is a tuple of deal identity and its schedule +type PieceDealInfo struct { + PublishCid *cid.Cid + DealID abi.DealID + DealProposal *market.DealProposal + DealSchedule DealSchedule + KeepUnsealed bool +} + +// DealSchedule communicates the time interval of a storage deal. The deal must +// appear in a sealed (proven) sector no later than StartEpoch, otherwise it +// is invalid. 
+type DealSchedule struct { + StartEpoch abi.ChainEpoch + EndEpoch abi.ChainEpoch +} diff --git a/api/cbor_gen.go b/api/cbor_gen.go index 808e516ad62..4434b45ede9 100644 --- a/api/cbor_gen.go +++ b/api/cbor_gen.go @@ -8,6 +8,7 @@ import ( "sort" abi "github.com/filecoin-project/go-state-types/abi" + market "github.com/filecoin-project/specs-actors/actors/builtin/market" paych "github.com/filecoin-project/specs-actors/actors/builtin/paych" cid "github.com/ipfs/go-cid" cbg "github.com/whyrusleeping/cbor-gen" @@ -738,3 +739,381 @@ func (t *SealSeed) UnmarshalCBOR(r io.Reader) error { return nil } +func (t *PieceDealInfo) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write([]byte{165}); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.PublishCid (cid.Cid) (struct) + if len("PublishCid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PublishCid\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PublishCid"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PublishCid")); err != nil { + return err + } + + if t.PublishCid == nil { + if _, err := w.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCidBuf(scratch, w, *t.PublishCid); err != nil { + return xerrors.Errorf("failed to write cid field t.PublishCid: %w", err) + } + } + + // t.DealID (abi.DealID) (uint64) + if len("DealID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DealID\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("DealID")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.DealID)); err != nil { + return err + } + + // t.DealProposal (market.DealProposal) (struct) + if 
len("DealProposal") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DealProposal\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealProposal"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("DealProposal")); err != nil { + return err + } + + if err := t.DealProposal.MarshalCBOR(w); err != nil { + return err + } + + // t.DealSchedule (api.DealSchedule) (struct) + if len("DealSchedule") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DealSchedule\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealSchedule"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("DealSchedule")); err != nil { + return err + } + + if err := t.DealSchedule.MarshalCBOR(w); err != nil { + return err + } + + // t.KeepUnsealed (bool) (bool) + if len("KeepUnsealed") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"KeepUnsealed\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("KeepUnsealed"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("KeepUnsealed")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.KeepUnsealed); err != nil { + return err + } + return nil +} + +func (t *PieceDealInfo) UnmarshalCBOR(r io.Reader) error { + *t = PieceDealInfo{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("PieceDealInfo: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.PublishCid 
(cid.Cid) (struct) + case "PublishCid": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PublishCid: %w", err) + } + + t.PublishCid = &c + } + + } + // t.DealID (abi.DealID) (uint64) + case "DealID": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.DealID = abi.DealID(extra) + + } + // t.DealProposal (market.DealProposal) (struct) + case "DealProposal": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + t.DealProposal = new(market.DealProposal) + if err := t.DealProposal.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.DealProposal pointer: %w", err) + } + } + + } + // t.DealSchedule (api.DealSchedule) (struct) + case "DealSchedule": + + { + + if err := t.DealSchedule.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.DealSchedule: %w", err) + } + + } + // t.KeepUnsealed (bool) (bool) + case "KeepUnsealed": + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.KeepUnsealed = false + case 21: + t.KeepUnsealed = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *DealSchedule) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write([]byte{162}); err != nil { + return 
err + } + + scratch := make([]byte, 9) + + // t.StartEpoch (abi.ChainEpoch) (int64) + if len("StartEpoch") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"StartEpoch\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("StartEpoch"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("StartEpoch")); err != nil { + return err + } + + if t.StartEpoch >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.StartEpoch)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.StartEpoch-1)); err != nil { + return err + } + } + + // t.EndEpoch (abi.ChainEpoch) (int64) + if len("EndEpoch") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"EndEpoch\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("EndEpoch"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("EndEpoch")); err != nil { + return err + } + + if t.EndEpoch >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.EndEpoch)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.EndEpoch-1)); err != nil { + return err + } + } + return nil +} + +func (t *DealSchedule) UnmarshalCBOR(r io.Reader) error { + *t = DealSchedule{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("DealSchedule: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + name = string(sval) + } + + 
switch name { + // t.StartEpoch (abi.ChainEpoch) (int64) + case "StartEpoch": + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.StartEpoch = abi.ChainEpoch(extraI) + } + // t.EndEpoch (abi.ChainEpoch) (int64) + case "EndEpoch": + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.EndEpoch = abi.ChainEpoch(extraI) + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} diff --git a/api/client/client.go b/api/client/client.go index 90fe714bf72..669c58f278b 100644 --- a/api/client/client.go +++ b/api/client/client.go @@ -16,14 +16,10 @@ import ( ) // NewCommonRPCV0 creates a new http jsonrpc client. 
-func NewCommonRPCV0(ctx context.Context, addr string, requestHeader http.Header) (api.Common, jsonrpc.ClientCloser, error) { - var res v0api.CommonStruct +func NewCommonRPCV0(ctx context.Context, addr string, requestHeader http.Header) (api.CommonNet, jsonrpc.ClientCloser, error) { + var res v0api.CommonNetStruct closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin", - []interface{}{ - &res.Internal, - }, - requestHeader, - ) + api.GetInternalStructs(&res), requestHeader) return &res, closer, err } @@ -31,11 +27,9 @@ func NewCommonRPCV0(ctx context.Context, addr string, requestHeader http.Header) // NewFullNodeRPCV0 creates a new http jsonrpc client. func NewFullNodeRPCV0(ctx context.Context, addr string, requestHeader http.Header) (v0api.FullNode, jsonrpc.ClientCloser, error) { var res v0api.FullNodeStruct + closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin", - []interface{}{ - &res.CommonStruct.Internal, - &res.Internal, - }, requestHeader) + api.GetInternalStructs(&res), requestHeader) return &res, closer, err } @@ -44,51 +38,56 @@ func NewFullNodeRPCV0(ctx context.Context, addr string, requestHeader http.Heade func NewFullNodeRPCV1(ctx context.Context, addr string, requestHeader http.Header) (api.FullNode, jsonrpc.ClientCloser, error) { var res v1api.FullNodeStruct closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin", - []interface{}{ - &res.CommonStruct.Internal, - &res.Internal, - }, requestHeader) + api.GetInternalStructs(&res), requestHeader) return &res, closer, err } +func getPushUrl(addr string) (string, error) { + pushUrl, err := url.Parse(addr) + if err != nil { + return "", err + } + switch pushUrl.Scheme { + case "ws": + pushUrl.Scheme = "http" + case "wss": + pushUrl.Scheme = "https" + } + ///rpc/v0 -> /rpc/streams/v0/push + + pushUrl.Path = path.Join(pushUrl.Path, "../streams/v0/push") + return pushUrl.String(), nil +} + // NewStorageMinerRPCV0 creates a new http jsonrpc client for miner func NewStorageMinerRPCV0(ctx 
context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (v0api.StorageMiner, jsonrpc.ClientCloser, error) { + pushUrl, err := getPushUrl(addr) + if err != nil { + return nil, nil, err + } + var res v0api.StorageMinerStruct closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin", - []interface{}{ - &res.CommonStruct.Internal, - &res.Internal, - }, - requestHeader, - opts..., - ) + api.GetInternalStructs(&res), requestHeader, + append([]jsonrpc.Option{ + rpcenc.ReaderParamEncoder(pushUrl), + }, opts...)...) return &res, closer, err } -func NewWorkerRPCV0(ctx context.Context, addr string, requestHeader http.Header) (api.Worker, jsonrpc.ClientCloser, error) { - u, err := url.Parse(addr) +func NewWorkerRPCV0(ctx context.Context, addr string, requestHeader http.Header) (v0api.Worker, jsonrpc.ClientCloser, error) { + pushUrl, err := getPushUrl(addr) if err != nil { return nil, nil, err } - switch u.Scheme { - case "ws": - u.Scheme = "http" - case "wss": - u.Scheme = "https" - } - ///rpc/v0 -> /rpc/streams/v0/push - - u.Path = path.Join(u.Path, "../streams/v0/push") var res api.WorkerStruct closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin", - []interface{}{ - &res.Internal, - }, + api.GetInternalStructs(&res), requestHeader, - rpcenc.ReaderParamEncoder(u.String()), + rpcenc.ReaderParamEncoder(pushUrl), jsonrpc.WithNoReconnect(), jsonrpc.WithTimeout(30*time.Second), ) @@ -100,9 +99,7 @@ func NewWorkerRPCV0(ctx context.Context, addr string, requestHeader http.Header) func NewGatewayRPCV1(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (api.Gateway, jsonrpc.ClientCloser, error) { var res api.GatewayStruct closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin", - []interface{}{ - &res.Internal, - }, + api.GetInternalStructs(&res), requestHeader, opts..., ) @@ -114,9 +111,7 @@ func NewGatewayRPCV1(ctx context.Context, addr string, requestHeader http.Header func NewGatewayRPCV0(ctx context.Context, 
addr string, requestHeader http.Header, opts ...jsonrpc.Option) (v0api.Gateway, jsonrpc.ClientCloser, error) { var res v0api.GatewayStruct closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin", - []interface{}{ - &res.Internal, - }, + api.GetInternalStructs(&res), requestHeader, opts..., ) @@ -127,9 +122,7 @@ func NewGatewayRPCV0(ctx context.Context, addr string, requestHeader http.Header func NewWalletRPCV0(ctx context.Context, addr string, requestHeader http.Header) (api.Wallet, jsonrpc.ClientCloser, error) { var res api.WalletStruct closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin", - []interface{}{ - &res.Internal, - }, + api.GetInternalStructs(&res), requestHeader, ) diff --git a/api/docgen-openrpc/cmd/docgen_openrpc.go b/api/docgen-openrpc/cmd/docgen_openrpc.go index febbef3e412..cc5e9f0cda5 100644 --- a/api/docgen-openrpc/cmd/docgen_openrpc.go +++ b/api/docgen-openrpc/cmd/docgen_openrpc.go @@ -34,7 +34,7 @@ func main() { doc := docgen_openrpc.NewLotusOpenRPCDocument(Comments, GroupDocs) - i, _, _, _ := docgen.GetAPIType(os.Args[2], os.Args[3]) + i, _, _ := docgen.GetAPIType(os.Args[2], os.Args[3]) doc.RegisterReceiverName("Filecoin", i) out, err := doc.Discover() diff --git a/api/docgen/cmd/docgen.go b/api/docgen/cmd/docgen.go index 912eea841cd..9ae2df2e707 100644 --- a/api/docgen/cmd/docgen.go +++ b/api/docgen/cmd/docgen.go @@ -4,6 +4,7 @@ import ( "encoding/json" "fmt" "os" + "reflect" "sort" "strings" @@ -15,7 +16,7 @@ func main() { groups := make(map[string]*docgen.MethodGroup) - _, t, permStruct, commonPermStruct := docgen.GetAPIType(os.Args[2], os.Args[3]) + _, t, permStruct := docgen.GetAPIType(os.Args[2], os.Args[3]) for i := 0; i < t.NumMethod(); i++ { m := t.Method(i) @@ -88,13 +89,17 @@ func main() { fmt.Printf("### %s\n", m.Name) fmt.Printf("%s\n\n", m.Comment) - meth, ok := permStruct.FieldByName(m.Name) - if !ok { - meth, ok = commonPermStruct.FieldByName(m.Name) - if !ok { - panic("no perms for method: " + m.Name) + var meth 
reflect.StructField + var ok bool + for _, ps := range permStruct { + meth, ok = ps.FieldByName(m.Name) + if ok { + break } } + if !ok { + panic("no perms for method: " + m.Name) + } perms := meth.Tag.Get("perm") diff --git a/api/docgen/docgen.go b/api/docgen/docgen.go index 4f9bc637ebd..f9addc940dd 100644 --- a/api/docgen/docgen.go +++ b/api/docgen/docgen.go @@ -16,10 +16,10 @@ import ( "github.com/google/uuid" "github.com/ipfs/go-cid" "github.com/ipfs/go-filestore" - metrics "github.com/libp2p/go-libp2p-core/metrics" + "github.com/libp2p/go-libp2p-core/metrics" "github.com/libp2p/go-libp2p-core/network" "github.com/libp2p/go-libp2p-core/peer" - protocol "github.com/libp2p/go-libp2p-core/protocol" + "github.com/libp2p/go-libp2p-core/protocol" pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/multiformats/go-multiaddr" @@ -46,11 +46,12 @@ import ( ) var ExampleValues = map[reflect.Type]interface{}{ - reflect.TypeOf(auth.Permission("")): auth.Permission("write"), - reflect.TypeOf(""): "string value", - reflect.TypeOf(uint64(42)): uint64(42), - reflect.TypeOf(byte(7)): byte(7), - reflect.TypeOf([]byte{}): []byte("byte array"), + reflect.TypeOf(api.MinerSubsystem(0)): api.MinerSubsystem(1), + reflect.TypeOf(auth.Permission("")): auth.Permission("write"), + reflect.TypeOf(""): "string value", + reflect.TypeOf(uint64(42)): uint64(42), + reflect.TypeOf(byte(7)): byte(7), + reflect.TypeOf([]byte{}): []byte("byte array"), } func addExample(v interface{}) { @@ -264,27 +265,35 @@ func init() { addExample(api.CheckStatusCode(0)) addExample(map[string]interface{}{"abc": 123}) + addExample(api.MinerSubsystems{ + api.SubsystemMining, + api.SubsystemSealing, + api.SubsystemSectorStorage, + api.SubsystemMarkets, + }) } -func GetAPIType(name, pkg string) (i interface{}, t, permStruct, commonPermStruct reflect.Type) { +func GetAPIType(name, pkg string) (i interface{}, t reflect.Type, permStruct []reflect.Type) { + switch pkg { case "api": // latest switch name { case 
"FullNode": i = &api.FullNodeStruct{} t = reflect.TypeOf(new(struct{ api.FullNode })).Elem() - permStruct = reflect.TypeOf(api.FullNodeStruct{}.Internal) - commonPermStruct = reflect.TypeOf(api.CommonStruct{}.Internal) + permStruct = append(permStruct, reflect.TypeOf(api.FullNodeStruct{}.Internal)) + permStruct = append(permStruct, reflect.TypeOf(api.CommonStruct{}.Internal)) + permStruct = append(permStruct, reflect.TypeOf(api.NetStruct{}.Internal)) case "StorageMiner": i = &api.StorageMinerStruct{} t = reflect.TypeOf(new(struct{ api.StorageMiner })).Elem() - permStruct = reflect.TypeOf(api.StorageMinerStruct{}.Internal) - commonPermStruct = reflect.TypeOf(api.CommonStruct{}.Internal) + permStruct = append(permStruct, reflect.TypeOf(api.StorageMinerStruct{}.Internal)) + permStruct = append(permStruct, reflect.TypeOf(api.CommonStruct{}.Internal)) + permStruct = append(permStruct, reflect.TypeOf(api.NetStruct{}.Internal)) case "Worker": i = &api.WorkerStruct{} t = reflect.TypeOf(new(struct{ api.Worker })).Elem() - permStruct = reflect.TypeOf(api.WorkerStruct{}.Internal) - commonPermStruct = reflect.TypeOf(api.WorkerStruct{}.Internal) + permStruct = append(permStruct, reflect.TypeOf(api.WorkerStruct{}.Internal)) default: panic("unknown type") } @@ -293,8 +302,9 @@ func GetAPIType(name, pkg string) (i interface{}, t, permStruct, commonPermStruc case "FullNode": i = v0api.FullNodeStruct{} t = reflect.TypeOf(new(struct{ v0api.FullNode })).Elem() - permStruct = reflect.TypeOf(v0api.FullNodeStruct{}.Internal) - commonPermStruct = reflect.TypeOf(v0api.CommonStruct{}.Internal) + permStruct = append(permStruct, reflect.TypeOf(v0api.FullNodeStruct{}.Internal)) + permStruct = append(permStruct, reflect.TypeOf(v0api.CommonStruct{}.Internal)) + permStruct = append(permStruct, reflect.TypeOf(v0api.NetStruct{}.Internal)) default: panic("unknown type") } diff --git a/api/miner_subsystems.go b/api/miner_subsystems.go new file mode 100644 index 00000000000..a77de7e3c95 --- /dev/null 
+++ b/api/miner_subsystems.go
@@ -0,0 +1,79 @@
+package api
+
+import (
+	"encoding/json"
+)
+
+// MinerSubsystem represents a miner subsystem. Int and string values are
+// not guaranteed to be stable over time and should not be persisted or
+// relied upon to remain constant across versions.
+type MinerSubsystem int
+
+const (
+	// SubsystemUnknown is a placeholder for the zero value. It should never
+	// be used.
+	SubsystemUnknown MinerSubsystem = iota
+	// SubsystemMarkets signifies the storage and retrieval
+	// deal-making subsystem.
+	SubsystemMarkets
+	// SubsystemMining signifies the mining subsystem.
+	SubsystemMining
+	// SubsystemSealing signifies the sealing subsystem.
+	SubsystemSealing
+	// SubsystemSectorStorage signifies the sector storage subsystem.
+	SubsystemSectorStorage
+)
+
+var MinerSubsystemToString = map[MinerSubsystem]string{
+	SubsystemUnknown:       "Unknown",
+	SubsystemMarkets:       "Markets",
+	SubsystemMining:        "Mining",
+	SubsystemSealing:       "Sealing",
+	SubsystemSectorStorage: "SectorStorage",
+}
+
+var MinerSubsystemToID = map[string]MinerSubsystem{
+	"Unknown":       SubsystemUnknown,
+	"Markets":       SubsystemMarkets,
+	"Mining":        SubsystemMining,
+	"Sealing":       SubsystemSealing,
+	"SectorStorage": SubsystemSectorStorage,
+}
+
+func (ms MinerSubsystem) MarshalJSON() ([]byte, error) {
+	return json.Marshal(MinerSubsystemToString[ms])
+}
+
+func (ms *MinerSubsystem) UnmarshalJSON(b []byte) error {
+	var j string
+	err := json.Unmarshal(b, &j)
+	if err != nil {
+		return err
+	}
+	s, ok := MinerSubsystemToID[j]
+	if !ok {
+		*ms = SubsystemUnknown
+	} else {
+		*ms = s
+	}
+	return nil
+}
+
+type MinerSubsystems []MinerSubsystem
+
+func (ms MinerSubsystems) Has(entry MinerSubsystem) bool {
+	for _, v := range ms {
+		if v == entry {
+			return true
+		}
+	}
+	return false
+}
+
+func (ms MinerSubsystem) String() string {
+	s, ok := MinerSubsystemToString[ms]
+	if !ok {
+		return MinerSubsystemToString[SubsystemUnknown]
+	}
+	return s
+}
diff --git a/api/mocks/mock_full.go b/api/mocks/mock_full.go
index bb83a88a242..124532c14f4 100644 --- a/api/mocks/mock_full.go +++ b/api/mocks/mock_full.go @@ -105,6 +105,35 @@ func (mr *MockFullNodeMockRecorder) BeaconGetEntry(arg0, arg1 interface{}) *gomo return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BeaconGetEntry", reflect.TypeOf((*MockFullNode)(nil).BeaconGetEntry), arg0, arg1) } +// ChainBlockstoreInfo mocks base method. +func (m *MockFullNode) ChainBlockstoreInfo(arg0 context.Context) (map[string]interface{}, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainBlockstoreInfo", arg0) + ret0, _ := ret[0].(map[string]interface{}) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainBlockstoreInfo indicates an expected call of ChainBlockstoreInfo. +func (mr *MockFullNodeMockRecorder) ChainBlockstoreInfo(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainBlockstoreInfo", reflect.TypeOf((*MockFullNode)(nil).ChainBlockstoreInfo), arg0) +} + +// ChainCheckBlockstore mocks base method. +func (m *MockFullNode) ChainCheckBlockstore(arg0 context.Context) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainCheckBlockstore", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// ChainCheckBlockstore indicates an expected call of ChainCheckBlockstore. +func (mr *MockFullNodeMockRecorder) ChainCheckBlockstore(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainCheckBlockstore", reflect.TypeOf((*MockFullNode)(nil).ChainCheckBlockstore), arg0) +} + // ChainDeleteObj mocks base method. 
func (m *MockFullNode) ChainDeleteObj(arg0 context.Context, arg1 cid.Cid) error { m.ctrl.T.Helper() @@ -194,6 +223,21 @@ func (mr *MockFullNodeMockRecorder) ChainGetMessage(arg0, arg1 interface{}) *gom return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetMessage", reflect.TypeOf((*MockFullNode)(nil).ChainGetMessage), arg0, arg1) } +// ChainGetMessagesInTipset mocks base method. +func (m *MockFullNode) ChainGetMessagesInTipset(arg0 context.Context, arg1 types.TipSetKey) ([]api.Message, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetMessagesInTipset", arg0, arg1) + ret0, _ := ret[0].([]api.Message) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetMessagesInTipset indicates an expected call of ChainGetMessagesInTipset. +func (mr *MockFullNodeMockRecorder) ChainGetMessagesInTipset(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetMessagesInTipset", reflect.TypeOf((*MockFullNode)(nil).ChainGetMessagesInTipset), arg0, arg1) +} + // ChainGetNode mocks base method. 
func (m *MockFullNode) ChainGetNode(arg0 context.Context, arg1 string) (*api.IpldObject, error) { m.ctrl.T.Helper() diff --git a/api/permissioned.go b/api/permissioned.go index d99e5943b8b..72d2239ee3c 100644 --- a/api/permissioned.go +++ b/api/permissioned.go @@ -16,28 +16,33 @@ const ( var AllPermissions = []auth.Permission{PermRead, PermWrite, PermSign, PermAdmin} var DefaultPerms = []auth.Permission{PermRead} +func permissionedProxies(in, out interface{}) { + outs := GetInternalStructs(out) + for _, o := range outs { + auth.PermissionedProxy(AllPermissions, DefaultPerms, in, o) + } +} + func PermissionedStorMinerAPI(a StorageMiner) StorageMiner { var out StorageMinerStruct - auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.Internal) - auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.CommonStruct.Internal) + permissionedProxies(a, &out) return &out } func PermissionedFullAPI(a FullNode) FullNode { var out FullNodeStruct - auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.Internal) - auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.CommonStruct.Internal) + permissionedProxies(a, &out) return &out } func PermissionedWorkerAPI(a Worker) Worker { var out WorkerStruct - auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.Internal) + permissionedProxies(a, &out) return &out } func PermissionedWalletAPI(a Wallet) Wallet { var out WalletStruct - auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.Internal) + permissionedProxies(a, &out) return &out } diff --git a/api/proxy_gen.go b/api/proxy_gen.go index 8b99b6f19c9..a4feb7be157 100644 --- a/api/proxy_gen.go +++ b/api/proxy_gen.go @@ -35,10 +35,12 @@ import ( metrics "github.com/libp2p/go-libp2p-core/metrics" "github.com/libp2p/go-libp2p-core/network" "github.com/libp2p/go-libp2p-core/peer" - protocol "github.com/libp2p/go-libp2p-core/protocol" + "github.com/libp2p/go-libp2p-core/protocol" xerrors "golang.org/x/xerrors" ) +var ErrNotSupported = 
xerrors.New("method not supported") + type ChainIOStruct struct { Internal struct { ChainHasObj func(p0 context.Context, p1 cid.Cid) (bool, error) `` @@ -60,44 +62,10 @@ type CommonStruct struct { Discover func(p0 context.Context) (apitypes.OpenRPCDocument, error) `perm:"read"` - ID func(p0 context.Context) (peer.ID, error) `perm:"read"` - LogList func(p0 context.Context) ([]string, error) `perm:"write"` LogSetLevel func(p0 context.Context, p1 string, p2 string) error `perm:"write"` - NetAddrsListen func(p0 context.Context) (peer.AddrInfo, error) `perm:"read"` - - NetAgentVersion func(p0 context.Context, p1 peer.ID) (string, error) `perm:"read"` - - NetAutoNatStatus func(p0 context.Context) (NatInfo, error) `perm:"read"` - - NetBandwidthStats func(p0 context.Context) (metrics.Stats, error) `perm:"read"` - - NetBandwidthStatsByPeer func(p0 context.Context) (map[string]metrics.Stats, error) `perm:"read"` - - NetBandwidthStatsByProtocol func(p0 context.Context) (map[protocol.ID]metrics.Stats, error) `perm:"read"` - - NetBlockAdd func(p0 context.Context, p1 NetBlockList) error `perm:"admin"` - - NetBlockList func(p0 context.Context) (NetBlockList, error) `perm:"read"` - - NetBlockRemove func(p0 context.Context, p1 NetBlockList) error `perm:"admin"` - - NetConnect func(p0 context.Context, p1 peer.AddrInfo) error `perm:"write"` - - NetConnectedness func(p0 context.Context, p1 peer.ID) (network.Connectedness, error) `perm:"read"` - - NetDisconnect func(p0 context.Context, p1 peer.ID) error `perm:"write"` - - NetFindPeer func(p0 context.Context, p1 peer.ID) (peer.AddrInfo, error) `perm:"read"` - - NetPeerInfo func(p0 context.Context, p1 peer.ID) (*ExtendedPeerInfo, error) `perm:"read"` - - NetPeers func(p0 context.Context) ([]peer.AddrInfo, error) `perm:"read"` - - NetPubsubScores func(p0 context.Context) ([]PubsubScore, error) `perm:"read"` - Session func(p0 context.Context) (uuid.UUID, error) `perm:"read"` Shutdown func(p0 context.Context) error `perm:"admin"` @@ -109,12 
+77,33 @@ type CommonStruct struct { type CommonStub struct { } +type CommonNetStruct struct { + CommonStruct + + NetStruct + + Internal struct { + } +} + +type CommonNetStub struct { + CommonStub + + NetStub +} + type FullNodeStruct struct { CommonStruct + NetStruct + Internal struct { BeaconGetEntry func(p0 context.Context, p1 abi.ChainEpoch) (*types.BeaconEntry, error) `perm:"read"` + ChainBlockstoreInfo func(p0 context.Context) (map[string]interface{}, error) `perm:"read"` + + ChainCheckBlockstore func(p0 context.Context) error `perm:"admin"` + ChainDeleteObj func(p0 context.Context, p1 cid.Cid) error `perm:"admin"` ChainExport func(p0 context.Context, p1 abi.ChainEpoch, p2 bool, p3 types.TipSetKey) (<-chan []byte, error) `perm:"read"` @@ -127,6 +116,8 @@ type FullNodeStruct struct { ChainGetMessage func(p0 context.Context, p1 cid.Cid) (*types.Message, error) `perm:"read"` + ChainGetMessagesInTipset func(p0 context.Context, p1 types.TipSetKey) ([]Message, error) `perm:"read"` + ChainGetNode func(p0 context.Context, p1 string) (*IpldObject, error) `perm:"read"` ChainGetParentMessages func(p0 context.Context, p1 cid.Cid) ([]Message, error) `perm:"read"` @@ -471,6 +462,8 @@ type FullNodeStruct struct { type FullNodeStub struct { CommonStub + + NetStub } type GatewayStruct struct { @@ -523,6 +516,8 @@ type GatewayStruct struct { StateNetworkVersion func(p0 context.Context, p1 types.TipSetKey) (apitypes.NetworkVersion, error) `` + StateReadState func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*ActorState, error) `perm:"read"` + StateSearchMsg func(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) `` StateSectorGetInfo func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorOnChainInfo, error) `` @@ -540,6 +535,47 @@ type GatewayStruct struct { type GatewayStub struct { } +type NetStruct struct { + Internal struct { + ID func(p0 context.Context) (peer.ID, 
error) `perm:"read"` + + NetAddrsListen func(p0 context.Context) (peer.AddrInfo, error) `perm:"read"` + + NetAgentVersion func(p0 context.Context, p1 peer.ID) (string, error) `perm:"read"` + + NetAutoNatStatus func(p0 context.Context) (NatInfo, error) `perm:"read"` + + NetBandwidthStats func(p0 context.Context) (metrics.Stats, error) `perm:"read"` + + NetBandwidthStatsByPeer func(p0 context.Context) (map[string]metrics.Stats, error) `perm:"read"` + + NetBandwidthStatsByProtocol func(p0 context.Context) (map[protocol.ID]metrics.Stats, error) `perm:"read"` + + NetBlockAdd func(p0 context.Context, p1 NetBlockList) error `perm:"admin"` + + NetBlockList func(p0 context.Context) (NetBlockList, error) `perm:"read"` + + NetBlockRemove func(p0 context.Context, p1 NetBlockList) error `perm:"admin"` + + NetConnect func(p0 context.Context, p1 peer.AddrInfo) error `perm:"write"` + + NetConnectedness func(p0 context.Context, p1 peer.ID) (network.Connectedness, error) `perm:"read"` + + NetDisconnect func(p0 context.Context, p1 peer.ID) error `perm:"write"` + + NetFindPeer func(p0 context.Context, p1 peer.ID) (peer.AddrInfo, error) `perm:"read"` + + NetPeerInfo func(p0 context.Context, p1 peer.ID) (*ExtendedPeerInfo, error) `perm:"read"` + + NetPeers func(p0 context.Context) ([]peer.AddrInfo, error) `perm:"read"` + + NetPubsubScores func(p0 context.Context) ([]PubsubScore, error) `perm:"read"` + } +} + +type NetStub struct { +} + type SignableStruct struct { Internal struct { Sign func(p0 context.Context, p1 SignFunc) error `` @@ -552,6 +588,8 @@ type SignableStub struct { type StorageMinerStruct struct { CommonStruct + NetStruct + Internal struct { ActorAddress func(p0 context.Context) (address.Address, error) `perm:"read"` @@ -661,10 +699,14 @@ type StorageMinerStruct struct { ReturnUnsealPiece func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"` + RuntimeSubsystems func(p0 context.Context) (MinerSubsystems, error) `perm:"read"` + 
SealingAbort func(p0 context.Context, p1 storiface.CallID) error `perm:"admin"` SealingSchedDiag func(p0 context.Context, p1 bool) (interface{}, error) `perm:"admin"` + SectorAddPieceToAny func(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storage.Data, p3 PieceDealInfo) (SectorOffset, error) `perm:"admin"` + SectorCommitFlush func(p0 context.Context) ([]sealiface.CommitBatchRes, error) `perm:"admin"` SectorCommitPending func(p0 context.Context) ([]abi.SectorID, error) `perm:"admin"` @@ -703,6 +745,8 @@ type StorageMinerStruct struct { SectorsSummary func(p0 context.Context) (map[SectorState]int, error) `perm:"read"` + SectorsUnsealPiece func(p0 context.Context, p1 storage.SectorRef, p2 storiface.UnpaddedByteIndex, p3 abi.UnpaddedPieceSize, p4 abi.SealRandomness, p5 *cid.Cid) error `perm:"admin"` + SectorsUpdate func(p0 context.Context, p1 abi.SectorNumber, p2 SectorState) error `perm:"admin"` StorageAddLocal func(p0 context.Context, p1 string) error `perm:"admin"` @@ -741,6 +785,8 @@ type StorageMinerStruct struct { type StorageMinerStub struct { CommonStub + + NetStub } type WalletStruct struct { @@ -818,2873 +864,4026 @@ type WorkerStub struct { } func (s *ChainIOStruct) ChainHasObj(p0 context.Context, p1 cid.Cid) (bool, error) { + if s.Internal.ChainHasObj == nil { + return false, ErrNotSupported + } return s.Internal.ChainHasObj(p0, p1) } func (s *ChainIOStub) ChainHasObj(p0 context.Context, p1 cid.Cid) (bool, error) { - return false, xerrors.New("method not supported") + return false, ErrNotSupported } func (s *ChainIOStruct) ChainReadObj(p0 context.Context, p1 cid.Cid) ([]byte, error) { + if s.Internal.ChainReadObj == nil { + return *new([]byte), ErrNotSupported + } return s.Internal.ChainReadObj(p0, p1) } func (s *ChainIOStub) ChainReadObj(p0 context.Context, p1 cid.Cid) ([]byte, error) { - return *new([]byte), xerrors.New("method not supported") + return *new([]byte), ErrNotSupported } func (s *CommonStruct) AuthNew(p0 context.Context, p1 
[]auth.Permission) ([]byte, error) { + if s.Internal.AuthNew == nil { + return *new([]byte), ErrNotSupported + } return s.Internal.AuthNew(p0, p1) } func (s *CommonStub) AuthNew(p0 context.Context, p1 []auth.Permission) ([]byte, error) { - return *new([]byte), xerrors.New("method not supported") + return *new([]byte), ErrNotSupported } func (s *CommonStruct) AuthVerify(p0 context.Context, p1 string) ([]auth.Permission, error) { + if s.Internal.AuthVerify == nil { + return *new([]auth.Permission), ErrNotSupported + } return s.Internal.AuthVerify(p0, p1) } func (s *CommonStub) AuthVerify(p0 context.Context, p1 string) ([]auth.Permission, error) { - return *new([]auth.Permission), xerrors.New("method not supported") + return *new([]auth.Permission), ErrNotSupported } func (s *CommonStruct) Closing(p0 context.Context) (<-chan struct{}, error) { + if s.Internal.Closing == nil { + return nil, ErrNotSupported + } return s.Internal.Closing(p0) } func (s *CommonStub) Closing(p0 context.Context) (<-chan struct{}, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *CommonStruct) Discover(p0 context.Context) (apitypes.OpenRPCDocument, error) { + if s.Internal.Discover == nil { + return *new(apitypes.OpenRPCDocument), ErrNotSupported + } return s.Internal.Discover(p0) } func (s *CommonStub) Discover(p0 context.Context) (apitypes.OpenRPCDocument, error) { - return *new(apitypes.OpenRPCDocument), xerrors.New("method not supported") -} - -func (s *CommonStruct) ID(p0 context.Context) (peer.ID, error) { - return s.Internal.ID(p0) -} - -func (s *CommonStub) ID(p0 context.Context) (peer.ID, error) { - return *new(peer.ID), xerrors.New("method not supported") + return *new(apitypes.OpenRPCDocument), ErrNotSupported } func (s *CommonStruct) LogList(p0 context.Context) ([]string, error) { + if s.Internal.LogList == nil { + return *new([]string), ErrNotSupported + } return s.Internal.LogList(p0) } func (s *CommonStub) LogList(p0 
context.Context) ([]string, error) { - return *new([]string), xerrors.New("method not supported") + return *new([]string), ErrNotSupported } func (s *CommonStruct) LogSetLevel(p0 context.Context, p1 string, p2 string) error { + if s.Internal.LogSetLevel == nil { + return ErrNotSupported + } return s.Internal.LogSetLevel(p0, p1, p2) } func (s *CommonStub) LogSetLevel(p0 context.Context, p1 string, p2 string) error { - return xerrors.New("method not supported") -} - -func (s *CommonStruct) NetAddrsListen(p0 context.Context) (peer.AddrInfo, error) { - return s.Internal.NetAddrsListen(p0) -} - -func (s *CommonStub) NetAddrsListen(p0 context.Context) (peer.AddrInfo, error) { - return *new(peer.AddrInfo), xerrors.New("method not supported") -} - -func (s *CommonStruct) NetAgentVersion(p0 context.Context, p1 peer.ID) (string, error) { - return s.Internal.NetAgentVersion(p0, p1) -} - -func (s *CommonStub) NetAgentVersion(p0 context.Context, p1 peer.ID) (string, error) { - return "", xerrors.New("method not supported") -} - -func (s *CommonStruct) NetAutoNatStatus(p0 context.Context) (NatInfo, error) { - return s.Internal.NetAutoNatStatus(p0) -} - -func (s *CommonStub) NetAutoNatStatus(p0 context.Context) (NatInfo, error) { - return *new(NatInfo), xerrors.New("method not supported") -} - -func (s *CommonStruct) NetBandwidthStats(p0 context.Context) (metrics.Stats, error) { - return s.Internal.NetBandwidthStats(p0) -} - -func (s *CommonStub) NetBandwidthStats(p0 context.Context) (metrics.Stats, error) { - return *new(metrics.Stats), xerrors.New("method not supported") -} - -func (s *CommonStruct) NetBandwidthStatsByPeer(p0 context.Context) (map[string]metrics.Stats, error) { - return s.Internal.NetBandwidthStatsByPeer(p0) -} - -func (s *CommonStub) NetBandwidthStatsByPeer(p0 context.Context) (map[string]metrics.Stats, error) { - return *new(map[string]metrics.Stats), xerrors.New("method not supported") -} - -func (s *CommonStruct) NetBandwidthStatsByProtocol(p0 
context.Context) (map[protocol.ID]metrics.Stats, error) { - return s.Internal.NetBandwidthStatsByProtocol(p0) -} - -func (s *CommonStub) NetBandwidthStatsByProtocol(p0 context.Context) (map[protocol.ID]metrics.Stats, error) { - return *new(map[protocol.ID]metrics.Stats), xerrors.New("method not supported") -} - -func (s *CommonStruct) NetBlockAdd(p0 context.Context, p1 NetBlockList) error { - return s.Internal.NetBlockAdd(p0, p1) -} - -func (s *CommonStub) NetBlockAdd(p0 context.Context, p1 NetBlockList) error { - return xerrors.New("method not supported") -} - -func (s *CommonStruct) NetBlockList(p0 context.Context) (NetBlockList, error) { - return s.Internal.NetBlockList(p0) -} - -func (s *CommonStub) NetBlockList(p0 context.Context) (NetBlockList, error) { - return *new(NetBlockList), xerrors.New("method not supported") -} - -func (s *CommonStruct) NetBlockRemove(p0 context.Context, p1 NetBlockList) error { - return s.Internal.NetBlockRemove(p0, p1) -} - -func (s *CommonStub) NetBlockRemove(p0 context.Context, p1 NetBlockList) error { - return xerrors.New("method not supported") -} - -func (s *CommonStruct) NetConnect(p0 context.Context, p1 peer.AddrInfo) error { - return s.Internal.NetConnect(p0, p1) -} - -func (s *CommonStub) NetConnect(p0 context.Context, p1 peer.AddrInfo) error { - return xerrors.New("method not supported") -} - -func (s *CommonStruct) NetConnectedness(p0 context.Context, p1 peer.ID) (network.Connectedness, error) { - return s.Internal.NetConnectedness(p0, p1) -} - -func (s *CommonStub) NetConnectedness(p0 context.Context, p1 peer.ID) (network.Connectedness, error) { - return *new(network.Connectedness), xerrors.New("method not supported") -} - -func (s *CommonStruct) NetDisconnect(p0 context.Context, p1 peer.ID) error { - return s.Internal.NetDisconnect(p0, p1) -} - -func (s *CommonStub) NetDisconnect(p0 context.Context, p1 peer.ID) error { - return xerrors.New("method not supported") -} - -func (s *CommonStruct) NetFindPeer(p0 
context.Context, p1 peer.ID) (peer.AddrInfo, error) { - return s.Internal.NetFindPeer(p0, p1) -} - -func (s *CommonStub) NetFindPeer(p0 context.Context, p1 peer.ID) (peer.AddrInfo, error) { - return *new(peer.AddrInfo), xerrors.New("method not supported") -} - -func (s *CommonStruct) NetPeerInfo(p0 context.Context, p1 peer.ID) (*ExtendedPeerInfo, error) { - return s.Internal.NetPeerInfo(p0, p1) -} - -func (s *CommonStub) NetPeerInfo(p0 context.Context, p1 peer.ID) (*ExtendedPeerInfo, error) { - return nil, xerrors.New("method not supported") -} - -func (s *CommonStruct) NetPeers(p0 context.Context) ([]peer.AddrInfo, error) { - return s.Internal.NetPeers(p0) -} - -func (s *CommonStub) NetPeers(p0 context.Context) ([]peer.AddrInfo, error) { - return *new([]peer.AddrInfo), xerrors.New("method not supported") -} - -func (s *CommonStruct) NetPubsubScores(p0 context.Context) ([]PubsubScore, error) { - return s.Internal.NetPubsubScores(p0) -} - -func (s *CommonStub) NetPubsubScores(p0 context.Context) ([]PubsubScore, error) { - return *new([]PubsubScore), xerrors.New("method not supported") + return ErrNotSupported } func (s *CommonStruct) Session(p0 context.Context) (uuid.UUID, error) { + if s.Internal.Session == nil { + return *new(uuid.UUID), ErrNotSupported + } return s.Internal.Session(p0) } func (s *CommonStub) Session(p0 context.Context) (uuid.UUID, error) { - return *new(uuid.UUID), xerrors.New("method not supported") + return *new(uuid.UUID), ErrNotSupported } func (s *CommonStruct) Shutdown(p0 context.Context) error { + if s.Internal.Shutdown == nil { + return ErrNotSupported + } return s.Internal.Shutdown(p0) } func (s *CommonStub) Shutdown(p0 context.Context) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *CommonStruct) Version(p0 context.Context) (APIVersion, error) { + if s.Internal.Version == nil { + return *new(APIVersion), ErrNotSupported + } return s.Internal.Version(p0) } func (s *CommonStub) Version(p0 
context.Context) (APIVersion, error) { - return *new(APIVersion), xerrors.New("method not supported") + return *new(APIVersion), ErrNotSupported } func (s *FullNodeStruct) BeaconGetEntry(p0 context.Context, p1 abi.ChainEpoch) (*types.BeaconEntry, error) { + if s.Internal.BeaconGetEntry == nil { + return nil, ErrNotSupported + } return s.Internal.BeaconGetEntry(p0, p1) } func (s *FullNodeStub) BeaconGetEntry(p0 context.Context, p1 abi.ChainEpoch) (*types.BeaconEntry, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported +} + +func (s *FullNodeStruct) ChainBlockstoreInfo(p0 context.Context) (map[string]interface{}, error) { + if s.Internal.ChainBlockstoreInfo == nil { + return *new(map[string]interface{}), ErrNotSupported + } + return s.Internal.ChainBlockstoreInfo(p0) +} + +func (s *FullNodeStub) ChainBlockstoreInfo(p0 context.Context) (map[string]interface{}, error) { + return *new(map[string]interface{}), ErrNotSupported +} + +func (s *FullNodeStruct) ChainCheckBlockstore(p0 context.Context) error { + if s.Internal.ChainCheckBlockstore == nil { + return ErrNotSupported + } + return s.Internal.ChainCheckBlockstore(p0) +} + +func (s *FullNodeStub) ChainCheckBlockstore(p0 context.Context) error { + return ErrNotSupported } func (s *FullNodeStruct) ChainDeleteObj(p0 context.Context, p1 cid.Cid) error { + if s.Internal.ChainDeleteObj == nil { + return ErrNotSupported + } return s.Internal.ChainDeleteObj(p0, p1) } func (s *FullNodeStub) ChainDeleteObj(p0 context.Context, p1 cid.Cid) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *FullNodeStruct) ChainExport(p0 context.Context, p1 abi.ChainEpoch, p2 bool, p3 types.TipSetKey) (<-chan []byte, error) { + if s.Internal.ChainExport == nil { + return nil, ErrNotSupported + } return s.Internal.ChainExport(p0, p1, p2, p3) } func (s *FullNodeStub) ChainExport(p0 context.Context, p1 abi.ChainEpoch, p2 bool, p3 types.TipSetKey) (<-chan []byte, error) { - 
return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) ChainGetBlock(p0 context.Context, p1 cid.Cid) (*types.BlockHeader, error) { + if s.Internal.ChainGetBlock == nil { + return nil, ErrNotSupported + } return s.Internal.ChainGetBlock(p0, p1) } func (s *FullNodeStub) ChainGetBlock(p0 context.Context, p1 cid.Cid) (*types.BlockHeader, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) ChainGetBlockMessages(p0 context.Context, p1 cid.Cid) (*BlockMessages, error) { + if s.Internal.ChainGetBlockMessages == nil { + return nil, ErrNotSupported + } return s.Internal.ChainGetBlockMessages(p0, p1) } func (s *FullNodeStub) ChainGetBlockMessages(p0 context.Context, p1 cid.Cid) (*BlockMessages, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) ChainGetGenesis(p0 context.Context) (*types.TipSet, error) { + if s.Internal.ChainGetGenesis == nil { + return nil, ErrNotSupported + } return s.Internal.ChainGetGenesis(p0) } func (s *FullNodeStub) ChainGetGenesis(p0 context.Context) (*types.TipSet, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) ChainGetMessage(p0 context.Context, p1 cid.Cid) (*types.Message, error) { + if s.Internal.ChainGetMessage == nil { + return nil, ErrNotSupported + } return s.Internal.ChainGetMessage(p0, p1) } func (s *FullNodeStub) ChainGetMessage(p0 context.Context, p1 cid.Cid) (*types.Message, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported +} + +func (s *FullNodeStruct) ChainGetMessagesInTipset(p0 context.Context, p1 types.TipSetKey) ([]Message, error) { + if s.Internal.ChainGetMessagesInTipset == nil { + return *new([]Message), ErrNotSupported + } + return s.Internal.ChainGetMessagesInTipset(p0, p1) +} + +func (s *FullNodeStub) ChainGetMessagesInTipset(p0 context.Context, 
p1 types.TipSetKey) ([]Message, error) { + return *new([]Message), ErrNotSupported } func (s *FullNodeStruct) ChainGetNode(p0 context.Context, p1 string) (*IpldObject, error) { + if s.Internal.ChainGetNode == nil { + return nil, ErrNotSupported + } return s.Internal.ChainGetNode(p0, p1) } func (s *FullNodeStub) ChainGetNode(p0 context.Context, p1 string) (*IpldObject, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) ChainGetParentMessages(p0 context.Context, p1 cid.Cid) ([]Message, error) { + if s.Internal.ChainGetParentMessages == nil { + return *new([]Message), ErrNotSupported + } return s.Internal.ChainGetParentMessages(p0, p1) } func (s *FullNodeStub) ChainGetParentMessages(p0 context.Context, p1 cid.Cid) ([]Message, error) { - return *new([]Message), xerrors.New("method not supported") + return *new([]Message), ErrNotSupported } func (s *FullNodeStruct) ChainGetParentReceipts(p0 context.Context, p1 cid.Cid) ([]*types.MessageReceipt, error) { + if s.Internal.ChainGetParentReceipts == nil { + return *new([]*types.MessageReceipt), ErrNotSupported + } return s.Internal.ChainGetParentReceipts(p0, p1) } func (s *FullNodeStub) ChainGetParentReceipts(p0 context.Context, p1 cid.Cid) ([]*types.MessageReceipt, error) { - return *new([]*types.MessageReceipt), xerrors.New("method not supported") + return *new([]*types.MessageReceipt), ErrNotSupported } func (s *FullNodeStruct) ChainGetPath(p0 context.Context, p1 types.TipSetKey, p2 types.TipSetKey) ([]*HeadChange, error) { + if s.Internal.ChainGetPath == nil { + return *new([]*HeadChange), ErrNotSupported + } return s.Internal.ChainGetPath(p0, p1, p2) } func (s *FullNodeStub) ChainGetPath(p0 context.Context, p1 types.TipSetKey, p2 types.TipSetKey) ([]*HeadChange, error) { - return *new([]*HeadChange), xerrors.New("method not supported") + return *new([]*HeadChange), ErrNotSupported } func (s *FullNodeStruct) ChainGetRandomnessFromBeacon(p0 context.Context, 
p1 types.TipSetKey, p2 crypto.DomainSeparationTag, p3 abi.ChainEpoch, p4 []byte) (abi.Randomness, error) { + if s.Internal.ChainGetRandomnessFromBeacon == nil { + return *new(abi.Randomness), ErrNotSupported + } return s.Internal.ChainGetRandomnessFromBeacon(p0, p1, p2, p3, p4) } func (s *FullNodeStub) ChainGetRandomnessFromBeacon(p0 context.Context, p1 types.TipSetKey, p2 crypto.DomainSeparationTag, p3 abi.ChainEpoch, p4 []byte) (abi.Randomness, error) { - return *new(abi.Randomness), xerrors.New("method not supported") + return *new(abi.Randomness), ErrNotSupported } func (s *FullNodeStruct) ChainGetRandomnessFromTickets(p0 context.Context, p1 types.TipSetKey, p2 crypto.DomainSeparationTag, p3 abi.ChainEpoch, p4 []byte) (abi.Randomness, error) { + if s.Internal.ChainGetRandomnessFromTickets == nil { + return *new(abi.Randomness), ErrNotSupported + } return s.Internal.ChainGetRandomnessFromTickets(p0, p1, p2, p3, p4) } func (s *FullNodeStub) ChainGetRandomnessFromTickets(p0 context.Context, p1 types.TipSetKey, p2 crypto.DomainSeparationTag, p3 abi.ChainEpoch, p4 []byte) (abi.Randomness, error) { - return *new(abi.Randomness), xerrors.New("method not supported") + return *new(abi.Randomness), ErrNotSupported } func (s *FullNodeStruct) ChainGetTipSet(p0 context.Context, p1 types.TipSetKey) (*types.TipSet, error) { + if s.Internal.ChainGetTipSet == nil { + return nil, ErrNotSupported + } return s.Internal.ChainGetTipSet(p0, p1) } func (s *FullNodeStub) ChainGetTipSet(p0 context.Context, p1 types.TipSetKey) (*types.TipSet, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) ChainGetTipSetByHeight(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (*types.TipSet, error) { + if s.Internal.ChainGetTipSetByHeight == nil { + return nil, ErrNotSupported + } return s.Internal.ChainGetTipSetByHeight(p0, p1, p2) } func (s *FullNodeStub) ChainGetTipSetByHeight(p0 context.Context, p1 abi.ChainEpoch, p2 
types.TipSetKey) (*types.TipSet, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) ChainHasObj(p0 context.Context, p1 cid.Cid) (bool, error) { + if s.Internal.ChainHasObj == nil { + return false, ErrNotSupported + } return s.Internal.ChainHasObj(p0, p1) } func (s *FullNodeStub) ChainHasObj(p0 context.Context, p1 cid.Cid) (bool, error) { - return false, xerrors.New("method not supported") + return false, ErrNotSupported } func (s *FullNodeStruct) ChainHead(p0 context.Context) (*types.TipSet, error) { + if s.Internal.ChainHead == nil { + return nil, ErrNotSupported + } return s.Internal.ChainHead(p0) } func (s *FullNodeStub) ChainHead(p0 context.Context) (*types.TipSet, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) ChainNotify(p0 context.Context) (<-chan []*HeadChange, error) { + if s.Internal.ChainNotify == nil { + return nil, ErrNotSupported + } return s.Internal.ChainNotify(p0) } func (s *FullNodeStub) ChainNotify(p0 context.Context) (<-chan []*HeadChange, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) ChainReadObj(p0 context.Context, p1 cid.Cid) ([]byte, error) { + if s.Internal.ChainReadObj == nil { + return *new([]byte), ErrNotSupported + } return s.Internal.ChainReadObj(p0, p1) } func (s *FullNodeStub) ChainReadObj(p0 context.Context, p1 cid.Cid) ([]byte, error) { - return *new([]byte), xerrors.New("method not supported") + return *new([]byte), ErrNotSupported } func (s *FullNodeStruct) ChainSetHead(p0 context.Context, p1 types.TipSetKey) error { + if s.Internal.ChainSetHead == nil { + return ErrNotSupported + } return s.Internal.ChainSetHead(p0, p1) } func (s *FullNodeStub) ChainSetHead(p0 context.Context, p1 types.TipSetKey) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *FullNodeStruct) ChainStatObj(p0 
context.Context, p1 cid.Cid, p2 cid.Cid) (ObjStat, error) { + if s.Internal.ChainStatObj == nil { + return *new(ObjStat), ErrNotSupported + } return s.Internal.ChainStatObj(p0, p1, p2) } func (s *FullNodeStub) ChainStatObj(p0 context.Context, p1 cid.Cid, p2 cid.Cid) (ObjStat, error) { - return *new(ObjStat), xerrors.New("method not supported") + return *new(ObjStat), ErrNotSupported } func (s *FullNodeStruct) ChainTipSetWeight(p0 context.Context, p1 types.TipSetKey) (types.BigInt, error) { + if s.Internal.ChainTipSetWeight == nil { + return *new(types.BigInt), ErrNotSupported + } return s.Internal.ChainTipSetWeight(p0, p1) } func (s *FullNodeStub) ChainTipSetWeight(p0 context.Context, p1 types.TipSetKey) (types.BigInt, error) { - return *new(types.BigInt), xerrors.New("method not supported") + return *new(types.BigInt), ErrNotSupported } func (s *FullNodeStruct) ClientCalcCommP(p0 context.Context, p1 string) (*CommPRet, error) { + if s.Internal.ClientCalcCommP == nil { + return nil, ErrNotSupported + } return s.Internal.ClientCalcCommP(p0, p1) } func (s *FullNodeStub) ClientCalcCommP(p0 context.Context, p1 string) (*CommPRet, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) ClientCancelDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error { + if s.Internal.ClientCancelDataTransfer == nil { + return ErrNotSupported + } return s.Internal.ClientCancelDataTransfer(p0, p1, p2, p3) } func (s *FullNodeStub) ClientCancelDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *FullNodeStruct) ClientCancelRetrievalDeal(p0 context.Context, p1 retrievalmarket.DealID) error { + if s.Internal.ClientCancelRetrievalDeal == nil { + return ErrNotSupported + } return s.Internal.ClientCancelRetrievalDeal(p0, p1) } func (s *FullNodeStub) ClientCancelRetrievalDeal(p0 
context.Context, p1 retrievalmarket.DealID) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *FullNodeStruct) ClientDataTransferUpdates(p0 context.Context) (<-chan DataTransferChannel, error) { + if s.Internal.ClientDataTransferUpdates == nil { + return nil, ErrNotSupported + } return s.Internal.ClientDataTransferUpdates(p0) } func (s *FullNodeStub) ClientDataTransferUpdates(p0 context.Context) (<-chan DataTransferChannel, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) ClientDealPieceCID(p0 context.Context, p1 cid.Cid) (DataCIDSize, error) { + if s.Internal.ClientDealPieceCID == nil { + return *new(DataCIDSize), ErrNotSupported + } return s.Internal.ClientDealPieceCID(p0, p1) } func (s *FullNodeStub) ClientDealPieceCID(p0 context.Context, p1 cid.Cid) (DataCIDSize, error) { - return *new(DataCIDSize), xerrors.New("method not supported") + return *new(DataCIDSize), ErrNotSupported } func (s *FullNodeStruct) ClientDealSize(p0 context.Context, p1 cid.Cid) (DataSize, error) { + if s.Internal.ClientDealSize == nil { + return *new(DataSize), ErrNotSupported + } return s.Internal.ClientDealSize(p0, p1) } func (s *FullNodeStub) ClientDealSize(p0 context.Context, p1 cid.Cid) (DataSize, error) { - return *new(DataSize), xerrors.New("method not supported") + return *new(DataSize), ErrNotSupported } func (s *FullNodeStruct) ClientFindData(p0 context.Context, p1 cid.Cid, p2 *cid.Cid) ([]QueryOffer, error) { + if s.Internal.ClientFindData == nil { + return *new([]QueryOffer), ErrNotSupported + } return s.Internal.ClientFindData(p0, p1, p2) } func (s *FullNodeStub) ClientFindData(p0 context.Context, p1 cid.Cid, p2 *cid.Cid) ([]QueryOffer, error) { - return *new([]QueryOffer), xerrors.New("method not supported") + return *new([]QueryOffer), ErrNotSupported } func (s *FullNodeStruct) ClientGenCar(p0 context.Context, p1 FileRef, p2 string) error { + if s.Internal.ClientGenCar 
== nil { + return ErrNotSupported + } return s.Internal.ClientGenCar(p0, p1, p2) } func (s *FullNodeStub) ClientGenCar(p0 context.Context, p1 FileRef, p2 string) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *FullNodeStruct) ClientGetDealInfo(p0 context.Context, p1 cid.Cid) (*DealInfo, error) { + if s.Internal.ClientGetDealInfo == nil { + return nil, ErrNotSupported + } return s.Internal.ClientGetDealInfo(p0, p1) } func (s *FullNodeStub) ClientGetDealInfo(p0 context.Context, p1 cid.Cid) (*DealInfo, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) ClientGetDealStatus(p0 context.Context, p1 uint64) (string, error) { + if s.Internal.ClientGetDealStatus == nil { + return "", ErrNotSupported + } return s.Internal.ClientGetDealStatus(p0, p1) } func (s *FullNodeStub) ClientGetDealStatus(p0 context.Context, p1 uint64) (string, error) { - return "", xerrors.New("method not supported") + return "", ErrNotSupported } func (s *FullNodeStruct) ClientGetDealUpdates(p0 context.Context) (<-chan DealInfo, error) { + if s.Internal.ClientGetDealUpdates == nil { + return nil, ErrNotSupported + } return s.Internal.ClientGetDealUpdates(p0) } func (s *FullNodeStub) ClientGetDealUpdates(p0 context.Context) (<-chan DealInfo, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) ClientGetRetrievalUpdates(p0 context.Context) (<-chan RetrievalInfo, error) { + if s.Internal.ClientGetRetrievalUpdates == nil { + return nil, ErrNotSupported + } return s.Internal.ClientGetRetrievalUpdates(p0) } func (s *FullNodeStub) ClientGetRetrievalUpdates(p0 context.Context) (<-chan RetrievalInfo, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) ClientHasLocal(p0 context.Context, p1 cid.Cid) (bool, error) { + if s.Internal.ClientHasLocal == nil { + return false, 
ErrNotSupported + } return s.Internal.ClientHasLocal(p0, p1) } func (s *FullNodeStub) ClientHasLocal(p0 context.Context, p1 cid.Cid) (bool, error) { - return false, xerrors.New("method not supported") + return false, ErrNotSupported } func (s *FullNodeStruct) ClientImport(p0 context.Context, p1 FileRef) (*ImportRes, error) { + if s.Internal.ClientImport == nil { + return nil, ErrNotSupported + } return s.Internal.ClientImport(p0, p1) } func (s *FullNodeStub) ClientImport(p0 context.Context, p1 FileRef) (*ImportRes, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) ClientListDataTransfers(p0 context.Context) ([]DataTransferChannel, error) { + if s.Internal.ClientListDataTransfers == nil { + return *new([]DataTransferChannel), ErrNotSupported + } return s.Internal.ClientListDataTransfers(p0) } func (s *FullNodeStub) ClientListDataTransfers(p0 context.Context) ([]DataTransferChannel, error) { - return *new([]DataTransferChannel), xerrors.New("method not supported") + return *new([]DataTransferChannel), ErrNotSupported } func (s *FullNodeStruct) ClientListDeals(p0 context.Context) ([]DealInfo, error) { + if s.Internal.ClientListDeals == nil { + return *new([]DealInfo), ErrNotSupported + } return s.Internal.ClientListDeals(p0) } func (s *FullNodeStub) ClientListDeals(p0 context.Context) ([]DealInfo, error) { - return *new([]DealInfo), xerrors.New("method not supported") + return *new([]DealInfo), ErrNotSupported } func (s *FullNodeStruct) ClientListImports(p0 context.Context) ([]Import, error) { + if s.Internal.ClientListImports == nil { + return *new([]Import), ErrNotSupported + } return s.Internal.ClientListImports(p0) } func (s *FullNodeStub) ClientListImports(p0 context.Context) ([]Import, error) { - return *new([]Import), xerrors.New("method not supported") + return *new([]Import), ErrNotSupported } func (s *FullNodeStruct) ClientListRetrievals(p0 context.Context) ([]RetrievalInfo, error) { + if 
s.Internal.ClientListRetrievals == nil { + return *new([]RetrievalInfo), ErrNotSupported + } return s.Internal.ClientListRetrievals(p0) } func (s *FullNodeStub) ClientListRetrievals(p0 context.Context) ([]RetrievalInfo, error) { - return *new([]RetrievalInfo), xerrors.New("method not supported") + return *new([]RetrievalInfo), ErrNotSupported } func (s *FullNodeStruct) ClientMinerQueryOffer(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 *cid.Cid) (QueryOffer, error) { + if s.Internal.ClientMinerQueryOffer == nil { + return *new(QueryOffer), ErrNotSupported + } return s.Internal.ClientMinerQueryOffer(p0, p1, p2, p3) } func (s *FullNodeStub) ClientMinerQueryOffer(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 *cid.Cid) (QueryOffer, error) { - return *new(QueryOffer), xerrors.New("method not supported") + return *new(QueryOffer), ErrNotSupported } func (s *FullNodeStruct) ClientQueryAsk(p0 context.Context, p1 peer.ID, p2 address.Address) (*storagemarket.StorageAsk, error) { + if s.Internal.ClientQueryAsk == nil { + return nil, ErrNotSupported + } return s.Internal.ClientQueryAsk(p0, p1, p2) } func (s *FullNodeStub) ClientQueryAsk(p0 context.Context, p1 peer.ID, p2 address.Address) (*storagemarket.StorageAsk, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) ClientRemoveImport(p0 context.Context, p1 multistore.StoreID) error { + if s.Internal.ClientRemoveImport == nil { + return ErrNotSupported + } return s.Internal.ClientRemoveImport(p0, p1) } func (s *FullNodeStub) ClientRemoveImport(p0 context.Context, p1 multistore.StoreID) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *FullNodeStruct) ClientRestartDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error { + if s.Internal.ClientRestartDataTransfer == nil { + return ErrNotSupported + } return s.Internal.ClientRestartDataTransfer(p0, p1, p2, p3) } func (s 
*FullNodeStub) ClientRestartDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *FullNodeStruct) ClientRetrieve(p0 context.Context, p1 RetrievalOrder, p2 *FileRef) error { + if s.Internal.ClientRetrieve == nil { + return ErrNotSupported + } return s.Internal.ClientRetrieve(p0, p1, p2) } func (s *FullNodeStub) ClientRetrieve(p0 context.Context, p1 RetrievalOrder, p2 *FileRef) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *FullNodeStruct) ClientRetrieveTryRestartInsufficientFunds(p0 context.Context, p1 address.Address) error { + if s.Internal.ClientRetrieveTryRestartInsufficientFunds == nil { + return ErrNotSupported + } return s.Internal.ClientRetrieveTryRestartInsufficientFunds(p0, p1) } func (s *FullNodeStub) ClientRetrieveTryRestartInsufficientFunds(p0 context.Context, p1 address.Address) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *FullNodeStruct) ClientRetrieveWithEvents(p0 context.Context, p1 RetrievalOrder, p2 *FileRef) (<-chan marketevents.RetrievalEvent, error) { + if s.Internal.ClientRetrieveWithEvents == nil { + return nil, ErrNotSupported + } return s.Internal.ClientRetrieveWithEvents(p0, p1, p2) } func (s *FullNodeStub) ClientRetrieveWithEvents(p0 context.Context, p1 RetrievalOrder, p2 *FileRef) (<-chan marketevents.RetrievalEvent, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) ClientStartDeal(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) { + if s.Internal.ClientStartDeal == nil { + return nil, ErrNotSupported + } return s.Internal.ClientStartDeal(p0, p1) } func (s *FullNodeStub) ClientStartDeal(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) 
ClientStatelessDeal(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) { + if s.Internal.ClientStatelessDeal == nil { + return nil, ErrNotSupported + } return s.Internal.ClientStatelessDeal(p0, p1) } func (s *FullNodeStub) ClientStatelessDeal(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) CreateBackup(p0 context.Context, p1 string) error { + if s.Internal.CreateBackup == nil { + return ErrNotSupported + } return s.Internal.CreateBackup(p0, p1) } func (s *FullNodeStub) CreateBackup(p0 context.Context, p1 string) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *FullNodeStruct) GasEstimateFeeCap(p0 context.Context, p1 *types.Message, p2 int64, p3 types.TipSetKey) (types.BigInt, error) { + if s.Internal.GasEstimateFeeCap == nil { + return *new(types.BigInt), ErrNotSupported + } return s.Internal.GasEstimateFeeCap(p0, p1, p2, p3) } func (s *FullNodeStub) GasEstimateFeeCap(p0 context.Context, p1 *types.Message, p2 int64, p3 types.TipSetKey) (types.BigInt, error) { - return *new(types.BigInt), xerrors.New("method not supported") + return *new(types.BigInt), ErrNotSupported } func (s *FullNodeStruct) GasEstimateGasLimit(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (int64, error) { + if s.Internal.GasEstimateGasLimit == nil { + return 0, ErrNotSupported + } return s.Internal.GasEstimateGasLimit(p0, p1, p2) } func (s *FullNodeStub) GasEstimateGasLimit(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (int64, error) { - return 0, xerrors.New("method not supported") + return 0, ErrNotSupported } func (s *FullNodeStruct) GasEstimateGasPremium(p0 context.Context, p1 uint64, p2 address.Address, p3 int64, p4 types.TipSetKey) (types.BigInt, error) { + if s.Internal.GasEstimateGasPremium == nil { + return *new(types.BigInt), ErrNotSupported + } return s.Internal.GasEstimateGasPremium(p0, p1, 
p2, p3, p4) } func (s *FullNodeStub) GasEstimateGasPremium(p0 context.Context, p1 uint64, p2 address.Address, p3 int64, p4 types.TipSetKey) (types.BigInt, error) { - return *new(types.BigInt), xerrors.New("method not supported") + return *new(types.BigInt), ErrNotSupported } func (s *FullNodeStruct) GasEstimateMessageGas(p0 context.Context, p1 *types.Message, p2 *MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) { + if s.Internal.GasEstimateMessageGas == nil { + return nil, ErrNotSupported + } return s.Internal.GasEstimateMessageGas(p0, p1, p2, p3) } func (s *FullNodeStub) GasEstimateMessageGas(p0 context.Context, p1 *types.Message, p2 *MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) MarketAddBalance(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) { + if s.Internal.MarketAddBalance == nil { + return *new(cid.Cid), ErrNotSupported + } return s.Internal.MarketAddBalance(p0, p1, p2, p3) } func (s *FullNodeStub) MarketAddBalance(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) { - return *new(cid.Cid), xerrors.New("method not supported") + return *new(cid.Cid), ErrNotSupported } func (s *FullNodeStruct) MarketGetReserved(p0 context.Context, p1 address.Address) (types.BigInt, error) { + if s.Internal.MarketGetReserved == nil { + return *new(types.BigInt), ErrNotSupported + } return s.Internal.MarketGetReserved(p0, p1) } func (s *FullNodeStub) MarketGetReserved(p0 context.Context, p1 address.Address) (types.BigInt, error) { - return *new(types.BigInt), xerrors.New("method not supported") + return *new(types.BigInt), ErrNotSupported } func (s *FullNodeStruct) MarketReleaseFunds(p0 context.Context, p1 address.Address, p2 types.BigInt) error { + if s.Internal.MarketReleaseFunds == nil { + return ErrNotSupported + } return 
s.Internal.MarketReleaseFunds(p0, p1, p2) } func (s *FullNodeStub) MarketReleaseFunds(p0 context.Context, p1 address.Address, p2 types.BigInt) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *FullNodeStruct) MarketReserveFunds(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) { + if s.Internal.MarketReserveFunds == nil { + return *new(cid.Cid), ErrNotSupported + } return s.Internal.MarketReserveFunds(p0, p1, p2, p3) } func (s *FullNodeStub) MarketReserveFunds(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) { - return *new(cid.Cid), xerrors.New("method not supported") + return *new(cid.Cid), ErrNotSupported } func (s *FullNodeStruct) MarketWithdraw(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) { + if s.Internal.MarketWithdraw == nil { + return *new(cid.Cid), ErrNotSupported + } return s.Internal.MarketWithdraw(p0, p1, p2, p3) } func (s *FullNodeStub) MarketWithdraw(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) { - return *new(cid.Cid), xerrors.New("method not supported") + return *new(cid.Cid), ErrNotSupported } func (s *FullNodeStruct) MinerCreateBlock(p0 context.Context, p1 *BlockTemplate) (*types.BlockMsg, error) { + if s.Internal.MinerCreateBlock == nil { + return nil, ErrNotSupported + } return s.Internal.MinerCreateBlock(p0, p1) } func (s *FullNodeStub) MinerCreateBlock(p0 context.Context, p1 *BlockTemplate) (*types.BlockMsg, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) MinerGetBaseInfo(p0 context.Context, p1 address.Address, p2 abi.ChainEpoch, p3 types.TipSetKey) (*MiningBaseInfo, error) { + if s.Internal.MinerGetBaseInfo == nil { + return nil, ErrNotSupported + } return s.Internal.MinerGetBaseInfo(p0, p1, p2, p3) } func (s *FullNodeStub) MinerGetBaseInfo(p0 
context.Context, p1 address.Address, p2 abi.ChainEpoch, p3 types.TipSetKey) (*MiningBaseInfo, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) MpoolBatchPush(p0 context.Context, p1 []*types.SignedMessage) ([]cid.Cid, error) { + if s.Internal.MpoolBatchPush == nil { + return *new([]cid.Cid), ErrNotSupported + } return s.Internal.MpoolBatchPush(p0, p1) } func (s *FullNodeStub) MpoolBatchPush(p0 context.Context, p1 []*types.SignedMessage) ([]cid.Cid, error) { - return *new([]cid.Cid), xerrors.New("method not supported") + return *new([]cid.Cid), ErrNotSupported } func (s *FullNodeStruct) MpoolBatchPushMessage(p0 context.Context, p1 []*types.Message, p2 *MessageSendSpec) ([]*types.SignedMessage, error) { + if s.Internal.MpoolBatchPushMessage == nil { + return *new([]*types.SignedMessage), ErrNotSupported + } return s.Internal.MpoolBatchPushMessage(p0, p1, p2) } func (s *FullNodeStub) MpoolBatchPushMessage(p0 context.Context, p1 []*types.Message, p2 *MessageSendSpec) ([]*types.SignedMessage, error) { - return *new([]*types.SignedMessage), xerrors.New("method not supported") + return *new([]*types.SignedMessage), ErrNotSupported } func (s *FullNodeStruct) MpoolBatchPushUntrusted(p0 context.Context, p1 []*types.SignedMessage) ([]cid.Cid, error) { + if s.Internal.MpoolBatchPushUntrusted == nil { + return *new([]cid.Cid), ErrNotSupported + } return s.Internal.MpoolBatchPushUntrusted(p0, p1) } func (s *FullNodeStub) MpoolBatchPushUntrusted(p0 context.Context, p1 []*types.SignedMessage) ([]cid.Cid, error) { - return *new([]cid.Cid), xerrors.New("method not supported") + return *new([]cid.Cid), ErrNotSupported } func (s *FullNodeStruct) MpoolCheckMessages(p0 context.Context, p1 []*MessagePrototype) ([][]MessageCheckStatus, error) { + if s.Internal.MpoolCheckMessages == nil { + return *new([][]MessageCheckStatus), ErrNotSupported + } return s.Internal.MpoolCheckMessages(p0, p1) } func (s *FullNodeStub) 
MpoolCheckMessages(p0 context.Context, p1 []*MessagePrototype) ([][]MessageCheckStatus, error) { - return *new([][]MessageCheckStatus), xerrors.New("method not supported") + return *new([][]MessageCheckStatus), ErrNotSupported } func (s *FullNodeStruct) MpoolCheckPendingMessages(p0 context.Context, p1 address.Address) ([][]MessageCheckStatus, error) { + if s.Internal.MpoolCheckPendingMessages == nil { + return *new([][]MessageCheckStatus), ErrNotSupported + } return s.Internal.MpoolCheckPendingMessages(p0, p1) } func (s *FullNodeStub) MpoolCheckPendingMessages(p0 context.Context, p1 address.Address) ([][]MessageCheckStatus, error) { - return *new([][]MessageCheckStatus), xerrors.New("method not supported") + return *new([][]MessageCheckStatus), ErrNotSupported } func (s *FullNodeStruct) MpoolCheckReplaceMessages(p0 context.Context, p1 []*types.Message) ([][]MessageCheckStatus, error) { + if s.Internal.MpoolCheckReplaceMessages == nil { + return *new([][]MessageCheckStatus), ErrNotSupported + } return s.Internal.MpoolCheckReplaceMessages(p0, p1) } func (s *FullNodeStub) MpoolCheckReplaceMessages(p0 context.Context, p1 []*types.Message) ([][]MessageCheckStatus, error) { - return *new([][]MessageCheckStatus), xerrors.New("method not supported") + return *new([][]MessageCheckStatus), ErrNotSupported } func (s *FullNodeStruct) MpoolClear(p0 context.Context, p1 bool) error { + if s.Internal.MpoolClear == nil { + return ErrNotSupported + } return s.Internal.MpoolClear(p0, p1) } func (s *FullNodeStub) MpoolClear(p0 context.Context, p1 bool) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *FullNodeStruct) MpoolGetConfig(p0 context.Context) (*types.MpoolConfig, error) { + if s.Internal.MpoolGetConfig == nil { + return nil, ErrNotSupported + } return s.Internal.MpoolGetConfig(p0) } func (s *FullNodeStub) MpoolGetConfig(p0 context.Context) (*types.MpoolConfig, error) { - return nil, xerrors.New("method not supported") + return nil, 
ErrNotSupported } func (s *FullNodeStruct) MpoolGetNonce(p0 context.Context, p1 address.Address) (uint64, error) { + if s.Internal.MpoolGetNonce == nil { + return 0, ErrNotSupported + } return s.Internal.MpoolGetNonce(p0, p1) } func (s *FullNodeStub) MpoolGetNonce(p0 context.Context, p1 address.Address) (uint64, error) { - return 0, xerrors.New("method not supported") + return 0, ErrNotSupported } func (s *FullNodeStruct) MpoolPending(p0 context.Context, p1 types.TipSetKey) ([]*types.SignedMessage, error) { + if s.Internal.MpoolPending == nil { + return *new([]*types.SignedMessage), ErrNotSupported + } return s.Internal.MpoolPending(p0, p1) } func (s *FullNodeStub) MpoolPending(p0 context.Context, p1 types.TipSetKey) ([]*types.SignedMessage, error) { - return *new([]*types.SignedMessage), xerrors.New("method not supported") + return *new([]*types.SignedMessage), ErrNotSupported } func (s *FullNodeStruct) MpoolPush(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) { + if s.Internal.MpoolPush == nil { + return *new(cid.Cid), ErrNotSupported + } return s.Internal.MpoolPush(p0, p1) } func (s *FullNodeStub) MpoolPush(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) { - return *new(cid.Cid), xerrors.New("method not supported") + return *new(cid.Cid), ErrNotSupported } func (s *FullNodeStruct) MpoolPushMessage(p0 context.Context, p1 *types.Message, p2 *MessageSendSpec) (*types.SignedMessage, error) { + if s.Internal.MpoolPushMessage == nil { + return nil, ErrNotSupported + } return s.Internal.MpoolPushMessage(p0, p1, p2) } func (s *FullNodeStub) MpoolPushMessage(p0 context.Context, p1 *types.Message, p2 *MessageSendSpec) (*types.SignedMessage, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) MpoolPushUntrusted(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) { + if s.Internal.MpoolPushUntrusted == nil { + return *new(cid.Cid), ErrNotSupported + } return 
s.Internal.MpoolPushUntrusted(p0, p1) } func (s *FullNodeStub) MpoolPushUntrusted(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) { - return *new(cid.Cid), xerrors.New("method not supported") + return *new(cid.Cid), ErrNotSupported } func (s *FullNodeStruct) MpoolSelect(p0 context.Context, p1 types.TipSetKey, p2 float64) ([]*types.SignedMessage, error) { + if s.Internal.MpoolSelect == nil { + return *new([]*types.SignedMessage), ErrNotSupported + } return s.Internal.MpoolSelect(p0, p1, p2) } func (s *FullNodeStub) MpoolSelect(p0 context.Context, p1 types.TipSetKey, p2 float64) ([]*types.SignedMessage, error) { - return *new([]*types.SignedMessage), xerrors.New("method not supported") + return *new([]*types.SignedMessage), ErrNotSupported } func (s *FullNodeStruct) MpoolSetConfig(p0 context.Context, p1 *types.MpoolConfig) error { + if s.Internal.MpoolSetConfig == nil { + return ErrNotSupported + } return s.Internal.MpoolSetConfig(p0, p1) } func (s *FullNodeStub) MpoolSetConfig(p0 context.Context, p1 *types.MpoolConfig) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *FullNodeStruct) MpoolSub(p0 context.Context) (<-chan MpoolUpdate, error) { + if s.Internal.MpoolSub == nil { + return nil, ErrNotSupported + } return s.Internal.MpoolSub(p0) } func (s *FullNodeStub) MpoolSub(p0 context.Context) (<-chan MpoolUpdate, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) MsigAddApprove(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 bool) (*MessagePrototype, error) { + if s.Internal.MsigAddApprove == nil { + return nil, ErrNotSupported + } return s.Internal.MsigAddApprove(p0, p1, p2, p3, p4, p5, p6) } func (s *FullNodeStub) MsigAddApprove(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 bool) (*MessagePrototype, error) { - return 
nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) MsigAddCancel(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 bool) (*MessagePrototype, error) { + if s.Internal.MsigAddCancel == nil { + return nil, ErrNotSupported + } return s.Internal.MsigAddCancel(p0, p1, p2, p3, p4, p5) } func (s *FullNodeStub) MsigAddCancel(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 bool) (*MessagePrototype, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) MsigAddPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (*MessagePrototype, error) { + if s.Internal.MsigAddPropose == nil { + return nil, ErrNotSupported + } return s.Internal.MsigAddPropose(p0, p1, p2, p3, p4) } func (s *FullNodeStub) MsigAddPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (*MessagePrototype, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) MsigApprove(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address) (*MessagePrototype, error) { + if s.Internal.MsigApprove == nil { + return nil, ErrNotSupported + } return s.Internal.MsigApprove(p0, p1, p2, p3) } func (s *FullNodeStub) MsigApprove(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address) (*MessagePrototype, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) MsigApproveTxnHash(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 address.Address, p5 types.BigInt, p6 address.Address, p7 uint64, p8 []byte) (*MessagePrototype, error) { + if s.Internal.MsigApproveTxnHash == nil { + return nil, ErrNotSupported + } return s.Internal.MsigApproveTxnHash(p0, p1, p2, p3, p4, p5, p6, p7, p8) } func 
(s *FullNodeStub) MsigApproveTxnHash(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 address.Address, p5 types.BigInt, p6 address.Address, p7 uint64, p8 []byte) (*MessagePrototype, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) MsigCancel(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (*MessagePrototype, error) { + if s.Internal.MsigCancel == nil { + return nil, ErrNotSupported + } return s.Internal.MsigCancel(p0, p1, p2, p3, p4, p5, p6, p7) } func (s *FullNodeStub) MsigCancel(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (*MessagePrototype, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) MsigCreate(p0 context.Context, p1 uint64, p2 []address.Address, p3 abi.ChainEpoch, p4 types.BigInt, p5 address.Address, p6 types.BigInt) (*MessagePrototype, error) { + if s.Internal.MsigCreate == nil { + return nil, ErrNotSupported + } return s.Internal.MsigCreate(p0, p1, p2, p3, p4, p5, p6) } func (s *FullNodeStub) MsigCreate(p0 context.Context, p1 uint64, p2 []address.Address, p3 abi.ChainEpoch, p4 types.BigInt, p5 address.Address, p6 types.BigInt) (*MessagePrototype, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) MsigGetAvailableBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) { + if s.Internal.MsigGetAvailableBalance == nil { + return *new(types.BigInt), ErrNotSupported + } return s.Internal.MsigGetAvailableBalance(p0, p1, p2) } func (s *FullNodeStub) MsigGetAvailableBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) { - return *new(types.BigInt), xerrors.New("method not supported") + return 
*new(types.BigInt), ErrNotSupported } func (s *FullNodeStruct) MsigGetPending(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*MsigTransaction, error) { + if s.Internal.MsigGetPending == nil { + return *new([]*MsigTransaction), ErrNotSupported + } return s.Internal.MsigGetPending(p0, p1, p2) } func (s *FullNodeStub) MsigGetPending(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*MsigTransaction, error) { - return *new([]*MsigTransaction), xerrors.New("method not supported") + return *new([]*MsigTransaction), ErrNotSupported } func (s *FullNodeStruct) MsigGetVested(p0 context.Context, p1 address.Address, p2 types.TipSetKey, p3 types.TipSetKey) (types.BigInt, error) { + if s.Internal.MsigGetVested == nil { + return *new(types.BigInt), ErrNotSupported + } return s.Internal.MsigGetVested(p0, p1, p2, p3) } func (s *FullNodeStub) MsigGetVested(p0 context.Context, p1 address.Address, p2 types.TipSetKey, p3 types.TipSetKey) (types.BigInt, error) { - return *new(types.BigInt), xerrors.New("method not supported") + return *new(types.BigInt), ErrNotSupported } func (s *FullNodeStruct) MsigGetVestingSchedule(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MsigVesting, error) { + if s.Internal.MsigGetVestingSchedule == nil { + return *new(MsigVesting), ErrNotSupported + } return s.Internal.MsigGetVestingSchedule(p0, p1, p2) } func (s *FullNodeStub) MsigGetVestingSchedule(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MsigVesting, error) { - return *new(MsigVesting), xerrors.New("method not supported") + return *new(MsigVesting), ErrNotSupported } func (s *FullNodeStruct) MsigPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt, p4 address.Address, p5 uint64, p6 []byte) (*MessagePrototype, error) { + if s.Internal.MsigPropose == nil { + return nil, ErrNotSupported + } return s.Internal.MsigPropose(p0, p1, p2, p3, p4, p5, p6) } func (s *FullNodeStub) MsigPropose(p0 context.Context, p1 
address.Address, p2 address.Address, p3 types.BigInt, p4 address.Address, p5 uint64, p6 []byte) (*MessagePrototype, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) MsigRemoveSigner(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (*MessagePrototype, error) { + if s.Internal.MsigRemoveSigner == nil { + return nil, ErrNotSupported + } return s.Internal.MsigRemoveSigner(p0, p1, p2, p3, p4) } func (s *FullNodeStub) MsigRemoveSigner(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (*MessagePrototype, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) MsigSwapApprove(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 address.Address) (*MessagePrototype, error) { + if s.Internal.MsigSwapApprove == nil { + return nil, ErrNotSupported + } return s.Internal.MsigSwapApprove(p0, p1, p2, p3, p4, p5, p6) } func (s *FullNodeStub) MsigSwapApprove(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 address.Address) (*MessagePrototype, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) MsigSwapCancel(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address) (*MessagePrototype, error) { + if s.Internal.MsigSwapCancel == nil { + return nil, ErrNotSupported + } return s.Internal.MsigSwapCancel(p0, p1, p2, p3, p4, p5) } func (s *FullNodeStub) MsigSwapCancel(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address) (*MessagePrototype, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) MsigSwapPropose(p0 context.Context, p1 
address.Address, p2 address.Address, p3 address.Address, p4 address.Address) (*MessagePrototype, error) { + if s.Internal.MsigSwapPropose == nil { + return nil, ErrNotSupported + } return s.Internal.MsigSwapPropose(p0, p1, p2, p3, p4) } func (s *FullNodeStub) MsigSwapPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 address.Address) (*MessagePrototype, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) NodeStatus(p0 context.Context, p1 bool) (NodeStatus, error) { + if s.Internal.NodeStatus == nil { + return *new(NodeStatus), ErrNotSupported + } return s.Internal.NodeStatus(p0, p1) } func (s *FullNodeStub) NodeStatus(p0 context.Context, p1 bool) (NodeStatus, error) { - return *new(NodeStatus), xerrors.New("method not supported") + return *new(NodeStatus), ErrNotSupported } func (s *FullNodeStruct) PaychAllocateLane(p0 context.Context, p1 address.Address) (uint64, error) { + if s.Internal.PaychAllocateLane == nil { + return 0, ErrNotSupported + } return s.Internal.PaychAllocateLane(p0, p1) } func (s *FullNodeStub) PaychAllocateLane(p0 context.Context, p1 address.Address) (uint64, error) { - return 0, xerrors.New("method not supported") + return 0, ErrNotSupported } func (s *FullNodeStruct) PaychAvailableFunds(p0 context.Context, p1 address.Address) (*ChannelAvailableFunds, error) { + if s.Internal.PaychAvailableFunds == nil { + return nil, ErrNotSupported + } return s.Internal.PaychAvailableFunds(p0, p1) } func (s *FullNodeStub) PaychAvailableFunds(p0 context.Context, p1 address.Address) (*ChannelAvailableFunds, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) PaychAvailableFundsByFromTo(p0 context.Context, p1 address.Address, p2 address.Address) (*ChannelAvailableFunds, error) { + if s.Internal.PaychAvailableFundsByFromTo == nil { + return nil, ErrNotSupported + } return 
s.Internal.PaychAvailableFundsByFromTo(p0, p1, p2) } func (s *FullNodeStub) PaychAvailableFundsByFromTo(p0 context.Context, p1 address.Address, p2 address.Address) (*ChannelAvailableFunds, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) PaychCollect(p0 context.Context, p1 address.Address) (cid.Cid, error) { + if s.Internal.PaychCollect == nil { + return *new(cid.Cid), ErrNotSupported + } return s.Internal.PaychCollect(p0, p1) } func (s *FullNodeStub) PaychCollect(p0 context.Context, p1 address.Address) (cid.Cid, error) { - return *new(cid.Cid), xerrors.New("method not supported") + return *new(cid.Cid), ErrNotSupported } func (s *FullNodeStruct) PaychGet(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (*ChannelInfo, error) { + if s.Internal.PaychGet == nil { + return nil, ErrNotSupported + } return s.Internal.PaychGet(p0, p1, p2, p3) } func (s *FullNodeStub) PaychGet(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (*ChannelInfo, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) PaychGetWaitReady(p0 context.Context, p1 cid.Cid) (address.Address, error) { + if s.Internal.PaychGetWaitReady == nil { + return *new(address.Address), ErrNotSupported + } return s.Internal.PaychGetWaitReady(p0, p1) } func (s *FullNodeStub) PaychGetWaitReady(p0 context.Context, p1 cid.Cid) (address.Address, error) { - return *new(address.Address), xerrors.New("method not supported") + return *new(address.Address), ErrNotSupported } func (s *FullNodeStruct) PaychList(p0 context.Context) ([]address.Address, error) { + if s.Internal.PaychList == nil { + return *new([]address.Address), ErrNotSupported + } return s.Internal.PaychList(p0) } func (s *FullNodeStub) PaychList(p0 context.Context) ([]address.Address, error) { - return *new([]address.Address), xerrors.New("method not supported") + return 
*new([]address.Address), ErrNotSupported } func (s *FullNodeStruct) PaychNewPayment(p0 context.Context, p1 address.Address, p2 address.Address, p3 []VoucherSpec) (*PaymentInfo, error) { + if s.Internal.PaychNewPayment == nil { + return nil, ErrNotSupported + } return s.Internal.PaychNewPayment(p0, p1, p2, p3) } func (s *FullNodeStub) PaychNewPayment(p0 context.Context, p1 address.Address, p2 address.Address, p3 []VoucherSpec) (*PaymentInfo, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) PaychSettle(p0 context.Context, p1 address.Address) (cid.Cid, error) { + if s.Internal.PaychSettle == nil { + return *new(cid.Cid), ErrNotSupported + } return s.Internal.PaychSettle(p0, p1) } func (s *FullNodeStub) PaychSettle(p0 context.Context, p1 address.Address) (cid.Cid, error) { - return *new(cid.Cid), xerrors.New("method not supported") + return *new(cid.Cid), ErrNotSupported } func (s *FullNodeStruct) PaychStatus(p0 context.Context, p1 address.Address) (*PaychStatus, error) { + if s.Internal.PaychStatus == nil { + return nil, ErrNotSupported + } return s.Internal.PaychStatus(p0, p1) } func (s *FullNodeStub) PaychStatus(p0 context.Context, p1 address.Address) (*PaychStatus, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) PaychVoucherAdd(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 types.BigInt) (types.BigInt, error) { + if s.Internal.PaychVoucherAdd == nil { + return *new(types.BigInt), ErrNotSupported + } return s.Internal.PaychVoucherAdd(p0, p1, p2, p3, p4) } func (s *FullNodeStub) PaychVoucherAdd(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 types.BigInt) (types.BigInt, error) { - return *new(types.BigInt), xerrors.New("method not supported") + return *new(types.BigInt), ErrNotSupported } func (s *FullNodeStruct) PaychVoucherCheckSpendable(p0 context.Context, p1 
address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 []byte) (bool, error) { + if s.Internal.PaychVoucherCheckSpendable == nil { + return false, ErrNotSupported + } return s.Internal.PaychVoucherCheckSpendable(p0, p1, p2, p3, p4) } func (s *FullNodeStub) PaychVoucherCheckSpendable(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 []byte) (bool, error) { - return false, xerrors.New("method not supported") + return false, ErrNotSupported } func (s *FullNodeStruct) PaychVoucherCheckValid(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher) error { + if s.Internal.PaychVoucherCheckValid == nil { + return ErrNotSupported + } return s.Internal.PaychVoucherCheckValid(p0, p1, p2) } func (s *FullNodeStub) PaychVoucherCheckValid(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *FullNodeStruct) PaychVoucherCreate(p0 context.Context, p1 address.Address, p2 types.BigInt, p3 uint64) (*VoucherCreateResult, error) { + if s.Internal.PaychVoucherCreate == nil { + return nil, ErrNotSupported + } return s.Internal.PaychVoucherCreate(p0, p1, p2, p3) } func (s *FullNodeStub) PaychVoucherCreate(p0 context.Context, p1 address.Address, p2 types.BigInt, p3 uint64) (*VoucherCreateResult, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) PaychVoucherList(p0 context.Context, p1 address.Address) ([]*paych.SignedVoucher, error) { + if s.Internal.PaychVoucherList == nil { + return *new([]*paych.SignedVoucher), ErrNotSupported + } return s.Internal.PaychVoucherList(p0, p1) } func (s *FullNodeStub) PaychVoucherList(p0 context.Context, p1 address.Address) ([]*paych.SignedVoucher, error) { - return *new([]*paych.SignedVoucher), xerrors.New("method not supported") + return *new([]*paych.SignedVoucher), ErrNotSupported } func (s *FullNodeStruct) PaychVoucherSubmit(p0 context.Context, p1 
address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 []byte) (cid.Cid, error) { + if s.Internal.PaychVoucherSubmit == nil { + return *new(cid.Cid), ErrNotSupported + } return s.Internal.PaychVoucherSubmit(p0, p1, p2, p3, p4) } func (s *FullNodeStub) PaychVoucherSubmit(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 []byte) (cid.Cid, error) { - return *new(cid.Cid), xerrors.New("method not supported") + return *new(cid.Cid), ErrNotSupported } func (s *FullNodeStruct) StateAccountKey(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) { + if s.Internal.StateAccountKey == nil { + return *new(address.Address), ErrNotSupported + } return s.Internal.StateAccountKey(p0, p1, p2) } func (s *FullNodeStub) StateAccountKey(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) { - return *new(address.Address), xerrors.New("method not supported") + return *new(address.Address), ErrNotSupported } func (s *FullNodeStruct) StateAllMinerFaults(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) ([]*Fault, error) { + if s.Internal.StateAllMinerFaults == nil { + return *new([]*Fault), ErrNotSupported + } return s.Internal.StateAllMinerFaults(p0, p1, p2) } func (s *FullNodeStub) StateAllMinerFaults(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) ([]*Fault, error) { - return *new([]*Fault), xerrors.New("method not supported") + return *new([]*Fault), ErrNotSupported } func (s *FullNodeStruct) StateCall(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (*InvocResult, error) { + if s.Internal.StateCall == nil { + return nil, ErrNotSupported + } return s.Internal.StateCall(p0, p1, p2) } func (s *FullNodeStub) StateCall(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (*InvocResult, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) StateChangedActors(p0 context.Context, p1 cid.Cid, p2 cid.Cid) 
(map[string]types.Actor, error) { + if s.Internal.StateChangedActors == nil { + return *new(map[string]types.Actor), ErrNotSupported + } return s.Internal.StateChangedActors(p0, p1, p2) } func (s *FullNodeStub) StateChangedActors(p0 context.Context, p1 cid.Cid, p2 cid.Cid) (map[string]types.Actor, error) { - return *new(map[string]types.Actor), xerrors.New("method not supported") + return *new(map[string]types.Actor), ErrNotSupported } func (s *FullNodeStruct) StateCirculatingSupply(p0 context.Context, p1 types.TipSetKey) (abi.TokenAmount, error) { + if s.Internal.StateCirculatingSupply == nil { + return *new(abi.TokenAmount), ErrNotSupported + } return s.Internal.StateCirculatingSupply(p0, p1) } func (s *FullNodeStub) StateCirculatingSupply(p0 context.Context, p1 types.TipSetKey) (abi.TokenAmount, error) { - return *new(abi.TokenAmount), xerrors.New("method not supported") + return *new(abi.TokenAmount), ErrNotSupported } func (s *FullNodeStruct) StateCompute(p0 context.Context, p1 abi.ChainEpoch, p2 []*types.Message, p3 types.TipSetKey) (*ComputeStateOutput, error) { + if s.Internal.StateCompute == nil { + return nil, ErrNotSupported + } return s.Internal.StateCompute(p0, p1, p2, p3) } func (s *FullNodeStub) StateCompute(p0 context.Context, p1 abi.ChainEpoch, p2 []*types.Message, p3 types.TipSetKey) (*ComputeStateOutput, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) StateDealProviderCollateralBounds(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (DealCollateralBounds, error) { + if s.Internal.StateDealProviderCollateralBounds == nil { + return *new(DealCollateralBounds), ErrNotSupported + } return s.Internal.StateDealProviderCollateralBounds(p0, p1, p2, p3) } func (s *FullNodeStub) StateDealProviderCollateralBounds(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (DealCollateralBounds, error) { - return *new(DealCollateralBounds), 
xerrors.New("method not supported") + return *new(DealCollateralBounds), ErrNotSupported } func (s *FullNodeStruct) StateDecodeParams(p0 context.Context, p1 address.Address, p2 abi.MethodNum, p3 []byte, p4 types.TipSetKey) (interface{}, error) { + if s.Internal.StateDecodeParams == nil { + return nil, ErrNotSupported + } return s.Internal.StateDecodeParams(p0, p1, p2, p3, p4) } func (s *FullNodeStub) StateDecodeParams(p0 context.Context, p1 address.Address, p2 abi.MethodNum, p3 []byte, p4 types.TipSetKey) (interface{}, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) StateGetActor(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) { + if s.Internal.StateGetActor == nil { + return nil, ErrNotSupported + } return s.Internal.StateGetActor(p0, p1, p2) } func (s *FullNodeStub) StateGetActor(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) StateListActors(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) { + if s.Internal.StateListActors == nil { + return *new([]address.Address), ErrNotSupported + } return s.Internal.StateListActors(p0, p1) } func (s *FullNodeStub) StateListActors(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) { - return *new([]address.Address), xerrors.New("method not supported") + return *new([]address.Address), ErrNotSupported } func (s *FullNodeStruct) StateListMessages(p0 context.Context, p1 *MessageMatch, p2 types.TipSetKey, p3 abi.ChainEpoch) ([]cid.Cid, error) { + if s.Internal.StateListMessages == nil { + return *new([]cid.Cid), ErrNotSupported + } return s.Internal.StateListMessages(p0, p1, p2, p3) } func (s *FullNodeStub) StateListMessages(p0 context.Context, p1 *MessageMatch, p2 types.TipSetKey, p3 abi.ChainEpoch) ([]cid.Cid, error) { - return *new([]cid.Cid), 
xerrors.New("method not supported") + return *new([]cid.Cid), ErrNotSupported } func (s *FullNodeStruct) StateListMiners(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) { + if s.Internal.StateListMiners == nil { + return *new([]address.Address), ErrNotSupported + } return s.Internal.StateListMiners(p0, p1) } func (s *FullNodeStub) StateListMiners(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) { - return *new([]address.Address), xerrors.New("method not supported") + return *new([]address.Address), ErrNotSupported } func (s *FullNodeStruct) StateLookupID(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) { + if s.Internal.StateLookupID == nil { + return *new(address.Address), ErrNotSupported + } return s.Internal.StateLookupID(p0, p1, p2) } func (s *FullNodeStub) StateLookupID(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) { - return *new(address.Address), xerrors.New("method not supported") + return *new(address.Address), ErrNotSupported } func (s *FullNodeStruct) StateMarketBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MarketBalance, error) { + if s.Internal.StateMarketBalance == nil { + return *new(MarketBalance), ErrNotSupported + } return s.Internal.StateMarketBalance(p0, p1, p2) } func (s *FullNodeStub) StateMarketBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MarketBalance, error) { - return *new(MarketBalance), xerrors.New("method not supported") + return *new(MarketBalance), ErrNotSupported } func (s *FullNodeStruct) StateMarketDeals(p0 context.Context, p1 types.TipSetKey) (map[string]MarketDeal, error) { + if s.Internal.StateMarketDeals == nil { + return *new(map[string]MarketDeal), ErrNotSupported + } return s.Internal.StateMarketDeals(p0, p1) } func (s *FullNodeStub) StateMarketDeals(p0 context.Context, p1 types.TipSetKey) (map[string]MarketDeal, error) { - return *new(map[string]MarketDeal), 
xerrors.New("method not supported") + return *new(map[string]MarketDeal), ErrNotSupported } func (s *FullNodeStruct) StateMarketParticipants(p0 context.Context, p1 types.TipSetKey) (map[string]MarketBalance, error) { + if s.Internal.StateMarketParticipants == nil { + return *new(map[string]MarketBalance), ErrNotSupported + } return s.Internal.StateMarketParticipants(p0, p1) } func (s *FullNodeStub) StateMarketParticipants(p0 context.Context, p1 types.TipSetKey) (map[string]MarketBalance, error) { - return *new(map[string]MarketBalance), xerrors.New("method not supported") + return *new(map[string]MarketBalance), ErrNotSupported } func (s *FullNodeStruct) StateMarketStorageDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*MarketDeal, error) { + if s.Internal.StateMarketStorageDeal == nil { + return nil, ErrNotSupported + } return s.Internal.StateMarketStorageDeal(p0, p1, p2) } func (s *FullNodeStub) StateMarketStorageDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*MarketDeal, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) StateMinerActiveSectors(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) { + if s.Internal.StateMinerActiveSectors == nil { + return *new([]*miner.SectorOnChainInfo), ErrNotSupported + } return s.Internal.StateMinerActiveSectors(p0, p1, p2) } func (s *FullNodeStub) StateMinerActiveSectors(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) { - return *new([]*miner.SectorOnChainInfo), xerrors.New("method not supported") + return *new([]*miner.SectorOnChainInfo), ErrNotSupported } func (s *FullNodeStruct) StateMinerAvailableBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) { + if s.Internal.StateMinerAvailableBalance == nil { + return *new(types.BigInt), ErrNotSupported + } return s.Internal.StateMinerAvailableBalance(p0, p1, 
p2) } func (s *FullNodeStub) StateMinerAvailableBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) { - return *new(types.BigInt), xerrors.New("method not supported") + return *new(types.BigInt), ErrNotSupported } func (s *FullNodeStruct) StateMinerDeadlines(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]Deadline, error) { + if s.Internal.StateMinerDeadlines == nil { + return *new([]Deadline), ErrNotSupported + } return s.Internal.StateMinerDeadlines(p0, p1, p2) } func (s *FullNodeStub) StateMinerDeadlines(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]Deadline, error) { - return *new([]Deadline), xerrors.New("method not supported") + return *new([]Deadline), ErrNotSupported } func (s *FullNodeStruct) StateMinerFaults(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (bitfield.BitField, error) { + if s.Internal.StateMinerFaults == nil { + return *new(bitfield.BitField), ErrNotSupported + } return s.Internal.StateMinerFaults(p0, p1, p2) } func (s *FullNodeStub) StateMinerFaults(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (bitfield.BitField, error) { - return *new(bitfield.BitField), xerrors.New("method not supported") + return *new(bitfield.BitField), ErrNotSupported } func (s *FullNodeStruct) StateMinerInfo(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (miner.MinerInfo, error) { + if s.Internal.StateMinerInfo == nil { + return *new(miner.MinerInfo), ErrNotSupported + } return s.Internal.StateMinerInfo(p0, p1, p2) } func (s *FullNodeStub) StateMinerInfo(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (miner.MinerInfo, error) { - return *new(miner.MinerInfo), xerrors.New("method not supported") + return *new(miner.MinerInfo), ErrNotSupported } func (s *FullNodeStruct) StateMinerInitialPledgeCollateral(p0 context.Context, p1 address.Address, p2 miner.SectorPreCommitInfo, p3 types.TipSetKey) (types.BigInt, error) { + if 
s.Internal.StateMinerInitialPledgeCollateral == nil { + return *new(types.BigInt), ErrNotSupported + } return s.Internal.StateMinerInitialPledgeCollateral(p0, p1, p2, p3) } func (s *FullNodeStub) StateMinerInitialPledgeCollateral(p0 context.Context, p1 address.Address, p2 miner.SectorPreCommitInfo, p3 types.TipSetKey) (types.BigInt, error) { - return *new(types.BigInt), xerrors.New("method not supported") + return *new(types.BigInt), ErrNotSupported } func (s *FullNodeStruct) StateMinerPartitions(p0 context.Context, p1 address.Address, p2 uint64, p3 types.TipSetKey) ([]Partition, error) { + if s.Internal.StateMinerPartitions == nil { + return *new([]Partition), ErrNotSupported + } return s.Internal.StateMinerPartitions(p0, p1, p2, p3) } func (s *FullNodeStub) StateMinerPartitions(p0 context.Context, p1 address.Address, p2 uint64, p3 types.TipSetKey) ([]Partition, error) { - return *new([]Partition), xerrors.New("method not supported") + return *new([]Partition), ErrNotSupported } func (s *FullNodeStruct) StateMinerPower(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*MinerPower, error) { + if s.Internal.StateMinerPower == nil { + return nil, ErrNotSupported + } return s.Internal.StateMinerPower(p0, p1, p2) } func (s *FullNodeStub) StateMinerPower(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*MinerPower, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) StateMinerPreCommitDepositForPower(p0 context.Context, p1 address.Address, p2 miner.SectorPreCommitInfo, p3 types.TipSetKey) (types.BigInt, error) { + if s.Internal.StateMinerPreCommitDepositForPower == nil { + return *new(types.BigInt), ErrNotSupported + } return s.Internal.StateMinerPreCommitDepositForPower(p0, p1, p2, p3) } func (s *FullNodeStub) StateMinerPreCommitDepositForPower(p0 context.Context, p1 address.Address, p2 miner.SectorPreCommitInfo, p3 types.TipSetKey) (types.BigInt, error) { - return 
*new(types.BigInt), xerrors.New("method not supported") + return *new(types.BigInt), ErrNotSupported } func (s *FullNodeStruct) StateMinerProvingDeadline(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) { + if s.Internal.StateMinerProvingDeadline == nil { + return nil, ErrNotSupported + } return s.Internal.StateMinerProvingDeadline(p0, p1, p2) } func (s *FullNodeStub) StateMinerProvingDeadline(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) StateMinerRecoveries(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (bitfield.BitField, error) { + if s.Internal.StateMinerRecoveries == nil { + return *new(bitfield.BitField), ErrNotSupported + } return s.Internal.StateMinerRecoveries(p0, p1, p2) } func (s *FullNodeStub) StateMinerRecoveries(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (bitfield.BitField, error) { - return *new(bitfield.BitField), xerrors.New("method not supported") + return *new(bitfield.BitField), ErrNotSupported } func (s *FullNodeStruct) StateMinerSectorAllocated(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (bool, error) { + if s.Internal.StateMinerSectorAllocated == nil { + return false, ErrNotSupported + } return s.Internal.StateMinerSectorAllocated(p0, p1, p2, p3) } func (s *FullNodeStub) StateMinerSectorAllocated(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (bool, error) { - return false, xerrors.New("method not supported") + return false, ErrNotSupported } func (s *FullNodeStruct) StateMinerSectorCount(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MinerSectors, error) { + if s.Internal.StateMinerSectorCount == nil { + return *new(MinerSectors), ErrNotSupported + } return s.Internal.StateMinerSectorCount(p0, p1, p2) } func (s *FullNodeStub) StateMinerSectorCount(p0 
context.Context, p1 address.Address, p2 types.TipSetKey) (MinerSectors, error) { - return *new(MinerSectors), xerrors.New("method not supported") + return *new(MinerSectors), ErrNotSupported } func (s *FullNodeStruct) StateMinerSectors(p0 context.Context, p1 address.Address, p2 *bitfield.BitField, p3 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) { + if s.Internal.StateMinerSectors == nil { + return *new([]*miner.SectorOnChainInfo), ErrNotSupported + } return s.Internal.StateMinerSectors(p0, p1, p2, p3) } func (s *FullNodeStub) StateMinerSectors(p0 context.Context, p1 address.Address, p2 *bitfield.BitField, p3 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) { - return *new([]*miner.SectorOnChainInfo), xerrors.New("method not supported") + return *new([]*miner.SectorOnChainInfo), ErrNotSupported } func (s *FullNodeStruct) StateNetworkName(p0 context.Context) (dtypes.NetworkName, error) { + if s.Internal.StateNetworkName == nil { + return *new(dtypes.NetworkName), ErrNotSupported + } return s.Internal.StateNetworkName(p0) } func (s *FullNodeStub) StateNetworkName(p0 context.Context) (dtypes.NetworkName, error) { - return *new(dtypes.NetworkName), xerrors.New("method not supported") + return *new(dtypes.NetworkName), ErrNotSupported } func (s *FullNodeStruct) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (apitypes.NetworkVersion, error) { + if s.Internal.StateNetworkVersion == nil { + return *new(apitypes.NetworkVersion), ErrNotSupported + } return s.Internal.StateNetworkVersion(p0, p1) } func (s *FullNodeStub) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (apitypes.NetworkVersion, error) { - return *new(apitypes.NetworkVersion), xerrors.New("method not supported") + return *new(apitypes.NetworkVersion), ErrNotSupported } func (s *FullNodeStruct) StateReadState(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*ActorState, error) { + if s.Internal.StateReadState == nil { + return nil, ErrNotSupported + } return 
s.Internal.StateReadState(p0, p1, p2) } func (s *FullNodeStub) StateReadState(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*ActorState, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) StateReplay(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid) (*InvocResult, error) { + if s.Internal.StateReplay == nil { + return nil, ErrNotSupported + } return s.Internal.StateReplay(p0, p1, p2) } func (s *FullNodeStub) StateReplay(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid) (*InvocResult, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) StateSearchMsg(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) { + if s.Internal.StateSearchMsg == nil { + return nil, ErrNotSupported + } return s.Internal.StateSearchMsg(p0, p1, p2, p3, p4) } func (s *FullNodeStub) StateSearchMsg(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) StateSectorExpiration(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorExpiration, error) { + if s.Internal.StateSectorExpiration == nil { + return nil, ErrNotSupported + } return s.Internal.StateSectorExpiration(p0, p1, p2, p3) } func (s *FullNodeStub) StateSectorExpiration(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorExpiration, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) StateSectorGetInfo(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorOnChainInfo, error) { + if s.Internal.StateSectorGetInfo == nil { + return nil, ErrNotSupported + } return s.Internal.StateSectorGetInfo(p0, p1, p2, p3) } 
func (s *FullNodeStub) StateSectorGetInfo(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorOnChainInfo, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) StateSectorPartition(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorLocation, error) { + if s.Internal.StateSectorPartition == nil { + return nil, ErrNotSupported + } return s.Internal.StateSectorPartition(p0, p1, p2, p3) } func (s *FullNodeStub) StateSectorPartition(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorLocation, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) StateSectorPreCommitInfo(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) { + if s.Internal.StateSectorPreCommitInfo == nil { + return *new(miner.SectorPreCommitOnChainInfo), ErrNotSupported + } return s.Internal.StateSectorPreCommitInfo(p0, p1, p2, p3) } func (s *FullNodeStub) StateSectorPreCommitInfo(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) { - return *new(miner.SectorPreCommitOnChainInfo), xerrors.New("method not supported") + return *new(miner.SectorPreCommitOnChainInfo), ErrNotSupported } func (s *FullNodeStruct) StateVMCirculatingSupplyInternal(p0 context.Context, p1 types.TipSetKey) (CirculatingSupply, error) { + if s.Internal.StateVMCirculatingSupplyInternal == nil { + return *new(CirculatingSupply), ErrNotSupported + } return s.Internal.StateVMCirculatingSupplyInternal(p0, p1) } func (s *FullNodeStub) StateVMCirculatingSupplyInternal(p0 context.Context, p1 types.TipSetKey) (CirculatingSupply, error) { - return *new(CirculatingSupply), xerrors.New("method not supported") + return *new(CirculatingSupply), 
ErrNotSupported } func (s *FullNodeStruct) StateVerifiedClientStatus(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) { + if s.Internal.StateVerifiedClientStatus == nil { + return nil, ErrNotSupported + } return s.Internal.StateVerifiedClientStatus(p0, p1, p2) } func (s *FullNodeStub) StateVerifiedClientStatus(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) StateVerifiedRegistryRootKey(p0 context.Context, p1 types.TipSetKey) (address.Address, error) { + if s.Internal.StateVerifiedRegistryRootKey == nil { + return *new(address.Address), ErrNotSupported + } return s.Internal.StateVerifiedRegistryRootKey(p0, p1) } func (s *FullNodeStub) StateVerifiedRegistryRootKey(p0 context.Context, p1 types.TipSetKey) (address.Address, error) { - return *new(address.Address), xerrors.New("method not supported") + return *new(address.Address), ErrNotSupported } func (s *FullNodeStruct) StateVerifierStatus(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) { + if s.Internal.StateVerifierStatus == nil { + return nil, ErrNotSupported + } return s.Internal.StateVerifierStatus(p0, p1, p2) } func (s *FullNodeStub) StateVerifierStatus(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) StateWaitMsg(p0 context.Context, p1 cid.Cid, p2 uint64, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) { + if s.Internal.StateWaitMsg == nil { + return nil, ErrNotSupported + } return s.Internal.StateWaitMsg(p0, p1, p2, p3, p4) } func (s *FullNodeStub) StateWaitMsg(p0 context.Context, p1 cid.Cid, p2 uint64, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s 
*FullNodeStruct) SyncCheckBad(p0 context.Context, p1 cid.Cid) (string, error) { + if s.Internal.SyncCheckBad == nil { + return "", ErrNotSupported + } return s.Internal.SyncCheckBad(p0, p1) } func (s *FullNodeStub) SyncCheckBad(p0 context.Context, p1 cid.Cid) (string, error) { - return "", xerrors.New("method not supported") + return "", ErrNotSupported } func (s *FullNodeStruct) SyncCheckpoint(p0 context.Context, p1 types.TipSetKey) error { + if s.Internal.SyncCheckpoint == nil { + return ErrNotSupported + } return s.Internal.SyncCheckpoint(p0, p1) } func (s *FullNodeStub) SyncCheckpoint(p0 context.Context, p1 types.TipSetKey) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *FullNodeStruct) SyncIncomingBlocks(p0 context.Context) (<-chan *types.BlockHeader, error) { + if s.Internal.SyncIncomingBlocks == nil { + return nil, ErrNotSupported + } return s.Internal.SyncIncomingBlocks(p0) } func (s *FullNodeStub) SyncIncomingBlocks(p0 context.Context) (<-chan *types.BlockHeader, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) SyncMarkBad(p0 context.Context, p1 cid.Cid) error { + if s.Internal.SyncMarkBad == nil { + return ErrNotSupported + } return s.Internal.SyncMarkBad(p0, p1) } func (s *FullNodeStub) SyncMarkBad(p0 context.Context, p1 cid.Cid) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *FullNodeStruct) SyncState(p0 context.Context) (*SyncState, error) { + if s.Internal.SyncState == nil { + return nil, ErrNotSupported + } return s.Internal.SyncState(p0) } func (s *FullNodeStub) SyncState(p0 context.Context) (*SyncState, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) SyncSubmitBlock(p0 context.Context, p1 *types.BlockMsg) error { + if s.Internal.SyncSubmitBlock == nil { + return ErrNotSupported + } return s.Internal.SyncSubmitBlock(p0, p1) } func (s 
*FullNodeStub) SyncSubmitBlock(p0 context.Context, p1 *types.BlockMsg) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *FullNodeStruct) SyncUnmarkAllBad(p0 context.Context) error { + if s.Internal.SyncUnmarkAllBad == nil { + return ErrNotSupported + } return s.Internal.SyncUnmarkAllBad(p0) } func (s *FullNodeStub) SyncUnmarkAllBad(p0 context.Context) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *FullNodeStruct) SyncUnmarkBad(p0 context.Context, p1 cid.Cid) error { + if s.Internal.SyncUnmarkBad == nil { + return ErrNotSupported + } return s.Internal.SyncUnmarkBad(p0, p1) } func (s *FullNodeStub) SyncUnmarkBad(p0 context.Context, p1 cid.Cid) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *FullNodeStruct) SyncValidateTipset(p0 context.Context, p1 types.TipSetKey) (bool, error) { + if s.Internal.SyncValidateTipset == nil { + return false, ErrNotSupported + } return s.Internal.SyncValidateTipset(p0, p1) } func (s *FullNodeStub) SyncValidateTipset(p0 context.Context, p1 types.TipSetKey) (bool, error) { - return false, xerrors.New("method not supported") + return false, ErrNotSupported } func (s *FullNodeStruct) WalletBalance(p0 context.Context, p1 address.Address) (types.BigInt, error) { + if s.Internal.WalletBalance == nil { + return *new(types.BigInt), ErrNotSupported + } return s.Internal.WalletBalance(p0, p1) } func (s *FullNodeStub) WalletBalance(p0 context.Context, p1 address.Address) (types.BigInt, error) { - return *new(types.BigInt), xerrors.New("method not supported") + return *new(types.BigInt), ErrNotSupported } func (s *FullNodeStruct) WalletDefaultAddress(p0 context.Context) (address.Address, error) { + if s.Internal.WalletDefaultAddress == nil { + return *new(address.Address), ErrNotSupported + } return s.Internal.WalletDefaultAddress(p0) } func (s *FullNodeStub) WalletDefaultAddress(p0 context.Context) (address.Address, error) { - 
return *new(address.Address), xerrors.New("method not supported") + return *new(address.Address), ErrNotSupported } func (s *FullNodeStruct) WalletDelete(p0 context.Context, p1 address.Address) error { + if s.Internal.WalletDelete == nil { + return ErrNotSupported + } return s.Internal.WalletDelete(p0, p1) } func (s *FullNodeStub) WalletDelete(p0 context.Context, p1 address.Address) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *FullNodeStruct) WalletExport(p0 context.Context, p1 address.Address) (*types.KeyInfo, error) { + if s.Internal.WalletExport == nil { + return nil, ErrNotSupported + } return s.Internal.WalletExport(p0, p1) } func (s *FullNodeStub) WalletExport(p0 context.Context, p1 address.Address) (*types.KeyInfo, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) WalletHas(p0 context.Context, p1 address.Address) (bool, error) { + if s.Internal.WalletHas == nil { + return false, ErrNotSupported + } return s.Internal.WalletHas(p0, p1) } func (s *FullNodeStub) WalletHas(p0 context.Context, p1 address.Address) (bool, error) { - return false, xerrors.New("method not supported") + return false, ErrNotSupported } func (s *FullNodeStruct) WalletImport(p0 context.Context, p1 *types.KeyInfo) (address.Address, error) { + if s.Internal.WalletImport == nil { + return *new(address.Address), ErrNotSupported + } return s.Internal.WalletImport(p0, p1) } func (s *FullNodeStub) WalletImport(p0 context.Context, p1 *types.KeyInfo) (address.Address, error) { - return *new(address.Address), xerrors.New("method not supported") + return *new(address.Address), ErrNotSupported } func (s *FullNodeStruct) WalletList(p0 context.Context) ([]address.Address, error) { + if s.Internal.WalletList == nil { + return *new([]address.Address), ErrNotSupported + } return s.Internal.WalletList(p0) } func (s *FullNodeStub) WalletList(p0 context.Context) ([]address.Address, error) { - return 
*new([]address.Address), xerrors.New("method not supported") + return *new([]address.Address), ErrNotSupported } func (s *FullNodeStruct) WalletNew(p0 context.Context, p1 types.KeyType) (address.Address, error) { + if s.Internal.WalletNew == nil { + return *new(address.Address), ErrNotSupported + } return s.Internal.WalletNew(p0, p1) } func (s *FullNodeStub) WalletNew(p0 context.Context, p1 types.KeyType) (address.Address, error) { - return *new(address.Address), xerrors.New("method not supported") + return *new(address.Address), ErrNotSupported } func (s *FullNodeStruct) WalletSetDefault(p0 context.Context, p1 address.Address) error { + if s.Internal.WalletSetDefault == nil { + return ErrNotSupported + } return s.Internal.WalletSetDefault(p0, p1) } func (s *FullNodeStub) WalletSetDefault(p0 context.Context, p1 address.Address) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *FullNodeStruct) WalletSign(p0 context.Context, p1 address.Address, p2 []byte) (*crypto.Signature, error) { + if s.Internal.WalletSign == nil { + return nil, ErrNotSupported + } return s.Internal.WalletSign(p0, p1, p2) } func (s *FullNodeStub) WalletSign(p0 context.Context, p1 address.Address, p2 []byte) (*crypto.Signature, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) WalletSignMessage(p0 context.Context, p1 address.Address, p2 *types.Message) (*types.SignedMessage, error) { + if s.Internal.WalletSignMessage == nil { + return nil, ErrNotSupported + } return s.Internal.WalletSignMessage(p0, p1, p2) } func (s *FullNodeStub) WalletSignMessage(p0 context.Context, p1 address.Address, p2 *types.Message) (*types.SignedMessage, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) WalletValidateAddress(p0 context.Context, p1 string) (address.Address, error) { + if s.Internal.WalletValidateAddress == nil { + return 
*new(address.Address), ErrNotSupported + } return s.Internal.WalletValidateAddress(p0, p1) } func (s *FullNodeStub) WalletValidateAddress(p0 context.Context, p1 string) (address.Address, error) { - return *new(address.Address), xerrors.New("method not supported") + return *new(address.Address), ErrNotSupported } func (s *FullNodeStruct) WalletVerify(p0 context.Context, p1 address.Address, p2 []byte, p3 *crypto.Signature) (bool, error) { + if s.Internal.WalletVerify == nil { + return false, ErrNotSupported + } return s.Internal.WalletVerify(p0, p1, p2, p3) } func (s *FullNodeStub) WalletVerify(p0 context.Context, p1 address.Address, p2 []byte, p3 *crypto.Signature) (bool, error) { - return false, xerrors.New("method not supported") + return false, ErrNotSupported } func (s *GatewayStruct) ChainGetBlockMessages(p0 context.Context, p1 cid.Cid) (*BlockMessages, error) { + if s.Internal.ChainGetBlockMessages == nil { + return nil, ErrNotSupported + } return s.Internal.ChainGetBlockMessages(p0, p1) } func (s *GatewayStub) ChainGetBlockMessages(p0 context.Context, p1 cid.Cid) (*BlockMessages, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *GatewayStruct) ChainGetMessage(p0 context.Context, p1 cid.Cid) (*types.Message, error) { + if s.Internal.ChainGetMessage == nil { + return nil, ErrNotSupported + } return s.Internal.ChainGetMessage(p0, p1) } func (s *GatewayStub) ChainGetMessage(p0 context.Context, p1 cid.Cid) (*types.Message, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *GatewayStruct) ChainGetTipSet(p0 context.Context, p1 types.TipSetKey) (*types.TipSet, error) { + if s.Internal.ChainGetTipSet == nil { + return nil, ErrNotSupported + } return s.Internal.ChainGetTipSet(p0, p1) } func (s *GatewayStub) ChainGetTipSet(p0 context.Context, p1 types.TipSetKey) (*types.TipSet, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func 
(s *GatewayStruct) ChainGetTipSetByHeight(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (*types.TipSet, error) { + if s.Internal.ChainGetTipSetByHeight == nil { + return nil, ErrNotSupported + } return s.Internal.ChainGetTipSetByHeight(p0, p1, p2) } func (s *GatewayStub) ChainGetTipSetByHeight(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (*types.TipSet, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *GatewayStruct) ChainHasObj(p0 context.Context, p1 cid.Cid) (bool, error) { + if s.Internal.ChainHasObj == nil { + return false, ErrNotSupported + } return s.Internal.ChainHasObj(p0, p1) } func (s *GatewayStub) ChainHasObj(p0 context.Context, p1 cid.Cid) (bool, error) { - return false, xerrors.New("method not supported") + return false, ErrNotSupported } func (s *GatewayStruct) ChainHead(p0 context.Context) (*types.TipSet, error) { + if s.Internal.ChainHead == nil { + return nil, ErrNotSupported + } return s.Internal.ChainHead(p0) } func (s *GatewayStub) ChainHead(p0 context.Context) (*types.TipSet, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *GatewayStruct) ChainNotify(p0 context.Context) (<-chan []*HeadChange, error) { + if s.Internal.ChainNotify == nil { + return nil, ErrNotSupported + } return s.Internal.ChainNotify(p0) } func (s *GatewayStub) ChainNotify(p0 context.Context) (<-chan []*HeadChange, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *GatewayStruct) ChainReadObj(p0 context.Context, p1 cid.Cid) ([]byte, error) { + if s.Internal.ChainReadObj == nil { + return *new([]byte), ErrNotSupported + } return s.Internal.ChainReadObj(p0, p1) } func (s *GatewayStub) ChainReadObj(p0 context.Context, p1 cid.Cid) ([]byte, error) { - return *new([]byte), xerrors.New("method not supported") + return *new([]byte), ErrNotSupported } func (s *GatewayStruct) GasEstimateMessageGas(p0 
context.Context, p1 *types.Message, p2 *MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) { + if s.Internal.GasEstimateMessageGas == nil { + return nil, ErrNotSupported + } return s.Internal.GasEstimateMessageGas(p0, p1, p2, p3) } func (s *GatewayStub) GasEstimateMessageGas(p0 context.Context, p1 *types.Message, p2 *MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *GatewayStruct) MpoolPush(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) { + if s.Internal.MpoolPush == nil { + return *new(cid.Cid), ErrNotSupported + } return s.Internal.MpoolPush(p0, p1) } func (s *GatewayStub) MpoolPush(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) { - return *new(cid.Cid), xerrors.New("method not supported") + return *new(cid.Cid), ErrNotSupported } func (s *GatewayStruct) MsigGetAvailableBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) { + if s.Internal.MsigGetAvailableBalance == nil { + return *new(types.BigInt), ErrNotSupported + } return s.Internal.MsigGetAvailableBalance(p0, p1, p2) } func (s *GatewayStub) MsigGetAvailableBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) { - return *new(types.BigInt), xerrors.New("method not supported") + return *new(types.BigInt), ErrNotSupported } func (s *GatewayStruct) MsigGetPending(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*MsigTransaction, error) { + if s.Internal.MsigGetPending == nil { + return *new([]*MsigTransaction), ErrNotSupported + } return s.Internal.MsigGetPending(p0, p1, p2) } func (s *GatewayStub) MsigGetPending(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*MsigTransaction, error) { - return *new([]*MsigTransaction), xerrors.New("method not supported") + return *new([]*MsigTransaction), ErrNotSupported } func (s *GatewayStruct) MsigGetVested(p0 context.Context, p1 
address.Address, p2 types.TipSetKey, p3 types.TipSetKey) (types.BigInt, error) { + if s.Internal.MsigGetVested == nil { + return *new(types.BigInt), ErrNotSupported + } return s.Internal.MsigGetVested(p0, p1, p2, p3) } func (s *GatewayStub) MsigGetVested(p0 context.Context, p1 address.Address, p2 types.TipSetKey, p3 types.TipSetKey) (types.BigInt, error) { - return *new(types.BigInt), xerrors.New("method not supported") + return *new(types.BigInt), ErrNotSupported } func (s *GatewayStruct) StateAccountKey(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) { + if s.Internal.StateAccountKey == nil { + return *new(address.Address), ErrNotSupported + } return s.Internal.StateAccountKey(p0, p1, p2) } func (s *GatewayStub) StateAccountKey(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) { - return *new(address.Address), xerrors.New("method not supported") + return *new(address.Address), ErrNotSupported } func (s *GatewayStruct) StateDealProviderCollateralBounds(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (DealCollateralBounds, error) { + if s.Internal.StateDealProviderCollateralBounds == nil { + return *new(DealCollateralBounds), ErrNotSupported + } return s.Internal.StateDealProviderCollateralBounds(p0, p1, p2, p3) } func (s *GatewayStub) StateDealProviderCollateralBounds(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (DealCollateralBounds, error) { - return *new(DealCollateralBounds), xerrors.New("method not supported") + return *new(DealCollateralBounds), ErrNotSupported } func (s *GatewayStruct) StateGetActor(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) { + if s.Internal.StateGetActor == nil { + return nil, ErrNotSupported + } return s.Internal.StateGetActor(p0, p1, p2) } func (s *GatewayStub) StateGetActor(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) { - return nil, 
xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *GatewayStruct) StateListMiners(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) { + if s.Internal.StateListMiners == nil { + return *new([]address.Address), ErrNotSupported + } return s.Internal.StateListMiners(p0, p1) } func (s *GatewayStub) StateListMiners(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) { - return *new([]address.Address), xerrors.New("method not supported") + return *new([]address.Address), ErrNotSupported } func (s *GatewayStruct) StateLookupID(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) { + if s.Internal.StateLookupID == nil { + return *new(address.Address), ErrNotSupported + } return s.Internal.StateLookupID(p0, p1, p2) } func (s *GatewayStub) StateLookupID(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) { - return *new(address.Address), xerrors.New("method not supported") + return *new(address.Address), ErrNotSupported } func (s *GatewayStruct) StateMarketBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MarketBalance, error) { + if s.Internal.StateMarketBalance == nil { + return *new(MarketBalance), ErrNotSupported + } return s.Internal.StateMarketBalance(p0, p1, p2) } func (s *GatewayStub) StateMarketBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MarketBalance, error) { - return *new(MarketBalance), xerrors.New("method not supported") + return *new(MarketBalance), ErrNotSupported } func (s *GatewayStruct) StateMarketStorageDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*MarketDeal, error) { + if s.Internal.StateMarketStorageDeal == nil { + return nil, ErrNotSupported + } return s.Internal.StateMarketStorageDeal(p0, p1, p2) } func (s *GatewayStub) StateMarketStorageDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*MarketDeal, error) { - return nil, xerrors.New("method not supported") + 
return nil, ErrNotSupported } func (s *GatewayStruct) StateMinerInfo(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (miner.MinerInfo, error) { + if s.Internal.StateMinerInfo == nil { + return *new(miner.MinerInfo), ErrNotSupported + } return s.Internal.StateMinerInfo(p0, p1, p2) } func (s *GatewayStub) StateMinerInfo(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (miner.MinerInfo, error) { - return *new(miner.MinerInfo), xerrors.New("method not supported") + return *new(miner.MinerInfo), ErrNotSupported } func (s *GatewayStruct) StateMinerPower(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*MinerPower, error) { + if s.Internal.StateMinerPower == nil { + return nil, ErrNotSupported + } return s.Internal.StateMinerPower(p0, p1, p2) } func (s *GatewayStub) StateMinerPower(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*MinerPower, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *GatewayStruct) StateMinerProvingDeadline(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) { + if s.Internal.StateMinerProvingDeadline == nil { + return nil, ErrNotSupported + } return s.Internal.StateMinerProvingDeadline(p0, p1, p2) } func (s *GatewayStub) StateMinerProvingDeadline(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *GatewayStruct) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (apitypes.NetworkVersion, error) { + if s.Internal.StateNetworkVersion == nil { + return *new(apitypes.NetworkVersion), ErrNotSupported + } return s.Internal.StateNetworkVersion(p0, p1) } func (s *GatewayStub) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (apitypes.NetworkVersion, error) { - return *new(apitypes.NetworkVersion), xerrors.New("method not supported") + return *new(apitypes.NetworkVersion), ErrNotSupported +} 
+ +func (s *GatewayStruct) StateReadState(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*ActorState, error) { + if s.Internal.StateReadState == nil { + return nil, ErrNotSupported + } + return s.Internal.StateReadState(p0, p1, p2) +} + +func (s *GatewayStub) StateReadState(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*ActorState, error) { + return nil, ErrNotSupported } func (s *GatewayStruct) StateSearchMsg(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) { + if s.Internal.StateSearchMsg == nil { + return nil, ErrNotSupported + } return s.Internal.StateSearchMsg(p0, p1, p2, p3, p4) } func (s *GatewayStub) StateSearchMsg(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *GatewayStruct) StateSectorGetInfo(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorOnChainInfo, error) { + if s.Internal.StateSectorGetInfo == nil { + return nil, ErrNotSupported + } return s.Internal.StateSectorGetInfo(p0, p1, p2, p3) } func (s *GatewayStub) StateSectorGetInfo(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorOnChainInfo, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *GatewayStruct) StateVerifiedClientStatus(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) { + if s.Internal.StateVerifiedClientStatus == nil { + return nil, ErrNotSupported + } return s.Internal.StateVerifiedClientStatus(p0, p1, p2) } func (s *GatewayStub) StateVerifiedClientStatus(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *GatewayStruct) StateWaitMsg(p0 context.Context, p1 cid.Cid, 
p2 uint64, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) { + if s.Internal.StateWaitMsg == nil { + return nil, ErrNotSupported + } return s.Internal.StateWaitMsg(p0, p1, p2, p3, p4) } func (s *GatewayStub) StateWaitMsg(p0 context.Context, p1 cid.Cid, p2 uint64, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *GatewayStruct) Version(p0 context.Context) (APIVersion, error) { + if s.Internal.Version == nil { + return *new(APIVersion), ErrNotSupported + } return s.Internal.Version(p0) } func (s *GatewayStub) Version(p0 context.Context) (APIVersion, error) { - return *new(APIVersion), xerrors.New("method not supported") + return *new(APIVersion), ErrNotSupported } func (s *GatewayStruct) WalletBalance(p0 context.Context, p1 address.Address) (types.BigInt, error) { + if s.Internal.WalletBalance == nil { + return *new(types.BigInt), ErrNotSupported + } return s.Internal.WalletBalance(p0, p1) } func (s *GatewayStub) WalletBalance(p0 context.Context, p1 address.Address) (types.BigInt, error) { - return *new(types.BigInt), xerrors.New("method not supported") + return *new(types.BigInt), ErrNotSupported +} + +func (s *NetStruct) ID(p0 context.Context) (peer.ID, error) { + if s.Internal.ID == nil { + return *new(peer.ID), ErrNotSupported + } + return s.Internal.ID(p0) +} + +func (s *NetStub) ID(p0 context.Context) (peer.ID, error) { + return *new(peer.ID), ErrNotSupported +} + +func (s *NetStruct) NetAddrsListen(p0 context.Context) (peer.AddrInfo, error) { + if s.Internal.NetAddrsListen == nil { + return *new(peer.AddrInfo), ErrNotSupported + } + return s.Internal.NetAddrsListen(p0) +} + +func (s *NetStub) NetAddrsListen(p0 context.Context) (peer.AddrInfo, error) { + return *new(peer.AddrInfo), ErrNotSupported +} + +func (s *NetStruct) NetAgentVersion(p0 context.Context, p1 peer.ID) (string, error) { + if s.Internal.NetAgentVersion == nil { + return "", ErrNotSupported + } + return 
s.Internal.NetAgentVersion(p0, p1) +} + +func (s *NetStub) NetAgentVersion(p0 context.Context, p1 peer.ID) (string, error) { + return "", ErrNotSupported +} + +func (s *NetStruct) NetAutoNatStatus(p0 context.Context) (NatInfo, error) { + if s.Internal.NetAutoNatStatus == nil { + return *new(NatInfo), ErrNotSupported + } + return s.Internal.NetAutoNatStatus(p0) +} + +func (s *NetStub) NetAutoNatStatus(p0 context.Context) (NatInfo, error) { + return *new(NatInfo), ErrNotSupported +} + +func (s *NetStruct) NetBandwidthStats(p0 context.Context) (metrics.Stats, error) { + if s.Internal.NetBandwidthStats == nil { + return *new(metrics.Stats), ErrNotSupported + } + return s.Internal.NetBandwidthStats(p0) +} + +func (s *NetStub) NetBandwidthStats(p0 context.Context) (metrics.Stats, error) { + return *new(metrics.Stats), ErrNotSupported +} + +func (s *NetStruct) NetBandwidthStatsByPeer(p0 context.Context) (map[string]metrics.Stats, error) { + if s.Internal.NetBandwidthStatsByPeer == nil { + return *new(map[string]metrics.Stats), ErrNotSupported + } + return s.Internal.NetBandwidthStatsByPeer(p0) +} + +func (s *NetStub) NetBandwidthStatsByPeer(p0 context.Context) (map[string]metrics.Stats, error) { + return *new(map[string]metrics.Stats), ErrNotSupported +} + +func (s *NetStruct) NetBandwidthStatsByProtocol(p0 context.Context) (map[protocol.ID]metrics.Stats, error) { + if s.Internal.NetBandwidthStatsByProtocol == nil { + return *new(map[protocol.ID]metrics.Stats), ErrNotSupported + } + return s.Internal.NetBandwidthStatsByProtocol(p0) +} + +func (s *NetStub) NetBandwidthStatsByProtocol(p0 context.Context) (map[protocol.ID]metrics.Stats, error) { + return *new(map[protocol.ID]metrics.Stats), ErrNotSupported +} + +func (s *NetStruct) NetBlockAdd(p0 context.Context, p1 NetBlockList) error { + if s.Internal.NetBlockAdd == nil { + return ErrNotSupported + } + return s.Internal.NetBlockAdd(p0, p1) +} + +func (s *NetStub) NetBlockAdd(p0 context.Context, p1 NetBlockList) error { + 
return ErrNotSupported +} + +func (s *NetStruct) NetBlockList(p0 context.Context) (NetBlockList, error) { + if s.Internal.NetBlockList == nil { + return *new(NetBlockList), ErrNotSupported + } + return s.Internal.NetBlockList(p0) +} + +func (s *NetStub) NetBlockList(p0 context.Context) (NetBlockList, error) { + return *new(NetBlockList), ErrNotSupported +} + +func (s *NetStruct) NetBlockRemove(p0 context.Context, p1 NetBlockList) error { + if s.Internal.NetBlockRemove == nil { + return ErrNotSupported + } + return s.Internal.NetBlockRemove(p0, p1) +} + +func (s *NetStub) NetBlockRemove(p0 context.Context, p1 NetBlockList) error { + return ErrNotSupported +} + +func (s *NetStruct) NetConnect(p0 context.Context, p1 peer.AddrInfo) error { + if s.Internal.NetConnect == nil { + return ErrNotSupported + } + return s.Internal.NetConnect(p0, p1) +} + +func (s *NetStub) NetConnect(p0 context.Context, p1 peer.AddrInfo) error { + return ErrNotSupported +} + +func (s *NetStruct) NetConnectedness(p0 context.Context, p1 peer.ID) (network.Connectedness, error) { + if s.Internal.NetConnectedness == nil { + return *new(network.Connectedness), ErrNotSupported + } + return s.Internal.NetConnectedness(p0, p1) +} + +func (s *NetStub) NetConnectedness(p0 context.Context, p1 peer.ID) (network.Connectedness, error) { + return *new(network.Connectedness), ErrNotSupported +} + +func (s *NetStruct) NetDisconnect(p0 context.Context, p1 peer.ID) error { + if s.Internal.NetDisconnect == nil { + return ErrNotSupported + } + return s.Internal.NetDisconnect(p0, p1) +} + +func (s *NetStub) NetDisconnect(p0 context.Context, p1 peer.ID) error { + return ErrNotSupported +} + +func (s *NetStruct) NetFindPeer(p0 context.Context, p1 peer.ID) (peer.AddrInfo, error) { + if s.Internal.NetFindPeer == nil { + return *new(peer.AddrInfo), ErrNotSupported + } + return s.Internal.NetFindPeer(p0, p1) +} + +func (s *NetStub) NetFindPeer(p0 context.Context, p1 peer.ID) (peer.AddrInfo, error) { + return 
*new(peer.AddrInfo), ErrNotSupported +} + +func (s *NetStruct) NetPeerInfo(p0 context.Context, p1 peer.ID) (*ExtendedPeerInfo, error) { + if s.Internal.NetPeerInfo == nil { + return nil, ErrNotSupported + } + return s.Internal.NetPeerInfo(p0, p1) +} + +func (s *NetStub) NetPeerInfo(p0 context.Context, p1 peer.ID) (*ExtendedPeerInfo, error) { + return nil, ErrNotSupported +} + +func (s *NetStruct) NetPeers(p0 context.Context) ([]peer.AddrInfo, error) { + if s.Internal.NetPeers == nil { + return *new([]peer.AddrInfo), ErrNotSupported + } + return s.Internal.NetPeers(p0) +} + +func (s *NetStub) NetPeers(p0 context.Context) ([]peer.AddrInfo, error) { + return *new([]peer.AddrInfo), ErrNotSupported +} + +func (s *NetStruct) NetPubsubScores(p0 context.Context) ([]PubsubScore, error) { + if s.Internal.NetPubsubScores == nil { + return *new([]PubsubScore), ErrNotSupported + } + return s.Internal.NetPubsubScores(p0) +} + +func (s *NetStub) NetPubsubScores(p0 context.Context) ([]PubsubScore, error) { + return *new([]PubsubScore), ErrNotSupported } func (s *SignableStruct) Sign(p0 context.Context, p1 SignFunc) error { + if s.Internal.Sign == nil { + return ErrNotSupported + } return s.Internal.Sign(p0, p1) } func (s *SignableStub) Sign(p0 context.Context, p1 SignFunc) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *StorageMinerStruct) ActorAddress(p0 context.Context) (address.Address, error) { + if s.Internal.ActorAddress == nil { + return *new(address.Address), ErrNotSupported + } return s.Internal.ActorAddress(p0) } func (s *StorageMinerStub) ActorAddress(p0 context.Context) (address.Address, error) { - return *new(address.Address), xerrors.New("method not supported") + return *new(address.Address), ErrNotSupported } func (s *StorageMinerStruct) ActorAddressConfig(p0 context.Context) (AddressConfig, error) { + if s.Internal.ActorAddressConfig == nil { + return *new(AddressConfig), ErrNotSupported + } return 
s.Internal.ActorAddressConfig(p0) } func (s *StorageMinerStub) ActorAddressConfig(p0 context.Context) (AddressConfig, error) { - return *new(AddressConfig), xerrors.New("method not supported") + return *new(AddressConfig), ErrNotSupported } func (s *StorageMinerStruct) ActorSectorSize(p0 context.Context, p1 address.Address) (abi.SectorSize, error) { + if s.Internal.ActorSectorSize == nil { + return *new(abi.SectorSize), ErrNotSupported + } return s.Internal.ActorSectorSize(p0, p1) } func (s *StorageMinerStub) ActorSectorSize(p0 context.Context, p1 address.Address) (abi.SectorSize, error) { - return *new(abi.SectorSize), xerrors.New("method not supported") + return *new(abi.SectorSize), ErrNotSupported } func (s *StorageMinerStruct) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) { + if s.Internal.CheckProvable == nil { + return *new(map[abi.SectorNumber]string), ErrNotSupported + } return s.Internal.CheckProvable(p0, p1, p2, p3) } func (s *StorageMinerStub) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) { - return *new(map[abi.SectorNumber]string), xerrors.New("method not supported") + return *new(map[abi.SectorNumber]string), ErrNotSupported } func (s *StorageMinerStruct) ComputeProof(p0 context.Context, p1 []builtin.SectorInfo, p2 abi.PoStRandomness) ([]builtin.PoStProof, error) { + if s.Internal.ComputeProof == nil { + return *new([]builtin.PoStProof), ErrNotSupported + } return s.Internal.ComputeProof(p0, p1, p2) } func (s *StorageMinerStub) ComputeProof(p0 context.Context, p1 []builtin.SectorInfo, p2 abi.PoStRandomness) ([]builtin.PoStProof, error) { - return *new([]builtin.PoStProof), xerrors.New("method not supported") + return *new([]builtin.PoStProof), ErrNotSupported } func (s *StorageMinerStruct) CreateBackup(p0 context.Context, p1 string) error { + if s.Internal.CreateBackup == nil { + 
return ErrNotSupported + } return s.Internal.CreateBackup(p0, p1) } func (s *StorageMinerStub) CreateBackup(p0 context.Context, p1 string) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *StorageMinerStruct) DealsConsiderOfflineRetrievalDeals(p0 context.Context) (bool, error) { + if s.Internal.DealsConsiderOfflineRetrievalDeals == nil { + return false, ErrNotSupported + } return s.Internal.DealsConsiderOfflineRetrievalDeals(p0) } func (s *StorageMinerStub) DealsConsiderOfflineRetrievalDeals(p0 context.Context) (bool, error) { - return false, xerrors.New("method not supported") + return false, ErrNotSupported } func (s *StorageMinerStruct) DealsConsiderOfflineStorageDeals(p0 context.Context) (bool, error) { + if s.Internal.DealsConsiderOfflineStorageDeals == nil { + return false, ErrNotSupported + } return s.Internal.DealsConsiderOfflineStorageDeals(p0) } func (s *StorageMinerStub) DealsConsiderOfflineStorageDeals(p0 context.Context) (bool, error) { - return false, xerrors.New("method not supported") + return false, ErrNotSupported } func (s *StorageMinerStruct) DealsConsiderOnlineRetrievalDeals(p0 context.Context) (bool, error) { + if s.Internal.DealsConsiderOnlineRetrievalDeals == nil { + return false, ErrNotSupported + } return s.Internal.DealsConsiderOnlineRetrievalDeals(p0) } func (s *StorageMinerStub) DealsConsiderOnlineRetrievalDeals(p0 context.Context) (bool, error) { - return false, xerrors.New("method not supported") + return false, ErrNotSupported } func (s *StorageMinerStruct) DealsConsiderOnlineStorageDeals(p0 context.Context) (bool, error) { + if s.Internal.DealsConsiderOnlineStorageDeals == nil { + return false, ErrNotSupported + } return s.Internal.DealsConsiderOnlineStorageDeals(p0) } func (s *StorageMinerStub) DealsConsiderOnlineStorageDeals(p0 context.Context) (bool, error) { - return false, xerrors.New("method not supported") + return false, ErrNotSupported } func (s *StorageMinerStruct) 
DealsConsiderUnverifiedStorageDeals(p0 context.Context) (bool, error) { + if s.Internal.DealsConsiderUnverifiedStorageDeals == nil { + return false, ErrNotSupported + } return s.Internal.DealsConsiderUnverifiedStorageDeals(p0) } func (s *StorageMinerStub) DealsConsiderUnverifiedStorageDeals(p0 context.Context) (bool, error) { - return false, xerrors.New("method not supported") + return false, ErrNotSupported } func (s *StorageMinerStruct) DealsConsiderVerifiedStorageDeals(p0 context.Context) (bool, error) { + if s.Internal.DealsConsiderVerifiedStorageDeals == nil { + return false, ErrNotSupported + } return s.Internal.DealsConsiderVerifiedStorageDeals(p0) } func (s *StorageMinerStub) DealsConsiderVerifiedStorageDeals(p0 context.Context) (bool, error) { - return false, xerrors.New("method not supported") + return false, ErrNotSupported } func (s *StorageMinerStruct) DealsImportData(p0 context.Context, p1 cid.Cid, p2 string) error { + if s.Internal.DealsImportData == nil { + return ErrNotSupported + } return s.Internal.DealsImportData(p0, p1, p2) } func (s *StorageMinerStub) DealsImportData(p0 context.Context, p1 cid.Cid, p2 string) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *StorageMinerStruct) DealsList(p0 context.Context) ([]MarketDeal, error) { + if s.Internal.DealsList == nil { + return *new([]MarketDeal), ErrNotSupported + } return s.Internal.DealsList(p0) } func (s *StorageMinerStub) DealsList(p0 context.Context) ([]MarketDeal, error) { - return *new([]MarketDeal), xerrors.New("method not supported") + return *new([]MarketDeal), ErrNotSupported } func (s *StorageMinerStruct) DealsPieceCidBlocklist(p0 context.Context) ([]cid.Cid, error) { + if s.Internal.DealsPieceCidBlocklist == nil { + return *new([]cid.Cid), ErrNotSupported + } return s.Internal.DealsPieceCidBlocklist(p0) } func (s *StorageMinerStub) DealsPieceCidBlocklist(p0 context.Context) ([]cid.Cid, error) { - return *new([]cid.Cid), xerrors.New("method not 
supported") + return *new([]cid.Cid), ErrNotSupported } func (s *StorageMinerStruct) DealsSetConsiderOfflineRetrievalDeals(p0 context.Context, p1 bool) error { + if s.Internal.DealsSetConsiderOfflineRetrievalDeals == nil { + return ErrNotSupported + } return s.Internal.DealsSetConsiderOfflineRetrievalDeals(p0, p1) } func (s *StorageMinerStub) DealsSetConsiderOfflineRetrievalDeals(p0 context.Context, p1 bool) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *StorageMinerStruct) DealsSetConsiderOfflineStorageDeals(p0 context.Context, p1 bool) error { + if s.Internal.DealsSetConsiderOfflineStorageDeals == nil { + return ErrNotSupported + } return s.Internal.DealsSetConsiderOfflineStorageDeals(p0, p1) } func (s *StorageMinerStub) DealsSetConsiderOfflineStorageDeals(p0 context.Context, p1 bool) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *StorageMinerStruct) DealsSetConsiderOnlineRetrievalDeals(p0 context.Context, p1 bool) error { + if s.Internal.DealsSetConsiderOnlineRetrievalDeals == nil { + return ErrNotSupported + } return s.Internal.DealsSetConsiderOnlineRetrievalDeals(p0, p1) } func (s *StorageMinerStub) DealsSetConsiderOnlineRetrievalDeals(p0 context.Context, p1 bool) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *StorageMinerStruct) DealsSetConsiderOnlineStorageDeals(p0 context.Context, p1 bool) error { + if s.Internal.DealsSetConsiderOnlineStorageDeals == nil { + return ErrNotSupported + } return s.Internal.DealsSetConsiderOnlineStorageDeals(p0, p1) } func (s *StorageMinerStub) DealsSetConsiderOnlineStorageDeals(p0 context.Context, p1 bool) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *StorageMinerStruct) DealsSetConsiderUnverifiedStorageDeals(p0 context.Context, p1 bool) error { + if s.Internal.DealsSetConsiderUnverifiedStorageDeals == nil { + return ErrNotSupported + } return 
s.Internal.DealsSetConsiderUnverifiedStorageDeals(p0, p1) } func (s *StorageMinerStub) DealsSetConsiderUnverifiedStorageDeals(p0 context.Context, p1 bool) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *StorageMinerStruct) DealsSetConsiderVerifiedStorageDeals(p0 context.Context, p1 bool) error { + if s.Internal.DealsSetConsiderVerifiedStorageDeals == nil { + return ErrNotSupported + } return s.Internal.DealsSetConsiderVerifiedStorageDeals(p0, p1) } func (s *StorageMinerStub) DealsSetConsiderVerifiedStorageDeals(p0 context.Context, p1 bool) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *StorageMinerStruct) DealsSetPieceCidBlocklist(p0 context.Context, p1 []cid.Cid) error { + if s.Internal.DealsSetPieceCidBlocklist == nil { + return ErrNotSupported + } return s.Internal.DealsSetPieceCidBlocklist(p0, p1) } func (s *StorageMinerStub) DealsSetPieceCidBlocklist(p0 context.Context, p1 []cid.Cid) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *StorageMinerStruct) MarketCancelDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error { + if s.Internal.MarketCancelDataTransfer == nil { + return ErrNotSupported + } return s.Internal.MarketCancelDataTransfer(p0, p1, p2, p3) } func (s *StorageMinerStub) MarketCancelDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *StorageMinerStruct) MarketDataTransferUpdates(p0 context.Context) (<-chan DataTransferChannel, error) { + if s.Internal.MarketDataTransferUpdates == nil { + return nil, ErrNotSupported + } return s.Internal.MarketDataTransferUpdates(p0) } func (s *StorageMinerStub) MarketDataTransferUpdates(p0 context.Context) (<-chan DataTransferChannel, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s 
*StorageMinerStruct) MarketGetAsk(p0 context.Context) (*storagemarket.SignedStorageAsk, error) { + if s.Internal.MarketGetAsk == nil { + return nil, ErrNotSupported + } return s.Internal.MarketGetAsk(p0) } func (s *StorageMinerStub) MarketGetAsk(p0 context.Context) (*storagemarket.SignedStorageAsk, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *StorageMinerStruct) MarketGetDealUpdates(p0 context.Context) (<-chan storagemarket.MinerDeal, error) { + if s.Internal.MarketGetDealUpdates == nil { + return nil, ErrNotSupported + } return s.Internal.MarketGetDealUpdates(p0) } func (s *StorageMinerStub) MarketGetDealUpdates(p0 context.Context) (<-chan storagemarket.MinerDeal, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *StorageMinerStruct) MarketGetRetrievalAsk(p0 context.Context) (*retrievalmarket.Ask, error) { + if s.Internal.MarketGetRetrievalAsk == nil { + return nil, ErrNotSupported + } return s.Internal.MarketGetRetrievalAsk(p0) } func (s *StorageMinerStub) MarketGetRetrievalAsk(p0 context.Context) (*retrievalmarket.Ask, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *StorageMinerStruct) MarketImportDealData(p0 context.Context, p1 cid.Cid, p2 string) error { + if s.Internal.MarketImportDealData == nil { + return ErrNotSupported + } return s.Internal.MarketImportDealData(p0, p1, p2) } func (s *StorageMinerStub) MarketImportDealData(p0 context.Context, p1 cid.Cid, p2 string) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *StorageMinerStruct) MarketListDataTransfers(p0 context.Context) ([]DataTransferChannel, error) { + if s.Internal.MarketListDataTransfers == nil { + return *new([]DataTransferChannel), ErrNotSupported + } return s.Internal.MarketListDataTransfers(p0) } func (s *StorageMinerStub) MarketListDataTransfers(p0 context.Context) ([]DataTransferChannel, error) { - 
return *new([]DataTransferChannel), xerrors.New("method not supported") + return *new([]DataTransferChannel), ErrNotSupported } func (s *StorageMinerStruct) MarketListDeals(p0 context.Context) ([]MarketDeal, error) { + if s.Internal.MarketListDeals == nil { + return *new([]MarketDeal), ErrNotSupported + } return s.Internal.MarketListDeals(p0) } func (s *StorageMinerStub) MarketListDeals(p0 context.Context) ([]MarketDeal, error) { - return *new([]MarketDeal), xerrors.New("method not supported") + return *new([]MarketDeal), ErrNotSupported } func (s *StorageMinerStruct) MarketListIncompleteDeals(p0 context.Context) ([]storagemarket.MinerDeal, error) { + if s.Internal.MarketListIncompleteDeals == nil { + return *new([]storagemarket.MinerDeal), ErrNotSupported + } return s.Internal.MarketListIncompleteDeals(p0) } func (s *StorageMinerStub) MarketListIncompleteDeals(p0 context.Context) ([]storagemarket.MinerDeal, error) { - return *new([]storagemarket.MinerDeal), xerrors.New("method not supported") + return *new([]storagemarket.MinerDeal), ErrNotSupported } func (s *StorageMinerStruct) MarketListRetrievalDeals(p0 context.Context) ([]retrievalmarket.ProviderDealState, error) { + if s.Internal.MarketListRetrievalDeals == nil { + return *new([]retrievalmarket.ProviderDealState), ErrNotSupported + } return s.Internal.MarketListRetrievalDeals(p0) } func (s *StorageMinerStub) MarketListRetrievalDeals(p0 context.Context) ([]retrievalmarket.ProviderDealState, error) { - return *new([]retrievalmarket.ProviderDealState), xerrors.New("method not supported") + return *new([]retrievalmarket.ProviderDealState), ErrNotSupported } func (s *StorageMinerStruct) MarketPendingDeals(p0 context.Context) (PendingDealInfo, error) { + if s.Internal.MarketPendingDeals == nil { + return *new(PendingDealInfo), ErrNotSupported + } return s.Internal.MarketPendingDeals(p0) } func (s *StorageMinerStub) MarketPendingDeals(p0 context.Context) (PendingDealInfo, error) { - return *new(PendingDealInfo), 
xerrors.New("method not supported") + return *new(PendingDealInfo), ErrNotSupported } func (s *StorageMinerStruct) MarketPublishPendingDeals(p0 context.Context) error { + if s.Internal.MarketPublishPendingDeals == nil { + return ErrNotSupported + } return s.Internal.MarketPublishPendingDeals(p0) } func (s *StorageMinerStub) MarketPublishPendingDeals(p0 context.Context) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *StorageMinerStruct) MarketRestartDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error { + if s.Internal.MarketRestartDataTransfer == nil { + return ErrNotSupported + } return s.Internal.MarketRestartDataTransfer(p0, p1, p2, p3) } func (s *StorageMinerStub) MarketRestartDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *StorageMinerStruct) MarketSetAsk(p0 context.Context, p1 types.BigInt, p2 types.BigInt, p3 abi.ChainEpoch, p4 abi.PaddedPieceSize, p5 abi.PaddedPieceSize) error { + if s.Internal.MarketSetAsk == nil { + return ErrNotSupported + } return s.Internal.MarketSetAsk(p0, p1, p2, p3, p4, p5) } func (s *StorageMinerStub) MarketSetAsk(p0 context.Context, p1 types.BigInt, p2 types.BigInt, p3 abi.ChainEpoch, p4 abi.PaddedPieceSize, p5 abi.PaddedPieceSize) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *StorageMinerStruct) MarketSetRetrievalAsk(p0 context.Context, p1 *retrievalmarket.Ask) error { + if s.Internal.MarketSetRetrievalAsk == nil { + return ErrNotSupported + } return s.Internal.MarketSetRetrievalAsk(p0, p1) } func (s *StorageMinerStub) MarketSetRetrievalAsk(p0 context.Context, p1 *retrievalmarket.Ask) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *StorageMinerStruct) MiningBase(p0 context.Context) (*types.TipSet, error) { + if s.Internal.MiningBase == nil { + return 
nil, ErrNotSupported + } return s.Internal.MiningBase(p0) } func (s *StorageMinerStub) MiningBase(p0 context.Context) (*types.TipSet, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *StorageMinerStruct) PiecesGetCIDInfo(p0 context.Context, p1 cid.Cid) (*piecestore.CIDInfo, error) { + if s.Internal.PiecesGetCIDInfo == nil { + return nil, ErrNotSupported + } return s.Internal.PiecesGetCIDInfo(p0, p1) } func (s *StorageMinerStub) PiecesGetCIDInfo(p0 context.Context, p1 cid.Cid) (*piecestore.CIDInfo, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *StorageMinerStruct) PiecesGetPieceInfo(p0 context.Context, p1 cid.Cid) (*piecestore.PieceInfo, error) { + if s.Internal.PiecesGetPieceInfo == nil { + return nil, ErrNotSupported + } return s.Internal.PiecesGetPieceInfo(p0, p1) } func (s *StorageMinerStub) PiecesGetPieceInfo(p0 context.Context, p1 cid.Cid) (*piecestore.PieceInfo, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *StorageMinerStruct) PiecesListCidInfos(p0 context.Context) ([]cid.Cid, error) { + if s.Internal.PiecesListCidInfos == nil { + return *new([]cid.Cid), ErrNotSupported + } return s.Internal.PiecesListCidInfos(p0) } func (s *StorageMinerStub) PiecesListCidInfos(p0 context.Context) ([]cid.Cid, error) { - return *new([]cid.Cid), xerrors.New("method not supported") + return *new([]cid.Cid), ErrNotSupported } func (s *StorageMinerStruct) PiecesListPieces(p0 context.Context) ([]cid.Cid, error) { + if s.Internal.PiecesListPieces == nil { + return *new([]cid.Cid), ErrNotSupported + } return s.Internal.PiecesListPieces(p0) } func (s *StorageMinerStub) PiecesListPieces(p0 context.Context) ([]cid.Cid, error) { - return *new([]cid.Cid), xerrors.New("method not supported") + return *new([]cid.Cid), ErrNotSupported } func (s *StorageMinerStruct) PledgeSector(p0 context.Context) (abi.SectorID, error) { + if 
s.Internal.PledgeSector == nil { + return *new(abi.SectorID), ErrNotSupported + } return s.Internal.PledgeSector(p0) } func (s *StorageMinerStub) PledgeSector(p0 context.Context) (abi.SectorID, error) { - return *new(abi.SectorID), xerrors.New("method not supported") + return *new(abi.SectorID), ErrNotSupported } func (s *StorageMinerStruct) ReturnAddPiece(p0 context.Context, p1 storiface.CallID, p2 abi.PieceInfo, p3 *storiface.CallError) error { + if s.Internal.ReturnAddPiece == nil { + return ErrNotSupported + } return s.Internal.ReturnAddPiece(p0, p1, p2, p3) } func (s *StorageMinerStub) ReturnAddPiece(p0 context.Context, p1 storiface.CallID, p2 abi.PieceInfo, p3 *storiface.CallError) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *StorageMinerStruct) ReturnFetch(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error { + if s.Internal.ReturnFetch == nil { + return ErrNotSupported + } return s.Internal.ReturnFetch(p0, p1, p2) } func (s *StorageMinerStub) ReturnFetch(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *StorageMinerStruct) ReturnFinalizeSector(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error { + if s.Internal.ReturnFinalizeSector == nil { + return ErrNotSupported + } return s.Internal.ReturnFinalizeSector(p0, p1, p2) } func (s *StorageMinerStub) ReturnFinalizeSector(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *StorageMinerStruct) ReturnMoveStorage(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error { + if s.Internal.ReturnMoveStorage == nil { + return ErrNotSupported + } return s.Internal.ReturnMoveStorage(p0, p1, p2) } func (s *StorageMinerStub) ReturnMoveStorage(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error { - 
return xerrors.New("method not supported") + return ErrNotSupported } func (s *StorageMinerStruct) ReturnReadPiece(p0 context.Context, p1 storiface.CallID, p2 bool, p3 *storiface.CallError) error { + if s.Internal.ReturnReadPiece == nil { + return ErrNotSupported + } return s.Internal.ReturnReadPiece(p0, p1, p2, p3) } func (s *StorageMinerStub) ReturnReadPiece(p0 context.Context, p1 storiface.CallID, p2 bool, p3 *storiface.CallError) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *StorageMinerStruct) ReturnReleaseUnsealed(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error { + if s.Internal.ReturnReleaseUnsealed == nil { + return ErrNotSupported + } return s.Internal.ReturnReleaseUnsealed(p0, p1, p2) } func (s *StorageMinerStub) ReturnReleaseUnsealed(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *StorageMinerStruct) ReturnSealCommit1(p0 context.Context, p1 storiface.CallID, p2 storage.Commit1Out, p3 *storiface.CallError) error { + if s.Internal.ReturnSealCommit1 == nil { + return ErrNotSupported + } return s.Internal.ReturnSealCommit1(p0, p1, p2, p3) } func (s *StorageMinerStub) ReturnSealCommit1(p0 context.Context, p1 storiface.CallID, p2 storage.Commit1Out, p3 *storiface.CallError) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *StorageMinerStruct) ReturnSealCommit2(p0 context.Context, p1 storiface.CallID, p2 storage.Proof, p3 *storiface.CallError) error { + if s.Internal.ReturnSealCommit2 == nil { + return ErrNotSupported + } return s.Internal.ReturnSealCommit2(p0, p1, p2, p3) } func (s *StorageMinerStub) ReturnSealCommit2(p0 context.Context, p1 storiface.CallID, p2 storage.Proof, p3 *storiface.CallError) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *StorageMinerStruct) ReturnSealPreCommit1(p0 context.Context, p1 
storiface.CallID, p2 storage.PreCommit1Out, p3 *storiface.CallError) error { + if s.Internal.ReturnSealPreCommit1 == nil { + return ErrNotSupported + } return s.Internal.ReturnSealPreCommit1(p0, p1, p2, p3) } func (s *StorageMinerStub) ReturnSealPreCommit1(p0 context.Context, p1 storiface.CallID, p2 storage.PreCommit1Out, p3 *storiface.CallError) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *StorageMinerStruct) ReturnSealPreCommit2(p0 context.Context, p1 storiface.CallID, p2 storage.SectorCids, p3 *storiface.CallError) error { + if s.Internal.ReturnSealPreCommit2 == nil { + return ErrNotSupported + } return s.Internal.ReturnSealPreCommit2(p0, p1, p2, p3) } func (s *StorageMinerStub) ReturnSealPreCommit2(p0 context.Context, p1 storiface.CallID, p2 storage.SectorCids, p3 *storiface.CallError) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *StorageMinerStruct) ReturnUnsealPiece(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error { + if s.Internal.ReturnUnsealPiece == nil { + return ErrNotSupported + } return s.Internal.ReturnUnsealPiece(p0, p1, p2) } func (s *StorageMinerStub) ReturnUnsealPiece(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error { - return xerrors.New("method not supported") + return ErrNotSupported +} + +func (s *StorageMinerStruct) RuntimeSubsystems(p0 context.Context) (MinerSubsystems, error) { + if s.Internal.RuntimeSubsystems == nil { + return *new(MinerSubsystems), ErrNotSupported + } + return s.Internal.RuntimeSubsystems(p0) +} + +func (s *StorageMinerStub) RuntimeSubsystems(p0 context.Context) (MinerSubsystems, error) { + return *new(MinerSubsystems), ErrNotSupported } func (s *StorageMinerStruct) SealingAbort(p0 context.Context, p1 storiface.CallID) error { + if s.Internal.SealingAbort == nil { + return ErrNotSupported + } return s.Internal.SealingAbort(p0, p1) } func (s *StorageMinerStub) SealingAbort(p0 
context.Context, p1 storiface.CallID) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *StorageMinerStruct) SealingSchedDiag(p0 context.Context, p1 bool) (interface{}, error) { + if s.Internal.SealingSchedDiag == nil { + return nil, ErrNotSupported + } return s.Internal.SealingSchedDiag(p0, p1) } func (s *StorageMinerStub) SealingSchedDiag(p0 context.Context, p1 bool) (interface{}, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported +} + +func (s *StorageMinerStruct) SectorAddPieceToAny(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storage.Data, p3 PieceDealInfo) (SectorOffset, error) { + if s.Internal.SectorAddPieceToAny == nil { + return *new(SectorOffset), ErrNotSupported + } + return s.Internal.SectorAddPieceToAny(p0, p1, p2, p3) +} + +func (s *StorageMinerStub) SectorAddPieceToAny(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storage.Data, p3 PieceDealInfo) (SectorOffset, error) { + return *new(SectorOffset), ErrNotSupported } func (s *StorageMinerStruct) SectorCommitFlush(p0 context.Context) ([]sealiface.CommitBatchRes, error) { + if s.Internal.SectorCommitFlush == nil { + return *new([]sealiface.CommitBatchRes), ErrNotSupported + } return s.Internal.SectorCommitFlush(p0) } func (s *StorageMinerStub) SectorCommitFlush(p0 context.Context) ([]sealiface.CommitBatchRes, error) { - return *new([]sealiface.CommitBatchRes), xerrors.New("method not supported") + return *new([]sealiface.CommitBatchRes), ErrNotSupported } func (s *StorageMinerStruct) SectorCommitPending(p0 context.Context) ([]abi.SectorID, error) { + if s.Internal.SectorCommitPending == nil { + return *new([]abi.SectorID), ErrNotSupported + } return s.Internal.SectorCommitPending(p0) } func (s *StorageMinerStub) SectorCommitPending(p0 context.Context) ([]abi.SectorID, error) { - return *new([]abi.SectorID), xerrors.New("method not supported") + return *new([]abi.SectorID), ErrNotSupported } func (s *StorageMinerStruct) 
SectorGetExpectedSealDuration(p0 context.Context) (time.Duration, error) { + if s.Internal.SectorGetExpectedSealDuration == nil { + return *new(time.Duration), ErrNotSupported + } return s.Internal.SectorGetExpectedSealDuration(p0) } func (s *StorageMinerStub) SectorGetExpectedSealDuration(p0 context.Context) (time.Duration, error) { - return *new(time.Duration), xerrors.New("method not supported") + return *new(time.Duration), ErrNotSupported } func (s *StorageMinerStruct) SectorGetSealDelay(p0 context.Context) (time.Duration, error) { + if s.Internal.SectorGetSealDelay == nil { + return *new(time.Duration), ErrNotSupported + } return s.Internal.SectorGetSealDelay(p0) } func (s *StorageMinerStub) SectorGetSealDelay(p0 context.Context) (time.Duration, error) { - return *new(time.Duration), xerrors.New("method not supported") + return *new(time.Duration), ErrNotSupported } func (s *StorageMinerStruct) SectorMarkForUpgrade(p0 context.Context, p1 abi.SectorNumber) error { + if s.Internal.SectorMarkForUpgrade == nil { + return ErrNotSupported + } return s.Internal.SectorMarkForUpgrade(p0, p1) } func (s *StorageMinerStub) SectorMarkForUpgrade(p0 context.Context, p1 abi.SectorNumber) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *StorageMinerStruct) SectorPreCommitFlush(p0 context.Context) ([]sealiface.PreCommitBatchRes, error) { + if s.Internal.SectorPreCommitFlush == nil { + return *new([]sealiface.PreCommitBatchRes), ErrNotSupported + } return s.Internal.SectorPreCommitFlush(p0) } func (s *StorageMinerStub) SectorPreCommitFlush(p0 context.Context) ([]sealiface.PreCommitBatchRes, error) { - return *new([]sealiface.PreCommitBatchRes), xerrors.New("method not supported") + return *new([]sealiface.PreCommitBatchRes), ErrNotSupported } func (s *StorageMinerStruct) SectorPreCommitPending(p0 context.Context) ([]abi.SectorID, error) { + if s.Internal.SectorPreCommitPending == nil { + return *new([]abi.SectorID), ErrNotSupported + } 
return s.Internal.SectorPreCommitPending(p0) } func (s *StorageMinerStub) SectorPreCommitPending(p0 context.Context) ([]abi.SectorID, error) { - return *new([]abi.SectorID), xerrors.New("method not supported") + return *new([]abi.SectorID), ErrNotSupported } func (s *StorageMinerStruct) SectorRemove(p0 context.Context, p1 abi.SectorNumber) error { + if s.Internal.SectorRemove == nil { + return ErrNotSupported + } return s.Internal.SectorRemove(p0, p1) } func (s *StorageMinerStub) SectorRemove(p0 context.Context, p1 abi.SectorNumber) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *StorageMinerStruct) SectorSetExpectedSealDuration(p0 context.Context, p1 time.Duration) error { + if s.Internal.SectorSetExpectedSealDuration == nil { + return ErrNotSupported + } return s.Internal.SectorSetExpectedSealDuration(p0, p1) } func (s *StorageMinerStub) SectorSetExpectedSealDuration(p0 context.Context, p1 time.Duration) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *StorageMinerStruct) SectorSetSealDelay(p0 context.Context, p1 time.Duration) error { + if s.Internal.SectorSetSealDelay == nil { + return ErrNotSupported + } return s.Internal.SectorSetSealDelay(p0, p1) } func (s *StorageMinerStub) SectorSetSealDelay(p0 context.Context, p1 time.Duration) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *StorageMinerStruct) SectorStartSealing(p0 context.Context, p1 abi.SectorNumber) error { + if s.Internal.SectorStartSealing == nil { + return ErrNotSupported + } return s.Internal.SectorStartSealing(p0, p1) } func (s *StorageMinerStub) SectorStartSealing(p0 context.Context, p1 abi.SectorNumber) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *StorageMinerStruct) SectorTerminate(p0 context.Context, p1 abi.SectorNumber) error { + if s.Internal.SectorTerminate == nil { + return ErrNotSupported + } return 
s.Internal.SectorTerminate(p0, p1) } func (s *StorageMinerStub) SectorTerminate(p0 context.Context, p1 abi.SectorNumber) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *StorageMinerStruct) SectorTerminateFlush(p0 context.Context) (*cid.Cid, error) { + if s.Internal.SectorTerminateFlush == nil { + return nil, ErrNotSupported + } return s.Internal.SectorTerminateFlush(p0) } func (s *StorageMinerStub) SectorTerminateFlush(p0 context.Context) (*cid.Cid, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *StorageMinerStruct) SectorTerminatePending(p0 context.Context) ([]abi.SectorID, error) { + if s.Internal.SectorTerminatePending == nil { + return *new([]abi.SectorID), ErrNotSupported + } return s.Internal.SectorTerminatePending(p0) } func (s *StorageMinerStub) SectorTerminatePending(p0 context.Context) ([]abi.SectorID, error) { - return *new([]abi.SectorID), xerrors.New("method not supported") + return *new([]abi.SectorID), ErrNotSupported } func (s *StorageMinerStruct) SectorsList(p0 context.Context) ([]abi.SectorNumber, error) { + if s.Internal.SectorsList == nil { + return *new([]abi.SectorNumber), ErrNotSupported + } return s.Internal.SectorsList(p0) } func (s *StorageMinerStub) SectorsList(p0 context.Context) ([]abi.SectorNumber, error) { - return *new([]abi.SectorNumber), xerrors.New("method not supported") + return *new([]abi.SectorNumber), ErrNotSupported } func (s *StorageMinerStruct) SectorsListInStates(p0 context.Context, p1 []SectorState) ([]abi.SectorNumber, error) { + if s.Internal.SectorsListInStates == nil { + return *new([]abi.SectorNumber), ErrNotSupported + } return s.Internal.SectorsListInStates(p0, p1) } func (s *StorageMinerStub) SectorsListInStates(p0 context.Context, p1 []SectorState) ([]abi.SectorNumber, error) { - return *new([]abi.SectorNumber), xerrors.New("method not supported") + return *new([]abi.SectorNumber), ErrNotSupported } func (s 
*StorageMinerStruct) SectorsRefs(p0 context.Context) (map[string][]SealedRef, error) { + if s.Internal.SectorsRefs == nil { + return *new(map[string][]SealedRef), ErrNotSupported + } return s.Internal.SectorsRefs(p0) } func (s *StorageMinerStub) SectorsRefs(p0 context.Context) (map[string][]SealedRef, error) { - return *new(map[string][]SealedRef), xerrors.New("method not supported") + return *new(map[string][]SealedRef), ErrNotSupported } func (s *StorageMinerStruct) SectorsStatus(p0 context.Context, p1 abi.SectorNumber, p2 bool) (SectorInfo, error) { + if s.Internal.SectorsStatus == nil { + return *new(SectorInfo), ErrNotSupported + } return s.Internal.SectorsStatus(p0, p1, p2) } func (s *StorageMinerStub) SectorsStatus(p0 context.Context, p1 abi.SectorNumber, p2 bool) (SectorInfo, error) { - return *new(SectorInfo), xerrors.New("method not supported") + return *new(SectorInfo), ErrNotSupported } func (s *StorageMinerStruct) SectorsSummary(p0 context.Context) (map[SectorState]int, error) { + if s.Internal.SectorsSummary == nil { + return *new(map[SectorState]int), ErrNotSupported + } return s.Internal.SectorsSummary(p0) } func (s *StorageMinerStub) SectorsSummary(p0 context.Context) (map[SectorState]int, error) { - return *new(map[SectorState]int), xerrors.New("method not supported") + return *new(map[SectorState]int), ErrNotSupported +} + +func (s *StorageMinerStruct) SectorsUnsealPiece(p0 context.Context, p1 storage.SectorRef, p2 storiface.UnpaddedByteIndex, p3 abi.UnpaddedPieceSize, p4 abi.SealRandomness, p5 *cid.Cid) error { + if s.Internal.SectorsUnsealPiece == nil { + return ErrNotSupported + } + return s.Internal.SectorsUnsealPiece(p0, p1, p2, p3, p4, p5) +} + +func (s *StorageMinerStub) SectorsUnsealPiece(p0 context.Context, p1 storage.SectorRef, p2 storiface.UnpaddedByteIndex, p3 abi.UnpaddedPieceSize, p4 abi.SealRandomness, p5 *cid.Cid) error { + return ErrNotSupported } func (s *StorageMinerStruct) SectorsUpdate(p0 context.Context, p1 abi.SectorNumber, 
p2 SectorState) error { + if s.Internal.SectorsUpdate == nil { + return ErrNotSupported + } return s.Internal.SectorsUpdate(p0, p1, p2) } func (s *StorageMinerStub) SectorsUpdate(p0 context.Context, p1 abi.SectorNumber, p2 SectorState) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *StorageMinerStruct) StorageAddLocal(p0 context.Context, p1 string) error { + if s.Internal.StorageAddLocal == nil { + return ErrNotSupported + } return s.Internal.StorageAddLocal(p0, p1) } func (s *StorageMinerStub) StorageAddLocal(p0 context.Context, p1 string) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *StorageMinerStruct) StorageAttach(p0 context.Context, p1 stores.StorageInfo, p2 fsutil.FsStat) error { + if s.Internal.StorageAttach == nil { + return ErrNotSupported + } return s.Internal.StorageAttach(p0, p1, p2) } func (s *StorageMinerStub) StorageAttach(p0 context.Context, p1 stores.StorageInfo, p2 fsutil.FsStat) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *StorageMinerStruct) StorageBestAlloc(p0 context.Context, p1 storiface.SectorFileType, p2 abi.SectorSize, p3 storiface.PathType) ([]stores.StorageInfo, error) { + if s.Internal.StorageBestAlloc == nil { + return *new([]stores.StorageInfo), ErrNotSupported + } return s.Internal.StorageBestAlloc(p0, p1, p2, p3) } func (s *StorageMinerStub) StorageBestAlloc(p0 context.Context, p1 storiface.SectorFileType, p2 abi.SectorSize, p3 storiface.PathType) ([]stores.StorageInfo, error) { - return *new([]stores.StorageInfo), xerrors.New("method not supported") + return *new([]stores.StorageInfo), ErrNotSupported } func (s *StorageMinerStruct) StorageDeclareSector(p0 context.Context, p1 stores.ID, p2 abi.SectorID, p3 storiface.SectorFileType, p4 bool) error { + if s.Internal.StorageDeclareSector == nil { + return ErrNotSupported + } return s.Internal.StorageDeclareSector(p0, p1, p2, p3, p4) } func (s 
*StorageMinerStub) StorageDeclareSector(p0 context.Context, p1 stores.ID, p2 abi.SectorID, p3 storiface.SectorFileType, p4 bool) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *StorageMinerStruct) StorageDropSector(p0 context.Context, p1 stores.ID, p2 abi.SectorID, p3 storiface.SectorFileType) error { + if s.Internal.StorageDropSector == nil { + return ErrNotSupported + } return s.Internal.StorageDropSector(p0, p1, p2, p3) } func (s *StorageMinerStub) StorageDropSector(p0 context.Context, p1 stores.ID, p2 abi.SectorID, p3 storiface.SectorFileType) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *StorageMinerStruct) StorageFindSector(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 abi.SectorSize, p4 bool) ([]stores.SectorStorageInfo, error) { + if s.Internal.StorageFindSector == nil { + return *new([]stores.SectorStorageInfo), ErrNotSupported + } return s.Internal.StorageFindSector(p0, p1, p2, p3, p4) } func (s *StorageMinerStub) StorageFindSector(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 abi.SectorSize, p4 bool) ([]stores.SectorStorageInfo, error) { - return *new([]stores.SectorStorageInfo), xerrors.New("method not supported") + return *new([]stores.SectorStorageInfo), ErrNotSupported } func (s *StorageMinerStruct) StorageInfo(p0 context.Context, p1 stores.ID) (stores.StorageInfo, error) { + if s.Internal.StorageInfo == nil { + return *new(stores.StorageInfo), ErrNotSupported + } return s.Internal.StorageInfo(p0, p1) } func (s *StorageMinerStub) StorageInfo(p0 context.Context, p1 stores.ID) (stores.StorageInfo, error) { - return *new(stores.StorageInfo), xerrors.New("method not supported") + return *new(stores.StorageInfo), ErrNotSupported } func (s *StorageMinerStruct) StorageList(p0 context.Context) (map[stores.ID][]stores.Decl, error) { + if s.Internal.StorageList == nil { + return *new(map[stores.ID][]stores.Decl), ErrNotSupported + } 
return s.Internal.StorageList(p0) } func (s *StorageMinerStub) StorageList(p0 context.Context) (map[stores.ID][]stores.Decl, error) { - return *new(map[stores.ID][]stores.Decl), xerrors.New("method not supported") + return *new(map[stores.ID][]stores.Decl), ErrNotSupported } func (s *StorageMinerStruct) StorageLocal(p0 context.Context) (map[stores.ID]string, error) { + if s.Internal.StorageLocal == nil { + return *new(map[stores.ID]string), ErrNotSupported + } return s.Internal.StorageLocal(p0) } func (s *StorageMinerStub) StorageLocal(p0 context.Context) (map[stores.ID]string, error) { - return *new(map[stores.ID]string), xerrors.New("method not supported") + return *new(map[stores.ID]string), ErrNotSupported } func (s *StorageMinerStruct) StorageLock(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 storiface.SectorFileType) error { + if s.Internal.StorageLock == nil { + return ErrNotSupported + } return s.Internal.StorageLock(p0, p1, p2, p3) } func (s *StorageMinerStub) StorageLock(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 storiface.SectorFileType) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *StorageMinerStruct) StorageReportHealth(p0 context.Context, p1 stores.ID, p2 stores.HealthReport) error { + if s.Internal.StorageReportHealth == nil { + return ErrNotSupported + } return s.Internal.StorageReportHealth(p0, p1, p2) } func (s *StorageMinerStub) StorageReportHealth(p0 context.Context, p1 stores.ID, p2 stores.HealthReport) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *StorageMinerStruct) StorageStat(p0 context.Context, p1 stores.ID) (fsutil.FsStat, error) { + if s.Internal.StorageStat == nil { + return *new(fsutil.FsStat), ErrNotSupported + } return s.Internal.StorageStat(p0, p1) } func (s *StorageMinerStub) StorageStat(p0 context.Context, p1 stores.ID) (fsutil.FsStat, error) { - return *new(fsutil.FsStat), xerrors.New("method not 
supported") + return *new(fsutil.FsStat), ErrNotSupported } func (s *StorageMinerStruct) StorageTryLock(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 storiface.SectorFileType) (bool, error) { + if s.Internal.StorageTryLock == nil { + return false, ErrNotSupported + } return s.Internal.StorageTryLock(p0, p1, p2, p3) } func (s *StorageMinerStub) StorageTryLock(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 storiface.SectorFileType) (bool, error) { - return false, xerrors.New("method not supported") + return false, ErrNotSupported } func (s *StorageMinerStruct) WorkerConnect(p0 context.Context, p1 string) error { + if s.Internal.WorkerConnect == nil { + return ErrNotSupported + } return s.Internal.WorkerConnect(p0, p1) } func (s *StorageMinerStub) WorkerConnect(p0 context.Context, p1 string) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *StorageMinerStruct) WorkerJobs(p0 context.Context) (map[uuid.UUID][]storiface.WorkerJob, error) { + if s.Internal.WorkerJobs == nil { + return *new(map[uuid.UUID][]storiface.WorkerJob), ErrNotSupported + } return s.Internal.WorkerJobs(p0) } func (s *StorageMinerStub) WorkerJobs(p0 context.Context) (map[uuid.UUID][]storiface.WorkerJob, error) { - return *new(map[uuid.UUID][]storiface.WorkerJob), xerrors.New("method not supported") + return *new(map[uuid.UUID][]storiface.WorkerJob), ErrNotSupported } func (s *StorageMinerStruct) WorkerStats(p0 context.Context) (map[uuid.UUID]storiface.WorkerStats, error) { + if s.Internal.WorkerStats == nil { + return *new(map[uuid.UUID]storiface.WorkerStats), ErrNotSupported + } return s.Internal.WorkerStats(p0) } func (s *StorageMinerStub) WorkerStats(p0 context.Context) (map[uuid.UUID]storiface.WorkerStats, error) { - return *new(map[uuid.UUID]storiface.WorkerStats), xerrors.New("method not supported") + return *new(map[uuid.UUID]storiface.WorkerStats), ErrNotSupported } func (s *WalletStruct) WalletDelete(p0 
context.Context, p1 address.Address) error { + if s.Internal.WalletDelete == nil { + return ErrNotSupported + } return s.Internal.WalletDelete(p0, p1) } func (s *WalletStub) WalletDelete(p0 context.Context, p1 address.Address) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *WalletStruct) WalletExport(p0 context.Context, p1 address.Address) (*types.KeyInfo, error) { + if s.Internal.WalletExport == nil { + return nil, ErrNotSupported + } return s.Internal.WalletExport(p0, p1) } func (s *WalletStub) WalletExport(p0 context.Context, p1 address.Address) (*types.KeyInfo, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *WalletStruct) WalletHas(p0 context.Context, p1 address.Address) (bool, error) { + if s.Internal.WalletHas == nil { + return false, ErrNotSupported + } return s.Internal.WalletHas(p0, p1) } func (s *WalletStub) WalletHas(p0 context.Context, p1 address.Address) (bool, error) { - return false, xerrors.New("method not supported") + return false, ErrNotSupported } func (s *WalletStruct) WalletImport(p0 context.Context, p1 *types.KeyInfo) (address.Address, error) { + if s.Internal.WalletImport == nil { + return *new(address.Address), ErrNotSupported + } return s.Internal.WalletImport(p0, p1) } func (s *WalletStub) WalletImport(p0 context.Context, p1 *types.KeyInfo) (address.Address, error) { - return *new(address.Address), xerrors.New("method not supported") + return *new(address.Address), ErrNotSupported } func (s *WalletStruct) WalletList(p0 context.Context) ([]address.Address, error) { + if s.Internal.WalletList == nil { + return *new([]address.Address), ErrNotSupported + } return s.Internal.WalletList(p0) } func (s *WalletStub) WalletList(p0 context.Context) ([]address.Address, error) { - return *new([]address.Address), xerrors.New("method not supported") + return *new([]address.Address), ErrNotSupported } func (s *WalletStruct) WalletNew(p0 context.Context, p1 
types.KeyType) (address.Address, error) { + if s.Internal.WalletNew == nil { + return *new(address.Address), ErrNotSupported + } return s.Internal.WalletNew(p0, p1) } func (s *WalletStub) WalletNew(p0 context.Context, p1 types.KeyType) (address.Address, error) { - return *new(address.Address), xerrors.New("method not supported") + return *new(address.Address), ErrNotSupported } func (s *WalletStruct) WalletSign(p0 context.Context, p1 address.Address, p2 []byte, p3 MsgMeta) (*crypto.Signature, error) { + if s.Internal.WalletSign == nil { + return nil, ErrNotSupported + } return s.Internal.WalletSign(p0, p1, p2, p3) } func (s *WalletStub) WalletSign(p0 context.Context, p1 address.Address, p2 []byte, p3 MsgMeta) (*crypto.Signature, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *WorkerStruct) AddPiece(p0 context.Context, p1 storage.SectorRef, p2 []abi.UnpaddedPieceSize, p3 abi.UnpaddedPieceSize, p4 storage.Data) (storiface.CallID, error) { + if s.Internal.AddPiece == nil { + return *new(storiface.CallID), ErrNotSupported + } return s.Internal.AddPiece(p0, p1, p2, p3, p4) } func (s *WorkerStub) AddPiece(p0 context.Context, p1 storage.SectorRef, p2 []abi.UnpaddedPieceSize, p3 abi.UnpaddedPieceSize, p4 storage.Data) (storiface.CallID, error) { - return *new(storiface.CallID), xerrors.New("method not supported") + return *new(storiface.CallID), ErrNotSupported } func (s *WorkerStruct) Enabled(p0 context.Context) (bool, error) { + if s.Internal.Enabled == nil { + return false, ErrNotSupported + } return s.Internal.Enabled(p0) } func (s *WorkerStub) Enabled(p0 context.Context) (bool, error) { - return false, xerrors.New("method not supported") + return false, ErrNotSupported } func (s *WorkerStruct) Fetch(p0 context.Context, p1 storage.SectorRef, p2 storiface.SectorFileType, p3 storiface.PathType, p4 storiface.AcquireMode) (storiface.CallID, error) { + if s.Internal.Fetch == nil { + return *new(storiface.CallID), 
ErrNotSupported + } return s.Internal.Fetch(p0, p1, p2, p3, p4) } func (s *WorkerStub) Fetch(p0 context.Context, p1 storage.SectorRef, p2 storiface.SectorFileType, p3 storiface.PathType, p4 storiface.AcquireMode) (storiface.CallID, error) { - return *new(storiface.CallID), xerrors.New("method not supported") + return *new(storiface.CallID), ErrNotSupported } func (s *WorkerStruct) FinalizeSector(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) { + if s.Internal.FinalizeSector == nil { + return *new(storiface.CallID), ErrNotSupported + } return s.Internal.FinalizeSector(p0, p1, p2) } func (s *WorkerStub) FinalizeSector(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) { - return *new(storiface.CallID), xerrors.New("method not supported") + return *new(storiface.CallID), ErrNotSupported } func (s *WorkerStruct) Info(p0 context.Context) (storiface.WorkerInfo, error) { + if s.Internal.Info == nil { + return *new(storiface.WorkerInfo), ErrNotSupported + } return s.Internal.Info(p0) } func (s *WorkerStub) Info(p0 context.Context) (storiface.WorkerInfo, error) { - return *new(storiface.WorkerInfo), xerrors.New("method not supported") + return *new(storiface.WorkerInfo), ErrNotSupported } func (s *WorkerStruct) MoveStorage(p0 context.Context, p1 storage.SectorRef, p2 storiface.SectorFileType) (storiface.CallID, error) { + if s.Internal.MoveStorage == nil { + return *new(storiface.CallID), ErrNotSupported + } return s.Internal.MoveStorage(p0, p1, p2) } func (s *WorkerStub) MoveStorage(p0 context.Context, p1 storage.SectorRef, p2 storiface.SectorFileType) (storiface.CallID, error) { - return *new(storiface.CallID), xerrors.New("method not supported") + return *new(storiface.CallID), ErrNotSupported } func (s *WorkerStruct) Paths(p0 context.Context) ([]stores.StoragePath, error) { + if s.Internal.Paths == nil { + return *new([]stores.StoragePath), ErrNotSupported + } return s.Internal.Paths(p0) } 
func (s *WorkerStub) Paths(p0 context.Context) ([]stores.StoragePath, error) { - return *new([]stores.StoragePath), xerrors.New("method not supported") + return *new([]stores.StoragePath), ErrNotSupported } func (s *WorkerStruct) ProcessSession(p0 context.Context) (uuid.UUID, error) { + if s.Internal.ProcessSession == nil { + return *new(uuid.UUID), ErrNotSupported + } return s.Internal.ProcessSession(p0) } func (s *WorkerStub) ProcessSession(p0 context.Context) (uuid.UUID, error) { - return *new(uuid.UUID), xerrors.New("method not supported") + return *new(uuid.UUID), ErrNotSupported } func (s *WorkerStruct) ReleaseUnsealed(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) { + if s.Internal.ReleaseUnsealed == nil { + return *new(storiface.CallID), ErrNotSupported + } return s.Internal.ReleaseUnsealed(p0, p1, p2) } func (s *WorkerStub) ReleaseUnsealed(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) { - return *new(storiface.CallID), xerrors.New("method not supported") + return *new(storiface.CallID), ErrNotSupported } func (s *WorkerStruct) Remove(p0 context.Context, p1 abi.SectorID) error { + if s.Internal.Remove == nil { + return ErrNotSupported + } return s.Internal.Remove(p0, p1) } func (s *WorkerStub) Remove(p0 context.Context, p1 abi.SectorID) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *WorkerStruct) SealCommit1(p0 context.Context, p1 storage.SectorRef, p2 abi.SealRandomness, p3 abi.InteractiveSealRandomness, p4 []abi.PieceInfo, p5 storage.SectorCids) (storiface.CallID, error) { + if s.Internal.SealCommit1 == nil { + return *new(storiface.CallID), ErrNotSupported + } return s.Internal.SealCommit1(p0, p1, p2, p3, p4, p5) } func (s *WorkerStub) SealCommit1(p0 context.Context, p1 storage.SectorRef, p2 abi.SealRandomness, p3 abi.InteractiveSealRandomness, p4 []abi.PieceInfo, p5 storage.SectorCids) (storiface.CallID, error) { - return 
*new(storiface.CallID), xerrors.New("method not supported") + return *new(storiface.CallID), ErrNotSupported } func (s *WorkerStruct) SealCommit2(p0 context.Context, p1 storage.SectorRef, p2 storage.Commit1Out) (storiface.CallID, error) { + if s.Internal.SealCommit2 == nil { + return *new(storiface.CallID), ErrNotSupported + } return s.Internal.SealCommit2(p0, p1, p2) } func (s *WorkerStub) SealCommit2(p0 context.Context, p1 storage.SectorRef, p2 storage.Commit1Out) (storiface.CallID, error) { - return *new(storiface.CallID), xerrors.New("method not supported") + return *new(storiface.CallID), ErrNotSupported } func (s *WorkerStruct) SealPreCommit1(p0 context.Context, p1 storage.SectorRef, p2 abi.SealRandomness, p3 []abi.PieceInfo) (storiface.CallID, error) { + if s.Internal.SealPreCommit1 == nil { + return *new(storiface.CallID), ErrNotSupported + } return s.Internal.SealPreCommit1(p0, p1, p2, p3) } func (s *WorkerStub) SealPreCommit1(p0 context.Context, p1 storage.SectorRef, p2 abi.SealRandomness, p3 []abi.PieceInfo) (storiface.CallID, error) { - return *new(storiface.CallID), xerrors.New("method not supported") + return *new(storiface.CallID), ErrNotSupported } func (s *WorkerStruct) SealPreCommit2(p0 context.Context, p1 storage.SectorRef, p2 storage.PreCommit1Out) (storiface.CallID, error) { + if s.Internal.SealPreCommit2 == nil { + return *new(storiface.CallID), ErrNotSupported + } return s.Internal.SealPreCommit2(p0, p1, p2) } func (s *WorkerStub) SealPreCommit2(p0 context.Context, p1 storage.SectorRef, p2 storage.PreCommit1Out) (storiface.CallID, error) { - return *new(storiface.CallID), xerrors.New("method not supported") + return *new(storiface.CallID), ErrNotSupported } func (s *WorkerStruct) Session(p0 context.Context) (uuid.UUID, error) { + if s.Internal.Session == nil { + return *new(uuid.UUID), ErrNotSupported + } return s.Internal.Session(p0) } func (s *WorkerStub) Session(p0 context.Context) (uuid.UUID, error) { - return *new(uuid.UUID), 
xerrors.New("method not supported") + return *new(uuid.UUID), ErrNotSupported } func (s *WorkerStruct) SetEnabled(p0 context.Context, p1 bool) error { + if s.Internal.SetEnabled == nil { + return ErrNotSupported + } return s.Internal.SetEnabled(p0, p1) } func (s *WorkerStub) SetEnabled(p0 context.Context, p1 bool) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *WorkerStruct) StorageAddLocal(p0 context.Context, p1 string) error { + if s.Internal.StorageAddLocal == nil { + return ErrNotSupported + } return s.Internal.StorageAddLocal(p0, p1) } func (s *WorkerStub) StorageAddLocal(p0 context.Context, p1 string) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *WorkerStruct) TaskDisable(p0 context.Context, p1 sealtasks.TaskType) error { + if s.Internal.TaskDisable == nil { + return ErrNotSupported + } return s.Internal.TaskDisable(p0, p1) } func (s *WorkerStub) TaskDisable(p0 context.Context, p1 sealtasks.TaskType) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *WorkerStruct) TaskEnable(p0 context.Context, p1 sealtasks.TaskType) error { + if s.Internal.TaskEnable == nil { + return ErrNotSupported + } return s.Internal.TaskEnable(p0, p1) } func (s *WorkerStub) TaskEnable(p0 context.Context, p1 sealtasks.TaskType) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *WorkerStruct) TaskTypes(p0 context.Context) (map[sealtasks.TaskType]struct{}, error) { + if s.Internal.TaskTypes == nil { + return *new(map[sealtasks.TaskType]struct{}), ErrNotSupported + } return s.Internal.TaskTypes(p0) } func (s *WorkerStub) TaskTypes(p0 context.Context) (map[sealtasks.TaskType]struct{}, error) { - return *new(map[sealtasks.TaskType]struct{}), xerrors.New("method not supported") + return *new(map[sealtasks.TaskType]struct{}), ErrNotSupported } func (s *WorkerStruct) UnsealPiece(p0 context.Context, p1 storage.SectorRef, p2 
storiface.UnpaddedByteIndex, p3 abi.UnpaddedPieceSize, p4 abi.SealRandomness, p5 cid.Cid) (storiface.CallID, error) { + if s.Internal.UnsealPiece == nil { + return *new(storiface.CallID), ErrNotSupported + } return s.Internal.UnsealPiece(p0, p1, p2, p3, p4, p5) } func (s *WorkerStub) UnsealPiece(p0 context.Context, p1 storage.SectorRef, p2 storiface.UnpaddedByteIndex, p3 abi.UnpaddedPieceSize, p4 abi.SealRandomness, p5 cid.Cid) (storiface.CallID, error) { - return *new(storiface.CallID), xerrors.New("method not supported") + return *new(storiface.CallID), ErrNotSupported } func (s *WorkerStruct) Version(p0 context.Context) (Version, error) { + if s.Internal.Version == nil { + return *new(Version), ErrNotSupported + } return s.Internal.Version(p0) } func (s *WorkerStub) Version(p0 context.Context) (Version, error) { - return *new(Version), xerrors.New("method not supported") + return *new(Version), ErrNotSupported } func (s *WorkerStruct) WaitQuiet(p0 context.Context) error { + if s.Internal.WaitQuiet == nil { + return ErrNotSupported + } return s.Internal.WaitQuiet(p0) } func (s *WorkerStub) WaitQuiet(p0 context.Context) error { - return xerrors.New("method not supported") + return ErrNotSupported } var _ ChainIO = new(ChainIOStruct) var _ Common = new(CommonStruct) +var _ CommonNet = new(CommonNetStruct) var _ FullNode = new(FullNodeStruct) var _ Gateway = new(GatewayStruct) +var _ Net = new(NetStruct) var _ Signable = new(SignableStruct) var _ StorageMiner = new(StorageMinerStruct) var _ Wallet = new(WalletStruct) diff --git a/api/proxy_util.go b/api/proxy_util.go new file mode 100644 index 00000000000..ba94a9e5dce --- /dev/null +++ b/api/proxy_util.go @@ -0,0 +1,30 @@ +package api + +import "reflect" + +var _internalField = "Internal" + +// GetInternalStructs extracts all pointers to 'Internal' sub-structs from the provided pointer to a proxy struct +func GetInternalStructs(in interface{}) []interface{} { + return getInternalStructs(reflect.ValueOf(in).Elem()) 
+} + +func getInternalStructs(rv reflect.Value) []interface{} { + var out []interface{} + + internal := rv.FieldByName(_internalField) + ii := internal.Addr().Interface() + out = append(out, ii) + + for i := 0; i < rv.NumField(); i++ { + if rv.Type().Field(i).Name == _internalField { + continue + } + + sub := getInternalStructs(rv.Field(i)) + + out = append(out, sub...) + } + + return out +} diff --git a/api/proxy_util_test.go b/api/proxy_util_test.go new file mode 100644 index 00000000000..3cbc466b6a4 --- /dev/null +++ b/api/proxy_util_test.go @@ -0,0 +1,62 @@ +package api + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +type StrA struct { + StrB + + Internal struct { + A int + } +} + +type StrB struct { + Internal struct { + B int + } +} + +type StrC struct { + Internal struct { + Internal struct { + C int + } + } +} + +func TestGetInternalStructs(t *testing.T) { + var proxy StrA + + sts := GetInternalStructs(&proxy) + require.Len(t, sts, 2) + + sa := sts[0].(*struct{ A int }) + sa.A = 3 + sb := sts[1].(*struct{ B int }) + sb.B = 4 + + require.Equal(t, 3, proxy.Internal.A) + require.Equal(t, 4, proxy.StrB.Internal.B) +} + +func TestNestedInternalStructs(t *testing.T) { + var proxy StrC + + // check that only the top-level internal struct gets picked up + + sts := GetInternalStructs(&proxy) + require.Len(t, sts, 1) + + sa := sts[0].(*struct { + Internal struct { + C int + } + }) + sa.Internal.C = 5 + + require.Equal(t, 5, proxy.Internal.Internal.C) +} diff --git a/api/v0api/full.go b/api/v0api/full.go index f646aa9fd2f..b152c6cbb84 100644 --- a/api/v0api/full.go +++ b/api/v0api/full.go @@ -46,6 +46,7 @@ import ( // FullNode API is a low-level interface to the Filecoin network full node type FullNode interface { Common + Net // MethodGroup: Chain // The Chain method group contains methods for interacting with the @@ -92,6 +93,9 @@ type FullNode interface { // specified block. 
ChainGetParentMessages(ctx context.Context, blockCid cid.Cid) ([]api.Message, error) //perm:read + // ChainGetMessagesInTipset returns message stores in current tipset + ChainGetMessagesInTipset(ctx context.Context, tsk types.TipSetKey) ([]api.Message, error) //perm:read + // ChainGetTipSetByHeight looks back for a tipset at the specified epoch. // If there are no blocks at the specified epoch, a tipset at an earlier epoch // will be returned. diff --git a/api/v0api/latest.go b/api/v0api/latest.go index 87f977be608..d423f57bc86 100644 --- a/api/v0api/latest.go +++ b/api/v0api/latest.go @@ -5,8 +5,15 @@ import ( ) type Common = api.Common +type Net = api.Net +type CommonNet = api.CommonNet + type CommonStruct = api.CommonStruct type CommonStub = api.CommonStub +type NetStruct = api.NetStruct +type NetStub = api.NetStub +type CommonNetStruct = api.CommonNetStruct +type CommonNetStub = api.CommonNetStub type StorageMiner = api.StorageMiner type StorageMinerStruct = api.StorageMinerStruct diff --git a/api/v0api/proxy_gen.go b/api/v0api/proxy_gen.go index 0f5d2f9183f..21b751ca276 100644 --- a/api/v0api/proxy_gen.go +++ b/api/v0api/proxy_gen.go @@ -27,9 +27,13 @@ import ( "golang.org/x/xerrors" ) +var ErrNotSupported = xerrors.New("method not supported") + type FullNodeStruct struct { CommonStruct + NetStruct + Internal struct { BeaconGetEntry func(p0 context.Context, p1 abi.ChainEpoch) (*types.BeaconEntry, error) `perm:"read"` @@ -45,6 +49,8 @@ type FullNodeStruct struct { ChainGetMessage func(p0 context.Context, p1 cid.Cid) (*types.Message, error) `perm:"read"` + ChainGetMessagesInTipset func(p0 context.Context, p1 types.TipSetKey) ([]api.Message, error) `perm:"read"` + ChainGetNode func(p0 context.Context, p1 string) (*api.IpldObject, error) `perm:"read"` ChainGetParentMessages func(p0 context.Context, p1 cid.Cid) ([]api.Message, error) `perm:"read"` @@ -387,6 +393,8 @@ type FullNodeStruct struct { type FullNodeStub struct { CommonStub + + NetStub } type GatewayStruct 
struct { @@ -459,1659 +467,2291 @@ type GatewayStub struct { } func (s *FullNodeStruct) BeaconGetEntry(p0 context.Context, p1 abi.ChainEpoch) (*types.BeaconEntry, error) { + if s.Internal.BeaconGetEntry == nil { + return nil, ErrNotSupported + } return s.Internal.BeaconGetEntry(p0, p1) } func (s *FullNodeStub) BeaconGetEntry(p0 context.Context, p1 abi.ChainEpoch) (*types.BeaconEntry, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) ChainDeleteObj(p0 context.Context, p1 cid.Cid) error { + if s.Internal.ChainDeleteObj == nil { + return ErrNotSupported + } return s.Internal.ChainDeleteObj(p0, p1) } func (s *FullNodeStub) ChainDeleteObj(p0 context.Context, p1 cid.Cid) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *FullNodeStruct) ChainExport(p0 context.Context, p1 abi.ChainEpoch, p2 bool, p3 types.TipSetKey) (<-chan []byte, error) { + if s.Internal.ChainExport == nil { + return nil, ErrNotSupported + } return s.Internal.ChainExport(p0, p1, p2, p3) } func (s *FullNodeStub) ChainExport(p0 context.Context, p1 abi.ChainEpoch, p2 bool, p3 types.TipSetKey) (<-chan []byte, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) ChainGetBlock(p0 context.Context, p1 cid.Cid) (*types.BlockHeader, error) { + if s.Internal.ChainGetBlock == nil { + return nil, ErrNotSupported + } return s.Internal.ChainGetBlock(p0, p1) } func (s *FullNodeStub) ChainGetBlock(p0 context.Context, p1 cid.Cid) (*types.BlockHeader, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) ChainGetBlockMessages(p0 context.Context, p1 cid.Cid) (*api.BlockMessages, error) { + if s.Internal.ChainGetBlockMessages == nil { + return nil, ErrNotSupported + } return s.Internal.ChainGetBlockMessages(p0, p1) } func (s *FullNodeStub) ChainGetBlockMessages(p0 context.Context, p1 cid.Cid) 
(*api.BlockMessages, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) ChainGetGenesis(p0 context.Context) (*types.TipSet, error) { + if s.Internal.ChainGetGenesis == nil { + return nil, ErrNotSupported + } return s.Internal.ChainGetGenesis(p0) } func (s *FullNodeStub) ChainGetGenesis(p0 context.Context) (*types.TipSet, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) ChainGetMessage(p0 context.Context, p1 cid.Cid) (*types.Message, error) { + if s.Internal.ChainGetMessage == nil { + return nil, ErrNotSupported + } return s.Internal.ChainGetMessage(p0, p1) } func (s *FullNodeStub) ChainGetMessage(p0 context.Context, p1 cid.Cid) (*types.Message, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported +} + +func (s *FullNodeStruct) ChainGetMessagesInTipset(p0 context.Context, p1 types.TipSetKey) ([]api.Message, error) { + if s.Internal.ChainGetMessagesInTipset == nil { + return *new([]api.Message), ErrNotSupported + } + return s.Internal.ChainGetMessagesInTipset(p0, p1) +} + +func (s *FullNodeStub) ChainGetMessagesInTipset(p0 context.Context, p1 types.TipSetKey) ([]api.Message, error) { + return *new([]api.Message), ErrNotSupported } func (s *FullNodeStruct) ChainGetNode(p0 context.Context, p1 string) (*api.IpldObject, error) { + if s.Internal.ChainGetNode == nil { + return nil, ErrNotSupported + } return s.Internal.ChainGetNode(p0, p1) } func (s *FullNodeStub) ChainGetNode(p0 context.Context, p1 string) (*api.IpldObject, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) ChainGetParentMessages(p0 context.Context, p1 cid.Cid) ([]api.Message, error) { + if s.Internal.ChainGetParentMessages == nil { + return *new([]api.Message), ErrNotSupported + } return s.Internal.ChainGetParentMessages(p0, p1) } func (s *FullNodeStub) 
ChainGetParentMessages(p0 context.Context, p1 cid.Cid) ([]api.Message, error) { - return *new([]api.Message), xerrors.New("method not supported") + return *new([]api.Message), ErrNotSupported } func (s *FullNodeStruct) ChainGetParentReceipts(p0 context.Context, p1 cid.Cid) ([]*types.MessageReceipt, error) { + if s.Internal.ChainGetParentReceipts == nil { + return *new([]*types.MessageReceipt), ErrNotSupported + } return s.Internal.ChainGetParentReceipts(p0, p1) } func (s *FullNodeStub) ChainGetParentReceipts(p0 context.Context, p1 cid.Cid) ([]*types.MessageReceipt, error) { - return *new([]*types.MessageReceipt), xerrors.New("method not supported") + return *new([]*types.MessageReceipt), ErrNotSupported } func (s *FullNodeStruct) ChainGetPath(p0 context.Context, p1 types.TipSetKey, p2 types.TipSetKey) ([]*api.HeadChange, error) { + if s.Internal.ChainGetPath == nil { + return *new([]*api.HeadChange), ErrNotSupported + } return s.Internal.ChainGetPath(p0, p1, p2) } func (s *FullNodeStub) ChainGetPath(p0 context.Context, p1 types.TipSetKey, p2 types.TipSetKey) ([]*api.HeadChange, error) { - return *new([]*api.HeadChange), xerrors.New("method not supported") + return *new([]*api.HeadChange), ErrNotSupported } func (s *FullNodeStruct) ChainGetRandomnessFromBeacon(p0 context.Context, p1 types.TipSetKey, p2 crypto.DomainSeparationTag, p3 abi.ChainEpoch, p4 []byte) (abi.Randomness, error) { + if s.Internal.ChainGetRandomnessFromBeacon == nil { + return *new(abi.Randomness), ErrNotSupported + } return s.Internal.ChainGetRandomnessFromBeacon(p0, p1, p2, p3, p4) } func (s *FullNodeStub) ChainGetRandomnessFromBeacon(p0 context.Context, p1 types.TipSetKey, p2 crypto.DomainSeparationTag, p3 abi.ChainEpoch, p4 []byte) (abi.Randomness, error) { - return *new(abi.Randomness), xerrors.New("method not supported") + return *new(abi.Randomness), ErrNotSupported } func (s *FullNodeStruct) ChainGetRandomnessFromTickets(p0 context.Context, p1 types.TipSetKey, p2 
crypto.DomainSeparationTag, p3 abi.ChainEpoch, p4 []byte) (abi.Randomness, error) { + if s.Internal.ChainGetRandomnessFromTickets == nil { + return *new(abi.Randomness), ErrNotSupported + } return s.Internal.ChainGetRandomnessFromTickets(p0, p1, p2, p3, p4) } func (s *FullNodeStub) ChainGetRandomnessFromTickets(p0 context.Context, p1 types.TipSetKey, p2 crypto.DomainSeparationTag, p3 abi.ChainEpoch, p4 []byte) (abi.Randomness, error) { - return *new(abi.Randomness), xerrors.New("method not supported") + return *new(abi.Randomness), ErrNotSupported } func (s *FullNodeStruct) ChainGetTipSet(p0 context.Context, p1 types.TipSetKey) (*types.TipSet, error) { + if s.Internal.ChainGetTipSet == nil { + return nil, ErrNotSupported + } return s.Internal.ChainGetTipSet(p0, p1) } func (s *FullNodeStub) ChainGetTipSet(p0 context.Context, p1 types.TipSetKey) (*types.TipSet, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) ChainGetTipSetByHeight(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (*types.TipSet, error) { + if s.Internal.ChainGetTipSetByHeight == nil { + return nil, ErrNotSupported + } return s.Internal.ChainGetTipSetByHeight(p0, p1, p2) } func (s *FullNodeStub) ChainGetTipSetByHeight(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (*types.TipSet, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) ChainHasObj(p0 context.Context, p1 cid.Cid) (bool, error) { + if s.Internal.ChainHasObj == nil { + return false, ErrNotSupported + } return s.Internal.ChainHasObj(p0, p1) } func (s *FullNodeStub) ChainHasObj(p0 context.Context, p1 cid.Cid) (bool, error) { - return false, xerrors.New("method not supported") + return false, ErrNotSupported } func (s *FullNodeStruct) ChainHead(p0 context.Context) (*types.TipSet, error) { + if s.Internal.ChainHead == nil { + return nil, ErrNotSupported + } return s.Internal.ChainHead(p0) } func 
(s *FullNodeStub) ChainHead(p0 context.Context) (*types.TipSet, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) ChainNotify(p0 context.Context) (<-chan []*api.HeadChange, error) { + if s.Internal.ChainNotify == nil { + return nil, ErrNotSupported + } return s.Internal.ChainNotify(p0) } func (s *FullNodeStub) ChainNotify(p0 context.Context) (<-chan []*api.HeadChange, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) ChainReadObj(p0 context.Context, p1 cid.Cid) ([]byte, error) { + if s.Internal.ChainReadObj == nil { + return *new([]byte), ErrNotSupported + } return s.Internal.ChainReadObj(p0, p1) } func (s *FullNodeStub) ChainReadObj(p0 context.Context, p1 cid.Cid) ([]byte, error) { - return *new([]byte), xerrors.New("method not supported") + return *new([]byte), ErrNotSupported } func (s *FullNodeStruct) ChainSetHead(p0 context.Context, p1 types.TipSetKey) error { + if s.Internal.ChainSetHead == nil { + return ErrNotSupported + } return s.Internal.ChainSetHead(p0, p1) } func (s *FullNodeStub) ChainSetHead(p0 context.Context, p1 types.TipSetKey) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *FullNodeStruct) ChainStatObj(p0 context.Context, p1 cid.Cid, p2 cid.Cid) (api.ObjStat, error) { + if s.Internal.ChainStatObj == nil { + return *new(api.ObjStat), ErrNotSupported + } return s.Internal.ChainStatObj(p0, p1, p2) } func (s *FullNodeStub) ChainStatObj(p0 context.Context, p1 cid.Cid, p2 cid.Cid) (api.ObjStat, error) { - return *new(api.ObjStat), xerrors.New("method not supported") + return *new(api.ObjStat), ErrNotSupported } func (s *FullNodeStruct) ChainTipSetWeight(p0 context.Context, p1 types.TipSetKey) (types.BigInt, error) { + if s.Internal.ChainTipSetWeight == nil { + return *new(types.BigInt), ErrNotSupported + } return s.Internal.ChainTipSetWeight(p0, p1) } func (s *FullNodeStub) 
ChainTipSetWeight(p0 context.Context, p1 types.TipSetKey) (types.BigInt, error) { - return *new(types.BigInt), xerrors.New("method not supported") + return *new(types.BigInt), ErrNotSupported } func (s *FullNodeStruct) ClientCalcCommP(p0 context.Context, p1 string) (*api.CommPRet, error) { + if s.Internal.ClientCalcCommP == nil { + return nil, ErrNotSupported + } return s.Internal.ClientCalcCommP(p0, p1) } func (s *FullNodeStub) ClientCalcCommP(p0 context.Context, p1 string) (*api.CommPRet, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) ClientCancelDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error { + if s.Internal.ClientCancelDataTransfer == nil { + return ErrNotSupported + } return s.Internal.ClientCancelDataTransfer(p0, p1, p2, p3) } func (s *FullNodeStub) ClientCancelDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *FullNodeStruct) ClientCancelRetrievalDeal(p0 context.Context, p1 retrievalmarket.DealID) error { + if s.Internal.ClientCancelRetrievalDeal == nil { + return ErrNotSupported + } return s.Internal.ClientCancelRetrievalDeal(p0, p1) } func (s *FullNodeStub) ClientCancelRetrievalDeal(p0 context.Context, p1 retrievalmarket.DealID) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *FullNodeStruct) ClientDataTransferUpdates(p0 context.Context) (<-chan api.DataTransferChannel, error) { + if s.Internal.ClientDataTransferUpdates == nil { + return nil, ErrNotSupported + } return s.Internal.ClientDataTransferUpdates(p0) } func (s *FullNodeStub) ClientDataTransferUpdates(p0 context.Context) (<-chan api.DataTransferChannel, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) ClientDealPieceCID(p0 context.Context, p1 cid.Cid) 
(api.DataCIDSize, error) { + if s.Internal.ClientDealPieceCID == nil { + return *new(api.DataCIDSize), ErrNotSupported + } return s.Internal.ClientDealPieceCID(p0, p1) } func (s *FullNodeStub) ClientDealPieceCID(p0 context.Context, p1 cid.Cid) (api.DataCIDSize, error) { - return *new(api.DataCIDSize), xerrors.New("method not supported") + return *new(api.DataCIDSize), ErrNotSupported } func (s *FullNodeStruct) ClientDealSize(p0 context.Context, p1 cid.Cid) (api.DataSize, error) { + if s.Internal.ClientDealSize == nil { + return *new(api.DataSize), ErrNotSupported + } return s.Internal.ClientDealSize(p0, p1) } func (s *FullNodeStub) ClientDealSize(p0 context.Context, p1 cid.Cid) (api.DataSize, error) { - return *new(api.DataSize), xerrors.New("method not supported") + return *new(api.DataSize), ErrNotSupported } func (s *FullNodeStruct) ClientFindData(p0 context.Context, p1 cid.Cid, p2 *cid.Cid) ([]api.QueryOffer, error) { + if s.Internal.ClientFindData == nil { + return *new([]api.QueryOffer), ErrNotSupported + } return s.Internal.ClientFindData(p0, p1, p2) } func (s *FullNodeStub) ClientFindData(p0 context.Context, p1 cid.Cid, p2 *cid.Cid) ([]api.QueryOffer, error) { - return *new([]api.QueryOffer), xerrors.New("method not supported") + return *new([]api.QueryOffer), ErrNotSupported } func (s *FullNodeStruct) ClientGenCar(p0 context.Context, p1 api.FileRef, p2 string) error { + if s.Internal.ClientGenCar == nil { + return ErrNotSupported + } return s.Internal.ClientGenCar(p0, p1, p2) } func (s *FullNodeStub) ClientGenCar(p0 context.Context, p1 api.FileRef, p2 string) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *FullNodeStruct) ClientGetDealInfo(p0 context.Context, p1 cid.Cid) (*api.DealInfo, error) { + if s.Internal.ClientGetDealInfo == nil { + return nil, ErrNotSupported + } return s.Internal.ClientGetDealInfo(p0, p1) } func (s *FullNodeStub) ClientGetDealInfo(p0 context.Context, p1 cid.Cid) (*api.DealInfo, error) { - 
return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) ClientGetDealStatus(p0 context.Context, p1 uint64) (string, error) { + if s.Internal.ClientGetDealStatus == nil { + return "", ErrNotSupported + } return s.Internal.ClientGetDealStatus(p0, p1) } func (s *FullNodeStub) ClientGetDealStatus(p0 context.Context, p1 uint64) (string, error) { - return "", xerrors.New("method not supported") + return "", ErrNotSupported } func (s *FullNodeStruct) ClientGetDealUpdates(p0 context.Context) (<-chan api.DealInfo, error) { + if s.Internal.ClientGetDealUpdates == nil { + return nil, ErrNotSupported + } return s.Internal.ClientGetDealUpdates(p0) } func (s *FullNodeStub) ClientGetDealUpdates(p0 context.Context) (<-chan api.DealInfo, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) ClientGetRetrievalUpdates(p0 context.Context) (<-chan api.RetrievalInfo, error) { + if s.Internal.ClientGetRetrievalUpdates == nil { + return nil, ErrNotSupported + } return s.Internal.ClientGetRetrievalUpdates(p0) } func (s *FullNodeStub) ClientGetRetrievalUpdates(p0 context.Context) (<-chan api.RetrievalInfo, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) ClientHasLocal(p0 context.Context, p1 cid.Cid) (bool, error) { + if s.Internal.ClientHasLocal == nil { + return false, ErrNotSupported + } return s.Internal.ClientHasLocal(p0, p1) } func (s *FullNodeStub) ClientHasLocal(p0 context.Context, p1 cid.Cid) (bool, error) { - return false, xerrors.New("method not supported") + return false, ErrNotSupported } func (s *FullNodeStruct) ClientImport(p0 context.Context, p1 api.FileRef) (*api.ImportRes, error) { + if s.Internal.ClientImport == nil { + return nil, ErrNotSupported + } return s.Internal.ClientImport(p0, p1) } func (s *FullNodeStub) ClientImport(p0 context.Context, p1 api.FileRef) (*api.ImportRes, error) { - return 
nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) ClientListDataTransfers(p0 context.Context) ([]api.DataTransferChannel, error) { + if s.Internal.ClientListDataTransfers == nil { + return *new([]api.DataTransferChannel), ErrNotSupported + } return s.Internal.ClientListDataTransfers(p0) } func (s *FullNodeStub) ClientListDataTransfers(p0 context.Context) ([]api.DataTransferChannel, error) { - return *new([]api.DataTransferChannel), xerrors.New("method not supported") + return *new([]api.DataTransferChannel), ErrNotSupported } func (s *FullNodeStruct) ClientListDeals(p0 context.Context) ([]api.DealInfo, error) { + if s.Internal.ClientListDeals == nil { + return *new([]api.DealInfo), ErrNotSupported + } return s.Internal.ClientListDeals(p0) } func (s *FullNodeStub) ClientListDeals(p0 context.Context) ([]api.DealInfo, error) { - return *new([]api.DealInfo), xerrors.New("method not supported") + return *new([]api.DealInfo), ErrNotSupported } func (s *FullNodeStruct) ClientListImports(p0 context.Context) ([]api.Import, error) { + if s.Internal.ClientListImports == nil { + return *new([]api.Import), ErrNotSupported + } return s.Internal.ClientListImports(p0) } func (s *FullNodeStub) ClientListImports(p0 context.Context) ([]api.Import, error) { - return *new([]api.Import), xerrors.New("method not supported") + return *new([]api.Import), ErrNotSupported } func (s *FullNodeStruct) ClientListRetrievals(p0 context.Context) ([]api.RetrievalInfo, error) { + if s.Internal.ClientListRetrievals == nil { + return *new([]api.RetrievalInfo), ErrNotSupported + } return s.Internal.ClientListRetrievals(p0) } func (s *FullNodeStub) ClientListRetrievals(p0 context.Context) ([]api.RetrievalInfo, error) { - return *new([]api.RetrievalInfo), xerrors.New("method not supported") + return *new([]api.RetrievalInfo), ErrNotSupported } func (s *FullNodeStruct) ClientMinerQueryOffer(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 *cid.Cid) 
(api.QueryOffer, error) { + if s.Internal.ClientMinerQueryOffer == nil { + return *new(api.QueryOffer), ErrNotSupported + } return s.Internal.ClientMinerQueryOffer(p0, p1, p2, p3) } func (s *FullNodeStub) ClientMinerQueryOffer(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 *cid.Cid) (api.QueryOffer, error) { - return *new(api.QueryOffer), xerrors.New("method not supported") + return *new(api.QueryOffer), ErrNotSupported } func (s *FullNodeStruct) ClientQueryAsk(p0 context.Context, p1 peer.ID, p2 address.Address) (*storagemarket.StorageAsk, error) { + if s.Internal.ClientQueryAsk == nil { + return nil, ErrNotSupported + } return s.Internal.ClientQueryAsk(p0, p1, p2) } func (s *FullNodeStub) ClientQueryAsk(p0 context.Context, p1 peer.ID, p2 address.Address) (*storagemarket.StorageAsk, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) ClientRemoveImport(p0 context.Context, p1 multistore.StoreID) error { + if s.Internal.ClientRemoveImport == nil { + return ErrNotSupported + } return s.Internal.ClientRemoveImport(p0, p1) } func (s *FullNodeStub) ClientRemoveImport(p0 context.Context, p1 multistore.StoreID) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *FullNodeStruct) ClientRestartDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error { + if s.Internal.ClientRestartDataTransfer == nil { + return ErrNotSupported + } return s.Internal.ClientRestartDataTransfer(p0, p1, p2, p3) } func (s *FullNodeStub) ClientRestartDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *FullNodeStruct) ClientRetrieve(p0 context.Context, p1 api.RetrievalOrder, p2 *api.FileRef) error { + if s.Internal.ClientRetrieve == nil { + return ErrNotSupported + } return s.Internal.ClientRetrieve(p0, p1, p2) } func (s *FullNodeStub) 
ClientRetrieve(p0 context.Context, p1 api.RetrievalOrder, p2 *api.FileRef) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *FullNodeStruct) ClientRetrieveTryRestartInsufficientFunds(p0 context.Context, p1 address.Address) error { + if s.Internal.ClientRetrieveTryRestartInsufficientFunds == nil { + return ErrNotSupported + } return s.Internal.ClientRetrieveTryRestartInsufficientFunds(p0, p1) } func (s *FullNodeStub) ClientRetrieveTryRestartInsufficientFunds(p0 context.Context, p1 address.Address) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *FullNodeStruct) ClientRetrieveWithEvents(p0 context.Context, p1 api.RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) { + if s.Internal.ClientRetrieveWithEvents == nil { + return nil, ErrNotSupported + } return s.Internal.ClientRetrieveWithEvents(p0, p1, p2) } func (s *FullNodeStub) ClientRetrieveWithEvents(p0 context.Context, p1 api.RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) ClientStartDeal(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) { + if s.Internal.ClientStartDeal == nil { + return nil, ErrNotSupported + } return s.Internal.ClientStartDeal(p0, p1) } func (s *FullNodeStub) ClientStartDeal(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) ClientStatelessDeal(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) { + if s.Internal.ClientStatelessDeal == nil { + return nil, ErrNotSupported + } return s.Internal.ClientStatelessDeal(p0, p1) } func (s *FullNodeStub) ClientStatelessDeal(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s 
*FullNodeStruct) CreateBackup(p0 context.Context, p1 string) error { + if s.Internal.CreateBackup == nil { + return ErrNotSupported + } return s.Internal.CreateBackup(p0, p1) } func (s *FullNodeStub) CreateBackup(p0 context.Context, p1 string) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *FullNodeStruct) GasEstimateFeeCap(p0 context.Context, p1 *types.Message, p2 int64, p3 types.TipSetKey) (types.BigInt, error) { + if s.Internal.GasEstimateFeeCap == nil { + return *new(types.BigInt), ErrNotSupported + } return s.Internal.GasEstimateFeeCap(p0, p1, p2, p3) } func (s *FullNodeStub) GasEstimateFeeCap(p0 context.Context, p1 *types.Message, p2 int64, p3 types.TipSetKey) (types.BigInt, error) { - return *new(types.BigInt), xerrors.New("method not supported") + return *new(types.BigInt), ErrNotSupported } func (s *FullNodeStruct) GasEstimateGasLimit(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (int64, error) { + if s.Internal.GasEstimateGasLimit == nil { + return 0, ErrNotSupported + } return s.Internal.GasEstimateGasLimit(p0, p1, p2) } func (s *FullNodeStub) GasEstimateGasLimit(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (int64, error) { - return 0, xerrors.New("method not supported") + return 0, ErrNotSupported } func (s *FullNodeStruct) GasEstimateGasPremium(p0 context.Context, p1 uint64, p2 address.Address, p3 int64, p4 types.TipSetKey) (types.BigInt, error) { + if s.Internal.GasEstimateGasPremium == nil { + return *new(types.BigInt), ErrNotSupported + } return s.Internal.GasEstimateGasPremium(p0, p1, p2, p3, p4) } func (s *FullNodeStub) GasEstimateGasPremium(p0 context.Context, p1 uint64, p2 address.Address, p3 int64, p4 types.TipSetKey) (types.BigInt, error) { - return *new(types.BigInt), xerrors.New("method not supported") + return *new(types.BigInt), ErrNotSupported } func (s *FullNodeStruct) GasEstimateMessageGas(p0 context.Context, p1 *types.Message, p2 *api.MessageSendSpec, p3 
types.TipSetKey) (*types.Message, error) { + if s.Internal.GasEstimateMessageGas == nil { + return nil, ErrNotSupported + } return s.Internal.GasEstimateMessageGas(p0, p1, p2, p3) } func (s *FullNodeStub) GasEstimateMessageGas(p0 context.Context, p1 *types.Message, p2 *api.MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) MarketAddBalance(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) { + if s.Internal.MarketAddBalance == nil { + return *new(cid.Cid), ErrNotSupported + } return s.Internal.MarketAddBalance(p0, p1, p2, p3) } func (s *FullNodeStub) MarketAddBalance(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) { - return *new(cid.Cid), xerrors.New("method not supported") + return *new(cid.Cid), ErrNotSupported } func (s *FullNodeStruct) MarketGetReserved(p0 context.Context, p1 address.Address) (types.BigInt, error) { + if s.Internal.MarketGetReserved == nil { + return *new(types.BigInt), ErrNotSupported + } return s.Internal.MarketGetReserved(p0, p1) } func (s *FullNodeStub) MarketGetReserved(p0 context.Context, p1 address.Address) (types.BigInt, error) { - return *new(types.BigInt), xerrors.New("method not supported") + return *new(types.BigInt), ErrNotSupported } func (s *FullNodeStruct) MarketReleaseFunds(p0 context.Context, p1 address.Address, p2 types.BigInt) error { + if s.Internal.MarketReleaseFunds == nil { + return ErrNotSupported + } return s.Internal.MarketReleaseFunds(p0, p1, p2) } func (s *FullNodeStub) MarketReleaseFunds(p0 context.Context, p1 address.Address, p2 types.BigInt) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *FullNodeStruct) MarketReserveFunds(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) { + if s.Internal.MarketReserveFunds == nil { + 
return *new(cid.Cid), ErrNotSupported + } return s.Internal.MarketReserveFunds(p0, p1, p2, p3) } func (s *FullNodeStub) MarketReserveFunds(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) { - return *new(cid.Cid), xerrors.New("method not supported") + return *new(cid.Cid), ErrNotSupported } func (s *FullNodeStruct) MarketWithdraw(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) { + if s.Internal.MarketWithdraw == nil { + return *new(cid.Cid), ErrNotSupported + } return s.Internal.MarketWithdraw(p0, p1, p2, p3) } func (s *FullNodeStub) MarketWithdraw(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) { - return *new(cid.Cid), xerrors.New("method not supported") + return *new(cid.Cid), ErrNotSupported } func (s *FullNodeStruct) MinerCreateBlock(p0 context.Context, p1 *api.BlockTemplate) (*types.BlockMsg, error) { + if s.Internal.MinerCreateBlock == nil { + return nil, ErrNotSupported + } return s.Internal.MinerCreateBlock(p0, p1) } func (s *FullNodeStub) MinerCreateBlock(p0 context.Context, p1 *api.BlockTemplate) (*types.BlockMsg, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) MinerGetBaseInfo(p0 context.Context, p1 address.Address, p2 abi.ChainEpoch, p3 types.TipSetKey) (*api.MiningBaseInfo, error) { + if s.Internal.MinerGetBaseInfo == nil { + return nil, ErrNotSupported + } return s.Internal.MinerGetBaseInfo(p0, p1, p2, p3) } func (s *FullNodeStub) MinerGetBaseInfo(p0 context.Context, p1 address.Address, p2 abi.ChainEpoch, p3 types.TipSetKey) (*api.MiningBaseInfo, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) MpoolBatchPush(p0 context.Context, p1 []*types.SignedMessage) ([]cid.Cid, error) { + if s.Internal.MpoolBatchPush == nil { + return *new([]cid.Cid), ErrNotSupported + } return 
s.Internal.MpoolBatchPush(p0, p1) } func (s *FullNodeStub) MpoolBatchPush(p0 context.Context, p1 []*types.SignedMessage) ([]cid.Cid, error) { - return *new([]cid.Cid), xerrors.New("method not supported") + return *new([]cid.Cid), ErrNotSupported } func (s *FullNodeStruct) MpoolBatchPushMessage(p0 context.Context, p1 []*types.Message, p2 *api.MessageSendSpec) ([]*types.SignedMessage, error) { + if s.Internal.MpoolBatchPushMessage == nil { + return *new([]*types.SignedMessage), ErrNotSupported + } return s.Internal.MpoolBatchPushMessage(p0, p1, p2) } func (s *FullNodeStub) MpoolBatchPushMessage(p0 context.Context, p1 []*types.Message, p2 *api.MessageSendSpec) ([]*types.SignedMessage, error) { - return *new([]*types.SignedMessage), xerrors.New("method not supported") + return *new([]*types.SignedMessage), ErrNotSupported } func (s *FullNodeStruct) MpoolBatchPushUntrusted(p0 context.Context, p1 []*types.SignedMessage) ([]cid.Cid, error) { + if s.Internal.MpoolBatchPushUntrusted == nil { + return *new([]cid.Cid), ErrNotSupported + } return s.Internal.MpoolBatchPushUntrusted(p0, p1) } func (s *FullNodeStub) MpoolBatchPushUntrusted(p0 context.Context, p1 []*types.SignedMessage) ([]cid.Cid, error) { - return *new([]cid.Cid), xerrors.New("method not supported") + return *new([]cid.Cid), ErrNotSupported } func (s *FullNodeStruct) MpoolClear(p0 context.Context, p1 bool) error { + if s.Internal.MpoolClear == nil { + return ErrNotSupported + } return s.Internal.MpoolClear(p0, p1) } func (s *FullNodeStub) MpoolClear(p0 context.Context, p1 bool) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *FullNodeStruct) MpoolGetConfig(p0 context.Context) (*types.MpoolConfig, error) { + if s.Internal.MpoolGetConfig == nil { + return nil, ErrNotSupported + } return s.Internal.MpoolGetConfig(p0) } func (s *FullNodeStub) MpoolGetConfig(p0 context.Context) (*types.MpoolConfig, error) { - return nil, xerrors.New("method not supported") + return nil, 
ErrNotSupported } func (s *FullNodeStruct) MpoolGetNonce(p0 context.Context, p1 address.Address) (uint64, error) { + if s.Internal.MpoolGetNonce == nil { + return 0, ErrNotSupported + } return s.Internal.MpoolGetNonce(p0, p1) } func (s *FullNodeStub) MpoolGetNonce(p0 context.Context, p1 address.Address) (uint64, error) { - return 0, xerrors.New("method not supported") + return 0, ErrNotSupported } func (s *FullNodeStruct) MpoolPending(p0 context.Context, p1 types.TipSetKey) ([]*types.SignedMessage, error) { + if s.Internal.MpoolPending == nil { + return *new([]*types.SignedMessage), ErrNotSupported + } return s.Internal.MpoolPending(p0, p1) } func (s *FullNodeStub) MpoolPending(p0 context.Context, p1 types.TipSetKey) ([]*types.SignedMessage, error) { - return *new([]*types.SignedMessage), xerrors.New("method not supported") + return *new([]*types.SignedMessage), ErrNotSupported } func (s *FullNodeStruct) MpoolPush(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) { + if s.Internal.MpoolPush == nil { + return *new(cid.Cid), ErrNotSupported + } return s.Internal.MpoolPush(p0, p1) } func (s *FullNodeStub) MpoolPush(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) { - return *new(cid.Cid), xerrors.New("method not supported") + return *new(cid.Cid), ErrNotSupported } func (s *FullNodeStruct) MpoolPushMessage(p0 context.Context, p1 *types.Message, p2 *api.MessageSendSpec) (*types.SignedMessage, error) { + if s.Internal.MpoolPushMessage == nil { + return nil, ErrNotSupported + } return s.Internal.MpoolPushMessage(p0, p1, p2) } func (s *FullNodeStub) MpoolPushMessage(p0 context.Context, p1 *types.Message, p2 *api.MessageSendSpec) (*types.SignedMessage, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) MpoolPushUntrusted(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) { + if s.Internal.MpoolPushUntrusted == nil { + return *new(cid.Cid), ErrNotSupported + } return 
s.Internal.MpoolPushUntrusted(p0, p1) } func (s *FullNodeStub) MpoolPushUntrusted(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) { - return *new(cid.Cid), xerrors.New("method not supported") + return *new(cid.Cid), ErrNotSupported } func (s *FullNodeStruct) MpoolSelect(p0 context.Context, p1 types.TipSetKey, p2 float64) ([]*types.SignedMessage, error) { + if s.Internal.MpoolSelect == nil { + return *new([]*types.SignedMessage), ErrNotSupported + } return s.Internal.MpoolSelect(p0, p1, p2) } func (s *FullNodeStub) MpoolSelect(p0 context.Context, p1 types.TipSetKey, p2 float64) ([]*types.SignedMessage, error) { - return *new([]*types.SignedMessage), xerrors.New("method not supported") + return *new([]*types.SignedMessage), ErrNotSupported } func (s *FullNodeStruct) MpoolSetConfig(p0 context.Context, p1 *types.MpoolConfig) error { + if s.Internal.MpoolSetConfig == nil { + return ErrNotSupported + } return s.Internal.MpoolSetConfig(p0, p1) } func (s *FullNodeStub) MpoolSetConfig(p0 context.Context, p1 *types.MpoolConfig) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *FullNodeStruct) MpoolSub(p0 context.Context) (<-chan api.MpoolUpdate, error) { + if s.Internal.MpoolSub == nil { + return nil, ErrNotSupported + } return s.Internal.MpoolSub(p0) } func (s *FullNodeStub) MpoolSub(p0 context.Context) (<-chan api.MpoolUpdate, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) MsigAddApprove(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 bool) (cid.Cid, error) { + if s.Internal.MsigAddApprove == nil { + return *new(cid.Cid), ErrNotSupported + } return s.Internal.MsigAddApprove(p0, p1, p2, p3, p4, p5, p6) } func (s *FullNodeStub) MsigAddApprove(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 bool) (cid.Cid, error) { - return 
*new(cid.Cid), xerrors.New("method not supported") + return *new(cid.Cid), ErrNotSupported } func (s *FullNodeStruct) MsigAddCancel(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 bool) (cid.Cid, error) { + if s.Internal.MsigAddCancel == nil { + return *new(cid.Cid), ErrNotSupported + } return s.Internal.MsigAddCancel(p0, p1, p2, p3, p4, p5) } func (s *FullNodeStub) MsigAddCancel(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 bool) (cid.Cid, error) { - return *new(cid.Cid), xerrors.New("method not supported") + return *new(cid.Cid), ErrNotSupported } func (s *FullNodeStruct) MsigAddPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (cid.Cid, error) { + if s.Internal.MsigAddPropose == nil { + return *new(cid.Cid), ErrNotSupported + } return s.Internal.MsigAddPropose(p0, p1, p2, p3, p4) } func (s *FullNodeStub) MsigAddPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (cid.Cid, error) { - return *new(cid.Cid), xerrors.New("method not supported") + return *new(cid.Cid), ErrNotSupported } func (s *FullNodeStruct) MsigApprove(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address) (cid.Cid, error) { + if s.Internal.MsigApprove == nil { + return *new(cid.Cid), ErrNotSupported + } return s.Internal.MsigApprove(p0, p1, p2, p3) } func (s *FullNodeStub) MsigApprove(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address) (cid.Cid, error) { - return *new(cid.Cid), xerrors.New("method not supported") + return *new(cid.Cid), ErrNotSupported } func (s *FullNodeStruct) MsigApproveTxnHash(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 address.Address, p5 types.BigInt, p6 address.Address, p7 uint64, p8 []byte) (cid.Cid, error) { + if s.Internal.MsigApproveTxnHash == nil { + return *new(cid.Cid), ErrNotSupported + } return 
s.Internal.MsigApproveTxnHash(p0, p1, p2, p3, p4, p5, p6, p7, p8) } func (s *FullNodeStub) MsigApproveTxnHash(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 address.Address, p5 types.BigInt, p6 address.Address, p7 uint64, p8 []byte) (cid.Cid, error) { - return *new(cid.Cid), xerrors.New("method not supported") + return *new(cid.Cid), ErrNotSupported } func (s *FullNodeStruct) MsigCancel(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (cid.Cid, error) { + if s.Internal.MsigCancel == nil { + return *new(cid.Cid), ErrNotSupported + } return s.Internal.MsigCancel(p0, p1, p2, p3, p4, p5, p6, p7) } func (s *FullNodeStub) MsigCancel(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (cid.Cid, error) { - return *new(cid.Cid), xerrors.New("method not supported") + return *new(cid.Cid), ErrNotSupported } func (s *FullNodeStruct) MsigCreate(p0 context.Context, p1 uint64, p2 []address.Address, p3 abi.ChainEpoch, p4 types.BigInt, p5 address.Address, p6 types.BigInt) (cid.Cid, error) { + if s.Internal.MsigCreate == nil { + return *new(cid.Cid), ErrNotSupported + } return s.Internal.MsigCreate(p0, p1, p2, p3, p4, p5, p6) } func (s *FullNodeStub) MsigCreate(p0 context.Context, p1 uint64, p2 []address.Address, p3 abi.ChainEpoch, p4 types.BigInt, p5 address.Address, p6 types.BigInt) (cid.Cid, error) { - return *new(cid.Cid), xerrors.New("method not supported") + return *new(cid.Cid), ErrNotSupported } func (s *FullNodeStruct) MsigGetAvailableBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) { + if s.Internal.MsigGetAvailableBalance == nil { + return *new(types.BigInt), ErrNotSupported + } return s.Internal.MsigGetAvailableBalance(p0, p1, p2) } func (s *FullNodeStub) MsigGetAvailableBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) 
(types.BigInt, error) { - return *new(types.BigInt), xerrors.New("method not supported") + return *new(types.BigInt), ErrNotSupported } func (s *FullNodeStruct) MsigGetPending(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*api.MsigTransaction, error) { + if s.Internal.MsigGetPending == nil { + return *new([]*api.MsigTransaction), ErrNotSupported + } return s.Internal.MsigGetPending(p0, p1, p2) } func (s *FullNodeStub) MsigGetPending(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*api.MsigTransaction, error) { - return *new([]*api.MsigTransaction), xerrors.New("method not supported") + return *new([]*api.MsigTransaction), ErrNotSupported } func (s *FullNodeStruct) MsigGetVested(p0 context.Context, p1 address.Address, p2 types.TipSetKey, p3 types.TipSetKey) (types.BigInt, error) { + if s.Internal.MsigGetVested == nil { + return *new(types.BigInt), ErrNotSupported + } return s.Internal.MsigGetVested(p0, p1, p2, p3) } func (s *FullNodeStub) MsigGetVested(p0 context.Context, p1 address.Address, p2 types.TipSetKey, p3 types.TipSetKey) (types.BigInt, error) { - return *new(types.BigInt), xerrors.New("method not supported") + return *new(types.BigInt), ErrNotSupported } func (s *FullNodeStruct) MsigGetVestingSchedule(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MsigVesting, error) { + if s.Internal.MsigGetVestingSchedule == nil { + return *new(api.MsigVesting), ErrNotSupported + } return s.Internal.MsigGetVestingSchedule(p0, p1, p2) } func (s *FullNodeStub) MsigGetVestingSchedule(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MsigVesting, error) { - return *new(api.MsigVesting), xerrors.New("method not supported") + return *new(api.MsigVesting), ErrNotSupported } func (s *FullNodeStruct) MsigPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt, p4 address.Address, p5 uint64, p6 []byte) (cid.Cid, error) { + if s.Internal.MsigPropose == nil { + return *new(cid.Cid), 
ErrNotSupported + } return s.Internal.MsigPropose(p0, p1, p2, p3, p4, p5, p6) } func (s *FullNodeStub) MsigPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt, p4 address.Address, p5 uint64, p6 []byte) (cid.Cid, error) { - return *new(cid.Cid), xerrors.New("method not supported") + return *new(cid.Cid), ErrNotSupported } func (s *FullNodeStruct) MsigRemoveSigner(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (cid.Cid, error) { + if s.Internal.MsigRemoveSigner == nil { + return *new(cid.Cid), ErrNotSupported + } return s.Internal.MsigRemoveSigner(p0, p1, p2, p3, p4) } func (s *FullNodeStub) MsigRemoveSigner(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (cid.Cid, error) { - return *new(cid.Cid), xerrors.New("method not supported") + return *new(cid.Cid), ErrNotSupported } func (s *FullNodeStruct) MsigSwapApprove(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 address.Address) (cid.Cid, error) { + if s.Internal.MsigSwapApprove == nil { + return *new(cid.Cid), ErrNotSupported + } return s.Internal.MsigSwapApprove(p0, p1, p2, p3, p4, p5, p6) } func (s *FullNodeStub) MsigSwapApprove(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 address.Address) (cid.Cid, error) { - return *new(cid.Cid), xerrors.New("method not supported") + return *new(cid.Cid), ErrNotSupported } func (s *FullNodeStruct) MsigSwapCancel(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address) (cid.Cid, error) { + if s.Internal.MsigSwapCancel == nil { + return *new(cid.Cid), ErrNotSupported + } return s.Internal.MsigSwapCancel(p0, p1, p2, p3, p4, p5) } func (s *FullNodeStub) MsigSwapCancel(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address) 
(cid.Cid, error) { - return *new(cid.Cid), xerrors.New("method not supported") + return *new(cid.Cid), ErrNotSupported } func (s *FullNodeStruct) MsigSwapPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 address.Address) (cid.Cid, error) { + if s.Internal.MsigSwapPropose == nil { + return *new(cid.Cid), ErrNotSupported + } return s.Internal.MsigSwapPropose(p0, p1, p2, p3, p4) } func (s *FullNodeStub) MsigSwapPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 address.Address) (cid.Cid, error) { - return *new(cid.Cid), xerrors.New("method not supported") + return *new(cid.Cid), ErrNotSupported } func (s *FullNodeStruct) PaychAllocateLane(p0 context.Context, p1 address.Address) (uint64, error) { + if s.Internal.PaychAllocateLane == nil { + return 0, ErrNotSupported + } return s.Internal.PaychAllocateLane(p0, p1) } func (s *FullNodeStub) PaychAllocateLane(p0 context.Context, p1 address.Address) (uint64, error) { - return 0, xerrors.New("method not supported") + return 0, ErrNotSupported } func (s *FullNodeStruct) PaychAvailableFunds(p0 context.Context, p1 address.Address) (*api.ChannelAvailableFunds, error) { + if s.Internal.PaychAvailableFunds == nil { + return nil, ErrNotSupported + } return s.Internal.PaychAvailableFunds(p0, p1) } func (s *FullNodeStub) PaychAvailableFunds(p0 context.Context, p1 address.Address) (*api.ChannelAvailableFunds, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) PaychAvailableFundsByFromTo(p0 context.Context, p1 address.Address, p2 address.Address) (*api.ChannelAvailableFunds, error) { + if s.Internal.PaychAvailableFundsByFromTo == nil { + return nil, ErrNotSupported + } return s.Internal.PaychAvailableFundsByFromTo(p0, p1, p2) } func (s *FullNodeStub) PaychAvailableFundsByFromTo(p0 context.Context, p1 address.Address, p2 address.Address) (*api.ChannelAvailableFunds, error) { - return nil, 
xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) PaychCollect(p0 context.Context, p1 address.Address) (cid.Cid, error) { + if s.Internal.PaychCollect == nil { + return *new(cid.Cid), ErrNotSupported + } return s.Internal.PaychCollect(p0, p1) } func (s *FullNodeStub) PaychCollect(p0 context.Context, p1 address.Address) (cid.Cid, error) { - return *new(cid.Cid), xerrors.New("method not supported") + return *new(cid.Cid), ErrNotSupported } func (s *FullNodeStruct) PaychGet(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (*api.ChannelInfo, error) { + if s.Internal.PaychGet == nil { + return nil, ErrNotSupported + } return s.Internal.PaychGet(p0, p1, p2, p3) } func (s *FullNodeStub) PaychGet(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (*api.ChannelInfo, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) PaychGetWaitReady(p0 context.Context, p1 cid.Cid) (address.Address, error) { + if s.Internal.PaychGetWaitReady == nil { + return *new(address.Address), ErrNotSupported + } return s.Internal.PaychGetWaitReady(p0, p1) } func (s *FullNodeStub) PaychGetWaitReady(p0 context.Context, p1 cid.Cid) (address.Address, error) { - return *new(address.Address), xerrors.New("method not supported") + return *new(address.Address), ErrNotSupported } func (s *FullNodeStruct) PaychList(p0 context.Context) ([]address.Address, error) { + if s.Internal.PaychList == nil { + return *new([]address.Address), ErrNotSupported + } return s.Internal.PaychList(p0) } func (s *FullNodeStub) PaychList(p0 context.Context) ([]address.Address, error) { - return *new([]address.Address), xerrors.New("method not supported") + return *new([]address.Address), ErrNotSupported } func (s *FullNodeStruct) PaychNewPayment(p0 context.Context, p1 address.Address, p2 address.Address, p3 []api.VoucherSpec) (*api.PaymentInfo, error) { + if 
s.Internal.PaychNewPayment == nil { + return nil, ErrNotSupported + } return s.Internal.PaychNewPayment(p0, p1, p2, p3) } func (s *FullNodeStub) PaychNewPayment(p0 context.Context, p1 address.Address, p2 address.Address, p3 []api.VoucherSpec) (*api.PaymentInfo, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) PaychSettle(p0 context.Context, p1 address.Address) (cid.Cid, error) { + if s.Internal.PaychSettle == nil { + return *new(cid.Cid), ErrNotSupported + } return s.Internal.PaychSettle(p0, p1) } func (s *FullNodeStub) PaychSettle(p0 context.Context, p1 address.Address) (cid.Cid, error) { - return *new(cid.Cid), xerrors.New("method not supported") + return *new(cid.Cid), ErrNotSupported } func (s *FullNodeStruct) PaychStatus(p0 context.Context, p1 address.Address) (*api.PaychStatus, error) { + if s.Internal.PaychStatus == nil { + return nil, ErrNotSupported + } return s.Internal.PaychStatus(p0, p1) } func (s *FullNodeStub) PaychStatus(p0 context.Context, p1 address.Address) (*api.PaychStatus, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) PaychVoucherAdd(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 types.BigInt) (types.BigInt, error) { + if s.Internal.PaychVoucherAdd == nil { + return *new(types.BigInt), ErrNotSupported + } return s.Internal.PaychVoucherAdd(p0, p1, p2, p3, p4) } func (s *FullNodeStub) PaychVoucherAdd(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 types.BigInt) (types.BigInt, error) { - return *new(types.BigInt), xerrors.New("method not supported") + return *new(types.BigInt), ErrNotSupported } func (s *FullNodeStruct) PaychVoucherCheckSpendable(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 []byte) (bool, error) { + if s.Internal.PaychVoucherCheckSpendable == nil { + return false, ErrNotSupported + } return 
s.Internal.PaychVoucherCheckSpendable(p0, p1, p2, p3, p4) } func (s *FullNodeStub) PaychVoucherCheckSpendable(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 []byte) (bool, error) { - return false, xerrors.New("method not supported") + return false, ErrNotSupported } func (s *FullNodeStruct) PaychVoucherCheckValid(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher) error { + if s.Internal.PaychVoucherCheckValid == nil { + return ErrNotSupported + } return s.Internal.PaychVoucherCheckValid(p0, p1, p2) } func (s *FullNodeStub) PaychVoucherCheckValid(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *FullNodeStruct) PaychVoucherCreate(p0 context.Context, p1 address.Address, p2 types.BigInt, p3 uint64) (*api.VoucherCreateResult, error) { + if s.Internal.PaychVoucherCreate == nil { + return nil, ErrNotSupported + } return s.Internal.PaychVoucherCreate(p0, p1, p2, p3) } func (s *FullNodeStub) PaychVoucherCreate(p0 context.Context, p1 address.Address, p2 types.BigInt, p3 uint64) (*api.VoucherCreateResult, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) PaychVoucherList(p0 context.Context, p1 address.Address) ([]*paych.SignedVoucher, error) { + if s.Internal.PaychVoucherList == nil { + return *new([]*paych.SignedVoucher), ErrNotSupported + } return s.Internal.PaychVoucherList(p0, p1) } func (s *FullNodeStub) PaychVoucherList(p0 context.Context, p1 address.Address) ([]*paych.SignedVoucher, error) { - return *new([]*paych.SignedVoucher), xerrors.New("method not supported") + return *new([]*paych.SignedVoucher), ErrNotSupported } func (s *FullNodeStruct) PaychVoucherSubmit(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 []byte) (cid.Cid, error) { + if s.Internal.PaychVoucherSubmit == nil { + return *new(cid.Cid), ErrNotSupported + } 
return s.Internal.PaychVoucherSubmit(p0, p1, p2, p3, p4) } func (s *FullNodeStub) PaychVoucherSubmit(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 []byte) (cid.Cid, error) { - return *new(cid.Cid), xerrors.New("method not supported") + return *new(cid.Cid), ErrNotSupported } func (s *FullNodeStruct) StateAccountKey(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) { + if s.Internal.StateAccountKey == nil { + return *new(address.Address), ErrNotSupported + } return s.Internal.StateAccountKey(p0, p1, p2) } func (s *FullNodeStub) StateAccountKey(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) { - return *new(address.Address), xerrors.New("method not supported") + return *new(address.Address), ErrNotSupported } func (s *FullNodeStruct) StateAllMinerFaults(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) ([]*api.Fault, error) { + if s.Internal.StateAllMinerFaults == nil { + return *new([]*api.Fault), ErrNotSupported + } return s.Internal.StateAllMinerFaults(p0, p1, p2) } func (s *FullNodeStub) StateAllMinerFaults(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) ([]*api.Fault, error) { - return *new([]*api.Fault), xerrors.New("method not supported") + return *new([]*api.Fault), ErrNotSupported } func (s *FullNodeStruct) StateCall(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (*api.InvocResult, error) { + if s.Internal.StateCall == nil { + return nil, ErrNotSupported + } return s.Internal.StateCall(p0, p1, p2) } func (s *FullNodeStub) StateCall(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (*api.InvocResult, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) StateChangedActors(p0 context.Context, p1 cid.Cid, p2 cid.Cid) (map[string]types.Actor, error) { + if s.Internal.StateChangedActors == nil { + return *new(map[string]types.Actor), ErrNotSupported + } 
return s.Internal.StateChangedActors(p0, p1, p2) } func (s *FullNodeStub) StateChangedActors(p0 context.Context, p1 cid.Cid, p2 cid.Cid) (map[string]types.Actor, error) { - return *new(map[string]types.Actor), xerrors.New("method not supported") + return *new(map[string]types.Actor), ErrNotSupported } func (s *FullNodeStruct) StateCirculatingSupply(p0 context.Context, p1 types.TipSetKey) (abi.TokenAmount, error) { + if s.Internal.StateCirculatingSupply == nil { + return *new(abi.TokenAmount), ErrNotSupported + } return s.Internal.StateCirculatingSupply(p0, p1) } func (s *FullNodeStub) StateCirculatingSupply(p0 context.Context, p1 types.TipSetKey) (abi.TokenAmount, error) { - return *new(abi.TokenAmount), xerrors.New("method not supported") + return *new(abi.TokenAmount), ErrNotSupported } func (s *FullNodeStruct) StateCompute(p0 context.Context, p1 abi.ChainEpoch, p2 []*types.Message, p3 types.TipSetKey) (*api.ComputeStateOutput, error) { + if s.Internal.StateCompute == nil { + return nil, ErrNotSupported + } return s.Internal.StateCompute(p0, p1, p2, p3) } func (s *FullNodeStub) StateCompute(p0 context.Context, p1 abi.ChainEpoch, p2 []*types.Message, p3 types.TipSetKey) (*api.ComputeStateOutput, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) StateDealProviderCollateralBounds(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (api.DealCollateralBounds, error) { + if s.Internal.StateDealProviderCollateralBounds == nil { + return *new(api.DealCollateralBounds), ErrNotSupported + } return s.Internal.StateDealProviderCollateralBounds(p0, p1, p2, p3) } func (s *FullNodeStub) StateDealProviderCollateralBounds(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (api.DealCollateralBounds, error) { - return *new(api.DealCollateralBounds), xerrors.New("method not supported") + return *new(api.DealCollateralBounds), ErrNotSupported } func (s *FullNodeStruct) 
StateDecodeParams(p0 context.Context, p1 address.Address, p2 abi.MethodNum, p3 []byte, p4 types.TipSetKey) (interface{}, error) { + if s.Internal.StateDecodeParams == nil { + return nil, ErrNotSupported + } return s.Internal.StateDecodeParams(p0, p1, p2, p3, p4) } func (s *FullNodeStub) StateDecodeParams(p0 context.Context, p1 address.Address, p2 abi.MethodNum, p3 []byte, p4 types.TipSetKey) (interface{}, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) StateGetActor(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) { + if s.Internal.StateGetActor == nil { + return nil, ErrNotSupported + } return s.Internal.StateGetActor(p0, p1, p2) } func (s *FullNodeStub) StateGetActor(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) StateGetReceipt(p0 context.Context, p1 cid.Cid, p2 types.TipSetKey) (*types.MessageReceipt, error) { + if s.Internal.StateGetReceipt == nil { + return nil, ErrNotSupported + } return s.Internal.StateGetReceipt(p0, p1, p2) } func (s *FullNodeStub) StateGetReceipt(p0 context.Context, p1 cid.Cid, p2 types.TipSetKey) (*types.MessageReceipt, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) StateListActors(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) { + if s.Internal.StateListActors == nil { + return *new([]address.Address), ErrNotSupported + } return s.Internal.StateListActors(p0, p1) } func (s *FullNodeStub) StateListActors(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) { - return *new([]address.Address), xerrors.New("method not supported") + return *new([]address.Address), ErrNotSupported } func (s *FullNodeStruct) StateListMessages(p0 context.Context, p1 *api.MessageMatch, p2 types.TipSetKey, p3 
abi.ChainEpoch) ([]cid.Cid, error) { + if s.Internal.StateListMessages == nil { + return *new([]cid.Cid), ErrNotSupported + } return s.Internal.StateListMessages(p0, p1, p2, p3) } func (s *FullNodeStub) StateListMessages(p0 context.Context, p1 *api.MessageMatch, p2 types.TipSetKey, p3 abi.ChainEpoch) ([]cid.Cid, error) { - return *new([]cid.Cid), xerrors.New("method not supported") + return *new([]cid.Cid), ErrNotSupported } func (s *FullNodeStruct) StateListMiners(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) { + if s.Internal.StateListMiners == nil { + return *new([]address.Address), ErrNotSupported + } return s.Internal.StateListMiners(p0, p1) } func (s *FullNodeStub) StateListMiners(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) { - return *new([]address.Address), xerrors.New("method not supported") + return *new([]address.Address), ErrNotSupported } func (s *FullNodeStruct) StateLookupID(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) { + if s.Internal.StateLookupID == nil { + return *new(address.Address), ErrNotSupported + } return s.Internal.StateLookupID(p0, p1, p2) } func (s *FullNodeStub) StateLookupID(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) { - return *new(address.Address), xerrors.New("method not supported") + return *new(address.Address), ErrNotSupported } func (s *FullNodeStruct) StateMarketBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MarketBalance, error) { + if s.Internal.StateMarketBalance == nil { + return *new(api.MarketBalance), ErrNotSupported + } return s.Internal.StateMarketBalance(p0, p1, p2) } func (s *FullNodeStub) StateMarketBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MarketBalance, error) { - return *new(api.MarketBalance), xerrors.New("method not supported") + return *new(api.MarketBalance), ErrNotSupported } func (s *FullNodeStruct) StateMarketDeals(p0 
context.Context, p1 types.TipSetKey) (map[string]api.MarketDeal, error) { + if s.Internal.StateMarketDeals == nil { + return *new(map[string]api.MarketDeal), ErrNotSupported + } return s.Internal.StateMarketDeals(p0, p1) } func (s *FullNodeStub) StateMarketDeals(p0 context.Context, p1 types.TipSetKey) (map[string]api.MarketDeal, error) { - return *new(map[string]api.MarketDeal), xerrors.New("method not supported") + return *new(map[string]api.MarketDeal), ErrNotSupported } func (s *FullNodeStruct) StateMarketParticipants(p0 context.Context, p1 types.TipSetKey) (map[string]api.MarketBalance, error) { + if s.Internal.StateMarketParticipants == nil { + return *new(map[string]api.MarketBalance), ErrNotSupported + } return s.Internal.StateMarketParticipants(p0, p1) } func (s *FullNodeStub) StateMarketParticipants(p0 context.Context, p1 types.TipSetKey) (map[string]api.MarketBalance, error) { - return *new(map[string]api.MarketBalance), xerrors.New("method not supported") + return *new(map[string]api.MarketBalance), ErrNotSupported } func (s *FullNodeStruct) StateMarketStorageDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*api.MarketDeal, error) { + if s.Internal.StateMarketStorageDeal == nil { + return nil, ErrNotSupported + } return s.Internal.StateMarketStorageDeal(p0, p1, p2) } func (s *FullNodeStub) StateMarketStorageDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*api.MarketDeal, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) StateMinerActiveSectors(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) { + if s.Internal.StateMinerActiveSectors == nil { + return *new([]*miner.SectorOnChainInfo), ErrNotSupported + } return s.Internal.StateMinerActiveSectors(p0, p1, p2) } func (s *FullNodeStub) StateMinerActiveSectors(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) { - return 
*new([]*miner.SectorOnChainInfo), xerrors.New("method not supported") + return *new([]*miner.SectorOnChainInfo), ErrNotSupported } func (s *FullNodeStruct) StateMinerAvailableBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) { + if s.Internal.StateMinerAvailableBalance == nil { + return *new(types.BigInt), ErrNotSupported + } return s.Internal.StateMinerAvailableBalance(p0, p1, p2) } func (s *FullNodeStub) StateMinerAvailableBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) { - return *new(types.BigInt), xerrors.New("method not supported") + return *new(types.BigInt), ErrNotSupported } func (s *FullNodeStruct) StateMinerDeadlines(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]api.Deadline, error) { + if s.Internal.StateMinerDeadlines == nil { + return *new([]api.Deadline), ErrNotSupported + } return s.Internal.StateMinerDeadlines(p0, p1, p2) } func (s *FullNodeStub) StateMinerDeadlines(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]api.Deadline, error) { - return *new([]api.Deadline), xerrors.New("method not supported") + return *new([]api.Deadline), ErrNotSupported } func (s *FullNodeStruct) StateMinerFaults(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (bitfield.BitField, error) { + if s.Internal.StateMinerFaults == nil { + return *new(bitfield.BitField), ErrNotSupported + } return s.Internal.StateMinerFaults(p0, p1, p2) } func (s *FullNodeStub) StateMinerFaults(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (bitfield.BitField, error) { - return *new(bitfield.BitField), xerrors.New("method not supported") + return *new(bitfield.BitField), ErrNotSupported } func (s *FullNodeStruct) StateMinerInfo(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (miner.MinerInfo, error) { + if s.Internal.StateMinerInfo == nil { + return *new(miner.MinerInfo), ErrNotSupported + } return s.Internal.StateMinerInfo(p0, p1, p2) } func (s 
*FullNodeStub) StateMinerInfo(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (miner.MinerInfo, error) { - return *new(miner.MinerInfo), xerrors.New("method not supported") + return *new(miner.MinerInfo), ErrNotSupported } func (s *FullNodeStruct) StateMinerInitialPledgeCollateral(p0 context.Context, p1 address.Address, p2 miner.SectorPreCommitInfo, p3 types.TipSetKey) (types.BigInt, error) { + if s.Internal.StateMinerInitialPledgeCollateral == nil { + return *new(types.BigInt), ErrNotSupported + } return s.Internal.StateMinerInitialPledgeCollateral(p0, p1, p2, p3) } func (s *FullNodeStub) StateMinerInitialPledgeCollateral(p0 context.Context, p1 address.Address, p2 miner.SectorPreCommitInfo, p3 types.TipSetKey) (types.BigInt, error) { - return *new(types.BigInt), xerrors.New("method not supported") + return *new(types.BigInt), ErrNotSupported } func (s *FullNodeStruct) StateMinerPartitions(p0 context.Context, p1 address.Address, p2 uint64, p3 types.TipSetKey) ([]api.Partition, error) { + if s.Internal.StateMinerPartitions == nil { + return *new([]api.Partition), ErrNotSupported + } return s.Internal.StateMinerPartitions(p0, p1, p2, p3) } func (s *FullNodeStub) StateMinerPartitions(p0 context.Context, p1 address.Address, p2 uint64, p3 types.TipSetKey) ([]api.Partition, error) { - return *new([]api.Partition), xerrors.New("method not supported") + return *new([]api.Partition), ErrNotSupported } func (s *FullNodeStruct) StateMinerPower(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*api.MinerPower, error) { + if s.Internal.StateMinerPower == nil { + return nil, ErrNotSupported + } return s.Internal.StateMinerPower(p0, p1, p2) } func (s *FullNodeStub) StateMinerPower(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*api.MinerPower, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) StateMinerPreCommitDepositForPower(p0 context.Context, p1 address.Address, p2 
miner.SectorPreCommitInfo, p3 types.TipSetKey) (types.BigInt, error) { + if s.Internal.StateMinerPreCommitDepositForPower == nil { + return *new(types.BigInt), ErrNotSupported + } return s.Internal.StateMinerPreCommitDepositForPower(p0, p1, p2, p3) } func (s *FullNodeStub) StateMinerPreCommitDepositForPower(p0 context.Context, p1 address.Address, p2 miner.SectorPreCommitInfo, p3 types.TipSetKey) (types.BigInt, error) { - return *new(types.BigInt), xerrors.New("method not supported") + return *new(types.BigInt), ErrNotSupported } func (s *FullNodeStruct) StateMinerProvingDeadline(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) { + if s.Internal.StateMinerProvingDeadline == nil { + return nil, ErrNotSupported + } return s.Internal.StateMinerProvingDeadline(p0, p1, p2) } func (s *FullNodeStub) StateMinerProvingDeadline(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) StateMinerRecoveries(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (bitfield.BitField, error) { + if s.Internal.StateMinerRecoveries == nil { + return *new(bitfield.BitField), ErrNotSupported + } return s.Internal.StateMinerRecoveries(p0, p1, p2) } func (s *FullNodeStub) StateMinerRecoveries(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (bitfield.BitField, error) { - return *new(bitfield.BitField), xerrors.New("method not supported") + return *new(bitfield.BitField), ErrNotSupported } func (s *FullNodeStruct) StateMinerSectorAllocated(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (bool, error) { + if s.Internal.StateMinerSectorAllocated == nil { + return false, ErrNotSupported + } return s.Internal.StateMinerSectorAllocated(p0, p1, p2, p3) } func (s *FullNodeStub) StateMinerSectorAllocated(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) 
(bool, error) { - return false, xerrors.New("method not supported") + return false, ErrNotSupported } func (s *FullNodeStruct) StateMinerSectorCount(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MinerSectors, error) { + if s.Internal.StateMinerSectorCount == nil { + return *new(api.MinerSectors), ErrNotSupported + } return s.Internal.StateMinerSectorCount(p0, p1, p2) } func (s *FullNodeStub) StateMinerSectorCount(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MinerSectors, error) { - return *new(api.MinerSectors), xerrors.New("method not supported") + return *new(api.MinerSectors), ErrNotSupported } func (s *FullNodeStruct) StateMinerSectors(p0 context.Context, p1 address.Address, p2 *bitfield.BitField, p3 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) { + if s.Internal.StateMinerSectors == nil { + return *new([]*miner.SectorOnChainInfo), ErrNotSupported + } return s.Internal.StateMinerSectors(p0, p1, p2, p3) } func (s *FullNodeStub) StateMinerSectors(p0 context.Context, p1 address.Address, p2 *bitfield.BitField, p3 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) { - return *new([]*miner.SectorOnChainInfo), xerrors.New("method not supported") + return *new([]*miner.SectorOnChainInfo), ErrNotSupported } func (s *FullNodeStruct) StateNetworkName(p0 context.Context) (dtypes.NetworkName, error) { + if s.Internal.StateNetworkName == nil { + return *new(dtypes.NetworkName), ErrNotSupported + } return s.Internal.StateNetworkName(p0) } func (s *FullNodeStub) StateNetworkName(p0 context.Context) (dtypes.NetworkName, error) { - return *new(dtypes.NetworkName), xerrors.New("method not supported") + return *new(dtypes.NetworkName), ErrNotSupported } func (s *FullNodeStruct) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (apitypes.NetworkVersion, error) { + if s.Internal.StateNetworkVersion == nil { + return *new(apitypes.NetworkVersion), ErrNotSupported + } return s.Internal.StateNetworkVersion(p0, p1) } func (s 
*FullNodeStub) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (apitypes.NetworkVersion, error) { - return *new(apitypes.NetworkVersion), xerrors.New("method not supported") + return *new(apitypes.NetworkVersion), ErrNotSupported } func (s *FullNodeStruct) StateReadState(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*api.ActorState, error) { + if s.Internal.StateReadState == nil { + return nil, ErrNotSupported + } return s.Internal.StateReadState(p0, p1, p2) } func (s *FullNodeStub) StateReadState(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*api.ActorState, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) StateReplay(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid) (*api.InvocResult, error) { + if s.Internal.StateReplay == nil { + return nil, ErrNotSupported + } return s.Internal.StateReplay(p0, p1, p2) } func (s *FullNodeStub) StateReplay(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid) (*api.InvocResult, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) StateSearchMsg(p0 context.Context, p1 cid.Cid) (*api.MsgLookup, error) { + if s.Internal.StateSearchMsg == nil { + return nil, ErrNotSupported + } return s.Internal.StateSearchMsg(p0, p1) } func (s *FullNodeStub) StateSearchMsg(p0 context.Context, p1 cid.Cid) (*api.MsgLookup, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) StateSearchMsgLimited(p0 context.Context, p1 cid.Cid, p2 abi.ChainEpoch) (*api.MsgLookup, error) { + if s.Internal.StateSearchMsgLimited == nil { + return nil, ErrNotSupported + } return s.Internal.StateSearchMsgLimited(p0, p1, p2) } func (s *FullNodeStub) StateSearchMsgLimited(p0 context.Context, p1 cid.Cid, p2 abi.ChainEpoch) (*api.MsgLookup, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s 
*FullNodeStruct) StateSectorExpiration(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorExpiration, error) { + if s.Internal.StateSectorExpiration == nil { + return nil, ErrNotSupported + } return s.Internal.StateSectorExpiration(p0, p1, p2, p3) } func (s *FullNodeStub) StateSectorExpiration(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorExpiration, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) StateSectorGetInfo(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorOnChainInfo, error) { + if s.Internal.StateSectorGetInfo == nil { + return nil, ErrNotSupported + } return s.Internal.StateSectorGetInfo(p0, p1, p2, p3) } func (s *FullNodeStub) StateSectorGetInfo(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorOnChainInfo, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) StateSectorPartition(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorLocation, error) { + if s.Internal.StateSectorPartition == nil { + return nil, ErrNotSupported + } return s.Internal.StateSectorPartition(p0, p1, p2, p3) } func (s *FullNodeStub) StateSectorPartition(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorLocation, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) StateSectorPreCommitInfo(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) { + if s.Internal.StateSectorPreCommitInfo == nil { + return *new(miner.SectorPreCommitOnChainInfo), ErrNotSupported + } return s.Internal.StateSectorPreCommitInfo(p0, p1, p2, p3) } func (s *FullNodeStub) 
StateSectorPreCommitInfo(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) { - return *new(miner.SectorPreCommitOnChainInfo), xerrors.New("method not supported") + return *new(miner.SectorPreCommitOnChainInfo), ErrNotSupported } func (s *FullNodeStruct) StateVMCirculatingSupplyInternal(p0 context.Context, p1 types.TipSetKey) (api.CirculatingSupply, error) { + if s.Internal.StateVMCirculatingSupplyInternal == nil { + return *new(api.CirculatingSupply), ErrNotSupported + } return s.Internal.StateVMCirculatingSupplyInternal(p0, p1) } func (s *FullNodeStub) StateVMCirculatingSupplyInternal(p0 context.Context, p1 types.TipSetKey) (api.CirculatingSupply, error) { - return *new(api.CirculatingSupply), xerrors.New("method not supported") + return *new(api.CirculatingSupply), ErrNotSupported } func (s *FullNodeStruct) StateVerifiedClientStatus(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) { + if s.Internal.StateVerifiedClientStatus == nil { + return nil, ErrNotSupported + } return s.Internal.StateVerifiedClientStatus(p0, p1, p2) } func (s *FullNodeStub) StateVerifiedClientStatus(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) StateVerifiedRegistryRootKey(p0 context.Context, p1 types.TipSetKey) (address.Address, error) { + if s.Internal.StateVerifiedRegistryRootKey == nil { + return *new(address.Address), ErrNotSupported + } return s.Internal.StateVerifiedRegistryRootKey(p0, p1) } func (s *FullNodeStub) StateVerifiedRegistryRootKey(p0 context.Context, p1 types.TipSetKey) (address.Address, error) { - return *new(address.Address), xerrors.New("method not supported") + return *new(address.Address), ErrNotSupported } func (s *FullNodeStruct) StateVerifierStatus(p0 context.Context, p1 address.Address, p2 types.TipSetKey) 
(*abi.StoragePower, error) { + if s.Internal.StateVerifierStatus == nil { + return nil, ErrNotSupported + } return s.Internal.StateVerifierStatus(p0, p1, p2) } func (s *FullNodeStub) StateVerifierStatus(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) StateWaitMsg(p0 context.Context, p1 cid.Cid, p2 uint64) (*api.MsgLookup, error) { + if s.Internal.StateWaitMsg == nil { + return nil, ErrNotSupported + } return s.Internal.StateWaitMsg(p0, p1, p2) } func (s *FullNodeStub) StateWaitMsg(p0 context.Context, p1 cid.Cid, p2 uint64) (*api.MsgLookup, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) StateWaitMsgLimited(p0 context.Context, p1 cid.Cid, p2 uint64, p3 abi.ChainEpoch) (*api.MsgLookup, error) { + if s.Internal.StateWaitMsgLimited == nil { + return nil, ErrNotSupported + } return s.Internal.StateWaitMsgLimited(p0, p1, p2, p3) } func (s *FullNodeStub) StateWaitMsgLimited(p0 context.Context, p1 cid.Cid, p2 uint64, p3 abi.ChainEpoch) (*api.MsgLookup, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) SyncCheckBad(p0 context.Context, p1 cid.Cid) (string, error) { + if s.Internal.SyncCheckBad == nil { + return "", ErrNotSupported + } return s.Internal.SyncCheckBad(p0, p1) } func (s *FullNodeStub) SyncCheckBad(p0 context.Context, p1 cid.Cid) (string, error) { - return "", xerrors.New("method not supported") + return "", ErrNotSupported } func (s *FullNodeStruct) SyncCheckpoint(p0 context.Context, p1 types.TipSetKey) error { + if s.Internal.SyncCheckpoint == nil { + return ErrNotSupported + } return s.Internal.SyncCheckpoint(p0, p1) } func (s *FullNodeStub) SyncCheckpoint(p0 context.Context, p1 types.TipSetKey) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s 
*FullNodeStruct) SyncIncomingBlocks(p0 context.Context) (<-chan *types.BlockHeader, error) { + if s.Internal.SyncIncomingBlocks == nil { + return nil, ErrNotSupported + } return s.Internal.SyncIncomingBlocks(p0) } func (s *FullNodeStub) SyncIncomingBlocks(p0 context.Context) (<-chan *types.BlockHeader, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) SyncMarkBad(p0 context.Context, p1 cid.Cid) error { + if s.Internal.SyncMarkBad == nil { + return ErrNotSupported + } return s.Internal.SyncMarkBad(p0, p1) } func (s *FullNodeStub) SyncMarkBad(p0 context.Context, p1 cid.Cid) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *FullNodeStruct) SyncState(p0 context.Context) (*api.SyncState, error) { + if s.Internal.SyncState == nil { + return nil, ErrNotSupported + } return s.Internal.SyncState(p0) } func (s *FullNodeStub) SyncState(p0 context.Context) (*api.SyncState, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) SyncSubmitBlock(p0 context.Context, p1 *types.BlockMsg) error { + if s.Internal.SyncSubmitBlock == nil { + return ErrNotSupported + } return s.Internal.SyncSubmitBlock(p0, p1) } func (s *FullNodeStub) SyncSubmitBlock(p0 context.Context, p1 *types.BlockMsg) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *FullNodeStruct) SyncUnmarkAllBad(p0 context.Context) error { + if s.Internal.SyncUnmarkAllBad == nil { + return ErrNotSupported + } return s.Internal.SyncUnmarkAllBad(p0) } func (s *FullNodeStub) SyncUnmarkAllBad(p0 context.Context) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *FullNodeStruct) SyncUnmarkBad(p0 context.Context, p1 cid.Cid) error { + if s.Internal.SyncUnmarkBad == nil { + return ErrNotSupported + } return s.Internal.SyncUnmarkBad(p0, p1) } func (s *FullNodeStub) SyncUnmarkBad(p0 context.Context, 
p1 cid.Cid) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *FullNodeStruct) SyncValidateTipset(p0 context.Context, p1 types.TipSetKey) (bool, error) { + if s.Internal.SyncValidateTipset == nil { + return false, ErrNotSupported + } return s.Internal.SyncValidateTipset(p0, p1) } func (s *FullNodeStub) SyncValidateTipset(p0 context.Context, p1 types.TipSetKey) (bool, error) { - return false, xerrors.New("method not supported") + return false, ErrNotSupported } func (s *FullNodeStruct) WalletBalance(p0 context.Context, p1 address.Address) (types.BigInt, error) { + if s.Internal.WalletBalance == nil { + return *new(types.BigInt), ErrNotSupported + } return s.Internal.WalletBalance(p0, p1) } func (s *FullNodeStub) WalletBalance(p0 context.Context, p1 address.Address) (types.BigInt, error) { - return *new(types.BigInt), xerrors.New("method not supported") + return *new(types.BigInt), ErrNotSupported } func (s *FullNodeStruct) WalletDefaultAddress(p0 context.Context) (address.Address, error) { + if s.Internal.WalletDefaultAddress == nil { + return *new(address.Address), ErrNotSupported + } return s.Internal.WalletDefaultAddress(p0) } func (s *FullNodeStub) WalletDefaultAddress(p0 context.Context) (address.Address, error) { - return *new(address.Address), xerrors.New("method not supported") + return *new(address.Address), ErrNotSupported } func (s *FullNodeStruct) WalletDelete(p0 context.Context, p1 address.Address) error { + if s.Internal.WalletDelete == nil { + return ErrNotSupported + } return s.Internal.WalletDelete(p0, p1) } func (s *FullNodeStub) WalletDelete(p0 context.Context, p1 address.Address) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *FullNodeStruct) WalletExport(p0 context.Context, p1 address.Address) (*types.KeyInfo, error) { + if s.Internal.WalletExport == nil { + return nil, ErrNotSupported + } return s.Internal.WalletExport(p0, p1) } func (s *FullNodeStub) WalletExport(p0 
context.Context, p1 address.Address) (*types.KeyInfo, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) WalletHas(p0 context.Context, p1 address.Address) (bool, error) { + if s.Internal.WalletHas == nil { + return false, ErrNotSupported + } return s.Internal.WalletHas(p0, p1) } func (s *FullNodeStub) WalletHas(p0 context.Context, p1 address.Address) (bool, error) { - return false, xerrors.New("method not supported") + return false, ErrNotSupported } func (s *FullNodeStruct) WalletImport(p0 context.Context, p1 *types.KeyInfo) (address.Address, error) { + if s.Internal.WalletImport == nil { + return *new(address.Address), ErrNotSupported + } return s.Internal.WalletImport(p0, p1) } func (s *FullNodeStub) WalletImport(p0 context.Context, p1 *types.KeyInfo) (address.Address, error) { - return *new(address.Address), xerrors.New("method not supported") + return *new(address.Address), ErrNotSupported } func (s *FullNodeStruct) WalletList(p0 context.Context) ([]address.Address, error) { + if s.Internal.WalletList == nil { + return *new([]address.Address), ErrNotSupported + } return s.Internal.WalletList(p0) } func (s *FullNodeStub) WalletList(p0 context.Context) ([]address.Address, error) { - return *new([]address.Address), xerrors.New("method not supported") + return *new([]address.Address), ErrNotSupported } func (s *FullNodeStruct) WalletNew(p0 context.Context, p1 types.KeyType) (address.Address, error) { + if s.Internal.WalletNew == nil { + return *new(address.Address), ErrNotSupported + } return s.Internal.WalletNew(p0, p1) } func (s *FullNodeStub) WalletNew(p0 context.Context, p1 types.KeyType) (address.Address, error) { - return *new(address.Address), xerrors.New("method not supported") + return *new(address.Address), ErrNotSupported } func (s *FullNodeStruct) WalletSetDefault(p0 context.Context, p1 address.Address) error { + if s.Internal.WalletSetDefault == nil { + return ErrNotSupported + } return 
s.Internal.WalletSetDefault(p0, p1) } func (s *FullNodeStub) WalletSetDefault(p0 context.Context, p1 address.Address) error { - return xerrors.New("method not supported") + return ErrNotSupported } func (s *FullNodeStruct) WalletSign(p0 context.Context, p1 address.Address, p2 []byte) (*crypto.Signature, error) { + if s.Internal.WalletSign == nil { + return nil, ErrNotSupported + } return s.Internal.WalletSign(p0, p1, p2) } func (s *FullNodeStub) WalletSign(p0 context.Context, p1 address.Address, p2 []byte) (*crypto.Signature, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) WalletSignMessage(p0 context.Context, p1 address.Address, p2 *types.Message) (*types.SignedMessage, error) { + if s.Internal.WalletSignMessage == nil { + return nil, ErrNotSupported + } return s.Internal.WalletSignMessage(p0, p1, p2) } func (s *FullNodeStub) WalletSignMessage(p0 context.Context, p1 address.Address, p2 *types.Message) (*types.SignedMessage, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *FullNodeStruct) WalletValidateAddress(p0 context.Context, p1 string) (address.Address, error) { + if s.Internal.WalletValidateAddress == nil { + return *new(address.Address), ErrNotSupported + } return s.Internal.WalletValidateAddress(p0, p1) } func (s *FullNodeStub) WalletValidateAddress(p0 context.Context, p1 string) (address.Address, error) { - return *new(address.Address), xerrors.New("method not supported") + return *new(address.Address), ErrNotSupported } func (s *FullNodeStruct) WalletVerify(p0 context.Context, p1 address.Address, p2 []byte, p3 *crypto.Signature) (bool, error) { + if s.Internal.WalletVerify == nil { + return false, ErrNotSupported + } return s.Internal.WalletVerify(p0, p1, p2, p3) } func (s *FullNodeStub) WalletVerify(p0 context.Context, p1 address.Address, p2 []byte, p3 *crypto.Signature) (bool, error) { - return false, xerrors.New("method not supported") 
+ return false, ErrNotSupported } func (s *GatewayStruct) ChainGetBlockMessages(p0 context.Context, p1 cid.Cid) (*api.BlockMessages, error) { + if s.Internal.ChainGetBlockMessages == nil { + return nil, ErrNotSupported + } return s.Internal.ChainGetBlockMessages(p0, p1) } func (s *GatewayStub) ChainGetBlockMessages(p0 context.Context, p1 cid.Cid) (*api.BlockMessages, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *GatewayStruct) ChainGetMessage(p0 context.Context, p1 cid.Cid) (*types.Message, error) { + if s.Internal.ChainGetMessage == nil { + return nil, ErrNotSupported + } return s.Internal.ChainGetMessage(p0, p1) } func (s *GatewayStub) ChainGetMessage(p0 context.Context, p1 cid.Cid) (*types.Message, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *GatewayStruct) ChainGetTipSet(p0 context.Context, p1 types.TipSetKey) (*types.TipSet, error) { + if s.Internal.ChainGetTipSet == nil { + return nil, ErrNotSupported + } return s.Internal.ChainGetTipSet(p0, p1) } func (s *GatewayStub) ChainGetTipSet(p0 context.Context, p1 types.TipSetKey) (*types.TipSet, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *GatewayStruct) ChainGetTipSetByHeight(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (*types.TipSet, error) { + if s.Internal.ChainGetTipSetByHeight == nil { + return nil, ErrNotSupported + } return s.Internal.ChainGetTipSetByHeight(p0, p1, p2) } func (s *GatewayStub) ChainGetTipSetByHeight(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (*types.TipSet, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *GatewayStruct) ChainHasObj(p0 context.Context, p1 cid.Cid) (bool, error) { + if s.Internal.ChainHasObj == nil { + return false, ErrNotSupported + } return s.Internal.ChainHasObj(p0, p1) } func (s *GatewayStub) ChainHasObj(p0 context.Context, p1 
cid.Cid) (bool, error) { - return false, xerrors.New("method not supported") + return false, ErrNotSupported } func (s *GatewayStruct) ChainHead(p0 context.Context) (*types.TipSet, error) { + if s.Internal.ChainHead == nil { + return nil, ErrNotSupported + } return s.Internal.ChainHead(p0) } func (s *GatewayStub) ChainHead(p0 context.Context) (*types.TipSet, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *GatewayStruct) ChainNotify(p0 context.Context) (<-chan []*api.HeadChange, error) { + if s.Internal.ChainNotify == nil { + return nil, ErrNotSupported + } return s.Internal.ChainNotify(p0) } func (s *GatewayStub) ChainNotify(p0 context.Context) (<-chan []*api.HeadChange, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *GatewayStruct) ChainReadObj(p0 context.Context, p1 cid.Cid) ([]byte, error) { + if s.Internal.ChainReadObj == nil { + return *new([]byte), ErrNotSupported + } return s.Internal.ChainReadObj(p0, p1) } func (s *GatewayStub) ChainReadObj(p0 context.Context, p1 cid.Cid) ([]byte, error) { - return *new([]byte), xerrors.New("method not supported") + return *new([]byte), ErrNotSupported } func (s *GatewayStruct) GasEstimateMessageGas(p0 context.Context, p1 *types.Message, p2 *api.MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) { + if s.Internal.GasEstimateMessageGas == nil { + return nil, ErrNotSupported + } return s.Internal.GasEstimateMessageGas(p0, p1, p2, p3) } func (s *GatewayStub) GasEstimateMessageGas(p0 context.Context, p1 *types.Message, p2 *api.MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *GatewayStruct) MpoolPush(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) { + if s.Internal.MpoolPush == nil { + return *new(cid.Cid), ErrNotSupported + } return s.Internal.MpoolPush(p0, p1) } func (s *GatewayStub) MpoolPush(p0 
context.Context, p1 *types.SignedMessage) (cid.Cid, error) { - return *new(cid.Cid), xerrors.New("method not supported") + return *new(cid.Cid), ErrNotSupported } func (s *GatewayStruct) MsigGetAvailableBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) { + if s.Internal.MsigGetAvailableBalance == nil { + return *new(types.BigInt), ErrNotSupported + } return s.Internal.MsigGetAvailableBalance(p0, p1, p2) } func (s *GatewayStub) MsigGetAvailableBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) { - return *new(types.BigInt), xerrors.New("method not supported") + return *new(types.BigInt), ErrNotSupported } func (s *GatewayStruct) MsigGetPending(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*api.MsigTransaction, error) { + if s.Internal.MsigGetPending == nil { + return *new([]*api.MsigTransaction), ErrNotSupported + } return s.Internal.MsigGetPending(p0, p1, p2) } func (s *GatewayStub) MsigGetPending(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*api.MsigTransaction, error) { - return *new([]*api.MsigTransaction), xerrors.New("method not supported") + return *new([]*api.MsigTransaction), ErrNotSupported } func (s *GatewayStruct) MsigGetVested(p0 context.Context, p1 address.Address, p2 types.TipSetKey, p3 types.TipSetKey) (types.BigInt, error) { + if s.Internal.MsigGetVested == nil { + return *new(types.BigInt), ErrNotSupported + } return s.Internal.MsigGetVested(p0, p1, p2, p3) } func (s *GatewayStub) MsigGetVested(p0 context.Context, p1 address.Address, p2 types.TipSetKey, p3 types.TipSetKey) (types.BigInt, error) { - return *new(types.BigInt), xerrors.New("method not supported") + return *new(types.BigInt), ErrNotSupported } func (s *GatewayStruct) StateAccountKey(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) { + if s.Internal.StateAccountKey == nil { + return *new(address.Address), ErrNotSupported + } return 
s.Internal.StateAccountKey(p0, p1, p2) } func (s *GatewayStub) StateAccountKey(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) { - return *new(address.Address), xerrors.New("method not supported") + return *new(address.Address), ErrNotSupported } func (s *GatewayStruct) StateDealProviderCollateralBounds(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (api.DealCollateralBounds, error) { + if s.Internal.StateDealProviderCollateralBounds == nil { + return *new(api.DealCollateralBounds), ErrNotSupported + } return s.Internal.StateDealProviderCollateralBounds(p0, p1, p2, p3) } func (s *GatewayStub) StateDealProviderCollateralBounds(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (api.DealCollateralBounds, error) { - return *new(api.DealCollateralBounds), xerrors.New("method not supported") + return *new(api.DealCollateralBounds), ErrNotSupported } func (s *GatewayStruct) StateGetActor(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) { + if s.Internal.StateGetActor == nil { + return nil, ErrNotSupported + } return s.Internal.StateGetActor(p0, p1, p2) } func (s *GatewayStub) StateGetActor(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *GatewayStruct) StateGetReceipt(p0 context.Context, p1 cid.Cid, p2 types.TipSetKey) (*types.MessageReceipt, error) { + if s.Internal.StateGetReceipt == nil { + return nil, ErrNotSupported + } return s.Internal.StateGetReceipt(p0, p1, p2) } func (s *GatewayStub) StateGetReceipt(p0 context.Context, p1 cid.Cid, p2 types.TipSetKey) (*types.MessageReceipt, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *GatewayStruct) StateListMiners(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) { + if s.Internal.StateListMiners == nil { + return 
*new([]address.Address), ErrNotSupported + } return s.Internal.StateListMiners(p0, p1) } func (s *GatewayStub) StateListMiners(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) { - return *new([]address.Address), xerrors.New("method not supported") + return *new([]address.Address), ErrNotSupported } func (s *GatewayStruct) StateLookupID(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) { + if s.Internal.StateLookupID == nil { + return *new(address.Address), ErrNotSupported + } return s.Internal.StateLookupID(p0, p1, p2) } func (s *GatewayStub) StateLookupID(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) { - return *new(address.Address), xerrors.New("method not supported") + return *new(address.Address), ErrNotSupported } func (s *GatewayStruct) StateMarketBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MarketBalance, error) { + if s.Internal.StateMarketBalance == nil { + return *new(api.MarketBalance), ErrNotSupported + } return s.Internal.StateMarketBalance(p0, p1, p2) } func (s *GatewayStub) StateMarketBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MarketBalance, error) { - return *new(api.MarketBalance), xerrors.New("method not supported") + return *new(api.MarketBalance), ErrNotSupported } func (s *GatewayStruct) StateMarketStorageDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*api.MarketDeal, error) { + if s.Internal.StateMarketStorageDeal == nil { + return nil, ErrNotSupported + } return s.Internal.StateMarketStorageDeal(p0, p1, p2) } func (s *GatewayStub) StateMarketStorageDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*api.MarketDeal, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *GatewayStruct) StateMinerInfo(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (miner.MinerInfo, error) { + if s.Internal.StateMinerInfo == nil { + 
return *new(miner.MinerInfo), ErrNotSupported + } return s.Internal.StateMinerInfo(p0, p1, p2) } func (s *GatewayStub) StateMinerInfo(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (miner.MinerInfo, error) { - return *new(miner.MinerInfo), xerrors.New("method not supported") + return *new(miner.MinerInfo), ErrNotSupported } func (s *GatewayStruct) StateMinerPower(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*api.MinerPower, error) { + if s.Internal.StateMinerPower == nil { + return nil, ErrNotSupported + } return s.Internal.StateMinerPower(p0, p1, p2) } func (s *GatewayStub) StateMinerPower(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*api.MinerPower, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *GatewayStruct) StateMinerProvingDeadline(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) { + if s.Internal.StateMinerProvingDeadline == nil { + return nil, ErrNotSupported + } return s.Internal.StateMinerProvingDeadline(p0, p1, p2) } func (s *GatewayStub) StateMinerProvingDeadline(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *GatewayStruct) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (network.Version, error) { + if s.Internal.StateNetworkVersion == nil { + return *new(network.Version), ErrNotSupported + } return s.Internal.StateNetworkVersion(p0, p1) } func (s *GatewayStub) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (network.Version, error) { - return *new(network.Version), xerrors.New("method not supported") + return *new(network.Version), ErrNotSupported } func (s *GatewayStruct) StateSearchMsg(p0 context.Context, p1 cid.Cid) (*api.MsgLookup, error) { + if s.Internal.StateSearchMsg == nil { + return nil, ErrNotSupported + } return s.Internal.StateSearchMsg(p0, p1) } func (s 
*GatewayStub) StateSearchMsg(p0 context.Context, p1 cid.Cid) (*api.MsgLookup, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *GatewayStruct) StateSectorGetInfo(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorOnChainInfo, error) { + if s.Internal.StateSectorGetInfo == nil { + return nil, ErrNotSupported + } return s.Internal.StateSectorGetInfo(p0, p1, p2, p3) } func (s *GatewayStub) StateSectorGetInfo(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorOnChainInfo, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *GatewayStruct) StateVerifiedClientStatus(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) { + if s.Internal.StateVerifiedClientStatus == nil { + return nil, ErrNotSupported + } return s.Internal.StateVerifiedClientStatus(p0, p1, p2) } func (s *GatewayStub) StateVerifiedClientStatus(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *GatewayStruct) StateWaitMsg(p0 context.Context, p1 cid.Cid, p2 uint64) (*api.MsgLookup, error) { + if s.Internal.StateWaitMsg == nil { + return nil, ErrNotSupported + } return s.Internal.StateWaitMsg(p0, p1, p2) } func (s *GatewayStub) StateWaitMsg(p0 context.Context, p1 cid.Cid, p2 uint64) (*api.MsgLookup, error) { - return nil, xerrors.New("method not supported") + return nil, ErrNotSupported } func (s *GatewayStruct) Version(p0 context.Context) (api.APIVersion, error) { + if s.Internal.Version == nil { + return *new(api.APIVersion), ErrNotSupported + } return s.Internal.Version(p0) } func (s *GatewayStub) Version(p0 context.Context) (api.APIVersion, error) { - return *new(api.APIVersion), xerrors.New("method not supported") + return *new(api.APIVersion), ErrNotSupported } 
func (s *GatewayStruct) WalletBalance(p0 context.Context, p1 address.Address) (types.BigInt, error) { + if s.Internal.WalletBalance == nil { + return *new(types.BigInt), ErrNotSupported + } return s.Internal.WalletBalance(p0, p1) } func (s *GatewayStub) WalletBalance(p0 context.Context, p1 address.Address) (types.BigInt, error) { - return *new(types.BigInt), xerrors.New("method not supported") + return *new(types.BigInt), ErrNotSupported } var _ FullNode = new(FullNodeStruct) diff --git a/api/v0api/v0mocks/mock_full.go b/api/v0api/v0mocks/mock_full.go index a268d4a8a82..6a4ef690ed1 100644 --- a/api/v0api/v0mocks/mock_full.go +++ b/api/v0api/v0mocks/mock_full.go @@ -194,6 +194,21 @@ func (mr *MockFullNodeMockRecorder) ChainGetMessage(arg0, arg1 interface{}) *gom return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetMessage", reflect.TypeOf((*MockFullNode)(nil).ChainGetMessage), arg0, arg1) } +// ChainGetMessagesInTipset mocks base method. +func (m *MockFullNode) ChainGetMessagesInTipset(arg0 context.Context, arg1 types.TipSetKey) ([]api.Message, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetMessagesInTipset", arg0, arg1) + ret0, _ := ret[0].([]api.Message) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetMessagesInTipset indicates an expected call of ChainGetMessagesInTipset. +func (mr *MockFullNodeMockRecorder) ChainGetMessagesInTipset(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetMessagesInTipset", reflect.TypeOf((*MockFullNode)(nil).ChainGetMessagesInTipset), arg0, arg1) +} + // ChainGetNode mocks base method. 
func (m *MockFullNode) ChainGetNode(arg0 context.Context, arg1 string) (*api.IpldObject, error) { m.ctrl.T.Helper() diff --git a/api/version.go b/api/version.go index e8011204d1e..687f5135a89 100644 --- a/api/version.go +++ b/api/version.go @@ -57,7 +57,7 @@ var ( FullAPIVersion0 = newVer(1, 3, 0) FullAPIVersion1 = newVer(2, 1, 0) - MinerAPIVersion0 = newVer(1, 1, 0) + MinerAPIVersion0 = newVer(1, 2, 0) WorkerAPIVersion0 = newVer(1, 1, 0) ) diff --git a/blockstore/badger/blockstore.go b/blockstore/badger/blockstore.go index e03266ab7f9..a0b51d8df61 100644 --- a/blockstore/badger/blockstore.go +++ b/blockstore/badger/blockstore.go @@ -4,11 +4,15 @@ import ( "context" "fmt" "io" + "os" + "path/filepath" "runtime" - "sync/atomic" + "sync" + "time" "github.com/dgraph-io/badger/v2" "github.com/dgraph-io/badger/v2/options" + "github.com/dgraph-io/badger/v2/pb" "github.com/multiformats/go-base32" "go.uber.org/zap" @@ -72,23 +76,45 @@ func (b *badgerLogger) Warningf(format string, args ...interface{}) { b.skip2.Warnf(format, args...) } +// bsState is the current blockstore state +type bsState int + const ( - stateOpen int64 = iota + // stateOpen signifies an open blockstore + stateOpen bsState = iota + // stateClosing signifies a blockstore that is currently closing stateClosing + // stateClosed signifies a blockstore that has been colosed stateClosed ) +type bsMoveState int + +const ( + // moveStateNone signifies that there is no move in progress + moveStateNone bsMoveState = iota + // moveStateMoving signifies that there is a move in a progress + moveStateMoving + // moveStateCleanup signifies that a move has completed or aborted and we are cleaning up + moveStateCleanup + // moveStateLock signifies that an exclusive lock has been acquired + moveStateLock +) + // Blockstore is a badger-backed IPLD blockstore. -// -// NOTE: once Close() is called, methods will try their best to return -// ErrBlockstoreClosed. 
This will guaranteed to happen for all subsequent -// operation calls after Close() has returned, but it may not happen for -// operations in progress. Those are likely to fail with a different error. type Blockstore struct { - // state is accessed atomically - state int64 + stateLk sync.RWMutex + state bsState + viewers sync.WaitGroup + + moveMx sync.Mutex + moveCond sync.Cond + moveState bsMoveState + rlock int - DB *badger.DB + db *badger.DB + dbNext *badger.DB // when moving + opts Options prefixing bool prefix []byte @@ -97,6 +123,9 @@ type Blockstore struct { var _ blockstore.Blockstore = (*Blockstore)(nil) var _ blockstore.Viewer = (*Blockstore)(nil) +var _ blockstore.BlockstoreIterator = (*Blockstore)(nil) +var _ blockstore.BlockstoreGC = (*Blockstore)(nil) +var _ blockstore.BlockstoreSize = (*Blockstore)(nil) var _ io.Closer = (*Blockstore)(nil) // Open creates a new badger-backed blockstore, with the supplied options. @@ -111,73 +140,406 @@ func Open(opts Options) (*Blockstore, error) { return nil, fmt.Errorf("failed to open badger blockstore: %w", err) } - bs := &Blockstore{DB: db} + bs := &Blockstore{db: db, opts: opts} if p := opts.Prefix; p != "" { bs.prefixing = true bs.prefix = []byte(p) bs.prefixLen = len(bs.prefix) } + bs.moveCond.L = &bs.moveMx + return bs, nil } // Close closes the store. If the store has already been closed, this noops and // returns an error, even if the first closure resulted in error. 
func (b *Blockstore) Close() error { - if !atomic.CompareAndSwapInt64(&b.state, stateOpen, stateClosing) { + b.stateLk.Lock() + if b.state != stateOpen { + b.stateLk.Unlock() return nil } + b.state = stateClosing + b.stateLk.Unlock() + + defer func() { + b.stateLk.Lock() + b.state = stateClosed + b.stateLk.Unlock() + }() + + // wait for all accesses to complete + b.viewers.Wait() - defer atomic.StoreInt64(&b.state, stateClosed) - return b.DB.Close() + return b.db.Close() } -// CollectGarbage runs garbage collection on the value log -func (b *Blockstore) CollectGarbage() error { - if atomic.LoadInt64(&b.state) != stateOpen { +func (b *Blockstore) access() error { + b.stateLk.RLock() + defer b.stateLk.RUnlock() + + if b.state != stateOpen { return ErrBlockstoreClosed } - var err error + b.viewers.Add(1) + return nil +} + +func (b *Blockstore) isOpen() bool { + b.stateLk.RLock() + defer b.stateLk.RUnlock() + + return b.state == stateOpen +} + +// lockDB/unlockDB implement a recursive lock contingent on move state +func (b *Blockstore) lockDB() { + b.moveMx.Lock() + defer b.moveMx.Unlock() + + if b.rlock == 0 { + for b.moveState == moveStateLock { + b.moveCond.Wait() + } + } + + b.rlock++ +} + +func (b *Blockstore) unlockDB() { + b.moveMx.Lock() + defer b.moveMx.Unlock() + + b.rlock-- + if b.rlock == 0 && b.moveState == moveStateLock { + b.moveCond.Broadcast() + } +} + +// lockMove/unlockMove implement an exclusive lock of move state +func (b *Blockstore) lockMove() { + b.moveMx.Lock() + b.moveState = moveStateLock + for b.rlock > 0 { + b.moveCond.Wait() + } +} + +func (b *Blockstore) unlockMove(state bsMoveState) { + b.moveState = state + b.moveCond.Broadcast() + b.moveMx.Unlock() +} + +// movingGC moves the blockstore to a new path, adjacent to the current path, and creates +// a symlink from the current path to the new path; the old blockstore is deleted. 
+// +// The blockstore MUST accept new writes during the move and ensure that these +// are persisted to the new blockstore; if a failure occurs aboring the move, +// then they must be peristed to the old blockstore. +// In short, the blockstore must not lose data from new writes during the move. +func (b *Blockstore) movingGC() error { + // this inlines moveLock/moveUnlock for the initial state check to prevent a second move + // while one is in progress without clobbering state + b.moveMx.Lock() + if b.moveState != moveStateNone { + b.moveMx.Unlock() + return fmt.Errorf("move in progress") + } + + b.moveState = moveStateLock + for b.rlock > 0 { + b.moveCond.Wait() + } + + b.moveState = moveStateMoving + b.moveCond.Broadcast() + b.moveMx.Unlock() + + var newPath string + + defer func() { + b.lockMove() + + dbNext := b.dbNext + b.dbNext = nil + + var state bsMoveState + if dbNext != nil { + state = moveStateCleanup + } else { + state = moveStateNone + } + + b.unlockMove(state) + + if dbNext != nil { + // the move failed and we have a left-over db; delete it. + err := dbNext.Close() + if err != nil { + log.Warnf("error closing badger db: %s", err) + } + b.deleteDB(newPath) + + b.lockMove() + b.unlockMove(moveStateNone) + } + }() + + // we resolve symlinks to create the new path in the adjacent to the old path. + // this allows the user to symlink the db directory into a separate filesystem. + basePath := b.opts.Dir + linkPath, err := filepath.EvalSymlinks(basePath) + if err != nil { + return fmt.Errorf("error resolving symlink %s: %w", basePath, err) + } + + if basePath == linkPath { + newPath = basePath + } else { + // we do this dance to create a name adjacent to the current one, while avoiding clown + // shoes with multiple moves (i.e. we can't just take the basename of the linkPath, as it + // could have been created in a previous move and have the timestamp suffix, which would then + // perpetuate itself. 
+ name := filepath.Base(basePath) + dir := filepath.Dir(linkPath) + newPath = filepath.Join(dir, name) + } + newPath = fmt.Sprintf("%s.%d", newPath, time.Now().UnixNano()) + + log.Infof("moving blockstore from %s to %s", b.opts.Dir, newPath) + + opts := b.opts + opts.Dir = newPath + opts.ValueDir = newPath + + dbNew, err := badger.Open(opts.Options) + if err != nil { + return fmt.Errorf("failed to open badger blockstore in %s: %w", newPath, err) + } + + b.lockMove() + b.dbNext = dbNew + b.unlockMove(moveStateMoving) + + log.Info("copying blockstore") + err = b.doCopy(b.db, b.dbNext) + if err != nil { + return fmt.Errorf("error moving badger blockstore to %s: %w", newPath, err) + } + + b.lockMove() + dbOld := b.db + b.db = b.dbNext + b.dbNext = nil + b.unlockMove(moveStateCleanup) + + err = dbOld.Close() + if err != nil { + log.Warnf("error closing old badger db: %s", err) + } + + // this is the canonical db path; this is where our db lives. + dbPath := b.opts.Dir + + // we first move the existing db out of the way, and only delete it after we have symlinked the + // new db to the canonical path + backupPath := fmt.Sprintf("%s.old.%d", dbPath, time.Now().Unix()) + if err = os.Rename(dbPath, backupPath); err != nil { + // this is not catastrophic in the sense that we have not lost any data. + // but it is pretty bad, as the db path points to the old db, while we are now using to the new + // db; we can't continue and leave a ticking bomb for the next restart. + // so a panic is appropriate and user can fix. + panic(fmt.Errorf("error renaming old badger db dir from %s to %s: %w; USER ACTION REQUIRED", dbPath, backupPath, err)) //nolint + } + + if err = symlink(newPath, dbPath); err != nil { + // same here; the db path is pointing to the void. panic and let the user fix. 
+ panic(fmt.Errorf("error symlinking new badger db dir from %s to %s: %w; USER ACTION REQUIRED", newPath, dbPath, err)) //nolint + } + + // the delete follows symlinks + b.deleteDB(backupPath) + + log.Info("moving blockstore done") + return nil +} + +// symlink creates a symlink from path to linkTo; the link is relative if the two are +// in the same directory +func symlink(path, linkTo string) error { + resolvedPathDir, err := filepath.EvalSymlinks(filepath.Dir(path)) + if err != nil { + return fmt.Errorf("error resolving links in %s: %w", path, err) + } + + resolvedLinkDir, err := filepath.EvalSymlinks(filepath.Dir(linkTo)) + if err != nil { + return fmt.Errorf("error resolving links in %s: %w", linkTo, err) + } + + if resolvedPathDir == resolvedLinkDir { + path = filepath.Base(path) + } + + return os.Symlink(path, linkTo) +} + +// doCopy copies a badger blockstore to another, with an optional filter; if the filter +// is not nil, then only cids that satisfy the filter will be copied. 
+func (b *Blockstore) doCopy(from, to *badger.DB) error { + workers := runtime.NumCPU() / 2 + if workers < 2 { + workers = 2 + } + + stream := from.NewStream() + stream.NumGo = workers + stream.LogPrefix = "doCopy" + stream.Send = func(list *pb.KVList) error { + batch := to.NewWriteBatch() + defer batch.Cancel() + + for _, kv := range list.Kv { + if kv.Key == nil || kv.Value == nil { + continue + } + if err := batch.Set(kv.Key, kv.Value); err != nil { + return err + } + } + + return batch.Flush() + } + + return stream.Orchestrate(context.Background()) +} + +func (b *Blockstore) deleteDB(path string) { + // follow symbolic links, otherwise the data wil be left behind + linkPath, err := filepath.EvalSymlinks(path) + if err != nil { + log.Warnf("error resolving symlinks in %s", path) + return + } + + log.Infof("removing data directory %s", linkPath) + if err := os.RemoveAll(linkPath); err != nil { + log.Warnf("error deleting db at %s: %s", linkPath, err) + return + } + + if path != linkPath { + log.Infof("removing link %s", path) + if err := os.Remove(path); err != nil { + log.Warnf("error removing symbolic link %s", err) + } + } +} + +func (b *Blockstore) onlineGC() error { + b.lockDB() + defer b.unlockDB() + + // compact first to gather the necessary statistics for GC + nworkers := runtime.NumCPU() / 2 + if nworkers < 2 { + nworkers = 2 + } + + err := b.db.Flatten(nworkers) + if err != nil { + return err + } + for err == nil { - err = b.DB.RunValueLogGC(0.125) + err = b.db.RunValueLogGC(0.125) } if err == badger.ErrNoRewrite { - // not really an error in this case + // not really an error in this case, it signals the end of GC return nil } return err } -// Compact runs a synchronous compaction -func (b *Blockstore) Compact() error { - if atomic.LoadInt64(&b.state) != stateOpen { - return ErrBlockstoreClosed +// CollectGarbage compacts and runs garbage collection on the value log; +// implements the BlockstoreGC trait +func (b *Blockstore) CollectGarbage(opts 
...blockstore.BlockstoreGCOption) error { + if err := b.access(); err != nil { + return err } + defer b.viewers.Done() - nworkers := runtime.NumCPU() / 2 - if nworkers < 2 { - nworkers = 2 + var options blockstore.BlockstoreGCOptions + for _, opt := range opts { + err := opt(&options) + if err != nil { + return err + } + } + + if options.FullGC { + return b.movingGC() } - return b.DB.Flatten(nworkers) + return b.onlineGC() +} + +// Size returns the aggregate size of the blockstore +func (b *Blockstore) Size() (int64, error) { + if err := b.access(); err != nil { + return 0, err + } + defer b.viewers.Done() + + b.lockDB() + defer b.unlockDB() + + lsm, vlog := b.db.Size() + size := lsm + vlog + + if size == 0 { + // badger reports a 0 size on symlinked directories... sigh + dir := b.opts.Dir + entries, err := os.ReadDir(dir) + if err != nil { + return 0, err + } + + for _, e := range entries { + path := filepath.Join(dir, e.Name()) + finfo, err := os.Stat(path) + if err != nil { + return 0, err + } + size += finfo.Size() + } + } + + return size, nil } // View implements blockstore.Viewer, which leverages zero-copy read-only // access to values. func (b *Blockstore) View(cid cid.Cid, fn func([]byte) error) error { - if atomic.LoadInt64(&b.state) != stateOpen { - return ErrBlockstoreClosed + if err := b.access(); err != nil { + return err } + defer b.viewers.Done() + + b.lockDB() + defer b.unlockDB() k, pooled := b.PooledStorageKey(cid) if pooled { defer KeyPool.Put(k) } - return b.DB.View(func(txn *badger.Txn) error { + return b.db.View(func(txn *badger.Txn) error { switch item, err := txn.Get(k); err { case nil: return item.Value(fn) @@ -191,16 +553,20 @@ func (b *Blockstore) View(cid cid.Cid, fn func([]byte) error) error { // Has implements Blockstore.Has. 
func (b *Blockstore) Has(cid cid.Cid) (bool, error) { - if atomic.LoadInt64(&b.state) != stateOpen { - return false, ErrBlockstoreClosed + if err := b.access(); err != nil { + return false, err } + defer b.viewers.Done() + + b.lockDB() + defer b.unlockDB() k, pooled := b.PooledStorageKey(cid) if pooled { defer KeyPool.Put(k) } - err := b.DB.View(func(txn *badger.Txn) error { + err := b.db.View(func(txn *badger.Txn) error { _, err := txn.Get(k) return err }) @@ -221,9 +587,13 @@ func (b *Blockstore) Get(cid cid.Cid) (blocks.Block, error) { return nil, blockstore.ErrNotFound } - if atomic.LoadInt64(&b.state) != stateOpen { - return nil, ErrBlockstoreClosed + if err := b.access(); err != nil { + return nil, err } + defer b.viewers.Done() + + b.lockDB() + defer b.unlockDB() k, pooled := b.PooledStorageKey(cid) if pooled { @@ -231,7 +601,7 @@ func (b *Blockstore) Get(cid cid.Cid) (blocks.Block, error) { } var val []byte - err := b.DB.View(func(txn *badger.Txn) error { + err := b.db.View(func(txn *badger.Txn) error { switch item, err := txn.Get(k); err { case nil: val, err = item.ValueCopy(nil) @@ -250,9 +620,13 @@ func (b *Blockstore) Get(cid cid.Cid) (blocks.Block, error) { // GetSize implements Blockstore.GetSize. func (b *Blockstore) GetSize(cid cid.Cid) (int, error) { - if atomic.LoadInt64(&b.state) != stateOpen { - return -1, ErrBlockstoreClosed + if err := b.access(); err != nil { + return 0, err } + defer b.viewers.Done() + + b.lockDB() + defer b.unlockDB() k, pooled := b.PooledStorageKey(cid) if pooled { @@ -260,7 +634,7 @@ func (b *Blockstore) GetSize(cid cid.Cid) (int, error) { } var size int - err := b.DB.View(func(txn *badger.Txn) error { + err := b.db.View(func(txn *badger.Txn) error { switch item, err := txn.Get(k); err { case nil: size = int(item.ValueSize()) @@ -279,29 +653,52 @@ func (b *Blockstore) GetSize(cid cid.Cid) (int, error) { // Put implements Blockstore.Put. 
func (b *Blockstore) Put(block blocks.Block) error { - if atomic.LoadInt64(&b.state) != stateOpen { - return ErrBlockstoreClosed + if err := b.access(); err != nil { + return err } + defer b.viewers.Done() + + b.lockDB() + defer b.unlockDB() k, pooled := b.PooledStorageKey(block.Cid()) if pooled { defer KeyPool.Put(k) } - err := b.DB.Update(func(txn *badger.Txn) error { - return txn.Set(k, block.RawData()) - }) - if err != nil { - err = fmt.Errorf("failed to put block in badger blockstore: %w", err) + put := func(db *badger.DB) error { + err := db.Update(func(txn *badger.Txn) error { + return txn.Set(k, block.RawData()) + }) + if err != nil { + return fmt.Errorf("failed to put block in badger blockstore: %w", err) + } + + return nil } - return err + + if err := put(b.db); err != nil { + return err + } + + if b.dbNext != nil { + if err := put(b.dbNext); err != nil { + return err + } + } + + return nil } // PutMany implements Blockstore.PutMany. func (b *Blockstore) PutMany(blocks []blocks.Block) error { - if atomic.LoadInt64(&b.state) != stateOpen { - return ErrBlockstoreClosed + if err := b.access(); err != nil { + return err } + defer b.viewers.Done() + + b.lockDB() + defer b.unlockDB() // toReturn tracks the byte slices to return to the pool, if we're using key // prefixing. 
we can't return each slice to the pool after each Set, because @@ -316,46 +713,75 @@ func (b *Blockstore) PutMany(blocks []blocks.Block) error { }() } - batch := b.DB.NewWriteBatch() - defer batch.Cancel() - + keys := make([][]byte, 0, len(blocks)) for _, block := range blocks { k, pooled := b.PooledStorageKey(block.Cid()) if pooled { toReturn = append(toReturn, k) } - if err := batch.Set(k, block.RawData()); err != nil { - return err + keys = append(keys, k) + } + + put := func(db *badger.DB) error { + batch := db.NewWriteBatch() + defer batch.Cancel() + + for i, block := range blocks { + k := keys[i] + if err := batch.Set(k, block.RawData()); err != nil { + return err + } } + + err := batch.Flush() + if err != nil { + return fmt.Errorf("failed to put blocks in badger blockstore: %w", err) + } + + return nil } - err := batch.Flush() - if err != nil { - err = fmt.Errorf("failed to put blocks in badger blockstore: %w", err) + if err := put(b.db); err != nil { + return err } - return err + + if b.dbNext != nil { + if err := put(b.dbNext); err != nil { + return err + } + } + + return nil } // DeleteBlock implements Blockstore.DeleteBlock. func (b *Blockstore) DeleteBlock(cid cid.Cid) error { - if atomic.LoadInt64(&b.state) != stateOpen { - return ErrBlockstoreClosed + if err := b.access(); err != nil { + return err } + defer b.viewers.Done() + + b.lockDB() + defer b.unlockDB() k, pooled := b.PooledStorageKey(cid) if pooled { defer KeyPool.Put(k) } - return b.DB.Update(func(txn *badger.Txn) error { + return b.db.Update(func(txn *badger.Txn) error { return txn.Delete(k) }) } func (b *Blockstore) DeleteMany(cids []cid.Cid) error { - if atomic.LoadInt64(&b.state) != stateOpen { - return ErrBlockstoreClosed + if err := b.access(); err != nil { + return err } + defer b.viewers.Done() + + b.lockDB() + defer b.unlockDB() // toReturn tracks the byte slices to return to the pool, if we're using key // prefixing. 
we can't return each slice to the pool after each Set, because @@ -370,7 +796,7 @@ func (b *Blockstore) DeleteMany(cids []cid.Cid) error { }() } - batch := b.DB.NewWriteBatch() + batch := b.db.NewWriteBatch() defer batch.Cancel() for _, cid := range cids { @@ -392,11 +818,14 @@ func (b *Blockstore) DeleteMany(cids []cid.Cid) error { // AllKeysChan implements Blockstore.AllKeysChan. func (b *Blockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { - if atomic.LoadInt64(&b.state) != stateOpen { - return nil, ErrBlockstoreClosed + if err := b.access(); err != nil { + return nil, err } - txn := b.DB.NewTransaction(false) + b.lockDB() + defer b.unlockDB() + + txn := b.db.NewTransaction(false) opts := badger.IteratorOptions{PrefetchSize: 100} if b.prefixing { opts.Prefix = b.prefix @@ -405,6 +834,7 @@ func (b *Blockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { ch := make(chan cid.Cid) go func() { + defer b.viewers.Done() defer close(ch) defer iter.Close() @@ -415,7 +845,7 @@ func (b *Blockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { if ctx.Err() != nil { return // context has fired. } - if atomic.LoadInt64(&b.state) != stateOpen { + if !b.isOpen() { // open iterators will run even after the database is closed... return // closing, yield. 
} @@ -442,6 +872,59 @@ func (b *Blockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { return ch, nil } +// Implementation of BlockstoreIterator interface +func (b *Blockstore) ForEachKey(f func(cid.Cid) error) error { + if err := b.access(); err != nil { + return err + } + defer b.viewers.Done() + + b.lockDB() + defer b.unlockDB() + + txn := b.db.NewTransaction(false) + defer txn.Discard() + + opts := badger.IteratorOptions{PrefetchSize: 100} + if b.prefixing { + opts.Prefix = b.prefix + } + + iter := txn.NewIterator(opts) + defer iter.Close() + + var buf []byte + for iter.Rewind(); iter.Valid(); iter.Next() { + if !b.isOpen() { + return ErrBlockstoreClosed + } + + k := iter.Item().Key() + if b.prefixing { + k = k[b.prefixLen:] + } + + klen := base32.RawStdEncoding.DecodedLen(len(k)) + if klen > len(buf) { + buf = make([]byte, klen) + } + + n, err := base32.RawStdEncoding.Decode(buf, k) + if err != nil { + return err + } + + c := cid.NewCidV1(cid.Raw, buf[:n]) + + err = f(c) + if err != nil { + return err + } + } + + return nil +} + // HashOnRead implements Blockstore.HashOnRead. It is not supported by this // blockstore. 
func (b *Blockstore) HashOnRead(_ bool) { @@ -494,3 +977,9 @@ func (b *Blockstore) StorageKey(dst []byte, cid cid.Cid) []byte { } return dst[:reqsize] } + +// this method is added for lotus-shed needs +// WARNING: THIS IS COMPLETELY UNSAFE; DONT USE THIS IN PRODUCTION CODE +func (b *Blockstore) DB() *badger.DB { + return b.db +} diff --git a/blockstore/badger/blockstore_test.go b/blockstore/badger/blockstore_test.go index 3221458d28f..d8ef5241b49 100644 --- a/blockstore/badger/blockstore_test.go +++ b/blockstore/badger/blockstore_test.go @@ -1,12 +1,19 @@ package badgerbs import ( + "bytes" + "fmt" "io/ioutil" "os" + "path/filepath" + "strings" "testing" - blocks "github.com/ipfs/go-block-format" "github.com/stretchr/testify/require" + "golang.org/x/sync/errgroup" + + blocks "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" "github.com/filecoin-project/lotus/blockstore" ) @@ -89,3 +96,180 @@ func openBlockstore(optsSupplier func(path string) Options) func(tb testing.TB, return Open(optsSupplier(path)) } } + +func testMove(t *testing.T, optsF func(string) Options) { + basePath, err := ioutil.TempDir("", "") + if err != nil { + t.Fatal(err) + } + + dbPath := filepath.Join(basePath, "db") + + t.Cleanup(func() { + _ = os.RemoveAll(basePath) + }) + + db, err := Open(optsF(dbPath)) + if err != nil { + t.Fatal(err) + } + + defer db.Close() //nolint + + var have []blocks.Block + var deleted []cid.Cid + + // add some blocks + for i := 0; i < 10; i++ { + blk := blocks.NewBlock([]byte(fmt.Sprintf("some data %d", i))) + err := db.Put(blk) + if err != nil { + t.Fatal(err) + } + have = append(have, blk) + } + + // delete some of them + for i := 5; i < 10; i++ { + c := have[i].Cid() + err := db.DeleteBlock(c) + if err != nil { + t.Fatal(err) + } + deleted = append(deleted, c) + } + have = have[:5] + + // start a move concurrent with some more puts + g := new(errgroup.Group) + g.Go(func() error { + for i := 10; i < 1000; i++ { + blk := 
blocks.NewBlock([]byte(fmt.Sprintf("some data %d", i))) + err := db.Put(blk) + if err != nil { + return err + } + have = append(have, blk) + } + return nil + }) + g.Go(func() error { + return db.CollectGarbage(blockstore.WithFullGC(true)) + }) + + err = g.Wait() + if err != nil { + t.Fatal(err) + } + + // now check that we have all the blocks in have and none in the deleted lists + checkBlocks := func() { + for _, blk := range have { + has, err := db.Has(blk.Cid()) + if err != nil { + t.Fatal(err) + } + + if !has { + t.Fatal("missing block") + } + + blk2, err := db.Get(blk.Cid()) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(blk.RawData(), blk2.RawData()) { + t.Fatal("data mismatch") + } + } + + for _, c := range deleted { + has, err := db.Has(c) + if err != nil { + t.Fatal(err) + } + + if has { + t.Fatal("resurrected block") + } + } + } + + checkBlocks() + + // check the basePath -- it should contain a directory with name db.{timestamp}, soft-linked + // to db and nothing else + checkPath := func() { + entries, err := os.ReadDir(basePath) + if err != nil { + t.Fatal(err) + } + + if len(entries) != 2 { + t.Fatalf("too many entries; expected %d but got %d", 2, len(entries)) + } + + var haveDB, haveDBLink bool + for _, e := range entries { + if e.Name() == "db" { + if (e.Type() & os.ModeSymlink) == 0 { + t.Fatal("found db, but it's not a symlink") + } + haveDBLink = true + continue + } + if strings.HasPrefix(e.Name(), "db.") { + if !e.Type().IsDir() { + t.Fatal("found db prefix, but it's not a directory") + } + haveDB = true + continue + } + } + + if !haveDB { + t.Fatal("db directory is missing") + } + if !haveDBLink { + t.Fatal("db link is missing") + } + } + + checkPath() + + // now do another FullGC to test the double move and following of symlinks + if err := db.CollectGarbage(blockstore.WithFullGC(true)); err != nil { + t.Fatal(err) + } + + checkBlocks() + checkPath() + + // reopen the db to make sure our relative link works: + err = db.Close() + if 
err != nil { + t.Fatal(err) + } + + db, err = Open(optsF(dbPath)) + if err != nil { + t.Fatal(err) + } + + // db.Close() is already deferred + + checkBlocks() +} + +func TestMoveNoPrefix(t *testing.T) { + testMove(t, DefaultOptions) +} + +func TestMoveWithPrefix(t *testing.T) { + testMove(t, func(path string) Options { + opts := DefaultOptions(path) + opts.Prefix = "/prefixed/" + return opts + }) +} diff --git a/blockstore/blockstore.go b/blockstore/blockstore.go index 23f0bd7546c..8ede31eb9b4 100644 --- a/blockstore/blockstore.go +++ b/blockstore/blockstore.go @@ -30,6 +30,36 @@ type BatchDeleter interface { DeleteMany(cids []cid.Cid) error } +// BlockstoreIterator is a trait for efficient iteration +type BlockstoreIterator interface { + ForEachKey(func(cid.Cid) error) error +} + +// BlockstoreGC is a trait for blockstores that support online garbage collection +type BlockstoreGC interface { + CollectGarbage(options ...BlockstoreGCOption) error +} + +// BlockstoreGCOption is a functional interface for controlling blockstore GC options +type BlockstoreGCOption = func(*BlockstoreGCOptions) error + +// BlockstoreGCOptions is a struct with GC options +type BlockstoreGCOptions struct { + FullGC bool +} + +func WithFullGC(fullgc bool) BlockstoreGCOption { + return func(opts *BlockstoreGCOptions) error { + opts.FullGC = fullgc + return nil + } +} + +// BlockstoreSize is a trait for on-disk blockstores that can report their size +type BlockstoreSize interface { + Size() (int64, error) +} + // WrapIDStore wraps the underlying blockstore in an "identity" blockstore. // The ID store filters out all puts for blocks with CIDs using the "identity" // hash function. 
It also extracts inlined blocks from CIDs using the identity diff --git a/blockstore/discard.go b/blockstore/discard.go new file mode 100644 index 00000000000..afd0651bc07 --- /dev/null +++ b/blockstore/discard.go @@ -0,0 +1,66 @@ +package blockstore + +import ( + "context" + "io" + + blocks "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" +) + +var _ Blockstore = (*discardstore)(nil) + +type discardstore struct { + bs Blockstore +} + +func NewDiscardStore(bs Blockstore) Blockstore { + return &discardstore{bs: bs} +} + +func (b *discardstore) Has(cid cid.Cid) (bool, error) { + return b.bs.Has(cid) +} + +func (b *discardstore) HashOnRead(hor bool) { + b.bs.HashOnRead(hor) +} + +func (b *discardstore) Get(cid cid.Cid) (blocks.Block, error) { + return b.bs.Get(cid) +} + +func (b *discardstore) GetSize(cid cid.Cid) (int, error) { + return b.bs.GetSize(cid) +} + +func (b *discardstore) View(cid cid.Cid, f func([]byte) error) error { + return b.bs.View(cid, f) +} + +func (b *discardstore) Put(blk blocks.Block) error { + return nil +} + +func (b *discardstore) PutMany(blks []blocks.Block) error { + return nil +} + +func (b *discardstore) DeleteBlock(cid cid.Cid) error { + return nil +} + +func (b *discardstore) DeleteMany(cids []cid.Cid) error { + return nil +} + +func (b *discardstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + return b.bs.AllKeysChan(ctx) +} + +func (b *discardstore) Close() error { + if c, ok := b.bs.(io.Closer); ok { + return c.Close() + } + return nil +} diff --git a/blockstore/splitstore/README.md b/blockstore/splitstore/README.md new file mode 100644 index 00000000000..f69a056ca43 --- /dev/null +++ b/blockstore/splitstore/README.md @@ -0,0 +1,125 @@ +# SplitStore: An actively scalable blockstore for the Filecoin chain + +The SplitStore was first introduced in lotus v1.5.1, as an experiment +in reducing the performance impact of large blockstores. 
+ +With lotus v1.11.1, we introduce the next iteration in design and +implementation, which we call SplitStore v1. + +The new design (see [#6474](https://github.com/filecoin-project/lotus/pull/6474) +evolves the splitstore to be a freestanding compacting blockstore that +allows us to keep a small (60-100GB) working set in a hot blockstore +and reliably archive out of scope objects in a coldstore. The +coldstore can also be a discard store, whereby out of scope objects +are discarded or a regular badger blockstore (the default), which can +be periodically garbage collected according to configurable user +retention policies. + +To enable the splitstore, edit `.lotus/config.toml` and add the following: +``` +[Chainstore] + EnableSplitstore = true +``` + +If you intend to use the discard coldstore, your also need to add the following: +``` + [Chainstore.Splitstore] + ColdStoreType = "discard" +``` +In general you _should not_ have to use the discard store, unless you +are running a network assistive node (like a bootstrapper or booster) +or have very constrained hardware with not enough disk space to +maintain a coldstore, even with garbage collection. It is also appropriate +for small nodes that are simply watching the chain. + +*Warning:* Using the discard store for a general purpose node is discouraged, unless +you really know what you are doing. Use it at your own risk. + +## Configuration Options + +These are options in the `[Chainstore.Splitstore]` section of the configuration: + +- `HotStoreType` -- specifies the type of hotstore to use. + The only currently supported option is `"badger"`. +- `ColdStoreType` -- specifies the type of coldstore to use. + The default value is `"universal"`, which will use the initial monolith blockstore + as the coldstore. + The other possible value is `"discard"`, as outlined above, which is specialized for + running without a coldstore. 
Note that the discard store wraps the initial monolith + blockstore and discards writes; this is necessary to support syncing from a snapshot. +- `MarkSetType` -- specifies the type of markset to use during compaction. + The markset is the data structure used by compaction/gc to track live objects. + The default value is `"map"`, which will use an in-memory map; if you are limited + in memory (or indeed see compaction run out of memory), you can also specify + `"badger"` which will use an disk backed markset, using badger. This will use + much less memory, but will also make compaction slower. +- `HotStoreMessageRetention` -- specifies how many finalities, beyond the 4 + finalities maintained by default, to maintain messages and message receipts in the + hotstore. This is useful for assistive nodes that want to support syncing for other + nodes beyond 4 finalities, while running with the discard coldstore option. + It is also useful for miners who accept deals and need to lookback messages beyond + the 4 finalities, which would otherwise hit the coldstore. +- `HotStoreFullGCFrequency` -- specifies how frequenty to garbage collect the hotstore + using full (moving) GC. + The default value is 20, which uses full GC every 20 compactions (about once a week); + set to 0 to disable full GC altogether. + Rationale: badger supports online GC, and this is used by default. However it has proven to + be ineffective in practice with the hotstore size slowly creeping up. In order to address this, + we have added moving GC support in our badger wrapper, which can effectively reclaim all space. + The downside is that it takes a bit longer to perform a moving GC and you also need enough + space to house the new hotstore while the old one is still live. + + +## Operation + +When the splitstore is first enabled, the existing blockstore becomes +the coldstore and a fresh hotstore is initialized. 
+ +The hotstore is warmed up on first startup so as to load all chain +headers and state roots in the current head. This allows us to +immediately gain the performance benefits of a smallerblockstore which +can be substantial for full archival nodes. + +All new writes are directed to the hotstore, while reads first hit the +hotstore, with fallback to the coldstore. + +Once 5 finalities have ellapsed, and every finality henceforth, the +blockstore _compacts_. Compaction is the process of moving all +unreachable objects within the last 4 finalities from the hotstore to +the coldstore. If the system is configured with a discard coldstore, +these objects are discarded. Note that chain headers, all the way to +genesis, are considered reachable. Stateroots and messages are +considered reachable only within the last 4 finalities, unless there +is a live reference to them. + +## Compaction + +Compaction works transactionally with the following algorithm: +- We prepare a transaction, whereby all i/o referenced objects through the API are tracked. +- We walk the chain and mark reachable objects, keeping 4 finalities of state roots and messages and all headers all the way to genesis. +- Once the chain walk is complete, we begin full transaction protection with concurrent marking; we walk and mark all references created during the chain walk. On the same time, all I/O through the API concurrently marks objects as live references. +- We collect cold objects by iterating through the hotstore and checking the mark set; if an object is not marked, then it is candidate for purge. +- When running with a coldstore, we next copy all cold objects to the coldstore. 
+- At this point we are ready to begin purging: + - We sort cold objects heaviest first, so as to never delete the consituents of a DAG before the DAG itself (which would leave dangling references) + - We delete in small batches taking a lock; each batch is checked again for marks, from the concurrent transactional mark, so as to never delete anything live +- We then end the transaction and compact/gc the hotstore. + +## Garbage Collection + +TBD -- see [#6577](https://github.com/filecoin-project/lotus/issues/6577) + +## Utilities + +`lotus-shed` has a `splitstore` command which provides some utilities: + +- `rollback` -- rolls back a splitstore installation. + This command copies the hotstore on top of the coldstore, and then deletes the splitstore + directory and associated metadata keys. + It can also optionally compact/gc the coldstore after the copy (with the `--gc-coldstore` flag) + and automatically rewrite the lotus config to disable splitstore (with the `--rewrite-config` flag). + Note: the node *must be stopped* before running this command. +- `clear` -- clears a splitstore installation for restart from snapshot. +- `check` -- asynchronously runs a basic healthcheck on the splitstore. + The results are appended to `/datastore/splitstore/check.txt`. +- `info` -- prints some basic information about the splitstore. 
diff --git a/blockstore/splitstore/debug.go b/blockstore/splitstore/debug.go new file mode 100644 index 00000000000..2be85ebfe8d --- /dev/null +++ b/blockstore/splitstore/debug.go @@ -0,0 +1,273 @@ +package splitstore + +import ( + "crypto/sha256" + "encoding/hex" + "fmt" + "os" + "os/exec" + "path/filepath" + "runtime/debug" + "strings" + "sync" + "time" + + "go.uber.org/multierr" + "golang.org/x/xerrors" + + blocks "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" +) + +type debugLog struct { + readLog, writeLog, deleteLog, stackLog *debugLogOp + + stackMx sync.Mutex + stackMap map[string]string +} + +type debugLogOp struct { + path string + mx sync.Mutex + log *os.File + count int +} + +func openDebugLog(path string) (*debugLog, error) { + basePath := filepath.Join(path, "debug") + err := os.MkdirAll(basePath, 0755) + if err != nil { + return nil, err + } + + readLog, err := openDebugLogOp(basePath, "read.log") + if err != nil { + return nil, err + } + + writeLog, err := openDebugLogOp(basePath, "write.log") + if err != nil { + _ = readLog.Close() + return nil, err + } + + deleteLog, err := openDebugLogOp(basePath, "delete.log") + if err != nil { + _ = readLog.Close() + _ = writeLog.Close() + return nil, err + } + + stackLog, err := openDebugLogOp(basePath, "stack.log") + if err != nil { + _ = readLog.Close() + _ = writeLog.Close() + _ = deleteLog.Close() + return nil, xerrors.Errorf("error opening stack log: %w", err) + } + + return &debugLog{ + readLog: readLog, + writeLog: writeLog, + deleteLog: deleteLog, + stackLog: stackLog, + stackMap: make(map[string]string), + }, nil +} + +func (d *debugLog) LogReadMiss(cid cid.Cid) { + if d == nil { + return + } + + stack := d.getStack() + err := d.readLog.Log("%s %s %s\n", d.timestamp(), cid, stack) + if err != nil { + log.Warnf("error writing read log: %s", err) + } +} + +func (d *debugLog) LogWrite(blk blocks.Block) { + if d == nil { + return + } + + var stack string + if enableDebugLogWriteTraces { + 
stack = " " + d.getStack() + } + + err := d.writeLog.Log("%s %s%s\n", d.timestamp(), blk.Cid(), stack) + if err != nil { + log.Warnf("error writing write log: %s", err) + } +} + +func (d *debugLog) LogWriteMany(blks []blocks.Block) { + if d == nil { + return + } + + var stack string + if enableDebugLogWriteTraces { + stack = " " + d.getStack() + } + + now := d.timestamp() + for _, blk := range blks { + err := d.writeLog.Log("%s %s%s\n", now, blk.Cid(), stack) + if err != nil { + log.Warnf("error writing write log: %s", err) + break + } + } +} + +func (d *debugLog) LogDelete(cids []cid.Cid) { + if d == nil { + return + } + + now := d.timestamp() + for _, c := range cids { + err := d.deleteLog.Log("%s %s\n", now, c) + if err != nil { + log.Warnf("error writing delete log: %s", err) + break + } + } +} + +func (d *debugLog) Flush() { + if d == nil { + return + } + + // rotate non-empty logs + d.readLog.Rotate() + d.writeLog.Rotate() + d.deleteLog.Rotate() + d.stackLog.Rotate() +} + +func (d *debugLog) Close() error { + if d == nil { + return nil + } + + err1 := d.readLog.Close() + err2 := d.writeLog.Close() + err3 := d.deleteLog.Close() + err4 := d.stackLog.Close() + + return multierr.Combine(err1, err2, err3, err4) +} + +func (d *debugLog) getStack() string { + sk := d.getNormalizedStackTrace() + hash := sha256.Sum256([]byte(sk)) + key := string(hash[:]) + + d.stackMx.Lock() + repr, ok := d.stackMap[key] + if !ok { + repr = hex.EncodeToString(hash[:]) + d.stackMap[key] = repr + + err := d.stackLog.Log("%s\n%s\n", repr, sk) + if err != nil { + log.Warnf("error writing stack trace for %s: %s", repr, err) + } + } + d.stackMx.Unlock() + + return repr +} + +func (d *debugLog) getNormalizedStackTrace() string { + sk := string(debug.Stack()) + + // Normalization for deduplication + // skip first line -- it's the goroutine + // for each line that ends in a ), remove the call args -- these are the registers + lines := strings.Split(sk, "\n")[1:] + for i, line := range lines { 
+ if len(line) > 0 && line[len(line)-1] == ')' { + idx := strings.LastIndex(line, "(") + if idx < 0 { + continue + } + lines[i] = line[:idx] + } + } + + return strings.Join(lines, "\n") +} + +func (d *debugLog) timestamp() string { + ts, _ := time.Now().MarshalText() + return string(ts) +} + +func openDebugLogOp(basePath, name string) (*debugLogOp, error) { + path := filepath.Join(basePath, name) + file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644) + if err != nil { + return nil, xerrors.Errorf("error opening %s: %w", name, err) + } + + return &debugLogOp{path: path, log: file}, nil +} + +func (d *debugLogOp) Close() error { + d.mx.Lock() + defer d.mx.Unlock() + + return d.log.Close() +} + +func (d *debugLogOp) Log(template string, arg ...interface{}) error { + d.mx.Lock() + defer d.mx.Unlock() + + d.count++ + _, err := fmt.Fprintf(d.log, template, arg...) + return err +} + +func (d *debugLogOp) Rotate() { + d.mx.Lock() + defer d.mx.Unlock() + + if d.count == 0 { + return + } + + err := d.log.Close() + if err != nil { + log.Warnf("error closing log (file: %s): %s", d.path, err) + return + } + + arxivPath := fmt.Sprintf("%s-%d", d.path, time.Now().Unix()) + err = os.Rename(d.path, arxivPath) + if err != nil { + log.Warnf("error moving log (file: %s): %s", d.path, err) + return + } + + go func() { + cmd := exec.Command("gzip", arxivPath) + err := cmd.Run() + if err != nil { + log.Warnf("error compressing log (file: %s): %s", arxivPath, err) + } + }() + + d.count = 0 + d.log, err = os.OpenFile(d.path, os.O_WRONLY|os.O_CREATE, 0644) + if err != nil { + log.Warnf("error opening log (file: %s): %s", d.path, err) + return + } +} diff --git a/blockstore/splitstore/markset.go b/blockstore/splitstore/markset.go index ef14a2fc668..458ea8bebaa 100644 --- a/blockstore/splitstore/markset.go +++ b/blockstore/splitstore/markset.go @@ -1,26 +1,26 @@ package splitstore import ( - "path/filepath" + "errors" "golang.org/x/xerrors" cid "github.com/ipfs/go-cid" ) 
+var errMarkSetClosed = errors.New("markset closed") + // MarkSet is a utility to keep track of seen CID, and later query for them. // // * If the expected dataset is large, it can be backed by a datastore (e.g. bbolt). -// * If a probabilistic result is acceptable, it can be backed by a bloom filter (default). +// * If a probabilistic result is acceptable, it can be backed by a bloom filter type MarkSet interface { Mark(cid.Cid) error Has(cid.Cid) (bool, error) Close() error + SetConcurrent() } -// markBytes is deliberately a non-nil empty byte slice for serialization. -var markBytes = []byte{} - type MarkSetEnv interface { Create(name string, sizeHint int64) (MarkSet, error) Close() error @@ -28,10 +28,12 @@ type MarkSetEnv interface { func OpenMarkSetEnv(path string, mtype string) (MarkSetEnv, error) { switch mtype { - case "", "bloom": + case "bloom": return NewBloomMarkSetEnv() - case "bolt": - return NewBoltMarkSetEnv(filepath.Join(path, "markset.bolt")) + case "map": + return NewMapMarkSetEnv() + case "badger": + return NewBadgerMarkSetEnv(path) default: return nil, xerrors.Errorf("unknown mark set type %s", mtype) } diff --git a/blockstore/splitstore/markset_badger.go b/blockstore/splitstore/markset_badger.go new file mode 100644 index 00000000000..ef67db213ba --- /dev/null +++ b/blockstore/splitstore/markset_badger.go @@ -0,0 +1,230 @@ +package splitstore + +import ( + "os" + "path/filepath" + "sync" + + "golang.org/x/xerrors" + + "github.com/dgraph-io/badger/v2" + "github.com/dgraph-io/badger/v2/options" + "go.uber.org/zap" + + cid "github.com/ipfs/go-cid" +) + +type BadgerMarkSetEnv struct { + path string +} + +var _ MarkSetEnv = (*BadgerMarkSetEnv)(nil) + +type BadgerMarkSet struct { + mx sync.RWMutex + cond sync.Cond + pend map[string]struct{} + writing map[int]map[string]struct{} + writers int + seqno int + + db *badger.DB + path string +} + +var _ MarkSet = (*BadgerMarkSet)(nil) + +var badgerMarkSetBatchSize = 16384 + +func NewBadgerMarkSetEnv(path 
string) (MarkSetEnv, error) { + msPath := filepath.Join(path, "markset.badger") + err := os.MkdirAll(msPath, 0755) //nolint:gosec + if err != nil { + return nil, xerrors.Errorf("error creating markset directory: %w", err) + } + + return &BadgerMarkSetEnv{path: msPath}, nil +} + +func (e *BadgerMarkSetEnv) Create(name string, sizeHint int64) (MarkSet, error) { + path := filepath.Join(e.path, name) + + // clean up first + err := os.RemoveAll(path) + if err != nil { + return nil, xerrors.Errorf("error clearing markset directory: %w", err) + } + + err = os.MkdirAll(path, 0755) //nolint:gosec + if err != nil { + return nil, xerrors.Errorf("error creating markset directory: %w", err) + } + + opts := badger.DefaultOptions(path) + opts.SyncWrites = false + opts.CompactL0OnClose = false + opts.Compression = options.None + // Note: We use FileIO for loading modes to avoid memory thrashing and interference + // between the system blockstore and the markset. + // It was observed that using the default memory mapped option resulted in + // significant interference and unacceptably high block validation times once the markset + // exceeded 1GB in size. 
+ opts.TableLoadingMode = options.FileIO + opts.ValueLogLoadingMode = options.FileIO + opts.Logger = &badgerLogger{ + SugaredLogger: log.Desugar().WithOptions(zap.AddCallerSkip(1)).Sugar(), + skip2: log.Desugar().WithOptions(zap.AddCallerSkip(2)).Sugar(), + } + + db, err := badger.Open(opts) + if err != nil { + return nil, xerrors.Errorf("error creating badger markset: %w", err) + } + + ms := &BadgerMarkSet{ + pend: make(map[string]struct{}), + writing: make(map[int]map[string]struct{}), + db: db, + path: path, + } + ms.cond.L = &ms.mx + + return ms, nil +} + +func (e *BadgerMarkSetEnv) Close() error { + return os.RemoveAll(e.path) +} + +func (s *BadgerMarkSet) Mark(c cid.Cid) error { + s.mx.Lock() + + if s.pend == nil { + s.mx.Unlock() + return errMarkSetClosed + } + + s.pend[string(c.Hash())] = struct{}{} + + if len(s.pend) < badgerMarkSetBatchSize { + s.mx.Unlock() + return nil + } + + pend := s.pend + seqno := s.seqno + s.seqno++ + s.writing[seqno] = pend + s.pend = make(map[string]struct{}) + s.writers++ + s.mx.Unlock() + + defer func() { + s.mx.Lock() + defer s.mx.Unlock() + + delete(s.writing, seqno) + s.writers-- + if s.writers == 0 { + s.cond.Broadcast() + } + }() + + empty := []byte{} // not nil + + batch := s.db.NewWriteBatch() + defer batch.Cancel() + + for k := range pend { + if err := batch.Set([]byte(k), empty); err != nil { + return err + } + } + + err := batch.Flush() + if err != nil { + return xerrors.Errorf("error flushing batch to badger markset: %w", err) + } + + return nil +} + +func (s *BadgerMarkSet) Has(c cid.Cid) (bool, error) { + s.mx.RLock() + defer s.mx.RUnlock() + + if s.pend == nil { + return false, errMarkSetClosed + } + + key := c.Hash() + pendKey := string(key) + _, ok := s.pend[pendKey] + if ok { + return true, nil + } + + for _, wr := range s.writing { + _, ok := wr[pendKey] + if ok { + return true, nil + } + } + + err := s.db.View(func(txn *badger.Txn) error { + _, err := txn.Get(key) + return err + }) + + switch err { + case 
nil: + return true, nil + + case badger.ErrKeyNotFound: + return false, nil + + default: + return false, xerrors.Errorf("error checking badger markset: %w", err) + } +} + +func (s *BadgerMarkSet) Close() error { + s.mx.Lock() + defer s.mx.Unlock() + + if s.pend == nil { + return nil + } + + for s.writers > 0 { + s.cond.Wait() + } + + s.pend = nil + db := s.db + s.db = nil + + err := db.Close() + if err != nil { + return xerrors.Errorf("error closing badger markset: %w", err) + } + + err = os.RemoveAll(s.path) + if err != nil { + return xerrors.Errorf("error deleting badger markset: %w", err) + } + + return nil +} + +func (s *BadgerMarkSet) SetConcurrent() {} + +// badger logging through go-log +type badgerLogger struct { + *zap.SugaredLogger + skip2 *zap.SugaredLogger +} + +func (b *badgerLogger) Warningf(format string, args ...interface{}) {} +func (b *badgerLogger) Infof(format string, args ...interface{}) {} +func (b *badgerLogger) Debugf(format string, args ...interface{}) {} diff --git a/blockstore/splitstore/markset_bloom.go b/blockstore/splitstore/markset_bloom.go index c213436c898..9261de7c753 100644 --- a/blockstore/splitstore/markset_bloom.go +++ b/blockstore/splitstore/markset_bloom.go @@ -3,6 +3,7 @@ package splitstore import ( "crypto/rand" "crypto/sha256" + "sync" "golang.org/x/xerrors" @@ -21,7 +22,9 @@ var _ MarkSetEnv = (*BloomMarkSetEnv)(nil) type BloomMarkSet struct { salt []byte + mx sync.RWMutex bf *bbloom.Bloom + ts bool } var _ MarkSet = (*BloomMarkSet)(nil) @@ -64,14 +67,41 @@ func (s *BloomMarkSet) saltedKey(cid cid.Cid) []byte { } func (s *BloomMarkSet) Mark(cid cid.Cid) error { + if s.ts { + s.mx.Lock() + defer s.mx.Unlock() + } + + if s.bf == nil { + return errMarkSetClosed + } + s.bf.Add(s.saltedKey(cid)) return nil } func (s *BloomMarkSet) Has(cid cid.Cid) (bool, error) { + if s.ts { + s.mx.RLock() + defer s.mx.RUnlock() + } + + if s.bf == nil { + return false, errMarkSetClosed + } + return s.bf.Has(s.saltedKey(cid)), nil } func (s 
*BloomMarkSet) Close() error { + if s.ts { + s.mx.Lock() + defer s.mx.Unlock() + } + s.bf = nil return nil } + +func (s *BloomMarkSet) SetConcurrent() { + s.ts = true +} diff --git a/blockstore/splitstore/markset_bolt.go b/blockstore/splitstore/markset_bolt.go deleted file mode 100644 index cab0dd74af9..00000000000 --- a/blockstore/splitstore/markset_bolt.go +++ /dev/null @@ -1,81 +0,0 @@ -package splitstore - -import ( - "time" - - "golang.org/x/xerrors" - - cid "github.com/ipfs/go-cid" - bolt "go.etcd.io/bbolt" -) - -type BoltMarkSetEnv struct { - db *bolt.DB -} - -var _ MarkSetEnv = (*BoltMarkSetEnv)(nil) - -type BoltMarkSet struct { - db *bolt.DB - bucketId []byte -} - -var _ MarkSet = (*BoltMarkSet)(nil) - -func NewBoltMarkSetEnv(path string) (*BoltMarkSetEnv, error) { - db, err := bolt.Open(path, 0644, - &bolt.Options{ - Timeout: 1 * time.Second, - NoSync: true, - }) - if err != nil { - return nil, err - } - - return &BoltMarkSetEnv{db: db}, nil -} - -func (e *BoltMarkSetEnv) Create(name string, hint int64) (MarkSet, error) { - bucketId := []byte(name) - err := e.db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucketIfNotExists(bucketId) - if err != nil { - return xerrors.Errorf("error creating bolt db bucket %s: %w", name, err) - } - return nil - }) - - if err != nil { - return nil, err - } - - return &BoltMarkSet{db: e.db, bucketId: bucketId}, nil -} - -func (e *BoltMarkSetEnv) Close() error { - return e.db.Close() -} - -func (s *BoltMarkSet) Mark(cid cid.Cid) error { - return s.db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket(s.bucketId) - return b.Put(cid.Hash(), markBytes) - }) -} - -func (s *BoltMarkSet) Has(cid cid.Cid) (result bool, err error) { - err = s.db.View(func(tx *bolt.Tx) error { - b := tx.Bucket(s.bucketId) - v := b.Get(cid.Hash()) - result = v != nil - return nil - }) - - return result, err -} - -func (s *BoltMarkSet) Close() error { - return s.db.Update(func(tx *bolt.Tx) error { - return tx.DeleteBucket(s.bucketId) - }) -} 
diff --git a/blockstore/splitstore/markset_map.go b/blockstore/splitstore/markset_map.go new file mode 100644 index 00000000000..197c824242a --- /dev/null +++ b/blockstore/splitstore/markset_map.go @@ -0,0 +1,75 @@ +package splitstore + +import ( + "sync" + + cid "github.com/ipfs/go-cid" +) + +type MapMarkSetEnv struct{} + +var _ MarkSetEnv = (*MapMarkSetEnv)(nil) + +type MapMarkSet struct { + mx sync.RWMutex + set map[string]struct{} + + ts bool +} + +var _ MarkSet = (*MapMarkSet)(nil) + +func NewMapMarkSetEnv() (*MapMarkSetEnv, error) { + return &MapMarkSetEnv{}, nil +} + +func (e *MapMarkSetEnv) Create(name string, sizeHint int64) (MarkSet, error) { + return &MapMarkSet{ + set: make(map[string]struct{}, sizeHint), + }, nil +} + +func (e *MapMarkSetEnv) Close() error { + return nil +} + +func (s *MapMarkSet) Mark(cid cid.Cid) error { + if s.ts { + s.mx.Lock() + defer s.mx.Unlock() + } + + if s.set == nil { + return errMarkSetClosed + } + + s.set[string(cid.Hash())] = struct{}{} + return nil +} + +func (s *MapMarkSet) Has(cid cid.Cid) (bool, error) { + if s.ts { + s.mx.RLock() + defer s.mx.RUnlock() + } + + if s.set == nil { + return false, errMarkSetClosed + } + + _, ok := s.set[string(cid.Hash())] + return ok, nil +} + +func (s *MapMarkSet) Close() error { + if s.ts { + s.mx.Lock() + defer s.mx.Unlock() + } + s.set = nil + return nil +} + +func (s *MapMarkSet) SetConcurrent() { + s.ts = true +} diff --git a/blockstore/splitstore/markset_test.go b/blockstore/splitstore/markset_test.go index 367ab8d06e7..38519949a4a 100644 --- a/blockstore/splitstore/markset_test.go +++ b/blockstore/splitstore/markset_test.go @@ -8,14 +8,23 @@ import ( "github.com/multiformats/go-multihash" ) -func TestBoltMarkSet(t *testing.T) { - testMarkSet(t, "bolt") +func TestMapMarkSet(t *testing.T) { + testMarkSet(t, "map") } func TestBloomMarkSet(t *testing.T) { testMarkSet(t, "bloom") } +func TestBadgerMarkSet(t *testing.T) { + bs := badgerMarkSetBatchSize + badgerMarkSetBatchSize = 1 + 
t.Cleanup(func() { + badgerMarkSetBatchSize = bs + }) + testMarkSet(t, "badger") +} + func testMarkSet(t *testing.T, lsType string) { t.Helper() diff --git a/blockstore/splitstore/splitstore.go b/blockstore/splitstore/splitstore.go index f6d26bbdd60..171b5a6e416 100644 --- a/blockstore/splitstore/splitstore.go +++ b/blockstore/splitstore/splitstore.go @@ -2,8 +2,8 @@ package splitstore import ( "context" - "encoding/binary" "errors" + "os" "sync" "sync/atomic" "time" @@ -17,41 +17,13 @@ import ( logging "github.com/ipfs/go-log/v2" "github.com/filecoin-project/go-state-types/abi" - bstore "github.com/filecoin-project/lotus/blockstore" - "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/metrics" "go.opencensus.io/stats" ) -var ( - // CompactionThreshold is the number of epochs that need to have elapsed - // from the previously compacted epoch to trigger a new compaction. - // - // |················· CompactionThreshold ··················| - // | | - // =======‖≡≡≡≡≡≡≡‖-----------------------|------------------------» - // | | | chain --> ↑__ current epoch - // |·······| | - // ↑________ CompactionCold ↑________ CompactionBoundary - // - // === :: cold (already archived) - // ≡≡≡ :: to be archived in this compaction - // --- :: hot - CompactionThreshold = 5 * build.Finality - - // CompactionCold is the number of epochs that will be archived to the - // cold store on compaction. See diagram on CompactionThreshold for a - // better sense. - CompactionCold = build.Finality - - // CompactionBoundary is the number of epochs from the current epoch at which - // we will walk the chain for live objects - CompactionBoundary = 2 * build.Finality -) - var ( // baseEpochKey stores the base epoch (last compaction epoch) in the // metadata store. 
@@ -66,37 +38,56 @@ var ( // this is first computed at warmup and updated in every compaction markSetSizeKey = dstore.NewKey("/splitstore/markSetSize") + // compactionIndexKey stores the compaction index (serial number) + compactionIndexKey = dstore.NewKey("/splitstore/compactionIndex") + log = logging.Logger("splitstore") + + // set this to true if you are debugging the splitstore to enable debug logging + enableDebugLog = false + // set this to true if you want to track origin stack traces in the write log + enableDebugLogWriteTraces = false ) -const ( - batchSize = 16384 +func init() { + if os.Getenv("LOTUS_SPLITSTORE_DEBUG_LOG") == "1" { + enableDebugLog = true + } - defaultColdPurgeSize = 7_000_000 - defaultDeadPurgeSize = 1_000_000 -) + if os.Getenv("LOTUS_SPLITSTORE_DEBUG_LOG_WRITE_TRACES") == "1" { + enableDebugLogWriteTraces = true + } +} type Config struct { - // TrackingStore is the type of tracking store to use. - // - // Supported values are: "bolt" (default if omitted), "mem" (for tests and readonly access). - TrackingStoreType string - // MarkSetType is the type of mark set to use. // - // Supported values are: "bloom" (default if omitted), "bolt". + // The default value is "map", which uses an in-memory map-backed markset. + // If you are constrained in memory (i.e. compaction runs out of memory), you + // can use "badger", which will use a disk-backed markset using badger. + // Note that compaction will take quite a bit longer when using the "badger" option, + // but that shouldn't really matter (as long as it is under 7.5hrs). MarkSetType string - // perform full reachability analysis (expensive) for compaction - // You should enable this option if you plan to use the splitstore without a backing coldstore - EnableFullCompaction bool - // EXPERIMENTAL enable pruning of unreachable objects. - // This has not been sufficiently tested yet; only enable if you know what you are doing. - // Only applies if you enable full compaction. 
- EnableGC bool - // full archival nodes should enable this if EnableFullCompaction is enabled - // do NOT enable this if you synced from a snapshot. - // Only applies if you enabled full compaction - Archival bool + + // DiscardColdBlocks indicates whether to skip moving cold blocks to the coldstore. + // If the splitstore is running with a noop coldstore then this option is set to true + // which skips moving (as it is a noop, but still takes time to read all the cold objects) + // and directly purges cold blocks. + DiscardColdBlocks bool + + // HotstoreMessageRetention indicates the hotstore retention policy for messages. + // It has the following semantics: + // - a value of 0 will only retain messages within the compaction boundary (4 finalities) + // - a positive integer indicates the number of finalities, outside the compaction boundary, + // for which messages will be retained in the hotstore. + HotStoreMessageRetention uint64 + + // HotstoreFullGCFrequency indicates how frequently (in terms of compactions) to garbage collect + // the hotstore using full (moving) GC if supported by the hotstore. + // A value of 0 disables full GC entirely. + // A positive value is the number of compactions before a full GC is performed; + // a value of 1 will perform full GC in every compaction. + HotStoreFullGCFrequency uint64 } // ChainAccessor allows the Splitstore to access the chain. It will most likely @@ -105,37 +96,59 @@ type ChainAccessor interface { GetTipsetByHeight(context.Context, abi.ChainEpoch, *types.TipSet, bool) (*types.TipSet, error) GetHeaviestTipSet() *types.TipSet SubscribeHeadChanges(change func(revert []*types.TipSet, apply []*types.TipSet) error) - WalkSnapshot(context.Context, *types.TipSet, abi.ChainEpoch, bool, bool, func(cid.Cid) error) error +} + +// hotstore is the interface that must be satisfied by the hot blockstore; it is an extension +// of the Blockstore interface with the traits we need for compaction. 
+type hotstore interface { + bstore.Blockstore + bstore.BlockstoreIterator } type SplitStore struct { - compacting int32 // compaction (or warmp up) in progress - critsection int32 // compaction critical section - closing int32 // the split store is closing + compacting int32 // compaction/prune/warmup in progress + closing int32 // the splitstore is closing + + cfg *Config + path string - fullCompaction bool - enableGC bool - skipOldMsgs bool - skipMsgReceipts bool + mx sync.Mutex + warmupEpoch abi.ChainEpoch // protected by mx + baseEpoch abi.ChainEpoch // protected by compaction lock - baseEpoch abi.ChainEpoch - warmupEpoch abi.ChainEpoch + headChangeMx sync.Mutex coldPurgeSize int - deadPurgeSize int - mx sync.Mutex - curTs *types.TipSet + chain ChainAccessor + ds dstore.Datastore + cold bstore.Blockstore + hot hotstore + + markSetEnv MarkSetEnv + markSetSize int64 - chain ChainAccessor - ds dstore.Datastore - hot bstore.Blockstore - cold bstore.Blockstore - tracker TrackingStore + compactionIndex int64 - env MarkSetEnv + ctx context.Context + cancel func() - markSetSize int64 + debug *debugLog + + // transactional protection for concurrent read/writes during compaction + txnLk sync.RWMutex + txnViewsMx sync.Mutex + txnViewsCond sync.Cond + txnViews int + txnViewsWaiting bool + txnActive bool + txnProtect MarkSet + txnRefsMx sync.Mutex + txnRefs map[cid.Cid]struct{} + txnMissing map[cid.Cid]struct{} + + // registered protectors + protectors []func(func(cid.Cid) error) error } var _ bstore.Blockstore = (*SplitStore)(nil) @@ -144,37 +157,43 @@ var _ bstore.Blockstore = (*SplitStore)(nil) // is backed by the provided hot and cold stores. The returned SplitStore MUST be // attached to the ChainStore with Start in order to trigger compaction. 
func Open(path string, ds dstore.Datastore, hot, cold bstore.Blockstore, cfg *Config) (*SplitStore, error) { - // the tracking store - tracker, err := OpenTrackingStore(path, cfg.TrackingStoreType) - if err != nil { - return nil, err + // hot blockstore must support the hotstore interface + hots, ok := hot.(hotstore) + if !ok { + // be specific about what is missing + if _, ok := hot.(bstore.BlockstoreIterator); !ok { + return nil, xerrors.Errorf("hot blockstore does not support efficient iteration: %T", hot) + } + + return nil, xerrors.Errorf("hot blockstore does not support the necessary traits: %T", hot) } // the markset env - env, err := OpenMarkSetEnv(path, cfg.MarkSetType) + markSetEnv, err := OpenMarkSetEnv(path, cfg.MarkSetType) if err != nil { - _ = tracker.Close() return nil, err } // and now we can make a SplitStore ss := &SplitStore{ - ds: ds, - hot: hot, - cold: cold, - tracker: tracker, - env: env, - - fullCompaction: cfg.EnableFullCompaction, - enableGC: cfg.EnableGC, - skipOldMsgs: !(cfg.EnableFullCompaction && cfg.Archival), - skipMsgReceipts: !(cfg.EnableFullCompaction && cfg.Archival), + cfg: cfg, + path: path, + ds: ds, + cold: cold, + hot: hots, + markSetEnv: markSetEnv, coldPurgeSize: defaultColdPurgeSize, } - if cfg.EnableGC { - ss.deadPurgeSize = defaultDeadPurgeSize + ss.txnViewsCond.L = &ss.txnViewsMx + ss.ctx, ss.cancel = context.WithCancel(context.Background()) + + if enableDebugLog { + ss.debug, err = openDebugLog(path) + if err != nil { + return nil, err + } } return ss, nil @@ -192,26 +211,56 @@ func (s *SplitStore) DeleteMany(_ []cid.Cid) error { } func (s *SplitStore) Has(cid cid.Cid) (bool, error) { + if isIdentiyCid(cid) { + return true, nil + } + + s.txnLk.RLock() + defer s.txnLk.RUnlock() + has, err := s.hot.Has(cid) - if err != nil || has { + if err != nil { return has, err } + if has { + s.trackTxnRef(cid) + return true, nil + } + return s.cold.Has(cid) } func (s *SplitStore) Get(cid cid.Cid) (blocks.Block, error) { + if 
isIdentiyCid(cid) { + data, err := decodeIdentityCid(cid) + if err != nil { + return nil, err + } + + return blocks.NewBlockWithCid(data, cid) + } + + s.txnLk.RLock() + defer s.txnLk.RUnlock() + blk, err := s.hot.Get(cid) switch err { case nil: + s.trackTxnRef(cid) return blk, nil case bstore.ErrNotFound: + if s.isWarm() { + s.debug.LogReadMiss(cid) + } + blk, err = s.cold.Get(cid) if err == nil { - stats.Record(context.Background(), metrics.SplitstoreMiss.M(1)) + stats.Record(s.ctx, metrics.SplitstoreMiss.M(1)) + } return blk, err @@ -221,16 +270,33 @@ func (s *SplitStore) Get(cid cid.Cid) (blocks.Block, error) { } func (s *SplitStore) GetSize(cid cid.Cid) (int, error) { + if isIdentiyCid(cid) { + data, err := decodeIdentityCid(cid) + if err != nil { + return 0, err + } + + return len(data), nil + } + + s.txnLk.RLock() + defer s.txnLk.RUnlock() + size, err := s.hot.GetSize(cid) switch err { case nil: + s.trackTxnRef(cid) return size, nil case bstore.ErrNotFound: + if s.isWarm() { + s.debug.LogReadMiss(cid) + } + size, err = s.cold.GetSize(cid) if err == nil { - stats.Record(context.Background(), metrics.SplitstoreMiss.M(1)) + stats.Record(s.ctx, metrics.SplitstoreMiss.M(1)) } return size, err @@ -240,46 +306,67 @@ func (s *SplitStore) GetSize(cid cid.Cid) (int, error) { } func (s *SplitStore) Put(blk blocks.Block) error { - s.mx.Lock() - if s.curTs == nil { - s.mx.Unlock() - return s.cold.Put(blk) + if isIdentiyCid(blk.Cid()) { + return nil } - epoch := s.curTs.Height() - s.mx.Unlock() + s.txnLk.RLock() + defer s.txnLk.RUnlock() - err := s.tracker.Put(blk.Cid(), epoch) + err := s.hot.Put(blk) if err != nil { - log.Errorf("error tracking CID in hotstore: %s; falling back to coldstore", err) - return s.cold.Put(blk) + return err } - return s.hot.Put(blk) + s.debug.LogWrite(blk) + + s.trackTxnRef(blk.Cid()) + return nil } func (s *SplitStore) PutMany(blks []blocks.Block) error { - s.mx.Lock() - if s.curTs == nil { - s.mx.Unlock() - return s.cold.PutMany(blks) + // 
filter identites + idcids := 0 + for _, blk := range blks { + if isIdentiyCid(blk.Cid()) { + idcids++ + } } - epoch := s.curTs.Height() - s.mx.Unlock() + if idcids > 0 { + if idcids == len(blks) { + // it's all identities + return nil + } + + filtered := make([]blocks.Block, 0, len(blks)-idcids) + for _, blk := range blks { + if isIdentiyCid(blk.Cid()) { + continue + } + filtered = append(filtered, blk) + } + + blks = filtered + } batch := make([]cid.Cid, 0, len(blks)) for _, blk := range blks { batch = append(batch, blk.Cid()) } - err := s.tracker.PutBatch(batch, epoch) + s.txnLk.RLock() + defer s.txnLk.RUnlock() + + err := s.hot.PutMany(blks) if err != nil { - log.Errorf("error tracking CIDs in hotstore: %s; falling back to coldstore", err) - return s.cold.PutMany(blks) + return err } - return s.hot.PutMany(blks) + s.debug.LogWriteMany(blks) + + s.trackTxnRefMany(batch) + return nil } func (s *SplitStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { @@ -297,15 +384,21 @@ func (s *SplitStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { return nil, err } - ch := make(chan cid.Cid) + seen := cid.NewSet() + ch := make(chan cid.Cid, 8) // buffer is arbitrary, just enough to avoid context switches go func() { defer cancel() defer close(ch) for _, in := range []<-chan cid.Cid{chHot, chCold} { - for cid := range in { + for c := range in { + // ensure we only emit each key once + if !seen.Visit(c) { + continue + } + select { - case ch <- cid: + case ch <- c: case <-ctx.Done(): return } @@ -322,20 +415,57 @@ func (s *SplitStore) HashOnRead(enabled bool) { } func (s *SplitStore) View(cid cid.Cid, cb func([]byte) error) error { + if isIdentiyCid(cid) { + data, err := decodeIdentityCid(cid) + if err != nil { + return err + } + + return cb(data) + } + + // views are (optimistically) protected two-fold: + // - if there is an active transaction, then the reference is protected. 
+ // - if there is no active transaction, active views are tracked in a + // wait group and compaction is inhibited from starting until they + // have all completed. this is necessary to ensure that a (very) long-running + // view can't have its data pointer deleted, which would be catastrophic. + // Note that we can't just RLock for the duration of the view, as this could + // lead to deadlock with recursive views. + s.protectView(cid) + defer s.viewDone() + err := s.hot.View(cid, cb) switch err { case bstore.ErrNotFound: - return s.cold.View(cid, cb) + if s.isWarm() { + s.debug.LogReadMiss(cid) + } + + err = s.cold.View(cid, cb) + if err == nil { + stats.Record(s.ctx, metrics.SplitstoreMiss.M(1)) + } + return err default: return err } } +func (s *SplitStore) isWarm() bool { + s.mx.Lock() + defer s.mx.Unlock() + return s.warmupEpoch > 0 +} + // State tracking func (s *SplitStore) Start(chain ChainAccessor) error { s.chain = chain - s.curTs = chain.GetHeaviestTipSet() + curTs := chain.GetHeaviestTipSet() + + // should we warmup + warmup := false // load base epoch from metadata ds // if none, then use current epoch because it's a fresh start @@ -345,12 +475,12 @@ func (s *SplitStore) Start(chain ChainAccessor) error { s.baseEpoch = bytesToEpoch(bs) case dstore.ErrNotFound: - if s.curTs == nil { + if curTs == nil { // this can happen in some tests break } - err = s.setBaseEpoch(s.curTs.Height()) + err = s.setBaseEpoch(curTs.Height()) if err != nil { return xerrors.Errorf("error saving base epoch: %w", err) } @@ -360,20 +490,19 @@ func (s *SplitStore) Start(chain ChainAccessor) error { } // load warmup epoch from metadata ds - // if none, then the splitstore will warm up the hotstore at first head change notif - // by walking the current tipset bs, err = s.ds.Get(warmupEpochKey) switch err { case nil: s.warmupEpoch = bytesToEpoch(bs) case dstore.ErrNotFound: + warmup = true + default: return xerrors.Errorf("error loading warmup epoch: %w", err) } - // load 
markSetSize from metadata ds - // if none, the splitstore will compute it during warmup and update in every compaction + // load markSetSize from metadata ds to provide a size hint for marksets bs, err = s.ds.Get(markSetSizeKey) switch err { case nil: @@ -384,668 +513,62 @@ func (s *SplitStore) Start(chain ChainAccessor) error { return xerrors.Errorf("error loading mark set size: %w", err) } - log.Infow("starting splitstore", "baseEpoch", s.baseEpoch, "warmupEpoch", s.warmupEpoch) - - // watch the chain - chain.SubscribeHeadChanges(s.HeadChange) - - return nil -} - -func (s *SplitStore) Close() error { - atomic.StoreInt32(&s.closing, 1) - - if atomic.LoadInt32(&s.critsection) == 1 { - log.Warn("ongoing compaction in critical section; waiting for it to finish...") - for atomic.LoadInt32(&s.critsection) == 1 { - time.Sleep(time.Second) - } - } - - return multierr.Combine(s.tracker.Close(), s.env.Close()) -} - -func (s *SplitStore) HeadChange(_, apply []*types.TipSet) error { - // Revert only. 
- if len(apply) == 0 { - return nil - } - - s.mx.Lock() - curTs := apply[len(apply)-1] - epoch := curTs.Height() - s.curTs = curTs - s.mx.Unlock() - - if !atomic.CompareAndSwapInt32(&s.compacting, 0, 1) { - // we are currently compacting, do nothing and wait for the next head change - return nil - } - - if s.warmupEpoch == 0 { - // splitstore needs to warm up - go func() { - defer atomic.StoreInt32(&s.compacting, 0) - - log.Info("warming up hotstore") - start := time.Now() - - s.warmup(curTs) - - log.Infow("warm up done", "took", time.Since(start)) - }() - - return nil - } - - if epoch-s.baseEpoch > CompactionThreshold { - // it's time to compact - go func() { - defer atomic.StoreInt32(&s.compacting, 0) - - log.Info("compacting splitstore") - start := time.Now() - - s.compact(curTs) - - log.Infow("compaction done", "took", time.Since(start)) - }() - } else { - // no compaction necessary - atomic.StoreInt32(&s.compacting, 0) - } - - return nil -} - -func (s *SplitStore) warmup(curTs *types.TipSet) { - epoch := curTs.Height() - - batchHot := make([]blocks.Block, 0, batchSize) - batchSnoop := make([]cid.Cid, 0, batchSize) - - count := int64(0) - err := s.chain.WalkSnapshot(context.Background(), curTs, 1, s.skipOldMsgs, s.skipMsgReceipts, - func(cid cid.Cid) error { - count++ - - has, err := s.hot.Has(cid) - if err != nil { - return err - } - - if has { - return nil - } - - blk, err := s.cold.Get(cid) - if err != nil { - return err - } - - batchHot = append(batchHot, blk) - batchSnoop = append(batchSnoop, cid) - - if len(batchHot) == batchSize { - err = s.tracker.PutBatch(batchSnoop, epoch) - if err != nil { - return err - } - batchSnoop = batchSnoop[:0] - - err = s.hot.PutMany(batchHot) - if err != nil { - return err - } - batchHot = batchHot[:0] - } - - return nil - }) - - if err != nil { - log.Errorf("error warming up splitstore: %s", err) - return - } - - if len(batchHot) > 0 { - err = s.tracker.PutBatch(batchSnoop, epoch) - if err != nil { - log.Errorf("error 
warming up splitstore: %s", err) - return - } - - err = s.hot.PutMany(batchHot) - if err != nil { - log.Errorf("error warming up splitstore: %s", err) - return - } - } - - if count > s.markSetSize { - s.markSetSize = count + count>>2 // overestimate a bit - } - - // save the warmup epoch - s.warmupEpoch = epoch - err = s.ds.Put(warmupEpochKey, epochToBytes(epoch)) - if err != nil { - log.Errorf("error saving warmup epoch: %s", err) - } - - err = s.ds.Put(markSetSizeKey, int64ToBytes(s.markSetSize)) - if err != nil { - log.Errorf("error saving mark set size: %s", err) - } -} - -// Compaction/GC Algorithm -func (s *SplitStore) compact(curTs *types.TipSet) { - var err error - if s.markSetSize == 0 { - start := time.Now() - log.Info("estimating mark set size") - err = s.estimateMarkSetSize(curTs) - if err != nil { - log.Errorf("error estimating mark set size: %s; aborting compaction", err) - return - } - log.Infow("estimating mark set size done", "took", time.Since(start), "size", s.markSetSize) - } else { - log.Infow("current mark set size estimate", "size", s.markSetSize) - } - - start := time.Now() - if s.fullCompaction { - err = s.compactFull(curTs) - } else { - err = s.compactSimple(curTs) - } - took := time.Since(start).Milliseconds() - stats.Record(context.Background(), metrics.SplitstoreCompactionTimeSeconds.M(float64(took)/1e3)) - - if err != nil { - log.Errorf("COMPACTION ERROR: %s", err) - } -} - -func (s *SplitStore) estimateMarkSetSize(curTs *types.TipSet) error { - var count int64 - err := s.chain.WalkSnapshot(context.Background(), curTs, 1, s.skipOldMsgs, s.skipMsgReceipts, - func(cid cid.Cid) error { - count++ - return nil - }) - - if err != nil { - return err - } - - s.markSetSize = count + count>>2 // overestimate a bit - return nil -} - -func (s *SplitStore) compactSimple(curTs *types.TipSet) error { - coldEpoch := s.baseEpoch + CompactionCold - currentEpoch := curTs.Height() - boundaryEpoch := currentEpoch - CompactionBoundary - - log.Infow("running 
simple compaction", "currentEpoch", currentEpoch, "baseEpoch", s.baseEpoch, "coldEpoch", coldEpoch, "boundaryEpoch", boundaryEpoch) - - coldSet, err := s.env.Create("cold", s.markSetSize) - if err != nil { - return xerrors.Errorf("error creating mark set: %w", err) - } - defer coldSet.Close() //nolint:errcheck - - // 1. mark reachable cold objects by looking at the objects reachable only from the cold epoch - log.Infow("marking reachable cold blocks", "boundaryEpoch", boundaryEpoch) - startMark := time.Now() - - boundaryTs, err := s.chain.GetTipsetByHeight(context.Background(), boundaryEpoch, curTs, true) - if err != nil { - return xerrors.Errorf("error getting tipset at boundary epoch: %w", err) - } - - var count int64 - err = s.chain.WalkSnapshot(context.Background(), boundaryTs, 1, s.skipOldMsgs, s.skipMsgReceipts, - func(cid cid.Cid) error { - count++ - return coldSet.Mark(cid) - }) - - if err != nil { - return xerrors.Errorf("error marking cold blocks: %w", err) - } + // load compactionIndex from metadata ds to provide a hint as to when to perform moving gc + bs, err = s.ds.Get(compactionIndexKey) + switch err { + case nil: + s.compactionIndex = bytesToInt64(bs) - if count > s.markSetSize { - s.markSetSize = count + count>>2 // overestimate a bit + case dstore.ErrNotFound: + // this is potentially an upgrade from splitstore v0; schedule a warmup as v0 has + // some issues with hot references leaking into the coldstore. + warmup = true + default: + return xerrors.Errorf("error loading compaction index: %w", err) } - log.Infow("marking done", "took", time.Since(startMark)) - - // 2. 
move cold unreachable objects to the coldstore - log.Info("collecting cold objects") - startCollect := time.Now() - - cold := make([]cid.Cid, 0, s.coldPurgeSize) - - // some stats for logging - var hotCnt, coldCnt int - - // 2.1 iterate through the tracking store and collect unreachable cold objects - err = s.tracker.ForEach(func(cid cid.Cid, writeEpoch abi.ChainEpoch) error { - // is the object still hot? - if writeEpoch > coldEpoch { - // yes, stay in the hotstore - hotCnt++ - return nil - } + log.Infow("starting splitstore", "baseEpoch", s.baseEpoch, "warmupEpoch", s.warmupEpoch) - // check whether it is reachable in the cold boundary - mark, err := coldSet.Has(cid) + if warmup { + err = s.warmup(curTs) if err != nil { - return xerrors.Errorf("error checkiing cold set for %s: %w", cid, err) + return xerrors.Errorf("error starting warmup: %w", err) } - - if mark { - hotCnt++ - return nil - } - - // it's cold, mark it for move - cold = append(cold, cid) - coldCnt++ - return nil - }) - - if err != nil { - return xerrors.Errorf("error collecting cold objects: %w", err) - } - - if coldCnt > 0 { - s.coldPurgeSize = coldCnt + coldCnt>>2 // overestimate a bit - } - - log.Infow("collection done", "took", time.Since(startCollect)) - log.Infow("compaction stats", "hot", hotCnt, "cold", coldCnt) - stats.Record(context.Background(), metrics.SplitstoreCompactionHot.M(int64(hotCnt))) - stats.Record(context.Background(), metrics.SplitstoreCompactionCold.M(int64(coldCnt))) - - // Enter critical section - atomic.StoreInt32(&s.critsection, 1) - defer atomic.StoreInt32(&s.critsection, 0) - - // check to see if we are closing first; if that's the case just return - if atomic.LoadInt32(&s.closing) == 1 { - log.Info("splitstore is closing; aborting compaction") - return xerrors.Errorf("compaction aborted") - } - - // 2.2 copy the cold objects to the coldstore - log.Info("moving cold blocks to the coldstore") - startMove := time.Now() - err = s.moveColdBlocks(cold) - if err != nil { - 
return xerrors.Errorf("error moving cold blocks: %w", err) - } - log.Infow("moving done", "took", time.Since(startMove)) - - // 2.3 delete cold objects from the hotstore - log.Info("purging cold objects from the hotstore") - startPurge := time.Now() - err = s.purgeBlocks(cold) - if err != nil { - return xerrors.Errorf("error purging cold blocks: %w", err) - } - log.Infow("purging cold from hotstore done", "took", time.Since(startPurge)) - - // 2.4 remove the tracker tracking for cold objects - startPurge = time.Now() - log.Info("purging cold objects from tracker") - err = s.purgeTracking(cold) - if err != nil { - return xerrors.Errorf("error purging tracking for cold blocks: %w", err) - } - log.Infow("purging cold from tracker done", "took", time.Since(startPurge)) - - // we are done; do some housekeeping - err = s.tracker.Sync() - if err != nil { - return xerrors.Errorf("error syncing tracker: %w", err) - } - - s.gcHotstore() - - err = s.setBaseEpoch(coldEpoch) - if err != nil { - return xerrors.Errorf("error saving base epoch: %w", err) } - err = s.ds.Put(markSetSizeKey, int64ToBytes(s.markSetSize)) - if err != nil { - return xerrors.Errorf("error saving mark set size: %w", err) - } + // watch the chain + chain.SubscribeHeadChanges(s.HeadChange) return nil } -func (s *SplitStore) moveColdBlocks(cold []cid.Cid) error { - batch := make([]blocks.Block, 0, batchSize) - - for _, cid := range cold { - blk, err := s.hot.Get(cid) - if err != nil { - if err == dstore.ErrNotFound { - // this can happen if the node is killed after we have deleted the block from the hotstore - // but before we have deleted it from the tracker; just delete the tracker. 
- err = s.tracker.Delete(cid) - if err != nil { - return xerrors.Errorf("error deleting unreachable cid %s from tracker: %w", cid, err) - } - } else { - return xerrors.Errorf("error retrieving tracked block %s from hotstore: %w", cid, err) - } - - continue - } - - batch = append(batch, blk) - if len(batch) == batchSize { - err = s.cold.PutMany(batch) - if err != nil { - return xerrors.Errorf("error putting batch to coldstore: %w", err) - } - batch = batch[:0] - } - } - - if len(batch) > 0 { - err := s.cold.PutMany(batch) - if err != nil { - return xerrors.Errorf("error putting cold to coldstore: %w", err) - } - } +func (s *SplitStore) AddProtector(protector func(func(cid.Cid) error) error) { + s.mx.Lock() + defer s.mx.Unlock() - return nil + s.protectors = append(s.protectors, protector) } -func (s *SplitStore) purgeBatch(cids []cid.Cid, deleteBatch func([]cid.Cid) error) error { - if len(cids) == 0 { +func (s *SplitStore) Close() error { + if !atomic.CompareAndSwapInt32(&s.closing, 0, 1) { + // already closing return nil } - // don't delete one giant batch of 7M objects, but rather do smaller batches - done := false - for i := 0; !done; i++ { - start := i * batchSize - end := start + batchSize - if end >= len(cids) { - end = len(cids) - done = true - } - - err := deleteBatch(cids[start:end]) - if err != nil { - return xerrors.Errorf("error deleting batch: %w", err) - } - } - - return nil -} - -func (s *SplitStore) purgeBlocks(cids []cid.Cid) error { - return s.purgeBatch(cids, s.hot.DeleteMany) -} - -func (s *SplitStore) purgeTracking(cids []cid.Cid) error { - return s.purgeBatch(cids, s.tracker.DeleteBatch) -} - -func (s *SplitStore) gcHotstore() { - if compact, ok := s.hot.(interface{ Compact() error }); ok { - log.Infof("compacting hotstore") - startCompact := time.Now() - err := compact.Compact() - if err != nil { - log.Warnf("error compacting hotstore: %s", err) - return + if atomic.LoadInt32(&s.compacting) == 1 { + log.Warn("close with ongoing compaction in 
progress; waiting for it to finish...") + for atomic.LoadInt32(&s.compacting) == 1 { + time.Sleep(time.Second) } - log.Infow("hotstore compaction done", "took", time.Since(startCompact)) } - if gc, ok := s.hot.(interface{ CollectGarbage() error }); ok { - log.Infof("garbage collecting hotstore") - startGC := time.Now() - err := gc.CollectGarbage() - if err != nil { - log.Warnf("error garbage collecting hotstore: %s", err) - return - } - log.Infow("hotstore garbage collection done", "took", time.Since(startGC)) - } + s.cancel() + return multierr.Combine(s.markSetEnv.Close(), s.debug.Close()) } -func (s *SplitStore) compactFull(curTs *types.TipSet) error { - currentEpoch := curTs.Height() - coldEpoch := s.baseEpoch + CompactionCold - boundaryEpoch := currentEpoch - CompactionBoundary - - log.Infow("running full compaction", "currentEpoch", currentEpoch, "baseEpoch", s.baseEpoch, "coldEpoch", coldEpoch, "boundaryEpoch", boundaryEpoch) - - // create two mark sets, one for marking the cold finality region - // and one for marking the hot region - hotSet, err := s.env.Create("hot", s.markSetSize) - if err != nil { - return xerrors.Errorf("error creating hot mark set: %w", err) - } - defer hotSet.Close() //nolint:errcheck - - coldSet, err := s.env.Create("cold", s.markSetSize) - if err != nil { - return xerrors.Errorf("error creating cold mark set: %w", err) - } - defer coldSet.Close() //nolint:errcheck - - // Phase 1: marking - log.Info("marking live blocks") - startMark := time.Now() - - // Phase 1a: mark all reachable CIDs in the hot range - boundaryTs, err := s.chain.GetTipsetByHeight(context.Background(), boundaryEpoch, curTs, true) - if err != nil { - return xerrors.Errorf("error getting tipset at boundary epoch: %w", err) - } - - count := int64(0) - err = s.chain.WalkSnapshot(context.Background(), boundaryTs, boundaryEpoch-coldEpoch, s.skipOldMsgs, s.skipMsgReceipts, - func(cid cid.Cid) error { - count++ - return hotSet.Mark(cid) - }) - - if err != nil { - return 
xerrors.Errorf("error marking hot blocks: %w", err) - } - - if count > s.markSetSize { - s.markSetSize = count + count>>2 // overestimate a bit - } - - // Phase 1b: mark all reachable CIDs in the cold range - coldTs, err := s.chain.GetTipsetByHeight(context.Background(), coldEpoch, curTs, true) - if err != nil { - return xerrors.Errorf("error getting tipset at cold epoch: %w", err) - } - - count = 0 - err = s.chain.WalkSnapshot(context.Background(), coldTs, CompactionCold, s.skipOldMsgs, s.skipMsgReceipts, - func(cid cid.Cid) error { - count++ - return coldSet.Mark(cid) - }) - - if err != nil { - return xerrors.Errorf("error marking cold blocks: %w", err) - } - - if count > s.markSetSize { - s.markSetSize = count + count>>2 // overestimate a bit - } - - log.Infow("marking done", "took", time.Since(startMark)) - - // Phase 2: sweep cold objects: - // - If a cold object is reachable in the hot range, it stays in the hotstore. - // - If a cold object is reachable in the cold range, it is moved to the coldstore. - // - If a cold object is unreachable, it is deleted if GC is enabled, otherwise moved to the coldstore. - log.Info("collecting cold objects") - startCollect := time.Now() - - // some stats for logging - var hotCnt, coldCnt, deadCnt int - - cold := make([]cid.Cid, 0, s.coldPurgeSize) - dead := make([]cid.Cid, 0, s.deadPurgeSize) - - // 2.1 iterate through the tracker and collect cold and dead objects - err = s.tracker.ForEach(func(cid cid.Cid, wrEpoch abi.ChainEpoch) error { - // is the object stil hot? 
- if wrEpoch > coldEpoch { - // yes, stay in the hotstore - hotCnt++ - return nil - } - - // the object is cold -- check whether it is reachable in the hot range - mark, err := hotSet.Has(cid) - if err != nil { - return xerrors.Errorf("error checking live mark for %s: %w", cid, err) - } - - if mark { - // the object is reachable in the hot range, stay in the hotstore - hotCnt++ - return nil - } - - // check whether it is reachable in the cold range - mark, err = coldSet.Has(cid) - if err != nil { - return xerrors.Errorf("error checkiing cold set for %s: %w", cid, err) - } - - if s.enableGC { - if mark { - // the object is reachable in the cold range, move it to the cold store - cold = append(cold, cid) - coldCnt++ - } else { - // the object is dead and will be deleted - dead = append(dead, cid) - deadCnt++ - } - } else { - // if GC is disabled, we move both cold and dead objects to the coldstore - cold = append(cold, cid) - if mark { - coldCnt++ - } else { - deadCnt++ - } - } - - return nil - }) - - if err != nil { - return xerrors.Errorf("error collecting cold objects: %w", err) - } - - if coldCnt > 0 { - s.coldPurgeSize = coldCnt + coldCnt>>2 // overestimate a bit - } - if deadCnt > 0 { - s.deadPurgeSize = deadCnt + deadCnt>>2 // overestimate a bit - } - - log.Infow("collection done", "took", time.Since(startCollect)) - log.Infow("compaction stats", "hot", hotCnt, "cold", coldCnt, "dead", deadCnt) - stats.Record(context.Background(), metrics.SplitstoreCompactionHot.M(int64(hotCnt))) - stats.Record(context.Background(), metrics.SplitstoreCompactionCold.M(int64(coldCnt))) - stats.Record(context.Background(), metrics.SplitstoreCompactionDead.M(int64(deadCnt))) - - // Enter critical section - atomic.StoreInt32(&s.critsection, 1) - defer atomic.StoreInt32(&s.critsection, 0) - - // check to see if we are closing first; if that's the case just return +func (s *SplitStore) checkClosing() error { if atomic.LoadInt32(&s.closing) == 1 { - log.Info("splitstore is closing; 
aborting compaction") - return xerrors.Errorf("compaction aborted") - } - - // 2.2 copy the cold objects to the coldstore - log.Info("moving cold objects to the coldstore") - startMove := time.Now() - err = s.moveColdBlocks(cold) - if err != nil { - return xerrors.Errorf("error moving cold blocks: %w", err) - } - log.Infow("moving done", "took", time.Since(startMove)) - - // 2.3 delete cold objects from the hotstore - log.Info("purging cold objects from the hotstore") - startPurge := time.Now() - err = s.purgeBlocks(cold) - if err != nil { - return xerrors.Errorf("error purging cold blocks: %w", err) - } - log.Infow("purging cold from hotstore done", "took", time.Since(startPurge)) - - // 2.4 remove the tracker tracking for cold objects - startPurge = time.Now() - log.Info("purging cold objects from tracker") - err = s.purgeTracking(cold) - if err != nil { - return xerrors.Errorf("error purging tracking for cold blocks: %w", err) - } - log.Infow("purging cold from tracker done", "took", time.Since(startPurge)) - - // 3. 
if we have dead objects, delete them from the hotstore and remove the tracking - if len(dead) > 0 { - log.Info("deleting dead objects") - err = s.purgeBlocks(dead) - if err != nil { - return xerrors.Errorf("error purging dead blocks: %w", err) - } - - // remove the tracker tracking - startPurge := time.Now() - log.Info("purging dead objects from tracker") - err = s.purgeTracking(dead) - if err != nil { - return xerrors.Errorf("error purging tracking for dead blocks: %w", err) - } - log.Infow("purging dead from tracker done", "took", time.Since(startPurge)) - } - - // we are done; do some housekeeping - err = s.tracker.Sync() - if err != nil { - return xerrors.Errorf("error syncing tracker: %w", err) - } - - s.gcHotstore() - - err = s.setBaseEpoch(coldEpoch) - if err != nil { - return xerrors.Errorf("error saving base epoch: %w", err) - } - - err = s.ds.Put(markSetSizeKey, int64ToBytes(s.markSetSize)) - if err != nil { - return xerrors.Errorf("error saving mark set size: %w", err) + return xerrors.Errorf("splitstore is closing") } return nil @@ -1053,33 +576,5 @@ func (s *SplitStore) compactFull(curTs *types.TipSet) error { func (s *SplitStore) setBaseEpoch(epoch abi.ChainEpoch) error { s.baseEpoch = epoch - // write to datastore return s.ds.Put(baseEpochKey, epochToBytes(epoch)) } - -func epochToBytes(epoch abi.ChainEpoch) []byte { - return uint64ToBytes(uint64(epoch)) -} - -func bytesToEpoch(buf []byte) abi.ChainEpoch { - return abi.ChainEpoch(bytesToUint64(buf)) -} - -func int64ToBytes(i int64) []byte { - return uint64ToBytes(uint64(i)) -} - -func bytesToInt64(buf []byte) int64 { - return int64(bytesToUint64(buf)) -} - -func uint64ToBytes(i uint64) []byte { - buf := make([]byte, 16) - n := binary.PutUvarint(buf, i) - return buf[:n] -} - -func bytesToUint64(buf []byte) uint64 { - i, _ := binary.Uvarint(buf) - return i -} diff --git a/blockstore/splitstore/splitstore_check.go b/blockstore/splitstore/splitstore_check.go new file mode 100644 index 
00000000000..8c38b07e9fe --- /dev/null +++ b/blockstore/splitstore/splitstore_check.go @@ -0,0 +1,150 @@ +package splitstore + +import ( + "fmt" + "os" + "path/filepath" + "sync/atomic" + "time" + + "golang.org/x/xerrors" + + cid "github.com/ipfs/go-cid" + + bstore "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/chain/types" +) + +// performs an asynchronous health-check on the splitstore; results are appended to +// /check.txt +func (s *SplitStore) Check() error { + s.headChangeMx.Lock() + defer s.headChangeMx.Unlock() + + // try to take compaction lock and inhibit compaction while the health-check is running + if !atomic.CompareAndSwapInt32(&s.compacting, 0, 1) { + return xerrors.Errorf("can't acquire compaction lock; compacting operation in progress") + } + + if s.compactionIndex == 0 { + atomic.StoreInt32(&s.compacting, 0) + return xerrors.Errorf("splitstore hasn't compacted yet; health check is not meaningful") + } + + // check if we are actually closing first + if err := s.checkClosing(); err != nil { + atomic.StoreInt32(&s.compacting, 0) + return err + } + + curTs := s.chain.GetHeaviestTipSet() + go func() { + defer atomic.StoreInt32(&s.compacting, 0) + + log.Info("checking splitstore health") + start := time.Now() + + err := s.doCheck(curTs) + if err != nil { + log.Errorf("error checking splitstore health: %s", err) + return + } + + log.Infow("health check done", "took", time.Since(start)) + }() + + return nil +} + +func (s *SplitStore) doCheck(curTs *types.TipSet) error { + currentEpoch := curTs.Height() + boundaryEpoch := currentEpoch - CompactionBoundary + + outputPath := filepath.Join(s.path, "check.txt") + output, err := os.OpenFile(outputPath, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644) + if err != nil { + return xerrors.Errorf("error opening check output file %s: %w", outputPath, err) + } + defer output.Close() //nolint:errcheck + + write := func(format string, args ...interface{}) { + _, err := fmt.Fprintf(output, 
format+"\n", args...) + if err != nil { + log.Warnf("error writing check output: %s", err) + } + } + + ts, _ := time.Now().MarshalText() + write("---------------------------------------------") + write("start check at %s", ts) + write("current epoch: %d", currentEpoch) + write("boundary epoch: %d", boundaryEpoch) + write("compaction index: %d", s.compactionIndex) + write("--") + + var coldCnt, missingCnt int64 + err = s.walkChain(curTs, boundaryEpoch, boundaryEpoch, + func(c cid.Cid) error { + if isUnitaryObject(c) { + return errStopWalk + } + + has, err := s.hot.Has(c) + if err != nil { + return xerrors.Errorf("error checking hotstore: %w", err) + } + + if has { + return nil + } + + has, err = s.cold.Has(c) + if err != nil { + return xerrors.Errorf("error checking coldstore: %w", err) + } + + if has { + coldCnt++ + write("cold object reference: %s", c) + } else { + missingCnt++ + write("missing object reference: %s", c) + return errStopWalk + } + + return nil + }) + + if err != nil { + err = xerrors.Errorf("error walking chain: %w", err) + write("ERROR: %s", err) + return err + } + + log.Infow("check done", "cold", coldCnt, "missing", missingCnt) + write("--") + write("cold: %d missing: %d", coldCnt, missingCnt) + write("DONE") + + return nil +} + +// provides some basic information about the splitstore +func (s *SplitStore) Info() map[string]interface{} { + info := make(map[string]interface{}) + info["base epoch"] = s.baseEpoch + info["warmup epoch"] = s.warmupEpoch + info["compactions"] = s.compactionIndex + + sizer, ok := s.hot.(bstore.BlockstoreSize) + if ok { + size, err := sizer.Size() + if err != nil { + log.Warnf("error getting hotstore size: %s", err) + } else { + info["hotstore size"] = size + } + } + + return info +} diff --git a/blockstore/splitstore/splitstore_compact.go b/blockstore/splitstore/splitstore_compact.go new file mode 100644 index 00000000000..b95459ea5ff --- /dev/null +++ b/blockstore/splitstore/splitstore_compact.go @@ -0,0 +1,1121 @@ 
+package splitstore + +import ( + "bytes" + "errors" + "runtime" + "sort" + "sync/atomic" + "time" + + "golang.org/x/sync/errgroup" + "golang.org/x/xerrors" + + blocks "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/go-state-types/abi" + bstore "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/metrics" + + "go.opencensus.io/stats" +) + +var ( + // CompactionThreshold is the number of epochs that need to have elapsed + // from the previously compacted epoch to trigger a new compaction. + // + // |················· CompactionThreshold ··················| + // | | + // =======‖≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡‖------------------------» + // | | chain --> ↑__ current epoch + // | archived epochs ___↑ + // ↑________ CompactionBoundary + // + // === :: cold (already archived) + // ≡≡≡ :: to be archived in this compaction + // --- :: hot + CompactionThreshold = 5 * build.Finality + + // CompactionBoundary is the number of epochs from the current epoch at which + // we will walk the chain for live objects. + CompactionBoundary = 4 * build.Finality + + // SyncGapTime is the time delay from a tipset's min timestamp before we decide + // there is a sync gap + SyncGapTime = time.Minute +) + +var ( + // used to signal end of walk + errStopWalk = errors.New("stop walk") +) + +const ( + batchSize = 16384 + + defaultColdPurgeSize = 7_000_000 +) + +func (s *SplitStore) HeadChange(_, apply []*types.TipSet) error { + s.headChangeMx.Lock() + defer s.headChangeMx.Unlock() + + // Revert only. + if len(apply) == 0 { + return nil + } + + curTs := apply[len(apply)-1] + epoch := curTs.Height() + + // NOTE: there is an implicit invariant assumption that HeadChange is invoked + // synchronously and no other HeadChange can be invoked while one is in + // progress. 
+ // this is guaranteed by the chainstore, and it is pervasive in all lotus + // -- if that ever changes then all hell will break loose in general and + // we will have a rance to protectTipSets here. + // Reagrdless, we put a mutex in HeadChange just to be safe + + if !atomic.CompareAndSwapInt32(&s.compacting, 0, 1) { + // we are currently compacting -- protect the new tipset(s) + s.protectTipSets(apply) + return nil + } + + // check if we are actually closing first + if atomic.LoadInt32(&s.closing) == 1 { + atomic.StoreInt32(&s.compacting, 0) + return nil + } + + timestamp := time.Unix(int64(curTs.MinTimestamp()), 0) + if time.Since(timestamp) > SyncGapTime { + // don't attempt compaction before we have caught up syncing + atomic.StoreInt32(&s.compacting, 0) + return nil + } + + if epoch-s.baseEpoch > CompactionThreshold { + // it's time to compact -- prepare the transaction and go! + s.beginTxnProtect() + go func() { + defer atomic.StoreInt32(&s.compacting, 0) + defer s.endTxnProtect() + + log.Info("compacting splitstore") + start := time.Now() + + s.compact(curTs) + + log.Infow("compaction done", "took", time.Since(start)) + }() + } else { + // no compaction necessary + atomic.StoreInt32(&s.compacting, 0) + } + + return nil +} + +// transactionally protect incoming tipsets +func (s *SplitStore) protectTipSets(apply []*types.TipSet) { + s.txnLk.RLock() + defer s.txnLk.RUnlock() + + if !s.txnActive { + return + } + + var cids []cid.Cid + for _, ts := range apply { + cids = append(cids, ts.Cids()...) 
+ } + + s.trackTxnRefMany(cids) +} + +// transactionally protect a view +func (s *SplitStore) protectView(c cid.Cid) { + s.txnLk.RLock() + defer s.txnLk.RUnlock() + + if s.txnActive { + s.trackTxnRef(c) + } + + s.txnViewsMx.Lock() + s.txnViews++ + s.txnViewsMx.Unlock() +} + +func (s *SplitStore) viewDone() { + s.txnViewsMx.Lock() + defer s.txnViewsMx.Unlock() + + s.txnViews-- + if s.txnViews == 0 && s.txnViewsWaiting { + s.txnViewsCond.Broadcast() + } +} + +func (s *SplitStore) viewWait() { + s.txnViewsMx.Lock() + defer s.txnViewsMx.Unlock() + + s.txnViewsWaiting = true + for s.txnViews > 0 { + s.txnViewsCond.Wait() + } + s.txnViewsWaiting = false +} + +// transactionally protect a reference to an object +func (s *SplitStore) trackTxnRef(c cid.Cid) { + if !s.txnActive { + // not compacting + return + } + + if isUnitaryObject(c) { + return + } + + s.txnRefsMx.Lock() + s.txnRefs[c] = struct{}{} + s.txnRefsMx.Unlock() +} + +// transactionally protect a batch of references +func (s *SplitStore) trackTxnRefMany(cids []cid.Cid) { + if !s.txnActive { + // not compacting + return + } + + s.txnRefsMx.Lock() + defer s.txnRefsMx.Unlock() + + for _, c := range cids { + if isUnitaryObject(c) { + continue + } + + s.txnRefs[c] = struct{}{} + } + + return +} + +// protect all pending transactional references +func (s *SplitStore) protectTxnRefs(markSet MarkSet) error { + for { + var txnRefs map[cid.Cid]struct{} + + s.txnRefsMx.Lock() + if len(s.txnRefs) > 0 { + txnRefs = s.txnRefs + s.txnRefs = make(map[cid.Cid]struct{}) + } + s.txnRefsMx.Unlock() + + if len(txnRefs) == 0 { + return nil + } + + log.Infow("protecting transactional references", "refs", len(txnRefs)) + count := 0 + workch := make(chan cid.Cid, len(txnRefs)) + startProtect := time.Now() + + for c := range txnRefs { + mark, err := markSet.Has(c) + if err != nil { + return xerrors.Errorf("error checking markset: %w", err) + } + + if mark { + continue + } + + workch <- c + count++ + } + close(workch) + + if count == 0 { 
+ return nil + } + + workers := runtime.NumCPU() / 2 + if workers < 2 { + workers = 2 + } + if workers > count { + workers = count + } + + worker := func() error { + for c := range workch { + err := s.doTxnProtect(c, markSet) + if err != nil { + return xerrors.Errorf("error protecting transactional references to %s: %w", c, err) + } + } + return nil + } + + g := new(errgroup.Group) + for i := 0; i < workers; i++ { + g.Go(worker) + } + + if err := g.Wait(); err != nil { + return err + } + + log.Infow("protecting transactional refs done", "took", time.Since(startProtect), "protected", count) + } +} + +// transactionally protect a reference by walking the object and marking. +// concurrent markings are short circuited by checking the markset. +func (s *SplitStore) doTxnProtect(root cid.Cid, markSet MarkSet) error { + if err := s.checkClosing(); err != nil { + return err + } + + // Note: cold objects are deleted heaviest first, so the consituents of an object + // cannot be deleted before the object itself. 
+ return s.walkObjectIncomplete(root, cid.NewSet(), + func(c cid.Cid) error { + if isUnitaryObject(c) { + return errStopWalk + } + + mark, err := markSet.Has(c) + if err != nil { + return xerrors.Errorf("error checking markset: %w", err) + } + + // it's marked, nothing to do + if mark { + return errStopWalk + } + + return markSet.Mark(c) + }, + func(c cid.Cid) error { + if s.txnMissing != nil { + log.Warnf("missing object reference %s in %s", c, root) + s.txnRefsMx.Lock() + s.txnMissing[c] = struct{}{} + s.txnRefsMx.Unlock() + } + return errStopWalk + }) +} + +func (s *SplitStore) applyProtectors() error { + s.mx.Lock() + defer s.mx.Unlock() + + count := 0 + for _, protect := range s.protectors { + err := protect(func(c cid.Cid) error { + s.trackTxnRef(c) + count++ + return nil + }) + + if err != nil { + return xerrors.Errorf("error applynig protector: %w", err) + } + } + + if count > 0 { + log.Infof("protected %d references through %d protectors", count, len(s.protectors)) + } + + return nil +} + +// --- Compaction --- +// Compaction works transactionally with the following algorithm: +// - We prepare a transaction, whereby all i/o referenced objects through the API are tracked. +// - We walk the chain and mark reachable objects, keeping 4 finalities of state roots and messages and all headers all the way to genesis. +// - Once the chain walk is complete, we begin full transaction protection with concurrent marking; we walk and mark all references created during the chain walk. On the same time, all I/O through the API concurrently marks objects as live references. +// - We collect cold objects by iterating through the hotstore and checking the mark set; if an object is not marked, then it is candidate for purge. +// - When running with a coldstore, we next copy all cold objects to the coldstore. 
+// - At this point we are ready to begin purging: +// - We sort cold objects heaviest first, so as to never delete the consituents of a DAG before the DAG itself (which would leave dangling references) +// - We delete in small batches taking a lock; each batch is checked again for marks, from the concurrent transactional mark, so as to never delete anything live +// - We then end the transaction and compact/gc the hotstore. +func (s *SplitStore) compact(curTs *types.TipSet) { + log.Info("waiting for active views to complete") + start := time.Now() + s.viewWait() + log.Infow("waiting for active views done", "took", time.Since(start)) + + start = time.Now() + err := s.doCompact(curTs) + took := time.Since(start).Milliseconds() + stats.Record(s.ctx, metrics.SplitstoreCompactionTimeSeconds.M(float64(took)/1e3)) + + if err != nil { + log.Errorf("COMPACTION ERROR: %s", err) + } +} + +func (s *SplitStore) doCompact(curTs *types.TipSet) error { + currentEpoch := curTs.Height() + boundaryEpoch := currentEpoch - CompactionBoundary + + var inclMsgsEpoch abi.ChainEpoch + inclMsgsRange := abi.ChainEpoch(s.cfg.HotStoreMessageRetention) * build.Finality + if inclMsgsRange < boundaryEpoch { + inclMsgsEpoch = boundaryEpoch - inclMsgsRange + } + + log.Infow("running compaction", "currentEpoch", currentEpoch, "baseEpoch", s.baseEpoch, "boundaryEpoch", boundaryEpoch, "inclMsgsEpoch", inclMsgsEpoch, "compactionIndex", s.compactionIndex) + + markSet, err := s.markSetEnv.Create("live", s.markSetSize) + if err != nil { + return xerrors.Errorf("error creating mark set: %w", err) + } + defer markSet.Close() //nolint:errcheck + defer s.debug.Flush() + + if err := s.checkClosing(); err != nil { + return err + } + + // we are ready for concurrent marking + s.beginTxnMarking(markSet) + + // 0. 
track all protected references at beginning of compaction; anything added later should + // be transactionally protected by the write + log.Info("protecting references with registered protectors") + err = s.applyProtectors() + if err != nil { + return err + } + + // 1. mark reachable objects by walking the chain from the current epoch; we keep state roots + // and messages until the boundary epoch. + log.Info("marking reachable objects") + startMark := time.Now() + + var count int64 + err = s.walkChain(curTs, boundaryEpoch, inclMsgsEpoch, + func(c cid.Cid) error { + if isUnitaryObject(c) { + return errStopWalk + } + + count++ + return markSet.Mark(c) + }) + + if err != nil { + return xerrors.Errorf("error marking: %w", err) + } + + s.markSetSize = count + count>>2 // overestimate a bit + + log.Infow("marking done", "took", time.Since(startMark), "marked", count) + + if err := s.checkClosing(); err != nil { + return err + } + + // 1.1 protect transactional refs + err = s.protectTxnRefs(markSet) + if err != nil { + return xerrors.Errorf("error protecting transactional refs: %w", err) + } + + if err := s.checkClosing(); err != nil { + return err + } + + // 2. iterate through the hotstore to collect cold objects + log.Info("collecting cold objects") + startCollect := time.Now() + + // some stats for logging + var hotCnt, coldCnt int + + cold := make([]cid.Cid, 0, s.coldPurgeSize) + err = s.hot.ForEachKey(func(c cid.Cid) error { + // was it marked? 
+ mark, err := markSet.Has(c) + if err != nil { + return xerrors.Errorf("error checking mark set for %s: %w", c, err) + } + + if mark { + hotCnt++ + return nil + } + + // it's cold, mark it as candidate for move + cold = append(cold, c) + coldCnt++ + + return nil + }) + + if err != nil { + return xerrors.Errorf("error collecting cold objects: %w", err) + } + + log.Infow("cold collection done", "took", time.Since(startCollect)) + + if coldCnt > 0 { + s.coldPurgeSize = coldCnt + coldCnt>>2 // overestimate a bit + } + + log.Infow("compaction stats", "hot", hotCnt, "cold", coldCnt) + stats.Record(s.ctx, metrics.SplitstoreCompactionHot.M(int64(hotCnt))) + stats.Record(s.ctx, metrics.SplitstoreCompactionCold.M(int64(coldCnt))) + + if err := s.checkClosing(); err != nil { + return err + } + + // now that we have collected cold objects, check for missing references from transactional i/o + // and disable further collection of such references (they will not be acted upon as we can't + // possibly delete objects we didn't have when we were collecting cold objects) + s.waitForMissingRefs(markSet) + + if err := s.checkClosing(); err != nil { + return err + } + + // 3. copy the cold objects to the coldstore -- if we have one + if !s.cfg.DiscardColdBlocks { + log.Info("moving cold objects to the coldstore") + startMove := time.Now() + err = s.moveColdBlocks(cold) + if err != nil { + return xerrors.Errorf("error moving cold objects: %w", err) + } + log.Infow("moving done", "took", time.Since(startMove)) + + if err := s.checkClosing(); err != nil { + return err + } + } + + // 4. sort cold objects so that the dags with most references are deleted first + // this ensures that we can't refer to a dag with its consituents already deleted, ie + // we lave no dangling references. 
+ log.Info("sorting cold objects") + startSort := time.Now() + err = s.sortObjects(cold) + if err != nil { + return xerrors.Errorf("error sorting objects: %w", err) + } + log.Infow("sorting done", "took", time.Since(startSort)) + + // 4.1 protect transactional refs once more + // strictly speaking, this is not necessary as purge will do it before deleting each + // batch. however, there is likely a largish number of references accumulated during + // ths sort and this protects before entering pruge context. + err = s.protectTxnRefs(markSet) + if err != nil { + return xerrors.Errorf("error protecting transactional refs: %w", err) + } + + if err := s.checkClosing(); err != nil { + return err + } + + // 5. purge cold objects from the hotstore, taking protected references into account + log.Info("purging cold objects from the hotstore") + startPurge := time.Now() + err = s.purge(cold, markSet) + if err != nil { + return xerrors.Errorf("error purging cold blocks: %w", err) + } + log.Infow("purging cold objects from hotstore done", "took", time.Since(startPurge)) + + // we are done; do some housekeeping + s.endTxnProtect() + s.gcHotstore() + + err = s.setBaseEpoch(boundaryEpoch) + if err != nil { + return xerrors.Errorf("error saving base epoch: %w", err) + } + + err = s.ds.Put(markSetSizeKey, int64ToBytes(s.markSetSize)) + if err != nil { + return xerrors.Errorf("error saving mark set size: %w", err) + } + + s.compactionIndex++ + err = s.ds.Put(compactionIndexKey, int64ToBytes(s.compactionIndex)) + if err != nil { + return xerrors.Errorf("error saving compaction index: %w", err) + } + + return nil +} + +func (s *SplitStore) beginTxnProtect() { + log.Info("preparing compaction transaction") + + s.txnLk.Lock() + defer s.txnLk.Unlock() + + s.txnActive = true + s.txnRefs = make(map[cid.Cid]struct{}) + s.txnMissing = make(map[cid.Cid]struct{}) +} + +func (s *SplitStore) beginTxnMarking(markSet MarkSet) { + markSet.SetConcurrent() + + s.txnLk.Lock() + s.txnProtect = markSet + 
s.txnLk.Unlock() +} + +func (s *SplitStore) endTxnProtect() { + s.txnLk.Lock() + defer s.txnLk.Unlock() + + if !s.txnActive { + return + } + + // release markset memory + if s.txnProtect != nil { + _ = s.txnProtect.Close() + } + + s.txnActive = false + s.txnProtect = nil + s.txnRefs = nil + s.txnMissing = nil +} + +func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEpoch, + f func(cid.Cid) error) error { + visited := cid.NewSet() + walked := cid.NewSet() + toWalk := ts.Cids() + walkCnt := 0 + scanCnt := 0 + + stopWalk := func(_ cid.Cid) error { return errStopWalk } + + walkBlock := func(c cid.Cid) error { + if !visited.Visit(c) { + return nil + } + + walkCnt++ + + if err := f(c); err != nil { + return err + } + + var hdr types.BlockHeader + err := s.view(c, func(data []byte) error { + return hdr.UnmarshalCBOR(bytes.NewBuffer(data)) + }) + + if err != nil { + return xerrors.Errorf("error unmarshaling block header (cid: %s): %w", c, err) + } + + // message are retained if within the inclMsgs boundary + if hdr.Height >= inclMsgs && hdr.Height > 0 { + if inclMsgs < inclState { + // we need to use walkObjectIncomplete here, as messages/receipts may be missing early on if we + // synced from snapshot and have a long HotStoreMessageRetentionPolicy. 
+ if err := s.walkObjectIncomplete(hdr.Messages, walked, f, stopWalk); err != nil { + return xerrors.Errorf("error walking messages (cid: %s): %w", hdr.Messages, err) + } + + if err := s.walkObjectIncomplete(hdr.ParentMessageReceipts, walked, f, stopWalk); err != nil { + return xerrors.Errorf("error walking messages receipts (cid: %s): %w", hdr.ParentMessageReceipts, err) + } + } else { + if err := s.walkObject(hdr.Messages, walked, f); err != nil { + return xerrors.Errorf("error walking messages (cid: %s): %w", hdr.Messages, err) + } + + if err := s.walkObject(hdr.ParentMessageReceipts, walked, f); err != nil { + return xerrors.Errorf("error walking message receipts (cid: %s): %w", hdr.ParentMessageReceipts, err) + } + } + } + + // state is only retained if within the inclState boundary, with the exception of genesis + if hdr.Height >= inclState || hdr.Height == 0 { + if err := s.walkObject(hdr.ParentStateRoot, walked, f); err != nil { + return xerrors.Errorf("error walking state root (cid: %s): %w", hdr.ParentStateRoot, err) + } + scanCnt++ + } + + if hdr.Height > 0 { + toWalk = append(toWalk, hdr.Parents...) 
+ } + + return nil + } + + for len(toWalk) > 0 { + // walking can take a while, so check this with every opportunity + if err := s.checkClosing(); err != nil { + return err + } + + walking := toWalk + toWalk = nil + for _, c := range walking { + if err := walkBlock(c); err != nil { + return xerrors.Errorf("error walking block (cid: %s): %w", c, err) + } + } + } + + log.Infow("chain walk done", "walked", walkCnt, "scanned", scanCnt) + + return nil +} + +func (s *SplitStore) walkObject(c cid.Cid, walked *cid.Set, f func(cid.Cid) error) error { + if !walked.Visit(c) { + return nil + } + + if err := f(c); err != nil { + if err == errStopWalk { + return nil + } + + return err + } + + if c.Prefix().Codec != cid.DagCBOR { + return nil + } + + // check this before recursing + if err := s.checkClosing(); err != nil { + return err + } + + var links []cid.Cid + err := s.view(c, func(data []byte) error { + return cbg.ScanForLinks(bytes.NewReader(data), func(c cid.Cid) { + links = append(links, c) + }) + }) + + if err != nil { + return xerrors.Errorf("error scanning linked block (cid: %s): %w", c, err) + } + + for _, c := range links { + err := s.walkObject(c, walked, f) + if err != nil { + return xerrors.Errorf("error walking link (cid: %s): %w", c, err) + } + } + + return nil +} + +// like walkObject, but the object may be potentially incomplete (references missing) +func (s *SplitStore) walkObjectIncomplete(c cid.Cid, walked *cid.Set, f, missing func(cid.Cid) error) error { + if !walked.Visit(c) { + return nil + } + + // occurs check -- only for DAGs + if c.Prefix().Codec == cid.DagCBOR { + has, err := s.has(c) + if err != nil { + return xerrors.Errorf("error occur checking %s: %w", c, err) + } + + if !has { + err = missing(c) + if err == errStopWalk { + return nil + } + + return err + } + } + + if err := f(c); err != nil { + if err == errStopWalk { + return nil + } + + return err + } + + if c.Prefix().Codec != cid.DagCBOR { + return nil + } + + // check this before 
recursing + if err := s.checkClosing(); err != nil { + return err + } + + var links []cid.Cid + err := s.view(c, func(data []byte) error { + return cbg.ScanForLinks(bytes.NewReader(data), func(c cid.Cid) { + links = append(links, c) + }) + }) + + if err != nil { + return xerrors.Errorf("error scanning linked block (cid: %s): %w", c, err) + } + + for _, c := range links { + err := s.walkObjectIncomplete(c, walked, f, missing) + if err != nil { + return xerrors.Errorf("error walking link (cid: %s): %w", c, err) + } + } + + return nil +} + +// internal version used by walk +func (s *SplitStore) view(c cid.Cid, cb func([]byte) error) error { + if isIdentiyCid(c) { + data, err := decodeIdentityCid(c) + if err != nil { + return err + } + + return cb(data) + } + + err := s.hot.View(c, cb) + switch err { + case bstore.ErrNotFound: + return s.cold.View(c, cb) + + default: + return err + } +} + +func (s *SplitStore) has(c cid.Cid) (bool, error) { + if isIdentiyCid(c) { + return true, nil + } + + has, err := s.hot.Has(c) + + if has || err != nil { + return has, err + } + + return s.cold.Has(c) +} + +func (s *SplitStore) moveColdBlocks(cold []cid.Cid) error { + batch := make([]blocks.Block, 0, batchSize) + + for _, c := range cold { + if err := s.checkClosing(); err != nil { + return err + } + + blk, err := s.hot.Get(c) + if err != nil { + if err == bstore.ErrNotFound { + log.Warnf("hotstore missing block %s", c) + continue + } + + return xerrors.Errorf("error retrieving block %s from hotstore: %w", c, err) + } + + batch = append(batch, blk) + if len(batch) == batchSize { + err = s.cold.PutMany(batch) + if err != nil { + return xerrors.Errorf("error putting batch to coldstore: %w", err) + } + batch = batch[:0] + } + } + + if len(batch) > 0 { + err := s.cold.PutMany(batch) + if err != nil { + return xerrors.Errorf("error putting batch to coldstore: %w", err) + } + } + + return nil +} + +// sorts a slice of objects heaviest first -- it's a little expensive but worth the +// 
guarantee that we don't leave dangling references behind, e.g. if we die in the middle +// of a purge. +func (s *SplitStore) sortObjects(cids []cid.Cid) error { + // we cache the keys to avoid making a gazillion of strings + keys := make(map[cid.Cid]string) + key := func(c cid.Cid) string { + s, ok := keys[c] + if !ok { + s = string(c.Hash()) + keys[c] = s + } + return s + } + + // compute sorting weights as the cumulative number of DAG links + weights := make(map[string]int) + for _, c := range cids { + // this can take quite a while, so check for shutdown with every opportunity + if err := s.checkClosing(); err != nil { + return err + } + + w := s.getObjectWeight(c, weights, key) + weights[key(c)] = w + } + + // sort! + sort.Slice(cids, func(i, j int) bool { + wi := weights[key(cids[i])] + wj := weights[key(cids[j])] + if wi == wj { + return bytes.Compare(cids[i].Hash(), cids[j].Hash()) > 0 + } + + return wi > wj + }) + + return nil +} + +func (s *SplitStore) getObjectWeight(c cid.Cid, weights map[string]int, key func(cid.Cid) string) int { + w, ok := weights[key(c)] + if ok { + return w + } + + // we treat block headers specially to avoid walking the entire chain + var hdr types.BlockHeader + err := s.view(c, func(data []byte) error { + return hdr.UnmarshalCBOR(bytes.NewBuffer(data)) + }) + if err == nil { + w1 := s.getObjectWeight(hdr.ParentStateRoot, weights, key) + weights[key(hdr.ParentStateRoot)] = w1 + + w2 := s.getObjectWeight(hdr.Messages, weights, key) + weights[key(hdr.Messages)] = w2 + + return 1 + w1 + w2 + } + + var links []cid.Cid + err = s.view(c, func(data []byte) error { + return cbg.ScanForLinks(bytes.NewReader(data), func(c cid.Cid) { + links = append(links, c) + }) + }) + if err != nil { + return 1 + } + + w = 1 + for _, c := range links { + // these are internal refs, so dags will be dags + if c.Prefix().Codec != cid.DagCBOR { + w++ + continue + } + + wc := s.getObjectWeight(c, weights, key) + weights[key(c)] = wc + + w += wc + } + + return 
w +} + +func (s *SplitStore) purgeBatch(cids []cid.Cid, deleteBatch func([]cid.Cid) error) error { + if len(cids) == 0 { + return nil + } + + // we don't delete one giant batch of millions of objects, but rather do smaller batches + // so that we don't stop the world for an extended period of time + done := false + for i := 0; !done; i++ { + start := i * batchSize + end := start + batchSize + if end >= len(cids) { + end = len(cids) + done = true + } + + err := deleteBatch(cids[start:end]) + if err != nil { + return xerrors.Errorf("error deleting batch: %w", err) + } + } + + return nil +} + +func (s *SplitStore) purge(cids []cid.Cid, markSet MarkSet) error { + deadCids := make([]cid.Cid, 0, batchSize) + var purgeCnt, liveCnt int + defer func() { + log.Infow("purged cold objects", "purged", purgeCnt, "live", liveCnt) + }() + + return s.purgeBatch(cids, + func(cids []cid.Cid) error { + deadCids := deadCids[:0] + + for { + if err := s.checkClosing(); err != nil { + return err + } + + s.txnLk.Lock() + if len(s.txnRefs) == 0 { + // keep the lock! + break + } + + // unlock and protect + s.txnLk.Unlock() + + err := s.protectTxnRefs(markSet) + if err != nil { + return xerrors.Errorf("error protecting transactional refs: %w", err) + } + } + + defer s.txnLk.Unlock() + + for _, c := range cids { + live, err := markSet.Has(c) + if err != nil { + return xerrors.Errorf("error checking for liveness: %w", err) + } + + if live { + liveCnt++ + continue + } + + deadCids = append(deadCids, c) + } + + err := s.hot.DeleteMany(deadCids) + if err != nil { + return xerrors.Errorf("error purging cold objects: %w", err) + } + + s.debug.LogDelete(deadCids) + + purgeCnt += len(deadCids) + return nil + }) +} + +// I really don't like having this code, but we seem to have some occasional DAG references with +// missing constituents. During testing in mainnet *some* of these references *sometimes* appeared +// after a little bit. 
+// We need to figure out where they are coming from and eliminate that vector, but until then we +// have this gem[TM]. +// My best guess is that they are parent message receipts or yet to be computed state roots; magik +// thinks the cause may be block validation. +func (s *SplitStore) waitForMissingRefs(markSet MarkSet) { + s.txnLk.Lock() + missing := s.txnMissing + s.txnMissing = nil + s.txnLk.Unlock() + + if len(missing) == 0 { + return + } + + log.Info("waiting for missing references") + start := time.Now() + count := 0 + defer func() { + log.Infow("waiting for missing references done", "took", time.Since(start), "marked", count) + }() + + for i := 0; i < 3 && len(missing) > 0; i++ { + if err := s.checkClosing(); err != nil { + return + } + + wait := time.Duration(i) * time.Minute + log.Infof("retrying for %d missing references in %s (attempt: %d)", len(missing), wait, i+1) + if wait > 0 { + time.Sleep(wait) + } + + towalk := missing + walked := cid.NewSet() + missing = make(map[cid.Cid]struct{}) + + for c := range towalk { + err := s.walkObjectIncomplete(c, walked, + func(c cid.Cid) error { + if isUnitaryObject(c) { + return errStopWalk + } + + mark, err := markSet.Has(c) + if err != nil { + return xerrors.Errorf("error checking markset for %s: %w", c, err) + } + + if mark { + return errStopWalk + } + + count++ + return markSet.Mark(c) + }, + func(c cid.Cid) error { + missing[c] = struct{}{} + return errStopWalk + }) + + if err != nil { + log.Warnf("error marking: %s", err) + } + } + } + + if len(missing) > 0 { + log.Warnf("still missing %d references", len(missing)) + for c := range missing { + log.Warnf("unresolved missing reference: %s", c) + } + } +} diff --git a/blockstore/splitstore/splitstore_expose.go b/blockstore/splitstore/splitstore_expose.go new file mode 100644 index 00000000000..1065e460c2d --- /dev/null +++ b/blockstore/splitstore/splitstore_expose.go @@ -0,0 +1,114 @@ +package splitstore + +import ( + "context" + "errors" + + blocks 
"github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" + + bstore "github.com/filecoin-project/lotus/blockstore" +) + +type exposedSplitStore struct { + s *SplitStore +} + +var _ bstore.Blockstore = (*exposedSplitStore)(nil) + +func (s *SplitStore) Expose() bstore.Blockstore { + return &exposedSplitStore{s: s} +} + +func (es *exposedSplitStore) DeleteBlock(_ cid.Cid) error { + return errors.New("DeleteBlock: operation not supported") +} + +func (es *exposedSplitStore) DeleteMany(_ []cid.Cid) error { + return errors.New("DeleteMany: operation not supported") +} + +func (es *exposedSplitStore) Has(c cid.Cid) (bool, error) { + if isIdentiyCid(c) { + return true, nil + } + + has, err := es.s.hot.Has(c) + if has || err != nil { + return has, err + } + + return es.s.cold.Has(c) +} + +func (es *exposedSplitStore) Get(c cid.Cid) (blocks.Block, error) { + if isIdentiyCid(c) { + data, err := decodeIdentityCid(c) + if err != nil { + return nil, err + } + + return blocks.NewBlockWithCid(data, c) + } + + blk, err := es.s.hot.Get(c) + switch err { + case bstore.ErrNotFound: + return es.s.cold.Get(c) + default: + return blk, err + } +} + +func (es *exposedSplitStore) GetSize(c cid.Cid) (int, error) { + if isIdentiyCid(c) { + data, err := decodeIdentityCid(c) + if err != nil { + return 0, err + } + + return len(data), nil + } + + size, err := es.s.hot.GetSize(c) + switch err { + case bstore.ErrNotFound: + return es.s.cold.GetSize(c) + default: + return size, err + } +} + +func (es *exposedSplitStore) Put(blk blocks.Block) error { + return es.s.Put(blk) +} + +func (es *exposedSplitStore) PutMany(blks []blocks.Block) error { + return es.s.PutMany(blks) +} + +func (es *exposedSplitStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + return es.s.AllKeysChan(ctx) +} + +func (es *exposedSplitStore) HashOnRead(enabled bool) {} + +func (es *exposedSplitStore) View(c cid.Cid, f func([]byte) error) error { + if isIdentiyCid(c) { + data, err := decodeIdentityCid(c) + 
if err != nil { + return err + } + + return f(data) + } + + err := es.s.hot.View(c, f) + switch err { + case bstore.ErrNotFound: + return es.s.cold.View(c, f) + + default: + return err + } +} diff --git a/blockstore/splitstore/splitstore_gc.go b/blockstore/splitstore/splitstore_gc.go new file mode 100644 index 00000000000..2e1ffd4adcf --- /dev/null +++ b/blockstore/splitstore/splitstore_gc.go @@ -0,0 +1,35 @@ +package splitstore + +import ( + "fmt" + "time" + + bstore "github.com/filecoin-project/lotus/blockstore" +) + +func (s *SplitStore) gcHotstore() { + var opts []bstore.BlockstoreGCOption + if s.cfg.HotStoreFullGCFrequency > 0 && s.compactionIndex%int64(s.cfg.HotStoreFullGCFrequency) == 0 { + opts = append(opts, bstore.WithFullGC(true)) + } + + if err := s.gcBlockstore(s.hot, opts); err != nil { + log.Warnf("error garbage collecting hostore: %s", err) + } +} + +func (s *SplitStore) gcBlockstore(b bstore.Blockstore, opts []bstore.BlockstoreGCOption) error { + if gc, ok := b.(bstore.BlockstoreGC); ok { + log.Info("garbage collecting blockstore") + startGC := time.Now() + + if err := gc.CollectGarbage(opts...); err != nil { + return err + } + + log.Infow("garbage collecting hotstore done", "took", time.Since(startGC)) + return nil + } + + return fmt.Errorf("blockstore doesn't support garbage collection: %T", b) +} diff --git a/blockstore/splitstore/splitstore_test.go b/blockstore/splitstore/splitstore_test.go index dcaf276474d..df9984d4117 100644 --- a/blockstore/splitstore/splitstore_test.go +++ b/blockstore/splitstore/splitstore_test.go @@ -2,6 +2,7 @@ package splitstore import ( "context" + "errors" "fmt" "sync" "sync/atomic" @@ -13,6 +14,7 @@ import ( "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types/mock" + blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" datastore "github.com/ipfs/go-datastore" dssync "github.com/ipfs/go-datastore/sync" @@ -21,23 +23,36 @@ import ( func init() { 
CompactionThreshold = 5 - CompactionCold = 1 CompactionBoundary = 2 + WarmupBoundary = 0 logging.SetLogLevel("splitstore", "DEBUG") } func testSplitStore(t *testing.T, cfg *Config) { chain := &mockChain{t: t} + + // the myriads of stores + ds := dssync.MutexWrap(datastore.NewMapDatastore()) + hot := newMockStore() + cold := newMockStore() + + // this is necessary to avoid the garbage mock puts in the blocks + garbage := blocks.NewBlock([]byte{1, 2, 3}) + err := cold.Put(garbage) + if err != nil { + t.Fatal(err) + } + // genesis genBlock := mock.MkBlock(nil, 0, 0) + genBlock.Messages = garbage.Cid() + genBlock.ParentMessageReceipts = garbage.Cid() + genBlock.ParentStateRoot = garbage.Cid() + genBlock.Timestamp = uint64(time.Now().Unix()) + genTs := mock.TipSet(genBlock) chain.push(genTs) - // the myriads of stores - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - hot := blockstore.NewMemorySync() - cold := blockstore.NewMemorySync() - // put the genesis block to cold store blk, err := genBlock.ToStorageBlock() if err != nil { @@ -49,6 +64,20 @@ func testSplitStore(t *testing.T, cfg *Config) { t.Fatal(err) } + // create a garbage block that is protected with a rgistered protector + protected := blocks.NewBlock([]byte("protected!")) + err = hot.Put(protected) + if err != nil { + t.Fatal(err) + } + + // and another one that is not protected + unprotected := blocks.NewBlock([]byte("unprotected!")) + err = hot.Put(unprotected) + if err != nil { + t.Fatal(err) + } + // open the splitstore ss, err := Open("", ds, hot, cold, cfg) if err != nil { @@ -56,18 +85,33 @@ func testSplitStore(t *testing.T, cfg *Config) { } defer ss.Close() //nolint + // register our protector + ss.AddProtector(func(protect func(cid.Cid) error) error { + return protect(protected.Cid()) + }) + err = ss.Start(chain) if err != nil { t.Fatal(err) } // make some tipsets, but not enough to cause compaction - mkBlock := func(curTs *types.TipSet, i int) *types.TipSet { + mkBlock := func(curTs 
*types.TipSet, i int, stateRoot blocks.Block) *types.TipSet { blk := mock.MkBlock(curTs, uint64(i), uint64(i)) + + blk.Messages = garbage.Cid() + blk.ParentMessageReceipts = garbage.Cid() + blk.ParentStateRoot = stateRoot.Cid() + blk.Timestamp = uint64(time.Now().Unix()) + sblk, err := blk.ToStorageBlock() if err != nil { t.Fatal(err) } + err = ss.Put(stateRoot) + if err != nil { + t.Fatal(err) + } err = ss.Put(sblk) if err != nil { t.Fatal(err) @@ -78,18 +122,6 @@ func testSplitStore(t *testing.T, cfg *Config) { return ts } - mkGarbageBlock := func(curTs *types.TipSet, i int) { - blk := mock.MkBlock(curTs, uint64(i), uint64(i)) - sblk, err := blk.ToStorageBlock() - if err != nil { - t.Fatal(err) - } - err = ss.Put(sblk) - if err != nil { - t.Fatal(err) - } - } - waitForCompaction := func() { for atomic.LoadInt32(&ss.compacting) == 1 { time.Sleep(100 * time.Millisecond) @@ -98,105 +130,101 @@ func testSplitStore(t *testing.T, cfg *Config) { curTs := genTs for i := 1; i < 5; i++ { - curTs = mkBlock(curTs, i) + stateRoot := blocks.NewBlock([]byte{byte(i), 3, 3, 7}) + curTs = mkBlock(curTs, i, stateRoot) waitForCompaction() } - mkGarbageBlock(genTs, 1) - // count objects in the cold and hot stores - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - countBlocks := func(bs blockstore.Blockstore) int { count := 0 - ch, err := bs.AllKeysChan(ctx) - if err != nil { - t.Fatal(err) - } - for range ch { + _ = bs.(blockstore.BlockstoreIterator).ForEachKey(func(_ cid.Cid) error { count++ - } + return nil + }) return count } coldCnt := countBlocks(cold) hotCnt := countBlocks(hot) - if coldCnt != 1 { - t.Errorf("expected %d blocks, but got %d", 1, coldCnt) + if coldCnt != 2 { + t.Errorf("expected %d blocks, but got %d", 2, coldCnt) } - if hotCnt != 5 { - t.Errorf("expected %d blocks, but got %d", 5, hotCnt) + if hotCnt != 12 { + t.Errorf("expected %d blocks, but got %d", 12, hotCnt) } // trigger a compaction for i := 5; i < 10; i++ { - curTs = 
mkBlock(curTs, i) + stateRoot := blocks.NewBlock([]byte{byte(i), 3, 3, 7}) + curTs = mkBlock(curTs, i, stateRoot) waitForCompaction() } coldCnt = countBlocks(cold) hotCnt = countBlocks(hot) - if !cfg.EnableFullCompaction { - if coldCnt != 5 { - t.Errorf("expected %d cold blocks, but got %d", 5, coldCnt) - } + if coldCnt != 6 { + t.Errorf("expected %d cold blocks, but got %d", 6, coldCnt) + } - if hotCnt != 5 { - t.Errorf("expected %d hot blocks, but got %d", 5, hotCnt) - } + if hotCnt != 18 { + t.Errorf("expected %d hot blocks, but got %d", 18, hotCnt) } - if cfg.EnableFullCompaction && !cfg.EnableGC { - if coldCnt != 3 { - t.Errorf("expected %d cold blocks, but got %d", 3, coldCnt) - } + // ensure our protected block is still there + has, err := hot.Has(protected.Cid()) + if err != nil { + t.Fatal(err) + } - if hotCnt != 7 { - t.Errorf("expected %d hot blocks, but got %d", 7, hotCnt) - } + if !has { + t.Fatal("protected block is missing from hotstore") } - if cfg.EnableFullCompaction && cfg.EnableGC { - if coldCnt != 2 { - t.Errorf("expected %d cold blocks, but got %d", 2, coldCnt) - } + // ensure our unprotected block is in the coldstore now + has, err = hot.Has(unprotected.Cid()) + if err != nil { + t.Fatal(err) + } - if hotCnt != 7 { - t.Errorf("expected %d hot blocks, but got %d", 7, hotCnt) - } + if has { + t.Fatal("unprotected block is still in hotstore") + } + + has, err = cold.Has(unprotected.Cid()) + if err != nil { + t.Fatal(err) + } + + if !has { + t.Fatal("unprotected block is missing from coldstore") } // Make sure we can revert without panicking. 
chain.revert(2) } -func TestSplitStoreSimpleCompaction(t *testing.T) { - testSplitStore(t, &Config{TrackingStoreType: "mem"}) +func TestSplitStoreCompaction(t *testing.T) { + testSplitStore(t, &Config{MarkSetType: "map"}) } -func TestSplitStoreFullCompactionWithoutGC(t *testing.T) { - testSplitStore(t, &Config{ - TrackingStoreType: "mem", - EnableFullCompaction: true, - }) -} - -func TestSplitStoreFullCompactionWithGC(t *testing.T) { - testSplitStore(t, &Config{ - TrackingStoreType: "mem", - EnableFullCompaction: true, - EnableGC: true, +func TestSplitStoreCompactionWithBadger(t *testing.T) { + bs := badgerMarkSetBatchSize + badgerMarkSetBatchSize = 1 + t.Cleanup(func() { + badgerMarkSetBatchSize = bs }) + testSplitStore(t, &Config{MarkSetType: "badger"}) } type mockChain struct { t testing.TB sync.Mutex + genesis *types.BlockHeader tipsets []*types.TipSet listener func(revert []*types.TipSet, apply []*types.TipSet) error } @@ -204,6 +232,9 @@ type mockChain struct { func (c *mockChain) push(ts *types.TipSet) { c.Lock() c.tipsets = append(c.tipsets, ts) + if c.genesis == nil { + c.genesis = ts.Blocks()[0] + } c.Unlock() if c.listener != nil { @@ -242,7 +273,7 @@ func (c *mockChain) GetTipsetByHeight(_ context.Context, epoch abi.ChainEpoch, _ return nil, fmt.Errorf("bad epoch %d", epoch) } - return c.tipsets[iEpoch-1], nil + return c.tipsets[iEpoch], nil } func (c *mockChain) GetHeaviestTipSet() *types.TipSet { @@ -256,24 +287,105 @@ func (c *mockChain) SubscribeHeadChanges(change func(revert []*types.TipSet, app c.listener = change } -func (c *mockChain) WalkSnapshot(_ context.Context, ts *types.TipSet, epochs abi.ChainEpoch, _ bool, _ bool, f func(cid.Cid) error) error { - c.Lock() - defer c.Unlock() +type mockStore struct { + mx sync.Mutex + set map[cid.Cid]blocks.Block +} + +func newMockStore() *mockStore { + return &mockStore{set: make(map[cid.Cid]blocks.Block)} +} + +func (b *mockStore) Has(cid cid.Cid) (bool, error) { + b.mx.Lock() + defer b.mx.Unlock() + _, 
ok := b.set[cid] + return ok, nil +} + +func (b *mockStore) HashOnRead(hor bool) {} + +func (b *mockStore) Get(cid cid.Cid) (blocks.Block, error) { + b.mx.Lock() + defer b.mx.Unlock() + + blk, ok := b.set[cid] + if !ok { + return nil, blockstore.ErrNotFound + } + return blk, nil +} + +func (b *mockStore) GetSize(cid cid.Cid) (int, error) { + blk, err := b.Get(cid) + if err != nil { + return 0, err + } + + return len(blk.RawData()), nil +} - start := int(ts.Height()) - 1 - end := start - int(epochs) - if end < 0 { - end = -1 - } - for i := start; i > end; i-- { - ts := c.tipsets[i] - for _, cid := range ts.Cids() { - err := f(cid) - if err != nil { - return err - } +func (b *mockStore) View(cid cid.Cid, f func([]byte) error) error { + blk, err := b.Get(cid) + if err != nil { + return err + } + return f(blk.RawData()) +} + +func (b *mockStore) Put(blk blocks.Block) error { + b.mx.Lock() + defer b.mx.Unlock() + + b.set[blk.Cid()] = blk + return nil +} + +func (b *mockStore) PutMany(blks []blocks.Block) error { + b.mx.Lock() + defer b.mx.Unlock() + + for _, blk := range blks { + b.set[blk.Cid()] = blk + } + return nil +} + +func (b *mockStore) DeleteBlock(cid cid.Cid) error { + b.mx.Lock() + defer b.mx.Unlock() + + delete(b.set, cid) + return nil +} + +func (b *mockStore) DeleteMany(cids []cid.Cid) error { + b.mx.Lock() + defer b.mx.Unlock() + + for _, c := range cids { + delete(b.set, c) + } + return nil +} + +func (b *mockStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + return nil, errors.New("not implemented") +} + +func (b *mockStore) ForEachKey(f func(cid.Cid) error) error { + b.mx.Lock() + defer b.mx.Unlock() + + for c := range b.set { + err := f(c) + if err != nil { + return err } } + return nil +} +func (b *mockStore) Close() error { return nil } diff --git a/blockstore/splitstore/splitstore_util.go b/blockstore/splitstore/splitstore_util.go new file mode 100644 index 00000000000..aef845832c0 --- /dev/null +++ 
b/blockstore/splitstore/splitstore_util.go @@ -0,0 +1,67 @@ +package splitstore + +import ( + "encoding/binary" + + "golang.org/x/xerrors" + + cid "github.com/ipfs/go-cid" + mh "github.com/multiformats/go-multihash" + + "github.com/filecoin-project/go-state-types/abi" +) + +func epochToBytes(epoch abi.ChainEpoch) []byte { + return uint64ToBytes(uint64(epoch)) +} + +func bytesToEpoch(buf []byte) abi.ChainEpoch { + return abi.ChainEpoch(bytesToUint64(buf)) +} + +func int64ToBytes(i int64) []byte { + return uint64ToBytes(uint64(i)) +} + +func bytesToInt64(buf []byte) int64 { + return int64(bytesToUint64(buf)) +} + +func uint64ToBytes(i uint64) []byte { + buf := make([]byte, 16) + n := binary.PutUvarint(buf, i) + return buf[:n] +} + +func bytesToUint64(buf []byte) uint64 { + i, _ := binary.Uvarint(buf) + return i +} + +func isUnitaryObject(c cid.Cid) bool { + pre := c.Prefix() + switch pre.Codec { + case cid.FilCommitmentSealed, cid.FilCommitmentUnsealed: + return true + default: + return pre.MhType == mh.IDENTITY + } +} + +func isIdentiyCid(c cid.Cid) bool { + return c.Prefix().MhType == mh.IDENTITY +} + +func decodeIdentityCid(c cid.Cid) ([]byte, error) { + dmh, err := mh.Decode(c.Hash()) + if err != nil { + return nil, xerrors.Errorf("error decoding identity cid %s: %w", c, err) + } + + // sanity check + if dmh.Code != mh.IDENTITY { + return nil, xerrors.Errorf("error decoding identity cid %s: hash type is not identity", c) + } + + return dmh.Digest, nil +} diff --git a/blockstore/splitstore/splitstore_warmup.go b/blockstore/splitstore/splitstore_warmup.go new file mode 100644 index 00000000000..2079a547473 --- /dev/null +++ b/blockstore/splitstore/splitstore_warmup.go @@ -0,0 +1,137 @@ +package splitstore + +import ( + "sync/atomic" + "time" + + "golang.org/x/xerrors" + + blocks "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-state-types/abi" + bstore "github.com/filecoin-project/lotus/blockstore" + 
"github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/types" +) + +var ( + // WarmupBoundary is the number of epochs to load state during warmup. + WarmupBoundary = build.Finality +) + +// warmup acuiqres the compaction lock and spawns a goroutine to warm up the hotstore; +// this is necessary when we sync from a snapshot or when we enable the splitstore +// on top of an existing blockstore (which becomes the coldstore). +func (s *SplitStore) warmup(curTs *types.TipSet) error { + if !atomic.CompareAndSwapInt32(&s.compacting, 0, 1) { + return xerrors.Errorf("error locking compaction") + } + + go func() { + defer atomic.StoreInt32(&s.compacting, 0) + + log.Info("warming up hotstore") + start := time.Now() + + err := s.doWarmup(curTs) + if err != nil { + log.Errorf("error warming up hotstore: %s", err) + return + } + + log.Infow("warm up done", "took", time.Since(start)) + }() + + return nil +} + +// the actual warmup procedure; it walks the chain loading all state roots at the boundary +// and headers all the way up to genesis. +// objects are written in batches so as to minimize overhead. 
+func (s *SplitStore) doWarmup(curTs *types.TipSet) error { + var boundaryEpoch abi.ChainEpoch + epoch := curTs.Height() + if WarmupBoundary < epoch { + boundaryEpoch = epoch - WarmupBoundary + } + batchHot := make([]blocks.Block, 0, batchSize) + count := int64(0) + xcount := int64(0) + missing := int64(0) + err := s.walkChain(curTs, boundaryEpoch, epoch+1, // we don't load messages/receipts in warmup + func(c cid.Cid) error { + if isUnitaryObject(c) { + return errStopWalk + } + + count++ + + has, err := s.hot.Has(c) + if err != nil { + return err + } + + if has { + return nil + } + + blk, err := s.cold.Get(c) + if err != nil { + if err == bstore.ErrNotFound { + missing++ + return errStopWalk + } + return err + } + + xcount++ + + batchHot = append(batchHot, blk) + if len(batchHot) == batchSize { + err = s.hot.PutMany(batchHot) + if err != nil { + return err + } + batchHot = batchHot[:0] + } + + return nil + }) + + if err != nil { + return err + } + + if len(batchHot) > 0 { + err = s.hot.PutMany(batchHot) + if err != nil { + return err + } + } + + log.Infow("warmup stats", "visited", count, "warm", xcount, "missing", missing) + + s.markSetSize = count + count>>2 // overestimate a bit + err = s.ds.Put(markSetSizeKey, int64ToBytes(s.markSetSize)) + if err != nil { + log.Warnf("error saving mark set size: %s", err) + } + + // save the warmup epoch + err = s.ds.Put(warmupEpochKey, epochToBytes(epoch)) + if err != nil { + return xerrors.Errorf("error saving warm up epoch: %w", err) + } + s.mx.Lock() + s.warmupEpoch = epoch + s.mx.Unlock() + + // also save the compactionIndex, as this is used as an indicator of warmup for upgraded nodes + err = s.ds.Put(compactionIndexKey, int64ToBytes(s.compactionIndex)) + if err != nil { + return xerrors.Errorf("error saving compaction index: %w", err) + } + + return nil +} diff --git a/blockstore/splitstore/tracking.go b/blockstore/splitstore/tracking.go deleted file mode 100644 index d57fd45ef6a..00000000000 --- 
a/blockstore/splitstore/tracking.go +++ /dev/null @@ -1,109 +0,0 @@ -package splitstore - -import ( - "path/filepath" - "sync" - - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-state-types/abi" - cid "github.com/ipfs/go-cid" -) - -// TrackingStore is a persistent store that tracks blocks that are added -// to the hotstore, tracking the epoch at which they are written. -type TrackingStore interface { - Put(cid.Cid, abi.ChainEpoch) error - PutBatch([]cid.Cid, abi.ChainEpoch) error - Get(cid.Cid) (abi.ChainEpoch, error) - Delete(cid.Cid) error - DeleteBatch([]cid.Cid) error - ForEach(func(cid.Cid, abi.ChainEpoch) error) error - Sync() error - Close() error -} - -// OpenTrackingStore opens a tracking store of the specified type in the -// specified path. -func OpenTrackingStore(path string, ttype string) (TrackingStore, error) { - switch ttype { - case "", "bolt": - return OpenBoltTrackingStore(filepath.Join(path, "tracker.bolt")) - case "mem": - return NewMemTrackingStore(), nil - default: - return nil, xerrors.Errorf("unknown tracking store type %s", ttype) - } -} - -// NewMemTrackingStore creates an in-memory tracking store. 
-// This is only useful for test or situations where you don't want to open the -// real tracking store (eg concurrent read only access on a node's datastore) -func NewMemTrackingStore() *MemTrackingStore { - return &MemTrackingStore{tab: make(map[cid.Cid]abi.ChainEpoch)} -} - -// MemTrackingStore is a simple in-memory tracking store -type MemTrackingStore struct { - sync.Mutex - tab map[cid.Cid]abi.ChainEpoch -} - -var _ TrackingStore = (*MemTrackingStore)(nil) - -func (s *MemTrackingStore) Put(cid cid.Cid, epoch abi.ChainEpoch) error { - s.Lock() - defer s.Unlock() - s.tab[cid] = epoch - return nil -} - -func (s *MemTrackingStore) PutBatch(cids []cid.Cid, epoch abi.ChainEpoch) error { - s.Lock() - defer s.Unlock() - for _, cid := range cids { - s.tab[cid] = epoch - } - return nil -} - -func (s *MemTrackingStore) Get(cid cid.Cid) (abi.ChainEpoch, error) { - s.Lock() - defer s.Unlock() - epoch, ok := s.tab[cid] - if ok { - return epoch, nil - } - return 0, xerrors.Errorf("missing tracking epoch for %s", cid) -} - -func (s *MemTrackingStore) Delete(cid cid.Cid) error { - s.Lock() - defer s.Unlock() - delete(s.tab, cid) - return nil -} - -func (s *MemTrackingStore) DeleteBatch(cids []cid.Cid) error { - s.Lock() - defer s.Unlock() - for _, cid := range cids { - delete(s.tab, cid) - } - return nil -} - -func (s *MemTrackingStore) ForEach(f func(cid.Cid, abi.ChainEpoch) error) error { - s.Lock() - defer s.Unlock() - for cid, epoch := range s.tab { - err := f(cid, epoch) - if err != nil { - return err - } - } - return nil -} - -func (s *MemTrackingStore) Sync() error { return nil } -func (s *MemTrackingStore) Close() error { return nil } diff --git a/blockstore/splitstore/tracking_bolt.go b/blockstore/splitstore/tracking_bolt.go deleted file mode 100644 index c5c451e1570..00000000000 --- a/blockstore/splitstore/tracking_bolt.go +++ /dev/null @@ -1,120 +0,0 @@ -package splitstore - -import ( - "time" - - "golang.org/x/xerrors" - - cid "github.com/ipfs/go-cid" - bolt 
"go.etcd.io/bbolt" - - "github.com/filecoin-project/go-state-types/abi" -) - -type BoltTrackingStore struct { - db *bolt.DB - bucketId []byte -} - -var _ TrackingStore = (*BoltTrackingStore)(nil) - -func OpenBoltTrackingStore(path string) (*BoltTrackingStore, error) { - opts := &bolt.Options{ - Timeout: 1 * time.Second, - NoSync: true, - } - db, err := bolt.Open(path, 0644, opts) - if err != nil { - return nil, err - } - - bucketId := []byte("tracker") - err = db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucketIfNotExists(bucketId) - if err != nil { - return xerrors.Errorf("error creating bolt db bucket %s: %w", string(bucketId), err) - } - return nil - }) - - if err != nil { - _ = db.Close() - return nil, err - } - - return &BoltTrackingStore{db: db, bucketId: bucketId}, nil -} - -func (s *BoltTrackingStore) Put(cid cid.Cid, epoch abi.ChainEpoch) error { - val := epochToBytes(epoch) - return s.db.Batch(func(tx *bolt.Tx) error { - b := tx.Bucket(s.bucketId) - return b.Put(cid.Hash(), val) - }) -} - -func (s *BoltTrackingStore) PutBatch(cids []cid.Cid, epoch abi.ChainEpoch) error { - val := epochToBytes(epoch) - return s.db.Batch(func(tx *bolt.Tx) error { - b := tx.Bucket(s.bucketId) - for _, cid := range cids { - err := b.Put(cid.Hash(), val) - if err != nil { - return err - } - } - return nil - }) -} - -func (s *BoltTrackingStore) Get(cid cid.Cid) (epoch abi.ChainEpoch, err error) { - err = s.db.View(func(tx *bolt.Tx) error { - b := tx.Bucket(s.bucketId) - val := b.Get(cid.Hash()) - if val == nil { - return xerrors.Errorf("missing tracking epoch for %s", cid) - } - epoch = bytesToEpoch(val) - return nil - }) - return epoch, err -} - -func (s *BoltTrackingStore) Delete(cid cid.Cid) error { - return s.db.Batch(func(tx *bolt.Tx) error { - b := tx.Bucket(s.bucketId) - return b.Delete(cid.Hash()) - }) -} - -func (s *BoltTrackingStore) DeleteBatch(cids []cid.Cid) error { - return s.db.Batch(func(tx *bolt.Tx) error { - b := tx.Bucket(s.bucketId) - for _, cid 
:= range cids { - err := b.Delete(cid.Hash()) - if err != nil { - return xerrors.Errorf("error deleting %s", cid) - } - } - return nil - }) -} - -func (s *BoltTrackingStore) ForEach(f func(cid.Cid, abi.ChainEpoch) error) error { - return s.db.View(func(tx *bolt.Tx) error { - b := tx.Bucket(s.bucketId) - return b.ForEach(func(k, v []byte) error { - cid := cid.NewCidV1(cid.Raw, k) - epoch := bytesToEpoch(v) - return f(cid, epoch) - }) - }) -} - -func (s *BoltTrackingStore) Sync() error { - return s.db.Sync() -} - -func (s *BoltTrackingStore) Close() error { - return s.db.Close() -} diff --git a/blockstore/splitstore/tracking_test.go b/blockstore/splitstore/tracking_test.go deleted file mode 100644 index afd475da5a5..00000000000 --- a/blockstore/splitstore/tracking_test.go +++ /dev/null @@ -1,130 +0,0 @@ -package splitstore - -import ( - "io/ioutil" - "testing" - - cid "github.com/ipfs/go-cid" - "github.com/multiformats/go-multihash" - - "github.com/filecoin-project/go-state-types/abi" -) - -func TestBoltTrackingStore(t *testing.T) { - testTrackingStore(t, "bolt") -} - -func testTrackingStore(t *testing.T, tsType string) { - t.Helper() - - makeCid := func(key string) cid.Cid { - h, err := multihash.Sum([]byte(key), multihash.SHA2_256, -1) - if err != nil { - t.Fatal(err) - } - - return cid.NewCidV1(cid.Raw, h) - } - - mustHave := func(s TrackingStore, cid cid.Cid, epoch abi.ChainEpoch) { - val, err := s.Get(cid) - if err != nil { - t.Fatal(err) - } - - if val != epoch { - t.Fatal("epoch mismatch") - } - } - - mustNotHave := func(s TrackingStore, cid cid.Cid) { - _, err := s.Get(cid) - if err == nil { - t.Fatal("expected error") - } - } - - path, err := ioutil.TempDir("", "snoop-test.*") - if err != nil { - t.Fatal(err) - } - - s, err := OpenTrackingStore(path, tsType) - if err != nil { - t.Fatal(err) - } - - k1 := makeCid("a") - k2 := makeCid("b") - k3 := makeCid("c") - k4 := makeCid("d") - - s.Put(k1, 1) //nolint - s.Put(k2, 2) //nolint - s.Put(k3, 3) //nolint - 
s.Put(k4, 4) //nolint - - mustHave(s, k1, 1) - mustHave(s, k2, 2) - mustHave(s, k3, 3) - mustHave(s, k4, 4) - - s.Delete(k1) // nolint - s.Delete(k2) // nolint - - mustNotHave(s, k1) - mustNotHave(s, k2) - mustHave(s, k3, 3) - mustHave(s, k4, 4) - - s.PutBatch([]cid.Cid{k1}, 1) //nolint - s.PutBatch([]cid.Cid{k2}, 2) //nolint - - mustHave(s, k1, 1) - mustHave(s, k2, 2) - mustHave(s, k3, 3) - mustHave(s, k4, 4) - - allKeys := map[string]struct{}{ - k1.String(): {}, - k2.String(): {}, - k3.String(): {}, - k4.String(): {}, - } - - err = s.ForEach(func(k cid.Cid, _ abi.ChainEpoch) error { - _, ok := allKeys[k.String()] - if !ok { - t.Fatal("unexpected key") - } - - delete(allKeys, k.String()) - return nil - }) - - if err != nil { - t.Fatal(err) - } - - if len(allKeys) != 0 { - t.Fatal("not all keys were returned") - } - - // no close and reopen and ensure the keys still exist - err = s.Close() - if err != nil { - t.Fatal(err) - } - - s, err = OpenTrackingStore(path, tsType) - if err != nil { - t.Fatal(err) - } - - mustHave(s, k1, 1) - mustHave(s, k2, 2) - mustHave(s, k3, 3) - mustHave(s, k4, 4) - - s.Close() //nolint:errcheck -} diff --git a/build/bootstrap/interopnet.pi b/build/bootstrap/interopnet.pi index 112d9611382..923653d94e3 100644 --- a/build/bootstrap/interopnet.pi +++ b/build/bootstrap/interopnet.pi @@ -1,2 +1,2 @@ -/dns4/bootstrap-0.interop.fildev.network/tcp/1347/p2p/12D3KooWN86wA54r3v9M8bBYbc1vK9W1ehHDxVGPRaoeUYuXF8R7 -/dns4/bootstrap-1.interop.fildev.network/tcp/1347/p2p/12D3KooWNZ41kev8mtBZgWe43qam1VX9pJyf87jnaisQP2urZZ2M +/dns4/bootstrap-0.interop.fildev.network/tcp/1347/p2p/12D3KooWLGPq9JL1xwL6gHok7HSNxtK1Q5kyfg4Hk69ifRPghn4i +/dns4/bootstrap-1.interop.fildev.network/tcp/1347/p2p/12D3KooWFYS1f31zafv8mqqYu8U3hEqYvaZ6avWzYU3BmZdpyH3h diff --git a/build/genesis/interopnet.car b/build/genesis/interopnet.car index 80ecd6e7297..2c7c2a49873 100644 Binary files a/build/genesis/interopnet.car and b/build/genesis/interopnet.car differ diff --git 
a/build/openrpc/full.json.gz b/build/openrpc/full.json.gz index fe0c17662ad..2f85885ef6b 100644 Binary files a/build/openrpc/full.json.gz and b/build/openrpc/full.json.gz differ diff --git a/build/openrpc/miner.json.gz b/build/openrpc/miner.json.gz index b653c2e7f77..9d2f5c2e070 100644 Binary files a/build/openrpc/miner.json.gz and b/build/openrpc/miner.json.gz differ diff --git a/build/openrpc/worker.json.gz b/build/openrpc/worker.json.gz index 4b2eb6a5a4f..53bb24e5859 100644 Binary files a/build/openrpc/worker.json.gz and b/build/openrpc/worker.json.gz differ diff --git a/build/params_2k.go b/build/params_2k.go index 387d2da0bbd..efa38dc0cec 100644 --- a/build/params_2k.go +++ b/build/params_2k.go @@ -28,18 +28,18 @@ var UpgradeAssemblyHeight = abi.ChainEpoch(-5) var UpgradeLiftoffHeight = abi.ChainEpoch(-6) var UpgradeKumquatHeight = abi.ChainEpoch(-7) -var UpgradeCalicoHeight = abi.ChainEpoch(-8) -var UpgradePersianHeight = abi.ChainEpoch(-9) -var UpgradeOrangeHeight = abi.ChainEpoch(-10) -var UpgradeClausHeight = abi.ChainEpoch(-11) +var UpgradeCalicoHeight = abi.ChainEpoch(-9) +var UpgradePersianHeight = abi.ChainEpoch(-10) +var UpgradeOrangeHeight = abi.ChainEpoch(-11) +var UpgradeClausHeight = abi.ChainEpoch(-12) -var UpgradeTrustHeight = abi.ChainEpoch(-12) +var UpgradeTrustHeight = abi.ChainEpoch(-13) -var UpgradeNorwegianHeight = abi.ChainEpoch(-13) +var UpgradeNorwegianHeight = abi.ChainEpoch(-14) -var UpgradeTurboHeight = abi.ChainEpoch(-14) +var UpgradeTurboHeight = abi.ChainEpoch(-15) -var UpgradeHyperdriveHeight = abi.ChainEpoch(-15) +var UpgradeHyperdriveHeight = abi.ChainEpoch(-16) var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ 0: DrandMainnet, diff --git a/build/params_interop.go b/build/params_interop.go index 73cc1c7d9ca..921dd0981f8 100644 --- a/build/params_interop.go +++ b/build/params_interop.go @@ -31,18 +31,18 @@ var UpgradeAssemblyHeight = abi.ChainEpoch(-5) var UpgradeLiftoffHeight = abi.ChainEpoch(-6) var UpgradeKumquatHeight = 
abi.ChainEpoch(-7) -var UpgradeCalicoHeight = abi.ChainEpoch(-8) -var UpgradePersianHeight = abi.ChainEpoch(-9) -var UpgradeOrangeHeight = abi.ChainEpoch(-10) -var UpgradeClausHeight = abi.ChainEpoch(-11) +var UpgradeCalicoHeight = abi.ChainEpoch(-9) +var UpgradePersianHeight = abi.ChainEpoch(-10) +var UpgradeOrangeHeight = abi.ChainEpoch(-11) +var UpgradeClausHeight = abi.ChainEpoch(-12) -var UpgradeTrustHeight = abi.ChainEpoch(-12) +var UpgradeTrustHeight = abi.ChainEpoch(-13) -var UpgradeNorwegianHeight = abi.ChainEpoch(-13) +var UpgradeNorwegianHeight = abi.ChainEpoch(-14) -var UpgradeTurboHeight = abi.ChainEpoch(-14) +var UpgradeTurboHeight = abi.ChainEpoch(-15) -var UpgradeHyperdriveHeight = abi.ChainEpoch(-15) +var UpgradeHyperdriveHeight = abi.ChainEpoch(-16) var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ 0: DrandMainnet, diff --git a/build/params_testground.go b/build/params_testground.go index 252d23e759e..204c74e676b 100644 --- a/build/params_testground.go +++ b/build/params_testground.go @@ -89,14 +89,14 @@ var ( UpgradeAssemblyHeight abi.ChainEpoch = 10 UpgradeLiftoffHeight abi.ChainEpoch = -5 UpgradeKumquatHeight abi.ChainEpoch = -6 - UpgradeCalicoHeight abi.ChainEpoch = -7 - UpgradePersianHeight abi.ChainEpoch = -8 - UpgradeOrangeHeight abi.ChainEpoch = -9 - UpgradeClausHeight abi.ChainEpoch = -10 - UpgradeTrustHeight abi.ChainEpoch = -11 - UpgradeNorwegianHeight abi.ChainEpoch = -12 - UpgradeTurboHeight abi.ChainEpoch = -13 - UpgradeHyperdriveHeight abi.ChainEpoch = -13 + UpgradeCalicoHeight abi.ChainEpoch = -8 + UpgradePersianHeight abi.ChainEpoch = -9 + UpgradeOrangeHeight abi.ChainEpoch = -10 + UpgradeClausHeight abi.ChainEpoch = -11 + UpgradeTrustHeight abi.ChainEpoch = -12 + UpgradeNorwegianHeight abi.ChainEpoch = -13 + UpgradeTurboHeight abi.ChainEpoch = -14 + UpgradeHyperdriveHeight abi.ChainEpoch = -15 DrandSchedule = map[abi.ChainEpoch]DrandEnum{ 0: DrandMainnet, diff --git a/build/version.go b/build/version.go index 
9262af171ef..f1c12b16bf1 100644 --- a/build/version.go +++ b/build/version.go @@ -33,8 +33,8 @@ func buildType() string { } } -// BuildVersion is the local build version, set by build system -const BuildVersion = "1.11.0" +// BuildVersion is the local build version +const BuildVersion = "1.11.1" func UserVersion() string { if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" { diff --git a/chain/actors/builtin/miner/actor.go.template b/chain/actors/builtin/miner/actor.go.template index 8d46f99fd6a..12f418b3784 100644 --- a/chain/actors/builtin/miner/actor.go.template +++ b/chain/actors/builtin/miner/actor.go.template @@ -105,6 +105,7 @@ type State interface { // UnallocatedSectorNumbers returns up to count unallocated sector numbers (or less than // count if there aren't enough). UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) + GetAllocatedSectors() (*bitfield.BitField, error) // Note that ProvingPeriodStart is deprecated and will be renamed / removed in a future version of actors GetProvingPeriodStart() (abi.ChainEpoch, error) diff --git a/chain/actors/builtin/miner/miner.go b/chain/actors/builtin/miner/miner.go index 995dc78cba1..fc1d60e718a 100644 --- a/chain/actors/builtin/miner/miner.go +++ b/chain/actors/builtin/miner/miner.go @@ -164,6 +164,7 @@ type State interface { // UnallocatedSectorNumbers returns up to count unallocated sector numbers (or less than // count if there aren't enough). 
UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) + GetAllocatedSectors() (*bitfield.BitField, error) // Note that ProvingPeriodStart is deprecated and will be renamed / removed in a future version of actors GetProvingPeriodStart() (abi.ChainEpoch, error) diff --git a/chain/actors/builtin/miner/state.go.template b/chain/actors/builtin/miner/state.go.template index eb7ab00bf0b..09c1202d95e 100644 --- a/chain/actors/builtin/miner/state.go.template +++ b/chain/actors/builtin/miner/state.go.template @@ -318,6 +318,15 @@ func (s *state{{.v}}) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, e return sectors, nil } +func (s *state{{.v}}) GetAllocatedSectors() (*bitfield.BitField, error) { + var allocatedSectors bitfield.BitField + if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil { + return nil, err + } + + return &allocatedSectors, nil +} + func (s *state{{.v}}) LoadDeadline(idx uint64) (Deadline, error) { dls, err := s.State.LoadDeadlines(s.store) if err != nil { diff --git a/chain/actors/builtin/miner/v0.go b/chain/actors/builtin/miner/v0.go index c5e8874819d..cd922645ea4 100644 --- a/chain/actors/builtin/miner/v0.go +++ b/chain/actors/builtin/miner/v0.go @@ -311,6 +311,15 @@ func (s *state0) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) return sectors, nil } +func (s *state0) GetAllocatedSectors() (*bitfield.BitField, error) { + var allocatedSectors bitfield.BitField + if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil { + return nil, err + } + + return &allocatedSectors, nil +} + func (s *state0) LoadDeadline(idx uint64) (Deadline, error) { dls, err := s.State.LoadDeadlines(s.store) if err != nil { diff --git a/chain/actors/builtin/miner/v2.go b/chain/actors/builtin/miner/v2.go index 45d4a7165ba..5de653fe4e2 100644 --- a/chain/actors/builtin/miner/v2.go +++ b/chain/actors/builtin/miner/v2.go @@ -309,6 +309,15 @@ func (s *state2) 
UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) return sectors, nil } +func (s *state2) GetAllocatedSectors() (*bitfield.BitField, error) { + var allocatedSectors bitfield.BitField + if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil { + return nil, err + } + + return &allocatedSectors, nil +} + func (s *state2) LoadDeadline(idx uint64) (Deadline, error) { dls, err := s.State.LoadDeadlines(s.store) if err != nil { diff --git a/chain/actors/builtin/miner/v3.go b/chain/actors/builtin/miner/v3.go index 166abe1e748..1819428a6d3 100644 --- a/chain/actors/builtin/miner/v3.go +++ b/chain/actors/builtin/miner/v3.go @@ -311,6 +311,15 @@ func (s *state3) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) return sectors, nil } +func (s *state3) GetAllocatedSectors() (*bitfield.BitField, error) { + var allocatedSectors bitfield.BitField + if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil { + return nil, err + } + + return &allocatedSectors, nil +} + func (s *state3) LoadDeadline(idx uint64) (Deadline, error) { dls, err := s.State.LoadDeadlines(s.store) if err != nil { diff --git a/chain/actors/builtin/miner/v4.go b/chain/actors/builtin/miner/v4.go index 71a2b9f9d23..5a3a75053c3 100644 --- a/chain/actors/builtin/miner/v4.go +++ b/chain/actors/builtin/miner/v4.go @@ -311,6 +311,15 @@ func (s *state4) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) return sectors, nil } +func (s *state4) GetAllocatedSectors() (*bitfield.BitField, error) { + var allocatedSectors bitfield.BitField + if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil { + return nil, err + } + + return &allocatedSectors, nil +} + func (s *state4) LoadDeadline(idx uint64) (Deadline, error) { dls, err := s.State.LoadDeadlines(s.store) if err != nil { diff --git a/chain/actors/builtin/miner/v5.go b/chain/actors/builtin/miner/v5.go index 
56883477768..82e98c2ef06 100644 --- a/chain/actors/builtin/miner/v5.go +++ b/chain/actors/builtin/miner/v5.go @@ -311,6 +311,15 @@ func (s *state5) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) return sectors, nil } +func (s *state5) GetAllocatedSectors() (*bitfield.BitField, error) { + var allocatedSectors bitfield.BitField + if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil { + return nil, err + } + + return &allocatedSectors, nil +} + func (s *state5) LoadDeadline(idx uint64) (Deadline, error) { dls, err := s.State.LoadDeadlines(s.store) if err != nil { diff --git a/chain/actors/policy/policy.go b/chain/actors/policy/policy.go index c159dc98f22..492f7618354 100644 --- a/chain/actors/policy/policy.go +++ b/chain/actors/policy/policy.go @@ -4,6 +4,7 @@ import ( "sort" "github.com/filecoin-project/go-state-types/big" + "golang.org/x/xerrors" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/network" @@ -168,63 +169,99 @@ func SetMinVerifiedDealSize(size abi.StoragePower) { } -func GetMaxProveCommitDuration(ver actors.Version, t abi.RegisteredSealProof) abi.ChainEpoch { +func GetMaxProveCommitDuration(ver actors.Version, t abi.RegisteredSealProof) (abi.ChainEpoch, error) { switch ver { case actors.Version0: - return miner0.MaxSealDuration[t] + return miner0.MaxSealDuration[t], nil case actors.Version2: - return miner2.MaxProveCommitDuration[t] + return miner2.MaxProveCommitDuration[t], nil case actors.Version3: - return miner3.MaxProveCommitDuration[t] + return miner3.MaxProveCommitDuration[t], nil case actors.Version4: - return miner4.MaxProveCommitDuration[t] + return miner4.MaxProveCommitDuration[t], nil case actors.Version5: - return miner5.MaxProveCommitDuration[t] + return miner5.MaxProveCommitDuration[t], nil default: - panic("unsupported actors version") + return 0, xerrors.Errorf("unsupported actors version") } } +// SetProviderCollateralSupplyTarget 
sets the percentage of normalized circulating +// supply that must be covered by provider collateral in a deal. This should +// only be used for testing. +func SetProviderCollateralSupplyTarget(num, denom big.Int) { + + market2.ProviderCollateralSupplyTarget = builtin2.BigFrac{ + Numerator: num, + Denominator: denom, + } + + market3.ProviderCollateralSupplyTarget = builtin3.BigFrac{ + Numerator: num, + Denominator: denom, + } + + market4.ProviderCollateralSupplyTarget = builtin4.BigFrac{ + Numerator: num, + Denominator: denom, + } + + market5.ProviderCollateralSupplyTarget = builtin5.BigFrac{ + Numerator: num, + Denominator: denom, + } + +} + func DealProviderCollateralBounds( size abi.PaddedPieceSize, verified bool, rawBytePower, qaPower, baselinePower abi.StoragePower, circulatingFil abi.TokenAmount, nwVer network.Version, -) (min, max abi.TokenAmount) { - switch actors.VersionForNetwork(nwVer) { +) (min, max abi.TokenAmount, err error) { + v, err := actors.VersionForNetwork(nwVer) + if err != nil { + return big.Zero(), big.Zero(), err + } + switch v { case actors.Version0: - return market0.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil, nwVer) + min, max := market0.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil, nwVer) + return min, max, nil case actors.Version2: - return market2.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil) + min, max := market2.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil) + return min, max, nil case actors.Version3: - return market3.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil) + min, max := market3.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil) + return min, max, nil case actors.Version4: - return 
market4.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil) + min, max := market4.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil) + return min, max, nil case actors.Version5: - return market5.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil) + min, max := market5.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil) + return min, max, nil default: - panic("unsupported actors version") + return big.Zero(), big.Zero(), xerrors.Errorf("unsupported actors version") } } @@ -283,8 +320,11 @@ func GetMaxPoStPartitions(nv network.Version, p abi.RegisteredPoStProof) (int, e if err != nil { return 0, err } - maxSectors := uint64(GetAddressedSectorsMax(nv)) - return int(maxSectors / sectorsPerPart), nil + maxSectors, err := GetAddressedSectorsMax(nv) + if err != nil { + return 0, err + } + return int(uint64(maxSectors) / sectorsPerPart), nil } func GetDefaultSectorSize() abi.SectorSize { @@ -318,82 +358,94 @@ func GetSectorMaxLifetime(proof abi.RegisteredSealProof, nwVer network.Version) return builtin5.SealProofPoliciesV11[proof].SectorMaxLifetime } -func GetAddressedSectorsMax(nwVer network.Version) int { - switch actors.VersionForNetwork(nwVer) { +func GetAddressedSectorsMax(nwVer network.Version) (int, error) { + v, err := actors.VersionForNetwork(nwVer) + if err != nil { + return 0, err + } + switch v { case actors.Version0: - return miner0.AddressedSectorsMax + return miner0.AddressedSectorsMax, nil case actors.Version2: - return miner2.AddressedSectorsMax + return miner2.AddressedSectorsMax, nil case actors.Version3: - return miner3.AddressedSectorsMax + return miner3.AddressedSectorsMax, nil case actors.Version4: - return miner4.AddressedSectorsMax + return miner4.AddressedSectorsMax, nil case actors.Version5: - return miner5.AddressedSectorsMax + return 
miner5.AddressedSectorsMax, nil default: - panic("unsupported network version") + return 0, xerrors.Errorf("unsupported network version") } } -func GetDeclarationsMax(nwVer network.Version) int { - switch actors.VersionForNetwork(nwVer) { +func GetDeclarationsMax(nwVer network.Version) (int, error) { + v, err := actors.VersionForNetwork(nwVer) + if err != nil { + return 0, err + } + switch v { case actors.Version0: - // TODO: Should we instead panic here since the concept doesn't exist yet? - return miner0.AddressedPartitionsMax + // TODO: Should we instead error here since the concept doesn't exist yet? + return miner0.AddressedPartitionsMax, nil case actors.Version2: - return miner2.DeclarationsMax + return miner2.DeclarationsMax, nil case actors.Version3: - return miner3.DeclarationsMax + return miner3.DeclarationsMax, nil case actors.Version4: - return miner4.DeclarationsMax + return miner4.DeclarationsMax, nil case actors.Version5: - return miner5.DeclarationsMax + return miner5.DeclarationsMax, nil default: - panic("unsupported network version") + return 0, xerrors.Errorf("unsupported network version") } } -func AggregateNetworkFee(nwVer network.Version, aggregateSize int, baseFee abi.TokenAmount) abi.TokenAmount { - switch actors.VersionForNetwork(nwVer) { +func AggregateNetworkFee(nwVer network.Version, aggregateSize int, baseFee abi.TokenAmount) (abi.TokenAmount, error) { + v, err := actors.VersionForNetwork(nwVer) + if err != nil { + return big.Zero(), err + } + switch v { case actors.Version0: - return big.Zero() + return big.Zero(), nil case actors.Version2: - return big.Zero() + return big.Zero(), nil case actors.Version3: - return big.Zero() + return big.Zero(), nil case actors.Version4: - return big.Zero() + return big.Zero(), nil case actors.Version5: - return miner5.AggregateNetworkFee(aggregateSize, baseFee) + return miner5.AggregateNetworkFee(aggregateSize, baseFee), nil default: - panic("unsupported network version") + return big.Zero(), 
xerrors.Errorf("unsupported network version") } } diff --git a/chain/actors/policy/policy.go.template b/chain/actors/policy/policy.go.template index 17b3eb0ff70..264d4299203 100644 --- a/chain/actors/policy/policy.go.template +++ b/chain/actors/policy/policy.go.template @@ -4,6 +4,7 @@ import ( "sort" "github.com/filecoin-project/go-state-types/big" + "golang.org/x/xerrors" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/network" @@ -117,37 +118,57 @@ func SetMinVerifiedDealSize(size abi.StoragePower) { {{end}} } -func GetMaxProveCommitDuration(ver actors.Version, t abi.RegisteredSealProof) abi.ChainEpoch { +func GetMaxProveCommitDuration(ver actors.Version, t abi.RegisteredSealProof) (abi.ChainEpoch, error) { switch ver { {{range .versions}} case actors.Version{{.}}: {{if (eq . 0)}} - return miner{{.}}.MaxSealDuration[t] + return miner{{.}}.MaxSealDuration[t], nil {{else}} - return miner{{.}}.MaxProveCommitDuration[t] + return miner{{.}}.MaxProveCommitDuration[t], nil {{end}} {{end}} default: - panic("unsupported actors version") + return 0, xerrors.Errorf("unsupported actors version") } } +// SetProviderCollateralSupplyTarget sets the percentage of normalized circulating +// supply that must be covered by provider collateral in a deal. This should +// only be used for testing. +func SetProviderCollateralSupplyTarget(num, denom big.Int) { +{{range .versions}} + {{if (ge . 
2)}} + market{{.}}.ProviderCollateralSupplyTarget = builtin{{.}}.BigFrac{ + Numerator: num, + Denominator: denom, + } + {{end}} +{{end}} +} + func DealProviderCollateralBounds( size abi.PaddedPieceSize, verified bool, rawBytePower, qaPower, baselinePower abi.StoragePower, circulatingFil abi.TokenAmount, nwVer network.Version, -) (min, max abi.TokenAmount) { - switch actors.VersionForNetwork(nwVer) { +) (min, max abi.TokenAmount, err error) { + v, err := actors.VersionForNetwork(nwVer) + if err != nil { + return big.Zero(), big.Zero(), err + } + switch v { {{range .versions}} case actors.Version{{.}}: {{if (eq . 0)}} - return market{{.}}.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil, nwVer) + min, max := market{{.}}.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil, nwVer) + return min, max, nil {{else}} - return market{{.}}.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil) + min, max := market{{.}}.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil) + return min, max, nil {{end}} {{end}} default: - panic("unsupported actors version") + return big.Zero(), big.Zero(), xerrors.Errorf("unsupported actors version") } } @@ -187,8 +208,11 @@ func GetMaxPoStPartitions(nv network.Version, p abi.RegisteredPoStProof) (int, e if err != nil { return 0, err } - maxSectors := uint64(GetAddressedSectorsMax(nv)) - return int(maxSectors / sectorsPerPart), nil + maxSectors, err := GetAddressedSectorsMax(nv) + if err != nil { + return 0, err + } + return int(uint64(maxSectors) / sectorsPerPart), nil } func GetDefaultSectorSize() abi.SectorSize { @@ -222,44 +246,56 @@ func GetSectorMaxLifetime(proof abi.RegisteredSealProof, nwVer network.Version) return builtin{{.latestVersion}}.SealProofPoliciesV11[proof].SectorMaxLifetime } -func GetAddressedSectorsMax(nwVer network.Version) int { - switch 
actors.VersionForNetwork(nwVer) { +func GetAddressedSectorsMax(nwVer network.Version) (int, error) { + v, err := actors.VersionForNetwork(nwVer) + if err != nil { + return 0, err + } + switch v { {{range .versions}} case actors.Version{{.}}: - return miner{{.}}.AddressedSectorsMax + return miner{{.}}.AddressedSectorsMax, nil {{end}} default: - panic("unsupported network version") + return 0, xerrors.Errorf("unsupported network version") } } -func GetDeclarationsMax(nwVer network.Version) int { - switch actors.VersionForNetwork(nwVer) { +func GetDeclarationsMax(nwVer network.Version) (int, error) { + v, err := actors.VersionForNetwork(nwVer) + if err != nil { + return 0, err + } + switch v { {{range .versions}} case actors.Version{{.}}: {{if (eq . 0)}} - // TODO: Should we instead panic here since the concept doesn't exist yet? - return miner{{.}}.AddressedPartitionsMax + // TODO: Should we instead error here since the concept doesn't exist yet? + return miner{{.}}.AddressedPartitionsMax, nil {{else}} - return miner{{.}}.DeclarationsMax + return miner{{.}}.DeclarationsMax, nil {{end}} {{end}} default: - panic("unsupported network version") + return 0, xerrors.Errorf("unsupported network version") } } -func AggregateNetworkFee(nwVer network.Version, aggregateSize int, baseFee abi.TokenAmount) abi.TokenAmount { - switch actors.VersionForNetwork(nwVer) { +func AggregateNetworkFee(nwVer network.Version, aggregateSize int, baseFee abi.TokenAmount) (abi.TokenAmount, error) { + v, err := actors.VersionForNetwork(nwVer) + if err != nil { + return big.Zero(), err + } + switch v { {{range .versions}} case actors.Version{{.}}: {{if (le . 
4)}} - return big.Zero() + return big.Zero(), nil {{else}} - return miner{{.}}.AggregateNetworkFee(aggregateSize, baseFee) + return miner{{.}}.AggregateNetworkFee(aggregateSize, baseFee), nil {{end}} {{end}} default: - panic("unsupported network version") + return big.Zero(), xerrors.Errorf("unsupported network version") } } diff --git a/chain/actors/version.go b/chain/actors/version.go index 9710e62fa8f..8787089aff1 100644 --- a/chain/actors/version.go +++ b/chain/actors/version.go @@ -21,19 +21,19 @@ const ( ) // Converts a network version into an actors adt version. -func VersionForNetwork(version network.Version) Version { +func VersionForNetwork(version network.Version) (Version, error) { switch version { case network.Version0, network.Version1, network.Version2, network.Version3: - return Version0 + return Version0, nil case network.Version4, network.Version5, network.Version6, network.Version7, network.Version8, network.Version9: - return Version2 + return Version2, nil case network.Version10, network.Version11: - return Version3 + return Version3, nil case network.Version12: - return Version4 + return Version4, nil case network.Version13: - return Version5 + return Version5, nil default: - panic(fmt.Sprintf("unsupported network version %d", version)) + return -1, fmt.Errorf("unsupported network version %d", version) } } diff --git a/chain/events/events_called.go b/chain/events/events_called.go index 2fe6853eb36..1f0b80169e1 100644 --- a/chain/events/events_called.go +++ b/chain/events/events_called.go @@ -5,6 +5,9 @@ import ( "math" "sync" + "github.com/filecoin-project/lotus/api" + lru "github.com/hashicorp/golang-lru" + "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/go-state-types/abi" @@ -464,14 +467,20 @@ type messageEvents struct { lk sync.RWMutex matchers map[triggerID]MsgMatchFunc + + blockMsgLk sync.Mutex + blockMsgCache *lru.ARCCache } func newMessageEvents(ctx context.Context, hcAPI headChangeAPI, cs EventAPI) 
messageEvents { + blsMsgCache, _ := lru.NewARC(500) return messageEvents{ - ctx: ctx, - cs: cs, - hcAPI: hcAPI, - matchers: make(map[triggerID]MsgMatchFunc), + ctx: ctx, + cs: cs, + hcAPI: hcAPI, + matchers: make(map[triggerID]MsgMatchFunc), + blockMsgLk: sync.Mutex{}, + blockMsgCache: blsMsgCache, } } @@ -515,14 +524,21 @@ func (me *messageEvents) messagesForTs(ts *types.TipSet, consume func(*types.Mes seen := map[cid.Cid]struct{}{} for _, tsb := range ts.Blocks() { - - msgs, err := me.cs.ChainGetBlockMessages(context.TODO(), tsb.Cid()) - if err != nil { - log.Errorf("messagesForTs MessagesForBlock failed (ts.H=%d, Bcid:%s, B.Mcid:%s): %s", ts.Height(), tsb.Cid(), tsb.Messages, err) - // this is quite bad, but probably better than missing all the other updates - continue + me.blockMsgLk.Lock() + msgsI, ok := me.blockMsgCache.Get(tsb.Cid()) + var err error + if !ok { + msgsI, err = me.cs.ChainGetBlockMessages(context.TODO(), tsb.Cid()) + if err != nil { + log.Errorf("messagesForTs MessagesForBlock failed (ts.H=%d, Bcid:%s, B.Mcid:%s): %s", ts.Height(), tsb.Cid(), tsb.Messages, err) + // this is quite bad, but probably better than missing all the other updates + me.blockMsgLk.Unlock() + continue + } + me.blockMsgCache.Add(tsb.Cid(), msgsI) } - + me.blockMsgLk.Unlock() + msgs := msgsI.(*api.BlockMessages) for _, m := range msgs.BlsMessages { _, ok := seen[m.Cid()] if ok { diff --git a/chain/events/events_test.go b/chain/events/events_test.go index e18d5ba7c93..04f938055f1 100644 --- a/chain/events/events_test.go +++ b/chain/events/events_test.go @@ -6,6 +6,8 @@ import ( "sync" "testing" + "gotest.tools/assert" + "github.com/ipfs/go-cid" "github.com/multiformats/go-multihash" "github.com/stretchr/testify/require" @@ -44,25 +46,43 @@ type fakeCS struct { tipsets map[types.TipSetKey]*types.TipSet sub func(rev, app []*types.TipSet) + + callNumberLk sync.Mutex + callNumber map[string]int } func (fcs *fakeCS) ChainHead(ctx context.Context) (*types.TipSet, error) { + 
fcs.callNumberLk.Lock() + defer fcs.callNumberLk.Unlock() + fcs.callNumber["ChainHead"] = fcs.callNumber["ChainHead"] + 1 panic("implement me") } func (fcs *fakeCS) ChainGetTipSet(ctx context.Context, key types.TipSetKey) (*types.TipSet, error) { + fcs.callNumberLk.Lock() + defer fcs.callNumberLk.Unlock() + fcs.callNumber["ChainGetTipSet"] = fcs.callNumber["ChainGetTipSet"] + 1 return fcs.tipsets[key], nil } func (fcs *fakeCS) StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error) { + fcs.callNumberLk.Lock() + defer fcs.callNumberLk.Unlock() + fcs.callNumber["StateSearchMsg"] = fcs.callNumber["StateSearchMsg"] + 1 return nil, nil } func (fcs *fakeCS) StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) { + fcs.callNumberLk.Lock() + defer fcs.callNumberLk.Unlock() + fcs.callNumber["StateGetActor"] = fcs.callNumber["StateGetActor"] + 1 panic("Not Implemented") } func (fcs *fakeCS) ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) { + fcs.callNumberLk.Lock() + defer fcs.callNumberLk.Unlock() + fcs.callNumber["ChainGetTipSetByHeight"] = fcs.callNumber["ChainGetTipSetByHeight"] + 1 panic("Not Implemented") } @@ -113,6 +133,10 @@ func (fcs *fakeCS) makeTs(t *testing.T, parents []cid.Cid, h abi.ChainEpoch, msg } func (fcs *fakeCS) ChainNotify(context.Context) (<-chan []*api.HeadChange, error) { + fcs.callNumberLk.Lock() + defer fcs.callNumberLk.Unlock() + fcs.callNumber["ChainNotify"] = fcs.callNumber["ChainNotify"] + 1 + out := make(chan []*api.HeadChange, 1) best, err := fcs.tsc.best() if err != nil { @@ -143,6 +167,9 @@ func (fcs *fakeCS) ChainNotify(context.Context) (<-chan []*api.HeadChange, error } func (fcs *fakeCS) ChainGetBlockMessages(ctx context.Context, blk cid.Cid) (*api.BlockMessages, error) { + fcs.callNumberLk.Lock() + defer fcs.callNumberLk.Unlock() + 
fcs.callNumber["ChainGetBlockMessages"] = fcs.callNumber["ChainGetBlockMessages"] + 1 messages, ok := fcs.blkMsgs[blk] if !ok { return &api.BlockMessages{}, nil @@ -152,8 +179,8 @@ func (fcs *fakeCS) ChainGetBlockMessages(ctx context.Context, blk cid.Cid) (*api if !ok { return &api.BlockMessages{}, nil } - return &api.BlockMessages{BlsMessages: ms.bmsgs, SecpkMessages: ms.smsgs}, nil + return &api.BlockMessages{BlsMessages: ms.bmsgs, SecpkMessages: ms.smsgs}, nil } func (fcs *fakeCS) fakeMsgs(m fakeMsg) cid.Cid { @@ -233,9 +260,10 @@ var _ EventAPI = &fakeCS{} func TestAt(t *testing.T) { fcs := &fakeCS{ - t: t, - h: 1, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), + t: t, + h: 1, + tsc: newTSCache(2*build.ForkLengthThreshold, nil), + callNumber: map[string]int{}, } require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) @@ -298,9 +326,10 @@ func TestAt(t *testing.T) { func TestAtDoubleTrigger(t *testing.T) { fcs := &fakeCS{ - t: t, - h: 1, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), + t: t, + h: 1, + tsc: newTSCache(2*build.ForkLengthThreshold, nil), + callNumber: map[string]int{}, } require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) @@ -340,9 +369,10 @@ func TestAtDoubleTrigger(t *testing.T) { func TestAtNullTrigger(t *testing.T) { fcs := &fakeCS{ - t: t, - h: 1, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), + t: t, + h: 1, + tsc: newTSCache(2*build.ForkLengthThreshold, nil), + callNumber: map[string]int{}, } require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) @@ -374,9 +404,10 @@ func TestAtNullTrigger(t *testing.T) { func TestAtNullConf(t *testing.T) { fcs := &fakeCS{ - t: t, - h: 1, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), + t: t, + h: 1, + tsc: newTSCache(2*build.ForkLengthThreshold, nil), + callNumber: map[string]int{}, } require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) @@ -413,9 +444,10 @@ func TestAtNullConf(t *testing.T) { func TestAtStart(t *testing.T) { fcs := &fakeCS{ 
- t: t, - h: 1, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), + t: t, + h: 1, + tsc: newTSCache(2*build.ForkLengthThreshold, nil), + callNumber: map[string]int{}, } require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) @@ -447,9 +479,10 @@ func TestAtStart(t *testing.T) { func TestAtStartConfidence(t *testing.T) { fcs := &fakeCS{ - t: t, - h: 1, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), + t: t, + h: 1, + tsc: newTSCache(2*build.ForkLengthThreshold, nil), + callNumber: map[string]int{}, } require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) @@ -477,9 +510,10 @@ func TestAtStartConfidence(t *testing.T) { func TestAtChained(t *testing.T) { fcs := &fakeCS{ - t: t, - h: 1, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), + t: t, + h: 1, + tsc: newTSCache(2*build.ForkLengthThreshold, nil), + callNumber: map[string]int{}, } require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) @@ -511,9 +545,10 @@ func TestAtChained(t *testing.T) { func TestAtChainedConfidence(t *testing.T) { fcs := &fakeCS{ - t: t, - h: 1, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), + t: t, + h: 1, + tsc: newTSCache(2*build.ForkLengthThreshold, nil), + callNumber: map[string]int{}, } require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) @@ -545,9 +580,10 @@ func TestAtChainedConfidence(t *testing.T) { func TestAtChainedConfidenceNull(t *testing.T) { fcs := &fakeCS{ - t: t, - h: 1, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), + t: t, + h: 1, + tsc: newTSCache(2*build.ForkLengthThreshold, nil), + callNumber: map[string]int{}, } require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) @@ -583,9 +619,10 @@ func TestCalled(t *testing.T) { t: t, h: 1, - msgs: map[cid.Cid]fakeMsg{}, - blkMsgs: map[cid.Cid]cid.Cid{}, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), + msgs: map[cid.Cid]fakeMsg{}, + blkMsgs: map[cid.Cid]cid.Cid{}, + tsc: newTSCache(2*build.ForkLengthThreshold, nil), + callNumber: map[string]int{}, } 
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) @@ -795,9 +832,10 @@ func TestCalledTimeout(t *testing.T) { t: t, h: 1, - msgs: map[cid.Cid]fakeMsg{}, - blkMsgs: map[cid.Cid]cid.Cid{}, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), + msgs: map[cid.Cid]fakeMsg{}, + blkMsgs: map[cid.Cid]cid.Cid{}, + tsc: newTSCache(2*build.ForkLengthThreshold, nil), + callNumber: map[string]int{}, } require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) @@ -835,9 +873,10 @@ func TestCalledTimeout(t *testing.T) { t: t, h: 1, - msgs: map[cid.Cid]fakeMsg{}, - blkMsgs: map[cid.Cid]cid.Cid{}, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), + msgs: map[cid.Cid]fakeMsg{}, + blkMsgs: map[cid.Cid]cid.Cid{}, + callNumber: map[string]int{}, + tsc: newTSCache(2*build.ForkLengthThreshold, nil), } require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) @@ -869,9 +908,10 @@ func TestCalledOrder(t *testing.T) { t: t, h: 1, - msgs: map[cid.Cid]fakeMsg{}, - blkMsgs: map[cid.Cid]cid.Cid{}, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), + msgs: map[cid.Cid]fakeMsg{}, + blkMsgs: map[cid.Cid]cid.Cid{}, + tsc: newTSCache(2*build.ForkLengthThreshold, nil), + callNumber: map[string]int{}, } require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) @@ -932,9 +972,10 @@ func TestCalledNull(t *testing.T) { t: t, h: 1, - msgs: map[cid.Cid]fakeMsg{}, - blkMsgs: map[cid.Cid]cid.Cid{}, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), + msgs: map[cid.Cid]fakeMsg{}, + blkMsgs: map[cid.Cid]cid.Cid{}, + tsc: newTSCache(2*build.ForkLengthThreshold, nil), + callNumber: map[string]int{}, } require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) @@ -997,9 +1038,10 @@ func TestRemoveTriggersOnMessage(t *testing.T) { t: t, h: 1, - msgs: map[cid.Cid]fakeMsg{}, - blkMsgs: map[cid.Cid]cid.Cid{}, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), + msgs: map[cid.Cid]fakeMsg{}, + blkMsgs: map[cid.Cid]cid.Cid{}, + tsc: 
newTSCache(2*build.ForkLengthThreshold, nil), + callNumber: map[string]int{}, } require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) @@ -1087,9 +1129,10 @@ func TestStateChanged(t *testing.T) { t: t, h: 1, - msgs: map[cid.Cid]fakeMsg{}, - blkMsgs: map[cid.Cid]cid.Cid{}, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), + msgs: map[cid.Cid]fakeMsg{}, + blkMsgs: map[cid.Cid]cid.Cid{}, + tsc: newTSCache(2*build.ForkLengthThreshold, nil), + callNumber: map[string]int{}, } require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) @@ -1175,9 +1218,10 @@ func TestStateChangedRevert(t *testing.T) { t: t, h: 1, - msgs: map[cid.Cid]fakeMsg{}, - blkMsgs: map[cid.Cid]cid.Cid{}, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), + msgs: map[cid.Cid]fakeMsg{}, + blkMsgs: map[cid.Cid]cid.Cid{}, + tsc: newTSCache(2*build.ForkLengthThreshold, nil), + callNumber: map[string]int{}, } require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) @@ -1253,9 +1297,10 @@ func TestStateChangedTimeout(t *testing.T) { t: t, h: 1, - msgs: map[cid.Cid]fakeMsg{}, - blkMsgs: map[cid.Cid]cid.Cid{}, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), + msgs: map[cid.Cid]fakeMsg{}, + blkMsgs: map[cid.Cid]cid.Cid{}, + tsc: newTSCache(2*build.ForkLengthThreshold, nil), + callNumber: map[string]int{}, } require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) @@ -1293,9 +1338,10 @@ func TestStateChangedTimeout(t *testing.T) { t: t, h: 1, - msgs: map[cid.Cid]fakeMsg{}, - blkMsgs: map[cid.Cid]cid.Cid{}, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), + msgs: map[cid.Cid]fakeMsg{}, + blkMsgs: map[cid.Cid]cid.Cid{}, + callNumber: map[string]int{}, + tsc: newTSCache(2*build.ForkLengthThreshold, nil), } require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) @@ -1329,9 +1375,10 @@ func TestCalledMultiplePerEpoch(t *testing.T) { t: t, h: 1, - msgs: map[cid.Cid]fakeMsg{}, - blkMsgs: map[cid.Cid]cid.Cid{}, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), + 
msgs: map[cid.Cid]fakeMsg{}, + blkMsgs: map[cid.Cid]cid.Cid{}, + callNumber: map[string]int{}, + tsc: newTSCache(2*build.ForkLengthThreshold, nil), } require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) @@ -1382,3 +1429,24 @@ func TestCalledMultiplePerEpoch(t *testing.T) { fcs.advance(9, 1, nil) } + +func TestCachedSameBlock(t *testing.T) { + fcs := &fakeCS{ + t: t, + h: 1, + + msgs: map[cid.Cid]fakeMsg{}, + blkMsgs: map[cid.Cid]cid.Cid{}, + callNumber: map[string]int{}, + tsc: newTSCache(2*build.ForkLengthThreshold, nil), + } + require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) + + _ = NewEvents(context.Background(), fcs) + + fcs.advance(0, 10, map[int]cid.Cid{}) + assert.Assert(t, fcs.callNumber["ChainGetBlockMessages"] == 20, "expect call ChainGetBlockMessages %d but got %d", 20, fcs.callNumber["ChainGetBlockMessages"]) + + fcs.advance(5, 10, map[int]cid.Cid{}) + assert.Assert(t, fcs.callNumber["ChainGetBlockMessages"] == 30, "expect call ChainGetBlockMessages %d but got %d", 30, fcs.callNumber["ChainGetBlockMessages"]) +} diff --git a/chain/events/state/predicates_test.go b/chain/events/state/predicates_test.go index 8af3bb6a0b9..bdc7523dce7 100644 --- a/chain/events/state/predicates_test.go +++ b/chain/events/state/predicates_test.go @@ -21,7 +21,7 @@ import ( market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" adt2 "github.com/filecoin-project/specs-actors/v2/actors/util/adt" - tutils "github.com/filecoin-project/specs-actors/v2/support/testing" + tutils "github.com/filecoin-project/specs-actors/v5/support/testing" bstore "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/actors/builtin/market" diff --git a/chain/gen/genesis/genesis.go b/chain/gen/genesis/genesis.go index 6dec3fea6d4..a94442d6511 100644 --- a/chain/gen/genesis/genesis.go +++ b/chain/gen/genesis/genesis.go @@ -149,7 +149,10 @@ func
MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge return nil, nil, xerrors.Errorf("making new state tree: %w", err) } - av := actors.VersionForNetwork(template.NetworkVersion) + av, err := actors.VersionForNetwork(template.NetworkVersion) + if err != nil { + return nil, nil, xerrors.Errorf("getting network version: %w", err) + } // Create system actor diff --git a/chain/gen/genesis/miners.go b/chain/gen/genesis/miners.go index e6f17d6779a..38d3db518b4 100644 --- a/chain/gen/genesis/miners.go +++ b/chain/gen/genesis/miners.go @@ -81,7 +81,10 @@ func mkFakedSigSyscalls(base vm.SyscallBuilder) vm.SyscallBuilder { func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid, miners []genesis.Miner, nv network.Version) (cid.Cid, error) { cst := cbor.NewCborStore(cs.StateBlockstore()) - av := actors.VersionForNetwork(nv) + av, err := actors.VersionForNetwork(nv) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to get network version: %w", err) + } csc := func(context.Context, abi.ChainEpoch, *state.StateTree) (abi.TokenAmount, error) { return big.Zero(), nil @@ -291,7 +294,7 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid return cid.Undef, xerrors.Errorf("setting power state: %w", err) } - rewact, err := SetupRewardActor(ctx, cs.StateBlockstore(), big.Zero(), actors.VersionForNetwork(nv)) + rewact, err := SetupRewardActor(ctx, cs.StateBlockstore(), big.Zero(), av) if err != nil { return cid.Undef, xerrors.Errorf("setup reward actor: %w", err) } diff --git a/chain/messagepool/check.go b/chain/messagepool/check.go index 11203e7dffd..283c0d1194e 100644 --- a/chain/messagepool/check.go +++ b/chain/messagepool/check.go @@ -11,7 +11,6 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/types" 
"github.com/filecoin-project/lotus/chain/vm" ) @@ -259,8 +258,14 @@ func (mp *MessagePool) checkMessages(ctx context.Context, msgs []*types.Message, Code: api.CheckStatusMessageValidity, }, } - - if err := m.ValidForBlockInclusion(0, build.NewestNetworkVersion); err != nil { + nv, err := mp.getNtwkVersion(epoch) + if err != nil { + check.OK = false + check.Err = fmt.Sprintf("error retrieving network version: %s", err.Error()) + } else { + check.OK = true + } + if err := m.ValidForBlockInclusion(0, nv); err != nil { check.OK = false check.Err = fmt.Sprintf("syntactically invalid message: %s", err.Error()) } else { diff --git a/chain/messagepool/messagepool.go b/chain/messagepool/messagepool.go index 865c18a3a0f..ee2518ed9ac 100644 --- a/chain/messagepool/messagepool.go +++ b/chain/messagepool/messagepool.go @@ -14,6 +14,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/network" "github.com/hashicorp/go-multierror" lru "github.com/hashicorp/golang-lru" "github.com/ipfs/go-cid" @@ -29,6 +30,7 @@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" @@ -147,6 +149,8 @@ type MessagePool struct { minGasPrice types.BigInt + getNtwkVersion func(abi.ChainEpoch) (network.Version, error) + currentSize int // pruneTrigger is a channel used to trigger a mempool pruning @@ -362,26 +366,28 @@ func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName, j journ if j == nil { j = journal.NilJournal() } + us := stmgr.DefaultUpgradeSchedule() mp := &MessagePool{ - ds: ds, - addSema: make(chan struct{}, 1), - closer: make(chan struct{}), - repubTk: build.Clock.Ticker(RepublishInterval), - 
repubTrigger: make(chan struct{}, 1), - localAddrs: make(map[address.Address]struct{}), - pending: make(map[address.Address]*msgSet), - keyCache: make(map[address.Address]address.Address), - minGasPrice: types.NewInt(0), - pruneTrigger: make(chan struct{}, 1), - pruneCooldown: make(chan struct{}, 1), - blsSigCache: cache, - sigValCache: verifcache, - changes: lps.New(50), - localMsgs: namespace.Wrap(ds, datastore.NewKey(localMsgsDs)), - api: api, - netName: netName, - cfg: cfg, + ds: ds, + addSema: make(chan struct{}, 1), + closer: make(chan struct{}), + repubTk: build.Clock.Ticker(RepublishInterval), + repubTrigger: make(chan struct{}, 1), + localAddrs: make(map[address.Address]struct{}), + pending: make(map[address.Address]*msgSet), + keyCache: make(map[address.Address]address.Address), + minGasPrice: types.NewInt(0), + getNtwkVersion: us.GetNtwkVersion, + pruneTrigger: make(chan struct{}, 1), + pruneCooldown: make(chan struct{}, 1), + blsSigCache: cache, + sigValCache: verifcache, + changes: lps.New(50), + localMsgs: namespace.Wrap(ds, datastore.NewKey(localMsgsDs)), + api: api, + netName: netName, + cfg: cfg, evtTypes: [...]journal.EventType{ evtTypeMpoolAdd: j.RegisterEventType("mpool", "add"), evtTypeMpoolRemove: j.RegisterEventType("mpool", "remove"), @@ -426,6 +432,27 @@ func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName, j journ return mp, nil } +func (mp *MessagePool) ForEachPendingMessage(f func(cid.Cid) error) error { + mp.lk.Lock() + defer mp.lk.Unlock() + + for _, mset := range mp.pending { + for _, m := range mset.msgs { + err := f(m.Cid()) + if err != nil { + return err + } + + err = f(m.Message.Cid()) + if err != nil { + return err + } + } + } + + return nil +} + func (mp *MessagePool) resolveToKey(ctx context.Context, addr address.Address) (address.Address, error) { // check the cache a, f := mp.keyCache[addr] diff --git a/chain/messagepool/messagepool_test.go b/chain/messagepool/messagepool_test.go index 
f271249dffd..e57212e7c14 100644 --- a/chain/messagepool/messagepool_test.go +++ b/chain/messagepool/messagepool_test.go @@ -105,6 +105,7 @@ func (tma *testMpoolAPI) SubscribeHeadChanges(cb func(rev, app []*types.TipSet) func (tma *testMpoolAPI) PutMessage(m types.ChainMsg) (cid.Cid, error) { return cid.Undef, nil } + func (tma *testMpoolAPI) IsLite() bool { return false } @@ -286,7 +287,7 @@ func TestCheckMessageBig(t *testing.T) { From: from, Value: types.NewInt(1), Nonce: 0, - GasLimit: 50000000, + GasLimit: 60000000, GasFeeCap: types.NewInt(100), GasPremium: types.NewInt(1), Params: make([]byte, 41<<10), // 41KiB payload diff --git a/chain/state/statetree.go b/chain/state/statetree.go index dbf150ecd65..8705aeff81b 100644 --- a/chain/state/statetree.go +++ b/chain/state/statetree.go @@ -547,7 +547,7 @@ func (st *StateTree) Version() types.StateTreeVersion { return st.version } -func Diff(oldTree, newTree *StateTree) (map[string]types.Actor, error) { +func Diff(ctx context.Context, oldTree, newTree *StateTree) (map[string]types.Actor, error) { out := map[string]types.Actor{} var ( @@ -555,33 +555,38 @@ func Diff(oldTree, newTree *StateTree) (map[string]types.Actor, error) { buf = bytes.NewReader(nil) ) if err := newTree.root.ForEach(&ncval, func(k string) error { - var act types.Actor - - addr, err := address.NewFromBytes([]byte(k)) - if err != nil { - return xerrors.Errorf("address in state tree was not valid: %w", err) - } + select { + case <-ctx.Done(): + return ctx.Err() + default: + var act types.Actor + + addr, err := address.NewFromBytes([]byte(k)) + if err != nil { + return xerrors.Errorf("address in state tree was not valid: %w", err) + } - found, err := oldTree.root.Get(abi.AddrKey(addr), &ocval) - if err != nil { - return err - } + found, err := oldTree.root.Get(abi.AddrKey(addr), &ocval) + if err != nil { + return err + } - if found && bytes.Equal(ocval.Raw, ncval.Raw) { - return nil // not changed - } + if found && bytes.Equal(ocval.Raw, ncval.Raw) { 
+ return nil // not changed + } - buf.Reset(ncval.Raw) - err = act.UnmarshalCBOR(buf) - buf.Reset(nil) + buf.Reset(ncval.Raw) + err = act.UnmarshalCBOR(buf) + buf.Reset(nil) - if err != nil { - return err - } + if err != nil { + return err + } - out[addr.String()] = act + out[addr.String()] = act - return nil + return nil + } }); err != nil { return nil, err } diff --git a/chain/stmgr/forks.go b/chain/stmgr/forks.go index bb87da44cf4..212272a95aa 100644 --- a/chain/stmgr/forks.go +++ b/chain/stmgr/forks.go @@ -4,42 +4,27 @@ import ( "bytes" "context" "encoding/binary" - "runtime" "sort" "sync" "time" - "github.com/filecoin-project/specs-actors/v5/actors/migration/nv13" - - "github.com/filecoin-project/go-state-types/rt" + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/network" - "github.com/filecoin-project/lotus/blockstore" - "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/go-state-types/rt" + + "github.com/filecoin-project/specs-actors/v3/actors/migration/nv10" + "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin" init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init" - "github.com/filecoin-project/lotus/chain/actors/builtin/multisig" "github.com/filecoin-project/lotus/chain/state" - "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" - builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" - miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" - multisig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig" - power0 "github.com/filecoin-project/specs-actors/actors/builtin/power" - "github.com/filecoin-project/specs-actors/actors/migration/nv3" - adt0 
"github.com/filecoin-project/specs-actors/actors/util/adt" - "github.com/filecoin-project/specs-actors/v2/actors/migration/nv4" - "github.com/filecoin-project/specs-actors/v2/actors/migration/nv7" - "github.com/filecoin-project/specs-actors/v3/actors/migration/nv10" - "github.com/filecoin-project/specs-actors/v4/actors/migration/nv12" - "github.com/ipfs/go-cid" - cbor "github.com/ipfs/go-ipld-cbor" - "golang.org/x/xerrors" ) // MigrationCache can be used to cache information used by a migration. This is primarily useful to @@ -125,117 +110,6 @@ func (ml migrationLogger) Log(level rt.LogLevel, msg string, args ...interface{} } } -func DefaultUpgradeSchedule() UpgradeSchedule { - var us UpgradeSchedule - - updates := []Upgrade{{ - Height: build.UpgradeBreezeHeight, - Network: network.Version1, - Migration: UpgradeFaucetBurnRecovery, - }, { - Height: build.UpgradeSmokeHeight, - Network: network.Version2, - Migration: nil, - }, { - Height: build.UpgradeIgnitionHeight, - Network: network.Version3, - Migration: UpgradeIgnition, - }, { - Height: build.UpgradeRefuelHeight, - Network: network.Version3, - Migration: UpgradeRefuel, - }, { - Height: build.UpgradeAssemblyHeight, - Network: network.Version4, - Expensive: true, - Migration: UpgradeActorsV2, - }, { - Height: build.UpgradeTapeHeight, - Network: network.Version5, - Migration: nil, - }, { - Height: build.UpgradeLiftoffHeight, - Network: network.Version5, - Migration: UpgradeLiftoff, - }, { - Height: build.UpgradeKumquatHeight, - Network: network.Version6, - Migration: nil, - }, { - Height: build.UpgradeCalicoHeight, - Network: network.Version7, - Migration: UpgradeCalico, - }, { - Height: build.UpgradePersianHeight, - Network: network.Version8, - Migration: nil, - }, { - Height: build.UpgradeOrangeHeight, - Network: network.Version9, - Migration: nil, - }, { - Height: build.UpgradeTrustHeight, - Network: network.Version10, - Migration: UpgradeActorsV3, - PreMigrations: []PreMigration{{ - PreMigration: 
PreUpgradeActorsV3, - StartWithin: 120, - DontStartWithin: 60, - StopWithin: 35, - }, { - PreMigration: PreUpgradeActorsV3, - StartWithin: 30, - DontStartWithin: 15, - StopWithin: 5, - }}, - Expensive: true, - }, { - Height: build.UpgradeNorwegianHeight, - Network: network.Version11, - Migration: nil, - }, { - Height: build.UpgradeTurboHeight, - Network: network.Version12, - Migration: UpgradeActorsV4, - PreMigrations: []PreMigration{{ - PreMigration: PreUpgradeActorsV4, - StartWithin: 120, - DontStartWithin: 60, - StopWithin: 35, - }, { - PreMigration: PreUpgradeActorsV4, - StartWithin: 30, - DontStartWithin: 15, - StopWithin: 5, - }}, - Expensive: true, - }, { - Height: build.UpgradeHyperdriveHeight, - Network: network.Version13, - Migration: UpgradeActorsV5, - PreMigrations: []PreMigration{{ - PreMigration: PreUpgradeActorsV5, - StartWithin: 120, - DontStartWithin: 60, - StopWithin: 35, - }, { - PreMigration: PreUpgradeActorsV5, - StartWithin: 30, - DontStartWithin: 15, - StopWithin: 5, - }}, - Expensive: true}} - - for _, u := range updates { - if u.Height < 0 { - // upgrade disabled - continue - } - us = append(us, u) - } - return us -} - func (us UpgradeSchedule) Validate() error { // Make sure each upgrade is valid. 
for _, u := range us { @@ -292,6 +166,18 @@ func (us UpgradeSchedule) Validate() error { return nil } +func (us UpgradeSchedule) GetNtwkVersion(e abi.ChainEpoch) (network.Version, error) { + // Traverse from newest to oldest returning upgrade active during epoch e + for i := len(us) - 1; i >= 0; i-- { + u := us[i] + // u.Height is the last epoch before u.Network becomes the active version + if u.Height < e { + return u.Network, nil + } + } + return network.Version0, xerrors.Errorf("Epoch %d has no defined network version", e) +} + func (sm *StateManager) handleStateForks(ctx context.Context, root cid.Cid, height abi.ChainEpoch, cb ExecMonitor, ts *types.TipSet) (cid.Cid, error) { retCid := root var err error @@ -472,469 +358,6 @@ func doTransfer(tree types.StateTree, from, to address.Address, amt abi.TokenAmo return nil } -func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ MigrationCache, em ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { - // Some initial parameters - FundsForMiners := types.FromFil(1_000_000) - LookbackEpoch := abi.ChainEpoch(32000) - AccountCap := types.FromFil(0) - BaseMinerBalance := types.FromFil(20) - DesiredReimbursementBalance := types.FromFil(5_000_000) - - isSystemAccount := func(addr address.Address) (bool, error) { - id, err := address.IDFromAddress(addr) - if err != nil { - return false, xerrors.Errorf("id address: %w", err) - } - - if id < 1000 { - return true, nil - } - return false, nil - } - - minerFundsAlloc := func(pow, tpow abi.StoragePower) abi.TokenAmount { - return types.BigDiv(types.BigMul(pow, FundsForMiners), tpow) - } - - // Grab lookback state for account checks - lbts, err := sm.ChainStore().GetTipsetByHeight(ctx, LookbackEpoch, ts, false) - if err != nil { - return cid.Undef, xerrors.Errorf("failed to get tipset at lookback height: %w", err) - } - - lbtree, err := sm.ParentState(lbts) - if err != nil { - return cid.Undef, xerrors.Errorf("loading state tree 
failed: %w", err) - } - - tree, err := sm.StateTree(root) - if err != nil { - return cid.Undef, xerrors.Errorf("getting state tree: %w", err) - } - - type transfer struct { - From address.Address - To address.Address - Amt abi.TokenAmount - } - - var transfers []transfer - subcalls := make([]types.ExecutionTrace, 0) - transferCb := func(trace types.ExecutionTrace) { - subcalls = append(subcalls, trace) - } - - // Take all excess funds away, put them into the reserve account - err = tree.ForEach(func(addr address.Address, act *types.Actor) error { - switch act.Code { - case builtin0.AccountActorCodeID, builtin0.MultisigActorCodeID, builtin0.PaymentChannelActorCodeID: - sysAcc, err := isSystemAccount(addr) - if err != nil { - return xerrors.Errorf("checking system account: %w", err) - } - - if !sysAcc { - transfers = append(transfers, transfer{ - From: addr, - To: builtin.ReserveAddress, - Amt: act.Balance, - }) - } - case builtin0.StorageMinerActorCodeID: - var st miner0.State - if err := sm.ChainStore().ActorStore(ctx).Get(ctx, act.Head, &st); err != nil { - return xerrors.Errorf("failed to load miner state: %w", err) - } - - var available abi.TokenAmount - { - defer func() { - if err := recover(); err != nil { - log.Warnf("Get available balance failed (%s, %s, %s): %s", addr, act.Head, act.Balance, err) - } - available = abi.NewTokenAmount(0) - }() - // this panics if the miner doesnt have enough funds to cover their locked pledge - available = st.GetAvailableBalance(act.Balance) - } - - if !available.IsZero() { - transfers = append(transfers, transfer{ - From: addr, - To: builtin.ReserveAddress, - Amt: available, - }) - } - } - return nil - }) - if err != nil { - return cid.Undef, xerrors.Errorf("foreach over state tree failed: %w", err) - } - - // Execute transfers from previous step - for _, t := range transfers { - if err := doTransfer(tree, t.From, t.To, t.Amt, transferCb); err != nil { - return cid.Undef, xerrors.Errorf("transfer %s %s->%s failed: %w", 
t.Amt, t.From, t.To, err) - } - } - - // pull up power table to give miners back some funds proportional to their power - var ps power0.State - powAct, err := tree.GetActor(builtin0.StoragePowerActorAddr) - if err != nil { - return cid.Undef, xerrors.Errorf("failed to load power actor: %w", err) - } - - cst := cbor.NewCborStore(sm.ChainStore().StateBlockstore()) - if err := cst.Get(ctx, powAct.Head, &ps); err != nil { - return cid.Undef, xerrors.Errorf("failed to get power actor state: %w", err) - } - - totalPower := ps.TotalBytesCommitted - - var transfersBack []transfer - // Now, we return some funds to places where they are needed - err = tree.ForEach(func(addr address.Address, act *types.Actor) error { - lbact, err := lbtree.GetActor(addr) - if err != nil { - if !xerrors.Is(err, types.ErrActorNotFound) { - return xerrors.Errorf("failed to get actor in lookback state") - } - } - - prevBalance := abi.NewTokenAmount(0) - if lbact != nil { - prevBalance = lbact.Balance - } - - switch act.Code { - case builtin0.AccountActorCodeID, builtin0.MultisigActorCodeID, builtin0.PaymentChannelActorCodeID: - nbalance := big.Min(prevBalance, AccountCap) - if nbalance.Sign() != 0 { - transfersBack = append(transfersBack, transfer{ - From: builtin.ReserveAddress, - To: addr, - Amt: nbalance, - }) - } - case builtin0.StorageMinerActorCodeID: - var st miner0.State - if err := sm.ChainStore().ActorStore(ctx).Get(ctx, act.Head, &st); err != nil { - return xerrors.Errorf("failed to load miner state: %w", err) - } - - var minfo miner0.MinerInfo - if err := cst.Get(ctx, st.Info, &minfo); err != nil { - return xerrors.Errorf("failed to get miner info: %w", err) - } - - sectorsArr, err := adt0.AsArray(sm.ChainStore().ActorStore(ctx), st.Sectors) - if err != nil { - return xerrors.Errorf("failed to load sectors array: %w", err) - } - - slen := sectorsArr.Length() - - power := types.BigMul(types.NewInt(slen), types.NewInt(uint64(minfo.SectorSize))) - - mfunds := minerFundsAlloc(power, 
totalPower) - transfersBack = append(transfersBack, transfer{ - From: builtin.ReserveAddress, - To: minfo.Worker, - Amt: mfunds, - }) - - // Now make sure to give each miner who had power at the lookback some FIL - lbact, err := lbtree.GetActor(addr) - if err == nil { - var lbst miner0.State - if err := sm.ChainStore().ActorStore(ctx).Get(ctx, lbact.Head, &lbst); err != nil { - return xerrors.Errorf("failed to load miner state: %w", err) - } - - lbsectors, err := adt0.AsArray(sm.ChainStore().ActorStore(ctx), lbst.Sectors) - if err != nil { - return xerrors.Errorf("failed to load lb sectors array: %w", err) - } - - if lbsectors.Length() > 0 { - transfersBack = append(transfersBack, transfer{ - From: builtin.ReserveAddress, - To: minfo.Worker, - Amt: BaseMinerBalance, - }) - } - - } else { - log.Warnf("failed to get miner in lookback state: %s", err) - } - } - return nil - }) - if err != nil { - return cid.Undef, xerrors.Errorf("foreach over state tree failed: %w", err) - } - - for _, t := range transfersBack { - if err := doTransfer(tree, t.From, t.To, t.Amt, transferCb); err != nil { - return cid.Undef, xerrors.Errorf("transfer %s %s->%s failed: %w", t.Amt, t.From, t.To, err) - } - } - - // transfer all burnt funds back to the reserve account - burntAct, err := tree.GetActor(builtin0.BurntFundsActorAddr) - if err != nil { - return cid.Undef, xerrors.Errorf("failed to load burnt funds actor: %w", err) - } - if err := doTransfer(tree, builtin0.BurntFundsActorAddr, builtin.ReserveAddress, burntAct.Balance, transferCb); err != nil { - return cid.Undef, xerrors.Errorf("failed to unburn funds: %w", err) - } - - // Top up the reimbursement service - reimbAddr, err := address.NewFromString("t0111") - if err != nil { - return cid.Undef, xerrors.Errorf("failed to parse reimbursement service address") - } - - reimb, err := tree.GetActor(reimbAddr) - if err != nil { - return cid.Undef, xerrors.Errorf("failed to load reimbursement account actor: %w", err) - } - - difference := 
types.BigSub(DesiredReimbursementBalance, reimb.Balance) - if err := doTransfer(tree, builtin.ReserveAddress, reimbAddr, difference, transferCb); err != nil { - return cid.Undef, xerrors.Errorf("failed to top up reimbursement account: %w", err) - } - - // Now, a final sanity check to make sure the balances all check out - total := abi.NewTokenAmount(0) - err = tree.ForEach(func(addr address.Address, act *types.Actor) error { - total = types.BigAdd(total, act.Balance) - return nil - }) - if err != nil { - return cid.Undef, xerrors.Errorf("checking final state balance failed: %w", err) - } - - exp := types.FromFil(build.FilBase) - if !exp.Equals(total) { - return cid.Undef, xerrors.Errorf("resultant state tree account balance was not correct: %s", total) - } - - if em != nil { - // record the transfer in execution traces - - fakeMsg := makeFakeMsg(builtin.SystemActorAddr, builtin.SystemActorAddr, big.Zero(), uint64(epoch)) - - if err := em.MessageApplied(ctx, ts, fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{ - MessageReceipt: *makeFakeRct(), - ActorErr: nil, - ExecutionTrace: types.ExecutionTrace{ - Msg: fakeMsg, - MsgRct: makeFakeRct(), - Error: "", - Duration: 0, - GasCharges: nil, - Subcalls: subcalls, - }, - Duration: 0, - GasCosts: nil, - }, false); err != nil { - return cid.Undef, xerrors.Errorf("recording transfers: %w", err) - } - } - - return tree.Flush(ctx) -} - -func UpgradeIgnition(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { - store := sm.cs.ActorStore(ctx) - - if build.UpgradeLiftoffHeight <= epoch { - return cid.Undef, xerrors.Errorf("liftoff height must be beyond ignition height") - } - - nst, err := nv3.MigrateStateTree(ctx, store, root, epoch) - if err != nil { - return cid.Undef, xerrors.Errorf("migrating actors state: %w", err) - } - - tree, err := sm.StateTree(nst) - if err != nil { - return cid.Undef, xerrors.Errorf("getting state tree: %w", err) - } - - 
err = setNetworkName(ctx, store, tree, "ignition") - if err != nil { - return cid.Undef, xerrors.Errorf("setting network name: %w", err) - } - - split1, err := address.NewFromString("t0115") - if err != nil { - return cid.Undef, xerrors.Errorf("first split address: %w", err) - } - - split2, err := address.NewFromString("t0116") - if err != nil { - return cid.Undef, xerrors.Errorf("second split address: %w", err) - } - - err = resetGenesisMsigs0(ctx, sm, store, tree, build.UpgradeLiftoffHeight) - if err != nil { - return cid.Undef, xerrors.Errorf("resetting genesis msig start epochs: %w", err) - } - - err = splitGenesisMultisig0(ctx, cb, split1, store, tree, 50, epoch, ts) - if err != nil { - return cid.Undef, xerrors.Errorf("splitting first msig: %w", err) - } - - err = splitGenesisMultisig0(ctx, cb, split2, store, tree, 50, epoch, ts) - if err != nil { - return cid.Undef, xerrors.Errorf("splitting second msig: %w", err) - } - - err = nv3.CheckStateTree(ctx, store, nst, epoch, builtin0.TotalFilecoin) - if err != nil { - return cid.Undef, xerrors.Errorf("sanity check after ignition upgrade failed: %w", err) - } - - return tree.Flush(ctx) -} - -func UpgradeRefuel(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { - - store := sm.cs.ActorStore(ctx) - tree, err := sm.StateTree(root) - if err != nil { - return cid.Undef, xerrors.Errorf("getting state tree: %w", err) - } - - err = resetMultisigVesting0(ctx, store, tree, builtin.SaftAddress, 0, 0, big.Zero()) - if err != nil { - return cid.Undef, xerrors.Errorf("tweaking msig vesting: %w", err) - } - - err = resetMultisigVesting0(ctx, store, tree, builtin.ReserveAddress, 0, 0, big.Zero()) - if err != nil { - return cid.Undef, xerrors.Errorf("tweaking msig vesting: %w", err) - } - - err = resetMultisigVesting0(ctx, store, tree, builtin.RootVerifierAddress, 0, 0, big.Zero()) - if err != nil { - return cid.Undef, 
xerrors.Errorf("tweaking msig vesting: %w", err) - } - - return tree.Flush(ctx) -} - -func UpgradeActorsV2(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { - buf := blockstore.NewTieredBstore(sm.cs.StateBlockstore(), blockstore.NewMemorySync()) - store := store.ActorStore(ctx, buf) - - info, err := store.Put(ctx, new(types.StateInfo0)) - if err != nil { - return cid.Undef, xerrors.Errorf("failed to create new state info for actors v2: %w", err) - } - - newHamtRoot, err := nv4.MigrateStateTree(ctx, store, root, epoch, nv4.DefaultConfig()) - if err != nil { - return cid.Undef, xerrors.Errorf("upgrading to actors v2: %w", err) - } - - newRoot, err := store.Put(ctx, &types.StateRoot{ - Version: types.StateTreeVersion1, - Actors: newHamtRoot, - Info: info, - }) - if err != nil { - return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err) - } - - // perform some basic sanity checks to make sure everything still works. 
- if newSm, err := state.LoadStateTree(store, newRoot); err != nil { - return cid.Undef, xerrors.Errorf("state tree sanity load failed: %w", err) - } else if newRoot2, err := newSm.Flush(ctx); err != nil { - return cid.Undef, xerrors.Errorf("state tree sanity flush failed: %w", err) - } else if newRoot2 != newRoot { - return cid.Undef, xerrors.Errorf("state-root mismatch: %s != %s", newRoot, newRoot2) - } else if _, err := newSm.GetActor(builtin0.InitActorAddr); err != nil { - return cid.Undef, xerrors.Errorf("failed to load init actor after upgrade: %w", err) - } - - { - from := buf - to := buf.Read() - - if err := vm.Copy(ctx, from, to, newRoot); err != nil { - return cid.Undef, xerrors.Errorf("copying migrated tree: %w", err) - } - } - - return newRoot, nil -} - -func UpgradeLiftoff(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { - tree, err := sm.StateTree(root) - if err != nil { - return cid.Undef, xerrors.Errorf("getting state tree: %w", err) - } - - err = setNetworkName(ctx, sm.cs.ActorStore(ctx), tree, "mainnet") - if err != nil { - return cid.Undef, xerrors.Errorf("setting network name: %w", err) - } - - return tree.Flush(ctx) -} - -func UpgradeCalico(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { - if build.BuildType != build.BuildMainnet { - return root, nil - } - - store := sm.cs.ActorStore(ctx) - var stateRoot types.StateRoot - if err := store.Get(ctx, root, &stateRoot); err != nil { - return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err) - } - - if stateRoot.Version != types.StateTreeVersion1 { - return cid.Undef, xerrors.Errorf( - "expected state root version 1 for calico upgrade, got %d", - stateRoot.Version, - ) - } - - newHamtRoot, err := nv7.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, nv7.DefaultConfig()) - if err != nil { - 
return cid.Undef, xerrors.Errorf("running nv7 migration: %w", err) - } - - newRoot, err := store.Put(ctx, &types.StateRoot{ - Version: stateRoot.Version, - Actors: newHamtRoot, - Info: stateRoot.Info, - }) - if err != nil { - return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err) - } - - // perform some basic sanity checks to make sure everything still works. - if newSm, err := state.LoadStateTree(store, newRoot); err != nil { - return cid.Undef, xerrors.Errorf("state tree sanity load failed: %w", err) - } else if newRoot2, err := newSm.Flush(ctx); err != nil { - return cid.Undef, xerrors.Errorf("state tree sanity flush failed: %w", err) - } else if newRoot2 != newRoot { - return cid.Undef, xerrors.Errorf("state-root mismatch: %s != %s", newRoot, newRoot2) - } else if _, err := newSm.GetActor(builtin0.InitActorAddr); err != nil { - return cid.Undef, xerrors.Errorf("failed to load init actor after upgrade: %w", err) - } - - return newRoot, nil -} - func terminateActor(ctx context.Context, tree *state.StateTree, addr address.Address, em ExecMonitor, epoch abi.ChainEpoch, ts *types.TipSet) error { a, err := tree.GetActor(addr) if xerrors.Is(err, types.ErrActorNotFound) { @@ -995,282 +418,8 @@ func terminateActor(ctx context.Context, tree *state.StateTree, addr address.Add return tree.SetActor(init_.Address, ia) } -func UpgradeActorsV3(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { - // Use all the CPUs except 3. 
- workerCount := runtime.NumCPU() - 3 - if workerCount <= 0 { - workerCount = 1 - } - - config := nv10.Config{ - MaxWorkers: uint(workerCount), - JobQueueSize: 1000, - ResultQueueSize: 100, - ProgressLogPeriod: 10 * time.Second, - } - newRoot, err := upgradeActorsV3Common(ctx, sm, cache, root, epoch, ts, config) - if err != nil { - return cid.Undef, xerrors.Errorf("migrating actors v3 state: %w", err) - } - - tree, err := sm.StateTree(newRoot) - if err != nil { - return cid.Undef, xerrors.Errorf("getting state tree: %w", err) - } - - if build.BuildType == build.BuildMainnet { - err := terminateActor(ctx, tree, build.ZeroAddress, cb, epoch, ts) - if err != nil && !xerrors.Is(err, types.ErrActorNotFound) { - return cid.Undef, xerrors.Errorf("deleting zero bls actor: %w", err) - } - - newRoot, err = tree.Flush(ctx) - if err != nil { - return cid.Undef, xerrors.Errorf("flushing state tree: %w", err) - } - } - - return newRoot, nil -} - -func PreUpgradeActorsV3(ctx context.Context, sm *StateManager, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error { - // Use half the CPUs for pre-migration, but leave at least 3. - workerCount := runtime.NumCPU() - if workerCount <= 4 { - workerCount = 1 - } else { - workerCount /= 2 - } - config := nv10.Config{MaxWorkers: uint(workerCount)} - _, err := upgradeActorsV3Common(ctx, sm, cache, root, epoch, ts, config) - return err -} - -func upgradeActorsV3Common( - ctx context.Context, sm *StateManager, cache MigrationCache, - root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet, - config nv10.Config, -) (cid.Cid, error) { - buf := blockstore.NewTieredBstore(sm.cs.StateBlockstore(), blockstore.NewMemorySync()) - store := store.ActorStore(ctx, buf) - - // Load the state root. 
- var stateRoot types.StateRoot - if err := store.Get(ctx, root, &stateRoot); err != nil { - return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err) - } - - if stateRoot.Version != types.StateTreeVersion1 { - return cid.Undef, xerrors.Errorf( - "expected state root version 1 for actors v3 upgrade, got %d", - stateRoot.Version, - ) - } - - // Perform the migration - newHamtRoot, err := nv10.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache) - if err != nil { - return cid.Undef, xerrors.Errorf("upgrading to actors v3: %w", err) - } - - // Persist the result. - newRoot, err := store.Put(ctx, &types.StateRoot{ - Version: types.StateTreeVersion2, - Actors: newHamtRoot, - Info: stateRoot.Info, - }) - if err != nil { - return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err) - } - - // Persist the new tree. - - { - from := buf - to := buf.Read() - - if err := vm.Copy(ctx, from, to, newRoot); err != nil { - return cid.Undef, xerrors.Errorf("copying migrated tree: %w", err) - } - } - - return newRoot, nil -} - -func UpgradeActorsV4(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { - // Use all the CPUs except 3. - workerCount := runtime.NumCPU() - 3 - if workerCount <= 0 { - workerCount = 1 - } - - config := nv12.Config{ - MaxWorkers: uint(workerCount), - JobQueueSize: 1000, - ResultQueueSize: 100, - ProgressLogPeriod: 10 * time.Second, - } - - newRoot, err := upgradeActorsV4Common(ctx, sm, cache, root, epoch, ts, config) - if err != nil { - return cid.Undef, xerrors.Errorf("migrating actors v4 state: %w", err) - } - - return newRoot, nil -} - -func PreUpgradeActorsV4(ctx context.Context, sm *StateManager, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error { - // Use half the CPUs for pre-migration, but leave at least 3. 
- workerCount := runtime.NumCPU() - if workerCount <= 4 { - workerCount = 1 - } else { - workerCount /= 2 - } - config := nv12.Config{MaxWorkers: uint(workerCount)} - _, err := upgradeActorsV4Common(ctx, sm, cache, root, epoch, ts, config) - return err -} - -func upgradeActorsV4Common( - ctx context.Context, sm *StateManager, cache MigrationCache, - root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet, - config nv12.Config, -) (cid.Cid, error) { - buf := blockstore.NewTieredBstore(sm.cs.StateBlockstore(), blockstore.NewMemorySync()) - store := store.ActorStore(ctx, buf) - - // Load the state root. - var stateRoot types.StateRoot - if err := store.Get(ctx, root, &stateRoot); err != nil { - return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err) - } - - if stateRoot.Version != types.StateTreeVersion2 { - return cid.Undef, xerrors.Errorf( - "expected state root version 2 for actors v4 upgrade, got %d", - stateRoot.Version, - ) - } - - // Perform the migration - newHamtRoot, err := nv12.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache) - if err != nil { - return cid.Undef, xerrors.Errorf("upgrading to actors v4: %w", err) - } - - // Persist the result. - newRoot, err := store.Put(ctx, &types.StateRoot{ - Version: types.StateTreeVersion3, - Actors: newHamtRoot, - Info: stateRoot.Info, - }) - if err != nil { - return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err) - } - - // Persist the new tree. - - { - from := buf - to := buf.Read() - - if err := vm.Copy(ctx, from, to, newRoot); err != nil { - return cid.Undef, xerrors.Errorf("copying migrated tree: %w", err) - } - } - - return newRoot, nil -} - -func UpgradeActorsV5(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { - // Use all the CPUs except 3. 
- workerCount := runtime.NumCPU() - 3 - if workerCount <= 0 { - workerCount = 1 - } - - config := nv13.Config{ - MaxWorkers: uint(workerCount), - JobQueueSize: 1000, - ResultQueueSize: 100, - ProgressLogPeriod: 10 * time.Second, - } - - newRoot, err := upgradeActorsV5Common(ctx, sm, cache, root, epoch, ts, config) - if err != nil { - return cid.Undef, xerrors.Errorf("migrating actors v5 state: %w", err) - } - - return newRoot, nil -} - -func PreUpgradeActorsV5(ctx context.Context, sm *StateManager, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error { - // Use half the CPUs for pre-migration, but leave at least 3. - workerCount := runtime.NumCPU() - if workerCount <= 4 { - workerCount = 1 - } else { - workerCount /= 2 - } - config := nv13.Config{MaxWorkers: uint(workerCount)} - _, err := upgradeActorsV5Common(ctx, sm, cache, root, epoch, ts, config) - return err -} - -func upgradeActorsV5Common( - ctx context.Context, sm *StateManager, cache MigrationCache, - root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet, - config nv13.Config, -) (cid.Cid, error) { - buf := blockstore.NewTieredBstore(sm.cs.StateBlockstore(), blockstore.NewMemorySync()) - store := store.ActorStore(ctx, buf) - - // Load the state root. - var stateRoot types.StateRoot - if err := store.Get(ctx, root, &stateRoot); err != nil { - return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err) - } - - if stateRoot.Version != types.StateTreeVersion3 { - return cid.Undef, xerrors.Errorf( - "expected state root version 3 for actors v5 upgrade, got %d", - stateRoot.Version, - ) - } - - // Perform the migration - newHamtRoot, err := nv13.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache) - if err != nil { - return cid.Undef, xerrors.Errorf("upgrading to actors v5: %w", err) - } - - // Persist the result. 
- newRoot, err := store.Put(ctx, &types.StateRoot{ - Version: types.StateTreeVersion4, - Actors: newHamtRoot, - Info: stateRoot.Info, - }) - if err != nil { - return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err) - } - - // Persist the new tree. - - { - from := buf - to := buf.Read() - - if err := vm.Copy(ctx, from, to, newRoot); err != nil { - return cid.Undef, xerrors.Errorf("copying migrated tree: %w", err) - } - } - - return newRoot, nil -} - func setNetworkName(ctx context.Context, store adt.Store, tree *state.StateTree, name string) error { - ia, err := tree.GetActor(builtin0.InitActorAddr) + ia, err := tree.GetActor(init_.Address) if err != nil { return xerrors.Errorf("getting init actor: %w", err) } @@ -1289,136 +438,13 @@ func setNetworkName(ctx context.Context, store adt.Store, tree *state.StateTree, return xerrors.Errorf("writing new init state: %w", err) } - if err := tree.SetActor(builtin0.InitActorAddr, ia); err != nil { + if err := tree.SetActor(init_.Address, ia); err != nil { return xerrors.Errorf("setting init actor: %w", err) } return nil } -func splitGenesisMultisig0(ctx context.Context, em ExecMonitor, addr address.Address, store adt0.Store, tree *state.StateTree, portions uint64, epoch abi.ChainEpoch, ts *types.TipSet) error { - if portions < 1 { - return xerrors.Errorf("cannot split into 0 portions") - } - - mact, err := tree.GetActor(addr) - if err != nil { - return xerrors.Errorf("getting msig actor: %w", err) - } - - mst, err := multisig.Load(store, mact) - if err != nil { - return xerrors.Errorf("getting msig state: %w", err) - } - - signers, err := mst.Signers() - if err != nil { - return xerrors.Errorf("getting msig signers: %w", err) - } - - thresh, err := mst.Threshold() - if err != nil { - return xerrors.Errorf("getting msig threshold: %w", err) - } - - ibal, err := mst.InitialBalance() - if err != nil { - return xerrors.Errorf("getting msig initial balance: %w", err) - } - - se, err := mst.StartEpoch() - if 
err != nil { - return xerrors.Errorf("getting msig start epoch: %w", err) - } - - ud, err := mst.UnlockDuration() - if err != nil { - return xerrors.Errorf("getting msig unlock duration: %w", err) - } - - pending, err := adt0.MakeEmptyMap(store).Root() - if err != nil { - return xerrors.Errorf("failed to create empty map: %w", err) - } - - newIbal := big.Div(ibal, types.NewInt(portions)) - newState := &multisig0.State{ - Signers: signers, - NumApprovalsThreshold: thresh, - NextTxnID: 0, - InitialBalance: newIbal, - StartEpoch: se, - UnlockDuration: ud, - PendingTxns: pending, - } - - scid, err := store.Put(ctx, newState) - if err != nil { - return xerrors.Errorf("storing new state: %w", err) - } - - newActor := types.Actor{ - Code: builtin0.MultisigActorCodeID, - Head: scid, - Nonce: 0, - Balance: big.Zero(), - } - - i := uint64(0) - subcalls := make([]types.ExecutionTrace, 0, portions) - transferCb := func(trace types.ExecutionTrace) { - subcalls = append(subcalls, trace) - } - - for i < portions { - keyAddr, err := makeKeyAddr(addr, i) - if err != nil { - return xerrors.Errorf("creating key address: %w", err) - } - - idAddr, err := tree.RegisterNewAddress(keyAddr) - if err != nil { - return xerrors.Errorf("registering new address: %w", err) - } - - err = tree.SetActor(idAddr, &newActor) - if err != nil { - return xerrors.Errorf("setting new msig actor state: %w", err) - } - - if err := doTransfer(tree, addr, idAddr, newIbal, transferCb); err != nil { - return xerrors.Errorf("transferring split msig balance: %w", err) - } - - i++ - } - - if em != nil { - // record the transfer in execution traces - - fakeMsg := makeFakeMsg(builtin.SystemActorAddr, addr, big.Zero(), uint64(epoch)) - - if err := em.MessageApplied(ctx, ts, fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{ - MessageReceipt: *makeFakeRct(), - ActorErr: nil, - ExecutionTrace: types.ExecutionTrace{ - Msg: fakeMsg, - MsgRct: makeFakeRct(), - Error: "", - Duration: 0, - GasCharges: nil, - Subcalls: subcalls, - }, - 
Duration: 0, - GasCosts: nil, - }, false); err != nil { - return xerrors.Errorf("recording transfers: %w", err) - } - } - - return nil -} - func makeKeyAddr(splitAddr address.Address, count uint64) (address.Address, error) { var b bytes.Buffer if err := splitAddr.MarshalCBOR(&b); err != nil { @@ -1441,88 +467,6 @@ func makeKeyAddr(splitAddr address.Address, count uint64) (address.Address, erro return addr, nil } -// TODO: After the Liftoff epoch, refactor this to use resetMultisigVesting -func resetGenesisMsigs0(ctx context.Context, sm *StateManager, store adt0.Store, tree *state.StateTree, startEpoch abi.ChainEpoch) error { - gb, err := sm.cs.GetGenesis() - if err != nil { - return xerrors.Errorf("getting genesis block: %w", err) - } - - gts, err := types.NewTipSet([]*types.BlockHeader{gb}) - if err != nil { - return xerrors.Errorf("getting genesis tipset: %w", err) - } - - cst := cbor.NewCborStore(sm.cs.StateBlockstore()) - genesisTree, err := state.LoadStateTree(cst, gts.ParentState()) - if err != nil { - return xerrors.Errorf("loading state tree: %w", err) - } - - err = genesisTree.ForEach(func(addr address.Address, genesisActor *types.Actor) error { - if genesisActor.Code == builtin0.MultisigActorCodeID { - currActor, err := tree.GetActor(addr) - if err != nil { - return xerrors.Errorf("loading actor: %w", err) - } - - var currState multisig0.State - if err := store.Get(ctx, currActor.Head, &currState); err != nil { - return xerrors.Errorf("reading multisig state: %w", err) - } - - currState.StartEpoch = startEpoch - - currActor.Head, err = store.Put(ctx, &currState) - if err != nil { - return xerrors.Errorf("writing new multisig state: %w", err) - } - - if err := tree.SetActor(addr, currActor); err != nil { - return xerrors.Errorf("setting multisig actor: %w", err) - } - } - return nil - }) - - if err != nil { - return xerrors.Errorf("iterating over genesis actors: %w", err) - } - - return nil -} - -func resetMultisigVesting0(ctx context.Context, store 
adt0.Store, tree *state.StateTree, addr address.Address, startEpoch abi.ChainEpoch, duration abi.ChainEpoch, balance abi.TokenAmount) error { - act, err := tree.GetActor(addr) - if err != nil { - return xerrors.Errorf("getting actor: %w", err) - } - - if !builtin.IsMultisigActor(act.Code) { - return xerrors.Errorf("actor wasn't msig: %w", err) - } - - var msigState multisig0.State - if err := store.Get(ctx, act.Head, &msigState); err != nil { - return xerrors.Errorf("reading multisig state: %w", err) - } - - msigState.StartEpoch = startEpoch - msigState.UnlockDuration = duration - msigState.InitialBalance = balance - - act.Head, err = store.Put(ctx, &msigState) - if err != nil { - return xerrors.Errorf("writing new multisig state: %w", err) - } - - if err := tree.SetActor(addr, act); err != nil { - return xerrors.Errorf("setting multisig actor: %w", err) - } - - return nil -} - func makeFakeMsg(from address.Address, to address.Address, amt abi.TokenAmount, nonce uint64) *types.Message { return &types.Message{ From: from, diff --git a/chain/stmgr/forks_test.go b/chain/stmgr/forks_test.go index dd2e47a57ab..9caeee51f0c 100644 --- a/chain/stmgr/forks_test.go +++ b/chain/stmgr/forks_test.go @@ -17,6 +17,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/cbor" + "github.com/filecoin-project/go-state-types/network" builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" init2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/init" @@ -121,7 +122,7 @@ func TestForkHeightTriggers(t *testing.T) { sm, err := NewStateManagerWithUpgradeSchedule( cg.ChainStore(), UpgradeSchedule{{ - Network: 1, + Network: network.Version1, Height: testForkHeight, Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { @@ -250,7 +251,7 @@ func TestForkRefuseCall(t 
*testing.T) { sm, err := NewStateManagerWithUpgradeSchedule( cg.ChainStore(), UpgradeSchedule{{ - Network: 1, + Network: network.Version1, Expensive: true, Height: testForkHeight, Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, @@ -297,22 +298,26 @@ func TestForkRefuseCall(t *testing.T) { t.Fatal(err) } + pts, err := cg.ChainStore().LoadTipSet(ts.TipSet.TipSet().Parents()) + require.NoError(t, err) + parentHeight := pts.Height() + currentHeight := ts.TipSet.TipSet().Height() + + // CallWithGas calls _at_ the current tipset. ret, err := sm.CallWithGas(ctx, m, nil, ts.TipSet.TipSet()) - switch ts.TipSet.TipSet().Height() { - case testForkHeight, testForkHeight + 1: + if parentHeight <= testForkHeight && currentHeight >= testForkHeight { // If I had a fork, or I _will_ have a fork, it should fail. require.Equal(t, ErrExpensiveFork, err) - default: + } else { require.NoError(t, err) require.True(t, ret.MsgRct.ExitCode.IsSuccess()) } - // Call just runs on the parent state for a tipset, so we only - // expect an error at the fork height. + + // Call always applies the message to the "next block" after the tipset's parent state. 
ret, err = sm.Call(ctx, m, ts.TipSet.TipSet()) - switch ts.TipSet.TipSet().Height() { - case testForkHeight + 1: + if parentHeight == testForkHeight { require.Equal(t, ErrExpensiveFork, err) - default: + } else { require.NoError(t, err) require.True(t, ret.MsgRct.ExitCode.IsSuccess()) } @@ -361,7 +366,7 @@ func TestForkPreMigration(t *testing.T) { sm, err := NewStateManagerWithUpgradeSchedule( cg.ChainStore(), UpgradeSchedule{{ - Network: 1, + Network: network.Version1, Height: testForkHeight, Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { diff --git a/chain/stmgr/upgrades.go b/chain/stmgr/upgrades.go new file mode 100644 index 00000000000..d2ccbad39a4 --- /dev/null +++ b/chain/stmgr/upgrades.go @@ -0,0 +1,1090 @@ +package stmgr + +import ( + "context" + "runtime" + "time" + + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/network" + + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" + multisig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig" + power0 "github.com/filecoin-project/specs-actors/actors/builtin/power" + "github.com/filecoin-project/specs-actors/actors/migration/nv3" + adt0 "github.com/filecoin-project/specs-actors/actors/util/adt" + "github.com/filecoin-project/specs-actors/v2/actors/migration/nv4" + "github.com/filecoin-project/specs-actors/v2/actors/migration/nv7" + "github.com/filecoin-project/specs-actors/v3/actors/migration/nv10" + "github.com/filecoin-project/specs-actors/v4/actors/migration/nv12" + "github.com/filecoin-project/specs-actors/v5/actors/migration/nv13" + + 
"github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors/builtin/multisig" + "github.com/filecoin-project/lotus/chain/state" + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/vm" +) + +func DefaultUpgradeSchedule() UpgradeSchedule { + var us UpgradeSchedule + + updates := []Upgrade{{ + Height: build.UpgradeBreezeHeight, + Network: network.Version1, + Migration: UpgradeFaucetBurnRecovery, + }, { + Height: build.UpgradeSmokeHeight, + Network: network.Version2, + Migration: nil, + }, { + Height: build.UpgradeIgnitionHeight, + Network: network.Version3, + Migration: UpgradeIgnition, + }, { + Height: build.UpgradeRefuelHeight, + Network: network.Version3, + Migration: UpgradeRefuel, + }, { + Height: build.UpgradeAssemblyHeight, + Network: network.Version4, + Expensive: true, + Migration: UpgradeActorsV2, + }, { + Height: build.UpgradeTapeHeight, + Network: network.Version5, + Migration: nil, + }, { + Height: build.UpgradeLiftoffHeight, + Network: network.Version5, + Migration: UpgradeLiftoff, + }, { + Height: build.UpgradeKumquatHeight, + Network: network.Version6, + Migration: nil, + }, { + Height: build.UpgradeCalicoHeight, + Network: network.Version7, + Migration: UpgradeCalico, + }, { + Height: build.UpgradePersianHeight, + Network: network.Version8, + Migration: nil, + }, { + Height: build.UpgradeOrangeHeight, + Network: network.Version9, + Migration: nil, + }, { + Height: build.UpgradeTrustHeight, + Network: network.Version10, + Migration: UpgradeActorsV3, + PreMigrations: []PreMigration{{ + PreMigration: PreUpgradeActorsV3, + StartWithin: 120, + DontStartWithin: 60, + StopWithin: 35, + }, { + PreMigration: PreUpgradeActorsV3, + StartWithin: 30, + DontStartWithin: 15, + StopWithin: 5, + }}, + Expensive: true, + }, { + Height: 
build.UpgradeNorwegianHeight, + Network: network.Version11, + Migration: nil, + }, { + Height: build.UpgradeTurboHeight, + Network: network.Version12, + Migration: UpgradeActorsV4, + PreMigrations: []PreMigration{{ + PreMigration: PreUpgradeActorsV4, + StartWithin: 120, + DontStartWithin: 60, + StopWithin: 35, + }, { + PreMigration: PreUpgradeActorsV4, + StartWithin: 30, + DontStartWithin: 15, + StopWithin: 5, + }}, + Expensive: true, + }, { + Height: build.UpgradeHyperdriveHeight, + Network: network.Version13, + Migration: UpgradeActorsV5, + PreMigrations: []PreMigration{{ + PreMigration: PreUpgradeActorsV5, + StartWithin: 120, + DontStartWithin: 60, + StopWithin: 35, + }, { + PreMigration: PreUpgradeActorsV5, + StartWithin: 30, + DontStartWithin: 15, + StopWithin: 5, + }}, + Expensive: true}} + + for _, u := range updates { + if u.Height < 0 { + // upgrade disabled + continue + } + us = append(us, u) + } + return us +} + +func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ MigrationCache, em ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { + // Some initial parameters + FundsForMiners := types.FromFil(1_000_000) + LookbackEpoch := abi.ChainEpoch(32000) + AccountCap := types.FromFil(0) + BaseMinerBalance := types.FromFil(20) + DesiredReimbursementBalance := types.FromFil(5_000_000) + + isSystemAccount := func(addr address.Address) (bool, error) { + id, err := address.IDFromAddress(addr) + if err != nil { + return false, xerrors.Errorf("id address: %w", err) + } + + if id < 1000 { + return true, nil + } + return false, nil + } + + minerFundsAlloc := func(pow, tpow abi.StoragePower) abi.TokenAmount { + return types.BigDiv(types.BigMul(pow, FundsForMiners), tpow) + } + + // Grab lookback state for account checks + lbts, err := sm.ChainStore().GetTipsetByHeight(ctx, LookbackEpoch, ts, false) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to get tipset at lookback height: %w", err) + } + + lbtree, 
err := sm.ParentState(lbts) + if err != nil { + return cid.Undef, xerrors.Errorf("loading state tree failed: %w", err) + } + + tree, err := sm.StateTree(root) + if err != nil { + return cid.Undef, xerrors.Errorf("getting state tree: %w", err) + } + + type transfer struct { + From address.Address + To address.Address + Amt abi.TokenAmount + } + + var transfers []transfer + subcalls := make([]types.ExecutionTrace, 0) + transferCb := func(trace types.ExecutionTrace) { + subcalls = append(subcalls, trace) + } + + // Take all excess funds away, put them into the reserve account + err = tree.ForEach(func(addr address.Address, act *types.Actor) error { + switch act.Code { + case builtin0.AccountActorCodeID, builtin0.MultisigActorCodeID, builtin0.PaymentChannelActorCodeID: + sysAcc, err := isSystemAccount(addr) + if err != nil { + return xerrors.Errorf("checking system account: %w", err) + } + + if !sysAcc { + transfers = append(transfers, transfer{ + From: addr, + To: builtin.ReserveAddress, + Amt: act.Balance, + }) + } + case builtin0.StorageMinerActorCodeID: + var st miner0.State + if err := sm.ChainStore().ActorStore(ctx).Get(ctx, act.Head, &st); err != nil { + return xerrors.Errorf("failed to load miner state: %w", err) + } + + var available abi.TokenAmount + { + defer func() { + if err := recover(); err != nil { + log.Warnf("Get available balance failed (%s, %s, %s): %s", addr, act.Head, act.Balance, err) + } + available = abi.NewTokenAmount(0) + }() + // this panics if the miner doesnt have enough funds to cover their locked pledge + available = st.GetAvailableBalance(act.Balance) + } + + if !available.IsZero() { + transfers = append(transfers, transfer{ + From: addr, + To: builtin.ReserveAddress, + Amt: available, + }) + } + } + return nil + }) + if err != nil { + return cid.Undef, xerrors.Errorf("foreach over state tree failed: %w", err) + } + + // Execute transfers from previous step + for _, t := range transfers { + if err := doTransfer(tree, t.From, t.To, 
t.Amt, transferCb); err != nil { + return cid.Undef, xerrors.Errorf("transfer %s %s->%s failed: %w", t.Amt, t.From, t.To, err) + } + } + + // pull up power table to give miners back some funds proportional to their power + var ps power0.State + powAct, err := tree.GetActor(builtin0.StoragePowerActorAddr) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to load power actor: %w", err) + } + + cst := cbor.NewCborStore(sm.ChainStore().StateBlockstore()) + if err := cst.Get(ctx, powAct.Head, &ps); err != nil { + return cid.Undef, xerrors.Errorf("failed to get power actor state: %w", err) + } + + totalPower := ps.TotalBytesCommitted + + var transfersBack []transfer + // Now, we return some funds to places where they are needed + err = tree.ForEach(func(addr address.Address, act *types.Actor) error { + lbact, err := lbtree.GetActor(addr) + if err != nil { + if !xerrors.Is(err, types.ErrActorNotFound) { + return xerrors.Errorf("failed to get actor in lookback state") + } + } + + prevBalance := abi.NewTokenAmount(0) + if lbact != nil { + prevBalance = lbact.Balance + } + + switch act.Code { + case builtin0.AccountActorCodeID, builtin0.MultisigActorCodeID, builtin0.PaymentChannelActorCodeID: + nbalance := big.Min(prevBalance, AccountCap) + if nbalance.Sign() != 0 { + transfersBack = append(transfersBack, transfer{ + From: builtin.ReserveAddress, + To: addr, + Amt: nbalance, + }) + } + case builtin0.StorageMinerActorCodeID: + var st miner0.State + if err := sm.ChainStore().ActorStore(ctx).Get(ctx, act.Head, &st); err != nil { + return xerrors.Errorf("failed to load miner state: %w", err) + } + + var minfo miner0.MinerInfo + if err := cst.Get(ctx, st.Info, &minfo); err != nil { + return xerrors.Errorf("failed to get miner info: %w", err) + } + + sectorsArr, err := adt0.AsArray(sm.ChainStore().ActorStore(ctx), st.Sectors) + if err != nil { + return xerrors.Errorf("failed to load sectors array: %w", err) + } + + slen := sectorsArr.Length() + + power := 
types.BigMul(types.NewInt(slen), types.NewInt(uint64(minfo.SectorSize))) + + mfunds := minerFundsAlloc(power, totalPower) + transfersBack = append(transfersBack, transfer{ + From: builtin.ReserveAddress, + To: minfo.Worker, + Amt: mfunds, + }) + + // Now make sure to give each miner who had power at the lookback some FIL + lbact, err := lbtree.GetActor(addr) + if err == nil { + var lbst miner0.State + if err := sm.ChainStore().ActorStore(ctx).Get(ctx, lbact.Head, &lbst); err != nil { + return xerrors.Errorf("failed to load miner state: %w", err) + } + + lbsectors, err := adt0.AsArray(sm.ChainStore().ActorStore(ctx), lbst.Sectors) + if err != nil { + return xerrors.Errorf("failed to load lb sectors array: %w", err) + } + + if lbsectors.Length() > 0 { + transfersBack = append(transfersBack, transfer{ + From: builtin.ReserveAddress, + To: minfo.Worker, + Amt: BaseMinerBalance, + }) + } + + } else { + log.Warnf("failed to get miner in lookback state: %s", err) + } + } + return nil + }) + if err != nil { + return cid.Undef, xerrors.Errorf("foreach over state tree failed: %w", err) + } + + for _, t := range transfersBack { + if err := doTransfer(tree, t.From, t.To, t.Amt, transferCb); err != nil { + return cid.Undef, xerrors.Errorf("transfer %s %s->%s failed: %w", t.Amt, t.From, t.To, err) + } + } + + // transfer all burnt funds back to the reserve account + burntAct, err := tree.GetActor(builtin0.BurntFundsActorAddr) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to load burnt funds actor: %w", err) + } + if err := doTransfer(tree, builtin0.BurntFundsActorAddr, builtin.ReserveAddress, burntAct.Balance, transferCb); err != nil { + return cid.Undef, xerrors.Errorf("failed to unburn funds: %w", err) + } + + // Top up the reimbursement service + reimbAddr, err := address.NewFromString("t0111") + if err != nil { + return cid.Undef, xerrors.Errorf("failed to parse reimbursement service address") + } + + reimb, err := tree.GetActor(reimbAddr) + if err != nil { + 
return cid.Undef, xerrors.Errorf("failed to load reimbursement account actor: %w", err) + } + + difference := types.BigSub(DesiredReimbursementBalance, reimb.Balance) + if err := doTransfer(tree, builtin.ReserveAddress, reimbAddr, difference, transferCb); err != nil { + return cid.Undef, xerrors.Errorf("failed to top up reimbursement account: %w", err) + } + + // Now, a final sanity check to make sure the balances all check out + total := abi.NewTokenAmount(0) + err = tree.ForEach(func(addr address.Address, act *types.Actor) error { + total = types.BigAdd(total, act.Balance) + return nil + }) + if err != nil { + return cid.Undef, xerrors.Errorf("checking final state balance failed: %w", err) + } + + exp := types.FromFil(build.FilBase) + if !exp.Equals(total) { + return cid.Undef, xerrors.Errorf("resultant state tree account balance was not correct: %s", total) + } + + if em != nil { + // record the transfer in execution traces + + fakeMsg := makeFakeMsg(builtin.SystemActorAddr, builtin.SystemActorAddr, big.Zero(), uint64(epoch)) + + if err := em.MessageApplied(ctx, ts, fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{ + MessageReceipt: *makeFakeRct(), + ActorErr: nil, + ExecutionTrace: types.ExecutionTrace{ + Msg: fakeMsg, + MsgRct: makeFakeRct(), + Error: "", + Duration: 0, + GasCharges: nil, + Subcalls: subcalls, + }, + Duration: 0, + GasCosts: nil, + }, false); err != nil { + return cid.Undef, xerrors.Errorf("recording transfers: %w", err) + } + } + + return tree.Flush(ctx) +} + +func UpgradeIgnition(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { + store := sm.cs.ActorStore(ctx) + + if build.UpgradeLiftoffHeight <= epoch { + return cid.Undef, xerrors.Errorf("liftoff height must be beyond ignition height") + } + + nst, err := nv3.MigrateStateTree(ctx, store, root, epoch) + if err != nil { + return cid.Undef, xerrors.Errorf("migrating actors state: %w", err) + } + + tree, err := 
sm.StateTree(nst) + if err != nil { + return cid.Undef, xerrors.Errorf("getting state tree: %w", err) + } + + err = setNetworkName(ctx, store, tree, "ignition") + if err != nil { + return cid.Undef, xerrors.Errorf("setting network name: %w", err) + } + + split1, err := address.NewFromString("t0115") + if err != nil { + return cid.Undef, xerrors.Errorf("first split address: %w", err) + } + + split2, err := address.NewFromString("t0116") + if err != nil { + return cid.Undef, xerrors.Errorf("second split address: %w", err) + } + + err = resetGenesisMsigs0(ctx, sm, store, tree, build.UpgradeLiftoffHeight) + if err != nil { + return cid.Undef, xerrors.Errorf("resetting genesis msig start epochs: %w", err) + } + + err = splitGenesisMultisig0(ctx, cb, split1, store, tree, 50, epoch, ts) + if err != nil { + return cid.Undef, xerrors.Errorf("splitting first msig: %w", err) + } + + err = splitGenesisMultisig0(ctx, cb, split2, store, tree, 50, epoch, ts) + if err != nil { + return cid.Undef, xerrors.Errorf("splitting second msig: %w", err) + } + + err = nv3.CheckStateTree(ctx, store, nst, epoch, builtin0.TotalFilecoin) + if err != nil { + return cid.Undef, xerrors.Errorf("sanity check after ignition upgrade failed: %w", err) + } + + return tree.Flush(ctx) +} + +func splitGenesisMultisig0(ctx context.Context, em ExecMonitor, addr address.Address, store adt0.Store, tree *state.StateTree, portions uint64, epoch abi.ChainEpoch, ts *types.TipSet) error { + if portions < 1 { + return xerrors.Errorf("cannot split into 0 portions") + } + + mact, err := tree.GetActor(addr) + if err != nil { + return xerrors.Errorf("getting msig actor: %w", err) + } + + mst, err := multisig.Load(store, mact) + if err != nil { + return xerrors.Errorf("getting msig state: %w", err) + } + + signers, err := mst.Signers() + if err != nil { + return xerrors.Errorf("getting msig signers: %w", err) + } + + thresh, err := mst.Threshold() + if err != nil { + return xerrors.Errorf("getting msig threshold: %w", 
err) + } + + ibal, err := mst.InitialBalance() + if err != nil { + return xerrors.Errorf("getting msig initial balance: %w", err) + } + + se, err := mst.StartEpoch() + if err != nil { + return xerrors.Errorf("getting msig start epoch: %w", err) + } + + ud, err := mst.UnlockDuration() + if err != nil { + return xerrors.Errorf("getting msig unlock duration: %w", err) + } + + pending, err := adt0.MakeEmptyMap(store).Root() + if err != nil { + return xerrors.Errorf("failed to create empty map: %w", err) + } + + newIbal := big.Div(ibal, types.NewInt(portions)) + newState := &multisig0.State{ + Signers: signers, + NumApprovalsThreshold: thresh, + NextTxnID: 0, + InitialBalance: newIbal, + StartEpoch: se, + UnlockDuration: ud, + PendingTxns: pending, + } + + scid, err := store.Put(ctx, newState) + if err != nil { + return xerrors.Errorf("storing new state: %w", err) + } + + newActor := types.Actor{ + Code: builtin0.MultisigActorCodeID, + Head: scid, + Nonce: 0, + Balance: big.Zero(), + } + + i := uint64(0) + subcalls := make([]types.ExecutionTrace, 0, portions) + transferCb := func(trace types.ExecutionTrace) { + subcalls = append(subcalls, trace) + } + + for i < portions { + keyAddr, err := makeKeyAddr(addr, i) + if err != nil { + return xerrors.Errorf("creating key address: %w", err) + } + + idAddr, err := tree.RegisterNewAddress(keyAddr) + if err != nil { + return xerrors.Errorf("registering new address: %w", err) + } + + err = tree.SetActor(idAddr, &newActor) + if err != nil { + return xerrors.Errorf("setting new msig actor state: %w", err) + } + + if err := doTransfer(tree, addr, idAddr, newIbal, transferCb); err != nil { + return xerrors.Errorf("transferring split msig balance: %w", err) + } + + i++ + } + + if em != nil { + // record the transfer in execution traces + + fakeMsg := makeFakeMsg(builtin.SystemActorAddr, addr, big.Zero(), uint64(epoch)) + + if err := em.MessageApplied(ctx, ts, fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{ + MessageReceipt: *makeFakeRct(), + 
ActorErr: nil, + ExecutionTrace: types.ExecutionTrace{ + Msg: fakeMsg, + MsgRct: makeFakeRct(), + Error: "", + Duration: 0, + GasCharges: nil, + Subcalls: subcalls, + }, + Duration: 0, + GasCosts: nil, + }, false); err != nil { + return xerrors.Errorf("recording transfers: %w", err) + } + } + + return nil +} + +// TODO: After the Liftoff epoch, refactor this to use resetMultisigVesting +func resetGenesisMsigs0(ctx context.Context, sm *StateManager, store adt0.Store, tree *state.StateTree, startEpoch abi.ChainEpoch) error { + gb, err := sm.cs.GetGenesis() + if err != nil { + return xerrors.Errorf("getting genesis block: %w", err) + } + + gts, err := types.NewTipSet([]*types.BlockHeader{gb}) + if err != nil { + return xerrors.Errorf("getting genesis tipset: %w", err) + } + + cst := cbor.NewCborStore(sm.cs.StateBlockstore()) + genesisTree, err := state.LoadStateTree(cst, gts.ParentState()) + if err != nil { + return xerrors.Errorf("loading state tree: %w", err) + } + + err = genesisTree.ForEach(func(addr address.Address, genesisActor *types.Actor) error { + if genesisActor.Code == builtin0.MultisigActorCodeID { + currActor, err := tree.GetActor(addr) + if err != nil { + return xerrors.Errorf("loading actor: %w", err) + } + + var currState multisig0.State + if err := store.Get(ctx, currActor.Head, &currState); err != nil { + return xerrors.Errorf("reading multisig state: %w", err) + } + + currState.StartEpoch = startEpoch + + currActor.Head, err = store.Put(ctx, &currState) + if err != nil { + return xerrors.Errorf("writing new multisig state: %w", err) + } + + if err := tree.SetActor(addr, currActor); err != nil { + return xerrors.Errorf("setting multisig actor: %w", err) + } + } + return nil + }) + + if err != nil { + return xerrors.Errorf("iterating over genesis actors: %w", err) + } + + return nil +} + +func resetMultisigVesting0(ctx context.Context, store adt0.Store, tree *state.StateTree, addr address.Address, startEpoch abi.ChainEpoch, duration abi.ChainEpoch, 
balance abi.TokenAmount) error { + act, err := tree.GetActor(addr) + if err != nil { + return xerrors.Errorf("getting actor: %w", err) + } + + if !builtin.IsMultisigActor(act.Code) { + return xerrors.Errorf("actor wasn't msig: %w", err) + } + + var msigState multisig0.State + if err := store.Get(ctx, act.Head, &msigState); err != nil { + return xerrors.Errorf("reading multisig state: %w", err) + } + + msigState.StartEpoch = startEpoch + msigState.UnlockDuration = duration + msigState.InitialBalance = balance + + act.Head, err = store.Put(ctx, &msigState) + if err != nil { + return xerrors.Errorf("writing new multisig state: %w", err) + } + + if err := tree.SetActor(addr, act); err != nil { + return xerrors.Errorf("setting multisig actor: %w", err) + } + + return nil +} + +func UpgradeRefuel(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { + + store := sm.cs.ActorStore(ctx) + tree, err := sm.StateTree(root) + if err != nil { + return cid.Undef, xerrors.Errorf("getting state tree: %w", err) + } + + err = resetMultisigVesting0(ctx, store, tree, builtin.SaftAddress, 0, 0, big.Zero()) + if err != nil { + return cid.Undef, xerrors.Errorf("tweaking msig vesting: %w", err) + } + + err = resetMultisigVesting0(ctx, store, tree, builtin.ReserveAddress, 0, 0, big.Zero()) + if err != nil { + return cid.Undef, xerrors.Errorf("tweaking msig vesting: %w", err) + } + + err = resetMultisigVesting0(ctx, store, tree, builtin.RootVerifierAddress, 0, 0, big.Zero()) + if err != nil { + return cid.Undef, xerrors.Errorf("tweaking msig vesting: %w", err) + } + + return tree.Flush(ctx) +} + +func UpgradeActorsV2(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { + buf := blockstore.NewTieredBstore(sm.cs.StateBlockstore(), blockstore.NewMemorySync()) + store := store.ActorStore(ctx, buf) + + info, err := 
store.Put(ctx, new(types.StateInfo0)) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to create new state info for actors v2: %w", err) + } + + newHamtRoot, err := nv4.MigrateStateTree(ctx, store, root, epoch, nv4.DefaultConfig()) + if err != nil { + return cid.Undef, xerrors.Errorf("upgrading to actors v2: %w", err) + } + + newRoot, err := store.Put(ctx, &types.StateRoot{ + Version: types.StateTreeVersion1, + Actors: newHamtRoot, + Info: info, + }) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err) + } + + // perform some basic sanity checks to make sure everything still works. + if newSm, err := state.LoadStateTree(store, newRoot); err != nil { + return cid.Undef, xerrors.Errorf("state tree sanity load failed: %w", err) + } else if newRoot2, err := newSm.Flush(ctx); err != nil { + return cid.Undef, xerrors.Errorf("state tree sanity flush failed: %w", err) + } else if newRoot2 != newRoot { + return cid.Undef, xerrors.Errorf("state-root mismatch: %s != %s", newRoot, newRoot2) + } else if _, err := newSm.GetActor(builtin0.InitActorAddr); err != nil { + return cid.Undef, xerrors.Errorf("failed to load init actor after upgrade: %w", err) + } + + { + from := buf + to := buf.Read() + + if err := vm.Copy(ctx, from, to, newRoot); err != nil { + return cid.Undef, xerrors.Errorf("copying migrated tree: %w", err) + } + } + + return newRoot, nil +} + +func UpgradeLiftoff(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { + tree, err := sm.StateTree(root) + if err != nil { + return cid.Undef, xerrors.Errorf("getting state tree: %w", err) + } + + err = setNetworkName(ctx, sm.cs.ActorStore(ctx), tree, "mainnet") + if err != nil { + return cid.Undef, xerrors.Errorf("setting network name: %w", err) + } + + return tree.Flush(ctx) +} + +func UpgradeCalico(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, 
root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { + if build.BuildType != build.BuildMainnet { + return root, nil + } + + store := sm.cs.ActorStore(ctx) + var stateRoot types.StateRoot + if err := store.Get(ctx, root, &stateRoot); err != nil { + return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err) + } + + if stateRoot.Version != types.StateTreeVersion1 { + return cid.Undef, xerrors.Errorf( + "expected state root version 1 for calico upgrade, got %d", + stateRoot.Version, + ) + } + + newHamtRoot, err := nv7.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, nv7.DefaultConfig()) + if err != nil { + return cid.Undef, xerrors.Errorf("running nv7 migration: %w", err) + } + + newRoot, err := store.Put(ctx, &types.StateRoot{ + Version: stateRoot.Version, + Actors: newHamtRoot, + Info: stateRoot.Info, + }) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err) + } + + // perform some basic sanity checks to make sure everything still works. + if newSm, err := state.LoadStateTree(store, newRoot); err != nil { + return cid.Undef, xerrors.Errorf("state tree sanity load failed: %w", err) + } else if newRoot2, err := newSm.Flush(ctx); err != nil { + return cid.Undef, xerrors.Errorf("state tree sanity flush failed: %w", err) + } else if newRoot2 != newRoot { + return cid.Undef, xerrors.Errorf("state-root mismatch: %s != %s", newRoot, newRoot2) + } else if _, err := newSm.GetActor(builtin0.InitActorAddr); err != nil { + return cid.Undef, xerrors.Errorf("failed to load init actor after upgrade: %w", err) + } + + return newRoot, nil +} + +func UpgradeActorsV3(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { + // Use all the CPUs except 3. 
+ workerCount := runtime.NumCPU() - 3 + if workerCount <= 0 { + workerCount = 1 + } + + config := nv10.Config{ + MaxWorkers: uint(workerCount), + JobQueueSize: 1000, + ResultQueueSize: 100, + ProgressLogPeriod: 10 * time.Second, + } + newRoot, err := upgradeActorsV3Common(ctx, sm, cache, root, epoch, ts, config) + if err != nil { + return cid.Undef, xerrors.Errorf("migrating actors v3 state: %w", err) + } + + tree, err := sm.StateTree(newRoot) + if err != nil { + return cid.Undef, xerrors.Errorf("getting state tree: %w", err) + } + + if build.BuildType == build.BuildMainnet { + err := terminateActor(ctx, tree, build.ZeroAddress, cb, epoch, ts) + if err != nil && !xerrors.Is(err, types.ErrActorNotFound) { + return cid.Undef, xerrors.Errorf("deleting zero bls actor: %w", err) + } + + newRoot, err = tree.Flush(ctx) + if err != nil { + return cid.Undef, xerrors.Errorf("flushing state tree: %w", err) + } + } + + return newRoot, nil +} + +func PreUpgradeActorsV3(ctx context.Context, sm *StateManager, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error { + // Use half the CPUs for pre-migration, but leave at least 3. + workerCount := runtime.NumCPU() + if workerCount <= 4 { + workerCount = 1 + } else { + workerCount /= 2 + } + config := nv10.Config{MaxWorkers: uint(workerCount)} + _, err := upgradeActorsV3Common(ctx, sm, cache, root, epoch, ts, config) + return err +} + +func upgradeActorsV3Common( + ctx context.Context, sm *StateManager, cache MigrationCache, + root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet, + config nv10.Config, +) (cid.Cid, error) { + buf := blockstore.NewTieredBstore(sm.cs.StateBlockstore(), blockstore.NewMemorySync()) + store := store.ActorStore(ctx, buf) + + // Load the state root. 
+ var stateRoot types.StateRoot + if err := store.Get(ctx, root, &stateRoot); err != nil { + return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err) + } + + if stateRoot.Version != types.StateTreeVersion1 { + return cid.Undef, xerrors.Errorf( + "expected state root version 1 for actors v3 upgrade, got %d", + stateRoot.Version, + ) + } + + // Perform the migration + newHamtRoot, err := nv10.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache) + if err != nil { + return cid.Undef, xerrors.Errorf("upgrading to actors v3: %w", err) + } + + // Persist the result. + newRoot, err := store.Put(ctx, &types.StateRoot{ + Version: types.StateTreeVersion2, + Actors: newHamtRoot, + Info: stateRoot.Info, + }) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err) + } + + // Persist the new tree. + + { + from := buf + to := buf.Read() + + if err := vm.Copy(ctx, from, to, newRoot); err != nil { + return cid.Undef, xerrors.Errorf("copying migrated tree: %w", err) + } + } + + return newRoot, nil +} + +func UpgradeActorsV4(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { + // Use all the CPUs except 3. + workerCount := runtime.NumCPU() - 3 + if workerCount <= 0 { + workerCount = 1 + } + + config := nv12.Config{ + MaxWorkers: uint(workerCount), + JobQueueSize: 1000, + ResultQueueSize: 100, + ProgressLogPeriod: 10 * time.Second, + } + + newRoot, err := upgradeActorsV4Common(ctx, sm, cache, root, epoch, ts, config) + if err != nil { + return cid.Undef, xerrors.Errorf("migrating actors v4 state: %w", err) + } + + return newRoot, nil +} + +func PreUpgradeActorsV4(ctx context.Context, sm *StateManager, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error { + // Use half the CPUs for pre-migration, but leave at least 3. 
+ workerCount := runtime.NumCPU() + if workerCount <= 4 { + workerCount = 1 + } else { + workerCount /= 2 + } + config := nv12.Config{MaxWorkers: uint(workerCount)} + _, err := upgradeActorsV4Common(ctx, sm, cache, root, epoch, ts, config) + return err +} + +func upgradeActorsV4Common( + ctx context.Context, sm *StateManager, cache MigrationCache, + root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet, + config nv12.Config, +) (cid.Cid, error) { + buf := blockstore.NewTieredBstore(sm.cs.StateBlockstore(), blockstore.NewMemorySync()) + store := store.ActorStore(ctx, buf) + + // Load the state root. + var stateRoot types.StateRoot + if err := store.Get(ctx, root, &stateRoot); err != nil { + return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err) + } + + if stateRoot.Version != types.StateTreeVersion2 { + return cid.Undef, xerrors.Errorf( + "expected state root version 2 for actors v4 upgrade, got %d", + stateRoot.Version, + ) + } + + // Perform the migration + newHamtRoot, err := nv12.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache) + if err != nil { + return cid.Undef, xerrors.Errorf("upgrading to actors v4: %w", err) + } + + // Persist the result. + newRoot, err := store.Put(ctx, &types.StateRoot{ + Version: types.StateTreeVersion3, + Actors: newHamtRoot, + Info: stateRoot.Info, + }) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err) + } + + // Persist the new tree. + + { + from := buf + to := buf.Read() + + if err := vm.Copy(ctx, from, to, newRoot); err != nil { + return cid.Undef, xerrors.Errorf("copying migrated tree: %w", err) + } + } + + return newRoot, nil +} + +func UpgradeActorsV5(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { + // Use all the CPUs except 3. 
+ workerCount := runtime.NumCPU() - 3 + if workerCount <= 0 { + workerCount = 1 + } + + config := nv13.Config{ + MaxWorkers: uint(workerCount), + JobQueueSize: 1000, + ResultQueueSize: 100, + ProgressLogPeriod: 10 * time.Second, + } + + newRoot, err := upgradeActorsV5Common(ctx, sm, cache, root, epoch, ts, config) + if err != nil { + return cid.Undef, xerrors.Errorf("migrating actors v5 state: %w", err) + } + + return newRoot, nil +} + +func PreUpgradeActorsV5(ctx context.Context, sm *StateManager, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error { + // Use half the CPUs for pre-migration, but leave at least 3. + workerCount := runtime.NumCPU() + if workerCount <= 4 { + workerCount = 1 + } else { + workerCount /= 2 + } + config := nv13.Config{MaxWorkers: uint(workerCount)} + _, err := upgradeActorsV5Common(ctx, sm, cache, root, epoch, ts, config) + return err +} + +func upgradeActorsV5Common( + ctx context.Context, sm *StateManager, cache MigrationCache, + root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet, + config nv13.Config, +) (cid.Cid, error) { + buf := blockstore.NewTieredBstore(sm.cs.StateBlockstore(), blockstore.NewMemorySync()) + store := store.ActorStore(ctx, buf) + + // Load the state root. + var stateRoot types.StateRoot + if err := store.Get(ctx, root, &stateRoot); err != nil { + return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err) + } + + if stateRoot.Version != types.StateTreeVersion3 { + return cid.Undef, xerrors.Errorf( + "expected state root version 3 for actors v5 upgrade, got %d", + stateRoot.Version, + ) + } + + // Perform the migration + newHamtRoot, err := nv13.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache) + if err != nil { + return cid.Undef, xerrors.Errorf("upgrading to actors v5: %w", err) + } + + // Persist the result. 
+ newRoot, err := store.Put(ctx, &types.StateRoot{ + Version: types.StateTreeVersion4, + Actors: newHamtRoot, + Info: stateRoot.Info, + }) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err) + } + + // Persist the new tree. + + { + from := buf + to := buf.Read() + + if err := vm.Copy(ctx, from, to, newRoot); err != nil { + return cid.Undef, xerrors.Errorf("copying migrated tree: %w", err) + } + } + + return newRoot, nil +} diff --git a/chain/sync.go b/chain/sync.go index 167856927f3..7914cc8d5fd 100644 --- a/chain/sync.go +++ b/chain/sync.go @@ -727,6 +727,11 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock, use } // fast checks first + + if h.Height <= baseTs.Height() { + return xerrors.Errorf("block height not greater than parent height: %d != %d", h.Height, baseTs.Height()) + } + nulls := h.Height - (baseTs.Height() + 1) if tgtTs := baseTs.MinTimestamp() + build.BlockDelaySecs*uint64(nulls+1); h.Timestamp != tgtTs { return xerrors.Errorf("block has wrong timestamp: %d != %d", h.Timestamp, tgtTs) @@ -1054,6 +1059,7 @@ func (syncer *Syncer) checkBlockMessages(ctx context.Context, b *types.FullBlock return xerrors.Errorf("failed to load base state tree: %w", err) } + nv := syncer.sm.GetNtwkVersion(ctx, b.Header.Height) pl := vm.PricelistByEpoch(baseTs.Height()) var sumGasLimit int64 checkMsg := func(msg types.ChainMsg) error { @@ -1061,7 +1067,7 @@ func (syncer *Syncer) checkBlockMessages(ctx context.Context, b *types.FullBlock // Phase 1: syntactic validation, as defined in the spec minGas := pl.OnChainMessage(msg.ChainLength()) - if err := m.ValidForBlockInclusion(minGas.Total(), syncer.sm.GetNtwkVersion(ctx, b.Header.Height)); err != nil { + if err := m.ValidForBlockInclusion(minGas.Total(), nv); err != nil { return err } @@ -1075,7 +1081,7 @@ func (syncer *Syncer) checkBlockMessages(ctx context.Context, b *types.FullBlock // Phase 2: (Partial) semantic validation: // the sender exists 
and is an account actor, and the nonces make sense var sender address.Address - if syncer.sm.GetNtwkVersion(ctx, b.Header.Height) >= network.Version13 { + if nv >= network.Version13 { sender, err = st.LookupID(m.From) if err != nil { return err diff --git a/chain/sync_test.go b/chain/sync_test.go index ae4b6cc2e06..bda8c60eef6 100644 --- a/chain/sync_test.go +++ b/chain/sync_test.go @@ -230,7 +230,7 @@ func (tu *syncTestUtil) pushTsExpectErr(to int, fts *store.FullTipSet, experr bo } } -func (tu *syncTestUtil) mineOnBlock(blk *store.FullTipSet, to int, miners []int, wait, fail bool, msgs [][]*types.SignedMessage, nulls abi.ChainEpoch) *store.FullTipSet { +func (tu *syncTestUtil) mineOnBlock(blk *store.FullTipSet, to int, miners []int, wait, fail bool, msgs [][]*types.SignedMessage, nulls abi.ChainEpoch, push bool) *store.FullTipSet { if miners == nil { for i := range tu.g.Miners { miners = append(miners, i) @@ -247,7 +247,7 @@ func (tu *syncTestUtil) mineOnBlock(blk *store.FullTipSet, to int, miners []int, var nts *store.FullTipSet var err error if msgs != nil { - nts, err = tu.g.NextTipSetFromMinersWithMessagesAndNulls(blk.TipSet(), maddrs, msgs, 0) + nts, err = tu.g.NextTipSetFromMinersWithMessagesAndNulls(blk.TipSet(), maddrs, msgs, nulls) require.NoError(tu.t, err) } else { mt, err := tu.g.NextTipSetFromMiners(blk.TipSet(), maddrs, nulls) @@ -255,17 +255,19 @@ func (tu *syncTestUtil) mineOnBlock(blk *store.FullTipSet, to int, miners []int, nts = mt.TipSet } - if fail { - tu.pushTsExpectErr(to, nts, true) - } else { - tu.pushFtsAndWait(to, nts, wait) + if push { + if fail { + tu.pushTsExpectErr(to, nts, true) + } else { + tu.pushFtsAndWait(to, nts, wait) + } } return nts } func (tu *syncTestUtil) mineNewBlock(src int, miners []int) { - mts := tu.mineOnBlock(tu.g.CurTipset, src, miners, true, false, nil, 0) + mts := tu.mineOnBlock(tu.g.CurTipset, src, miners, true, false, nil, 0, true) tu.g.CurTipset = mts } @@ -279,7 +281,7 @@ func (tu *syncTestUtil) 
addSourceNode(gen int) { stop, err := node.New(tu.ctx, node.FullAPI(&out), - node.Online(), + node.Base(), node.Repo(sourceRepo), node.MockHost(tu.mn), node.Test(), @@ -310,10 +312,11 @@ func (tu *syncTestUtil) addClientNode() int { var out api.FullNode + r := repo.NewMemory(nil) stop, err := node.New(tu.ctx, node.FullAPI(&out), - node.Online(), - node.Repo(repo.NewMemory(nil)), + node.Base(), + node.Repo(r), node.MockHost(tu.mn), node.Test(), @@ -509,7 +512,7 @@ func TestSyncBadTimestamp(t *testing.T) { fmt.Println("BASE: ", base.Cids()) tu.printHeads() - a1 := tu.mineOnBlock(base, 0, nil, false, true, nil, 0) + a1 := tu.mineOnBlock(base, 0, nil, false, true, nil, 0, true) tu.g.Timestamper = nil require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet())) @@ -518,7 +521,7 @@ func TestSyncBadTimestamp(t *testing.T) { fmt.Println("After mine bad block!") tu.printHeads() - a2 := tu.mineOnBlock(base, 0, nil, true, false, nil, 0) + a2 := tu.mineOnBlock(base, 0, nil, true, false, nil, 0, true) tu.waitUntilSync(0, client) @@ -562,7 +565,7 @@ func TestSyncBadWinningPoSt(t *testing.T) { tu.g.SetWinningPoStProver(tu.g.Miners[1], &badWpp{}) // now ensure that new blocks are not accepted - tu.mineOnBlock(base, client, nil, false, true, nil, 0) + tu.mineOnBlock(base, client, nil, false, true, nil, 0, true) } func (tu *syncTestUtil) loadChainToNode(to int) { @@ -612,16 +615,16 @@ func TestSyncFork(t *testing.T) { fmt.Println("Mining base: ", base.TipSet().Cids(), base.TipSet().Height()) // The two nodes fork at this point into 'a' and 'b' - a1 := tu.mineOnBlock(base, p1, []int{0}, true, false, nil, 0) - a := tu.mineOnBlock(a1, p1, []int{0}, true, false, nil, 0) - a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil, 0) + a1 := tu.mineOnBlock(base, p1, []int{0}, true, false, nil, 0, true) + a := tu.mineOnBlock(a1, p1, []int{0}, true, false, nil, 0, true) + a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil, 0, true) require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet())) // chain B 
will now be heaviest - b := tu.mineOnBlock(base, p2, []int{1}, true, false, nil, 0) - b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0) - b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0) - b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0) + b := tu.mineOnBlock(base, p2, []int{1}, true, false, nil, 0, true) + b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0, true) + b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0, true) + b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0, true) fmt.Println("A: ", a.Cids(), a.TipSet().Height()) fmt.Println("B: ", b.Cids(), b.TipSet().Height()) @@ -685,13 +688,13 @@ func TestDuplicateNonce(t *testing.T) { msgs[k] = []*types.SignedMessage{makeMsg(tu.g.Miners[k])} } - ts1 := tu.mineOnBlock(base, 0, []int{0, 1}, true, false, msgs, 0) + ts1 := tu.mineOnBlock(base, 0, []int{0, 1}, true, false, msgs, 0, true) tu.waitUntilSyncTarget(0, ts1.TipSet()) // mine another tipset - ts2 := tu.mineOnBlock(ts1, 0, []int{0, 1}, true, false, make([][]*types.SignedMessage, 2), 0) + ts2 := tu.mineOnBlock(ts1, 0, []int{0, 1}, true, false, make([][]*types.SignedMessage, 2), 0, true) tu.waitUntilSyncTarget(0, ts2.TipSet()) var includedMsg cid.Cid @@ -777,7 +780,7 @@ func TestBadNonce(t *testing.T) { msgs := make([][]*types.SignedMessage, 1) msgs[0] = []*types.SignedMessage{makeBadMsg()} - tu.mineOnBlock(base, 0, []int{0}, true, true, msgs, 0) + tu.mineOnBlock(base, 0, []int{0}, true, true, msgs, 0, true) } // This test introduces a block that has 2 messages, with the same sender, and same nonce. 
@@ -831,7 +834,7 @@ func TestMismatchedNoncesRobustID(t *testing.T) { msgs := make([][]*types.SignedMessage, 1) msgs[0] = []*types.SignedMessage{makeMsg(false), makeMsg(true)} - tu.mineOnBlock(base, 0, []int{0}, true, true, msgs, 0) + tu.mineOnBlock(base, 0, []int{0}, true, true, msgs, 0, true) } // This test introduces a block that has 2 messages, with the same sender, and nonces N and N+1 (so both can be included in a block) @@ -885,7 +888,7 @@ func TestMatchedNoncesRobustID(t *testing.T) { msgs := make([][]*types.SignedMessage, 1) msgs[0] = []*types.SignedMessage{makeMsg(ba.Nonce, false), makeMsg(ba.Nonce+1, true)} - tu.mineOnBlock(base, 0, []int{0}, true, false, msgs, 0) + tu.mineOnBlock(base, 0, []int{0}, true, false, msgs, 0, true) } func BenchmarkSyncBasic(b *testing.B) { @@ -950,19 +953,19 @@ func TestSyncCheckpointHead(t *testing.T) { fmt.Println("Mining base: ", base.TipSet().Cids(), base.TipSet().Height()) // The two nodes fork at this point into 'a' and 'b' - a1 := tu.mineOnBlock(base, p1, []int{0}, true, false, nil, 0) - a := tu.mineOnBlock(a1, p1, []int{0}, true, false, nil, 0) - a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil, 0) + a1 := tu.mineOnBlock(base, p1, []int{0}, true, false, nil, 0, true) + a := tu.mineOnBlock(a1, p1, []int{0}, true, false, nil, 0, true) + a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil, 0, true) tu.waitUntilSyncTarget(p1, a.TipSet()) tu.checkpointTs(p1, a.TipSet().Key()) require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet())) // chain B will now be heaviest - b := tu.mineOnBlock(base, p2, []int{1}, true, false, nil, 0) - b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0) - b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0) - b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0) + b := tu.mineOnBlock(base, p2, []int{1}, true, false, nil, 0, true) + b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0, true) + b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0, true) + b = tu.mineOnBlock(b, 
p2, []int{1}, true, false, nil, 0, true) fmt.Println("A: ", a.Cids(), a.TipSet().Height()) fmt.Println("B: ", b.Cids(), b.TipSet().Height()) @@ -997,19 +1000,19 @@ func TestSyncCheckpointEarlierThanHead(t *testing.T) { fmt.Println("Mining base: ", base.TipSet().Cids(), base.TipSet().Height()) // The two nodes fork at this point into 'a' and 'b' - a1 := tu.mineOnBlock(base, p1, []int{0}, true, false, nil, 0) - a := tu.mineOnBlock(a1, p1, []int{0}, true, false, nil, 0) - a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil, 0) + a1 := tu.mineOnBlock(base, p1, []int{0}, true, false, nil, 0, true) + a := tu.mineOnBlock(a1, p1, []int{0}, true, false, nil, 0, true) + a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil, 0, true) tu.waitUntilSyncTarget(p1, a.TipSet()) tu.checkpointTs(p1, a1.TipSet().Key()) require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet())) // chain B will now be heaviest - b := tu.mineOnBlock(base, p2, []int{1}, true, false, nil, 0) - b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0) - b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0) - b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0) + b := tu.mineOnBlock(base, p2, []int{1}, true, false, nil, 0, true) + b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0, true) + b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0, true) + b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0, true) fmt.Println("A: ", a.Cids(), a.TipSet().Height()) fmt.Println("B: ", b.Cids(), b.TipSet().Height()) @@ -1047,7 +1050,7 @@ func TestDrandNull(t *testing.T) { pers := crypto.DomainSeparationTag_WinningPoStChallengeSeed beforeNull := tu.g.CurTipset - afterNull := tu.mineOnBlock(beforeNull, p0, nil, false, false, nil, 2) + afterNull := tu.mineOnBlock(beforeNull, p0, nil, false, false, nil, 2, true) nullHeight := beforeNull.TipSet().Height() + 1 if afterNull.TipSet().Height() == nullHeight { t.Fatal("didn't inject nulls as expected") @@ -1064,14 +1067,14 @@ func TestDrandNull(t *testing.T) { 
require.Equal(t, []byte(rand), expectedRand) // zoom zoom to past the v5 upgrade by injecting many many nulls - postUpgrade := tu.mineOnBlock(afterNull, p0, nil, false, false, nil, v5h) + postUpgrade := tu.mineOnBlock(afterNull, p0, nil, false, false, nil, v5h, true) nv, err := tu.nds[p0].StateNetworkVersion(tu.ctx, postUpgrade.TipSet().Key()) require.NoError(t, err) if nv != network.Version13 { t.Fatal("expect to be v13 by now") } - afterNull = tu.mineOnBlock(postUpgrade, p0, nil, false, false, nil, 2) + afterNull = tu.mineOnBlock(postUpgrade, p0, nil, false, false, nil, 2, true) nullHeight = postUpgrade.TipSet().Height() + 1 if afterNull.TipSet().Height() == nullHeight { t.Fatal("didn't inject nulls as expected") @@ -1103,3 +1106,22 @@ func TestDrandNull(t *testing.T) { build.UpgradeHyperdriveHeight = ov5h } + +func TestInvalidHeight(t *testing.T) { + H := 50 + tu := prepSyncTest(t, H) + + client := tu.addClientNode() + + require.NoError(t, tu.mn.LinkAll()) + tu.connect(client, 0) + tu.waitUntilSync(0, client) + + base := tu.g.CurTipset + + for i := 0; i < 5; i++ { + base = tu.mineOnBlock(base, 0, nil, false, false, nil, 0, false) + } + + tu.mineOnBlock(base, 0, nil, false, true, nil, -1, true) +} diff --git a/chain/vm/gas.go b/chain/vm/gas.go index c860ce9a0c2..206a55d3643 100644 --- a/chain/vm/gas.go +++ b/chain/vm/gas.go @@ -3,12 +3,11 @@ package vm import ( "fmt" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/go-address" addr "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/lotus/build" vmr5 "github.com/filecoin-project/specs-actors/v5/actors/runtime" proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" "github.com/ipfs/go-cid" diff --git a/chain/vm/invoker.go b/chain/vm/invoker.go index e4b15403187..be35d93c813 100644 --- a/chain/vm/invoker.go +++ b/chain/vm/invoker.go @@ -39,7 +39,10 @@ 
type ActorPredicate func(vmr.Runtime, rtt.VMActor) error func ActorsVersionPredicate(ver actors.Version) ActorPredicate { return func(rt vmr.Runtime, v rtt.VMActor) error { - aver := actors.VersionForNetwork(rt.NetworkVersion()) + aver, err := actors.VersionForNetwork(rt.NetworkVersion()) + if err != nil { + return xerrors.Errorf("unsupported network version: %w", err) + } if aver != ver { return xerrors.Errorf("actor %s is a version %d actor; chain only supports actor version %d at height %d and nver %d", v.Code(), ver, aver, rt.CurrEpoch(), rt.NetworkVersion()) } diff --git a/chain/vm/mkactor.go b/chain/vm/mkactor.go index 669c1450f1a..e461a2b4c51 100644 --- a/chain/vm/mkactor.go +++ b/chain/vm/mkactor.go @@ -54,7 +54,12 @@ func TryCreateAccountActor(rt *Runtime, addr address.Address) (*types.Actor, add return nil, address.Undef, aerrors.Escalate(err, "registering actor address") } - act, aerr := makeActor(actors.VersionForNetwork(rt.NetworkVersion()), addr) + av, err := actors.VersionForNetwork(rt.NetworkVersion()) + if err != nil { + return nil, address.Undef, aerrors.Escalate(err, "unsupported network version") + } + + act, aerr := makeActor(av, addr) if aerr != nil { return nil, address.Undef, aerr } diff --git a/cli/auth.go b/cli/auth.go index 20b9bb39428..286eb978bbf 100644 --- a/cli/auth.go +++ b/cli/auth.go @@ -113,7 +113,7 @@ var AuthApiInfoToken = &cli.Command{ ti, ok := cctx.App.Metadata["repoType"] if !ok { - log.Errorf("unknown repo type, are you sure you want to use GetAPI?") + log.Errorf("unknown repo type, are you sure you want to use GetCommonAPI?") ti = repo.FullNode } t, ok := ti.(repo.RepoType) @@ -128,7 +128,8 @@ var AuthApiInfoToken = &cli.Command{ // TODO: Log in audit log when it is implemented - fmt.Printf("%s=%s:%s\n", cliutil.EnvForRepo(t), string(token), ainfo.Addr) + currentEnv, _, _ := cliutil.EnvsForAPIInfos(t) + fmt.Printf("%s=%s:%s\n", currentEnv, string(token), ainfo.Addr) return nil }, } diff --git a/cli/chain.go b/cli/chain.go 
index 0fca73406f2..e30a685dd84 100644 --- a/cli/chain.go +++ b/cli/chain.go @@ -1030,7 +1030,9 @@ var ChainExportCmd = &cli.Command{ ArgsUsage: "[outputPath]", Flags: []cli.Flag{ &cli.StringFlag{ - Name: "tipset", + Name: "tipset", + Usage: "specify tipset to start the export from", + Value: "@head", }, &cli.Int64Flag{ Name: "recent-stateroots", diff --git a/cli/client.go b/cli/client.go index dc925b72faa..774d9aa5ff9 100644 --- a/cli/client.go +++ b/cli/client.go @@ -1231,9 +1231,9 @@ var clientListRetrievalsCmd = &cli.Command{ Usage: "print verbose deal details", }, &cli.BoolFlag{ - Name: "color", - Usage: "use color in display output", - Value: true, + Name: "color", + Usage: "use color in display output", + DefaultText: "depends on output being a TTY", }, &cli.BoolFlag{ Name: "show-failed", @@ -1250,6 +1250,10 @@ var clientListRetrievalsCmd = &cli.Command{ }, }, Action: func(cctx *cli.Context) error { + if cctx.IsSet("color") { + color.NoColor = !cctx.Bool("color") + } + api, closer, err := GetFullNodeAPI(cctx) if err != nil { return err @@ -1258,7 +1262,6 @@ var clientListRetrievalsCmd = &cli.Command{ ctx := ReqContext(cctx) verbose := cctx.Bool("verbose") - color := cctx.Bool("color") watch := cctx.Bool("watch") showFailed := cctx.Bool("show-failed") completed := cctx.Bool("completed") @@ -1278,7 +1281,7 @@ var clientListRetrievalsCmd = &cli.Command{ tm.Clear() tm.MoveCursor(1, 1) - err = outputRetrievalDeals(ctx, tm.Screen, localDeals, verbose, color, showFailed, completed) + err = outputRetrievalDeals(ctx, tm.Screen, localDeals, verbose, showFailed, completed) if err != nil { return err } @@ -1304,7 +1307,7 @@ var clientListRetrievalsCmd = &cli.Command{ } } - return outputRetrievalDeals(ctx, cctx.App.Writer, localDeals, verbose, color, showFailed, completed) + return outputRetrievalDeals(ctx, cctx.App.Writer, localDeals, verbose, showFailed, completed) }, } @@ -1312,7 +1315,7 @@ func isTerminalError(status retrievalmarket.DealStatus) bool { // should patch 
this in go-fil-markets but to solve the problem immediate and not have buggy output return retrievalmarket.IsTerminalError(status) || status == retrievalmarket.DealStatusErrored || status == retrievalmarket.DealStatusCancelled } -func outputRetrievalDeals(ctx context.Context, out io.Writer, localDeals []lapi.RetrievalInfo, verbose bool, color bool, showFailed bool, completed bool) error { +func outputRetrievalDeals(ctx context.Context, out io.Writer, localDeals []lapi.RetrievalInfo, verbose bool, showFailed bool, completed bool) error { var deals []api.RetrievalInfo for _, deal := range localDeals { if !showFailed && isTerminalError(deal.Status) { @@ -1348,13 +1351,13 @@ func outputRetrievalDeals(ctx context.Context, out io.Writer, localDeals []lapi. w := tablewriter.New(tableColumns...) for _, d := range deals { - w.Write(toRetrievalOutput(d, color, verbose)) + w.Write(toRetrievalOutput(d, verbose)) } return w.Flush(out) } -func toRetrievalOutput(d api.RetrievalInfo, color bool, verbose bool) map[string]interface{} { +func toRetrievalOutput(d api.RetrievalInfo, verbose bool) map[string]interface{} { payloadCID := d.PayloadCID.String() provider := d.Provider.String() @@ -1367,7 +1370,7 @@ func toRetrievalOutput(d api.RetrievalInfo, color bool, verbose bool) map[string "PayloadCID": payloadCID, "DealId": d.ID, "Provider": provider, - "Status": retrievalStatusString(color, d.Status), + "Status": retrievalStatusString(d.Status), "PricePerByte": types.FIL(d.PricePerByte), "Received": units.BytesSize(float64(d.BytesReceived)), "TotalPaid": types.FIL(d.TotalPaid), @@ -1397,19 +1400,17 @@ func toRetrievalOutput(d api.RetrievalInfo, color bool, verbose bool) map[string return retrievalOutput } -func retrievalStatusString(c bool, status retrievalmarket.DealStatus) string { +func retrievalStatusString(status retrievalmarket.DealStatus) string { s := retrievalmarket.DealStatuses[status] - if !c { - return s - } - if isTerminalError(status) { + switch { + case 
isTerminalError(status): return color.RedString(s) - } - if retrievalmarket.IsTerminalSuccess(status) { + case retrievalmarket.IsTerminalSuccess(status): return color.GreenString(s) + default: + return s } - return s } var clientInspectDealCmd = &cli.Command{ @@ -1804,9 +1805,9 @@ var clientListDeals = &cli.Command{ Usage: "print verbose deal details", }, &cli.BoolFlag{ - Name: "color", - Usage: "use color in display output", - Value: true, + Name: "color", + Usage: "use color in display output", + DefaultText: "depends on output being a TTY", }, &cli.BoolFlag{ Name: "show-failed", @@ -1818,6 +1819,10 @@ var clientListDeals = &cli.Command{ }, }, Action: func(cctx *cli.Context) error { + if cctx.IsSet("color") { + color.NoColor = !cctx.Bool("color") + } + api, closer, err := GetFullNodeAPI(cctx) if err != nil { return err @@ -1826,7 +1831,6 @@ var clientListDeals = &cli.Command{ ctx := ReqContext(cctx) verbose := cctx.Bool("verbose") - color := cctx.Bool("color") watch := cctx.Bool("watch") showFailed := cctx.Bool("show-failed") @@ -1845,7 +1849,7 @@ var clientListDeals = &cli.Command{ tm.Clear() tm.MoveCursor(1, 1) - err = outputStorageDeals(ctx, tm.Screen, api, localDeals, verbose, color, showFailed) + err = outputStorageDeals(ctx, tm.Screen, api, localDeals, verbose, showFailed) if err != nil { return err } @@ -1871,7 +1875,7 @@ var clientListDeals = &cli.Command{ } } - return outputStorageDeals(ctx, cctx.App.Writer, api, localDeals, verbose, color, showFailed) + return outputStorageDeals(ctx, cctx.App.Writer, api, localDeals, verbose, showFailed) }, } @@ -1894,7 +1898,7 @@ func dealFromDealInfo(ctx context.Context, full v0api.FullNode, head *types.TipS } } -func outputStorageDeals(ctx context.Context, out io.Writer, full v0api.FullNode, localDeals []lapi.DealInfo, verbose bool, color bool, showFailed bool) error { +func outputStorageDeals(ctx context.Context, out io.Writer, full v0api.FullNode, localDeals []lapi.DealInfo, verbose bool, showFailed bool) error { 
sort.Slice(localDeals, func(i, j int) bool { return localDeals[i].CreationTime.Before(localDeals[j].CreationTime) }) @@ -1946,7 +1950,7 @@ func outputStorageDeals(ctx context.Context, out io.Writer, full v0api.FullNode, d.LocalDeal.ProposalCid, d.LocalDeal.DealID, d.LocalDeal.Provider, - dealStateString(color, d.LocalDeal.State), + dealStateString(d.LocalDeal.State), onChain, slashed, d.LocalDeal.PieceCID, @@ -1995,7 +1999,7 @@ func outputStorageDeals(ctx context.Context, out io.Writer, full v0api.FullNode, "DealCid": propcid, "DealId": d.LocalDeal.DealID, "Provider": d.LocalDeal.Provider, - "State": dealStateString(color, d.LocalDeal.State), + "State": dealStateString(d.LocalDeal.State), "On Chain?": onChain, "Slashed?": slashed, "PieceCID": piece, @@ -2010,12 +2014,8 @@ func outputStorageDeals(ctx context.Context, out io.Writer, full v0api.FullNode, return w.Flush(out) } -func dealStateString(c bool, state storagemarket.StorageDealStatus) string { +func dealStateString(state storagemarket.StorageDealStatus) string { s := storagemarket.DealStates[state] - if !c { - return s - } - switch state { case storagemarket.StorageDealError, storagemarket.StorageDealExpired: return color.RedString(s) @@ -2334,9 +2334,9 @@ var clientListTransfers = &cli.Command{ Usage: "print verbose transfer details", }, &cli.BoolFlag{ - Name: "color", - Usage: "use color in display output", - Value: true, + Name: "color", + Usage: "use color in display output", + DefaultText: "depends on output being a TTY", }, &cli.BoolFlag{ Name: "completed", @@ -2352,6 +2352,10 @@ var clientListTransfers = &cli.Command{ }, }, Action: func(cctx *cli.Context) error { + if cctx.IsSet("color") { + color.NoColor = !cctx.Bool("color") + } + api, closer, err := GetFullNodeAPI(cctx) if err != nil { return err @@ -2366,7 +2370,6 @@ var clientListTransfers = &cli.Command{ verbose := cctx.Bool("verbose") completed := cctx.Bool("completed") - color := cctx.Bool("color") watch := cctx.Bool("watch") showFailed := 
cctx.Bool("show-failed") if watch { @@ -2380,7 +2383,7 @@ var clientListTransfers = &cli.Command{ tm.MoveCursor(1, 1) - OutputDataTransferChannels(tm.Screen, channels, verbose, completed, color, showFailed) + OutputDataTransferChannels(tm.Screen, channels, verbose, completed, showFailed) tm.Flush() @@ -2405,13 +2408,13 @@ var clientListTransfers = &cli.Command{ } } } - OutputDataTransferChannels(os.Stdout, channels, verbose, completed, color, showFailed) + OutputDataTransferChannels(os.Stdout, channels, verbose, completed, showFailed) return nil }, } // OutputDataTransferChannels generates table output for a list of channels -func OutputDataTransferChannels(out io.Writer, channels []lapi.DataTransferChannel, verbose, completed, color, showFailed bool) { +func OutputDataTransferChannels(out io.Writer, channels []lapi.DataTransferChannel, verbose, completed, showFailed bool) { sort.Slice(channels, func(i, j int) bool { return channels[i].TransferID < channels[j].TransferID }) @@ -2441,7 +2444,7 @@ func OutputDataTransferChannels(out io.Writer, channels []lapi.DataTransferChann tablewriter.Col("Voucher"), tablewriter.NewLineCol("Message")) for _, channel := range sendingChannels { - w.Write(toChannelOutput(color, "Sending To", channel, verbose)) + w.Write(toChannelOutput("Sending To", channel, verbose)) } w.Flush(out) //nolint:errcheck @@ -2455,17 +2458,13 @@ func OutputDataTransferChannels(out io.Writer, channels []lapi.DataTransferChann tablewriter.Col("Voucher"), tablewriter.NewLineCol("Message")) for _, channel := range receivingChannels { - w.Write(toChannelOutput(color, "Receiving From", channel, verbose)) + w.Write(toChannelOutput("Receiving From", channel, verbose)) } w.Flush(out) //nolint:errcheck } -func channelStatusString(useColor bool, status datatransfer.Status) string { +func channelStatusString(status datatransfer.Status) string { s := datatransfer.Statuses[status] - if !useColor { - return s - } - switch status { case datatransfer.Failed, 
datatransfer.Cancelled: return color.RedString(s) @@ -2476,7 +2475,7 @@ func channelStatusString(useColor bool, status datatransfer.Status) string { } } -func toChannelOutput(useColor bool, otherPartyColumn string, channel lapi.DataTransferChannel, verbose bool) map[string]interface{} { +func toChannelOutput(otherPartyColumn string, channel lapi.DataTransferChannel, verbose bool) map[string]interface{} { rootCid := channel.BaseCID.String() otherParty := channel.OtherPeer.String() if !verbose { @@ -2496,7 +2495,7 @@ func toChannelOutput(useColor bool, otherPartyColumn string, channel lapi.DataTr return map[string]interface{}{ "ID": channel.TransferID, - "Status": channelStatusString(useColor, channel.Status), + "Status": channelStatusString(channel.Status), otherPartyColumn: otherParty, "Root Cid": rootCid, "Initiated?": initiated, diff --git a/cli/cmd.go b/cli/cmd.go index 630aae1bc75..71524d787d2 100644 --- a/cli/cmd.go +++ b/cli/cmd.go @@ -44,7 +44,7 @@ func GetFullNodeServices(ctx *cli.Context) (ServicesAPI, error) { var GetAPIInfo = cliutil.GetAPIInfo var GetRawAPI = cliutil.GetRawAPI -var GetAPI = cliutil.GetAPI +var GetAPI = cliutil.GetCommonAPI var DaemonContext = cliutil.DaemonContext var ReqContext = cliutil.ReqContext @@ -54,10 +54,10 @@ var GetFullNodeAPIV1 = cliutil.GetFullNodeAPIV1 var GetGatewayAPI = cliutil.GetGatewayAPI var GetStorageMinerAPI = cliutil.GetStorageMinerAPI +var GetMarketsAPI = cliutil.GetMarketsAPI var GetWorkerAPI = cliutil.GetWorkerAPI var CommonCommands = []*cli.Command{ - NetCmd, AuthCmd, LogCmd, WaitApiCmd, diff --git a/cli/state.go b/cli/state.go index 6bf23d798db..d5251fb8595 100644 --- a/cli/state.go +++ b/cli/state.go @@ -446,6 +446,9 @@ var StateExecTraceCmd = &cli.Command{ if err != nil { return err } + if lookup == nil { + return fmt.Errorf("failed to find message: %s", mcid) + } ts, err := capi.ChainGetTipSet(ctx, lookup.TipSet) if err != nil { @@ -695,7 +698,7 @@ var StateListActorsCmd = &cli.Command{ var 
StateGetActorCmd = &cli.Command{ Name: "get-actor", Usage: "Print actor information", - ArgsUsage: "[actorrAddress]", + ArgsUsage: "[actorAddress]", Action: func(cctx *cli.Context) error { api, closer, err := GetFullNodeAPI(cctx) if err != nil { @@ -1491,6 +1494,10 @@ var StateSearchMsgCmd = &cli.Command{ return err } + if mw == nil { + return fmt.Errorf("failed to find message: %s", msg) + } + m, err := api.ChainGetMessage(ctx, msg) if err != nil { return err diff --git a/cli/util.go b/cli/util.go index 3183e21cff8..73668742def 100644 --- a/cli/util.go +++ b/cli/util.go @@ -3,10 +3,13 @@ package cli import ( "context" "fmt" + "os" "time" + "github.com/fatih/color" "github.com/hako/durafmt" "github.com/ipfs/go-cid" + "github.com/mattn/go-isatty" "github.com/filecoin-project/go-state-types/abi" @@ -15,6 +18,13 @@ import ( "github.com/filecoin-project/lotus/chain/types" ) +// Set the global default, to be overridden by individual cli flags in order +func init() { + color.NoColor = os.Getenv("GOLOG_LOG_FMT") != "color" && + !isatty.IsTerminal(os.Stdout.Fd()) && + !isatty.IsCygwinTerminal(os.Stdout.Fd()) +} + func parseTipSet(ctx context.Context, api v0api.FullNode, vals []string) (*types.TipSet, error) { var headers []*types.BlockHeader for _, c := range vals { diff --git a/cli/util/api.go b/cli/util/api.go index ec826160423..37df41a87ab 100644 --- a/cli/util/api.go +++ b/cli/util/api.go @@ -27,112 +27,145 @@ const ( metadataTraceContext = "traceContext" ) -// The flag passed on the command line with the listen address of the API -// server (only used by the tests) -func flagForAPI(t repo.RepoType) string { +// flagsForAPI returns flags passed on the command line with the listen address +// of the API server (only used by the tests), in the order of precedence they +// should be applied for the requested kind of node. 
+func flagsForAPI(t repo.RepoType) []string { switch t { case repo.FullNode: - return "api-url" + return []string{"api-url"} case repo.StorageMiner: - return "miner-api-url" + return []string{"miner-api-url"} case repo.Worker: - return "worker-api-url" + return []string{"worker-api-url"} + case repo.Markets: + // support split markets-miner and monolith deployments. + return []string{"markets-api-url", "miner-api-url"} default: panic(fmt.Sprintf("Unknown repo type: %v", t)) } } -func flagForRepo(t repo.RepoType) string { +func flagsForRepo(t repo.RepoType) []string { switch t { case repo.FullNode: - return "repo" + return []string{"repo"} case repo.StorageMiner: - return "miner-repo" + return []string{"miner-repo"} case repo.Worker: - return "worker-repo" + return []string{"worker-repo"} + case repo.Markets: + // support split markets-miner and monolith deployments. + return []string{"markets-repo", "miner-repo"} default: panic(fmt.Sprintf("Unknown repo type: %v", t)) } } -func EnvForRepo(t repo.RepoType) string { +// EnvsForAPIInfos returns the environment variables to use in order of precedence +// to determine the API endpoint of the specified node type. +// +// It returns the current variables and deprecated ones separately, so that +// the user can log a warning when deprecated ones are found to be in use. 
+func EnvsForAPIInfos(t repo.RepoType) (primary string, fallbacks []string, deprecated []string) { switch t { case repo.FullNode: - return "FULLNODE_API_INFO" + return "FULLNODE_API_INFO", nil, nil case repo.StorageMiner: - return "MINER_API_INFO" + // TODO remove deprecated deprecation period + return "MINER_API_INFO", nil, []string{"STORAGE_API_INFO"} case repo.Worker: - return "WORKER_API_INFO" - default: - panic(fmt.Sprintf("Unknown repo type: %v", t)) - } -} - -// TODO remove after deprecation period -func envForRepoDeprecation(t repo.RepoType) string { - switch t { - case repo.FullNode: - return "FULLNODE_API_INFO" - case repo.StorageMiner: - return "STORAGE_API_INFO" - case repo.Worker: - return "WORKER_API_INFO" + return "WORKER_API_INFO", nil, nil + case repo.Markets: + // support split markets-miner and monolith deployments. + return "MARKETS_API_INFO", []string{"MINER_API_INFO"}, nil default: panic(fmt.Sprintf("Unknown repo type: %v", t)) } } +// GetAPIInfo returns the API endpoint to use for the specified kind of repo. +// +// The order of precedence is as follows: +// +// 1. *-api-url command line flags. +// 2. *_API_INFO environment variables +// 3. deprecated *_API_INFO environment variables +// 4. *-repo command line flags. 
func GetAPIInfo(ctx *cli.Context, t repo.RepoType) (APIInfo, error) { // Check if there was a flag passed with the listen address of the API // server (only used by the tests) - apiFlag := flagForAPI(t) - if ctx.IsSet(apiFlag) { - strma := ctx.String(apiFlag) + apiFlags := flagsForAPI(t) + for _, f := range apiFlags { + if !ctx.IsSet(f) { + continue + } + strma := ctx.String(f) strma = strings.TrimSpace(strma) return APIInfo{Addr: strma}, nil } - envKey := EnvForRepo(t) - env, ok := os.LookupEnv(envKey) - if !ok { - // TODO remove after deprecation period - envKey = envForRepoDeprecation(t) - env, ok = os.LookupEnv(envKey) - if ok { - log.Warnf("Use deprecation env(%s) value, please use env(%s) instead.", envKey, EnvForRepo(t)) - } - } + // + // Note: it is not correct/intuitive to prefer environment variables over + // CLI flags (repo flags below). + // + primaryEnv, fallbacksEnvs, deprecatedEnvs := EnvsForAPIInfos(t) + env, ok := os.LookupEnv(primaryEnv) if ok { return ParseApiInfo(env), nil } - repoFlag := flagForRepo(t) - - p, err := homedir.Expand(ctx.String(repoFlag)) - if err != nil { - return APIInfo{}, xerrors.Errorf("could not expand home dir (%s): %w", repoFlag, err) + for _, env := range deprecatedEnvs { + env, ok := os.LookupEnv(env) + if ok { + log.Warnf("Using deprecated env(%s) value, please use env(%s) instead.", env, primaryEnv) + return ParseApiInfo(env), nil + } } - r, err := repo.NewFS(p) - if err != nil { - return APIInfo{}, xerrors.Errorf("could not open repo at path: %s; %w", p, err) - } + repoFlags := flagsForRepo(t) + for _, f := range repoFlags { + // cannot use ctx.IsSet because it ignores default values + path := ctx.String(f) + if path == "" { + continue + } - ma, err := r.APIEndpoint() - if err != nil { - return APIInfo{}, xerrors.Errorf("could not get api endpoint: %w", err) + p, err := homedir.Expand(path) + if err != nil { + return APIInfo{}, xerrors.Errorf("could not expand home dir (%s): %w", f, err) + } + + r, err := 
repo.NewFS(p) + if err != nil { + return APIInfo{}, xerrors.Errorf("could not open repo at path: %s; %w", p, err) + } + + ma, err := r.APIEndpoint() + if err != nil { + return APIInfo{}, xerrors.Errorf("could not get api endpoint: %w", err) + } + + token, err := r.APIToken() + if err != nil { + log.Warnf("Couldn't load CLI token, capabilities may be limited: %v", err) + } + + return APIInfo{ + Addr: ma.String(), + Token: token, + }, nil } - token, err := r.APIToken() - if err != nil { - log.Warnf("Couldn't load CLI token, capabilities may be limited: %v", err) + for _, env := range fallbacksEnvs { + env, ok := os.LookupEnv(env) + if ok { + return ParseApiInfo(env), nil + } } - return APIInfo{ - Addr: ma.String(), - Token: token, - }, nil + return APIInfo{}, fmt.Errorf("could not determine API endpoint for node type: %v", t) } func GetRawAPI(ctx *cli.Context, t repo.RepoType, version string) (string, http.Header, error) { @@ -146,13 +179,17 @@ func GetRawAPI(ctx *cli.Context, t repo.RepoType, version string) (string, http. 
return "", nil, xerrors.Errorf("could not get DialArgs: %w", err) } + if IsVeryVerbose { + _, _ = fmt.Fprintf(ctx.App.Writer, "using raw API %s endpoint: %s\n", version, addr) + } + return addr, ainfo.AuthHeader(), nil } -func GetAPI(ctx *cli.Context) (api.Common, jsonrpc.ClientCloser, error) { +func GetCommonAPI(ctx *cli.Context) (api.CommonNet, jsonrpc.ClientCloser, error) { ti, ok := ctx.App.Metadata["repoType"] if !ok { - log.Errorf("unknown repo type, are you sure you want to use GetAPI?") + log.Errorf("unknown repo type, are you sure you want to use GetCommonAPI?") ti = repo.FullNode } t, ok := ti.(repo.RepoType) @@ -185,6 +222,10 @@ func GetFullNodeAPI(ctx *cli.Context) (v0api.FullNode, jsonrpc.ClientCloser, err return nil, nil, err } + if IsVeryVerbose { + _, _ = fmt.Fprintln(ctx.App.Writer, "using full node API v0 endpoint:", addr) + } + return client.NewFullNodeRPCV0(ctx.Context, addr, headers) } @@ -198,6 +239,10 @@ func GetFullNodeAPIV1(ctx *cli.Context) (v1api.FullNode, jsonrpc.ClientCloser, e return nil, nil, err } + if IsVeryVerbose { + _, _ = fmt.Fprintln(ctx.App.Writer, "using full node API v1 endpoint:", addr) + } + return client.NewFullNodeRPCV1(ctx.Context, addr, headers) } @@ -242,6 +287,10 @@ func GetStorageMinerAPI(ctx *cli.Context, opts ...GetStorageMinerOption) (api.St addr = u.String() } + if IsVeryVerbose { + _, _ = fmt.Fprintln(ctx.App.Writer, "using miner API v0 endpoint:", addr) + } + return client.NewStorageMinerRPCV0(ctx.Context, addr, headers) } @@ -251,15 +300,44 @@ func GetWorkerAPI(ctx *cli.Context) (api.Worker, jsonrpc.ClientCloser, error) { return nil, nil, err } + if IsVeryVerbose { + _, _ = fmt.Fprintln(ctx.App.Writer, "using worker API v0 endpoint:", addr) + } + return client.NewWorkerRPCV0(ctx.Context, addr, headers) } +func GetMarketsAPI(ctx *cli.Context) (api.StorageMiner, jsonrpc.ClientCloser, error) { + // to support lotus-miner cli tests. 
+ if tn, ok := ctx.App.Metadata["testnode-storage"]; ok { + return tn.(api.StorageMiner), func() {}, nil + } + + addr, headers, err := GetRawAPI(ctx, repo.Markets, "v0") + if err != nil { + return nil, nil, err + } + + if IsVeryVerbose { + _, _ = fmt.Fprintln(ctx.App.Writer, "using markets API v0 endpoint:", addr) + } + + // the markets node is a specialised miner's node, supporting only the + // markets API, which is a subset of the miner API. All non-markets + // operations will error out with "unsupported". + return client.NewStorageMinerRPCV0(ctx.Context, addr, headers) +} + func GetGatewayAPI(ctx *cli.Context) (api.Gateway, jsonrpc.ClientCloser, error) { addr, headers, err := GetRawAPI(ctx, repo.FullNode, "v1") if err != nil { return nil, nil, err } + if IsVeryVerbose { + _, _ = fmt.Fprintln(ctx.App.Writer, "using gateway API v1 endpoint:", addr) + } + return client.NewGatewayRPCV1(ctx.Context, addr, headers) } @@ -269,6 +347,10 @@ func GetGatewayAPIV0(ctx *cli.Context) (v0api.Gateway, jsonrpc.ClientCloser, err return nil, nil, err } + if IsVeryVerbose { + _, _ = fmt.Fprintln(ctx.App.Writer, "using gateway API v0 endpoint:", addr) + } + return client.NewGatewayRPCV0(ctx.Context, addr, headers) } diff --git a/cli/util/verbose.go b/cli/util/verbose.go new file mode 100644 index 00000000000..efcad09629b --- /dev/null +++ b/cli/util/verbose.go @@ -0,0 +1,16 @@ +package cliutil + +import "github.com/urfave/cli/v2" + +// IsVeryVerbose is a global var signalling if the CLI is running in very +// verbose mode or not (default: false). +var IsVeryVerbose bool + +// FlagVeryVerbose enables very verbose mode, which is useful when debugging +// the CLI itself. It should be included as a flag on the top-level command +// (e.g. lotus -vv, lotus-miner -vv). 
+var FlagVeryVerbose = &cli.BoolFlag{ + Name: "vv", + Usage: "enables very verbose mode, useful for debugging the CLI", + Destination: &IsVeryVerbose, +} diff --git a/cmd/lotus-storage-miner/actor.go b/cmd/lotus-miner/actor.go similarity index 83% rename from cmd/lotus-storage-miner/actor.go rename to cmd/lotus-miner/actor.go index 7e428d0e4af..8b03f236061 100644 --- a/cmd/lotus-storage-miner/actor.go +++ b/cmd/lotus-miner/actor.go @@ -5,6 +5,7 @@ import ( "os" "strings" + rlepluslazy "github.com/filecoin-project/go-bitfield/rle" cbor "github.com/ipfs/go-ipld-cbor" "github.com/fatih/color" @@ -14,6 +15,7 @@ import ( "golang.org/x/xerrors" "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" @@ -41,6 +43,7 @@ var actorCmd = &cli.Command{ actorControl, actorProposeChangeWorker, actorConfirmChangeWorker, + actorCompactAllocatedCmd, }, } @@ -388,12 +391,15 @@ var actorControlList = &cli.Command{ Name: "verbose", }, &cli.BoolFlag{ - Name: "color", - Value: true, + Name: "color", + Usage: "use color in display output", + DefaultText: "depends on output being a TTY", }, }, Action: func(cctx *cli.Context) error { - color.NoColor = !cctx.Bool("color") + if cctx.IsSet("color") { + color.NoColor = !cctx.Bool("color") + } nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) if err != nil { @@ -435,6 +441,7 @@ var actorControlList = &cli.Command{ commit := map[address.Address]struct{}{} precommit := map[address.Address]struct{}{} terminate := map[address.Address]struct{}{} + dealPublish := map[address.Address]struct{}{} post := map[address.Address]struct{}{} for _, ca := range mi.ControlAddresses { @@ -471,6 +478,16 @@ var actorControlList = &cli.Command{ terminate[ca] = struct{}{} } + for _, ca := range ac.DealPublishControl { + ca, err := api.StateLookupID(ctx, ca, types.EmptyTSK) + if err != nil { + return err + } + + delete(post, ca) + 
dealPublish[ca] = struct{}{} + } + printKey := func(name string, a address.Address) { b, err := api.WalletBalance(ctx, a) if err != nil { @@ -515,6 +532,9 @@ var actorControlList = &cli.Command{ if _, ok := terminate[a]; ok { uses = append(uses, color.YellowString("terminate")) } + if _, ok := dealPublish[a]; ok { + uses = append(uses, color.MagentaString("deals")) + } tw.Write(map[string]interface{}{ "name": name, @@ -970,3 +990,154 @@ var actorConfirmChangeWorker = &cli.Command{ return nil }, } + +var actorCompactAllocatedCmd = &cli.Command{ + Name: "compact-allocated", + Usage: "compact allocated sectors bitfield", + Flags: []cli.Flag{ + &cli.Uint64Flag{ + Name: "mask-last-offset", + Usage: "Mask sector IDs from 0 to 'higest_allocated - offset'", + }, + &cli.Uint64Flag{ + Name: "mask-upto-n", + Usage: "Mask sector IDs from 0 to 'n'", + }, + &cli.BoolFlag{ + Name: "really-do-it", + Usage: "Actually send transaction performing the action", + Value: false, + }, + }, + Action: func(cctx *cli.Context) error { + if !cctx.Bool("really-do-it") { + fmt.Println("Pass --really-do-it to actually execute this action") + return nil + } + + if !cctx.Args().Present() { + return fmt.Errorf("must pass address of new owner address") + } + + nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer closer() + + api, acloser, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer acloser() + + ctx := lcli.ReqContext(cctx) + + maddr, err := nodeApi.ActorAddress(ctx) + if err != nil { + return err + } + + mact, err := api.StateGetActor(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + store := adt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewAPIBlockstore(api))) + + mst, err := miner.Load(store, mact) + if err != nil { + return err + } + + allocs, err := mst.GetAllocatedSectors() + if err != nil { + return err + } + + var maskBf bitfield.BitField + + { + exclusiveFlags := []string{"mask-last-offset", 
"mask-upto-n"} + hasFlag := false + for _, f := range exclusiveFlags { + if hasFlag && cctx.IsSet(f) { + return xerrors.Errorf("more than one 'mask` flag set") + } + hasFlag = hasFlag || cctx.IsSet(f) + } + } + switch { + case cctx.IsSet("mask-last-offset"): + last, err := allocs.Last() + if err != nil { + return err + } + + m := cctx.Uint64("mask-last-offset") + if last <= m+1 { + return xerrors.Errorf("highest allocated sector lower than mask offset %d: %d", m+1, last) + } + // securty to not brick a miner + if last > 1<<60 { + return xerrors.Errorf("very high last sector number, refusing to mask: %d", last) + } + + maskBf, err = bitfield.NewFromIter(&rlepluslazy.RunSliceIterator{ + Runs: []rlepluslazy.Run{{Val: true, Len: last - m}}}) + if err != nil { + return xerrors.Errorf("forming bitfield: %w", err) + } + case cctx.IsSet("mask-upto-n"): + n := cctx.Uint64("mask-upto-n") + maskBf, err = bitfield.NewFromIter(&rlepluslazy.RunSliceIterator{ + Runs: []rlepluslazy.Run{{Val: true, Len: n}}}) + if err != nil { + return xerrors.Errorf("forming bitfield: %w", err) + } + default: + return xerrors.Errorf("no 'mask' flags set") + } + + mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + params := &miner2.CompactSectorNumbersParams{ + MaskSectorNumbers: maskBf, + } + + sp, err := actors.SerializeParams(params) + if err != nil { + return xerrors.Errorf("serializing params: %w", err) + } + + smsg, err := api.MpoolPushMessage(ctx, &types.Message{ + From: mi.Worker, + To: maddr, + Method: miner.Methods.CompactSectorNumbers, + Value: big.Zero(), + Params: sp, + }, nil) + if err != nil { + return xerrors.Errorf("mpool push: %w", err) + } + + fmt.Println("CompactSectorNumbers Message CID:", smsg.Cid()) + + // wait for it to get mined into a block + wait, err := api.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence) + if err != nil { + return err + } + + // check it executed successfully + if wait.Receipt.ExitCode != 0 { + 
fmt.Println("Propose owner change failed!") + return err + } + + return nil + }, +} diff --git a/cmd/lotus-storage-miner/actor_test.go b/cmd/lotus-miner/actor_test.go similarity index 100% rename from cmd/lotus-storage-miner/actor_test.go rename to cmd/lotus-miner/actor_test.go diff --git a/cmd/lotus-storage-miner/allinfo_test.go b/cmd/lotus-miner/allinfo_test.go similarity index 96% rename from cmd/lotus-storage-miner/allinfo_test.go rename to cmd/lotus-miner/allinfo_test.go index 21ae4c8ee35..5f30b4fec3d 100644 --- a/cmd/lotus-storage-miner/allinfo_test.go +++ b/cmd/lotus-miner/allinfo_test.go @@ -49,7 +49,7 @@ func TestMinerAllInfo(t *testing.T) { t.Run("pre-info-all", run) - dh := kit.NewDealHarness(t, client, miner) + dh := kit.NewDealHarness(t, client, miner, miner) deal, res, inPath := dh.MakeOnlineDeal(context.Background(), kit.MakeFullDealParams{Rseed: 6}) outPath := dh.PerformRetrieval(context.Background(), deal, res.Root, false) kit.AssertFilesEqual(t, inPath, outPath) diff --git a/cmd/lotus-storage-miner/backup.go b/cmd/lotus-miner/backup.go similarity index 100% rename from cmd/lotus-storage-miner/backup.go rename to cmd/lotus-miner/backup.go diff --git a/cmd/lotus-miner/config.go b/cmd/lotus-miner/config.go new file mode 100644 index 00000000000..652426583e9 --- /dev/null +++ b/cmd/lotus-miner/config.go @@ -0,0 +1,94 @@ +package main + +import ( + "fmt" + + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/node/config" + "github.com/filecoin-project/lotus/node/repo" +) + +var configCmd = &cli.Command{ + Name: "config", + Usage: "Manage node config", + Subcommands: []*cli.Command{ + configDefaultCmd, + configUpdateCmd, + }, +} + +var configDefaultCmd = &cli.Command{ + Name: "default", + Usage: "Print default node config", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "no-comment", + Usage: "don't comment default values", + }, + }, + Action: func(cctx *cli.Context) error { + c := config.DefaultStorageMiner() + + 
cb, err := config.ConfigUpdate(c, nil, !cctx.Bool("no-comment")) + if err != nil { + return err + } + + fmt.Println(string(cb)) + + return nil + }, +} + +var configUpdateCmd = &cli.Command{ + Name: "updated", + Usage: "Print updated node config", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "no-comment", + Usage: "don't comment default values", + }, + }, + Action: func(cctx *cli.Context) error { + r, err := repo.NewFS(cctx.String(FlagMinerRepo)) + if err != nil { + return err + } + + ok, err := r.Exists() + if err != nil { + return err + } + + if !ok { + return xerrors.Errorf("repo not initialized") + } + + lr, err := r.LockRO(repo.StorageMiner) + if err != nil { + return xerrors.Errorf("locking repo: %w", err) + } + + cfgNode, err := lr.Config() + if err != nil { + _ = lr.Close() + return xerrors.Errorf("getting node config: %w", err) + } + + if err := lr.Close(); err != nil { + return err + } + + cfgDef := config.DefaultStorageMiner() + + updated, err := config.ConfigUpdate(cfgNode, cfgDef, !cctx.Bool("no-comment")) + if err != nil { + return err + } + + fmt.Print(string(updated)) + return nil + }, +} diff --git a/cmd/lotus-storage-miner/info.go b/cmd/lotus-miner/info.go similarity index 74% rename from cmd/lotus-storage-miner/info.go rename to cmd/lotus-miner/info.go index 0d8cdf07bc6..f37952057ff 100644 --- a/cmd/lotus-storage-miner/info.go +++ b/cmd/lotus-miner/info.go @@ -5,7 +5,10 @@ import ( "fmt" "math" corebig "math/big" + "os" "sort" + "strings" + "text/tabwriter" "time" "github.com/fatih/color" @@ -14,9 +17,11 @@ import ( cbor "github.com/ipfs/go-ipld-cbor" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/api/v0api" sealing "github.com/filecoin-project/lotus/extern/storage-sealing" "github.com/filecoin-project/lotus/api" @@ -45,15 +50,19 @@ var 
infoCmd = &cli.Command{ } func infoCmdAct(cctx *cli.Context) error { - color.NoColor = !cctx.Bool("color") + minerApi, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer closer() - nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) + marketsApi, closer, err := lcli.GetMarketsAPI(cctx) if err != nil { return err } defer closer() - api, acloser, err := lcli.GetFullNodeAPI(cctx) + fullapi, acloser, err := lcli.GetFullNodeAPI(cctx) if err != nil { return err } @@ -61,9 +70,23 @@ func infoCmdAct(cctx *cli.Context) error { ctx := lcli.ReqContext(cctx) + subsystems, err := minerApi.RuntimeSubsystems(ctx) + if err != nil { + return err + } + + fmt.Println("Enabled subsystems (from miner API):", subsystems) + + subsystems, err = marketsApi.RuntimeSubsystems(ctx) + if err != nil { + return err + } + + fmt.Println("Enabled subsystems (from markets API):", subsystems) + fmt.Print("Chain: ") - head, err := api.ChainHead(ctx) + head, err := fullapi.ChainHead(ctx) if err != nil { return err } @@ -93,24 +116,38 @@ func infoCmdAct(cctx *cli.Context) error { fmt.Println() + err = handleMiningInfo(ctx, cctx, fullapi, minerApi) + if err != nil { + return err + } + + err = handleMarketsInfo(ctx, marketsApi) + if err != nil { + return err + } + + return nil +} + +func handleMiningInfo(ctx context.Context, cctx *cli.Context, fullapi v0api.FullNode, nodeApi api.StorageMiner) error { maddr, err := getActorAddress(ctx, cctx) if err != nil { return err } - mact, err := api.StateGetActor(ctx, maddr, types.EmptyTSK) + mact, err := fullapi.StateGetActor(ctx, maddr, types.EmptyTSK) if err != nil { return err } - tbs := blockstore.NewTieredBstore(blockstore.NewAPIBlockstore(api), blockstore.NewMemory()) + tbs := blockstore.NewTieredBstore(blockstore.NewAPIBlockstore(fullapi), blockstore.NewMemory()) mas, err := miner.Load(adt.WrapStore(ctx, cbor.NewCborStore(tbs)), mact) if err != nil { return err } // Sector size - mi, err := api.StateMinerInfo(ctx, maddr, 
types.EmptyTSK) + mi, err := fullapi.StateMinerInfo(ctx, maddr, types.EmptyTSK) if err != nil { return err } @@ -118,7 +155,7 @@ func infoCmdAct(cctx *cli.Context) error { ssize := types.SizeStr(types.NewInt(uint64(mi.SectorSize))) fmt.Printf("Miner: %s (%s sectors)\n", color.BlueString("%s", maddr), ssize) - pow, err := api.StateMinerPower(ctx, maddr, types.EmptyTSK) + pow, err := fullapi.StateMinerPower(ctx, maddr, types.EmptyTSK) if err != nil { return err } @@ -140,7 +177,7 @@ func infoCmdAct(cctx *cli.Context) error { pow.TotalPower.RawBytePower, ), ) - secCounts, err := api.StateMinerSectorCount(ctx, maddr, types.EmptyTSK) + secCounts, err := fullapi.StateMinerSectorCount(ctx, maddr, types.EmptyTSK) if err != nil { return err } @@ -217,36 +254,6 @@ func infoCmdAct(cctx *cli.Context) error { fmt.Println() - deals, err := nodeApi.MarketListIncompleteDeals(ctx) - if err != nil { - return err - } - - var nactiveDeals, nVerifDeals, ndeals uint64 - var activeDealBytes, activeVerifDealBytes, dealBytes abi.PaddedPieceSize - for _, deal := range deals { - if deal.State == storagemarket.StorageDealError { - continue - } - - ndeals++ - dealBytes += deal.Proposal.PieceSize - - if deal.State == storagemarket.StorageDealActive { - nactiveDeals++ - activeDealBytes += deal.Proposal.PieceSize - - if deal.Proposal.VerifiedDeal { - nVerifDeals++ - activeVerifDealBytes += deal.Proposal.PieceSize - } - } - } - - fmt.Printf("Deals: %d, %s\n", ndeals, types.SizeStr(types.NewInt(uint64(dealBytes)))) - fmt.Printf("\tActive: %d, %s (Verified: %d, %s)\n", nactiveDeals, types.SizeStr(types.NewInt(uint64(activeDealBytes))), nVerifDeals, types.SizeStr(types.NewInt(uint64(activeVerifDealBytes)))) - fmt.Println() - spendable := big.Zero() // NOTE: there's no need to unlock anything here. 
Funds only @@ -267,7 +274,7 @@ func infoCmdAct(cctx *cli.Context) error { fmt.Printf(" Vesting: %s\n", types.FIL(lockedFunds.VestingFunds).Short()) colorTokenAmount(" Available: %s\n", availBalance) - mb, err := api.StateMarketBalance(ctx, maddr, types.EmptyTSK) + mb, err := fullapi.StateMarketBalance(ctx, maddr, types.EmptyTSK) if err != nil { return xerrors.Errorf("getting market balance: %w", err) } @@ -277,7 +284,7 @@ func infoCmdAct(cctx *cli.Context) error { fmt.Printf(" Locked: %s\n", types.FIL(mb.Locked).Short()) colorTokenAmount(" Available: %s\n", big.Sub(mb.Escrow, mb.Locked)) - wb, err := api.WalletBalance(ctx, mi.Worker) + wb, err := fullapi.WalletBalance(ctx, mi.Worker) if err != nil { return xerrors.Errorf("getting worker balance: %w", err) } @@ -286,7 +293,7 @@ func infoCmdAct(cctx *cli.Context) error { if len(mi.ControlAddresses) > 0 { cbsum := big.Zero() for _, ca := range mi.ControlAddresses { - b, err := api.WalletBalance(ctx, ca) + b, err := fullapi.WalletBalance(ctx, ca) if err != nil { return xerrors.Errorf("getting control address balance: %w", err) } @@ -311,6 +318,102 @@ func infoCmdAct(cctx *cli.Context) error { // TODO: grab actr state / info // * Sealed sectors (count / bytes) // * Power + + return nil +} + +func handleMarketsInfo(ctx context.Context, nodeApi api.StorageMiner) error { + deals, err := nodeApi.MarketListIncompleteDeals(ctx) + if err != nil { + return err + } + + type dealStat struct { + count, verifCount int + bytes, verifBytes uint64 + } + dsAdd := func(ds *dealStat, deal storagemarket.MinerDeal) { + ds.count++ + ds.bytes += uint64(deal.Proposal.PieceSize) + if deal.Proposal.VerifiedDeal { + ds.verifCount++ + ds.verifBytes += uint64(deal.Proposal.PieceSize) + } + } + + showDealStates := map[storagemarket.StorageDealStatus]struct{}{ + storagemarket.StorageDealActive: {}, + storagemarket.StorageDealTransferring: {}, + storagemarket.StorageDealStaged: {}, + storagemarket.StorageDealAwaitingPreCommit: {}, + 
storagemarket.StorageDealSealing: {}, + storagemarket.StorageDealPublish: {}, + storagemarket.StorageDealCheckForAcceptance: {}, + storagemarket.StorageDealPublishing: {}, + } + + var total dealStat + perState := map[storagemarket.StorageDealStatus]*dealStat{} + for _, deal := range deals { + if _, ok := showDealStates[deal.State]; !ok { + continue + } + if perState[deal.State] == nil { + perState[deal.State] = new(dealStat) + } + + dsAdd(&total, deal) + dsAdd(perState[deal.State], deal) + } + + type wstr struct { + str string + status storagemarket.StorageDealStatus + } + sorted := make([]wstr, 0, len(perState)) + for status, stat := range perState { + st := strings.TrimPrefix(storagemarket.DealStates[status], "StorageDeal") + sorted = append(sorted, wstr{ + str: fmt.Sprintf(" %s:\t%d\t\t%s\t(Verified: %d\t%s)\n", st, stat.count, types.SizeStr(types.NewInt(stat.bytes)), stat.verifCount, types.SizeStr(types.NewInt(stat.verifBytes))), + status: status, + }, + ) + } + sort.Slice(sorted, func(i, j int) bool { + if sorted[i].status == storagemarket.StorageDealActive || sorted[j].status == storagemarket.StorageDealActive { + return sorted[i].status == storagemarket.StorageDealActive + } + return sorted[i].status > sorted[j].status + }) + + fmt.Println() + fmt.Printf("Storage Deals: %d, %s\n", total.count, types.SizeStr(types.NewInt(total.bytes))) + + tw := tabwriter.NewWriter(os.Stdout, 1, 1, 1, ' ', 0) + for _, e := range sorted { + _, _ = tw.Write([]byte(e.str)) + } + + _ = tw.Flush() + fmt.Println() + + retrievals, err := nodeApi.MarketListRetrievalDeals(ctx) + if err != nil { + return xerrors.Errorf("getting retrieval deal list: %w", err) + } + + var retrComplete dealStat + for _, retrieval := range retrievals { + if retrieval.Status == retrievalmarket.DealStatusCompleted { + retrComplete.count++ + retrComplete.bytes += retrieval.TotalSent + } + } + + fmt.Printf("Retrieval Deals (complete): %d, %s\n", retrComplete.count, 
types.SizeStr(types.NewInt(retrComplete.bytes))) + + fmt.Println() + return nil } diff --git a/cmd/lotus-storage-miner/info_all.go b/cmd/lotus-miner/info_all.go similarity index 100% rename from cmd/lotus-storage-miner/info_all.go rename to cmd/lotus-miner/info_all.go diff --git a/cmd/lotus-storage-miner/init.go b/cmd/lotus-miner/init.go similarity index 98% rename from cmd/lotus-storage-miner/init.go rename to cmd/lotus-miner/init.go index 249f0ee03c9..1cce52a41a1 100644 --- a/cmd/lotus-storage-miner/init.go +++ b/cmd/lotus-miner/init.go @@ -121,7 +121,8 @@ var initCmd = &cli.Command{ }, }, Subcommands: []*cli.Command{ - initRestoreCmd, + restoreCmd, + serviceCmd, }, Action: func(cctx *cli.Context) error { log.Info("Initializing lotus miner") @@ -317,10 +318,10 @@ func migratePreSealMeta(ctx context.Context, api v1api.FullNode, metadata string Size: abi.PaddedPieceSize(meta.SectorSize), PieceCID: commD, }, - DealInfo: &sealing.DealInfo{ + DealInfo: &lapi.PieceDealInfo{ DealID: dealID, DealProposal: §or.Deal, - DealSchedule: sealing.DealSchedule{ + DealSchedule: lapi.DealSchedule{ StartEpoch: sector.Deal.StartEpoch, EndEpoch: sector.Deal.EndEpoch, }, @@ -470,7 +471,6 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api v1api.FullNode AllowCommit: true, AllowUnseal: true, }, wsts, smsts) - if err != nil { return err } @@ -734,6 +734,8 @@ func createStorageMiner(ctx context.Context, api v1api.FullNode, peerid peer.ID, return retval.IDAddress, nil } +// checkV1ApiSupport uses v0 api version to signal support for v1 API +// trying to query the v1 api on older lotus versions would get a 404, which can happen for any number of other reasons func checkV1ApiSupport(ctx context.Context, cctx *cli.Context) error { // check v0 api version to make sure it supports v1 api api0, closer, err := lcli.GetFullNodeAPI(cctx) diff --git a/cmd/lotus-miner/init_restore.go b/cmd/lotus-miner/init_restore.go new file mode 100644 index 00000000000..393b44dd2d3 --- /dev/null 
+++ b/cmd/lotus-miner/init_restore.go @@ -0,0 +1,298 @@ +package main + +import ( + "context" + "encoding/json" + "io/ioutil" + "os" + + "github.com/filecoin-project/lotus/api/v0api" + + "github.com/docker/go-units" + "github.com/ipfs/go-datastore" + "github.com/libp2p/go-libp2p-core/peer" + "github.com/mitchellh/go-homedir" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" + "gopkg.in/cheggaaa/pb.v1" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-paramfetch" + "github.com/filecoin-project/go-state-types/big" + + lapi "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/types" + lcli "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/extern/sector-storage/stores" + "github.com/filecoin-project/lotus/lib/backupds" + "github.com/filecoin-project/lotus/node/config" + "github.com/filecoin-project/lotus/node/repo" +) + +var restoreCmd = &cli.Command{ + Name: "restore", + Usage: "Initialize a lotus miner repo from a backup", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "nosync", + Usage: "don't check full-node sync status", + }, + &cli.StringFlag{ + Name: "config", + Usage: "config file (config.toml)", + }, + &cli.StringFlag{ + Name: "storage-config", + Usage: "storage paths config (storage.json)", + }, + }, + ArgsUsage: "[backupFile]", + Action: func(cctx *cli.Context) error { + ctx := lcli.ReqContext(cctx) + log.Info("Initializing lotus miner using a backup") + + var storageCfg *stores.StorageConfig + if cctx.IsSet("storage-config") { + cf, err := homedir.Expand(cctx.String("storage-config")) + if err != nil { + return xerrors.Errorf("expanding storage config path: %w", err) + } + + cfb, err := ioutil.ReadFile(cf) + if err != nil { + return xerrors.Errorf("reading storage config: %w", err) + } + + storageCfg = &stores.StorageConfig{} + err = json.Unmarshal(cfb, 
storageCfg) + if err != nil { + return xerrors.Errorf("cannot unmarshal json for storage config: %w", err) + } + } + + repoPath := cctx.String(FlagMinerRepo) + + if err := restore(ctx, cctx, repoPath, storageCfg, nil, func(api lapi.FullNode, maddr address.Address, peerid peer.ID, mi miner.MinerInfo) error { + log.Info("Checking proof parameters") + + if err := paramfetch.GetParams(ctx, build.ParametersJSON(), build.SrsJSON(), uint64(mi.SectorSize)); err != nil { + return xerrors.Errorf("fetching proof parameters: %w", err) + } + + log.Info("Configuring miner actor") + + if err := configureStorageMiner(ctx, api, maddr, peerid, big.Zero()); err != nil { + return err + } + + return nil + }); err != nil { + return err + } + + return nil + }, +} + +func restore(ctx context.Context, cctx *cli.Context, targetPath string, strConfig *stores.StorageConfig, manageConfig func(*config.StorageMiner) error, after func(api lapi.FullNode, addr address.Address, peerid peer.ID, mi miner.MinerInfo) error) error { + if cctx.Args().Len() != 1 { + return xerrors.Errorf("expected 1 argument") + } + + log.Info("Trying to connect to full node RPC") + + api, closer, err := lcli.GetFullNodeAPIV1(cctx) // TODO: consider storing full node address in config + if err != nil { + return err + } + defer closer() + + log.Info("Checking full node version") + + v, err := api.Version(ctx) + if err != nil { + return err + } + + if !v.APIVersion.EqMajorMinor(lapi.FullAPIVersion1) { + return xerrors.Errorf("Remote API version didn't match (expected %s, remote %s)", lapi.FullAPIVersion1, v.APIVersion) + } + + if !cctx.Bool("nosync") { + if err := lcli.SyncWait(ctx, &v0api.WrapperV1Full{FullNode: api}, false); err != nil { + return xerrors.Errorf("sync wait: %w", err) + } + } + + bf, err := homedir.Expand(cctx.Args().First()) + if err != nil { + return xerrors.Errorf("expand backup file path: %w", err) + } + + st, err := os.Stat(bf) + if err != nil { + return xerrors.Errorf("stat backup file (%s): %w", bf, 
err) + } + + f, err := os.Open(bf) + if err != nil { + return xerrors.Errorf("opening backup file: %w", err) + } + defer f.Close() // nolint:errcheck + + log.Info("Checking if repo exists") + + r, err := repo.NewFS(targetPath) + if err != nil { + return err + } + + ok, err := r.Exists() + if err != nil { + return err + } + if ok { + return xerrors.Errorf("repo at '%s' is already initialized", cctx.String(FlagMinerRepo)) + } + + log.Info("Initializing repo") + + if err := r.Init(repo.StorageMiner); err != nil { + return err + } + + lr, err := r.Lock(repo.StorageMiner) + if err != nil { + return err + } + defer lr.Close() //nolint:errcheck + + if cctx.IsSet("config") { + log.Info("Restoring config") + + cf, err := homedir.Expand(cctx.String("config")) + if err != nil { + return xerrors.Errorf("expanding config path: %w", err) + } + + _, err = os.Stat(cf) + if err != nil { + return xerrors.Errorf("stat config file (%s): %w", cf, err) + } + + var cerr error + err = lr.SetConfig(func(raw interface{}) { + rcfg, ok := raw.(*config.StorageMiner) + if !ok { + cerr = xerrors.New("expected miner config") + return + } + + ff, err := config.FromFile(cf, rcfg) + if err != nil { + cerr = xerrors.Errorf("loading config: %w", err) + return + } + + *rcfg = *ff.(*config.StorageMiner) + if manageConfig != nil { + cerr = manageConfig(rcfg) + } + }) + if cerr != nil { + return cerr + } + if err != nil { + return xerrors.Errorf("setting config: %w", err) + } + + } else { + log.Warn("--config NOT SET, WILL USE DEFAULT VALUES") + } + + if strConfig != nil { + log.Info("Restoring storage path config") + + err = lr.SetStorage(func(scfg *stores.StorageConfig) { + *scfg = *strConfig + }) + if err != nil { + return xerrors.Errorf("setting storage config: %w", err) + } + } else { + log.Warn("--storage-config NOT SET. 
NO SECTOR PATHS WILL BE CONFIGURED") + } + + log.Info("Restoring metadata backup") + + mds, err := lr.Datastore(context.TODO(), "/metadata") + if err != nil { + return err + } + + bar := pb.New64(st.Size()) + br := bar.NewProxyReader(f) + bar.ShowTimeLeft = true + bar.ShowPercent = true + bar.ShowSpeed = true + bar.Units = pb.U_BYTES + + bar.Start() + err = backupds.RestoreInto(br, mds) + bar.Finish() + + if err != nil { + return xerrors.Errorf("restoring metadata: %w", err) + } + + log.Info("Checking actor metadata") + + abytes, err := mds.Get(datastore.NewKey("miner-address")) + if err != nil { + return xerrors.Errorf("getting actor address from metadata datastore: %w", err) + } + + maddr, err := address.NewFromBytes(abytes) + if err != nil { + return xerrors.Errorf("parsing actor address: %w", err) + } + + log.Info("ACTOR ADDRESS: ", maddr.String()) + + mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("getting miner info: %w", err) + } + + log.Info("SECTOR SIZE: ", units.BytesSize(float64(mi.SectorSize))) + + wk, err := api.StateAccountKey(ctx, mi.Worker, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("resolving worker key: %w", err) + } + + has, err := api.WalletHas(ctx, wk) + if err != nil { + return xerrors.Errorf("checking worker address: %w", err) + } + + if !has { + return xerrors.Errorf("worker address %s for miner actor %s not present in full node wallet", mi.Worker, maddr) + } + + log.Info("Initializing libp2p identity") + + p2pSk, err := makeHostKey(lr) + if err != nil { + return xerrors.Errorf("make host key: %w", err) + } + + peerid, err := peer.IDFromPrivateKey(p2pSk) + if err != nil { + return xerrors.Errorf("peer ID from private key: %w", err) + } + + return after(api, maddr, peerid, mi) +} diff --git a/cmd/lotus-miner/init_service.go b/cmd/lotus-miner/init_service.go new file mode 100644 index 00000000000..6e874023e44 --- /dev/null +++ b/cmd/lotus-miner/init_service.go @@ -0,0 +1,157 
@@ +package main + +import ( + "context" + "strings" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/big" + lapi "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/client" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + lcli "github.com/filecoin-project/lotus/cli" + cliutil "github.com/filecoin-project/lotus/cli/util" + "github.com/filecoin-project/lotus/extern/sector-storage/stores" + "github.com/filecoin-project/lotus/node/config" + "github.com/libp2p/go-libp2p-core/peer" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" +) + +const ( + MarketsService = "markets" +) + +var serviceCmd = &cli.Command{ + Name: "service", + Usage: "Initialize a lotus miner sub-service", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "config", + Usage: "config file (config.toml)", + Required: true, + }, + &cli.BoolFlag{ + Name: "nosync", + Usage: "don't check full-node sync status", + }, + &cli.StringSliceFlag{ + Name: "type", + Usage: "type of service to be enabled", + }, + &cli.StringFlag{ + Name: "api-sealer", + Usage: "sealer API info (lotus-miner auth api-info --perm=admin)", + }, + &cli.StringFlag{ + Name: "api-sector-index", + Usage: "sector Index API info (lotus-miner auth api-info --perm=admin)", + }, + }, + ArgsUsage: "[backupFile]", + Action: func(cctx *cli.Context) error { + ctx := lcli.ReqContext(cctx) + log.Info("Initializing lotus miner service") + + es := EnabledServices(cctx.StringSlice("type")) + + if len(es) == 0 { + return xerrors.Errorf("at least one module must be enabled") + } + + // we should remove this as soon as we have more service types and not just `markets` + if !es.Contains(MarketsService) { + return xerrors.Errorf("markets module must be enabled") + } + + if !cctx.IsSet("api-sealer") { + return xerrors.Errorf("--api-sealer is required without the sealer module enabled") + } + if !cctx.IsSet("api-sector-index") { + return xerrors.Errorf("--api-sector-index 
is required without the sector storage module enabled") + } + + repoPath := cctx.String(FlagMarketsRepo) + if repoPath == "" { + return xerrors.Errorf("please provide Lotus markets repo path via flag %s", FlagMarketsRepo) + } + + if err := restore(ctx, cctx, repoPath, &stores.StorageConfig{}, func(cfg *config.StorageMiner) error { + cfg.Subsystems.EnableMarkets = es.Contains(MarketsService) + cfg.Subsystems.EnableMining = false + cfg.Subsystems.EnableSealing = false + cfg.Subsystems.EnableSectorStorage = false + + if !cfg.Subsystems.EnableSealing { + ai, err := checkApiInfo(ctx, cctx.String("api-sealer")) + if err != nil { + return xerrors.Errorf("checking sealer API: %w", err) + } + cfg.Subsystems.SealerApiInfo = ai + } + + if !cfg.Subsystems.EnableSectorStorage { + ai, err := checkApiInfo(ctx, cctx.String("api-sector-index")) + if err != nil { + return xerrors.Errorf("checking sector index API: %w", err) + } + cfg.Subsystems.SectorIndexApiInfo = ai + } + + return nil + }, func(api lapi.FullNode, maddr address.Address, peerid peer.ID, mi miner.MinerInfo) error { + if es.Contains(MarketsService) { + log.Info("Configuring miner actor") + + if err := configureStorageMiner(ctx, api, maddr, peerid, big.Zero()); err != nil { + return err + } + } + + return nil + }); err != nil { + return err + } + + return nil + }, +} + +type EnabledServices []string + +func (es EnabledServices) Contains(name string) bool { + for _, s := range es { + if s == name { + return true + } + } + return false +} + +func checkApiInfo(ctx context.Context, ai string) (string, error) { + ai = strings.TrimPrefix(strings.TrimSpace(ai), "MINER_API_INFO=") + info := cliutil.ParseApiInfo(ai) + addr, err := info.DialArgs("v0") + if err != nil { + return "", xerrors.Errorf("could not get DialArgs: %w", err) + } + + log.Infof("Checking api version of %s", addr) + + api, closer, err := client.NewStorageMinerRPCV0(ctx, addr, info.AuthHeader()) + if err != nil { + return "", err + } + defer closer() + + v, 
err := api.Version(ctx) + if err != nil { + return "", xerrors.Errorf("checking version: %w", err) + } + + if !v.APIVersion.EqMajorMinor(lapi.MinerAPIVersion0) { + return "", xerrors.Errorf("remote service API version didn't match (expected %s, remote %s)", lapi.MinerAPIVersion0, v.APIVersion) + } + + return ai, nil +} diff --git a/cmd/lotus-storage-miner/main.go b/cmd/lotus-miner/main.go similarity index 67% rename from cmd/lotus-storage-miner/main.go rename to cmd/lotus-miner/main.go index f5ff2517772..9cee61b0375 100644 --- a/cmd/lotus-storage-miner/main.go +++ b/cmd/lotus-miner/main.go @@ -4,15 +4,18 @@ import ( "context" "fmt" + "github.com/fatih/color" logging "github.com/ipfs/go-log/v2" "github.com/urfave/cli/v2" "go.opencensus.io/trace" "golang.org/x/xerrors" "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" lcli "github.com/filecoin-project/lotus/cli" + cliutil "github.com/filecoin-project/lotus/cli/util" "github.com/filecoin-project/lotus/lib/lotuslog" "github.com/filecoin-project/lotus/lib/tracing" "github.com/filecoin-project/lotus/node/repo" @@ -20,7 +23,10 @@ import ( var log = logging.Logger("main") -const FlagMinerRepo = "miner-repo" +const ( + FlagMinerRepo = "miner-repo" + FlagMarketsRepo = "markets-repo" +) // TODO remove after deprecation period const FlagMinerRepoDeprecation = "storagerepo" @@ -61,13 +67,32 @@ func main() { trace.UnregisterExporter(jaeger) jaeger = tracing.SetupJaegerTracing("lotus/" + cmd.Name) + if cctx.IsSet("color") { + color.NoColor = !cctx.Bool("color") + } + if originBefore != nil { return originBefore(cctx) } + return nil } } + // adapt the Net* commands to always hit the node running the markets + // subsystem, as that is the only one that runs a libp2p node. + netCmd := *lcli.NetCmd // make a copy. 
+ prev := netCmd.Before + netCmd.Before = func(c *cli.Context) error { + if prev != nil { + if err := prev(c); err != nil { + return err + } + } + c.App.Metadata["repoType"] = repo.Markets + return nil + } + app := &cli.App{ Name: "lotus-miner", Usage: "Filecoin decentralized storage network miner", @@ -81,7 +106,10 @@ func main() { Aliases: []string{"a"}, }, &cli.BoolFlag{ - Name: "color", + // examined in the Before above + Name: "color", + Usage: "use color in display output", + DefaultText: "depends on output being a TTY", }, &cli.StringFlag{ Name: "repo", @@ -96,13 +124,29 @@ func main() { Value: "~/.lotusminer", // TODO: Consider XDG_DATA_HOME Usage: fmt.Sprintf("Specify miner repo path. flag(%s) and env(LOTUS_STORAGE_PATH) are DEPRECATION, will REMOVE SOON", FlagMinerRepoDeprecation), }, + &cli.StringFlag{ + Name: FlagMarketsRepo, + EnvVars: []string{"LOTUS_MARKETS_PATH"}, + Usage: fmt.Sprintf("Markets repo path"), + }, + &cli.BoolFlag{ + Name: "call-on-markets", + Usage: "(experimental; may be removed) call this command against a markets node; use only with common commands like net, auth, pprof, etc. whose target may be ambiguous", + }, + cliutil.FlagVeryVerbose, + }, + Commands: append(local, append(lcli.CommonCommands, &netCmd)...), + Before: func(c *cli.Context) error { + // this command is explicitly called on markets, inform + // common commands by overriding the repoType. 
+ if c.Bool("call-on-markets") { + c.App.Metadata["repoType"] = repo.Markets + } + return nil }, - - Commands: append(local, lcli.CommonCommands...), } app.Setup() app.Metadata["repoType"] = repo.StorageMiner - lcli.RunApp(app) } diff --git a/cmd/lotus-storage-miner/market.go b/cmd/lotus-miner/market.go similarity index 94% rename from cmd/lotus-storage-miner/market.go rename to cmd/lotus-miner/market.go index f46ad32bfa8..a9d1f2f4658 100644 --- a/cmd/lotus-storage-miner/market.go +++ b/cmd/lotus-miner/market.go @@ -15,6 +15,7 @@ import ( tm "github.com/buger/goterm" "github.com/docker/go-units" + "github.com/fatih/color" "github.com/ipfs/go-cid" "github.com/ipfs/go-cidutil/cidenc" "github.com/libp2p/go-libp2p-core/peer" @@ -72,7 +73,7 @@ var storageDealSelectionShowCmd = &cli.Command{ Name: "list", Usage: "List storage deal proposal selection criteria", Action: func(cctx *cli.Context) error { - smapi, closer, err := lcli.GetStorageMinerAPI(cctx) + smapi, closer, err := lcli.GetMarketsAPI(cctx) if err != nil { return err } @@ -99,7 +100,7 @@ var storageDealSelectionResetCmd = &cli.Command{ Name: "reset", Usage: "Reset storage deal proposal selection criteria to default values", Action: func(cctx *cli.Context) error { - smapi, closer, err := lcli.GetStorageMinerAPI(cctx) + smapi, closer, err := lcli.GetMarketsAPI(cctx) if err != nil { return err } @@ -147,7 +148,7 @@ var storageDealSelectionRejectCmd = &cli.Command{ }, }, Action: func(cctx *cli.Context) error { - smapi, closer, err := lcli.GetStorageMinerAPI(cctx) + smapi, closer, err := lcli.GetMarketsAPI(cctx) if err != nil { return err } @@ -214,7 +215,13 @@ var setAskCmd = &cli.Command{ Action: func(cctx *cli.Context) error { ctx := lcli.DaemonContext(cctx) - api, closer, err := lcli.GetStorageMinerAPI(cctx) + minerApi, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer closer() + + marketsApi, closer, err := lcli.GetMarketsAPI(cctx) if err != nil { return err } @@ -251,12 
+258,12 @@ var setAskCmd = &cli.Command{ return xerrors.Errorf("cannot parse max-piece-size to quantity of bytes: %w", err) } - maddr, err := api.ActorAddress(ctx) + maddr, err := minerApi.ActorAddress(ctx) if err != nil { return err } - ssize, err := api.ActorSectorSize(ctx, maddr) + ssize, err := minerApi.ActorSectorSize(ctx, maddr) if err != nil { return err } @@ -271,7 +278,7 @@ var setAskCmd = &cli.Command{ return xerrors.Errorf("max piece size (w/bit-padding) %s cannot exceed miner sector size %s", types.SizeStr(types.NewInt(uint64(max))), types.SizeStr(types.NewInt(uint64(smax)))) } - return api.MarketSetAsk(ctx, types.BigInt(pri), types.BigInt(vpri), abi.ChainEpoch(qty), abi.PaddedPieceSize(min), abi.PaddedPieceSize(max)) + return marketsApi.MarketSetAsk(ctx, types.BigInt(pri), types.BigInt(vpri), abi.ChainEpoch(qty), abi.PaddedPieceSize(min), abi.PaddedPieceSize(max)) }, } @@ -288,7 +295,7 @@ var getAskCmd = &cli.Command{ } defer closer() - smapi, closer, err := lcli.GetStorageMinerAPI(cctx) + smapi, closer, err := lcli.GetMarketsAPI(cctx) if err != nil { return err } @@ -351,7 +358,7 @@ var dealsImportDataCmd = &cli.Command{ Usage: "Manually import data for a deal", ArgsUsage: " ", Action: func(cctx *cli.Context) error { - api, closer, err := lcli.GetStorageMinerAPI(cctx) + api, closer, err := lcli.GetMarketsAPI(cctx) if err != nil { return err } @@ -389,7 +396,7 @@ var dealsListCmd = &cli.Command{ }, }, Action: func(cctx *cli.Context) error { - api, closer, err := lcli.GetStorageMinerAPI(cctx) + api, closer, err := lcli.GetMarketsAPI(cctx) if err != nil { return err } @@ -493,7 +500,7 @@ var getBlocklistCmd = &cli.Command{ &CidBaseFlag, }, Action: func(cctx *cli.Context) error { - api, closer, err := lcli.GetStorageMinerAPI(cctx) + api, closer, err := lcli.GetMarketsAPI(cctx) if err != nil { return err } @@ -523,7 +530,7 @@ var setBlocklistCmd = &cli.Command{ ArgsUsage: "[ (optional, will read from stdin if omitted)]", Flags: []cli.Flag{}, Action: 
func(cctx *cli.Context) error { - api, closer, err := lcli.GetStorageMinerAPI(cctx) + api, closer, err := lcli.GetMarketsAPI(cctx) if err != nil { return err } @@ -569,7 +576,7 @@ var resetBlocklistCmd = &cli.Command{ Usage: "Remove all entries from the miner's piece CID blocklist", Flags: []cli.Flag{}, Action: func(cctx *cli.Context) error { - api, closer, err := lcli.GetStorageMinerAPI(cctx) + api, closer, err := lcli.GetMarketsAPI(cctx) if err != nil { return err } @@ -633,7 +640,7 @@ var marketRestartTransfer = &cli.Command{ if !cctx.Args().Present() { return cli.ShowCommandHelp(cctx, cctx.Command.Name) } - nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) + nodeApi, closer, err := lcli.GetMarketsAPI(cctx) if err != nil { return err } @@ -698,7 +705,7 @@ var marketCancelTransfer = &cli.Command{ if !cctx.Args().Present() { return cli.ShowCommandHelp(cctx, cctx.Command.Name) } - nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) + nodeApi, closer, err := lcli.GetMarketsAPI(cctx) if err != nil { return err } @@ -752,9 +759,9 @@ var transfersListCmd = &cli.Command{ Usage: "print verbose transfer details", }, &cli.BoolFlag{ - Name: "color", - Usage: "use color in display output", - Value: true, + Name: "color", + Usage: "use color in display output", + DefaultText: "depends on output being a TTY", }, &cli.BoolFlag{ Name: "completed", @@ -770,7 +777,11 @@ var transfersListCmd = &cli.Command{ }, }, Action: func(cctx *cli.Context) error { - api, closer, err := lcli.GetStorageMinerAPI(cctx) + if cctx.IsSet("color") { + color.NoColor = !cctx.Bool("color") + } + + api, closer, err := lcli.GetMarketsAPI(cctx) if err != nil { return err } @@ -784,7 +795,6 @@ var transfersListCmd = &cli.Command{ verbose := cctx.Bool("verbose") completed := cctx.Bool("completed") - color := cctx.Bool("color") watch := cctx.Bool("watch") showFailed := cctx.Bool("show-failed") if watch { @@ -798,7 +808,7 @@ var transfersListCmd = &cli.Command{ tm.MoveCursor(1, 1) - 
lcli.OutputDataTransferChannels(tm.Screen, channels, verbose, completed, color, showFailed) + lcli.OutputDataTransferChannels(tm.Screen, channels, verbose, completed, showFailed) tm.Flush() @@ -823,7 +833,7 @@ var transfersListCmd = &cli.Command{ } } } - lcli.OutputDataTransferChannels(os.Stdout, channels, verbose, completed, color, showFailed) + lcli.OutputDataTransferChannels(os.Stdout, channels, verbose, completed, showFailed) return nil }, } @@ -838,7 +848,7 @@ var dealsPendingPublish = &cli.Command{ }, }, Action: func(cctx *cli.Context) error { - api, closer, err := lcli.GetStorageMinerAPI(cctx) + api, closer, err := lcli.GetMarketsAPI(cctx) if err != nil { return err } diff --git a/cmd/lotus-storage-miner/pieces.go b/cmd/lotus-miner/pieces.go similarity index 100% rename from cmd/lotus-storage-miner/pieces.go rename to cmd/lotus-miner/pieces.go diff --git a/cmd/lotus-storage-miner/proving.go b/cmd/lotus-miner/proving.go similarity index 98% rename from cmd/lotus-storage-miner/proving.go rename to cmd/lotus-miner/proving.go index 0e36c6508d5..5dfe5d4ceda 100644 --- a/cmd/lotus-storage-miner/proving.go +++ b/cmd/lotus-miner/proving.go @@ -36,8 +36,6 @@ var provingFaultsCmd = &cli.Command{ Name: "faults", Usage: "View the currently known proving faulty sectors information", Action: func(cctx *cli.Context) error { - color.NoColor = !cctx.Bool("color") - api, acloser, err := lcli.GetFullNodeAPI(cctx) if err != nil { return err @@ -90,8 +88,6 @@ var provingInfoCmd = &cli.Command{ Name: "info", Usage: "View current state information", Action: func(cctx *cli.Context) error { - color.NoColor = !cctx.Bool("color") - api, acloser, err := lcli.GetFullNodeAPI(cctx) if err != nil { return err @@ -197,8 +193,6 @@ var provingDeadlinesCmd = &cli.Command{ Name: "deadlines", Usage: "View the current proving period deadlines information", Action: func(cctx *cli.Context) error { - color.NoColor = !cctx.Bool("color") - api, acloser, err := lcli.GetFullNodeAPI(cctx) if err != nil { 
return err diff --git a/cmd/lotus-storage-miner/retrieval-deals.go b/cmd/lotus-miner/retrieval-deals.go similarity index 91% rename from cmd/lotus-storage-miner/retrieval-deals.go rename to cmd/lotus-miner/retrieval-deals.go index 0411f7f130a..1ce1f6593df 100644 --- a/cmd/lotus-storage-miner/retrieval-deals.go +++ b/cmd/lotus-miner/retrieval-deals.go @@ -39,7 +39,7 @@ var retrievalDealSelectionShowCmd = &cli.Command{ Name: "list", Usage: "List retrieval deal proposal selection criteria", Action: func(cctx *cli.Context) error { - smapi, closer, err := lcli.GetStorageMinerAPI(cctx) + smapi, closer, err := lcli.GetMarketsAPI(cctx) if err != nil { return err } @@ -66,7 +66,7 @@ var retrievalDealSelectionResetCmd = &cli.Command{ Name: "reset", Usage: "Reset retrieval deal proposal selection criteria to default values", Action: func(cctx *cli.Context) error { - smapi, closer, err := lcli.GetStorageMinerAPI(cctx) + smapi, closer, err := lcli.GetMarketsAPI(cctx) if err != nil { return err } @@ -98,7 +98,7 @@ var retrievalDealSelectionRejectCmd = &cli.Command{ }, }, Action: func(cctx *cli.Context) error { - smapi, closer, err := lcli.GetStorageMinerAPI(cctx) + smapi, closer, err := lcli.GetMarketsAPI(cctx) if err != nil { return err } @@ -126,7 +126,7 @@ var retrievalDealsListCmd = &cli.Command{ Name: "list", Usage: "List all active retrieval deals for this miner", Action: func(cctx *cli.Context) error { - api, closer, err := lcli.GetStorageMinerAPI(cctx) + api, closer, err := lcli.GetMarketsAPI(cctx) if err != nil { return err } @@ -186,7 +186,7 @@ var retrievalSetAskCmd = &cli.Command{ Action: func(cctx *cli.Context) error { ctx := lcli.DaemonContext(cctx) - api, closer, err := lcli.GetStorageMinerAPI(cctx) + api, closer, err := lcli.GetMarketsAPI(cctx) if err != nil { return err } @@ -240,7 +240,7 @@ var retrievalGetAskCmd = &cli.Command{ Action: func(cctx *cli.Context) error { ctx := lcli.DaemonContext(cctx) - api, closer, err := lcli.GetStorageMinerAPI(cctx) + api, 
closer, err := lcli.GetMarketsAPI(cctx) if err != nil { return err } @@ -252,13 +252,13 @@ var retrievalGetAskCmd = &cli.Command{ } w := tabwriter.NewWriter(os.Stdout, 2, 4, 2, ' ', 0) - fmt.Fprintf(w, "Price per Byte\tUnseal Price\tPayment Interval\tPayment Interval Increase\n") + _, _ = fmt.Fprintf(w, "Price per Byte\tUnseal Price\tPayment Interval\tPayment Interval Increase\n") if ask == nil { - fmt.Fprintf(w, "\n") + _, _ = fmt.Fprintf(w, "\n") return w.Flush() } - fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", + _, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", types.FIL(ask.PricePerByte), types.FIL(ask.UnsealPrice), units.BytesSize(float64(ask.PaymentInterval)), diff --git a/cmd/lotus-storage-miner/run.go b/cmd/lotus-miner/run.go similarity index 83% rename from cmd/lotus-storage-miner/run.go rename to cmd/lotus-miner/run.go index 3daf9a91142..f276f319c9b 100644 --- a/cmd/lotus-storage-miner/run.go +++ b/cmd/lotus-miner/run.go @@ -22,6 +22,7 @@ import ( "github.com/filecoin-project/lotus/lib/ulimit" "github.com/filecoin-project/lotus/metrics" "github.com/filecoin-project/lotus/node" + "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/repo" ) @@ -118,13 +119,33 @@ var runCmd = &cli.Command{ return xerrors.Errorf("repo at '%s' is not initialized, run 'lotus-miner init' to set it up", minerRepoPath) } + lr, err := r.Lock(repo.StorageMiner) + if err != nil { + return err + } + c, err := lr.Config() + if err != nil { + return err + } + cfg, ok := c.(*config.StorageMiner) + if !ok { + return xerrors.Errorf("invalid config for repo, got: %T", c) + } + + bootstrapLibP2P := cfg.Subsystems.EnableMarkets + + err = lr.Close() + if err != nil { + return err + } + shutdownChan := make(chan struct{}) var minerapi api.StorageMiner stop, err := node.New(ctx, - node.StorageMiner(&minerapi), + node.StorageMiner(&minerapi, cfg.Subsystems), node.Override(new(dtypes.ShutdownChan), shutdownChan), - 
node.Online(), + node.Base(), node.Repo(r), node.ApplyIf(func(s *node.Settings) bool { return cctx.IsSet("miner-api") }, @@ -142,14 +163,18 @@ var runCmd = &cli.Command{ return xerrors.Errorf("getting API endpoint: %w", err) } - // Bootstrap with full node - remoteAddrs, err := nodeApi.NetAddrsListen(ctx) - if err != nil { - return xerrors.Errorf("getting full node libp2p address: %w", err) - } + if bootstrapLibP2P { + log.Infof("Bootstrapping libp2p network with full node") - if err := minerapi.NetConnect(ctx, remoteAddrs); err != nil { - return xerrors.Errorf("connecting to full node (libp2p): %w", err) + // Bootstrap with full node + remoteAddrs, err := nodeApi.NetAddrsListen(ctx) + if err != nil { + return xerrors.Errorf("getting full node libp2p address: %w", err) + } + + if err := minerapi.NetConnect(ctx, remoteAddrs); err != nil { + return xerrors.Errorf("connecting to full node (libp2p): %w", err) + } } log.Infof("Remote version %s", v) diff --git a/cmd/lotus-storage-miner/sealing.go b/cmd/lotus-miner/sealing.go similarity index 94% rename from cmd/lotus-storage-miner/sealing.go rename to cmd/lotus-miner/sealing.go index ad890129d0b..3bf4c675fd7 100644 --- a/cmd/lotus-storage-miner/sealing.go +++ b/cmd/lotus-miner/sealing.go @@ -36,10 +36,16 @@ var sealingWorkersCmd = &cli.Command{ Name: "workers", Usage: "list workers", Flags: []cli.Flag{ - &cli.BoolFlag{Name: "color"}, + &cli.BoolFlag{ + Name: "color", + Usage: "use color in display output", + DefaultText: "depends on output being a TTY", + }, }, Action: func(cctx *cli.Context) error { - color.NoColor = !cctx.Bool("color") + if cctx.IsSet("color") { + color.NoColor = !cctx.Bool("color") + } nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) if err != nil { @@ -127,14 +133,20 @@ var sealingJobsCmd = &cli.Command{ Name: "jobs", Usage: "list running jobs", Flags: []cli.Flag{ - &cli.BoolFlag{Name: "color"}, + &cli.BoolFlag{ + Name: "color", + Usage: "use color in display output", + DefaultText: "depends on 
output being a TTY", + }, &cli.BoolFlag{ Name: "show-ret-done", Usage: "show returned but not consumed calls", }, }, Action: func(cctx *cli.Context) error { - color.NoColor = !cctx.Bool("color") + if cctx.IsSet("color") { + color.NoColor = !cctx.Bool("color") + } nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) if err != nil { diff --git a/cmd/lotus-storage-miner/sectors.go b/cmd/lotus-miner/sectors.go similarity index 96% rename from cmd/lotus-storage-miner/sectors.go rename to cmd/lotus-miner/sectors.go index 2476c16e813..fbf84ecff60 100644 --- a/cmd/lotus-storage-miner/sectors.go +++ b/cmd/lotus-miner/sectors.go @@ -161,9 +161,10 @@ var sectorsListCmd = &cli.Command{ Usage: "show removed sectors", }, &cli.BoolFlag{ - Name: "color", - Aliases: []string{"c"}, - Value: true, + Name: "color", + Usage: "use color in display output", + DefaultText: "depends on output being a TTY", + Aliases: []string{"c"}, }, &cli.BoolFlag{ Name: "fast", @@ -183,7 +184,9 @@ var sectorsListCmd = &cli.Command{ }, }, Action: func(cctx *cli.Context) error { - color.NoColor = !cctx.Bool("color") + if cctx.IsSet("color") { + color.NoColor = !cctx.Bool("color") + } nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) if err != nil { @@ -436,6 +439,12 @@ var sectorsExtendCmd = &cli.Command{ Usage: "when extending v1 sectors, don't try to extend sectors by fewer than this number of epochs", Required: false, }, + &cli.Int64Flag{ + Name: "expiration-ignore", + Value: 120, + Usage: "when extending v1 sectors, skip sectors whose current expiration is less than epochs from now", + Required: false, + }, &cli.Int64Flag{ Name: "expiration-cutoff", Usage: "when extending v1 sectors, skip sectors whose current expiration is more than epochs from now (infinity if unspecified)", @@ -494,6 +503,10 @@ var sectorsExtendCmd = &cli.Command{ continue } + if si.Expiration < (head.Height() + abi.ChainEpoch(cctx.Int64("expiration-ignore"))) { + continue + } + if cctx.IsSet("expiration-cutoff") { if 
si.Expiration > (head.Height() + abi.ChainEpoch(cctx.Int64("expiration-cutoff"))) { continue @@ -508,6 +521,10 @@ var sectorsExtendCmd = &cli.Command{ // Set the new expiration to 48 hours less than the theoretical maximum lifetime newExp := ml - (miner3.WPoStProvingPeriod * 2) + si.Activation + if withinTolerance(si.Expiration, newExp) || si.Expiration >= newExp { + continue + } + p, err := api.StateSectorPartition(ctx, maddr, si.SectorNumber, types.EmptyTSK) if err != nil { return xerrors.Errorf("getting sector location for sector %d: %w", si.SectorNumber, err) @@ -525,7 +542,7 @@ var sectorsExtendCmd = &cli.Command{ } else { added := false for exp := range es { - if withinTolerance(exp, newExp) { + if withinTolerance(exp, newExp) && newExp >= exp && exp > si.Expiration { es[exp] = append(es[exp], uint64(si.SectorNumber)) added = true break @@ -544,7 +561,15 @@ var sectorsExtendCmd = &cli.Command{ for l, exts := range extensions { for newExp, numbers := range exts { scount += len(numbers) - if scount > policy.GetAddressedSectorsMax(nv) || len(p.Extensions) == policy.GetDeclarationsMax(nv) { + addressedMax, err := policy.GetAddressedSectorsMax(nv) + if err != nil { + return xerrors.Errorf("failed to get addressed sectors max") + } + declMax, err := policy.GetDeclarationsMax(nv) + if err != nil { + return xerrors.Errorf("failed to get declarations max") + } + if scount > addressedMax || len(p.Extensions) == declMax { params = append(params, p) p = miner3.ExtendSectorExpirationParams{} scount = len(numbers) diff --git a/cmd/lotus-storage-miner/stop.go b/cmd/lotus-miner/stop.go similarity index 100% rename from cmd/lotus-storage-miner/stop.go rename to cmd/lotus-miner/stop.go diff --git a/cmd/lotus-storage-miner/storage.go b/cmd/lotus-miner/storage.go similarity index 97% rename from cmd/lotus-storage-miner/storage.go rename to cmd/lotus-miner/storage.go index f2068ea86cd..e7508eb295c 100644 --- a/cmd/lotus-storage-miner/storage.go +++ b/cmd/lotus-miner/storage.go @@ 
-166,13 +166,19 @@ var storageListCmd = &cli.Command{ Name: "list", Usage: "list local storage paths", Flags: []cli.Flag{ - &cli.BoolFlag{Name: "color"}, + &cli.BoolFlag{ + Name: "color", + Usage: "use color in display output", + DefaultText: "depends on output being a TTY", + }, }, Subcommands: []*cli.Command{ storageListSectorsCmd, }, Action: func(cctx *cli.Context) error { - color.NoColor = !cctx.Bool("color") + if cctx.IsSet("color") { + color.NoColor = !cctx.Bool("color") + } nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) if err != nil { @@ -478,12 +484,15 @@ var storageListSectorsCmd = &cli.Command{ Usage: "get list of all sector files", Flags: []cli.Flag{ &cli.BoolFlag{ - Name: "color", - Value: true, + Name: "color", + Usage: "use color in display output", + DefaultText: "depends on output being a TTY", }, }, Action: func(cctx *cli.Context) error { - color.NoColor = !cctx.Bool("color") + if cctx.IsSet("color") { + color.NoColor = !cctx.Bool("color") + } nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) if err != nil { diff --git a/cmd/lotus-seed/genesis.go b/cmd/lotus-seed/genesis.go index 66de93888b5..a27cc0a2f7c 100644 --- a/cmd/lotus-seed/genesis.go +++ b/cmd/lotus-seed/genesis.go @@ -572,7 +572,7 @@ var genesisCarCmd = &cli.Command{ } ofile := c.String("out") jrnl := journal.NilJournal() - bstor := blockstore.NewMemorySync() + bstor := blockstore.WrapIDStore(blockstore.NewMemorySync()) sbldr := vm.Syscalls(ffiwrapper.ProofVerifier) _, err := testing.MakeGenesis(ofile, c.Args().First())(bstor, sbldr, jrnl)() return err diff --git a/cmd/lotus-shed/actor.go b/cmd/lotus-shed/actor.go index 9d242e2df2c..b78f283497f 100644 --- a/cmd/lotus-shed/actor.go +++ b/cmd/lotus-shed/actor.go @@ -265,12 +265,15 @@ var actorControlList = &cli.Command{ Name: "verbose", }, &cli.BoolFlag{ - Name: "color", - Value: true, + Name: "color", + Usage: "use color in display output", + DefaultText: "depends on output being a TTY", }, }, Action: func(cctx *cli.Context) 
error { - color.NoColor = !cctx.Bool("color") + if cctx.IsSet("color") { + color.NoColor = !cctx.Bool("color") + } var maddr address.Address if act := cctx.String("actor"); act != "" { diff --git a/cmd/lotus-shed/election.go b/cmd/lotus-shed/election.go index c844203d6c9..d49d5c04f4f 100644 --- a/cmd/lotus-shed/election.go +++ b/cmd/lotus-shed/election.go @@ -1,10 +1,16 @@ package main import ( + "context" "encoding/binary" "fmt" "math/rand" + "github.com/filecoin-project/lotus/api/v0api" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/gen" "github.com/filecoin-project/lotus/chain/types" lcli "github.com/filecoin-project/lotus/cli" builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" @@ -18,6 +24,7 @@ var electionCmd = &cli.Command{ Subcommands: []*cli.Command{ electionRunDummy, electionEstimate, + electionBacktest, }, } @@ -124,3 +131,97 @@ var electionEstimate = &cli.Command{ return nil }, } + +var electionBacktest = &cli.Command{ + Name: "backtest", + Usage: "Backtest elections with given miner", + ArgsUsage: "[minerAddress]", + Flags: []cli.Flag{ + &cli.Uint64Flag{ + Name: "height", + Usage: "blockchain head height", + }, + &cli.IntFlag{ + Name: "count", + Usage: "number of won elections to look for", + Value: 120, + }, + }, + Action: func(cctx *cli.Context) error { + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return xerrors.Errorf("GetFullNodeAPI: %w", err) + } + + defer closer() + ctx := lcli.ReqContext(cctx) + + var head *types.TipSet + if cctx.IsSet("height") { + head, err = api.ChainGetTipSetByHeight(ctx, abi.ChainEpoch(cctx.Uint64("height")), types.EmptyTSK) + if err != nil { + return xerrors.Errorf("ChainGetTipSetByHeight: %w", err) + } + } else { + head, err = api.ChainHead(ctx) + if err != nil { + return xerrors.Errorf("ChainHead: %w", err) + } + } + + miner, err := address.NewFromString(cctx.Args().First()) + if err != nil 
{ + return xerrors.Errorf("miner address: %w", err) + } + + count := cctx.Int("count") + if count < 1 { + return xerrors.Errorf("count: %d", count) + } + + fmt.Println("height, winCount") + roundEnd := head.Height() + abi.ChainEpoch(1) + for i := 0; i < count; { + for round := head.Height() + abi.ChainEpoch(1); round <= roundEnd; round++ { + i++ + win, err := backTestWinner(ctx, miner, round, head, api) + if err == nil && win != nil { + fmt.Printf("%d, %d\n", round, win.WinCount) + } + } + + roundEnd = head.Height() + head, err = api.ChainGetTipSet(ctx, head.Parents()) + if err != nil { + break + } + } + return nil + }, +} + +func backTestWinner(ctx context.Context, miner address.Address, round abi.ChainEpoch, ts *types.TipSet, api v0api.FullNode) (*types.ElectionProof, error) { + mbi, err := api.MinerGetBaseInfo(ctx, miner, round, ts.Key()) + if err != nil { + return nil, xerrors.Errorf("failed to get mining base info: %w", err) + } + if mbi == nil { + return nil, nil + } + if !mbi.EligibleForMining { + return nil, nil + } + + brand := mbi.PrevBeaconEntry + bvals := mbi.BeaconEntries + if len(bvals) > 0 { + brand = bvals[len(bvals)-1] + } + + winner, err := gen.IsRoundWinner(ctx, ts, round, miner, brand, mbi, api) + if err != nil { + return nil, xerrors.Errorf("failed to check if we win next round: %w", err) + } + + return winner, nil +} diff --git a/cmd/lotus-shed/main.go b/cmd/lotus-shed/main.go index e06b630800c..e16007e7778 100644 --- a/cmd/lotus-shed/main.go +++ b/cmd/lotus-shed/main.go @@ -60,6 +60,7 @@ func main() { actorCmd, minerTypesCmd, minerMultisigsCmd, + splitstoreCmd, } app := &cli.App{ diff --git a/cmd/lotus-shed/market.go b/cmd/lotus-shed/market.go index e2e322784cb..8221e53eb51 100644 --- a/cmd/lotus-shed/market.go +++ b/cmd/lotus-shed/market.go @@ -2,6 +2,18 @@ package main import ( "fmt" + "os" + "path" + + levelds "github.com/ipfs/go-ds-leveldb" + ldbopts "github.com/syndtr/goleveldb/leveldb/opt" + + 
"github.com/filecoin-project/lotus/lib/backupds" + + "github.com/filecoin-project/lotus/node/repo" + "github.com/ipfs/go-datastore" + dsq "github.com/ipfs/go-datastore/query" + logging "github.com/ipfs/go-log/v2" lcli "github.com/filecoin-project/lotus/cli" @@ -18,6 +30,8 @@ var marketCmd = &cli.Command{ Flags: []cli.Flag{}, Subcommands: []*cli.Command{ marketDealFeesCmd, + marketExportDatastoreCmd, + marketImportDatastoreCmd, }, } @@ -100,3 +114,196 @@ var marketDealFeesCmd = &cli.Command{ return xerrors.New("must provide either --provider or --dealId flag") }, } + +const mktsMetadataNamespace = "metadata" + +var marketExportDatastoreCmd = &cli.Command{ + Name: "export-datastore", + Description: "export markets datastore key/values to a file", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "repo", + Usage: "path to the repo", + }, + &cli.StringFlag{ + Name: "backup-dir", + Usage: "path to the backup directory", + }, + }, + Action: func(cctx *cli.Context) error { + logging.SetLogLevel("badger", "ERROR") // nolint:errcheck + + // If the backup dir is not specified, just use the OS temp dir + backupDir := cctx.String("backup-dir") + if backupDir == "" { + backupDir = os.TempDir() + } + + // Open the repo at the repo path + repoPath := cctx.String("repo") + lr, err := openLockedRepo(repoPath) + if err != nil { + return err + } + defer lr.Close() //nolint:errcheck + + // Open the metadata datastore on the repo + ds, err := lr.Datastore(cctx.Context, datastore.NewKey(mktsMetadataNamespace).String()) + if err != nil { + return xerrors.Errorf("opening datastore %s on repo %s: %w", mktsMetadataNamespace, repoPath, err) + } + + // Create a tmp datastore that we'll add the exported key / values to + // and then backup + backupDsDir := path.Join(backupDir, "markets-backup-datastore") + if err := os.MkdirAll(backupDsDir, 0775); err != nil { //nolint:gosec + return xerrors.Errorf("creating tmp datastore directory: %w", err) + } + defer os.RemoveAll(backupDsDir) 
//nolint:errcheck + + backupDs, err := levelds.NewDatastore(backupDsDir, &levelds.Options{ + Compression: ldbopts.NoCompression, + NoSync: false, + Strict: ldbopts.StrictAll, + ReadOnly: false, + }) + if err != nil { + return xerrors.Errorf("opening backup datastore at %s: %w", backupDir, err) + } + + // Export the key / values + prefixes := []string{ + "/deals/provider", + "/retrievals/provider", + "/storagemarket", + } + for _, prefix := range prefixes { + err := exportPrefix(prefix, ds, backupDs) + if err != nil { + return err + } + } + + // Wrap the datastore in a backup datastore + bds, err := backupds.Wrap(backupDs, "") + if err != nil { + return xerrors.Errorf("opening backupds: %w", err) + } + + // Create a file for the backup + fpath := path.Join(backupDir, "markets.datastore.backup") + out, err := os.OpenFile(fpath, os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return xerrors.Errorf("opening backup file %s: %w", fpath, err) + } + + // Write the backup to the file + if err := bds.Backup(out); err != nil { + if cerr := out.Close(); cerr != nil { + log.Errorw("error closing backup file while handling backup error", "closeErr", cerr, "backupErr", err) + } + return xerrors.Errorf("backup error: %w", err) + } + if err := out.Close(); err != nil { + return xerrors.Errorf("closing backup file: %w", err) + } + + fmt.Println("Wrote backup file to " + fpath) + + return nil + }, +} + +func exportPrefix(prefix string, ds datastore.Batching, backupDs datastore.Batching) error { + q, err := ds.Query(dsq.Query{ + Prefix: prefix, + }) + if err != nil { + return xerrors.Errorf("datastore query: %w", err) + } + defer q.Close() //nolint:errcheck + + for res := range q.Next() { + fmt.Println("Exporting key " + res.Key) + err := backupDs.Put(datastore.NewKey(res.Key), res.Value) + if err != nil { + return xerrors.Errorf("putting %s to backup datastore: %w", res.Key, err) + } + } + + return nil +} + +var marketImportDatastoreCmd = &cli.Command{ + Name: "import-datastore", 
+ Description: "import markets datastore key/values from a backup file", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "repo", + Usage: "path to the repo", + }, + &cli.StringFlag{ + Name: "backup-path", + Usage: "path to the backup file", + Required: true, + }, + }, + Action: func(cctx *cli.Context) error { + logging.SetLogLevel("badger", "ERROR") // nolint:errcheck + + backupPath := cctx.String("backup-path") + + // Open the repo at the repo path + lr, err := openLockedRepo(cctx.String("repo")) + if err != nil { + return err + } + defer lr.Close() //nolint:errcheck + + // Open the metadata datastore on the repo + repoDs, err := lr.Datastore(cctx.Context, datastore.NewKey(mktsMetadataNamespace).String()) + if err != nil { + return err + } + + r, err := os.Open(backupPath) + if err != nil { + return xerrors.Errorf("opening backup path %s: %w", backupPath, err) + } + + fmt.Println("Importing from backup file " + backupPath) + err = backupds.RestoreInto(r, repoDs) + if err != nil { + return xerrors.Errorf("restoring backup from path %s: %w", backupPath, err) + } + + fmt.Println("Completed importing from backup file " + backupPath) + + return nil + }, +} + +func openLockedRepo(path string) (repo.LockedRepo, error) { + // Open the repo at the repo path + rpo, err := repo.NewFS(path) + if err != nil { + return nil, xerrors.Errorf("could not open repo %s: %w", path, err) + } + + // Make sure the repo exists + exists, err := rpo.Exists() + if err != nil { + return nil, xerrors.Errorf("checking repo %s exists: %w", path, err) + } + if !exists { + return nil, xerrors.Errorf("repo does not exist: %s", path) + } + + // Lock the repo + lr, err := rpo.Lock(repo.StorageMiner) + if err != nil { + return nil, xerrors.Errorf("locking repo %s: %w", path, err) + } + + return lr, nil +} diff --git a/cmd/lotus-shed/math.go b/cmd/lotus-shed/math.go index 434559f09a0..c6d4ed0c952 100644 --- a/cmd/lotus-shed/math.go +++ b/cmd/lotus-shed/math.go @@ -8,8 +8,10 @@ import ( "strings" 
"github.com/urfave/cli/v2" + "golang.org/x/xerrors" "github.com/filecoin-project/lotus/chain/types" + miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" ) var mathCmd = &cli.Command{ @@ -17,6 +19,7 @@ var mathCmd = &cli.Command{ Usage: "utility commands around doing math on a list of numbers", Subcommands: []*cli.Command{ mathSumCmd, + mathAggFeesCmd, }, } @@ -101,3 +104,30 @@ var mathSumCmd = &cli.Command{ return nil }, } + +var mathAggFeesCmd = &cli.Command{ + Name: "agg-fees", + Flags: []cli.Flag{ + &cli.IntFlag{ + Name: "size", + Required: true, + }, + &cli.StringFlag{ + Name: "base-fee", + Usage: "baseFee aFIL", + Required: true, + }, + }, + Action: func(cctx *cli.Context) error { + as := cctx.Int("size") + + bf, err := types.BigFromString(cctx.String("base-fee")) + if err != nil { + return xerrors.Errorf("parsing basefee: %w", err) + } + + fmt.Println(types.FIL(miner5.AggregateNetworkFee(as, bf))) + + return nil + }, +} diff --git a/cmd/lotus-shed/pruning.go b/cmd/lotus-shed/pruning.go index 1afe76c4d38..188f5b28fb8 100644 --- a/cmd/lotus-shed/pruning.go +++ b/cmd/lotus-shed/pruning.go @@ -161,7 +161,7 @@ var stateTreePruneCmd = &cli.Command{ if cctx.Bool("only-ds-gc") { fmt.Println("running datastore gc....") for i := 0; i < cctx.Int("gc-count"); i++ { - if err := badgbs.DB.RunValueLogGC(DiscardRatio); err != nil { + if err := badgbs.DB().RunValueLogGC(DiscardRatio); err != nil { return xerrors.Errorf("datastore GC failed: %w", err) } } @@ -208,7 +208,7 @@ var stateTreePruneCmd = &cli.Command{ return nil } - b := badgbs.DB.NewWriteBatch() + b := badgbs.DB().NewWriteBatch() defer b.Cancel() markForRemoval := func(c cid.Cid) error { @@ -249,7 +249,7 @@ var stateTreePruneCmd = &cli.Command{ fmt.Println("running datastore gc....") for i := 0; i < cctx.Int("gc-count"); i++ { - if err := badgbs.DB.RunValueLogGC(DiscardRatio); err != nil { + if err := badgbs.DB().RunValueLogGC(DiscardRatio); err != nil { return xerrors.Errorf("datastore GC 
failed: %w", err) } } diff --git a/cmd/lotus-shed/sectors.go b/cmd/lotus-shed/sectors.go index cf40e1152d0..726d992c44d 100644 --- a/cmd/lotus-shed/sectors.go +++ b/cmd/lotus-shed/sectors.go @@ -1,8 +1,16 @@ package main import ( + "bytes" + "encoding/base64" "fmt" + "image" + "image/color" + "image/png" + "os" + "sort" "strconv" + "sync" "golang.org/x/xerrors" @@ -10,6 +18,7 @@ import ( "github.com/filecoin-project/go-bitfield" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" + "github.com/ipfs/go-cid" "github.com/urfave/cli/v2" miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" @@ -18,6 +27,7 @@ import ( "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" lcli "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/lib/parmap" ) var sectorsCmd = &cli.Command{ @@ -27,6 +37,7 @@ var sectorsCmd = &cli.Command{ Subcommands: []*cli.Command{ terminateSectorCmd, terminateSectorPenaltyEstimationCmd, + visAllocatedSectorsCmd, }, } @@ -263,3 +274,188 @@ var terminateSectorPenaltyEstimationCmd = &cli.Command{ return nil }, } + +var visAllocatedSectorsCmd = &cli.Command{ + Name: "vis-allocated", + Usage: "Produces a html with visualisation of allocated sectors", + Action: func(cctx *cli.Context) error { + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := lcli.ReqContext(cctx) + var miners []address.Address + if cctx.NArg() == 0 { + miners, err = api.StateListMiners(ctx, types.EmptyTSK) + if err != nil { + return err + } + powCache := make(map[address.Address]types.BigInt) + var lk sync.Mutex + parmap.Par(32, miners, func(a address.Address) { + pow, err := api.StateMinerPower(ctx, a, types.EmptyTSK) + + lk.Lock() + if err == nil { + powCache[a] = pow.MinerPower.QualityAdjPower + } else { + powCache[a] = types.NewInt(0) + } + lk.Unlock() + }) + sort.Slice(miners, func(i, 
j int) bool { + return powCache[miners[i]].GreaterThan(powCache[miners[j]]) + }) + n := sort.Search(len(miners), func(i int) bool { + pow := powCache[miners[i]] + log.Infof("pow @%d = %s", i, pow) + return pow.IsZero() + }) + miners = miners[:n] + } else { + for _, mS := range cctx.Args().Slice() { + mA, err := address.NewFromString(mS) + if err != nil { + return xerrors.Errorf("parsing address '%s': %w", mS, err) + } + miners = append(miners, mA) + } + } + + pngs := make([][]byte, len(miners)) + for i := 0; i < len(miners); i++ { + func() { + state, err := api.StateReadState(ctx, miners[i], types.EmptyTSK) + if err != nil { + log.Errorf("getting state: %+v", err) + return + } + allocSString := state.State.(map[string]interface{})["AllocatedSectors"].(map[string]interface{})["/"].(string) + + allocCid, err := cid.Decode(allocSString) + if err != nil { + log.Errorf("decoding cid: %+v", err) + return + } + rle, err := api.ChainReadObj(ctx, allocCid) + if err != nil { + log.Errorf("reading AllocatedSectors: %+v", err) + return + } + png, err := rleToPng(rle) + if err != nil { + log.Errorf("converting to png: %+v", err) + return + } + pngs[i] = png + encoded := base64.StdEncoding.EncodeToString(pngs[i]) + fmt.Printf(`%s:

`+"\n", miners[i], encoded) + _ = os.Stdout.Sync() + }() + } + + return nil + }, +} + +func rleToPng(rleBytes []byte) ([]byte, error) { + var bf bitfield.BitField + err := bf.UnmarshalCBOR(bytes.NewReader(rleBytes)) + if err != nil { + return nil, xerrors.Errorf("decoding bitfield: %w", err) + } + { + last, err := bf.Last() + if err != nil { + return nil, xerrors.Errorf("getting last: %w", err) + } + if last == 0 { + return nil, nil + } + } + ri, err := bf.RunIterator() + if err != nil { + return nil, xerrors.Errorf("creating interator: %w", err) + } + + const width = 1024 + const skipTh = 64 + const skipSize = 32 + + var size uint64 + for ri.HasNext() { + run, err := ri.NextRun() + if err != nil { + return nil, xerrors.Errorf("getting next run: %w", err) + } + if run.Len > skipTh*width { + size += run.Len%(2*width) + skipSize*width + } else { + size += run.Len + } + } + + img := image.NewRGBA(image.Rect(0, 0, width, int((size+width-1)/width))) + for i := range img.Pix { + img.Pix[i] = 255 + } + + ri, err = bf.RunIterator() + if err != nil { + return nil, xerrors.Errorf("creating interator: %w", err) + } + + const shade = 15 + idx := uint64(0) + realIdx := uint64(0) + for ri.HasNext() { + run, err := ri.NextRun() + if err != nil { + return nil, xerrors.Errorf("getting next run: %w", err) + } + var cut = false + var oldLen uint64 + if run.Len > skipTh*width { + oldLen = run.Len + run.Len = run.Len%(2*width) + skipSize*width + cut = true + } + for i := uint64(0); i < run.Len; i++ { + col := color.Gray{0} + stripe := (realIdx+i)/width%256 >= 128 + if cut && i > skipSize*width/2 { + stripe = (realIdx+i+(skipSize/2*width))/width%256 >= 128 + } + if !run.Val { + col.Y = 255 + if stripe { + col.Y -= shade + } + } else if stripe { + col.Y += shade + } + img.Set(int((idx+i)%width), int((idx+i)/width), col) + } + if cut { + i := (idx + run.Len/2 + width) &^ (width - 1) + iend := i + width + col := color.RGBA{255, 0, 0, 255} + for ; i < iend; i++ { + img.Set(int(i)%width, 
int(i)/width, col) + } + realIdx += oldLen + idx += run.Len + } else { + realIdx += run.Len + idx += run.Len + } + } + buf := &bytes.Buffer{} + err = png.Encode(buf, img) + if err != nil { + return nil, xerrors.Errorf("encoding png: %w", err) + } + + return buf.Bytes(), nil +} diff --git a/cmd/lotus-shed/splitstore.go b/cmd/lotus-shed/splitstore.go new file mode 100644 index 00000000000..4f668888eaf --- /dev/null +++ b/cmd/lotus-shed/splitstore.go @@ -0,0 +1,407 @@ +package main + +import ( + "bufio" + "context" + "fmt" + "io" + "os" + "path/filepath" + "runtime" + + "github.com/dgraph-io/badger/v2" + "github.com/urfave/cli/v2" + "go.uber.org/multierr" + "golang.org/x/sync/errgroup" + "golang.org/x/xerrors" + + "go.uber.org/zap" + + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/query" + + lcli "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/node/config" + "github.com/filecoin-project/lotus/node/repo" +) + +var splitstoreCmd = &cli.Command{ + Name: "splitstore", + Description: "splitstore utilities", + Subcommands: []*cli.Command{ + splitstoreRollbackCmd, + splitstoreClearCmd, + splitstoreCheckCmd, + splitstoreInfoCmd, + }, +} + +var splitstoreRollbackCmd = &cli.Command{ + Name: "rollback", + Description: "rollbacks a splitstore installation", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "repo", + Value: "~/.lotus", + }, + &cli.BoolFlag{ + Name: "gc-coldstore", + Usage: "compact and garbage collect the coldstore after copying the hotstore", + }, + &cli.BoolFlag{ + Name: "rewrite-config", + Usage: "rewrite the lotus configuration to disable splitstore", + }, + }, + Action: func(cctx *cli.Context) error { + r, err := repo.NewFS(cctx.String("repo")) + if err != nil { + return xerrors.Errorf("error opening fs repo: %w", err) + } + + exists, err := r.Exists() + if err != nil { + return err + } + if !exists { + return xerrors.Errorf("lotus repo doesn't exist") + } + + lr, err := r.Lock(repo.FullNode) + if err != nil { + 
return xerrors.Errorf("error locking repo: %w", err) + } + defer lr.Close() //nolint:errcheck + + cfg, err := lr.Config() + if err != nil { + return xerrors.Errorf("error getting config: %w", err) + } + + fncfg, ok := cfg.(*config.FullNode) + if !ok { + return xerrors.Errorf("wrong config type: %T", cfg) + } + + if !fncfg.Chainstore.EnableSplitstore { + return xerrors.Errorf("splitstore is not enabled") + } + + fmt.Println("copying hotstore to coldstore...") + err = copyHotstoreToColdstore(lr, cctx.Bool("gc-coldstore")) + if err != nil { + return xerrors.Errorf("error copying hotstore to coldstore: %w", err) + } + + fmt.Println("clearing splitstore directory...") + err = clearSplitstoreDir(lr) + if err != nil { + return xerrors.Errorf("error clearing splitstore directory: %w", err) + } + + fmt.Println("deleting splitstore directory...") + err = deleteSplitstoreDir(lr) + if err != nil { + log.Warnf("error deleting splitstore directory: %s", err) + } + + fmt.Println("deleting splitstore keys from metadata datastore...") + err = deleteSplitstoreKeys(lr) + if err != nil { + return xerrors.Errorf("error deleting splitstore keys: %w", err) + } + + if cctx.Bool("rewrite-config") { + fmt.Println("disabling splitstore in config...") + err = lr.SetConfig(func(cfg interface{}) { + cfg.(*config.FullNode).Chainstore.EnableSplitstore = false + }) + if err != nil { + return xerrors.Errorf("error disabling splitstore in config: %w", err) + } + } + + fmt.Println("splitstore has been rolled back.") + return nil + }, +} + +var splitstoreClearCmd = &cli.Command{ + Name: "clear", + Description: "clears a splitstore installation for restart from snapshot", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "repo", + Value: "~/.lotus", + }, + &cli.BoolFlag{ + Name: "keys-only", + Usage: "only delete splitstore keys", + }, + }, + Action: func(cctx *cli.Context) error { + r, err := repo.NewFS(cctx.String("repo")) + if err != nil { + return xerrors.Errorf("error opening fs repo: %w", err) + } 
+ + exists, err := r.Exists() + if err != nil { + return err + } + if !exists { + return xerrors.Errorf("lotus repo doesn't exist") + } + + lr, err := r.Lock(repo.FullNode) + if err != nil { + return xerrors.Errorf("error locking repo: %w", err) + } + defer lr.Close() //nolint:errcheck + + cfg, err := lr.Config() + if err != nil { + return xerrors.Errorf("error getting config: %w", err) + } + + fncfg, ok := cfg.(*config.FullNode) + if !ok { + return xerrors.Errorf("wrong config type: %T", cfg) + } + + if !fncfg.Chainstore.EnableSplitstore { + return xerrors.Errorf("splitstore is not enabled") + } + + if !cctx.Bool("keys-only") { + fmt.Println("clearing splitstore directory...") + err = clearSplitstoreDir(lr) + if err != nil { + return xerrors.Errorf("error clearing splitstore directory: %w", err) + } + } + + fmt.Println("deleting splitstore keys from metadata datastore...") + err = deleteSplitstoreKeys(lr) + if err != nil { + return xerrors.Errorf("error deleting splitstore keys: %w", err) + } + + return nil + }, +} + +func copyHotstoreToColdstore(lr repo.LockedRepo, gcColdstore bool) error { + repoPath := lr.Path() + dataPath := filepath.Join(repoPath, "datastore") + coldPath := filepath.Join(dataPath, "chain") + hotPath := filepath.Join(dataPath, "splitstore", "hot.badger") + + blog := &badgerLogger{ + SugaredLogger: log.Desugar().WithOptions(zap.AddCallerSkip(1)).Sugar(), + skip2: log.Desugar().WithOptions(zap.AddCallerSkip(2)).Sugar(), + } + + coldOpts, err := repo.BadgerBlockstoreOptions(repo.UniversalBlockstore, coldPath, false) + if err != nil { + return xerrors.Errorf("error getting coldstore badger options: %w", err) + } + coldOpts.SyncWrites = false + coldOpts.Logger = blog + + hotOpts, err := repo.BadgerBlockstoreOptions(repo.HotBlockstore, hotPath, true) + if err != nil { + return xerrors.Errorf("error getting hotstore badger options: %w", err) + } + hotOpts.Logger = blog + + cold, err := badger.Open(coldOpts.Options) + if err != nil { + return 
xerrors.Errorf("error opening coldstore: %w", err) + } + defer cold.Close() //nolint + + hot, err := badger.Open(hotOpts.Options) + if err != nil { + return xerrors.Errorf("error opening hotstore: %w", err) + } + defer hot.Close() //nolint + + rd, wr := io.Pipe() + g := new(errgroup.Group) + + g.Go(func() error { + bwr := bufio.NewWriterSize(wr, 64<<20) + + _, err := hot.Backup(bwr, 0) + if err != nil { + _ = wr.CloseWithError(err) + return err + } + + err = bwr.Flush() + if err != nil { + _ = wr.CloseWithError(err) + return err + } + + return wr.Close() + }) + + g.Go(func() error { + err := cold.Load(rd, 1024) + if err != nil { + return err + } + + return cold.Sync() + }) + + err = g.Wait() + if err != nil { + return err + } + + // compact + gc the coldstore if so requested + if gcColdstore { + fmt.Println("compacting coldstore...") + nworkers := runtime.NumCPU() + if nworkers < 2 { + nworkers = 2 + } + + err = cold.Flatten(nworkers) + if err != nil { + return xerrors.Errorf("error compacting coldstore: %w", err) + } + + fmt.Println("garbage collecting coldstore...") + for err == nil { + err = cold.RunValueLogGC(0.0625) + } + + if err != badger.ErrNoRewrite { + return xerrors.Errorf("error garbage collecting coldstore: %w", err) + } + } + + return nil +} + +func deleteSplitstoreDir(lr repo.LockedRepo) error { + path, err := lr.SplitstorePath() + if err != nil { + return xerrors.Errorf("error getting splitstore path: %w", err) + } + + return os.RemoveAll(path) +} + +func clearSplitstoreDir(lr repo.LockedRepo) error { + path, err := lr.SplitstorePath() + if err != nil { + return xerrors.Errorf("error getting splitstore path: %w", err) + } + + entries, err := os.ReadDir(path) + if err != nil { + return xerrors.Errorf("error reading splitstore directory %s: %W", path, err) + } + + var result error + for _, e := range entries { + target := filepath.Join(path, e.Name()) + err = os.RemoveAll(target) + if err != nil { + log.Errorf("error removing %s: %s", target, err) + 
result = multierr.Append(result, err) + } + } + + return result +} + +func deleteSplitstoreKeys(lr repo.LockedRepo) error { + ds, err := lr.Datastore(context.TODO(), "/metadata") + if err != nil { + return xerrors.Errorf("error opening datastore: %w", err) + } + if closer, ok := ds.(io.Closer); ok { + defer closer.Close() //nolint + } + + var keys []datastore.Key + res, err := ds.Query(query.Query{Prefix: "/splitstore"}) + if err != nil { + return xerrors.Errorf("error querying datastore for splitstore keys: %w", err) + } + + for r := range res.Next() { + if r.Error != nil { + return xerrors.Errorf("datastore query error: %w", r.Error) + } + + keys = append(keys, datastore.NewKey(r.Key)) + } + + for _, k := range keys { + fmt.Printf("deleting %s from datastore...\n", k) + err = ds.Delete(k) + if err != nil { + return xerrors.Errorf("error deleting key %s from datastore: %w", k, err) + } + } + + return nil +} + +// badger logging through go-log +type badgerLogger struct { + *zap.SugaredLogger + skip2 *zap.SugaredLogger +} + +func (b *badgerLogger) Warningf(format string, args ...interface{}) {} +func (b *badgerLogger) Infof(format string, args ...interface{}) {} +func (b *badgerLogger) Debugf(format string, args ...interface{}) {} + +var splitstoreCheckCmd = &cli.Command{ + Name: "check", + Description: "runs a healthcheck on a splitstore installation", + Action: func(cctx *cli.Context) error { + api, closer, err := lcli.GetFullNodeAPIV1(cctx) + if err != nil { + return err + } + defer closer() + + ctx := lcli.ReqContext(cctx) + return api.ChainCheckBlockstore(ctx) + }, +} + +var splitstoreInfoCmd = &cli.Command{ + Name: "info", + Description: "prints some basic splitstore information", + Action: func(cctx *cli.Context) error { + api, closer, err := lcli.GetFullNodeAPIV1(cctx) + if err != nil { + return err + } + defer closer() + + ctx := lcli.ReqContext(cctx) + info, err := api.ChainBlockstoreInfo(ctx) + if err != nil { + return err + } + + for k, v := range info { 
+ fmt.Print(k) + fmt.Print(": ") + fmt.Println(v) + } + + return nil + }, +} diff --git a/cmd/lotus-shed/storage-stats.go b/cmd/lotus-shed/storage-stats.go index a40f082be5e..a9a5744a6bd 100644 --- a/cmd/lotus-shed/storage-stats.go +++ b/cmd/lotus-shed/storage-stats.go @@ -2,10 +2,12 @@ package main import ( "encoding/json" + corebig "math/big" "os" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" + filbig "github.com/filecoin-project/go-state-types/big" lcli "github.com/filecoin-project/lotus/cli" "github.com/ipfs/go-cid" "github.com/urfave/cli/v2" @@ -21,13 +23,16 @@ type networkTotalsOutput struct { } type networkTotals struct { - UniqueCids int `json:"total_unique_cids"` - UniqueProviders int `json:"total_unique_providers"` - UniqueClients int `json:"total_unique_clients"` - TotalDeals int `json:"total_num_deals"` - TotalBytes int64 `json:"total_stored_data_size"` - FilplusTotalDeals int `json:"filplus_total_num_deals"` - FilplusTotalBytes int64 `json:"filplus_total_stored_data_size"` + QaNetworkPower filbig.Int `json:"total_qa_power"` + RawNetworkPower filbig.Int `json:"total_raw_capacity"` + CapacityCarryingData float64 `json:"capacity_fraction_carrying_data"` + UniqueCids int `json:"total_unique_cids"` + UniqueProviders int `json:"total_unique_providers"` + UniqueClients int `json:"total_unique_clients"` + TotalDeals int `json:"total_num_deals"` + TotalBytes int64 `json:"total_stored_data_size"` + FilplusTotalDeals int `json:"filplus_total_num_deals"` + FilplusTotalBytes int64 `json:"filplus_total_stored_data_size"` seenClient map[address.Address]bool seenProvider map[address.Address]bool @@ -66,10 +71,17 @@ var storageStatsCmd = &cli.Command{ return err } + power, err := api.StateMinerPower(ctx, address.Address{}, head.Key()) + if err != nil { + return err + } + netTotals := networkTotals{ - seenClient: make(map[address.Address]bool), - seenProvider: make(map[address.Address]bool), - seenPieceCid: 
make(map[cid.Cid]bool), + QaNetworkPower: power.TotalPower.QualityAdjPower, + RawNetworkPower: power.TotalPower.RawBytePower, + seenClient: make(map[address.Address]bool), + seenProvider: make(map[address.Address]bool), + seenPieceCid: make(map[cid.Cid]bool), } deals, err := api.StateMarketDeals(ctx, head.Key()) @@ -103,6 +115,11 @@ var storageStatsCmd = &cli.Command{ netTotals.UniqueClients = len(netTotals.seenClient) netTotals.UniqueProviders = len(netTotals.seenProvider) + netTotals.CapacityCarryingData, _ = new(corebig.Rat).SetFrac( + corebig.NewInt(netTotals.TotalBytes), + netTotals.RawNetworkPower.Int, + ).Float64() + return json.NewEncoder(os.Stdout).Encode( networkTotalsOutput{ Epoch: int64(head.Height()), diff --git a/cmd/lotus-sim/simulation/blockbuilder/blockbuilder.go b/cmd/lotus-sim/simulation/blockbuilder/blockbuilder.go index 2ffc0bf140b..bd757614bbc 100644 --- a/cmd/lotus-sim/simulation/blockbuilder/blockbuilder.go +++ b/cmd/lotus-sim/simulation/blockbuilder/blockbuilder.go @@ -271,7 +271,7 @@ func (bb *BlockBuilder) StateManager() *stmgr.StateManager { } // ActorsVersion returns the actors version for the target block. 
-func (bb *BlockBuilder) ActorsVersion() actors.Version { +func (bb *BlockBuilder) ActorsVersion() (actors.Version, error) { return actors.VersionForNetwork(bb.NetworkVersion()) } diff --git a/cmd/lotus-sim/simulation/stages/funding_stage.go b/cmd/lotus-sim/simulation/stages/funding_stage.go index f57f852931c..faec6a504ef 100644 --- a/cmd/lotus-sim/simulation/stages/funding_stage.go +++ b/cmd/lotus-sim/simulation/stages/funding_stage.go @@ -145,7 +145,10 @@ func (fs *FundingStage) PackMessages(ctx context.Context, bb *blockbuilder.Block store := bb.ActorStore() epoch := bb.Height() - actorsVersion := bb.ActorsVersion() + actorsVersion, err := bb.ActorsVersion() + if err != nil { + return err + } var accounts, multisigs int defer func() { diff --git a/cmd/lotus-sim/simulation/stages/provecommit_stage.go b/cmd/lotus-sim/simulation/stages/provecommit_stage.go index 6cbca7de9fb..8b12fc68aa8 100644 --- a/cmd/lotus-sim/simulation/stages/provecommit_stage.go +++ b/cmd/lotus-sim/simulation/stages/provecommit_stage.go @@ -280,7 +280,11 @@ func (stage *ProveCommitStage) packProveCommitsMiner( // It will drop any pre-commits that have already expired. 
func (stage *ProveCommitStage) loadMiner(ctx context.Context, bb *blockbuilder.BlockBuilder, addr address.Address) error { epoch := bb.Height() - av := bb.ActorsVersion() + av, err := bb.ActorsVersion() + if err != nil { + return err + } + minerState, err := loadMiner(bb.ActorStore(), bb.ParentStateTree(), addr) if err != nil { return err @@ -291,7 +295,10 @@ func (stage *ProveCommitStage) loadMiner(ctx context.Context, bb *blockbuilder.B var total, dropped int err = minerState.ForEachPrecommittedSector(func(info miner.SectorPreCommitOnChainInfo) error { total++ - msd := policy.GetMaxProveCommitDuration(av, info.Info.SealProof) + msd, err := policy.GetMaxProveCommitDuration(av, info.Info.SealProof) + if err != nil { + return err + } if epoch > info.PreCommitEpoch+msd { dropped++ return nil @@ -327,7 +334,10 @@ func (stage *ProveCommitStage) filterProveCommits( } nextEpoch := bb.Height() - av := bb.ActorsVersion() + av, err := bb.ActorsVersion() + if err != nil { + return nil, err + } good := make([]abi.SectorNumber, 0, len(snos)) for _, sno := range snos { @@ -338,7 +348,10 @@ func (stage *ProveCommitStage) filterProveCommits( if info == nil { continue } - msd := policy.GetMaxProveCommitDuration(av, info.Info.SealProof) + msd, err := policy.GetMaxProveCommitDuration(av, info.Info.SealProof) + if err != nil { + return nil, err + } if nextEpoch > info.PreCommitEpoch+msd { continue } diff --git a/cmd/lotus-storage-miner/config.go b/cmd/lotus-storage-miner/config.go deleted file mode 100644 index e5e4fc4c44e..00000000000 --- a/cmd/lotus-storage-miner/config.go +++ /dev/null @@ -1,22 +0,0 @@ -package main - -import ( - "fmt" - - "github.com/urfave/cli/v2" - - "github.com/filecoin-project/lotus/node/config" -) - -var configCmd = &cli.Command{ - Name: "config", - Usage: "Output default configuration", - Action: func(cctx *cli.Context) error { - comm, err := config.ConfigComment(config.DefaultStorageMiner()) - if err != nil { - return err - } - fmt.Println(string(comm)) - 
return nil - }, -} diff --git a/cmd/lotus-storage-miner/init_restore.go b/cmd/lotus-storage-miner/init_restore.go deleted file mode 100644 index b495e1cd96a..00000000000 --- a/cmd/lotus-storage-miner/init_restore.go +++ /dev/null @@ -1,282 +0,0 @@ -package main - -import ( - "context" - "encoding/json" - "io/ioutil" - "os" - - "github.com/filecoin-project/lotus/api/v0api" - - "github.com/docker/go-units" - "github.com/ipfs/go-datastore" - "github.com/libp2p/go-libp2p-core/peer" - "github.com/mitchellh/go-homedir" - "github.com/urfave/cli/v2" - "golang.org/x/xerrors" - "gopkg.in/cheggaaa/pb.v1" - - "github.com/filecoin-project/go-address" - paramfetch "github.com/filecoin-project/go-paramfetch" - "github.com/filecoin-project/go-state-types/big" - - lapi "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/types" - lcli "github.com/filecoin-project/lotus/cli" - "github.com/filecoin-project/lotus/extern/sector-storage/stores" - "github.com/filecoin-project/lotus/lib/backupds" - "github.com/filecoin-project/lotus/node/config" - "github.com/filecoin-project/lotus/node/repo" -) - -var initRestoreCmd = &cli.Command{ - Name: "restore", - Usage: "Initialize a lotus miner repo from a backup", - Flags: []cli.Flag{ - &cli.BoolFlag{ - Name: "nosync", - Usage: "don't check full-node sync status", - }, - &cli.StringFlag{ - Name: "config", - Usage: "config file (config.toml)", - }, - &cli.StringFlag{ - Name: "storage-config", - Usage: "storage paths config (storage.json)", - }, - }, - ArgsUsage: "[backupFile]", - Action: func(cctx *cli.Context) error { - log.Info("Initializing lotus miner using a backup") - if cctx.Args().Len() != 1 { - return xerrors.Errorf("expected 1 argument") - } - - ctx := lcli.ReqContext(cctx) - - log.Info("Trying to connect to full node RPC") - - if err := checkV1ApiSupport(ctx, cctx); err != nil { - return err - } - - api, closer, err := lcli.GetFullNodeAPIV1(cctx) // TODO: consider 
storing full node address in config - if err != nil { - return err - } - defer closer() - - log.Info("Checking full node version") - - v, err := api.Version(ctx) - if err != nil { - return err - } - - if !v.APIVersion.EqMajorMinor(lapi.FullAPIVersion1) { - return xerrors.Errorf("Remote API version didn't match (expected %s, remote %s)", lapi.FullAPIVersion1, v.APIVersion) - } - - if !cctx.Bool("nosync") { - if err := lcli.SyncWait(ctx, &v0api.WrapperV1Full{FullNode: api}, false); err != nil { - return xerrors.Errorf("sync wait: %w", err) - } - } - - bf, err := homedir.Expand(cctx.Args().First()) - if err != nil { - return xerrors.Errorf("expand backup file path: %w", err) - } - - st, err := os.Stat(bf) - if err != nil { - return xerrors.Errorf("stat backup file (%s): %w", bf, err) - } - - f, err := os.Open(bf) - if err != nil { - return xerrors.Errorf("opening backup file: %w", err) - } - defer f.Close() // nolint:errcheck - - log.Info("Checking if repo exists") - - repoPath := cctx.String(FlagMinerRepo) - r, err := repo.NewFS(repoPath) - if err != nil { - return err - } - - ok, err := r.Exists() - if err != nil { - return err - } - if ok { - return xerrors.Errorf("repo at '%s' is already initialized", cctx.String(FlagMinerRepo)) - } - - log.Info("Initializing repo") - - if err := r.Init(repo.StorageMiner); err != nil { - return err - } - - lr, err := r.Lock(repo.StorageMiner) - if err != nil { - return err - } - defer lr.Close() //nolint:errcheck - - if cctx.IsSet("config") { - log.Info("Restoring config") - - cf, err := homedir.Expand(cctx.String("config")) - if err != nil { - return xerrors.Errorf("expanding config path: %w", err) - } - - _, err = os.Stat(cf) - if err != nil { - return xerrors.Errorf("stat config file (%s): %w", cf, err) - } - - var cerr error - err = lr.SetConfig(func(raw interface{}) { - rcfg, ok := raw.(*config.StorageMiner) - if !ok { - cerr = xerrors.New("expected miner config") - return - } - - ff, err := config.FromFile(cf, rcfg) - if err 
!= nil { - cerr = xerrors.Errorf("loading config: %w", err) - return - } - - *rcfg = *ff.(*config.StorageMiner) - }) - if cerr != nil { - return cerr - } - if err != nil { - return xerrors.Errorf("setting config: %w", err) - } - - } else { - log.Warn("--config NOT SET, WILL USE DEFAULT VALUES") - } - - if cctx.IsSet("storage-config") { - log.Info("Restoring storage path config") - - cf, err := homedir.Expand(cctx.String("storage-config")) - if err != nil { - return xerrors.Errorf("expanding storage config path: %w", err) - } - - cfb, err := ioutil.ReadFile(cf) - if err != nil { - return xerrors.Errorf("reading storage config: %w", err) - } - - var cerr error - err = lr.SetStorage(func(scfg *stores.StorageConfig) { - cerr = json.Unmarshal(cfb, scfg) - }) - if cerr != nil { - return xerrors.Errorf("unmarshalling storage config: %w", cerr) - } - if err != nil { - return xerrors.Errorf("setting storage config: %w", err) - } - } else { - log.Warn("--storage-config NOT SET. NO SECTOR PATHS WILL BE CONFIGURED") - } - - log.Info("Restoring metadata backup") - - mds, err := lr.Datastore(context.TODO(), "/metadata") - if err != nil { - return err - } - - bar := pb.New64(st.Size()) - br := bar.NewProxyReader(f) - bar.ShowTimeLeft = true - bar.ShowPercent = true - bar.ShowSpeed = true - bar.Units = pb.U_BYTES - - bar.Start() - err = backupds.RestoreInto(br, mds) - bar.Finish() - - if err != nil { - return xerrors.Errorf("restoring metadata: %w", err) - } - - log.Info("Checking actor metadata") - - abytes, err := mds.Get(datastore.NewKey("miner-address")) - if err != nil { - return xerrors.Errorf("getting actor address from metadata datastore: %w", err) - } - - maddr, err := address.NewFromBytes(abytes) - if err != nil { - return xerrors.Errorf("parsing actor address: %w", err) - } - - log.Info("ACTOR ADDRESS: ", maddr.String()) - - mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) - if err != nil { - return xerrors.Errorf("getting miner info: %w", err) - } - - 
log.Info("SECTOR SIZE: ", units.BytesSize(float64(mi.SectorSize))) - - wk, err := api.StateAccountKey(ctx, mi.Worker, types.EmptyTSK) - if err != nil { - return xerrors.Errorf("resolving worker key: %w", err) - } - - has, err := api.WalletHas(ctx, wk) - if err != nil { - return xerrors.Errorf("checking worker address: %w", err) - } - - if !has { - return xerrors.Errorf("worker address %s for miner actor %s not present in full node wallet", mi.Worker, maddr) - } - - log.Info("Checking proof parameters") - - if err := paramfetch.GetParams(ctx, build.ParametersJSON(), build.SrsJSON(), uint64(mi.SectorSize)); err != nil { - return xerrors.Errorf("fetching proof parameters: %w", err) - } - - log.Info("Initializing libp2p identity") - - p2pSk, err := makeHostKey(lr) - if err != nil { - return xerrors.Errorf("make host key: %w", err) - } - - peerid, err := peer.IDFromPrivateKey(p2pSk) - if err != nil { - return xerrors.Errorf("peer ID from private key: %w", err) - } - - log.Info("Configuring miner actor") - - if err := configureStorageMiner(ctx, api, maddr, peerid, big.Zero()); err != nil { - return err - } - - return nil - }, -} diff --git a/cmd/lotus/config.go b/cmd/lotus/config.go new file mode 100644 index 00000000000..fcb7e2b08f7 --- /dev/null +++ b/cmd/lotus/config.go @@ -0,0 +1,94 @@ +package main + +import ( + "fmt" + + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/node/config" + "github.com/filecoin-project/lotus/node/repo" +) + +var configCmd = &cli.Command{ + Name: "config", + Usage: "Manage node config", + Subcommands: []*cli.Command{ + configDefaultCmd, + configUpdateCmd, + }, +} + +var configDefaultCmd = &cli.Command{ + Name: "default", + Usage: "Print default node config", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "no-comment", + Usage: "don't comment default values", + }, + }, + Action: func(cctx *cli.Context) error { + c := config.DefaultFullNode() + + cb, err := config.ConfigUpdate(c, nil, 
!cctx.Bool("no-comment")) + if err != nil { + return err + } + + fmt.Println(string(cb)) + + return nil + }, +} + +var configUpdateCmd = &cli.Command{ + Name: "updated", + Usage: "Print updated node config", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "no-comment", + Usage: "don't comment default values", + }, + }, + Action: func(cctx *cli.Context) error { + r, err := repo.NewFS(cctx.String("repo")) + if err != nil { + return err + } + + ok, err := r.Exists() + if err != nil { + return err + } + + if !ok { + return xerrors.Errorf("repo not initialized") + } + + lr, err := r.LockRO(repo.FullNode) + if err != nil { + return xerrors.Errorf("locking repo: %w", err) + } + + cfgNode, err := lr.Config() + if err != nil { + _ = lr.Close() + return xerrors.Errorf("getting node config: %w", err) + } + + if err := lr.Close(); err != nil { + return err + } + + cfgDef := config.DefaultFullNode() + + updated, err := config.ConfigUpdate(cfgNode, cfgDef, !cctx.Bool("no-comment")) + if err != nil { + return err + } + + fmt.Print(string(updated)) + return nil + }, +} diff --git a/cmd/lotus/daemon.go b/cmd/lotus/daemon.go index 0504c341863..0d5961aaea3 100644 --- a/cmd/lotus/daemon.go +++ b/cmd/lotus/daemon.go @@ -314,7 +314,7 @@ var DaemonCmd = &cli.Command{ stop, err := node.New(ctx, node.FullAPI(&api, node.Lite(isLite)), - node.Online(), + node.Base(), node.Repo(r), node.Override(new(dtypes.Bootstrapper), isBootstrapper), diff --git a/cmd/lotus/main.go b/cmd/lotus/main.go index 63d01f89162..66eae0f1e81 100644 --- a/cmd/lotus/main.go +++ b/cmd/lotus/main.go @@ -12,6 +12,7 @@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" lcli "github.com/filecoin-project/lotus/cli" + cliutil "github.com/filecoin-project/lotus/cli/util" "github.com/filecoin-project/lotus/lib/lotuslog" "github.com/filecoin-project/lotus/lib/tracing" "github.com/filecoin-project/lotus/node/repo" @@ -29,6 +30,7 @@ func main() { local := []*cli.Command{ DaemonCmd, backupCmd, 
+ configCmd, } if AdvanceBlockCmd != nil { local = append(local, AdvanceBlockCmd) @@ -80,6 +82,7 @@ func main() { Name: "force-send", Usage: "if true, will ignore pre-send checks", }, + cliutil.FlagVeryVerbose, }, Commands: append(local, lcli.Commands...), diff --git a/cmd/tvx/extract_message.go b/cmd/tvx/extract_message.go index 8e993cbd369..71035867f29 100644 --- a/cmd/tvx/extract_message.go +++ b/cmd/tvx/extract_message.go @@ -337,6 +337,9 @@ func resolveFromChain(ctx context.Context, api v0api.FullNode, mcid cid.Cid, blo if err != nil { return nil, nil, nil, fmt.Errorf("failed to locate message: %w", err) } + if msgInfo == nil { + return nil, nil, nil, fmt.Errorf("failed to locate message: not found") + } log.Printf("located message at tipset %s (height: %d) with exit code: %s", msgInfo.TipSet, msgInfo.Height, msgInfo.Receipt.ExitCode) diff --git a/docker-compose.yaml b/docker-compose.yaml new file mode 100644 index 00000000000..b962d5cc2b8 --- /dev/null +++ b/docker-compose.yaml @@ -0,0 +1,145 @@ +# By default, this docker-compose file will start a lotus fullnode +# +# Some directives have been left commented out so they serve as an +# example for more advanced use. +# +# To provide a custom configuration file, or automatically import +# a wallet, uncomment the "configs" or "secrets" sections. +# +# start on a single node: +# +# docker-compose up +# +# start on docker swarm: +# +# docker swarm init (if you haven't already) +# docker stack deploy -c docker-compose.yaml mylotuscluster +# +# for more information, please visit docs.filecoin.io + +version: "3.8" + +volumes: + parameters: + lotus-repo: + lotus-miner-repo: + lotus-worker-repo: + +configs: + lotus-config-toml: + file: /path/to/lotus/config.toml + lotus-miner-config-toml: + file: /path/to/lotus-miner/config.toml + +secrets: + lotus-wallet: + file: /path/to/exported/lotus/wallet + +services: + lotus: + build: + context: . 
+ target: lotus + dockerfile: Dockerfile.lotus + image: filecoin/lotus + volumes: + - parameters:/var/tmp/filecoin-proof-parameters + - lotus-repo:/var/lib/lotus + ports: + - 1234:1234 + environment: + - LOTUS_JAEGER_AGENT_HOST=jaeger + - LOTUS_JAEGER_AGENT_PORT=6831 + # - DOCKER_LOTUS_IMPORT_WALLET=/tmp/wallet + deploy: + restart_policy: + condition: on-failure + delay: 30s + # configs: + # - source: lotus-config-toml + # target: /var/lib/lotus/config.toml + # secrets: + # - source: lotus-wallet + # target: /tmp/wallet + command: + - daemon + lotus-gateway: + build: + context: . + target: lotus-gateway + dockerfile: Dockerfile.lotus + image: filecoin/lotus-gateway + depends_on: + - lotus + ports: + - 1235:1234 + environment: + - FULLNODE_API_INFO=/dns/lotus/tcp/1234/http + - LOTUS_JAEGER_AGENT_HOST=jaeger + - LOTUS_JAEGER_AGENT_PORT=6831 + deploy: + restart_policy: + condition: on-failure + delay: 30s + command: + - run + # + # Uncomment to run miner software + # + # lotus-miner: + # build: + # context: . + # target: lotus-miner + # dockerfile: Dockerfile.lotus + # image: filecoin/lotus-miner + # volumes: + # - parameters:/var/tmp/filecoin-proof-parameters + # - lotus-miner-repo:/var/lib/lotus-miner + # depends_on: + # - lotus + # ports: + # - 2345:2345 + # environment: + # - FULLNODE_API_INFO=/dns/lotus/tcp/1234/http + # - LOTUS_JAEGER_AGENT_HOST=jaeger + # - LOTUS_JAEGER_AGENT_PORT=6831 + # deploy: + # restart_policy: + # condition: on-failure + # delay: 30s + # configs: + # - source: lotus-miner-config-toml + # - target: /var/lib/lotus-miner/config.toml + # command: + # - run + # lotus-worker: + # build: + # context: . 
+ # target: lotus-worker + # dockerfile: Dockerfile.lotus + # image: filecoin/lotus-worker + # volumes: + # - parameters:/var/tmp/filecoin-proof-parameters + # - lotus-worker-repo:/var/lib/lotus-worker + # depends_on: + # - lotus-worker + # environment: + # - MINER_API_INFO=/dns/lotus-miner/tcp/1234/http + # - LOTUS_JAEGER_AGENT_HOST=jaeger + # - LOTUS_JAEGER_AGENT_PORT=6831 + # deploy: + # restart_policy: + # condition: on-failure + # delay: 30s + # replicas: 2 + # command: + # - run + jaeger: + image: jaegertracing/all-in-one + ports: + - "6831:6831/udp" + - "16686:16686" + deploy: + restart_policy: + condition: on-failure + delay: 30s diff --git a/documentation/en/api-v0-methods-miner.md b/documentation/en/api-v0-methods-miner.md index 496f63a0851..86cf62bbcd8 100644 --- a/documentation/en/api-v0-methods-miner.md +++ b/documentation/en/api-v0-methods-miner.md @@ -94,10 +94,13 @@ * [ReturnSealPreCommit1](#ReturnSealPreCommit1) * [ReturnSealPreCommit2](#ReturnSealPreCommit2) * [ReturnUnsealPiece](#ReturnUnsealPiece) +* [Runtime](#Runtime) + * [RuntimeSubsystems](#RuntimeSubsystems) * [Sealing](#Sealing) * [SealingAbort](#SealingAbort) * [SealingSchedDiag](#SealingSchedDiag) * [Sector](#Sector) + * [SectorAddPieceToAny](#SectorAddPieceToAny) * [SectorCommitFlush](#SectorCommitFlush) * [SectorCommitPending](#SectorCommitPending) * [SectorGetExpectedSealDuration](#SectorGetExpectedSealDuration) @@ -118,6 +121,7 @@ * [SectorsRefs](#SectorsRefs) * [SectorsStatus](#SectorsStatus) * [SectorsSummary](#SectorsSummary) + * [SectorsUnsealPiece](#SectorsUnsealPiece) * [SectorsUpdate](#SectorsUpdate) * [Storage](#Storage) * [StorageAddLocal](#StorageAddLocal) @@ -227,6 +231,7 @@ Response: "PreCommitControl": null, "CommitControl": null, "TerminateControl": null, + "DealPublishControl": null, "DisableOwnerFallback": true, "DisableWorkerFallback": true } @@ -1519,6 +1524,28 @@ Inputs: Response: `{}` +## Runtime + + +### RuntimeSubsystems +RuntimeSubsystems returns the subsystems 
that are enabled +in this instance. + + +Perms: read + +Inputs: `null` + +Response: +```json +[ + "Mining", + "Sealing", + "SectorStorage", + "Markets" +] +``` + ## Sealing @@ -1560,6 +1587,54 @@ Response: `{}` ## Sector +### SectorAddPieceToAny +Add piece to an open sector. If no sectors with enough space are open, +either a new sector will be created, or this call will block until more +sectors can be created. + + +Perms: admin + +Inputs: +```json +[ + 1024, + {}, + { + "PublishCid": null, + "DealID": 5432, + "DealProposal": { + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceSize": 1032, + "VerifiedDeal": true, + "Client": "f01234", + "Provider": "f01234", + "Label": "string value", + "StartEpoch": 10101, + "EndEpoch": 10101, + "StoragePricePerEpoch": "0", + "ProviderCollateral": "0", + "ClientCollateral": "0" + }, + "DealSchedule": { + "StartEpoch": 10101, + "EndEpoch": 10101 + }, + "KeepUnsealed": true + } +] +``` + +Response: +```json +{ + "Sector": 9, + "Offset": 1032 +} +``` + ### SectorCommitFlush SectorCommitFlush immediately sends a Commit message with sectors aggregated for Commit. 
Returns null if message wasn't sent @@ -1860,6 +1935,30 @@ Response: } ``` +### SectorsUnsealPiece + + +Perms: admin + +Inputs: +```json +[ + { + "ID": { + "Miner": 1000, + "Number": 9 + }, + "ProofType": 8 + }, + 1040384, + 1024, + null, + null +] +``` + +Response: `{}` + ### SectorsUpdate diff --git a/documentation/en/api-v0-methods.md b/documentation/en/api-v0-methods.md index 7e7216f166e..4466cde8c88 100644 --- a/documentation/en/api-v0-methods.md +++ b/documentation/en/api-v0-methods.md @@ -17,6 +17,7 @@ * [ChainGetBlockMessages](#ChainGetBlockMessages) * [ChainGetGenesis](#ChainGetGenesis) * [ChainGetMessage](#ChainGetMessage) + * [ChainGetMessagesInTipset](#ChainGetMessagesInTipset) * [ChainGetNode](#ChainGetNode) * [ChainGetParentMessages](#ChainGetParentMessages) * [ChainGetParentReceipts](#ChainGetParentReceipts) @@ -533,6 +534,28 @@ Response: } ``` +### ChainGetMessagesInTipset +ChainGetMessagesInTipset returns message stores in current tipset + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `null` + ### ChainGetNode diff --git a/documentation/en/api-v1-unstable-methods.md b/documentation/en/api-v1-unstable-methods.md index 7619508293a..4d7c4dc3c47 100644 --- a/documentation/en/api-v1-unstable-methods.md +++ b/documentation/en/api-v1-unstable-methods.md @@ -11,12 +11,15 @@ * [Beacon](#Beacon) * [BeaconGetEntry](#BeaconGetEntry) * [Chain](#Chain) + * [ChainBlockstoreInfo](#ChainBlockstoreInfo) + * [ChainCheckBlockstore](#ChainCheckBlockstore) * [ChainDeleteObj](#ChainDeleteObj) * [ChainExport](#ChainExport) * [ChainGetBlock](#ChainGetBlock) * [ChainGetBlockMessages](#ChainGetBlockMessages) * [ChainGetGenesis](#ChainGetGenesis) * [ChainGetMessage](#ChainGetMessage) + * [ChainGetMessagesInTipset](#ChainGetMessagesInTipset) * [ChainGetNode](#ChainGetNode) * 
[ChainGetParentMessages](#ChainGetParentMessages) * [ChainGetParentReceipts](#ChainGetParentReceipts) @@ -349,6 +352,32 @@ The Chain method group contains methods for interacting with the blockchain, but that do not require any form of state computation. +### ChainBlockstoreInfo +ChainBlockstoreInfo returns some basic information about the blockstore + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "abc": 123 +} +``` + +### ChainCheckBlockstore +ChainCheckBlockstore performs an (asynchronous) health check on the chain/state blockstore +if supported by the underlying implementation. + + +Perms: admin + +Inputs: `null` + +Response: `{}` + ### ChainDeleteObj ChainDeleteObj deletes node referenced by the given CID @@ -535,6 +564,28 @@ Response: } ``` +### ChainGetMessagesInTipset +ChainGetMessagesInTipset returns message stores in current tipset + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `null` + ### ChainGetNode diff --git a/documentation/en/cli-lotus-miner.md b/documentation/en/cli-lotus-miner.md index b3dcebaae96..a292f6926d3 100644 --- a/documentation/en/cli-lotus-miner.md +++ b/documentation/en/cli-lotus-miner.md @@ -7,13 +7,13 @@ USAGE: lotus-miner [global options] command [command options] [arguments...] VERSION: - 1.11.0 + 1.11.1 COMMANDS: init Initialize a lotus miner repo run Start a lotus miner process stop Stop a running lotus miner - config Output default configuration + config Manage node config backup Create node metadata backup version Print version help, h Shows a list of commands or help for one command @@ -41,8 +41,11 @@ COMMANDS: GLOBAL OPTIONS: --actor value, -a value specify other actor to check state for (read only) - --color (default: false) - --miner-repo value, --storagerepo value Specify miner repo path. 
flag(storagerepo) and env(LOTUS_STORAGE_PATH) are DEPRECATION, will REMOVE SOON (default: "/Users/jennijuju/.lotusminer") [$LOTUS_MINER_PATH, $LOTUS_STORAGE_PATH] + --color use color in display output (default: depends on output being a TTY) + --miner-repo value, --storagerepo value Specify miner repo path. flag(storagerepo) and env(LOTUS_STORAGE_PATH) are DEPRECATION, will REMOVE SOON (default: "~/.lotusminer") [$LOTUS_MINER_PATH, $LOTUS_STORAGE_PATH] + --markets-repo value Markets repo path [$LOTUS_MARKETS_PATH] + --call-on-markets (experimental; may be removed) call this command against a markets node; use only with common commands like net, auth, pprof, etc. whose target may be ambiguous (default: false) + --vv enables very verbose mode, useful for debugging the CLI (default: false) --help, -h show help (default: false) --version, -v print the version (default: false) ``` @@ -57,6 +60,7 @@ USAGE: COMMANDS: restore Initialize a lotus miner repo from a backup + service Initialize a lotus miner sub-service help, h Shows a list of commands or help for one command OPTIONS: @@ -93,6 +97,24 @@ OPTIONS: ``` +### lotus-miner init service +``` +NAME: + lotus-miner init service - Initialize a lotus miner sub-service + +USAGE: + lotus-miner init service [command options] [backupFile] + +OPTIONS: + --config value config file (config.toml) + --nosync don't check full-node sync status (default: false) + --type value type of service to be enabled + --api-sealer value sealer API info (lotus-miner auth api-info --perm=admin) + --api-sector-index value sector Index API info (lotus-miner auth api-info --perm=admin) + --help, -h show help (default: false) + +``` + ## lotus-miner run ``` NAME: @@ -126,13 +148,47 @@ OPTIONS: ## lotus-miner config ``` NAME: - lotus-miner config - Output default configuration + lotus-miner config - Manage node config USAGE: - lotus-miner config [command options] [arguments...] + lotus-miner config command [command options] [arguments...] 
+ +COMMANDS: + default Print default node config + updated Print updated node config + help, h Shows a list of commands or help for one command OPTIONS: - --help, -h show help (default: false) + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +### lotus-miner config default +``` +NAME: + lotus-miner config default - Print default node config + +USAGE: + lotus-miner config default [command options] [arguments...] + +OPTIONS: + --no-comment don't comment default values (default: false) + --help, -h show help (default: false) + +``` + +### lotus-miner config updated +``` +NAME: + lotus-miner config updated - Print updated node config + +USAGE: + lotus-miner config updated [command options] [arguments...] + +OPTIONS: + --no-comment don't comment default values (default: false) + --help, -h show help (default: false) ``` @@ -188,6 +244,7 @@ COMMANDS: control Manage control addresses propose-change-worker Propose a worker address change confirm-change-worker Confirm a worker address change + compact-allocated compact allocated sectors bitfield help, h Shows a list of commands or help for one command OPTIONS: @@ -295,7 +352,7 @@ USAGE: OPTIONS: --verbose (default: false) - --color (default: true) + --color use color in display output (default: depends on output being a TTY) --help, -h show help (default: false) ``` @@ -342,6 +399,22 @@ OPTIONS: ``` +### lotus-miner actor compact-allocated +``` +NAME: + lotus-miner actor compact-allocated - compact allocated sectors bitfield + +USAGE: + lotus-miner actor compact-allocated [command options] [arguments...] 
+ +OPTIONS: + --mask-last-offset value Mask sector IDs from 0 to 'higest_allocated - offset' (default: 0) + --mask-upto-n value Mask sector IDs from 0 to 'n' (default: 0) + --really-do-it Actually send transaction performing the action (default: false) + --help, -h show help (default: false) + +``` + ## lotus-miner info ``` NAME: @@ -888,7 +961,7 @@ USAGE: OPTIONS: --verbose, -v print verbose transfer details (default: false) - --color use color in display output (default: true) + --color use color in display output (default: depends on output being a TTY) --completed show completed data transfers (default: false) --watch watch deal updates in real-time, rather than a one time list (default: false) --show-failed show failed/cancelled transfers (default: false) @@ -1344,7 +1417,7 @@ USAGE: OPTIONS: --show-removed show removed sectors (default: false) - --color, -c (default: true) + --color, -c use color in display output (default: depends on output being a TTY) --fast don't show on-chain info for better performance (default: false) --events display number of events the sector has received (default: false) --seal-time display how long it took for the sector to be sealed (default: false) @@ -1405,6 +1478,7 @@ OPTIONS: --new-expiration value new expiration epoch (default: 0) --v1-sectors renews all v1 sectors up to the maximum possible lifetime (default: false) --tolerance value when extending v1 sectors, don't try to extend sectors by fewer than this number of epochs (default: 20160) + --expiration-ignore value when extending v1 sectors, skip sectors whose current expiration is less than epochs from now (default: 120) --expiration-cutoff value when extending v1 sectors, skip sectors whose current expiration is more than epochs from now (infinity if unspecified) (default: 0) --help, -h show help (default: false) @@ -1739,7 +1813,7 @@ COMMANDS: help, h Shows a list of commands or help for one command OPTIONS: - --color (default: false) + --color use color in display 
output (default: depends on output being a TTY) --help, -h show help (default: false) --version, -v print the version (default: false) @@ -1754,7 +1828,7 @@ USAGE: lotus-miner storage list sectors [command options] [arguments...] OPTIONS: - --color (default: true) + --color use color in display output (default: depends on output being a TTY) --help, -h show help (default: false) ``` @@ -1816,7 +1890,7 @@ USAGE: lotus-miner sealing jobs [command options] [arguments...] OPTIONS: - --color (default: false) + --color use color in display output (default: depends on output being a TTY) --show-ret-done show returned but not consumed calls (default: false) --help, -h show help (default: false) @@ -1831,7 +1905,7 @@ USAGE: lotus-miner sealing workers [command options] [arguments...] OPTIONS: - --color (default: false) + --color use color in display output (default: depends on output being a TTY) --help, -h show help (default: false) ``` diff --git a/documentation/en/cli-lotus-worker.md b/documentation/en/cli-lotus-worker.md index a983a596cb9..177c45b92a3 100644 --- a/documentation/en/cli-lotus-worker.md +++ b/documentation/en/cli-lotus-worker.md @@ -7,7 +7,7 @@ USAGE: lotus-worker [global options] command [command options] [arguments...] VERSION: - 1.11.0 + 1.11.1 COMMANDS: run Start lotus worker @@ -20,7 +20,7 @@ COMMANDS: GLOBAL OPTIONS: --worker-repo value, --workerrepo value Specify worker repo path. flag workerrepo and env WORKER_PATH are DEPRECATION, will REMOVE SOON (default: "~/.lotusworker") [$LOTUS_WORKER_PATH, $WORKER_PATH] - --miner-repo value, --storagerepo value Specify miner repo path. flag storagerepo and env LOTUS_STORAGE_PATH are DEPRECATION, will REMOVE SOON (default: "/Users/jennijuju/.lotusminer") [$LOTUS_MINER_PATH, $LOTUS_STORAGE_PATH] + --miner-repo value, --storagerepo value Specify miner repo path. 
flag storagerepo and env LOTUS_STORAGE_PATH are DEPRECATION, will REMOVE SOON (default: "~/.lotusminer") [$LOTUS_MINER_PATH, $LOTUS_STORAGE_PATH] --enable-gpu-proving enable use of GPU for mining operations (default: true) --help, -h show help (default: false) --version, -v print the version (default: false) diff --git a/documentation/en/cli-lotus.md b/documentation/en/cli-lotus.md index aeefd8dfa40..a960299ff58 100644 --- a/documentation/en/cli-lotus.md +++ b/documentation/en/cli-lotus.md @@ -7,11 +7,12 @@ USAGE: lotus [global options] command [command options] [arguments...] VERSION: - 1.11.0 + 1.11.1 COMMANDS: daemon Start a lotus daemon process backup Create node metadata backup + config Manage node config version Print version help, h Shows a list of commands or help for one command BASIC: @@ -38,6 +39,7 @@ COMMANDS: GLOBAL OPTIONS: --interactive setting to false will disable interactive functionality of commands (default: false) --force-send if true, will ignore pre-send checks (default: false) + --vv enables very verbose mode, useful for debugging the CLI (default: false) --help, -h show help (default: false) --version, -v print the version (default: false) ``` @@ -108,6 +110,53 @@ OPTIONS: ``` +## lotus config +``` +NAME: + lotus config - Manage node config + +USAGE: + lotus config command [command options] [arguments...] + +COMMANDS: + default Print default node config + updated Print updated node config + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +### lotus config default +``` +NAME: + lotus config default - Print default node config + +USAGE: + lotus config default [command options] [arguments...] 
+ +OPTIONS: + --no-comment don't comment default values (default: false) + --help, -h show help (default: false) + +``` + +### lotus config updated +``` +NAME: + lotus config updated - Print updated node config + +USAGE: + lotus config updated [command options] [arguments...] + +OPTIONS: + --no-comment don't comment default values (default: false) + --help, -h show help (default: false) + +``` + ## lotus version ``` NAME: @@ -535,7 +584,7 @@ CATEGORY: OPTIONS: --verbose, -v print verbose deal details (default: false) - --color use color in display output (default: true) + --color use color in display output (default: depends on output being a TTY) --show-failed show failed/failing deals (default: true) --completed show completed retrievals (default: false) --watch watch deal updates in real-time, rather than a one time list (default: false) @@ -609,7 +658,7 @@ CATEGORY: OPTIONS: --verbose, -v print verbose deal details (default: false) - --color use color in display output (default: true) + --color use color in display output (default: depends on output being a TTY) --show-failed show failed/failing deals (default: false) --watch watch deal updates in real-time, rather than a one time list (default: false) --help, -h show help (default: false) @@ -747,7 +796,7 @@ CATEGORY: OPTIONS: --verbose, -v print verbose transfer details (default: false) - --color use color in display output (default: true) + --color use color in display output (default: depends on output being a TTY) --completed show completed data transfers (default: false) --watch watch deal updates in real-time, rather than a one time list (default: false) --show-failed show failed/cancelled transfers (default: false) @@ -1673,7 +1722,7 @@ NAME: lotus state get-actor - Print actor information USAGE: - lotus state get-actor [command options] [actorrAddress] + lotus state get-actor [command options] [actorAddress] OPTIONS: --help, -h show help (default: false) @@ -2140,7 +2189,7 @@ USAGE: lotus chain export 
[command options] [outputPath] OPTIONS: - --tipset value + --tipset value specify tipset to start the export from (default: "@head") --recent-stateroots value specify the number of recent state roots to include in the export (default: 0) --skip-old-msgs (default: false) --help, -h show help (default: false) diff --git a/documentation/misc/RELEASE_ISSUE_TEMPLATE.md b/documentation/misc/RELEASE_ISSUE_TEMPLATE.md index 0912a8681a6..53cfd041827 100644 --- a/documentation/misc/RELEASE_ISSUE_TEMPLATE.md +++ b/documentation/misc/RELEASE_ISSUE_TEMPLATE.md @@ -43,13 +43,8 @@ Testing an RC: - [ ] **Stage 1 - Internal Testing** - Binaries - [ ] Ensure the RC release has downloadable binaries - - [ ] Validate the binary is able to run on at least one platform - Upgrade our testnet infra - - [ ] 1 bootstrap node - - [ ] 1 miner - - [ ] Scratch nodes - [ ] Wait 24 hours, confirm nodes stay in sync - - [ ] Remaining testnet infra - Upgrade our mainnet infra - [ ] Subset of development full archival nodes - [ ] Subset of bootstrappers (1 per region) @@ -68,24 +63,15 @@ Testing an RC: - [ ] (optional) let a sector go faulty, and see it be recovered - [ ] **Stage 2 - Community Testing** - - [ ] Inform beta miners (@lotus-early-testers-miner in Filecoin Slack #fil-lotus) - - [ ] Ask close ecosystem partners to test their projects (@lotus-early-testers-eco-dev in Filecoin slack #fil-lotus) - - [ ] Powergate - - [ ] Glif - - [ ] Zondax - - [ ] Stats dashboard - - [ ] Community dashboards - - [ ] Infura - - [ ] Sentinel - - [ ] Protofire - - [ ] Fleek + - [ ] Inform beta lotus users (@lotus-early-testers in Filecoin Slack #fil-lotus) + - [ ] **Stage 3 - Community Prod Testing** - [ ] Documentation - [ ] Ensure that [CHANGELOG.md](https://github.com/filecoin-project/lotus/blob/master/CHANGELOG.md) is up to date - [ ] Check if any [config](https://docs.filecoin.io/get-started/lotus/configuration-and-advanced-usage/#configuration) updates are needed - [ ] Invite the wider community through 
(link to the release issue): - - [ ] Check `Create a discussion for this release` when tagging for the major rcs(new features, hot-fixes) release + - [ ] Check `Create a discussion for this release` when tagging for the major/close-to-final rcs(new features, hot-fixes) release - [ ] Link the disucssion in #fil-lotus on Filecoin slack - [ ] **Stage 4 - Release** @@ -96,11 +82,10 @@ Testing an RC: - [ ] Merge `release-vX.Y.Z` into the `releases` branch. - [ ] Tag this merge commit (on the `releases` branch) with `vX.Y.Z` - [ ] Cut the release [here](https://github.com/filecoin-project/lotus/releases/new?prerelease=true&target=releases). - - [ ] Check `Create a discussion for this release` when tagging the release - [ ] Final announcements - [ ] Update network.filecoin.io for mainnet, calib and nerpa. - - [ ] repost in #fil-lotus in filecoin slack - - [ ] Inform node provides (Protofire, Digital Ocean..) + - [ ] repost in #fil-lotus-announcement in filecoin slack + - [ ] Inform node providers (Protofire, Digital Ocean..) - [ ] **Post-Release** - [ ] Merge the `releases` branch back into `master`, ignoring the changes to `version.go` (keep the `-dev` version from master). Do NOT delete the `releases` branch when doing so! @@ -109,11 +94,7 @@ Testing an RC: ## ❤️ Contributors -< list generated by scripts/mkreleaselog > - -Would you like to contribute to Lotus and don't know how? Well, there are a few places you can get started: - -- TODO +See the final release notes! ## ⁉️ Do you have questions? 
diff --git a/extern/filecoin-ffi b/extern/filecoin-ffi index d60fc680aa8..a7b3c2e6953 160000 --- a/extern/filecoin-ffi +++ b/extern/filecoin-ffi @@ -1 +1 @@ -Subproject commit d60fc680aa8abeafba698f738fed5b94c9bda33d +Subproject commit a7b3c2e695393fd716e9265ff8cba932a3e38dd4 diff --git a/extern/sector-storage/ffiwrapper/sealer_cgo.go b/extern/sector-storage/ffiwrapper/sealer_cgo.go index 10fcad6fd3d..820c53c4b82 100644 --- a/extern/sector-storage/ffiwrapper/sealer_cgo.go +++ b/extern/sector-storage/ffiwrapper/sealer_cgo.go @@ -11,7 +11,6 @@ import ( "os" "runtime" - "github.com/filecoin-project/lotus/extern/sector-storage/partialfile" "github.com/ipfs/go-cid" "golang.org/x/xerrors" @@ -24,6 +23,7 @@ import ( commpffi "github.com/filecoin-project/go-commp-utils/ffiwrapper" "github.com/filecoin-project/go-commp-utils/zerocomm" "github.com/filecoin-project/lotus/extern/sector-storage/fr32" + "github.com/filecoin-project/lotus/extern/sector-storage/partialfile" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" ) diff --git a/extern/sector-storage/ffiwrapper/unseal_ranges.go b/extern/sector-storage/ffiwrapper/unseal_ranges.go index bc39abde229..3a13c73a74a 100644 --- a/extern/sector-storage/ffiwrapper/unseal_ranges.go +++ b/extern/sector-storage/ffiwrapper/unseal_ranges.go @@ -1,13 +1,13 @@ package ffiwrapper import ( - "github.com/filecoin-project/lotus/extern/sector-storage/partialfile" "golang.org/x/xerrors" rlepluslazy "github.com/filecoin-project/go-bitfield/rle" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/extern/sector-storage/partialfile" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" ) diff --git a/extern/sector-storage/manager.go b/extern/sector-storage/manager.go index 79eca74d68c..bf676bffaa1 100644 --- a/extern/sector-storage/manager.go +++ b/extern/sector-storage/manager.go @@ -29,8 +29,6 @@ var log = logging.Logger("advmgr") var ErrNoWorkers = errors.New("no suitable workers 
found") -type URLs []string - type Worker interface { storiface.WorkerCalls diff --git a/extern/sector-storage/mock/mock.go b/extern/sector-storage/mock/mock.go index 40db3999f34..273f0928e41 100644 --- a/extern/sector-storage/mock/mock.go +++ b/extern/sector-storage/mock/mock.go @@ -75,6 +75,10 @@ func (mgr *SectorMgr) NewSector(ctx context.Context, sector storage.SectorRef) e return nil } +func (mgr *SectorMgr) SectorsUnsealPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd *cid.Cid) error { + panic("SectorMgr: unsealing piece: implement me") +} + func (mgr *SectorMgr) AddPiece(ctx context.Context, sectorID storage.SectorRef, existingPieces []abi.UnpaddedPieceSize, size abi.UnpaddedPieceSize, r io.Reader) (abi.PieceInfo, error) { log.Warn("Add piece: ", sectorID, size, sectorID.ProofType) @@ -496,10 +500,6 @@ func (mgr *SectorMgr) ReturnFetch(ctx context.Context, callID storiface.CallID, panic("not supported") } -func (mgr *SectorMgr) SectorsUnsealPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd *cid.Cid) error { - return nil -} - func (m mockVerifProver) VerifySeal(svi proof5.SealVerifyInfo) (bool, error) { plen, err := svi.SealProof.ProofSize() if err != nil { diff --git a/extern/sector-storage/partialfile/partialfile.go b/extern/sector-storage/partialfile/partialfile.go index 2ef68de738c..529e889eaf2 100644 --- a/extern/sector-storage/partialfile/partialfile.go +++ b/extern/sector-storage/partialfile/partialfile.go @@ -7,7 +7,6 @@ import ( "syscall" "github.com/detailyang/go-fallocate" - logging "github.com/ipfs/go-log/v2" "golang.org/x/xerrors" rlepluslazy "github.com/filecoin-project/go-bitfield/rle" @@ -15,6 +14,8 @@ import ( "github.com/filecoin-project/lotus/extern/sector-storage/fsutil" 
"github.com/filecoin-project/lotus/extern/sector-storage/storiface" + + logging "github.com/ipfs/go-log/v2" ) var log = logging.Logger("partialfile") diff --git a/extern/sector-storage/stores/http_handler.go b/extern/sector-storage/stores/http_handler.go index dc7797157ae..5b8477fc8da 100644 --- a/extern/sector-storage/stores/http_handler.go +++ b/extern/sector-storage/stores/http_handler.go @@ -7,12 +7,12 @@ import ( "os" "strconv" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/extern/sector-storage/partialfile" "github.com/gorilla/mux" logging "github.com/ipfs/go-log/v2" "golang.org/x/xerrors" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/extern/sector-storage/partialfile" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" "github.com/filecoin-project/lotus/extern/sector-storage/tarutil" @@ -53,11 +53,10 @@ func (handler *FetchHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { mux := mux.NewRouter() mux.HandleFunc("/remote/stat/{id}", handler.remoteStatFs).Methods("GET") + mux.HandleFunc("/remote/{type}/{id}/{spt}/allocated/{offset}/{size}", handler.remoteGetAllocated).Methods("GET") mux.HandleFunc("/remote/{type}/{id}", handler.remoteGetSector).Methods("GET") mux.HandleFunc("/remote/{type}/{id}", handler.remoteDeleteSector).Methods("DELETE") - mux.HandleFunc("/remote/{type}/{id}/{spt}/allocated/{offset}/{size}", handler.remoteGetAllocated).Methods("GET") - mux.ServeHTTP(w, r) } diff --git a/extern/sector-storage/stores/index.go b/extern/sector-storage/stores/index.go index a84adf01606..9fd7f6d7d84 100644 --- a/extern/sector-storage/stores/index.go +++ b/extern/sector-storage/stores/index.go @@ -66,6 +66,8 @@ type SectorIndex interface { // part of storage-miner api // atomically acquire locks on all sector file types. 
close ctx to unlock StorageLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) error StorageTryLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) + + StorageList(ctx context.Context) (map[ID][]Decl, error) } type Decl struct { diff --git a/extern/sector-storage/stores/local.go b/extern/sector-storage/stores/local.go index 5a10b21b906..cac16013934 100644 --- a/extern/sector-storage/stores/local.go +++ b/extern/sector-storage/stores/local.go @@ -158,6 +158,8 @@ func (p *path) sectorPath(sid abi.SectorID, fileType storiface.SectorFileType) s return filepath.Join(p.local, fileType.String(), storiface.SectorName(sid)) } +type URLs []string + func NewLocal(ctx context.Context, ls LocalStorage, index SectorIndex, urls []string) (*Local, error) { l := &Local{ localStorage: ls, diff --git a/extern/sector-storage/stores/mocks/index.go b/extern/sector-storage/stores/mocks/index.go index e06fa70cccc..59a6017b569 100644 --- a/extern/sector-storage/stores/mocks/index.go +++ b/extern/sector-storage/stores/mocks/index.go @@ -1,7 +1,7 @@ // Code generated by MockGen. DO NOT EDIT. // Source: index.go -// Package mock_stores is a generated GoMock package. +// Package mocks is a generated GoMock package. package mocks import ( @@ -125,6 +125,21 @@ func (mr *MockSectorIndexMockRecorder) StorageInfo(arg0, arg1 interface{}) *gomo return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageInfo", reflect.TypeOf((*MockSectorIndex)(nil).StorageInfo), arg0, arg1) } +// StorageList mocks base method. +func (m *MockSectorIndex) StorageList(ctx context.Context) (map[stores.ID][]stores.Decl, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StorageList", ctx) + ret0, _ := ret[0].(map[stores.ID][]stores.Decl) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StorageList indicates an expected call of StorageList. 
+func (mr *MockSectorIndexMockRecorder) StorageList(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageList", reflect.TypeOf((*MockSectorIndex)(nil).StorageList), ctx) +} + // StorageLock mocks base method. func (m *MockSectorIndex) StorageLock(ctx context.Context, sector abi.SectorID, read, write storiface.SectorFileType) error { m.ctrl.T.Helper() diff --git a/extern/sector-storage/stores/mocks/stores.go b/extern/sector-storage/stores/mocks/stores.go index a408419a943..fdfd73a0774 100644 --- a/extern/sector-storage/stores/mocks/stores.go +++ b/extern/sector-storage/stores/mocks/stores.go @@ -1,7 +1,7 @@ // Code generated by MockGen. DO NOT EDIT. // Source: interface.go -// Package mock_stores is a generated GoMock package. +// Package mocks is a generated GoMock package. package mocks import ( diff --git a/extern/sector-storage/stores/remote.go b/extern/sector-storage/stores/remote.go index 1e1a54d47ba..6f8efc03ed6 100644 --- a/extern/sector-storage/stores/remote.go +++ b/extern/sector-storage/stores/remote.go @@ -297,6 +297,32 @@ func (r *Remote) fetch(ctx context.Context, url, outname string) error { } } +func (r *Remote) checkAllocated(ctx context.Context, url string, spt abi.RegisteredSealProof, offset, size abi.PaddedPieceSize) (bool, error) { + url = fmt.Sprintf("%s/%d/allocated/%d/%d", url, spt, offset.Unpadded(), size.Unpadded()) + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return false, xerrors.Errorf("request: %w", err) + } + req.Header = r.auth.Clone() + fmt.Printf("req using header: %#v \n", r.auth) + req = req.WithContext(ctx) + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return false, xerrors.Errorf("do request: %w", err) + } + defer resp.Body.Close() // nolint + + switch resp.StatusCode { + case http.StatusOK: + return true, nil + case http.StatusRequestedRangeNotSatisfiable: + return false, nil + default: + return false, 
xerrors.Errorf("unexpected http response: %d", resp.StatusCode) + } +} + func (r *Remote) MoveStorage(ctx context.Context, s storage.SectorRef, types storiface.SectorFileType) error { // Make sure we have the data local _, _, err := r.AcquireSector(ctx, s, types, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove) @@ -419,31 +445,6 @@ func (r *Remote) FsStat(ctx context.Context, id ID) (fsutil.FsStat, error) { return out, nil } -func (r *Remote) checkAllocated(ctx context.Context, url string, spt abi.RegisteredSealProof, offset, size abi.PaddedPieceSize) (bool, error) { - url = fmt.Sprintf("%s/%d/allocated/%d/%d", url, spt, offset.Unpadded(), size.Unpadded()) - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return false, xerrors.Errorf("request: %w", err) - } - req.Header = r.auth.Clone() - req = req.WithContext(ctx) - - resp, err := http.DefaultClient.Do(req) - if err != nil { - return false, xerrors.Errorf("do request: %w", err) - } - defer resp.Body.Close() // nolint - - switch resp.StatusCode { - case http.StatusOK: - return true, nil - case http.StatusRequestedRangeNotSatisfiable: - return false, nil - default: - return false, xerrors.Errorf("unexpected http response: %d", resp.StatusCode) - } -} - func (r *Remote) readRemote(ctx context.Context, url string, offset, size abi.PaddedPieceSize) (io.ReadCloser, error) { if len(r.limit) >= cap(r.limit) { log.Infof("Throttling remote read, %d already running", len(r.limit)) diff --git a/extern/storage-sealing/cbor_gen.go b/extern/storage-sealing/cbor_gen.go index 9e12b8649e9..b71c2863cff 100644 --- a/extern/storage-sealing/cbor_gen.go +++ b/extern/storage-sealing/cbor_gen.go @@ -8,7 +8,7 @@ import ( "sort" abi "github.com/filecoin-project/go-state-types/abi" - market "github.com/filecoin-project/specs-actors/actors/builtin/market" + api "github.com/filecoin-project/lotus/api" miner "github.com/filecoin-project/specs-actors/actors/builtin/miner" cid "github.com/ipfs/go-cid" cbg 
"github.com/whyrusleeping/cbor-gen" @@ -46,7 +46,7 @@ func (t *Piece) MarshalCBOR(w io.Writer) error { return err } - // t.DealInfo (sealing.DealInfo) (struct) + // t.DealInfo (api.PieceDealInfo) (struct) if len("DealInfo") > cbg.MaxLength { return xerrors.Errorf("Value in field \"DealInfo\" was too long") } @@ -107,7 +107,7 @@ func (t *Piece) UnmarshalCBOR(r io.Reader) error { } } - // t.DealInfo (sealing.DealInfo) (struct) + // t.DealInfo (api.PieceDealInfo) (struct) case "DealInfo": { @@ -120,7 +120,7 @@ func (t *Piece) UnmarshalCBOR(r io.Reader) error { if err := br.UnreadByte(); err != nil { return err } - t.DealInfo = new(DealInfo) + t.DealInfo = new(api.PieceDealInfo) if err := t.DealInfo.UnmarshalCBOR(br); err != nil { return xerrors.Errorf("unmarshaling t.DealInfo pointer: %w", err) } @@ -136,384 +136,6 @@ func (t *Piece) UnmarshalCBOR(r io.Reader) error { return nil } -func (t *DealInfo) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{165}); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.PublishCid (cid.Cid) (struct) - if len("PublishCid") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"PublishCid\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PublishCid"))); err != nil { - return err - } - if _, err := io.WriteString(w, string("PublishCid")); err != nil { - return err - } - - if t.PublishCid == nil { - if _, err := w.Write(cbg.CborNull); err != nil { - return err - } - } else { - if err := cbg.WriteCidBuf(scratch, w, *t.PublishCid); err != nil { - return xerrors.Errorf("failed to write cid field t.PublishCid: %w", err) - } - } - - // t.DealID (abi.DealID) (uint64) - if len("DealID") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"DealID\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealID"))); err != nil { - 
return err - } - if _, err := io.WriteString(w, string("DealID")); err != nil { - return err - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.DealID)); err != nil { - return err - } - - // t.DealProposal (market.DealProposal) (struct) - if len("DealProposal") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"DealProposal\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealProposal"))); err != nil { - return err - } - if _, err := io.WriteString(w, string("DealProposal")); err != nil { - return err - } - - if err := t.DealProposal.MarshalCBOR(w); err != nil { - return err - } - - // t.DealSchedule (sealing.DealSchedule) (struct) - if len("DealSchedule") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"DealSchedule\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealSchedule"))); err != nil { - return err - } - if _, err := io.WriteString(w, string("DealSchedule")); err != nil { - return err - } - - if err := t.DealSchedule.MarshalCBOR(w); err != nil { - return err - } - - // t.KeepUnsealed (bool) (bool) - if len("KeepUnsealed") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"KeepUnsealed\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("KeepUnsealed"))); err != nil { - return err - } - if _, err := io.WriteString(w, string("KeepUnsealed")); err != nil { - return err - } - - if err := cbg.WriteBool(w, t.KeepUnsealed); err != nil { - return err - } - return nil -} - -func (t *DealInfo) UnmarshalCBOR(r io.Reader) error { - *t = DealInfo{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajMap { - return fmt.Errorf("cbor input should be of type map") - } - - if extra > cbg.MaxLength { - return fmt.Errorf("DealInfo: 
map struct too large (%d)", extra) - } - - var name string - n := extra - - for i := uint64(0); i < n; i++ { - - { - sval, err := cbg.ReadStringBuf(br, scratch) - if err != nil { - return err - } - - name = string(sval) - } - - switch name { - // t.PublishCid (cid.Cid) (struct) - case "PublishCid": - - { - - b, err := br.ReadByte() - if err != nil { - return err - } - if b != cbg.CborNull[0] { - if err := br.UnreadByte(); err != nil { - return err - } - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.PublishCid: %w", err) - } - - t.PublishCid = &c - } - - } - // t.DealID (abi.DealID) (uint64) - case "DealID": - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.DealID = abi.DealID(extra) - - } - // t.DealProposal (market.DealProposal) (struct) - case "DealProposal": - - { - - b, err := br.ReadByte() - if err != nil { - return err - } - if b != cbg.CborNull[0] { - if err := br.UnreadByte(); err != nil { - return err - } - t.DealProposal = new(market.DealProposal) - if err := t.DealProposal.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.DealProposal pointer: %w", err) - } - } - - } - // t.DealSchedule (sealing.DealSchedule) (struct) - case "DealSchedule": - - { - - if err := t.DealSchedule.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.DealSchedule: %w", err) - } - - } - // t.KeepUnsealed (bool) (bool) - case "KeepUnsealed": - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajOther { - return fmt.Errorf("booleans must be major type 7") - } - switch extra { - case 20: - t.KeepUnsealed = false - case 21: - t.KeepUnsealed = true - default: - return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) - } - - default: - // Field doesn't exist on this 
type, so ignore it - cbg.ScanForLinks(r, func(cid.Cid) {}) - } - } - - return nil -} -func (t *DealSchedule) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{162}); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.StartEpoch (abi.ChainEpoch) (int64) - if len("StartEpoch") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"StartEpoch\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("StartEpoch"))); err != nil { - return err - } - if _, err := io.WriteString(w, string("StartEpoch")); err != nil { - return err - } - - if t.StartEpoch >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.StartEpoch)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.StartEpoch-1)); err != nil { - return err - } - } - - // t.EndEpoch (abi.ChainEpoch) (int64) - if len("EndEpoch") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"EndEpoch\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("EndEpoch"))); err != nil { - return err - } - if _, err := io.WriteString(w, string("EndEpoch")); err != nil { - return err - } - - if t.EndEpoch >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.EndEpoch)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.EndEpoch-1)); err != nil { - return err - } - } - return nil -} - -func (t *DealSchedule) UnmarshalCBOR(r io.Reader) error { - *t = DealSchedule{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajMap { - return fmt.Errorf("cbor input should be of type map") - } - - if extra > cbg.MaxLength { 
- return fmt.Errorf("DealSchedule: map struct too large (%d)", extra) - } - - var name string - n := extra - - for i := uint64(0); i < n; i++ { - - { - sval, err := cbg.ReadStringBuf(br, scratch) - if err != nil { - return err - } - - name = string(sval) - } - - switch name { - // t.StartEpoch (abi.ChainEpoch) (int64) - case "StartEpoch": - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.StartEpoch = abi.ChainEpoch(extraI) - } - // t.EndEpoch (abi.ChainEpoch) (int64) - case "EndEpoch": - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.EndEpoch = abi.ChainEpoch(extraI) - } - - default: - // Field doesn't exist on this type, so ignore it - cbg.ScanForLinks(r, func(cid.Cid) {}) - } - } - - return nil -} func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if t == nil { _, err := w.Write(cbg.CborNull) diff --git a/extern/storage-sealing/checks.go b/extern/storage-sealing/checks.go index 5ee39e58f6c..5ba23026d04 100644 --- a/extern/storage-sealing/checks.go +++ b/extern/storage-sealing/checks.go @@ -62,7 +62,7 @@ func checkPieces(ctx context.Context, maddr address.Address, si SectorInfo, api } if proposal.PieceCID != p.Piece.PieceCID { - 
return &ErrInvalidDeals{xerrors.Errorf("piece %d (of %d) of sector %d refers deal %d with wrong PieceCID: %x != %x", i, len(si.Pieces), si.SectorNumber, p.DealInfo.DealID, p.Piece.PieceCID, proposal.PieceCID)} + return &ErrInvalidDeals{xerrors.Errorf("piece %d (of %d) of sector %d refers deal %d with wrong PieceCID: %s != %s", i, len(si.Pieces), si.SectorNumber, p.DealInfo.DealID, p.Piece.PieceCID, proposal.PieceCID)} } if p.Piece.Size != proposal.PieceSize { diff --git a/extern/storage-sealing/commit_batch.go b/extern/storage-sealing/commit_batch.go index 63bd3c7db57..c12a9f9334c 100644 --- a/extern/storage-sealing/commit_batch.go +++ b/extern/storage-sealing/commit_batch.go @@ -7,10 +7,6 @@ import ( "sync" "time" - "github.com/filecoin-project/go-state-types/network" - - "github.com/filecoin-project/lotus/chain/actors" - "github.com/ipfs/go-cid" "golang.org/x/xerrors" @@ -18,13 +14,16 @@ import ( "github.com/filecoin-project/go-bitfield" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/network" miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/policy" + "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" "github.com/filecoin-project/lotus/node/config" @@ -46,6 +45,7 @@ type CommitBatcherApi interface { StateSectorPreCommitInfo(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok TipSetToken) (*miner.SectorPreCommitOnChainInfo, error) 
StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, TipSetToken) (big.Int, error) StateNetworkVersion(ctx context.Context, tok TipSetToken) (network.Version, error) + StateMinerAvailableBalance(context.Context, address.Address, TipSetToken) (big.Int, error) } type AggregateInput struct { @@ -225,7 +225,7 @@ func (b *CommitBatcher) maybeStartBatch(notif bool) ([]sealiface.CommitBatchRes, } if individual { - res, err = b.processIndividually() + res, err = b.processIndividually(cfg) } else { res, err = b.processBatch(cfg) } @@ -338,9 +338,18 @@ func (b *CommitBatcher) processBatch(cfg sealiface.Config) ([]sealiface.CommitBa return []sealiface.CommitBatchRes{res}, xerrors.Errorf("getting network version: %s", err) } - aggFee := big.Div(big.Mul(policy.AggregateNetworkFee(nv, len(infos), bf), aggFeeNum), aggFeeDen) + aggFeeRaw, err := policy.AggregateNetworkFee(nv, len(infos), bf) + if err != nil { + log.Errorf("getting aggregate network fee: %s", err) + return []sealiface.CommitBatchRes{res}, xerrors.Errorf("getting aggregate network fee: %s", err) + } + aggFee := big.Div(big.Mul(aggFeeRaw, aggFeeNum), aggFeeDen) needFunds := big.Add(collateral, aggFee) + needFunds, err = collateralSendAmount(b.mctx, b.api, b.maddr, cfg, needFunds) + if err != nil { + return []sealiface.CommitBatchRes{res}, err + } goodFunds := big.Add(maxFee, needFunds) @@ -361,12 +370,26 @@ func (b *CommitBatcher) processBatch(cfg sealiface.Config) ([]sealiface.CommitBa return []sealiface.CommitBatchRes{res}, nil } -func (b *CommitBatcher) processIndividually() ([]sealiface.CommitBatchRes, error) { +func (b *CommitBatcher) processIndividually(cfg sealiface.Config) ([]sealiface.CommitBatchRes, error) { mi, err := b.api.StateMinerInfo(b.mctx, b.maddr, nil) if err != nil { return nil, xerrors.Errorf("couldn't get miner info: %w", err) } + avail := types.TotalFilecoinInt + + if cfg.CollateralFromMinerBalance && !cfg.DisableCollateralFallback { + avail, err = 
b.api.StateMinerAvailableBalance(b.mctx, b.maddr, nil) + if err != nil { + return nil, xerrors.Errorf("getting available miner balance: %w", err) + } + + avail = big.Sub(avail, cfg.AvailableBalanceBuffer) + if avail.LessThan(big.Zero()) { + avail = big.Zero() + } + } + tok, _, err := b.api.ChainHead(b.mctx) if err != nil { return nil, err @@ -380,7 +403,7 @@ func (b *CommitBatcher) processIndividually() ([]sealiface.CommitBatchRes, error FailedSectors: map[abi.SectorNumber]string{}, } - mcid, err := b.processSingle(mi, sn, info, tok) + mcid, err := b.processSingle(cfg, mi, &avail, sn, info, tok) if err != nil { log.Errorf("process single error: %+v", err) // todo: return to user r.FailedSectors[sn] = err.Error() @@ -394,7 +417,7 @@ func (b *CommitBatcher) processIndividually() ([]sealiface.CommitBatchRes, error return res, nil } -func (b *CommitBatcher) processSingle(mi miner.MinerInfo, sn abi.SectorNumber, info AggregateInput, tok TipSetToken) (cid.Cid, error) { +func (b *CommitBatcher) processSingle(cfg sealiface.Config, mi miner.MinerInfo, avail *abi.TokenAmount, sn abi.SectorNumber, info AggregateInput, tok TipSetToken) (cid.Cid, error) { enc := new(bytes.Buffer) params := &miner.ProveCommitSectorParams{ SectorNumber: sn, @@ -410,6 +433,19 @@ func (b *CommitBatcher) processSingle(mi miner.MinerInfo, sn abi.SectorNumber, i return cid.Undef, err } + if cfg.CollateralFromMinerBalance { + c := big.Sub(collateral, *avail) + *avail = big.Sub(*avail, collateral) + collateral = c + + if collateral.LessThan(big.Zero()) { + collateral = big.Zero() + } + if (*avail).LessThan(big.Zero()) { + *avail = big.Zero() + } + } + goodFunds := big.Add(collateral, big.Int(b.feeCfg.MaxCommitGasFee)) from, _, err := b.addrSel(b.mctx, mi, api.CommitAddr, goodFunds, collateral) @@ -527,8 +563,18 @@ func (b *CommitBatcher) getCommitCutoff(si SectorInfo) (time.Time, error) { log.Errorf("getting precommit info: %s", err) return time.Now(), err } + av, err := actors.VersionForNetwork(nv) + 
if err != nil { + log.Errorf("unsupported network vrsion: %s", err) + return time.Now(), err + } + mpcd, err := policy.GetMaxProveCommitDuration(av, si.SectorType) + if err != nil { + log.Errorf("getting max prove commit duration: %s", err) + return time.Now(), err + } - cutoffEpoch := pci.PreCommitEpoch + policy.GetMaxProveCommitDuration(actors.VersionForNetwork(nv), si.SectorType) + cutoffEpoch := pci.PreCommitEpoch + mpcd for _, p := range si.Pieces { if p.DealInfo == nil { diff --git a/extern/storage-sealing/currentdealinfo.go b/extern/storage-sealing/currentdealinfo.go index 44fa68b5468..ed93512c28a 100644 --- a/extern/storage-sealing/currentdealinfo.go +++ b/extern/storage-sealing/currentdealinfo.go @@ -69,6 +69,10 @@ func (mgr *CurrentDealInfoManager) dealIDFromPublishDealsMsg(ctx context.Context return dealID, nil, xerrors.Errorf("looking for publish deal message %s: search msg failed: %w", publishCid, err) } + if lookup == nil { + return dealID, nil, xerrors.Errorf("looking for publish deal message %s: not found", publishCid) + } + if lookup.Receipt.ExitCode != exitcode.Ok { return dealID, nil, xerrors.Errorf("looking for publish deal message %s: non-ok exit code: %s", publishCid, lookup.Receipt.ExitCode) } diff --git a/extern/storage-sealing/currentdealinfo_test.go b/extern/storage-sealing/currentdealinfo_test.go index ee51d8c75db..b28dd461abd 100644 --- a/extern/storage-sealing/currentdealinfo_test.go +++ b/extern/storage-sealing/currentdealinfo_test.go @@ -25,7 +25,7 @@ import ( "github.com/stretchr/testify/require" ) -var errNotFound = errors.New("Could not find") +var errNotFound = errors.New("could not find") func TestGetCurrentDealInfo(t *testing.T) { ctx := context.Background() @@ -180,6 +180,12 @@ func TestGetCurrentDealInfo(t *testing.T) { expectedDealID: zeroDealID, expectedError: xerrors.Errorf("looking for publish deal message %s: search msg failed: something went wrong", dummyCid), }, + "search message not found": { + publishCid: dummyCid, + 
targetProposal: &proposal, + expectedDealID: zeroDealID, + expectedError: xerrors.Errorf("looking for publish deal message %s: not found", dummyCid), + }, "return code not ok": { publishCid: dummyCid, searchMessageLookup: &MsgLookup{ diff --git a/extern/storage-sealing/gen/main.go b/extern/storage-sealing/gen/main.go index 97c2bacd5bd..825ce8d284b 100644 --- a/extern/storage-sealing/gen/main.go +++ b/extern/storage-sealing/gen/main.go @@ -12,8 +12,6 @@ import ( func main() { err := gen.WriteMapEncodersToFile("./cbor_gen.go", "sealing", sealing.Piece{}, - sealing.DealInfo{}, - sealing.DealSchedule{}, sealing.SectorInfo{}, sealing.Log{}, ) diff --git a/extern/storage-sealing/input.go b/extern/storage-sealing/input.go index 85a5c429f4d..1a0b7bf1e8b 100644 --- a/extern/storage-sealing/input.go +++ b/extern/storage-sealing/input.go @@ -14,6 +14,7 @@ import ( "github.com/filecoin-project/go-statemachine" "github.com/filecoin-project/specs-storage/storage" + "github.com/filecoin-project/lotus/api" sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" @@ -236,34 +237,34 @@ func (m *Sealing) handleAddPieceFailed(ctx statemachine.Context, sector SectorIn return nil } -func (m *Sealing) AddPieceToAnySector(ctx context.Context, size abi.UnpaddedPieceSize, data storage.Data, deal DealInfo) (abi.SectorNumber, abi.PaddedPieceSize, error) { +func (m *Sealing) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, data storage.Data, deal api.PieceDealInfo) (api.SectorOffset, error) { log.Infof("Adding piece for deal %d (publish msg: %s)", deal.DealID, deal.PublishCid) if (padreader.PaddedSize(uint64(size))) != size { - return 0, 0, xerrors.Errorf("cannot allocate unpadded piece") + return api.SectorOffset{}, xerrors.Errorf("cannot allocate unpadded piece") } sp, err := m.currentSealProof(ctx) if err != nil { - 
return 0, 0, xerrors.Errorf("getting current seal proof type: %w", err) + return api.SectorOffset{}, xerrors.Errorf("getting current seal proof type: %w", err) } ssize, err := sp.SectorSize() if err != nil { - return 0, 0, err + return api.SectorOffset{}, err } if size > abi.PaddedPieceSize(ssize).Unpadded() { - return 0, 0, xerrors.Errorf("piece cannot fit into a sector") + return api.SectorOffset{}, xerrors.Errorf("piece cannot fit into a sector") } if _, err := deal.DealProposal.Cid(); err != nil { - return 0, 0, xerrors.Errorf("getting proposal CID: %w", err) + return api.SectorOffset{}, xerrors.Errorf("getting proposal CID: %w", err) } m.inputLk.Lock() if _, exist := m.pendingPieces[proposalCID(deal)]; exist { m.inputLk.Unlock() - return 0, 0, xerrors.Errorf("piece for deal %s already pending", proposalCID(deal)) + return api.SectorOffset{}, xerrors.Errorf("piece for deal %s already pending", proposalCID(deal)) } resCh := make(chan struct { @@ -295,7 +296,7 @@ func (m *Sealing) AddPieceToAnySector(ctx context.Context, size abi.UnpaddedPiec res := <-resCh - return res.sn, res.offset.Padded(), res.err + return api.SectorOffset{Sector: res.sn, Offset: res.offset.Padded()}, res.err } // called with m.inputLk @@ -454,7 +455,7 @@ func (m *Sealing) StartPacking(sid abi.SectorNumber) error { return m.sectors.Send(uint64(sid), SectorStartPacking{}) } -func proposalCID(deal DealInfo) cid.Cid { +func proposalCID(deal api.PieceDealInfo) cid.Cid { pc, err := deal.DealProposal.Cid() if err != nil { log.Errorf("DealProposal.Cid error: %+v", err) diff --git a/extern/storage-sealing/mocks/mock_commit_batcher.go b/extern/storage-sealing/mocks/mock_commit_batcher.go index c4746e0ebf7..061121899c8 100644 --- a/extern/storage-sealing/mocks/mock_commit_batcher.go +++ b/extern/storage-sealing/mocks/mock_commit_batcher.go @@ -88,6 +88,21 @@ func (mr *MockCommitBatcherApiMockRecorder) SendMsg(arg0, arg1, arg2, arg3, arg4 return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", 
reflect.TypeOf((*MockCommitBatcherApi)(nil).SendMsg), arg0, arg1, arg2, arg3, arg4, arg5, arg6) } +// StateMinerAvailableBalance mocks base method. +func (m *MockCommitBatcherApi) StateMinerAvailableBalance(arg0 context.Context, arg1 address.Address, arg2 sealing.TipSetToken) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerAvailableBalance", arg0, arg1, arg2) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerAvailableBalance indicates an expected call of StateMinerAvailableBalance. +func (mr *MockCommitBatcherApiMockRecorder) StateMinerAvailableBalance(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerAvailableBalance", reflect.TypeOf((*MockCommitBatcherApi)(nil).StateMinerAvailableBalance), arg0, arg1, arg2) +} + // StateMinerInfo mocks base method. func (m *MockCommitBatcherApi) StateMinerInfo(arg0 context.Context, arg1 address.Address, arg2 sealing.TipSetToken) (miner.MinerInfo, error) { m.ctrl.T.Helper() diff --git a/extern/storage-sealing/mocks/mock_precommit_batcher.go b/extern/storage-sealing/mocks/mock_precommit_batcher.go index 4a50740271c..ed97229b405 100644 --- a/extern/storage-sealing/mocks/mock_precommit_batcher.go +++ b/extern/storage-sealing/mocks/mock_precommit_batcher.go @@ -71,6 +71,21 @@ func (mr *MockPreCommitBatcherApiMockRecorder) SendMsg(arg0, arg1, arg2, arg3, a return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockPreCommitBatcherApi)(nil).SendMsg), arg0, arg1, arg2, arg3, arg4, arg5, arg6) } +// StateMinerAvailableBalance mocks base method. 
+func (m *MockPreCommitBatcherApi) StateMinerAvailableBalance(arg0 context.Context, arg1 address.Address, arg2 sealing.TipSetToken) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerAvailableBalance", arg0, arg1, arg2) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerAvailableBalance indicates an expected call of StateMinerAvailableBalance. +func (mr *MockPreCommitBatcherApiMockRecorder) StateMinerAvailableBalance(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerAvailableBalance", reflect.TypeOf((*MockPreCommitBatcherApi)(nil).StateMinerAvailableBalance), arg0, arg1, arg2) +} + // StateMinerInfo mocks base method. func (m *MockPreCommitBatcherApi) StateMinerInfo(arg0 context.Context, arg1 address.Address, arg2 sealing.TipSetToken) (miner.MinerInfo, error) { m.ctrl.T.Helper() diff --git a/extern/storage-sealing/precommit_batch.go b/extern/storage-sealing/precommit_batch.go index 8b132a2ebfb..719455b909f 100644 --- a/extern/storage-sealing/precommit_batch.go +++ b/extern/storage-sealing/precommit_batch.go @@ -7,9 +7,6 @@ import ( "sync" "time" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/actors/policy" - "github.com/ipfs/go-cid" "golang.org/x/xerrors" @@ -20,7 +17,9 @@ import ( miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" "github.com/filecoin-project/lotus/node/config" ) @@ -30,6 +29,7 @@ import ( type PreCommitBatcherApi interface { SendMsg(ctx context.Context, from, to address.Address, method abi.MethodNum, value, maxFee abi.TokenAmount, params []byte) (cid.Cid, error) 
StateMinerInfo(context.Context, address.Address, TipSetToken) (miner.MinerInfo, error) + StateMinerAvailableBalance(context.Context, address.Address, TipSetToken) (big.Int, error) ChainHead(ctx context.Context) (TipSetToken, abi.ChainEpoch, error) } @@ -226,6 +226,11 @@ func (b *PreCommitBatcher) processBatch(cfg sealiface.Config) ([]sealiface.PreCo deposit = big.Add(deposit, p.deposit) } + deposit, err := collateralSendAmount(b.mctx, b.api, b.maddr, cfg, deposit) + if err != nil { + return []sealiface.PreCommitBatchRes{res}, err + } + enc := new(bytes.Buffer) if err := params.MarshalCBOR(enc); err != nil { return []sealiface.PreCommitBatchRes{res}, xerrors.Errorf("couldn't serialize PreCommitSectorBatchParams: %w", err) diff --git a/extern/storage-sealing/precommit_policy_test.go b/extern/storage-sealing/precommit_policy_test.go index 52814167a57..a6c17d3fdc5 100644 --- a/extern/storage-sealing/precommit_policy_test.go +++ b/extern/storage-sealing/precommit_policy_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/filecoin-project/go-state-types/network" + api "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" "github.com/ipfs/go-cid" @@ -58,9 +59,9 @@ func TestBasicPolicyMostConstrictiveSchedule(t *testing.T) { Size: abi.PaddedPieceSize(1024), PieceCID: fakePieceCid(t), }, - DealInfo: &sealing.DealInfo{ + DealInfo: &api.PieceDealInfo{ DealID: abi.DealID(42), - DealSchedule: sealing.DealSchedule{ + DealSchedule: api.DealSchedule{ StartEpoch: abi.ChainEpoch(70), EndEpoch: abi.ChainEpoch(75), }, @@ -71,9 +72,9 @@ func TestBasicPolicyMostConstrictiveSchedule(t *testing.T) { Size: abi.PaddedPieceSize(1024), PieceCID: fakePieceCid(t), }, - DealInfo: &sealing.DealInfo{ + DealInfo: &api.PieceDealInfo{ DealID: abi.DealID(43), - DealSchedule: sealing.DealSchedule{ + DealSchedule: api.DealSchedule{ StartEpoch: abi.ChainEpoch(80), EndEpoch: abi.ChainEpoch(100), }, @@ -98,9 +99,9 @@ func TestBasicPolicyIgnoresExistingScheduleIfExpired(t 
*testing.T) { Size: abi.PaddedPieceSize(1024), PieceCID: fakePieceCid(t), }, - DealInfo: &sealing.DealInfo{ + DealInfo: &api.PieceDealInfo{ DealID: abi.DealID(44), - DealSchedule: sealing.DealSchedule{ + DealSchedule: api.DealSchedule{ StartEpoch: abi.ChainEpoch(1), EndEpoch: abi.ChainEpoch(10), }, @@ -125,9 +126,9 @@ func TestMissingDealIsIgnored(t *testing.T) { Size: abi.PaddedPieceSize(1024), PieceCID: fakePieceCid(t), }, - DealInfo: &sealing.DealInfo{ + DealInfo: &api.PieceDealInfo{ DealID: abi.DealID(44), - DealSchedule: sealing.DealSchedule{ + DealSchedule: api.DealSchedule{ StartEpoch: abi.ChainEpoch(1), EndEpoch: abi.ChainEpoch(10), }, diff --git a/extern/storage-sealing/sealiface/config.go b/extern/storage-sealing/sealiface/config.go index 0410b92c09e..e33b3626319 100644 --- a/extern/storage-sealing/sealiface/config.go +++ b/extern/storage-sealing/sealiface/config.go @@ -24,6 +24,10 @@ type Config struct { FinalizeEarly bool + CollateralFromMinerBalance bool + AvailableBalanceBuffer abi.TokenAmount + DisableCollateralFallback bool + BatchPreCommits bool MaxPreCommitBatch int PreCommitBatchWait time.Duration diff --git a/extern/storage-sealing/sealing.go b/extern/storage-sealing/sealing.go index 8a70704c416..3e40d10f396 100644 --- a/extern/storage-sealing/sealing.go +++ b/extern/storage-sealing/sealing.go @@ -59,6 +59,7 @@ type SealingAPI interface { StateMinerPreCommitDepositForPower(context.Context, address.Address, miner.SectorPreCommitInfo, TipSetToken) (big.Int, error) StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, TipSetToken) (big.Int, error) StateMinerInfo(context.Context, address.Address, TipSetToken) (miner.MinerInfo, error) + StateMinerAvailableBalance(context.Context, address.Address, TipSetToken) (big.Int, error) StateMinerSectorAllocated(context.Context, address.Address, abi.SectorNumber, TipSetToken) (bool, error) StateMarketStorageDeal(context.Context, abi.DealID, TipSetToken) (*api.MarketDeal, 
error) StateMarketStorageDealProposal(context.Context, abi.DealID, TipSetToken) (market.DealProposal, error) @@ -124,7 +125,7 @@ type openSector struct { type pendingPiece struct { size abi.UnpaddedPieceSize - deal DealInfo + deal api.PieceDealInfo data storage.Data diff --git a/extern/storage-sealing/states_failed.go b/extern/storage-sealing/states_failed.go index 201c4456f51..bd5f489b40e 100644 --- a/extern/storage-sealing/states_failed.go +++ b/extern/storage-sealing/states_failed.go @@ -142,7 +142,7 @@ func (m *Sealing) handlePreCommitFailed(ctx statemachine.Context, sector SectorI } if pci.Info.SealedCID != *sector.CommR { - log.Warnf("sector %d is precommitted on chain, with different CommR: %x != %x", sector.SectorNumber, pci.Info.SealedCID, sector.CommR) + log.Warnf("sector %d is precommitted on chain, with different CommR: %s != %s", sector.SectorNumber, pci.Info.SealedCID, sector.CommR) return nil // TODO: remove when the actor allows re-precommit } @@ -354,7 +354,7 @@ func (m *Sealing) handleRecoverDealIDs(ctx statemachine.Context, sector SectorIn } if proposal.PieceCID != p.Piece.PieceCID { - log.Warnf("piece %d (of %d) of sector %d refers deal %d with wrong PieceCID: %x != %x", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, p.Piece.PieceCID, proposal.PieceCID) + log.Warnf("piece %d (of %d) of sector %d refers deal %d with wrong PieceCID: %s != %s", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, p.Piece.PieceCID, proposal.PieceCID) toFix = append(toFix, i) continue } diff --git a/extern/storage-sealing/states_proving.go b/extern/storage-sealing/states_proving.go index 212fd906f05..5e613b20b46 100644 --- a/extern/storage-sealing/states_proving.go +++ b/extern/storage-sealing/states_proving.go @@ -126,3 +126,22 @@ func (m *Sealing) handleRemoving(ctx statemachine.Context, sector SectorInfo) er return ctx.Send(SectorRemoved{}) } + +func (m *Sealing) handleProvingSector(ctx statemachine.Context, sector SectorInfo) error { + 
// TODO: track sector health / expiration + log.Infof("Proving sector %d", sector.SectorNumber) + + cfg, err := m.getConfig() + if err != nil { + return xerrors.Errorf("getting sealing config: %w", err) + } + + if err := m.sealer.ReleaseUnsealed(ctx.Context(), m.minerSector(sector.SectorType, sector.SectorNumber), sector.keepUnsealedRanges(true, cfg.AlwaysKeepUnsealedCopy)); err != nil { + log.Error(err) + } + + // TODO: Watch termination + // TODO: Auto-extend if set + + return nil +} diff --git a/extern/storage-sealing/states_sealing.go b/extern/storage-sealing/states_sealing.go index 1442d82cc60..7bedc1ed382 100644 --- a/extern/storage-sealing/states_sealing.go +++ b/extern/storage-sealing/states_sealing.go @@ -143,7 +143,14 @@ func (m *Sealing) getTicket(ctx statemachine.Context, sector SectorInfo) (abi.Se return nil, 0, allocated, xerrors.Errorf("getTicket: StateNetworkVersion: api error, not proceeding: %+v", err) } - msd := policy.GetMaxProveCommitDuration(actors.VersionForNetwork(nv), sector.SectorType) + av, err := actors.VersionForNetwork(nv) + if err != nil { + return nil, 0, allocated, xerrors.Errorf("getTicket: actor version for network error, not proceeding: %w", err) + } + msd, err := policy.GetMaxProveCommitDuration(av, sector.SectorType) + if err != nil { + return nil, 0, allocated, xerrors.Errorf("getTicket: max prove commit duration policy error, not proceeding: %w", err) + } if checkProveCommitExpired(pci.PreCommitEpoch, msd, epoch) { return nil, 0, allocated, xerrors.Errorf("ticket expired for precommitted sector") @@ -223,7 +230,16 @@ func (m *Sealing) handlePreCommit1(ctx statemachine.Context, sector SectorInfo) return nil } - msd := policy.GetMaxProveCommitDuration(actors.VersionForNetwork(nv), sector.SectorType) + av, err := actors.VersionForNetwork(nv) + if err != nil { + log.Errorf("handlePreCommit1: VersionForNetwork error, not proceeding: %w", err) + return nil + } + msd, err := policy.GetMaxProveCommitDuration(av, sector.SectorType) + 
if err != nil { + log.Errorf("handlePreCommit1: GetMaxProveCommitDuration error, not proceeding: %w", err) + return nil + } // if height > PreCommitEpoch + msd, there is no need to recalculate if checkProveCommitExpired(pci.PreCommitEpoch, msd, height) { @@ -311,7 +327,14 @@ func (m *Sealing) preCommitParams(ctx statemachine.Context, sector SectorInfo) ( return nil, big.Zero(), nil, ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("failed to get network version: %w", err)}) } - msd := policy.GetMaxProveCommitDuration(actors.VersionForNetwork(nv), sector.SectorType) + av, err := actors.VersionForNetwork(nv) + if err != nil { + return nil, big.Zero(), nil, ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("failed to get actors version: %w", err)}) + } + msd, err := policy.GetMaxProveCommitDuration(av, sector.SectorType) + if err != nil { + return nil, big.Zero(), nil, ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("failed to get max prove commit duration: %w", err)}) + } if minExpiration := sector.TicketEpoch + policy.MaxPreCommitRandomnessLookback + msd + miner.MinSectorExpiration; expiration < minExpiration { expiration = minExpiration @@ -357,8 +380,16 @@ func (m *Sealing) handlePreCommitting(ctx statemachine.Context, sector SectorInf } } - params, deposit, tok, err := m.preCommitParams(ctx, sector) - if params == nil || err != nil { + params, pcd, tok, err := m.preCommitParams(ctx, sector) + if err != nil { + return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("preCommitParams: %w", err)}) + } + if params == nil { + return nil // event was sent in preCommitParams + } + + deposit, err := collateralSendAmount(ctx.Context(), m.api, m.maddr, cfg, pcd) + if err != nil { return err } @@ -389,7 +420,7 @@ func (m *Sealing) handlePreCommitting(ctx statemachine.Context, sector SectorInf return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("pushing message to mpool: %w", err)}) } - return ctx.Send(SectorPreCommitted{Message: mcid, PreCommitDeposit: 
deposit, PreCommitInfo: *params}) + return ctx.Send(SectorPreCommitted{Message: mcid, PreCommitDeposit: pcd, PreCommitInfo: *params}) } func (m *Sealing) handleSubmitPreCommitBatch(ctx statemachine.Context, sector SectorInfo) error { @@ -398,9 +429,12 @@ func (m *Sealing) handleSubmitPreCommitBatch(ctx statemachine.Context, sector Se } params, deposit, _, err := m.preCommitParams(ctx, sector) - if params == nil || err != nil { + if err != nil { return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("preCommitParams: %w", err)}) } + if params == nil { + return nil // event was sent in preCommitParams + } res, err := m.precommiter.AddPreCommit(ctx.Context(), sector, deposit, params) if err != nil { @@ -524,7 +558,7 @@ func (m *Sealing) handleCommitting(ctx statemachine.Context, sector SectorInfo) log.Info("scheduling seal proof computation...") - log.Infof("KOMIT %d %x(%d); %x(%d); %v; r:%x; d:%x", sector.SectorNumber, sector.TicketValue, sector.TicketEpoch, sector.SeedValue, sector.SeedEpoch, sector.pieceInfos(), sector.CommR, sector.CommD) + log.Infof("KOMIT %d %x(%d); %x(%d); %v; r:%s; d:%s", sector.SectorNumber, sector.TicketValue, sector.TicketEpoch, sector.SeedValue, sector.SeedEpoch, sector.pieceInfos(), sector.CommR, sector.CommD) if sector.CommD == nil || sector.CommR == nil { return ctx.Send(SectorCommitFailed{xerrors.Errorf("sector had nil commR or commD")}) @@ -628,6 +662,11 @@ func (m *Sealing) handleSubmitCommit(ctx statemachine.Context, sector SectorInfo collateral = big.Zero() } + collateral, err = collateralSendAmount(ctx.Context(), m.api, m.maddr, cfg, collateral) + if err != nil { + return err + } + goodFunds := big.Add(collateral, big.Int(m.feeCfg.MaxCommitGasFee)) from, _, err := m.addrSel(ctx.Context(), mi, api.CommitAddr, goodFunds, collateral) @@ -739,22 +778,3 @@ func (m *Sealing) handleFinalizeSector(ctx statemachine.Context, sector SectorIn return ctx.Send(SectorFinalized{}) } - -func (m *Sealing) handleProvingSector(ctx 
statemachine.Context, sector SectorInfo) error { - // TODO: track sector health / expiration - log.Infof("Proving sector %d", sector.SectorNumber) - - cfg, err := m.getConfig() - if err != nil { - return xerrors.Errorf("getting sealing config: %w", err) - } - - if err := m.sealer.ReleaseUnsealed(ctx.Context(), m.minerSector(sector.SectorType, sector.SectorNumber), sector.keepUnsealedRanges(true, cfg.AlwaysKeepUnsealedCopy)); err != nil { - log.Error(err) - } - - // TODO: Watch termination - // TODO: Auto-extend if set - - return nil -} diff --git a/extern/storage-sealing/types.go b/extern/storage-sealing/types.go index 58c35cf36ff..c5aed505a65 100644 --- a/extern/storage-sealing/types.go +++ b/extern/storage-sealing/types.go @@ -11,39 +11,22 @@ import ( "github.com/filecoin-project/go-state-types/exitcode" "github.com/filecoin-project/specs-storage/storage" + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" - "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" ) // Piece is a tuple of piece and deal info type PieceWithDealInfo struct { Piece abi.PieceInfo - DealInfo DealInfo + DealInfo api.PieceDealInfo } // Piece is a tuple of piece info and optional deal type Piece struct { Piece abi.PieceInfo - DealInfo *DealInfo // nil for pieces which do not appear in deals (e.g. filler pieces) -} - -// DealInfo is a tuple of deal identity and its schedule -type DealInfo struct { - PublishCid *cid.Cid - DealID abi.DealID - DealProposal *market.DealProposal - DealSchedule DealSchedule - KeepUnsealed bool -} - -// DealSchedule communicates the time interval of a storage deal. The deal must -// appear in a sealed (proven) sector no later than StartEpoch, otherwise it -// is invalid. 
-type DealSchedule struct { - StartEpoch abi.ChainEpoch - EndEpoch abi.ChainEpoch + DealInfo *api.PieceDealInfo // nil for pieces which do not appear in deals (e.g. filler pieces) } type Log struct { diff --git a/extern/storage-sealing/types_test.go b/extern/storage-sealing/types_test.go index aa314c37a68..68e2b1111c8 100644 --- a/extern/storage-sealing/types_test.go +++ b/extern/storage-sealing/types_test.go @@ -10,6 +10,7 @@ import ( cborutil "github.com/filecoin-project/go-cbor-util" "github.com/filecoin-project/go-state-types/abi" + api "github.com/filecoin-project/lotus/api" market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" tutils "github.com/filecoin-project/specs-actors/v2/support/testing" ) @@ -22,9 +23,9 @@ func TestSectorInfoSerialization(t *testing.T) { t.Fatal(err) } - dealInfo := DealInfo{ + dealInfo := api.PieceDealInfo{ DealID: d, - DealSchedule: DealSchedule{ + DealSchedule: api.DealSchedule{ StartEpoch: 0, EndEpoch: 100, }, diff --git a/extern/storage-sealing/utils.go b/extern/storage-sealing/utils.go index dadef227d66..3dc4c4d1ea3 100644 --- a/extern/storage-sealing/utils.go +++ b/extern/storage-sealing/utils.go @@ -1,9 +1,16 @@ package sealing import ( + "context" "math/bits" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" ) func fillersFromRem(in abi.UnpaddedPieceSize) ([]abi.UnpaddedPieceSize, error) { @@ -55,3 +62,30 @@ func (m *Sealing) GetSectorInfo(sid abi.SectorNumber) (SectorInfo, error) { err := m.sectors.Get(uint64(sid)).Get(&out) return out, err } + +func collateralSendAmount(ctx context.Context, api interface { + StateMinerAvailableBalance(context.Context, address.Address, TipSetToken) (big.Int, error) +}, maddr address.Address, cfg sealiface.Config, collateral abi.TokenAmount) (abi.TokenAmount, error) { + if 
cfg.CollateralFromMinerBalance { + if cfg.DisableCollateralFallback { + return big.Zero(), nil + } + + avail, err := api.StateMinerAvailableBalance(ctx, maddr, nil) + if err != nil { + return big.Zero(), xerrors.Errorf("getting available miner balance: %w", err) + } + + avail = big.Sub(avail, cfg.AvailableBalanceBuffer) + if avail.LessThan(big.Zero()) { + avail = big.Zero() + } + + collateral = big.Sub(collateral, avail) + if collateral.LessThan(big.Zero()) { + collateral = big.Zero() + } + } + + return collateral, nil +} diff --git a/gen/api/proxygen.go b/gen/api/proxygen.go index 71c2f414dd8..3e0766c31d3 100644 --- a/gen/api/proxygen.go +++ b/gen/api/proxygen.go @@ -298,6 +298,9 @@ import ( } err = doTemplate(w, m, ` + +var ErrNotSupported = xerrors.New("method not supported") + {{range .Infos}} type {{.Name}}Struct struct { {{range .Include}} @@ -321,11 +324,14 @@ type {{.Name}}Stub struct { {{$name := .Name}} {{range .Methods}} func (s *{{$name}}Struct) {{.Name}}({{.NamedParams}}) ({{.Results}}) { + if s.Internal.{{.Name}} == nil { + return {{.DefRes}}ErrNotSupported + } return s.Internal.{{.Name}}({{.ParamNames}}) } func (s *{{$name}}Stub) {{.Name}}({{.NamedParams}}) ({{.Results}}) { - return {{.DefRes}}xerrors.New("method not supported") + return {{.DefRes}}ErrNotSupported } {{end}} {{end}} diff --git a/gen/main.go b/gen/main.go index 9548344fd2a..0018b241d62 100644 --- a/gen/main.go +++ b/gen/main.go @@ -53,6 +53,8 @@ func main() { api.SealedRefs{}, api.SealTicket{}, api.SealSeed{}, + api.PieceDealInfo{}, + api.DealSchedule{}, ) if err != nil { fmt.Println(err) diff --git a/go.mod b/go.mod index 36ea7835cc9..bea2278844a 100644 --- a/go.mod +++ b/go.mod @@ -33,29 +33,30 @@ require ( github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2 github.com/filecoin-project/go-commp-utils v0.1.1-0.20210427191551-70bf140d31c7 github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 - github.com/filecoin-project/go-data-transfer 
v1.6.0 - github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a - github.com/filecoin-project/go-fil-markets v1.5.0 + github.com/filecoin-project/go-data-transfer v1.7.2 + github.com/filecoin-project/go-fil-commcid v0.1.0 + github.com/filecoin-project/go-fil-commp-hashhash v0.1.0 + github.com/filecoin-project/go-fil-markets v1.6.2 github.com/filecoin-project/go-jsonrpc v0.1.4-0.20210217175800-45ea43ac2bec github.com/filecoin-project/go-multistore v0.0.3 - github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20 + github.com/filecoin-project/go-padreader v0.0.0-20210723183308-812a16dc01b1 github.com/filecoin-project/go-paramfetch v0.0.2-0.20210614165157-25a6c7769498 - github.com/filecoin-project/go-state-types v0.1.1-0.20210506134452-99b279731c48 - github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe + github.com/filecoin-project/go-state-types v0.1.1-0.20210810190654-139e0e79e69e + github.com/filecoin-project/go-statemachine v1.0.1 github.com/filecoin-project/go-statestore v0.1.1 github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b github.com/filecoin-project/specs-actors v0.9.14 github.com/filecoin-project/specs-actors/v2 v2.3.5 github.com/filecoin-project/specs-actors/v3 v3.1.1 github.com/filecoin-project/specs-actors/v4 v4.0.1 - github.com/filecoin-project/specs-actors/v5 v5.0.1 + github.com/filecoin-project/specs-actors/v5 v5.0.4 github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506 github.com/filecoin-project/test-vectors/schema v0.0.5 github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1 github.com/gdamore/tcell/v2 v2.2.0 github.com/go-kit/kit v0.10.0 github.com/go-ole/go-ole v1.2.4 // indirect - github.com/golang/mock v1.5.0 + github.com/golang/mock v1.6.0 github.com/google/uuid v1.1.2 github.com/gorilla/mux v1.7.4 github.com/gorilla/websocket v1.4.2 @@ -77,7 +78,7 @@ require ( github.com/ipfs/go-ds-pebble v0.0.2-0.20200921225637-ce220f8ac459 
github.com/ipfs/go-filestore v1.0.0 github.com/ipfs/go-fs-lock v0.0.6 - github.com/ipfs/go-graphsync v0.6.1 + github.com/ipfs/go-graphsync v0.6.6 github.com/ipfs/go-ipfs-blockstore v1.0.3 github.com/ipfs/go-ipfs-chunker v0.0.5 github.com/ipfs/go-ipfs-ds-help v1.0.0 @@ -89,7 +90,7 @@ require ( github.com/ipfs/go-ipfs-util v0.0.2 github.com/ipfs/go-ipld-cbor v0.0.5 github.com/ipfs/go-ipld-format v0.2.0 - github.com/ipfs/go-log/v2 v2.1.3 + github.com/ipfs/go-log/v2 v2.3.0 github.com/ipfs/go-merkledag v0.3.2 github.com/ipfs/go-metrics-interface v0.0.1 github.com/ipfs/go-metrics-prometheus v0.0.2 @@ -99,38 +100,38 @@ require ( github.com/ipld/go-car v0.1.1-0.20201119040415-11b6074b6d4d github.com/ipld/go-ipld-prime v0.5.1-0.20201021195245-109253e8a018 github.com/kelseyhightower/envconfig v1.4.0 - github.com/lib/pq v1.7.0 + github.com/lib/pq v1.10.2 github.com/libp2p/go-buffer-pool v0.0.2 github.com/libp2p/go-eventbus v0.2.1 github.com/libp2p/go-libp2p v0.14.2 github.com/libp2p/go-libp2p-connmgr v0.2.4 - github.com/libp2p/go-libp2p-core v0.8.5 - github.com/libp2p/go-libp2p-discovery v0.5.0 + github.com/libp2p/go-libp2p-core v0.8.6 + github.com/libp2p/go-libp2p-discovery v0.5.1 github.com/libp2p/go-libp2p-kad-dht v0.11.0 github.com/libp2p/go-libp2p-mplex v0.4.1 github.com/libp2p/go-libp2p-noise v0.2.0 - github.com/libp2p/go-libp2p-peerstore v0.2.7 - github.com/libp2p/go-libp2p-pubsub v0.4.2-0.20210212194758-6c1addf493eb - github.com/libp2p/go-libp2p-quic-transport v0.10.0 + github.com/libp2p/go-libp2p-peerstore v0.2.8 + github.com/libp2p/go-libp2p-pubsub v0.5.3 + github.com/libp2p/go-libp2p-quic-transport v0.11.2 github.com/libp2p/go-libp2p-record v0.1.3 github.com/libp2p/go-libp2p-routing-helpers v0.2.3 - github.com/libp2p/go-libp2p-swarm v0.5.0 + github.com/libp2p/go-libp2p-swarm v0.5.3 github.com/libp2p/go-libp2p-tls v0.1.3 github.com/libp2p/go-libp2p-yamux v0.5.4 github.com/libp2p/go-maddr-filter v0.1.0 github.com/mattn/go-colorable v0.1.6 // indirect - 
github.com/mattn/go-isatty v0.0.12 + github.com/mattn/go-isatty v0.0.13 github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 github.com/mitchellh/go-homedir v1.1.0 github.com/multiformats/go-base32 v0.0.3 - github.com/multiformats/go-multiaddr v0.3.1 + github.com/multiformats/go-multiaddr v0.3.3 github.com/multiformats/go-multiaddr-dns v0.3.1 github.com/multiformats/go-multibase v0.0.3 - github.com/multiformats/go-multihash v0.0.14 + github.com/multiformats/go-multihash v0.0.15 github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333 github.com/opentracing/opentracing-go v1.2.0 github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a - github.com/prometheus/client_golang v1.6.0 + github.com/prometheus/client_golang v1.10.0 github.com/raulk/clock v1.1.0 github.com/raulk/go-watchdog v1.0.1 github.com/streadway/quantile v0.0.0-20150917103942-b0c588724d25 @@ -144,17 +145,16 @@ require ( github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 github.com/whyrusleeping/pubsub v0.0.0-20190708150250-92bcb0691325 github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542 - go.etcd.io/bbolt v1.3.4 go.opencensus.io v0.23.0 go.uber.org/dig v1.10.0 // indirect go.uber.org/fx v1.9.0 go.uber.org/multierr v1.6.0 go.uber.org/zap v1.16.0 - golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6 + golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/sys v0.0.0-20210426080607-c94f62235c83 + golang.org/x/sys v0.0.0-20210511113859-b0526f3d8744 golang.org/x/time v0.0.0-20191024005414-555d28b269f0 - golang.org/x/tools v0.0.0-20210106214847-113979e3529a + golang.org/x/tools v0.1.5 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 gopkg.in/cheggaaa/pb.v1 v1.0.28 gotest.tools v2.2.0+incompatible @@ -165,8 +165,6 @@ replace github.com/libp2p/go-libp2p-yamux => github.com/libp2p/go-libp2p-yamux v replace github.com/filecoin-project/lotus => ./ -replace 
github.com/golangci/golangci-lint => github.com/golangci/golangci-lint v1.18.0 - replace github.com/filecoin-project/filecoin-ffi => ./extern/filecoin-ffi replace github.com/filecoin-project/test-vectors => ./extern/test-vectors diff --git a/go.sum b/go.sum index f9499e2f9d5..89a1e8d3a60 100644 --- a/go.sum +++ b/go.sum @@ -27,8 +27,9 @@ dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1 dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= -github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9 h1:HD8gA2tkByhMAwYaFAX9w2l7vxvBQ5NMoxDrkhqhtn4= github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M= +github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= @@ -199,8 +200,9 @@ github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e/go.mod h1: github.com/dgraph-io/badger v1.5.5-0.20190226225317-8115aed38f8f/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ= github.com/dgraph-io/badger v1.6.0-rc1/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= -github.com/dgraph-io/badger v1.6.1 
h1:w9pSFNSdq/JPM1N12Fz/F/bzo993Is1W+Q7HjPzi7yg= github.com/dgraph-io/badger v1.6.1/go.mod h1:FRmFw3uxvcpa8zG3Rxs0th+hCLIuaQg8HlNV5bjgnuU= +github.com/dgraph-io/badger v1.6.2 h1:mNw0qs90GVgGGWylh0umH5iag1j6n/PeJtNvL6KY/x8= +github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= github.com/dgraph-io/badger/v2 v2.0.3/go.mod h1:3KY8+bsP8wI0OEnQJAKpd4wIJW/Mm32yw2j/9FUVnIM= github.com/dgraph-io/badger/v2 v2.2007.2 h1:EjjK0KqwaFMlPin1ajhP943VPENHJdEz1KLIegjaI3k= github.com/dgraph-io/badger/v2 v2.2007.2/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE= @@ -269,22 +271,25 @@ github.com/filecoin-project/go-bitfield v0.2.4 h1:uZ7MeE+XfM5lqrHJZ93OnhQKc/rveW github.com/filecoin-project/go-bitfield v0.2.4/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2 h1:av5fw6wmm58FYMgJeoB/lK9XXrgdugYiTqkdxjTy9k8= github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg= -github.com/filecoin-project/go-commp-utils v0.0.0-20201119054358-b88f7a96a434/go.mod h1:6s95K91mCyHY51RPWECZieD3SGWTqIFLf1mPOes9l5U= github.com/filecoin-project/go-commp-utils v0.1.1-0.20210427191551-70bf140d31c7 h1:U9Z+76pHCKBmtdxFV7JFZJj7OVm12I6dEKwtMVbq5p0= github.com/filecoin-project/go-commp-utils v0.1.1-0.20210427191551-70bf140d31c7/go.mod h1:6s95K91mCyHY51RPWECZieD3SGWTqIFLf1mPOes9l5U= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMXdBnCiXjfCYx/hLqFxccPoqsSveQFxVLvNxy9bus= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= github.com/filecoin-project/go-data-transfer v1.0.1/go.mod h1:UxvfUAY9v3ub0a21BSK9u3pB2aq30Y0KMsG+w9/ysyo= -github.com/filecoin-project/go-data-transfer v1.6.0 h1:DHIzEc23ydRCCBwtFet3MfgO8gMpZEnw60Y+s71oX6o= -github.com/filecoin-project/go-data-transfer v1.6.0/go.mod 
h1:E3WW4mCEYwU2y65swPEajSZoFWFmfXt7uwGduoACZQc= +github.com/filecoin-project/go-data-transfer v1.7.0/go.mod h1:GLRr5BmLEqsLwXfiRDG7uJvph22KGL2M4iOuF8EINaU= +github.com/filecoin-project/go-data-transfer v1.7.2 h1:iL3q5pxSloA7V2QucFofoVN3lquULz+Ml0KrNqMT5ZU= +github.com/filecoin-project/go-data-transfer v1.7.2/go.mod h1:GLRr5BmLEqsLwXfiRDG7uJvph22KGL2M4iOuF8EINaU= github.com/filecoin-project/go-ds-versioning v0.1.0 h1:y/X6UksYTsK8TLCI7rttCKEvl8btmWxyFMEeeWGUxIQ= github.com/filecoin-project/go-ds-versioning v0.1.0/go.mod h1:mp16rb4i2QPmxBnmanUx8i/XANp+PFCCJWiAb+VW4/s= github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= -github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a h1:hyJ+pUm/4U4RdEZBlg6k8Ma4rDiuvqyGpoICXAxwsTg= github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= +github.com/filecoin-project/go-fil-commcid v0.1.0 h1:3R4ds1A9r6cr8mvZBfMYxTS88OqLYEo6roi+GiIeOh8= +github.com/filecoin-project/go-fil-commcid v0.1.0/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= +github.com/filecoin-project/go-fil-commp-hashhash v0.1.0 h1:imrrpZWEHRnNqqv0tN7LXep5bFEVOVmQWHJvl2mgsGo= +github.com/filecoin-project/go-fil-commp-hashhash v0.1.0/go.mod h1:73S8WSEWh9vr0fDJVnKADhfIv/d6dCbAGaAGWbdJEI8= github.com/filecoin-project/go-fil-markets v1.0.5-0.20201113164554-c5eba40d5335/go.mod h1:AJySOJC00JRWEZzRG2KsfUnqEf5ITXxeX09BE9N4f9c= -github.com/filecoin-project/go-fil-markets v1.5.0 h1:3KEs01L8XFCEgujZ6ggFjr1XWjpjTQcmSSeo3I99I0k= -github.com/filecoin-project/go-fil-markets v1.5.0/go.mod h1:7be6zzFwaN8kxVeYZf/UUj/JilHC0ogPvWqE1TW8Ptk= +github.com/filecoin-project/go-fil-markets v1.6.2 h1:ib1sGUOF+hf50YwP7+p9yoK+9g84YcXzvuenxd6MYoE= +github.com/filecoin-project/go-fil-markets v1.6.2/go.mod h1:ZuFDagROUV6GfvBU//KReTQDw+EZci4rH7jMYTD10vs= github.com/filecoin-project/go-hamt-ipld v0.1.5 
h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM= github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24= github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 h1:b3UDemBYN2HNfk3KOXNuxgTTxlWi3xVvbQP0IT38fvM= @@ -296,8 +301,9 @@ github.com/filecoin-project/go-jsonrpc v0.1.4-0.20210217175800-45ea43ac2bec h1:r github.com/filecoin-project/go-jsonrpc v0.1.4-0.20210217175800-45ea43ac2bec/go.mod h1:XBBpuKIMaXIIzeqzO1iucq4GvbF8CxmXRFoezRh+Cx4= github.com/filecoin-project/go-multistore v0.0.3 h1:vaRBY4YiA2UZFPK57RNuewypB8u0DzzQwqsL0XarpnI= github.com/filecoin-project/go-multistore v0.0.3/go.mod h1:kaNqCC4IhU4B1uyr7YWFHd23TL4KM32aChS0jNkyUvQ= -github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20 h1:+/4aUeUoKr6AKfPE3mBhXA5spIV6UcKdTYDPNU2Tdmg= github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20/go.mod h1:mPn+LRRd5gEKNAtc+r3ScpW2JRU/pj4NBKdADYWHiak= +github.com/filecoin-project/go-padreader v0.0.0-20210723183308-812a16dc01b1 h1:0BogtftbcgyBx4lP2JWM00ZK7/pXmgnrDqKp9aLTgVs= +github.com/filecoin-project/go-padreader v0.0.0-20210723183308-812a16dc01b1/go.mod h1:VYVPJqwpsfmtoHnAmPx6MUwmrK6HIcDqZJiuZhtmfLQ= github.com/filecoin-project/go-paramfetch v0.0.2-0.20210614165157-25a6c7769498 h1:G10ezOvpH1CLXQ19EA9VWNwyL0mg536ujSayjV0yg0k= github.com/filecoin-project/go-paramfetch v0.0.2-0.20210614165157-25a6c7769498/go.mod h1:1FH85P8U+DUEmWk1Jkw3Bw7FrwTVUNHk/95PSPG+dts= github.com/filecoin-project/go-state-types v0.0.0-20200903145444-247639ffa6ad/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I= @@ -305,10 +311,12 @@ github.com/filecoin-project/go-state-types v0.0.0-20200904021452-1883f36ca2f4/go github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= 
github.com/filecoin-project/go-state-types v0.1.0/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= -github.com/filecoin-project/go-state-types v0.1.1-0.20210506134452-99b279731c48 h1:Jc4OprDp3bRDxbsrXNHPwJabZJM3iDy+ri8/1e0ZnX4= github.com/filecoin-project/go-state-types v0.1.1-0.20210506134452-99b279731c48/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= -github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe h1:dF8u+LEWeIcTcfUcCf3WFVlc81Fr2JKg8zPzIbBDKDw= +github.com/filecoin-project/go-state-types v0.1.1-0.20210810190654-139e0e79e69e h1:XAgb6HmgXaGRklNjhZoNMSIYriKLqjWXIqYMotg6iSs= +github.com/filecoin-project/go-state-types v0.1.1-0.20210810190654-139e0e79e69e/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig= +github.com/filecoin-project/go-statemachine v1.0.1 h1:LQ60+JDVjMdLxXmVFM2jjontzOYnfVE7u02CXV3WKSw= +github.com/filecoin-project/go-statemachine v1.0.1/go.mod h1:jZdXXiHa61n4NmgWFG4w8tnqgvZVHYbJ3yW7+y8bF54= github.com/filecoin-project/go-statestore v0.1.0/go.mod h1:LFc9hD+fRxPqiHiaqUEZOinUJB4WARkRfNl10O7kTnI= github.com/filecoin-project/go-statestore v0.1.1 h1:ufMFq00VqnT2CAuDpcGnwLnCX1I/c3OROw/kXVNSTZk= github.com/filecoin-project/go-statestore v0.1.1/go.mod h1:LFc9hD+fRxPqiHiaqUEZOinUJB4WARkRfNl10O7kTnI= @@ -331,8 +339,8 @@ github.com/filecoin-project/specs-actors/v4 v4.0.0/go.mod h1:TkHXf/l7Wyw4ZejyXIP github.com/filecoin-project/specs-actors/v4 v4.0.1 h1:AiWrtvJZ63MHGe6rn7tPu4nSUY8bA1KDNszqJaD5+Fg= github.com/filecoin-project/specs-actors/v4 v4.0.1/go.mod h1:TkHXf/l7Wyw4ZejyXIPS2rK8bBO0rdwhTZyQQgaglng= github.com/filecoin-project/specs-actors/v5 v5.0.0-20210512015452-4fe3889fff57/go.mod h1:283yBMMUSDB2abcjP/hhrwTkhb9h3sfM6KGrep/ZlBI= -github.com/filecoin-project/specs-actors/v5 v5.0.1 h1:PrYm5AKdMlJ/55eRW5laWcnaX66gyyDYBWvH38kNAMo= -github.com/filecoin-project/specs-actors/v5 
v5.0.1/go.mod h1:74euMDIXorusOBs/QL/LNkYsXZdDpLJwojWw6T03pdE= +github.com/filecoin-project/specs-actors/v5 v5.0.4 h1:OY7BdxJWlUfUFXWV/kpNBYGXNPasDIedf42T3sGx08s= +github.com/filecoin-project/specs-actors/v5 v5.0.4/go.mod h1:5BAKRAMsOOlD8+qCw4UvT/lTLInCJ3JwOWZbX8Ipwq4= github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506 h1:Ur/l2+6qN+lQiqjozWWc5p9UDaAMDZKTlDS98oRnlIw= github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506/go.mod h1:nJRRM7Aa9XVvygr3W9k6xGF46RWzr2zxF/iGoAIfA/g= github.com/filecoin-project/test-vectors/schema v0.0.5 h1:w3zHQhzM4pYxJDl21avXjOKBLF8egrvwUwjpT8TquDg= @@ -393,6 +401,8 @@ github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/godbus/dbus v0.0.0-20190402143921-271e53dc4968 h1:s+PDl6lozQ+dEUtUtQnO7+A2iPG3sK1pI4liU+jxn90= github.com/godbus/dbus v0.0.0-20190402143921-271e53dc4968/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus/v5 v5.0.3 h1:ZqHaoEF7TBzh4jzPmqVhE/5A1z9of6orkAe5uHoAeME= @@ -425,8 +435,8 @@ github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFU github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0 h1:jlYHihg//f7RRwuPfptm04yp4s7O6Kw8EZiVYIGcH0g= -github.com/golang/mock 
v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -439,8 +449,10 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf h1:gFVkHXmVAhEbxZVDln5V9GKrLaluNoFHDbrZwAWZgws= @@ -453,8 +465,10 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp 
v0.5.3 h1:x95R7cp+rSeeqAMI2knLtQ0DKlaBhv2NrtrOvafPHRo= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -605,8 +619,9 @@ github.com/ipfs/go-ds-badger v0.0.2/go.mod h1:Y3QpeSFWQf6MopLTiZD+VT6IC1yZqaGmjv github.com/ipfs/go-ds-badger v0.0.5/go.mod h1:g5AuuCGmr7efyzQhLL8MzwqcauPojGPUaHzfGTzuE3s= github.com/ipfs/go-ds-badger v0.0.7/go.mod h1:qt0/fWzZDoPW6jpQeqUjR5kBfhDNB65jd9YlmAvpQBk= github.com/ipfs/go-ds-badger v0.2.1/go.mod h1:Tx7l3aTph3FMFrRS838dcSJh+jjA7cX9DrGVwx/NOwE= -github.com/ipfs/go-ds-badger v0.2.3 h1:J27YvAcpuA5IvZUbeBxOcQgqnYHUPxoygc6QxxkodZ4= github.com/ipfs/go-ds-badger v0.2.3/go.mod h1:pEYw0rgg3FIrywKKnL+Snr+w/LjJZVMTBRn4FS6UHUk= +github.com/ipfs/go-ds-badger v0.2.7 h1:ju5REfIm+v+wgVnQ19xGLYPHYHbYLR6qJfmMbCDSK1I= +github.com/ipfs/go-ds-badger v0.2.7/go.mod h1:02rnztVKA4aZwDuaRPTf8mpqcKmXP7mLl6JPxd14JHA= github.com/ipfs/go-ds-badger2 v0.1.0/go.mod h1:pbR1p817OZbdId9EvLOhKBgUVTM3BMCSTan78lDDVaw= github.com/ipfs/go-ds-badger2 v0.1.1-0.20200708190120-187fc06f714e h1:Xi1nil8K2lBOorBS6Ys7+hmUCzH8fr3U9ipdL/IrcEI= github.com/ipfs/go-ds-badger2 v0.1.1-0.20200708190120-187fc06f714e/go.mod h1:lJnws7amT9Ehqzta0gwMrRsURU04caT0iRPr1W8AsOU= @@ -626,8 +641,9 @@ github.com/ipfs/go-fs-lock v0.0.6/go.mod h1:OTR+Rj9sHiRubJh3dRhD15Juhd/+w6VPOY28 github.com/ipfs/go-graphsync v0.1.0/go.mod h1:jMXfqIEDFukLPZHqDPp8tJMbHO9Rmeb9CEGevngQbmE= github.com/ipfs/go-graphsync v0.4.2/go.mod 
h1:/VmbZTUdUMTbNkgzAiCEucIIAU3BkLE2cZrDCVUhyi0= github.com/ipfs/go-graphsync v0.4.3/go.mod h1:mPOwDYv128gf8gxPFgXnz4fNrSYPsWyqisJ7ych+XDY= -github.com/ipfs/go-graphsync v0.6.1 h1:i9wN7YkBXWwIsUjVQeuaDxFB59yWZrG1xL564Nz7aGE= -github.com/ipfs/go-graphsync v0.6.1/go.mod h1:e2ZxnClqBBYAtd901g9vXMJzS47labjAtOzsWtOzKNk= +github.com/ipfs/go-graphsync v0.6.4/go.mod h1:5WyaeigpNdpiYQuW2vwpuecOoEfB4h747ZGEOKmAGTg= +github.com/ipfs/go-graphsync v0.6.6 h1:In7jjzvSXlrAUz4OjN41lxYf/dzkf1bVeVxLpwKMRo8= +github.com/ipfs/go-graphsync v0.6.6/go.mod h1:GdHT8JeuIZ0R4lSjFR16Oe4zPi5dXwKi9zR9ADVlcdk= github.com/ipfs/go-hamt-ipld v0.1.1/go.mod h1:1EZCr2v0jlCnhpa+aZ0JZYp8Tt2w16+JJOAVz17YcDk= github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw= @@ -693,8 +709,9 @@ github.com/ipfs/go-log v1.0.0/go.mod h1:JO7RzlMK6rA+CIxFMLOuB6Wf5b81GDiKElL7UPSI github.com/ipfs/go-log v1.0.1/go.mod h1:HuWlQttfN6FWNHRhlY5yMk/lW7evQC0HHGOxEwMRR8I= github.com/ipfs/go-log v1.0.2/go.mod h1:1MNjMxe0u6xvJZgeqbJ8vdo2TKaGwZ1a0Bpza+sr2Sk= github.com/ipfs/go-log v1.0.3/go.mod h1:OsLySYkwIbiSUR/yBTdv1qPtcE4FW3WPWk/ewz9Ru+A= -github.com/ipfs/go-log v1.0.4 h1:6nLQdX4W8P9yZZFH7mO+X/PzjN8Laozm/lMJ6esdgzY= github.com/ipfs/go-log v1.0.4/go.mod h1:oDCg2FkjogeFOhqqb+N39l2RpTNPL6F/StPkB3kPgcs= +github.com/ipfs/go-log v1.0.5 h1:2dOuUCB1Z7uoczMWgAyDck5JLb72zHzrMnGnCNNbvY8= +github.com/ipfs/go-log v1.0.5/go.mod h1:j0b8ZoR+7+R99LD9jZ6+AJsrzkPbSXbZfGakb5JPtIo= github.com/ipfs/go-log/v2 v2.0.1/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= github.com/ipfs/go-log/v2 v2.0.2/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= github.com/ipfs/go-log/v2 v2.0.3/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= @@ -703,10 +720,9 @@ github.com/ipfs/go-log/v2 v2.0.8/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscw github.com/ipfs/go-log/v2 v2.1.1/go.mod 
h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= github.com/ipfs/go-log/v2 v2.1.2-0.20200626104915-0016c0b4b3e4/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= github.com/ipfs/go-log/v2 v2.1.2/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= -github.com/ipfs/go-log/v2 v2.1.3 h1:1iS3IU7aXRlbgUpN8yTTpJ53NXYjAe37vcI5+5nYrzk= -github.com/ipfs/go-log/v2 v2.1.3 h1:1iS3IU7aXRlbgUpN8yTTpJ53NXYjAe37vcI5+5nYrzk= -github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g= github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g= +github.com/ipfs/go-log/v2 v2.3.0 h1:31Re/cPqFHpsRHgyVwjWADPoF0otB1WrjTy8ZFYwEZU= +github.com/ipfs/go-log/v2 v2.3.0/go.mod h1:QqGoj30OTpnKaG/LKTGTxoP2mmQtjVMEnK72gynbe/g= github.com/ipfs/go-merkledag v0.0.3/go.mod h1:Oc5kIXLHokkE1hWGMBHw+oxehkAaTOqtEb7Zbh6BhLA= github.com/ipfs/go-merkledag v0.0.6/go.mod h1:QYPdnlvkOg7GnQRofu9XZimC5ZW5Wi3bKys/4GQQfto= github.com/ipfs/go-merkledag v0.2.3/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= @@ -796,12 +812,14 @@ github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCV github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod 
h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kabukky/httpscerts v0.0.0-20150320125433-617593d7dcb3/go.mod h1:BYpt4ufZiIGv2nXn4gMxnfKV306n3mWXgNu/d2TqdTU= github.com/kami-zh/go-capturer v0.0.0-20171211120116-e492ea43421d/go.mod h1:P2viExyCEfeWGU259JnaQ34Inuec4R38JCyBx2edgD0= github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= @@ -815,6 +833,10 @@ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= +github.com/klauspost/compress v1.11.7 h1:0hzRabrMN4tSTvMfnL3SCv1ZGeAP23ynzodBgaHeMeg= +github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/cpuid/v2 v2.0.4 h1:g0I61F2K2DjRHz1cnxlkNSBIaePVoJIjjnHui8QHbiw= +github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= @@ -829,11 +851,12 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod 
h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/lib/pq v1.7.0 h1:h93mCPfUSkaul3Ka/VG8uZdmW1uMHDGxzu0NWHuJmHY= -github.com/lib/pq v1.7.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.2 h1:AqzbZs4ZoCBp+GtejcpCpcxM3zlSMx29dXbUSeVtJb8= +github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/libp2p/go-addr-util v0.0.1/go.mod h1:4ac6O7n9rIAKB1dnd+s8IbbMXkt+oBpzX4/+RACcnlQ= -github.com/libp2p/go-addr-util v0.0.2 h1:7cWK5cdA5x72jX0g8iLrQWm5TRJZ6CzGdPEhWj7plWU= github.com/libp2p/go-addr-util v0.0.2/go.mod h1:Ecd6Fb3yIuLzq4bD7VcywcVSBtefcAwnUISBM3WG15E= +github.com/libp2p/go-addr-util v0.1.0 h1:acKsntI33w2bTU7tC9a0SaPimJGfSI0bFKC18ChxeVI= +github.com/libp2p/go-addr-util v0.1.0/go.mod h1:6I3ZYuFr2O/9D+SoyM0zEw0EF3YkldtTX406BpdQMqw= github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ= github.com/libp2p/go-buffer-pool v0.0.2 h1:QNK2iAFa8gjAe1SPz6mHSMuCcjs+X1wlHzeOSqcmlfs= github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= @@ -934,8 +957,9 @@ github.com/libp2p/go-libp2p-core v0.7.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJB github.com/libp2p/go-libp2p-core v0.8.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= github.com/libp2p/go-libp2p-core v0.8.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= github.com/libp2p/go-libp2p-core v0.8.2/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= -github.com/libp2p/go-libp2p-core v0.8.5 h1:aEgbIcPGsKy6zYcC+5AJivYFedhYa4sW7mIpWpUaLKw= github.com/libp2p/go-libp2p-core v0.8.5/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-core v0.8.6 h1:3S8g006qG6Tjpj1JdRK2S+TWc2DJQKX/RG9fdLeiLSU= +github.com/libp2p/go-libp2p-core v0.8.6/go.mod h1:dgHr0l0hIKfWpGpqAMbpo19pen9wJfdCGv51mTmdpmM= github.com/libp2p/go-libp2p-crypto v0.0.1/go.mod h1:yJkNyDmO341d5wwXxDUGO0LykUVT72ImHNUqh5D/dBE= github.com/libp2p/go-libp2p-crypto v0.0.2/go.mod 
h1:eETI5OUfBnvARGOHrJz2eWNyTUxEGZnBxMcbUjfIj4I= github.com/libp2p/go-libp2p-crypto v0.1.0 h1:k9MFy+o2zGDNGsaoZl0MA3iZ75qXxr9OOoAZF+sD5OQ= @@ -947,8 +971,9 @@ github.com/libp2p/go-libp2p-discovery v0.1.0/go.mod h1:4F/x+aldVHjHDHuX85x1zWoFT github.com/libp2p/go-libp2p-discovery v0.2.0/go.mod h1:s4VGaxYMbw4+4+tsoQTqh7wfxg97AEdo4GYBt6BadWg= github.com/libp2p/go-libp2p-discovery v0.3.0/go.mod h1:o03drFnz9BVAZdzC/QUQ+NeQOu38Fu7LJGEOK2gQltw= github.com/libp2p/go-libp2p-discovery v0.4.0/go.mod h1:bZ0aJSrFc/eX2llP0ryhb1kpgkPyTo23SJ5b7UQCMh4= -github.com/libp2p/go-libp2p-discovery v0.5.0 h1:Qfl+e5+lfDgwdrXdu4YNCWyEo3fWuP+WgN9mN0iWviQ= github.com/libp2p/go-libp2p-discovery v0.5.0/go.mod h1:+srtPIU9gDaBNu//UHvcdliKBIcr4SfDcm0/PfPJLug= +github.com/libp2p/go-libp2p-discovery v0.5.1 h1:CJylx+h2+4+s68GvrM4pGNyfNhOYviWBPtVv5PA7sfo= +github.com/libp2p/go-libp2p-discovery v0.5.1/go.mod h1:+srtPIU9gDaBNu//UHvcdliKBIcr4SfDcm0/PfPJLug= github.com/libp2p/go-libp2p-host v0.0.1/go.mod h1:qWd+H1yuU0m5CwzAkvbSjqKairayEHdR5MMl7Cwa7Go= github.com/libp2p/go-libp2p-host v0.0.3/go.mod h1:Y/qPyA6C8j2coYyos1dfRm0I8+nvd4TGrDGt4tA7JR8= github.com/libp2p/go-libp2p-interface-connmgr v0.0.1/go.mod h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= @@ -1002,20 +1027,22 @@ github.com/libp2p/go-libp2p-peerstore v0.2.2/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRj github.com/libp2p/go-libp2p-peerstore v0.2.3/go.mod h1:K8ljLdFn590GMttg/luh4caB/3g0vKuY01psze0upRw= github.com/libp2p/go-libp2p-peerstore v0.2.4/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= github.com/libp2p/go-libp2p-peerstore v0.2.6/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= -github.com/libp2p/go-libp2p-peerstore v0.2.7 h1:83JoLxyR9OYTnNfB5vvFqvMUv/xDNa6NoPHnENhBsGw= github.com/libp2p/go-libp2p-peerstore v0.2.7/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= +github.com/libp2p/go-libp2p-peerstore v0.2.8 h1:nJghUlUkFVvyk7ccsM67oFA6kqUkwyCM1G4WPVMCWYA= +github.com/libp2p/go-libp2p-peerstore v0.2.8/go.mod 
h1:gGiPlXdz7mIHd2vfAsHzBNAMqSDkt2UBFwgcITgw1lA= github.com/libp2p/go-libp2p-pnet v0.2.0 h1:J6htxttBipJujEjz1y0a5+eYoiPcFHhSYHH6na5f0/k= github.com/libp2p/go-libp2p-pnet v0.2.0/go.mod h1:Qqvq6JH/oMZGwqs3N1Fqhv8NVhrdYcO0BW4wssv21LA= github.com/libp2p/go-libp2p-protocol v0.0.1/go.mod h1:Af9n4PiruirSDjHycM1QuiMi/1VZNHYcK8cLgFJLZ4s= github.com/libp2p/go-libp2p-protocol v0.1.0/go.mod h1:KQPHpAabB57XQxGrXCNvbL6UEXfQqUgC/1adR2Xtflk= github.com/libp2p/go-libp2p-pubsub v0.1.1/go.mod h1:ZwlKzRSe1eGvSIdU5bD7+8RZN/Uzw0t1Bp9R1znpR/Q= github.com/libp2p/go-libp2p-pubsub v0.3.2-0.20200527132641-c0712c6e92cf/go.mod h1:TxPOBuo1FPdsTjFnv+FGZbNbWYsp74Culx+4ViQpato= -github.com/libp2p/go-libp2p-pubsub v0.4.2-0.20210212194758-6c1addf493eb h1:HExLcdXn8fgtXPciUw97O5NNhBn31dt6d9fVUD4cngo= -github.com/libp2p/go-libp2p-pubsub v0.4.2-0.20210212194758-6c1addf493eb/go.mod h1:izkeMLvz6Ht8yAISXjx60XUQZMq9ZMe5h2ih4dLIBIQ= +github.com/libp2p/go-libp2p-pubsub v0.5.3 h1:XCn5xvgA/AKpbbaeqbomfKtQCbT9QsU39tYsVj0IndQ= +github.com/libp2p/go-libp2p-pubsub v0.5.3/go.mod h1:gVOzwebXVdSMDQBTfH8ACO5EJ4SQrvsHqCmYsCZpD0E= github.com/libp2p/go-libp2p-quic-transport v0.1.1/go.mod h1:wqG/jzhF3Pu2NrhJEvE+IE0NTHNXslOPn9JQzyCAxzU= github.com/libp2p/go-libp2p-quic-transport v0.5.0/go.mod h1:IEcuC5MLxvZ5KuHKjRu+dr3LjCT1Be3rcD/4d8JrX8M= -github.com/libp2p/go-libp2p-quic-transport v0.10.0 h1:koDCbWD9CCHwcHZL3/WEvP2A+e/o5/W5L3QS/2SPMA0= github.com/libp2p/go-libp2p-quic-transport v0.10.0/go.mod h1:RfJbZ8IqXIhxBRm5hqUEJqjiiY8xmEuq3HUDS993MkA= +github.com/libp2p/go-libp2p-quic-transport v0.11.2 h1:p1YQDZRHH4Cv2LPtHubqlQ9ggz4CKng/REZuXZbZMhM= +github.com/libp2p/go-libp2p-quic-transport v0.11.2/go.mod h1:wlanzKtIh6pHrq+0U3p3DY9PJfGqxMgPaGKaK5LifwQ= github.com/libp2p/go-libp2p-record v0.0.1/go.mod h1:grzqg263Rug/sRex85QrDOLntdFAymLDLm7lxMgU79Q= github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q= github.com/libp2p/go-libp2p-record v0.1.1/go.mod 
h1:VRgKajOyMVgP/F0L5g3kH7SVskp17vFi2xheb5uMJtg= @@ -1043,8 +1070,9 @@ github.com/libp2p/go-libp2p-swarm v0.2.7/go.mod h1:ZSJ0Q+oq/B1JgfPHJAT2HTall+xYR github.com/libp2p/go-libp2p-swarm v0.2.8/go.mod h1:JQKMGSth4SMqonruY0a8yjlPVIkb0mdNSwckW7OYziM= github.com/libp2p/go-libp2p-swarm v0.3.0/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk= github.com/libp2p/go-libp2p-swarm v0.3.1/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk= -github.com/libp2p/go-libp2p-swarm v0.5.0 h1:HIK0z3Eqoo8ugmN8YqWAhD2RORgR+3iNXYG4U2PFd1E= github.com/libp2p/go-libp2p-swarm v0.5.0/go.mod h1:sU9i6BoHE0Ve5SKz3y9WfKrh8dUat6JknzUehFx8xW4= +github.com/libp2p/go-libp2p-swarm v0.5.3 h1:hsYaD/y6+kZff1o1Mc56NcuwSg80lIphTS/zDk3mO4M= +github.com/libp2p/go-libp2p-swarm v0.5.3/go.mod h1:NBn7eNW2lu568L7Ns9wdFrOhgRlkRnIDg0FLKbuu3i8= github.com/libp2p/go-libp2p-testing v0.0.1/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= @@ -1053,8 +1081,9 @@ github.com/libp2p/go-libp2p-testing v0.1.0/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eq github.com/libp2p/go-libp2p-testing v0.1.1/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= github.com/libp2p/go-libp2p-testing v0.1.2-0.20200422005655-8775583591d8/go.mod h1:Qy8sAncLKpwXtS2dSnDOP8ktexIAHKu+J+pnZOFZLTc= github.com/libp2p/go-libp2p-testing v0.3.0/go.mod h1:efZkql4UZ7OVsEfaxNHZPzIehtsBXMrXnCfJIgDti5g= -github.com/libp2p/go-libp2p-testing v0.4.0 h1:PrwHRi0IGqOwVQWR3xzgigSlhlLfxgfXgkHxr77EghQ= github.com/libp2p/go-libp2p-testing v0.4.0/go.mod h1:Q+PFXYoiYFN5CAEG2w3gLPEzotlKsNSbKQ/lImlOWF0= +github.com/libp2p/go-libp2p-testing v0.4.2 h1:IOiA5mMigi+eEjf4J+B7fepDhsjtsoWA9QbsCqbNp5U= +github.com/libp2p/go-libp2p-testing v0.4.2/go.mod h1:Q+PFXYoiYFN5CAEG2w3gLPEzotlKsNSbKQ/lImlOWF0= github.com/libp2p/go-libp2p-tls v0.1.3 
h1:twKMhMu44jQO+HgQK9X8NHO5HkeJu2QbhLzLJpa8oNM= github.com/libp2p/go-libp2p-tls v0.1.3/go.mod h1:wZfuewxOndz5RTnCAxFliGjvYSDA40sKitV4c50uI1M= github.com/libp2p/go-libp2p-transport v0.0.1/go.mod h1:UzbUs9X+PHOSw7S3ZmeOxfnwaQY5vGDzZmKPod3N3tk= @@ -1065,8 +1094,9 @@ github.com/libp2p/go-libp2p-transport-upgrader v0.0.4/go.mod h1:RGq+tupk+oj7PzL2 github.com/libp2p/go-libp2p-transport-upgrader v0.1.1/go.mod h1:IEtA6or8JUbsV07qPW4r01GnTenLW4oi3lOPbUMGJJA= github.com/libp2p/go-libp2p-transport-upgrader v0.2.0/go.mod h1:mQcrHj4asu6ArfSoMuyojOdjx73Q47cYD7s5+gZOlns= github.com/libp2p/go-libp2p-transport-upgrader v0.3.0/go.mod h1:i+SKzbRnvXdVbU3D1dwydnTmKRPXiAR/fyvi1dXuL4o= -github.com/libp2p/go-libp2p-transport-upgrader v0.4.2 h1:4JsnbfJzgZeRS9AWN7B9dPqn/LY/HoQTlO9gtdJTIYM= github.com/libp2p/go-libp2p-transport-upgrader v0.4.2/go.mod h1:NR8ne1VwfreD5VIWIU62Agt/J18ekORFU/j1i2y8zvk= +github.com/libp2p/go-libp2p-transport-upgrader v0.4.6 h1:SHt3g0FslnqIkEWF25YOB8UCOCTpGAVvHRWQYJ+veiI= +github.com/libp2p/go-libp2p-transport-upgrader v0.4.6/go.mod h1:JE0WQuQdy+uLZ5zOaI3Nw9dWGYJIA7mywEtP2lMvnyk= github.com/libp2p/go-libp2p-yamux v0.5.1 h1:sX4WQPHMhRxJE5UZTfjEuBvlQWXB5Bo3A2JK9ZJ9EM0= github.com/libp2p/go-libp2p-yamux v0.5.1/go.mod h1:dowuvDu8CRWmr0iqySMiSxK+W0iL5cMVO9S94Y6gkv4= github.com/libp2p/go-maddr-filter v0.0.1/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= @@ -1095,6 +1125,7 @@ github.com/libp2p/go-nat v0.0.5 h1:qxnwkco8RLKqVh1NmjQ+tJ8p8khNLFxuElYG/TwqW4Q= github.com/libp2p/go-nat v0.0.5/go.mod h1:B7NxsVNPZmRLvMOwiEO1scOSyjA56zxYAGv1yQgRkEU= github.com/libp2p/go-netroute v0.1.2/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= github.com/libp2p/go-netroute v0.1.3/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= +github.com/libp2p/go-netroute v0.1.5/go.mod h1:V1SR3AaECRkEQCoFFzYwVYWvYIEtlxx89+O3qcpCl4A= github.com/libp2p/go-netroute v0.1.6 h1:ruPJStbYyXVYGQ81uzEDzuvbYRLKRrLvTYd33yomC38= github.com/libp2p/go-netroute v0.1.6/go.mod 
h1:AqhkMh0VuWmfgtxKPp3Oc1LdU5QSWS7wl0QLhSZqXxQ= github.com/libp2p/go-openssl v0.0.2/go.mod h1:v8Zw2ijCSWBQi8Pq5GAixw6DbFfa9u6VIYDXnvOXkc0= @@ -1109,8 +1140,9 @@ github.com/libp2p/go-reuseport v0.0.2/go.mod h1:SPD+5RwGC7rcnzngoYC86GjPzjSywuQy github.com/libp2p/go-reuseport-transport v0.0.1/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs= github.com/libp2p/go-reuseport-transport v0.0.2/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs= github.com/libp2p/go-reuseport-transport v0.0.3/go.mod h1:Spv+MPft1exxARzP2Sruj2Wb5JSyHNncjf1Oi2dEbzM= -github.com/libp2p/go-reuseport-transport v0.0.4 h1:OZGz0RB620QDGpv300n1zaOcKGGAoGVf8h9txtt/1uM= github.com/libp2p/go-reuseport-transport v0.0.4/go.mod h1:trPa7r/7TJK/d+0hdBLOCGvpQQVOU74OXbNCIMkufGw= +github.com/libp2p/go-reuseport-transport v0.0.5 h1:lJzi+vSYbyJj2faPKLxNGWEIBcaV/uJmyvsUxXy2mLw= +github.com/libp2p/go-reuseport-transport v0.0.5/go.mod h1:TC62hhPc8qs5c/RoXDZG6YmjK+/YWUPC0yYmeUecbjc= github.com/libp2p/go-sockaddr v0.0.2/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= github.com/libp2p/go-sockaddr v0.1.0/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= github.com/libp2p/go-sockaddr v0.1.1 h1:yD80l2ZOdGksnOyHrhxDdTDFrf7Oy+v3FMVArIRgZxQ= @@ -1126,8 +1158,9 @@ github.com/libp2p/go-tcp-transport v0.0.4/go.mod h1:+E8HvC8ezEVOxIo3V5vCK9l1y/19 github.com/libp2p/go-tcp-transport v0.1.0/go.mod h1:oJ8I5VXryj493DEJ7OsBieu8fcg2nHGctwtInJVpipc= github.com/libp2p/go-tcp-transport v0.1.1/go.mod h1:3HzGvLbx6etZjnFlERyakbaYPdfjg2pWP97dFZworkY= github.com/libp2p/go-tcp-transport v0.2.0/go.mod h1:vX2U0CnWimU4h0SGSEsg++AzvBcroCGYw28kh94oLe0= -github.com/libp2p/go-tcp-transport v0.2.1 h1:ExZiVQV+h+qL16fzCWtd1HSzPsqWottJ8KXwWaVi8Ns= github.com/libp2p/go-tcp-transport v0.2.1/go.mod h1:zskiJ70MEfWz2MKxvFB/Pv+tPIB1PpPUrHIWQ8aFw7M= +github.com/libp2p/go-tcp-transport v0.2.7 h1:Z8Kc/Kb8tD84WiaH55xAlaEnkqzrp88jSEySCKV4+gg= +github.com/libp2p/go-tcp-transport v0.2.7/go.mod h1:lue9p1b3VmZj1MhhEGB/etmvF/nBQ0X9CW2DutBT3MM= 
github.com/libp2p/go-testutil v0.0.1/go.mod h1:iAcJc/DKJQanJ5ws2V+u5ywdL2n12X1WbbEG+Jjy69I= github.com/libp2p/go-testutil v0.1.0/go.mod h1:81b2n5HypcVyrCg/MJx4Wgfp/VHojytjVe/gLzZ2Ehc= github.com/libp2p/go-ws-transport v0.0.1/go.mod h1:p3bKjDWHEgtuKKj+2OdPYs5dAPIjtpQGHF2tJfGz7Ww= @@ -1148,8 +1181,9 @@ github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-b github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lucas-clemente/quic-go v0.11.2/go.mod h1:PpMmPfPKO9nKJ/psF49ESTAGQSdfXxlg1otPbEB2nOw= github.com/lucas-clemente/quic-go v0.16.0/go.mod h1:I0+fcNTdb9eS1ZcjQZbDVPGchJ86chcIxPALn9lEJqE= -github.com/lucas-clemente/quic-go v0.19.3 h1:eCDQqvGBB+kCTkA0XrAFtNe81FMa0/fn4QSoeAbmiF4= github.com/lucas-clemente/quic-go v0.19.3/go.mod h1:ADXpNbTQjq1hIzCpB+y/k5iz4n4z4IwqoLb94Kh5Hu8= +github.com/lucas-clemente/quic-go v0.21.2 h1:8LqqL7nBQFDUINadW0fHV/xSaCQJgmJC0Gv+qUnjd78= +github.com/lucas-clemente/quic-go v0.21.2/go.mod h1:vF5M1XqhBAHgbjKcJOXY3JZz3GP0T3FQhz/uyOUS38Q= github.com/lucasb-eyer/go-colorful v1.0.3 h1:QIbQXiugsb+q10B+MI+7DI1oQLdmnep86tWFlaaUAac= github.com/lucasb-eyer/go-colorful v1.0.3/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= github.com/lufia/iostat v1.1.0/go.mod h1:rEPNA0xXgjHQjuI5Cy05sLlS2oRcSlWHRLrvh/AQ+Pg= @@ -1167,10 +1201,17 @@ github.com/marten-seemann/qpack v0.1.0/go.mod h1:LFt1NU/Ptjip0C2CPkhimBz5CGE3WGD github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc= github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk= github.com/marten-seemann/qtls v0.9.1/go.mod h1:T1MmAdDPyISzxlK6kjRr0pcZFBVd1OZbBb/j3cvzHhk= -github.com/marten-seemann/qtls v0.10.0 h1:ECsuYUKalRL240rRD4Ri33ISb7kAQ3qGDlrrl55b2pc= github.com/marten-seemann/qtls v0.10.0/go.mod h1:UvMd1oaYDACI99/oZUYLzMCkBXQVT0aGm99sJhbT8hs= -github.com/marten-seemann/qtls-go1-15 v0.1.1 h1:LIH6K34bPVttyXnUWixk0bzH6/N07VxbSabxn5A5gZQ= 
github.com/marten-seemann/qtls-go1-15 v0.1.1/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= +github.com/marten-seemann/qtls-go1-15 v0.1.4/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= +github.com/marten-seemann/qtls-go1-15 v0.1.5 h1:Ci4EIUN6Rlb+D6GmLdej/bCQ4nPYNtVXQB+xjiXE1nk= +github.com/marten-seemann/qtls-go1-15 v0.1.5/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= +github.com/marten-seemann/qtls-go1-16 v0.1.4 h1:xbHbOGGhrenVtII6Co8akhLEdrawwB2iHl5yhJRpnco= +github.com/marten-seemann/qtls-go1-16 v0.1.4/go.mod h1:gNpI2Ol+lRS3WwSOtIUUtRwZEQMXjYK+dQSBFbethAk= +github.com/marten-seemann/qtls-go1-17 v0.1.0-rc.1 h1:/rpmWuGvceLwwWuaKPdjpR4JJEUH0tq64/I3hvzaNLM= +github.com/marten-seemann/qtls-go1-17 v0.1.0-rc.1/go.mod h1:fz4HIxByo+LlWcreM4CZOYNuz3taBQ8rN2X6FqvaWo8= +github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= +github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= @@ -1182,8 +1223,9 @@ github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= -github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.13 h1:qdl+GuBjcsKKDco5BsxPJlId98mSWNKqYA+Co0SC1yA= +github.com/mattn/go-isatty v0.0.13/go.mod 
h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.10 h1:CoZ3S2P7pvtP45xOtBw+/mDL2z0RKI576gSkzRRpdGg= @@ -1206,14 +1248,21 @@ github.com/miekg/dns v1.1.28/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7 github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/dns v1.1.41 h1:WMszZWJG0XmzbK9FEmzH2TVcqYzFesusSIB41b8KHxY= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= +github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8= +github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms= +github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc= +github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU= +github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc= +github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= github.com/minio/sha256-simd v0.1.0/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod 
h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= -github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU= github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= +github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= @@ -1246,8 +1295,9 @@ github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y9 github.com/multiformats/go-multiaddr v0.2.1/go.mod h1:s/Apk6IyxfvMjDafnhJgJ3/46z7tZ04iMk5wP4QMGGE= github.com/multiformats/go-multiaddr v0.2.2/go.mod h1:NtfXiOtHvghW9KojvtySjH5y0u0xW5UouOmQQrn6a3Y= github.com/multiformats/go-multiaddr v0.3.0/go.mod h1:dF9kph9wfJ+3VLAaeBqo9Of8x4fJxp6ggJGteB8HQTI= -github.com/multiformats/go-multiaddr v0.3.1 h1:1bxa+W7j9wZKTZREySx1vPMs2TqrYWjVZ7zE6/XLG1I= github.com/multiformats/go-multiaddr v0.3.1/go.mod h1:uPbspcUPd5AfaP6ql3ujFY+QWzmBD8uLLL4bXW0XfGc= +github.com/multiformats/go-multiaddr v0.3.3 h1:vo2OTSAqnENB2rLk79pLtr+uhj+VAzSe3uef5q0lRSs= +github.com/multiformats/go-multiaddr v0.3.3/go.mod h1:lCKNGP1EQ1eZ35Za2wlqnabm9xQkib3fyB+nZXHLag0= github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.0.3/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= @@ -1278,8 +1328,9 @@ github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa github.com/multiformats/go-multihash v0.0.9/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= 
github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= -github.com/multiformats/go-multihash v0.0.14 h1:QoBceQYQQtNUuf6s7wHxnE2c8bhbMqhfGzNI032se/I= github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= +github.com/multiformats/go-multihash v0.0.15 h1:hWOPdrNqDjwHDx82vsYGSDZNyktOJJ2dzZJzFkOV1jM= +github.com/multiformats/go-multihash v0.0.15/go.mod h1:D6aZrWNLFTV/ynMpKsNtB40mJzmCl4jb1alC0OvHiHg= github.com/multiformats/go-multistream v0.0.1/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= github.com/multiformats/go-multistream v0.0.4/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= @@ -1294,6 +1345,7 @@ github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXS github.com/multiformats/go-varint v0.0.6 h1:gk85QWKxh3TazbLxED/NlDVv8+q+ReFJk7Y2W/KhfNY= github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= @@ -1308,8 +1360,9 @@ github.com/nikkolasg/hexjson v0.0.0-20181101101858-78e39397e00c h1:5bFTChQxSKNwy github.com/nikkolasg/hexjson v0.0.0-20181101101858-78e39397e00c/go.mod h1:7qN3Y0BvzRUf4LofcoJplQL10lsFDb4PYlePTVwrP28= github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229 
h1:E2B8qYyeSgv5MXpmzZXRNp8IAQ4vjxIjhpAf5hv/tAg= github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229/go.mod h1:0aYXnNPJ8l7uZxf45rWW1a/uME32OF0rhiYGNQ2oF2E= -github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= @@ -1319,16 +1372,19 @@ github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= +github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= 
github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= -github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.13.0 h1:7lLHu94wT9Ij0o6EWWclhu0aOh32VxhkwEJvzuWPeak= +github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333 h1:CznVS40zms0Dj5he4ERo+fRPtO0qxUk8lA8Xu3ddet0= github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333/go.mod h1:Ag6rSXkHIckQmjFBCweJEEt1mrTPBv8b9W4aU/NQWfI= @@ -1379,8 +1435,11 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.6.0 h1:YVPodQOcK15POxhgARIvnDRVpLcuK8mglnMrWfyrw6A= github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU= +github.com/prometheus/client_golang v1.10.0 h1:/o0BDeWzLWXNZ+4q5gXltUvaMpJqckTa+jTNoB+z4cg= +github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= 
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -1395,8 +1454,10 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8 github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.18.0 h1:WCVKW7aL6LEe1uryfI9dnEc2ZqNB1Fn0ok930v0iL1Y= +github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/node_exporter v1.0.0-rc.0.0.20200428091818-01054558c289/go.mod h1:FGbBv5OPKjch+jNUJmEQpMZytIdyW0NdBtWFcfSKusc= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -1407,8 +1468,11 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.1.0 h1:jhMy6QXfi3y2HEzFoyuCj40z4OZIIHHPtFyCMftmvKA= github.com/prometheus/procfs v0.1.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs 
v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/raulk/clock v1.1.0 h1:dpb29+UKMbLqiU/jqIJptgLR1nn23HLgMY0sTCDza5Y= github.com/raulk/clock v1.1.0/go.mod h1:3MpVxdZ/ODBQDxbN+kzshf5OSZwPjtMDx6BBXBmOeY0= github.com/raulk/go-watchdog v1.0.1 h1:qgm3DIJAeb+2byneLrQJ7kvmDLGxN2vy3apXyGaDKN4= @@ -1611,6 +1675,7 @@ github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1: github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/zondax/hid v0.9.0 h1:eiT3P6vNxAEVxXMw66eZUAAnU2zD33JBkfG/EnfAKl8= github.com/zondax/hid v0.9.0/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= github.com/zondax/ledger-go v0.12.1 h1:hYRcyznPRJp+5mzF2sazTLP2nGvGjYDD2VzhHhFomLU= @@ -1649,8 +1714,9 @@ go.uber.org/dig v1.10.0 h1:yLmDDj9/zuDjv3gz8GQGviXMs9TfysIUMUilCpgzUJY= go.uber.org/dig v1.10.0/go.mod h1:X34SnWGr8Fyla9zQNO2GSO2D+TIuqB14OS8JhYocIyw= go.uber.org/fx v1.9.0 h1:7OAz8ucp35AU8eydejpYG7QrbE8rLKzGhHbZlJi5LYY= go.uber.org/fx v1.9.0/go.mod h1:mFdUyAUuJ3w4jAckiKSKbldsxy1ojpAMJ+dVZg5Y0Aw= -go.uber.org/goleak v1.0.0 h1:qsup4IcBdlmsnGfqyLl4Ntn3C2XCCuKAE7DwHpScyUo= go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= 
go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= @@ -1701,8 +1767,10 @@ golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 h1:It14KIkyBFYkHkwZ7k45minvA9aorojkyjGk9KJ5B/w= +golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf h1:B2n+Zi5QeYRDAEodEu72OS36gmTWjgpXr2+cWcBW90o= +golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/exp v0.0.0-20181106170214-d68db9428509/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1735,8 +1803,9 @@ golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2 
h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180524181706-dfa909b99c79/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1784,9 +1853,13 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201022231255-08b38378de70/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6 h1:0PC75Fz/kyMGhL0e1QnypqK2kQMqKt9csD1GnMJR+Zk= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 
v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1804,6 +1877,7 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180202135801-37707fdb30a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1869,14 +1943,26 @@ golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200812155832-6a926be9bd1d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200926100807-9d91bd62050c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210317225723-c4fcb01b228e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210426080607-c94f62235c83 h1:kHSDPqCtsHZOg0nVylfTo20DDhE9gG4Y0jn7hKQ0QAM= golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210511113859-b0526f3d8744 h1:yhBbb4IRs2HS9PPlAg6DMC6mUOKexJBNsLf4Z+6En1Q= +golang.org/x/sys v0.0.0-20210511113859-b0526f3d8744/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf h1:MZ2shdL+ZM/XzY3ZGOnh4Nlpnxz5GSOhOmtHo3iPU6M= golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -1934,8 +2020,11 @@ 
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200711155855-7342f9734a7d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200827010519-17fd2f27a9e3/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20201112185108-eeaa07dd7696/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a h1:CB3a9Nez8M13wwlr/E2YtwoU+qYHKfC+JrDa45RXXoQ= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2015,8 +2104,10 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= 
+google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -2043,8 +2134,9 @@ gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= diff --git a/itests/api_test.go b/itests/api_test.go index 5487a2c3879..ba77701a245 100644 --- a/itests/api_test.go +++ b/itests/api_test.go @@ -121,6 +121,7 @@ func (ts *apiSuite) testSearchMsg(t *testing.T) { searchRes, err := full.StateSearchMsg(ctx, types.EmptyTSK, sm.Cid(), lapi.LookbackNoLimit, true) require.NoError(t, err) + require.NotNil(t, searchRes) require.Equalf(t, res.TipSet, searchRes.TipSet, "search ts: %s, different from wait ts: %s", searchRes.TipSet, res.TipSet) } @@ -186,6 +187,7 @@ func (ts *apiSuite) testNonGenesisMiner(t *testing.T) { ens.Miner(&newMiner, 
full, kit.OwnerAddr(full.DefaultKey), kit.ProofType(abi.RegisteredSealProof_StackedDrg2KiBV1), // we're using v0 actors with old proofs. + kit.WithAllSubsystems(), ).Start().InterconnectAll() ta, err := newMiner.ActorAddress(ctx) diff --git a/itests/batch_deal_test.go b/itests/batch_deal_test.go index 300a44fa2f3..01622486a8f 100644 --- a/itests/batch_deal_test.go +++ b/itests/batch_deal_test.go @@ -58,7 +58,7 @@ func TestBatchDealInput(t *testing.T) { )) client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), opts) ens.InterconnectAll().BeginMining(blockTime) - dh := kit.NewDealHarness(t, client, miner) + dh := kit.NewDealHarness(t, client, miner, miner) err := miner.MarketSetAsk(ctx, big.Zero(), big.Zero(), 200, 128, 32<<30) require.NoError(t, err) @@ -90,7 +90,11 @@ func TestBatchDealInput(t *testing.T) { res, _, _, err := kit.CreateImportFile(ctx, client, rseed, piece) require.NoError(t, err) - deal := dh.StartDeal(ctx, res.Root, false, dealStartEpoch) + dp := dh.DefaultStartDealParams() + dp.Data.Root = res.Root + dp.DealStartEpoch = dealStartEpoch + + deal := dh.StartDeal(ctx, dp) dh.WaitDealSealed(ctx, deal, false, true, checkNoPadding) } diff --git a/itests/ccupgrade_test.go b/itests/ccupgrade_test.go index eac2523bf6e..dfd0144f21e 100644 --- a/itests/ccupgrade_test.go +++ b/itests/ccupgrade_test.go @@ -61,7 +61,7 @@ func runTestCCUpgrade(t *testing.T, upgradeHeight abi.ChainEpoch) { err = miner.SectorMarkForUpgrade(ctx, sl[0]) require.NoError(t, err) - dh := kit.NewDealHarness(t, client, miner) + dh := kit.NewDealHarness(t, client, miner, miner) deal, res, inPath := dh.MakeOnlineDeal(ctx, kit.MakeFullDealParams{ Rseed: 6, SuspendUntilCryptoeconStable: true, @@ -73,9 +73,13 @@ func runTestCCUpgrade(t *testing.T, upgradeHeight abi.ChainEpoch) { { exp, err := client.StateSectorExpiration(ctx, maddr, CC, types.EmptyTSK) - require.NoError(t, err) - require.NotNil(t, exp) - require.Greater(t, 50000, int(exp.OnTime)) + if err != nil { + require.Contains(t, 
err.Error(), "failed to find sector 3") // already cleaned up + } else { + require.NoError(t, err) + require.NotNil(t, exp) + require.Greater(t, 50000, int(exp.OnTime)) + } } { exp, err := client.StateSectorExpiration(ctx, maddr, Upgraded, types.EmptyTSK) diff --git a/itests/deadlines_test.go b/itests/deadlines_test.go index 9768c3c607a..19b0a10dc3a 100644 --- a/itests/deadlines_test.go +++ b/itests/deadlines_test.go @@ -76,6 +76,7 @@ func TestDeadlineToggling(t *testing.T) { minerE kit.TestMiner ) opts := []kit.NodeOpt{kit.ConstructorOpts(kit.NetworkUpgradeAt(network.Version12, upgradeH))} + opts = append(opts, kit.WithAllSubsystems()) ens := kit.NewEnsemble(t, kit.MockProofs()). FullNode(&client, opts...). Miner(&minerA, &client, opts...). diff --git a/itests/deals_concurrent_test.go b/itests/deals_concurrent_test.go index 44b25c7b36e..69e1b4e7fd2 100644 --- a/itests/deals_concurrent_test.go +++ b/itests/deals_concurrent_test.go @@ -7,11 +7,13 @@ import ( "testing" "time" + "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/stretchr/testify/require" datatransfer "github.com/filecoin-project/go-data-transfer" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/itests/kit" "github.com/filecoin-project/lotus/node" "github.com/filecoin-project/lotus/node/modules" @@ -19,14 +21,65 @@ import ( "github.com/filecoin-project/lotus/node/repo" ) -func TestDealCyclesConcurrent(t *testing.T) { +// TestDealWithMarketAndMinerNode is running concurrently a number of storage and retrieval deals towards a miner +// architecture where the `mining/sealing/proving` node is a separate process from the `markets` node +func TestDealWithMarketAndMinerNode(t *testing.T) { if testing.Short() { t.Skip("skipping test in short mode") } kit.QuietMiningLogs() - blockTime := 10 * time.Millisecond + oldDelay := policy.GetPreCommitChallengeDelay() + policy.SetPreCommitChallengeDelay(5) + t.Cleanup(func() { + 
policy.SetPreCommitChallengeDelay(oldDelay) + }) + + // For these tests where the block time is artificially short, just use + // a deal start epoch that is guaranteed to be far enough in the future + // so that the deal starts sealing in time + startEpoch := abi.ChainEpoch(8 << 10) + + runTest := func(t *testing.T, n int, fastRetrieval bool, carExport bool) { + api.RunningNodeType = api.NodeMiner // TODO(anteva): fix me + + client, main, market, _ := kit.EnsembleWithMinerAndMarketNodes(t, kit.ThroughRPC()) + + dh := kit.NewDealHarness(t, client, main, market) + + dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{ + N: n, + FastRetrieval: fastRetrieval, + CarExport: carExport, + StartEpoch: startEpoch, + }) + } + + // this test is expensive because we don't use mock proofs; do a single cycle. + cycles := []int{4} + for _, n := range cycles { + n := n + ns := fmt.Sprintf("%d", n) + t.Run(ns+"-fastretrieval-CAR", func(t *testing.T) { runTest(t, n, true, true) }) + t.Run(ns+"-fastretrieval-NoCAR", func(t *testing.T) { runTest(t, n, true, false) }) + t.Run(ns+"-stdretrieval-CAR", func(t *testing.T) { runTest(t, n, false, true) }) + t.Run(ns+"-stdretrieval-NoCAR", func(t *testing.T) { runTest(t, n, false, false) }) + } +} + +func TestDealCyclesConcurrent(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + + oldDelay := policy.GetPreCommitChallengeDelay() + policy.SetPreCommitChallengeDelay(5) + t.Cleanup(func() { + policy.SetPreCommitChallengeDelay(oldDelay) + }) + + kit.QuietMiningLogs() // For these tests where the block time is artificially short, just use // a deal start epoch that is guaranteed to be far enough in the future @@ -35,8 +88,8 @@ func TestDealCyclesConcurrent(t *testing.T) { runTest := func(t *testing.T, n int, fastRetrieval bool, carExport bool) { client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs()) - ens.InterconnectAll().BeginMining(blockTime) - dh := kit.NewDealHarness(t, client, miner) + 
ens.InterconnectAll().BeginMining(250 * time.Millisecond) + dh := kit.NewDealHarness(t, client, miner, miner) dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{ N: n, @@ -46,8 +99,8 @@ func TestDealCyclesConcurrent(t *testing.T) { }) } - // TODO: add 2, 4, 8, more when this graphsync issue is fixed: https://github.com/ipfs/go-graphsync/issues/175# - cycles := []int{1} + // this test is cheap because we use mock proofs, do various cycles + cycles := []int{2, 4, 8, 16} for _, n := range cycles { n := n ns := fmt.Sprintf("%d", n) @@ -58,26 +111,35 @@ func TestDealCyclesConcurrent(t *testing.T) { } } -func TestSimultenousTransferLimit(t *testing.T) { +func TestSimultanenousTransferLimit(t *testing.T) { if testing.Short() { t.Skip("skipping test in short mode") } kit.QuietMiningLogs() - blockTime := 10 * time.Millisecond + oldDelay := policy.GetPreCommitChallengeDelay() + policy.SetPreCommitChallengeDelay(5) + t.Cleanup(func() { + policy.SetPreCommitChallengeDelay(oldDelay) + }) // For these tests where the block time is artificially short, just use // a deal start epoch that is guaranteed to be far enough in the future // so that the deal starts sealing in time startEpoch := abi.ChainEpoch(2 << 12) + const ( + graphsyncThrottle = 2 + concurrency = 20 + ) runTest := func(t *testing.T) { client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ConstructorOpts( - node.ApplyIf(node.IsType(repo.StorageMiner), node.Override(new(dtypes.StagingGraphsync), modules.StagingGraphsync(2))), + node.ApplyIf(node.IsType(repo.StorageMiner), node.Override(new(dtypes.StagingGraphsync), modules.StagingGraphsync(graphsyncThrottle))), + node.Override(new(dtypes.Graphsync), modules.Graphsync(graphsyncThrottle)), )) - ens.InterconnectAll().BeginMining(blockTime) - dh := kit.NewDealHarness(t, client, miner) + ens.InterconnectAll().BeginMining(250 * time.Millisecond) + dh := kit.NewDealHarness(t, client, miner, miner) ctx, cancel := context.WithCancel(context.Background()) @@ -96,7 
+158,7 @@ func TestSimultenousTransferLimit(t *testing.T) { select { case u := <-du: t.Logf("%d - %s", u.TransferID, datatransfer.Statuses[u.Status]) - if u.Status == datatransfer.Ongoing { + if u.Status == datatransfer.Ongoing && u.Transferred > 0 { ongoing[u.TransferID] = struct{}{} } else { delete(ongoing, u.TransferID) @@ -111,16 +173,34 @@ func TestSimultenousTransferLimit(t *testing.T) { } }() + t.Logf("running concurrent deals: %d", concurrency) + dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{ - N: 1, // TODO: set to 20 after https://github.com/ipfs/go-graphsync/issues/175 is fixed + N: concurrency, FastRetrieval: true, StartEpoch: startEpoch, }) + t.Logf("all deals finished") + cancel() wg.Wait() - require.LessOrEqual(t, maxOngoing, 2) + // The eventing systems across go-data-transfer and go-graphsync + // are racy, and that's why we can't enforce graphsyncThrottle exactly, + // without making this test racy. + // + // Essentially what could happen is that the graphsync layer starts the + // next transfer before the go-data-transfer FSM has the opportunity to + // move the previously completed transfer to the next stage, thus giving + // the appearance that more than graphsyncThrottle transfers are + // in progress. + // + // Concurrency (20) is x10 higher than graphsyncThrottle (2), so if all + // 20 transfers are not happening at once, we know the throttle is + // in effect. Thus we are a little bit lenient here to account for the + // above races and allow up to graphsyncThrottle*2. 
+ require.LessOrEqual(t, maxOngoing, graphsyncThrottle*2) } runTest(t) diff --git a/itests/deals_offline_test.go b/itests/deals_offline_test.go index c3f19048b73..003f12b1106 100644 --- a/itests/deals_offline_test.go +++ b/itests/deals_offline_test.go @@ -6,32 +6,26 @@ import ( "testing" "time" + commcid "github.com/filecoin-project/go-fil-commcid" + commp "github.com/filecoin-project/go-fil-commp-hashhash" "github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/itests/kit" "github.com/stretchr/testify/require" ) func TestOfflineDealFlow(t *testing.T) { - blocktime := 10 * time.Millisecond - // For these tests where the block time is artificially short, just use - // a deal start epoch that is guaranteed to be far enough in the future - // so that the deal starts sealing in time - startEpoch := abi.ChainEpoch(2 << 12) - - runTest := func(t *testing.T, fastRet bool) { + runTest := func(t *testing.T, fastRet bool, upscale abi.PaddedPieceSize) { ctx := context.Background() - client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs()) - ens.InterconnectAll().BeginMining(blocktime) + client, miner, ens := kit.EnsembleMinimal(t, kit.WithAllSubsystems()) // no mock proofs + ens.InterconnectAll().BeginMining(250 * time.Millisecond) - dh := kit.NewDealHarness(t, client, miner) + dh := kit.NewDealHarness(t, client, miner, miner) // Create a random file and import on the client. 
- res, inFile := client.CreateImportFile(ctx, 1, 0) + res, inFile := client.CreateImportFile(ctx, 1, 200) // Get the piece size and commP rootCid := res.Root @@ -39,31 +33,32 @@ func TestOfflineDealFlow(t *testing.T) { require.NoError(t, err) t.Log("FILE CID:", rootCid) - // Create a storage deal with the miner - maddr, err := miner.ActorAddress(ctx) - require.NoError(t, err) - - addr, err := client.WalletDefaultAddress(ctx) - require.NoError(t, err) + // test whether padding works as intended + if upscale > 0 { + newRawCp, err := commp.PadCommP( + pieceInfo.PieceCID.Hash()[len(pieceInfo.PieceCID.Hash())-32:], + uint64(pieceInfo.PieceSize), + uint64(upscale), + ) + require.NoError(t, err) + + pieceInfo.PieceSize = upscale + pieceInfo.PieceCID, err = commcid.DataCommitmentV1ToCID(newRawCp) + require.NoError(t, err) + } - // Manual storage deal (offline deal) - dataRef := &storagemarket.DataRef{ + dp := dh.DefaultStartDealParams() + dp.DealStartEpoch = abi.ChainEpoch(4 << 10) + dp.FastRetrieval = fastRet + // Replace with params for manual storage deal (offline deal) + dp.Data = &storagemarket.DataRef{ TransferType: storagemarket.TTManual, Root: rootCid, PieceCid: &pieceInfo.PieceCID, PieceSize: pieceInfo.PieceSize.Unpadded(), } - proposalCid, err := client.ClientStartDeal(ctx, &api.StartDealParams{ - Data: dataRef, - Wallet: addr, - Miner: maddr, - EpochPrice: types.NewInt(1000000), - DealStartEpoch: startEpoch, - MinBlocksDuration: uint64(build.MinDealDuration), - FastRetrieval: fastRet, - }) - require.NoError(t, err) + proposalCid := dh.StartDeal(ctx, dp) // Wait for the deal to reach StorageDealCheckForAcceptance on the client cd, err := client.ClientGetDealInfo(ctx, *proposalCid) @@ -96,6 +91,7 @@ func TestOfflineDealFlow(t *testing.T) { } - t.Run("stdretrieval", func(t *testing.T) { runTest(t, false) }) - t.Run("fastretrieval", func(t *testing.T) { runTest(t, true) }) + t.Run("stdretrieval", func(t *testing.T) { runTest(t, false, 0) }) + t.Run("fastretrieval", 
func(t *testing.T) { runTest(t, true, 0) }) + t.Run("fastretrieval", func(t *testing.T) { runTest(t, true, 1024) }) } diff --git a/itests/deals_padding_test.go b/itests/deals_padding_test.go new file mode 100644 index 00000000000..cd15d30d7e4 --- /dev/null +++ b/itests/deals_padding_test.go @@ -0,0 +1,76 @@ +package itests + +import ( + "context" + "testing" + "time" + + commcid "github.com/filecoin-project/go-fil-commcid" + commp "github.com/filecoin-project/go-fil-commp-hashhash" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/actors/policy" + "github.com/filecoin-project/lotus/itests/kit" + "github.com/stretchr/testify/require" +) + +func TestDealPadding(t *testing.T) { + + kit.QuietMiningLogs() + + var blockTime = 250 * time.Millisecond + startEpoch := abi.ChainEpoch(2 << 12) + policy.SetPreCommitChallengeDelay(2) + + client, miner, ens := kit.EnsembleMinimal(t, kit.ThroughRPC(), kit.WithAllSubsystems()) // no mock proofs. + ens.InterconnectAll().BeginMining(blockTime) + dh := kit.NewDealHarness(t, client, miner, miner) + + ctx := context.Background() + client.WaitTillChain(ctx, kit.BlockMinedBy(miner.ActorAddr)) + + // Create a random file, would originally be a 256-byte sector + res, inFile := client.CreateImportFile(ctx, 1, 200) + + // Get the piece size and commP + pieceInfo, err := client.ClientDealPieceCID(ctx, res.Root) + require.NoError(t, err) + t.Log("FILE CID:", res.Root) + + runTest := func(t *testing.T, upscale abi.PaddedPieceSize) { + // test whether padding works as intended + newRawCp, err := commp.PadCommP( + pieceInfo.PieceCID.Hash()[len(pieceInfo.PieceCID.Hash())-32:], + uint64(pieceInfo.PieceSize), + uint64(upscale), + ) + require.NoError(t, err) + + pcid, err := commcid.DataCommitmentV1ToCID(newRawCp) + require.NoError(t, err) + + dp := dh.DefaultStartDealParams() + dp.Data.Root = res.Root + dp.Data.PieceCid = &pcid + dp.Data.PieceSize = upscale.Unpadded() + dp.DealStartEpoch = startEpoch + 
proposalCid := dh.StartDeal(ctx, dp) + + // TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this + time.Sleep(time.Second) + + di, err := client.ClientGetDealInfo(ctx, *proposalCid) + require.NoError(t, err) + require.True(t, di.PieceCID.Equals(pcid)) + + dh.WaitDealSealed(ctx, proposalCid, false, false, nil) + + // Retrieve the deal + outFile := dh.PerformRetrieval(ctx, proposalCid, res.Root, false) + + kit.AssertFilesEqual(t, inFile, outFile) + } + + t.Run("padQuarterSector", func(t *testing.T) { runTest(t, 512) }) + t.Run("padHalfSector", func(t *testing.T) { runTest(t, 1024) }) + t.Run("padFullSector", func(t *testing.T) { runTest(t, 2048) }) +} diff --git a/itests/deals_power_test.go b/itests/deals_power_test.go index ebf1895e3fc..0c29ad06028 100644 --- a/itests/deals_power_test.go +++ b/itests/deals_power_test.go @@ -24,13 +24,13 @@ func TestFirstDealEnablesMining(t *testing.T) { ens := kit.NewEnsemble(t, kit.MockProofs()) ens.FullNode(&client) - ens.Miner(&genMiner, &client) - ens.Miner(&provider, &client, kit.PresealSectors(0)) + ens.Miner(&genMiner, &client, kit.WithAllSubsystems()) + ens.Miner(&provider, &client, kit.WithAllSubsystems(), kit.PresealSectors(0)) ens.Start().InterconnectAll().BeginMining(50 * time.Millisecond) ctx := context.Background() - dh := kit.NewDealHarness(t, &client, &provider) + dh := kit.NewDealHarness(t, &client, &provider, &provider) ref, _ := client.CreateImportFile(ctx, 5, 0) @@ -50,7 +50,9 @@ func TestFirstDealEnablesMining(t *testing.T) { }() // now perform the deal. 
- deal := dh.StartDeal(ctx, ref.Root, false, 0) + dp := dh.DefaultStartDealParams() + dp.Data.Root = ref.Root + deal := dh.StartDeal(ctx, dp) // TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this time.Sleep(time.Second) diff --git a/itests/deals_pricing_test.go b/itests/deals_pricing_test.go index 357abec1ed9..eb28af0bd1e 100644 --- a/itests/deals_pricing_test.go +++ b/itests/deals_pricing_test.go @@ -14,7 +14,7 @@ import ( func TestQuotePriceForUnsealedRetrieval(t *testing.T) { var ( ctx = context.Background() - blocktime = time.Second + blocktime = 50 * time.Millisecond ) kit.QuietMiningLogs() @@ -35,7 +35,7 @@ func TestQuotePriceForUnsealedRetrieval(t *testing.T) { err = miner.MarketSetRetrievalAsk(ctx, ask) require.NoError(t, err) - dh := kit.NewDealHarness(t, client, miner) + dh := kit.NewDealHarness(t, client, miner, miner) deal1, res1, _ := dh.MakeOnlineDeal(ctx, kit.MakeFullDealParams{Rseed: 6}) @@ -123,7 +123,7 @@ func TestZeroPricePerByteRetrieval(t *testing.T) { err = miner.MarketSetRetrievalAsk(ctx, ask) require.NoError(t, err) - dh := kit.NewDealHarness(t, client, miner) + dh := kit.NewDealHarness(t, client, miner, miner) dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{ N: 1, StartEpoch: startEpoch, diff --git a/itests/deals_publish_test.go b/itests/deals_publish_test.go index 16f84038bbb..6cefde6b95f 100644 --- a/itests/deals_publish_test.go +++ b/itests/deals_publish_test.go @@ -11,9 +11,13 @@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/actors/builtin/market" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/wallet" "github.com/filecoin-project/lotus/itests/kit" "github.com/filecoin-project/lotus/markets/storageadapter" "github.com/filecoin-project/lotus/node" + "github.com/filecoin-project/lotus/node/config" + "github.com/filecoin-project/lotus/node/modules" + 
"github.com/filecoin-project/lotus/storage" market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" "github.com/stretchr/testify/require" ) @@ -28,17 +32,35 @@ func TestPublishDealsBatching(t *testing.T) { kit.QuietMiningLogs() - opts := node.Override(new(*storageadapter.DealPublisher), - storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{ - Period: publishPeriod, - MaxDealsPerMsg: maxDealsPerMsg, - }), + publisherKey, err := wallet.GenerateKey(types.KTSecp256k1) + require.NoError(t, err) + + opts := node.Options( + node.Override(new(*storageadapter.DealPublisher), + storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{ + Period: publishPeriod, + MaxDealsPerMsg: maxDealsPerMsg, + }), + ), + node.Override(new(*storage.AddressSelector), modules.AddressSelector(&config.MinerAddressConfig{ + DealPublishControl: []string{ + publisherKey.Address.String(), + }, + DisableOwnerFallback: true, + DisableWorkerFallback: true, + })), + kit.LatestActorsAt(-1), ) - client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ConstructorOpts(opts)) + client, miner, ens := kit.EnsembleMinimal(t, kit.Account(publisherKey, types.FromFil(10)), kit.MockProofs(), kit.ConstructorOpts(opts)) ens.InterconnectAll().BeginMining(10 * time.Millisecond) - dh := kit.NewDealHarness(t, client, miner) + _, err = client.WalletImport(ctx, &publisherKey.KeyInfo) + require.NoError(t, err) + + miner.SetControlAddresses(publisherKey.Address) + + dh := kit.NewDealHarness(t, client, miner, miner) // Starts a deal and waits until it's published runDealTillPublish := func(rseed int) { @@ -47,7 +69,10 @@ func TestPublishDealsBatching(t *testing.T) { upds, err := client.ClientGetDealUpdates(ctx) require.NoError(t, err) - dh.StartDeal(ctx, res.Root, false, startEpoch) + dp := dh.DefaultStartDealParams() + dp.Data.Root = res.Root + dp.DealStartEpoch = startEpoch + dh.StartDeal(ctx, dp) // TODO: this sleep is only necessary because deals don't 
immediately get logged in the dealstore, we should fix this time.Sleep(time.Second) @@ -92,6 +117,7 @@ func TestPublishDealsBatching(t *testing.T) { err = pubDealsParams.UnmarshalCBOR(bytes.NewReader(msg.Params)) require.NoError(t, err) require.Len(t, pubDealsParams.Deals, int(maxDealsPerMsg)) + require.Equal(t, publisherKey.Address.String(), msg.From.String()) } } require.Equal(t, 1, count) diff --git a/itests/deals_test.go b/itests/deals_test.go index 8aff414e005..a461586a17d 100644 --- a/itests/deals_test.go +++ b/itests/deals_test.go @@ -4,6 +4,7 @@ import ( "testing" "time" + "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/itests/kit" ) @@ -14,11 +15,15 @@ func TestDealsWithSealingAndRPC(t *testing.T) { kit.QuietMiningLogs() - var blockTime = 50 * time.Millisecond + oldDelay := policy.GetPreCommitChallengeDelay() + policy.SetPreCommitChallengeDelay(5) + t.Cleanup(func() { + policy.SetPreCommitChallengeDelay(oldDelay) + }) - client, miner, ens := kit.EnsembleMinimal(t, kit.ThroughRPC()) // no mock proofs. - ens.InterconnectAll().BeginMining(blockTime) - dh := kit.NewDealHarness(t, client, miner) + client, miner, ens := kit.EnsembleMinimal(t, kit.ThroughRPC(), kit.WithAllSubsystems()) // no mock proofs. 
+ ens.InterconnectAll().BeginMining(250 * time.Millisecond) + dh := kit.NewDealHarness(t, client, miner, miner) t.Run("stdretrieval", func(t *testing.T) { dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{N: 1}) diff --git a/itests/gateway_test.go b/itests/gateway_test.go index 11f7595700b..f9e4a0fb6fd 100644 --- a/itests/gateway_test.go +++ b/itests/gateway_test.go @@ -5,6 +5,7 @@ import ( "context" "fmt" "math" + "net" "testing" "time" @@ -192,7 +193,7 @@ func TestGatewayDealFlow(t *testing.T) { // so that the deal starts sealing in time dealStartEpoch := abi.ChainEpoch(2 << 12) - dh := kit.NewDealHarness(t, nodes.lite, nodes.miner) + dh := kit.NewDealHarness(t, nodes.lite, nodes.miner, nodes.miner) dealCid, res, _ := dh.MakeOnlineDeal(context.Background(), kit.MakeFullDealParams{ Rseed: 6, StartEpoch: dealStartEpoch, @@ -270,7 +271,10 @@ func startNodes( handler, err := gateway.Handler(gwapi) require.NoError(t, err) - srv, _ := kit.CreateRPCServer(t, handler) + l, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + + srv, _ := kit.CreateRPCServer(t, handler, l) // Create a gateway client API that connects to the gateway server var gapi api.Gateway diff --git a/itests/get_messages_in_ts_test.go b/itests/get_messages_in_ts_test.go new file mode 100644 index 00000000000..61219a316c3 --- /dev/null +++ b/itests/get_messages_in_ts_test.go @@ -0,0 +1,104 @@ +package itests + +import ( + "context" + "testing" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/itests/kit" + "github.com/stretchr/testify/require" + + "time" + + "github.com/filecoin-project/go-state-types/big" +) + +func TestChainGetMessagesInTs(t *testing.T) { + ctx := context.Background() + + kit.QuietMiningLogs() + + client, _, ens := kit.EnsembleMinimal(t, kit.MockProofs()) + ens.InterconnectAll().BeginMining(10 * time.Millisecond) + + // create a new address 
where to send funds. + addr, err := client.WalletNew(ctx, types.KTBLS) + require.NoError(t, err) + + // get the existing balance from the default wallet to then split it. + bal, err := client.WalletBalance(ctx, client.DefaultKey.Address) + require.NoError(t, err) + + const iterations = 100 + + // we'll send half our balance (saving the other half for gas), + // in `iterations` increments. + toSend := big.Div(bal, big.NewInt(2)) + each := big.Div(toSend, big.NewInt(iterations)) + + waitAllCh := make(chan struct{}) + go func() { + headChangeCh, err := client.ChainNotify(ctx) + require.NoError(t, err) + <-headChangeCh //skip hccurrent + + count := 0 + for { + select { + case headChanges := <-headChangeCh: + for _, change := range headChanges { + if change.Type == store.HCApply { + msgs, err := client.ChainGetMessagesInTipset(ctx, change.Val.Key()) + require.NoError(t, err) + count += len(msgs) + if count == iterations { + waitAllCh <- struct{}{} + } + } + } + } + } + }() + + var sms []*types.SignedMessage + for i := 0; i < iterations; i++ { + msg := &types.Message{ + From: client.DefaultKey.Address, + To: addr, + Value: each, + } + + sm, err := client.MpoolPushMessage(ctx, msg, nil) + require.NoError(t, err) + require.EqualValues(t, i, sm.Message.Nonce) + + sms = append(sms, sm) + } + + select { + case <-waitAllCh: + case <-time.After(time.Minute): + t.Errorf("timeout to wait for pack messages") + } + + for _, sm := range sms { + msgLookup, err := client.StateWaitMsg(ctx, sm.Cid(), 3, api.LookbackNoLimit, true) + require.NoError(t, err) + + ts, err := client.ChainGetTipSet(ctx, msgLookup.TipSet) + require.NoError(t, err) + + msgs, err := client.ChainGetMessagesInTipset(ctx, ts.Parents()) + require.NoError(t, err) + + var found bool + for _, msg := range msgs { + if msg.Cid == sm.Cid() { + found = true + } + } + require.EqualValues(t, true, found, "expect got message in tipset %v", msgLookup.TipSet) + } +} diff --git a/itests/kit/control.go b/itests/kit/control.go new 
file mode 100644 index 00000000000..73ac39b7a14 --- /dev/null +++ b/itests/kit/control.go @@ -0,0 +1,42 @@ +package kit + +import ( + "context" + + "github.com/stretchr/testify/require" + + addr "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/big" + miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/types" +) + +func (tm *TestMiner) SetControlAddresses(addrs ...addr.Address) { + ctx := context.TODO() + + mi, err := tm.FullNode.StateMinerInfo(ctx, tm.ActorAddr, types.EmptyTSK) + require.NoError(tm.t, err) + + cwp := &miner2.ChangeWorkerAddressParams{ + NewWorker: mi.Worker, + NewControlAddrs: addrs, + } + + sp, err := actors.SerializeParams(cwp) + require.NoError(tm.t, err) + + smsg, err := tm.FullNode.MpoolPushMessage(ctx, &types.Message{ + From: mi.Owner, + To: tm.ActorAddr, + Method: miner.Methods.ChangeWorkerAddress, + + Value: big.Zero(), + Params: sp, + }, nil) + require.NoError(tm.t, err) + + tm.FullNode.WaitMsg(ctx, smsg.Cid()) +} diff --git a/itests/kit/deals.go b/itests/kit/deals.go index d9129b76a86..0832447f20b 100644 --- a/itests/kit/deals.go +++ b/itests/kit/deals.go @@ -28,7 +28,8 @@ import ( type DealHarness struct { t *testing.T client *TestFullNode - miner *TestMiner + main *TestMiner + market *TestMiner } type MakeFullDealParams struct { @@ -62,11 +63,12 @@ type MakeFullDealParams struct { } // NewDealHarness creates a test harness that contains testing utilities for deals. 
-func NewDealHarness(t *testing.T, client *TestFullNode, miner *TestMiner) *DealHarness { +func NewDealHarness(t *testing.T, client *TestFullNode, main *TestMiner, market *TestMiner) *DealHarness { return &DealHarness{ t: t, client: client, - miner: miner, + main: main, + market: market, } } @@ -86,7 +88,11 @@ func (dh *DealHarness) MakeOnlineDeal(ctx context.Context, params MakeFullDealPa dh.t.Logf("deal-making continuing; current height is %d", ts.Height()) } - deal = dh.StartDeal(ctx, res.Root, params.FastRet, params.StartEpoch) + dp := dh.DefaultStartDealParams() + dp.Data.Root = res.Root + dp.DealStartEpoch = params.StartEpoch + dp.FastRetrieval = params.FastRet + deal = dh.StartDeal(ctx, dp) // TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this time.Sleep(time.Second) @@ -95,29 +101,28 @@ func (dh *DealHarness) MakeOnlineDeal(ctx context.Context, params MakeFullDealPa return deal, res, path } -// StartDeal starts a storage deal between the client and the miner. 
-func (dh *DealHarness) StartDeal(ctx context.Context, fcid cid.Cid, fastRet bool, startEpoch abi.ChainEpoch) *cid.Cid { - maddr, err := dh.miner.ActorAddress(ctx) - require.NoError(dh.t, err) +func (dh *DealHarness) DefaultStartDealParams() api.StartDealParams { + dp := api.StartDealParams{ + Data: &storagemarket.DataRef{TransferType: storagemarket.TTGraphsync}, + EpochPrice: types.NewInt(1000000), + MinBlocksDuration: uint64(build.MinDealDuration), + } - addr, err := dh.client.WalletDefaultAddress(ctx) + var err error + dp.Miner, err = dh.main.ActorAddress(context.Background()) require.NoError(dh.t, err) - deal, err := dh.client.ClientStartDeal(ctx, &api.StartDealParams{ - Data: &storagemarket.DataRef{ - TransferType: storagemarket.TTGraphsync, - Root: fcid, - }, - Wallet: addr, - Miner: maddr, - EpochPrice: types.NewInt(1000000), - DealStartEpoch: startEpoch, - MinBlocksDuration: uint64(build.MinDealDuration), - FastRetrieval: fastRet, - }) + dp.Wallet, err = dh.client.WalletDefaultAddress(context.Background()) require.NoError(dh.t, err) - return deal + return dp +} + +// StartDeal starts a storage deal between the client and the miner. +func (dh *DealHarness) StartDeal(ctx context.Context, dealParams api.StartDealParams) *cid.Cid { + dealProposalCid, err := dh.client.ClientStartDeal(ctx, &dealParams) + require.NoError(dh.t, err) + return dealProposalCid } // WaitDealSealed waits until the deal is sealed. 
@@ -146,7 +151,7 @@ loop: break loop } - mds, err := dh.miner.MarketListIncompleteDeals(ctx) + mds, err := dh.market.MarketListIncompleteDeals(ctx) require.NoError(dh.t, err) var minerState storagemarket.StorageDealStatus @@ -170,7 +175,7 @@ func (dh *DealHarness) WaitDealPublished(ctx context.Context, deal *cid.Cid) { subCtx, cancel := context.WithCancel(ctx) defer cancel() - updates, err := dh.miner.MarketGetDealUpdates(subCtx) + updates, err := dh.market.MarketGetDealUpdates(subCtx) require.NoError(dh.t, err) for { @@ -197,19 +202,19 @@ func (dh *DealHarness) WaitDealPublished(ctx context.Context, deal *cid.Cid) { } func (dh *DealHarness) StartSealingWaiting(ctx context.Context) { - snums, err := dh.miner.SectorsList(ctx) + snums, err := dh.main.SectorsList(ctx) require.NoError(dh.t, err) for _, snum := range snums { - si, err := dh.miner.SectorsStatus(ctx, snum, false) + si, err := dh.main.SectorsStatus(ctx, snum, false) require.NoError(dh.t, err) dh.t.Logf("Sector state: %s", si.State) if si.State == api.SectorState(sealing.WaitDeals) { - require.NoError(dh.t, dh.miner.SectorStartSealing(ctx, snum)) + require.NoError(dh.t, dh.main.SectorStartSealing(ctx, snum)) } - dh.miner.FlushSealingBatches(ctx) + dh.main.FlushSealingBatches(ctx) } } @@ -290,6 +295,7 @@ func (dh *DealHarness) RunConcurrentDeals(opts RunConcurrentDealsOpts) { for i := 0; i < opts.N; i++ { i := i errgrp.Go(func() (err error) { + defer dh.t.Logf("finished concurrent deal %d/%d", i, opts.N) defer func() { // This is necessary because golang can't deal with test // failures being reported from children goroutines ¯\_(ツ)_/¯ @@ -297,11 +303,17 @@ func (dh *DealHarness) RunConcurrentDeals(opts RunConcurrentDealsOpts) { err = fmt.Errorf("deal failed: %s", r) } }() + + dh.t.Logf("making storage deal %d/%d", i, opts.N) + deal, res, inPath := dh.MakeOnlineDeal(context.Background(), MakeFullDealParams{ Rseed: 5 + i, FastRet: opts.FastRetrieval, StartEpoch: opts.StartEpoch, }) + + dh.t.Logf("retrieving 
deal %d/%d", i, opts.N) + outPath := dh.PerformRetrieval(context.Background(), deal, res.Root, opts.CarExport) AssertFilesEqual(dh.t, inPath, outPath) return nil diff --git a/itests/kit/ensemble.go b/itests/kit/ensemble.go index 788aa40c0d6..77a743d0cea 100644 --- a/itests/kit/ensemble.go +++ b/itests/kit/ensemble.go @@ -4,7 +4,9 @@ import ( "bytes" "context" "crypto/rand" + "fmt" "io/ioutil" + "net" "sync" "testing" "time" @@ -194,6 +196,10 @@ func (n *Ensemble) Miner(miner *TestMiner, full *TestFullNode, opts ...NodeOpt) actorAddr, err := address.NewIDAddress(genesis2.MinerStart + uint64(minerCnt)) require.NoError(n.t, err) + if options.mainMiner != nil { + actorAddr = options.mainMiner.ActorAddr + } + ownerKey := options.ownerKey if !n.bootstrapped { var ( @@ -228,13 +234,17 @@ func (n *Ensemble) Miner(miner *TestMiner, full *TestFullNode, opts ...NodeOpt) require.NotNil(n.t, ownerKey, "worker key can't be null if initializing a miner after genesis") } + rl, err := net.Listen("tcp", "127.0.0.1:") + require.NoError(n.t, err) + *miner = TestMiner{ - t: n.t, - ActorAddr: actorAddr, - OwnerKey: ownerKey, - FullNode: full, - PresealDir: tdir, - options: options, + t: n.t, + ActorAddr: actorAddr, + OwnerKey: ownerKey, + FullNode: full, + PresealDir: tdir, + options: options, + RemoteListener: rl, } miner.Libp2p.PeerID = peerId @@ -263,10 +273,11 @@ func (n *Ensemble) Start() *Ensemble { // Create all inactive full nodes. for i, full := range n.inactive.fullnodes { + r := repo.NewMemory(nil) opts := []node.Option{ node.FullAPI(&full.FullNode, node.Lite(full.options.lite)), - node.Online(), - node.Repo(repo.NewMemory(nil)), + node.Base(), + node.Repo(r), node.MockHost(n.mn), node.Test(), @@ -334,39 +345,56 @@ func (n *Ensemble) Start() *Ensemble { // Create all inactive miners. for i, m := range n.inactive.miners { if n.bootstrapped { - // this is a miner created after genesis, so it won't have a preseal. - // we need to create it on chain. 
- params, aerr := actors.SerializeParams(&power2.CreateMinerParams{ - Owner: m.OwnerKey.Address, - Worker: m.OwnerKey.Address, - SealProofType: m.options.proofType, - Peer: abi.PeerID(m.Libp2p.PeerID), - }) - require.NoError(n.t, aerr) - - createStorageMinerMsg := &types.Message{ - From: m.OwnerKey.Address, - To: power.Address, - Value: big.Zero(), - - Method: power.Methods.CreateMiner, - Params: params, - - GasLimit: 0, - GasPremium: big.NewInt(5252), + if m.options.mainMiner == nil { + // this is a miner created after genesis, so it won't have a preseal. + // we need to create it on chain. + params, aerr := actors.SerializeParams(&power2.CreateMinerParams{ + Owner: m.OwnerKey.Address, + Worker: m.OwnerKey.Address, + SealProofType: m.options.proofType, + Peer: abi.PeerID(m.Libp2p.PeerID), + }) + require.NoError(n.t, aerr) + + createStorageMinerMsg := &types.Message{ + From: m.OwnerKey.Address, + To: power.Address, + Value: big.Zero(), + + Method: power.Methods.CreateMiner, + Params: params, + } + signed, err := m.FullNode.FullNode.MpoolPushMessage(ctx, createStorageMinerMsg, nil) + require.NoError(n.t, err) + + mw, err := m.FullNode.FullNode.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence, api.LookbackNoLimit, true) + require.NoError(n.t, err) + require.Equal(n.t, exitcode.Ok, mw.Receipt.ExitCode) + + var retval power2.CreateMinerReturn + err = retval.UnmarshalCBOR(bytes.NewReader(mw.Receipt.Return)) + require.NoError(n.t, err, "failed to create miner") + + m.ActorAddr = retval.IDAddress + } else { + params, err := actors.SerializeParams(&miner2.ChangePeerIDParams{NewID: abi.PeerID(m.Libp2p.PeerID)}) + require.NoError(n.t, err) + + msg := &types.Message{ + To: m.options.mainMiner.ActorAddr, + From: m.options.mainMiner.OwnerKey.Address, + Method: miner.Methods.ChangePeerID, + Params: params, + Value: types.NewInt(0), + } + + signed, err2 := m.FullNode.FullNode.MpoolPushMessage(ctx, msg, nil) + require.NoError(n.t, err2) + + mw, err2 := 
m.FullNode.FullNode.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence, api.LookbackNoLimit, true) + require.NoError(n.t, err2) + require.Equal(n.t, exitcode.Ok, mw.Receipt.ExitCode) } - signed, err := m.FullNode.FullNode.MpoolPushMessage(ctx, createStorageMinerMsg, nil) - require.NoError(n.t, err) - - mw, err := m.FullNode.FullNode.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence, api.LookbackNoLimit, true) - require.NoError(n.t, err) - require.Equal(n.t, exitcode.Ok, mw.Receipt.ExitCode) - - var retval power2.CreateMinerReturn - err = retval.UnmarshalCBOR(bytes.NewReader(mw.Receipt.Return)) - require.NoError(n.t, err, "failed to create miner") - - m.ActorAddr = retval.IDAddress } has, err := m.FullNode.WalletHas(ctx, m.OwnerKey.Address) @@ -388,6 +416,36 @@ func (n *Ensemble) Start() *Ensemble { lr, err := r.Lock(repo.StorageMiner) require.NoError(n.t, err) + c, err := lr.Config() + require.NoError(n.t, err) + + cfg, ok := c.(*config.StorageMiner) + if !ok { + n.t.Fatalf("invalid config from repo, got: %T", c) + } + cfg.Common.API.RemoteListenAddress = m.RemoteListener.Addr().String() + cfg.Subsystems.EnableMarkets = m.options.subsystems.Has(SMarkets) + cfg.Subsystems.EnableMining = m.options.subsystems.Has(SMining) + cfg.Subsystems.EnableSealing = m.options.subsystems.Has(SSealing) + cfg.Subsystems.EnableSectorStorage = m.options.subsystems.Has(SSectorStorage) + + if m.options.mainMiner != nil { + token, err := m.options.mainMiner.FullNode.AuthNew(ctx, api.AllPermissions) + require.NoError(n.t, err) + + cfg.Subsystems.SectorIndexApiInfo = fmt.Sprintf("%s:%s", token, m.options.mainMiner.ListenAddr) + cfg.Subsystems.SealerApiInfo = fmt.Sprintf("%s:%s", token, m.options.mainMiner.ListenAddr) + + fmt.Println("config for market node, setting SectorIndexApiInfo to: ", cfg.Subsystems.SectorIndexApiInfo) + fmt.Println("config for market node, setting SealerApiInfo to: ", cfg.Subsystems.SealerApiInfo) + } + + err = lr.SetConfig(func(raw interface{}) { + rcfg 
:= raw.(*config.StorageMiner) + *rcfg = *cfg + }) + require.NoError(n.t, err) + ks, err := lr.KeyStore() require.NoError(n.t, err) @@ -417,28 +475,30 @@ func (n *Ensemble) Start() *Ensemble { err = lr.Close() require.NoError(n.t, err) - enc, err := actors.SerializeParams(&miner2.ChangePeerIDParams{NewID: abi.PeerID(m.Libp2p.PeerID)}) - require.NoError(n.t, err) + if m.options.mainMiner == nil { + enc, err := actors.SerializeParams(&miner2.ChangePeerIDParams{NewID: abi.PeerID(m.Libp2p.PeerID)}) + require.NoError(n.t, err) - msg := &types.Message{ - From: m.OwnerKey.Address, - To: m.ActorAddr, - Method: miner.Methods.ChangePeerID, - Params: enc, - Value: types.NewInt(0), - } + msg := &types.Message{ + From: m.OwnerKey.Address, + To: m.ActorAddr, + Method: miner.Methods.ChangePeerID, + Params: enc, + Value: types.NewInt(0), + } - _, err = m.FullNode.MpoolPushMessage(ctx, msg, nil) - require.NoError(n.t, err) + _, err2 := m.FullNode.MpoolPushMessage(ctx, msg, nil) + require.NoError(n.t, err2) + } var mineBlock = make(chan lotusminer.MineReq) opts := []node.Option{ - node.StorageMiner(&m.StorageMiner), - node.Online(), + node.StorageMiner(&m.StorageMiner, cfg.Subsystems), + node.Base(), node.Repo(r), node.Test(), - node.MockHost(n.mn), + node.If(!m.options.disableLibp2p, node.MockHost(n.mn)), node.Override(new(v1api.FullNode), m.FullNode.FullNode), node.Override(new(*lotusminer.Miner), lotusminer.NewTestMiner(mineBlock, m.ActorAddr)), @@ -575,7 +635,7 @@ func (n *Ensemble) InterconnectAll() *Ensemble { } // Connect connects one full node to the provided full nodes. 
-func (n *Ensemble) Connect(from api.Common, to ...api.Common) *Ensemble { +func (n *Ensemble) Connect(from api.Net, to ...api.Net) *Ensemble { addr, err := from.NetAddrsListen(context.Background()) require.NoError(n.t, err) diff --git a/itests/kit/ensemble_presets.go b/itests/kit/ensemble_presets.go index 7cae12a6885..b7ff80aa122 100644 --- a/itests/kit/ensemble_presets.go +++ b/itests/kit/ensemble_presets.go @@ -1,6 +1,9 @@ package kit -import "testing" +import ( + "testing" + "time" +) // EnsembleMinimal creates and starts an Ensemble with a single full node and a single miner. // It does not interconnect nodes nor does it begin mining. @@ -8,6 +11,8 @@ import "testing" // This function supports passing both ensemble and node functional options. // Functional options are applied to all nodes. func EnsembleMinimal(t *testing.T, opts ...interface{}) (*TestFullNode, *TestMiner, *Ensemble) { + opts = append(opts, WithAllSubsystems()) + eopts, nopts := siftOptions(t, opts) var ( @@ -18,12 +23,37 @@ func EnsembleMinimal(t *testing.T, opts ...interface{}) (*TestFullNode, *TestMin return &full, &miner, ens } +func EnsembleWithMinerAndMarketNodes(t *testing.T, opts ...interface{}) (*TestFullNode, *TestMiner, *TestMiner, *Ensemble) { + eopts, nopts := siftOptions(t, opts) + + var ( + fullnode TestFullNode + main, market TestMiner + ) + + mainNodeOpts := []NodeOpt{WithSubsystems(SSealing, SSectorStorage, SMining), DisableLibp2p()} + mainNodeOpts = append(mainNodeOpts, nopts...) + + blockTime := 100 * time.Millisecond + ens := NewEnsemble(t, eopts...).FullNode(&fullnode, nopts...).Miner(&main, &fullnode, mainNodeOpts...).Start() + ens.BeginMining(blockTime) + + marketNodeOpts := []NodeOpt{OwnerAddr(fullnode.DefaultKey), MainMiner(&main), WithSubsystems(SMarkets)} + marketNodeOpts = append(marketNodeOpts, nopts...) 
+ + ens.Miner(&market, &fullnode, marketNodeOpts...).Start().Connect(market, fullnode) + + return &fullnode, &main, &market, ens +} + // EnsembleTwoOne creates and starts an Ensemble with two full nodes and one miner. // It does not interconnect nodes nor does it begin mining. // // This function supports passing both ensemble and node functional options. // Functional options are applied to all nodes. func EnsembleTwoOne(t *testing.T, opts ...interface{}) (*TestFullNode, *TestFullNode, *TestMiner, *Ensemble) { + opts = append(opts, WithAllSubsystems()) + eopts, nopts := siftOptions(t, opts) var ( @@ -40,6 +70,8 @@ func EnsembleTwoOne(t *testing.T, opts ...interface{}) (*TestFullNode, *TestFull // This function supports passing both ensemble and node functional options. // Functional options are applied to all nodes. func EnsembleOneTwo(t *testing.T, opts ...interface{}) (*TestFullNode, *TestMiner, *TestMiner, *Ensemble) { + opts = append(opts, WithAllSubsystems()) + eopts, nopts := siftOptions(t, opts) var ( diff --git a/itests/kit/funds.go b/itests/kit/funds.go index 417cf9ce1b1..e49c708ea9b 100644 --- a/itests/kit/funds.go +++ b/itests/kit/funds.go @@ -4,9 +4,11 @@ import ( "context" "testing" - "github.com/filecoin-project/go-state-types/abi" "github.com/stretchr/testify/require" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + "github.com/filecoin-project/go-address" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/types" @@ -27,8 +29,12 @@ func SendFunds(ctx context.Context, t *testing.T, sender *TestFullNode, recipien sm, err := sender.MpoolPushMessage(ctx, msg, nil) require.NoError(t, err) - res, err := sender.StateWaitMsg(ctx, sm.Cid(), 3, api.LookbackNoLimit, true) - require.NoError(t, err) + sender.WaitMsg(ctx, sm.Cid()) +} + +func (f *TestFullNode) WaitMsg(ctx context.Context, msg cid.Cid) { + res, err := f.StateWaitMsg(ctx, msg, 3, api.LookbackNoLimit, true) + require.NoError(f.t, err) - 
require.EqualValues(t, 0, res.Receipt.ExitCode, "did not successfully send funds") + require.EqualValues(f.t, 0, res.Receipt.ExitCode, "message did not successfully execute") } diff --git a/itests/kit/init.go b/itests/kit/init.go index 8df4922b864..dc8463cb4e4 100644 --- a/itests/kit/init.go +++ b/itests/kit/init.go @@ -5,6 +5,7 @@ import ( "os" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/policy" logging "github.com/ipfs/go-log/v2" @@ -13,6 +14,8 @@ import ( func init() { _ = logging.SetLogLevel("*", "INFO") + policy.SetProviderCollateralSupplyTarget(big.Zero(), big.NewInt(1)) + policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048)) policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1) policy.SetMinVerifiedDealSize(abi.NewStoragePower(256)) diff --git a/itests/kit/node_miner.go b/itests/kit/node_miner.go index eea2bc0c1c9..ff406629ca6 100644 --- a/itests/kit/node_miner.go +++ b/itests/kit/node_miner.go @@ -5,6 +5,7 @@ import ( "encoding/json" "fmt" "io/ioutil" + "net" "os" "path/filepath" "strings" @@ -27,6 +28,35 @@ import ( "github.com/multiformats/go-multiaddr" ) +type MinerSubsystem int + +const ( + SMarkets MinerSubsystem = 1 << iota + SMining + SSealing + SSectorStorage + + MinerSubsystems = iota +) + +func (ms MinerSubsystem) Add(single MinerSubsystem) MinerSubsystem { + return ms | single +} + +func (ms MinerSubsystem) Has(single MinerSubsystem) bool { + return ms&single == single +} + +func (ms MinerSubsystem) All() [MinerSubsystems]bool { + var out [MinerSubsystems]bool + + for i := range out { + out[i] = ms&(1< 0 + } + + return out +} + // TestMiner represents a miner enrolled in an Ensemble. 
type TestMiner struct { api.StorageMiner @@ -50,6 +80,8 @@ type TestMiner struct { PrivKey libp2pcrypto.PrivKey } + RemoteListener net.Listener + options nodeOpts } diff --git a/itests/kit/node_opts.go b/itests/kit/node_opts.go index ae99f3f2912..87707aa16c8 100644 --- a/itests/kit/node_opts.go +++ b/itests/kit/node_opts.go @@ -25,6 +25,10 @@ type nodeOpts struct { rpc bool ownerKey *wallet.Key extraNodeOpts []node.Option + + subsystems MinerSubsystem + mainMiner *TestMiner + disableLibp2p bool optBuilders []OptBuilder proofType abi.RegisteredSealProof } @@ -43,6 +47,40 @@ type OptBuilder func(activeNodes []*TestFullNode) node.Option // NodeOpt is a functional option for test nodes. type NodeOpt func(opts *nodeOpts) error +func WithAllSubsystems() NodeOpt { + return func(opts *nodeOpts) error { + opts.subsystems = opts.subsystems.Add(SMarkets) + opts.subsystems = opts.subsystems.Add(SMining) + opts.subsystems = opts.subsystems.Add(SSealing) + opts.subsystems = opts.subsystems.Add(SSectorStorage) + + return nil + } +} + +func WithSubsystems(systems ...MinerSubsystem) NodeOpt { + return func(opts *nodeOpts) error { + for _, s := range systems { + opts.subsystems = opts.subsystems.Add(s) + } + return nil + } +} + +func DisableLibp2p() NodeOpt { + return func(opts *nodeOpts) error { + opts.disableLibp2p = true + return nil + } +} + +func MainMiner(m *TestMiner) NodeOpt { + return func(opts *nodeOpts) error { + opts.mainMiner = m + return nil + } +} + // OwnerBalance specifies the balance to be attributed to a miner's owner // account. Only relevant when creating a miner. 
func OwnerBalance(balance abi.TokenAmount) NodeOpt { diff --git a/itests/kit/rpc.go b/itests/kit/rpc.go index dab45df0773..35153eb644b 100644 --- a/itests/kit/rpc.go +++ b/itests/kit/rpc.go @@ -2,6 +2,8 @@ package kit import ( "context" + "fmt" + "net" "net/http" "net/http/httptest" "testing" @@ -13,8 +15,13 @@ import ( "github.com/stretchr/testify/require" ) -func CreateRPCServer(t *testing.T, handler http.Handler) (*httptest.Server, multiaddr.Multiaddr) { - testServ := httptest.NewServer(handler) +func CreateRPCServer(t *testing.T, handler http.Handler, listener net.Listener) (*httptest.Server, multiaddr.Multiaddr) { + testServ := &httptest.Server{ + Listener: listener, + Config: &http.Server{Handler: handler}, + } + testServ.Start() + t.Cleanup(testServ.Close) t.Cleanup(testServ.CloseClientConnections) @@ -28,7 +35,10 @@ func fullRpc(t *testing.T, f *TestFullNode) *TestFullNode { handler, err := node.FullNodeHandler(f.FullNode, false) require.NoError(t, err) - srv, maddr := CreateRPCServer(t, handler) + l, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + + srv, maddr := CreateRPCServer(t, handler, l) cl, stop, err := client.NewFullNodeRPCV1(context.Background(), "ws://"+srv.Listener.Addr().String()+"/rpc/v1", nil) require.NoError(t, err) @@ -42,9 +52,11 @@ func minerRpc(t *testing.T, m *TestMiner) *TestMiner { handler, err := node.MinerHandler(m.StorageMiner, false) require.NoError(t, err) - srv, maddr := CreateRPCServer(t, handler) + srv, maddr := CreateRPCServer(t, handler, m.RemoteListener) - cl, stop, err := client.NewStorageMinerRPCV0(context.Background(), "ws://"+srv.Listener.Addr().String()+"/rpc/v0", nil) + fmt.Println("creating RPC server for", m.ActorAddr, "at: ", srv.Listener.Addr().String()) + url := "ws://" + srv.Listener.Addr().String() + "/rpc/v0" + cl, stop, err := client.NewStorageMinerRPCV0(context.Background(), url, nil) require.NoError(t, err) t.Cleanup(stop) diff --git a/itests/nonce_test.go b/itests/nonce_test.go new file 
mode 100644 index 00000000000..b50fcbe2660 --- /dev/null +++ b/itests/nonce_test.go @@ -0,0 +1,57 @@ +package itests + +import ( + "context" + "testing" + "time" + + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/itests/kit" + "github.com/stretchr/testify/require" +) + +func TestNonceIncremental(t *testing.T) { + ctx := context.Background() + + kit.QuietMiningLogs() + + client, _, ens := kit.EnsembleMinimal(t, kit.MockProofs()) + ens.InterconnectAll().BeginMining(10 * time.Millisecond) + + // create a new address where to send funds. + addr, err := client.WalletNew(ctx, types.KTBLS) + require.NoError(t, err) + + // get the existing balance from the default wallet to then split it. + bal, err := client.WalletBalance(ctx, client.DefaultKey.Address) + require.NoError(t, err) + + const iterations = 100 + + // we'll send half our balance (saving the other half for gas), + // in `iterations` increments. + toSend := big.Div(bal, big.NewInt(2)) + each := big.Div(toSend, big.NewInt(iterations)) + + var sms []*types.SignedMessage + for i := 0; i < iterations; i++ { + msg := &types.Message{ + From: client.DefaultKey.Address, + To: addr, + Value: each, + } + + sm, err := client.MpoolPushMessage(ctx, msg, nil) + require.NoError(t, err) + require.EqualValues(t, i, sm.Message.Nonce) + + sms = append(sms, sm) + } + + for _, sm := range sms { + _, err := client.StateWaitMsg(ctx, sm.Cid(), 3, api.LookbackNoLimit, true) + require.NoError(t, err) + } +} diff --git a/itests/paych_api_test.go b/itests/paych_api_test.go index 668eb14aab5..647db21e00f 100644 --- a/itests/paych_api_test.go +++ b/itests/paych_api_test.go @@ -41,7 +41,7 @@ func TestPaymentChannelsAPI(t *testing.T) { ens := kit.NewEnsemble(t, kit.MockProofs()). FullNode(&paymentCreator). FullNode(&paymentReceiver). - Miner(&miner, &paymentCreator). 
+ Miner(&miner, &paymentCreator, kit.WithAllSubsystems()). Start(). InterconnectAll() bms := ens.BeginMining(blockTime) diff --git a/itests/paych_cli_test.go b/itests/paych_cli_test.go index 8a069044973..82955e6c1e8 100644 --- a/itests/paych_cli_test.go +++ b/itests/paych_cli_test.go @@ -17,7 +17,6 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin/paych" - "github.com/filecoin-project/lotus/chain/actors/policy" cbor "github.com/ipfs/go-ipld-cbor" "github.com/stretchr/testify/require" @@ -27,13 +26,7 @@ import ( "github.com/filecoin-project/lotus/chain/types" ) -func init() { - policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1) - policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048)) - policy.SetMinVerifiedDealSize(abi.NewStoragePower(256)) -} - -// TestPaymentChannels does a basic test to exercise the payment channel CLI +// TestPaymentChannelsBasic does a basic test to exercise the payment channel CLI // commands func TestPaymentChannelsBasic(t *testing.T) { _ = os.Setenv("BELLMAN_NO_GPU", "1") @@ -420,7 +413,7 @@ func startPaychCreatorReceiverMiner(ctx context.Context, t *testing.T, paymentCr kit.NewEnsemble(t, kit.MockProofs()). FullNode(paymentCreator, opts). FullNode(paymentReceiver, opts). - Miner(&miner, paymentCreator). + Miner(&miner, paymentCreator, kit.WithAllSubsystems()). Start(). InterconnectAll(). BeginMining(blocktime) diff --git a/itests/sector_finalize_early_test.go b/itests/sector_finalize_early_test.go index 3eb980f9e2e..fa5cc9dd303 100644 --- a/itests/sector_finalize_early_test.go +++ b/itests/sector_finalize_early_test.go @@ -35,7 +35,7 @@ func TestDealsWithFinalizeEarly(t *testing.T) { }, nil })))) // no mock proofs. 
ens.InterconnectAll().BeginMining(blockTime) - dh := kit.NewDealHarness(t, client, miner) + dh := kit.NewDealHarness(t, client, miner, miner) ctx := context.Background() diff --git a/itests/sector_miner_collateral_test.go b/itests/sector_miner_collateral_test.go new file mode 100644 index 00000000000..8e7525dba1d --- /dev/null +++ b/itests/sector_miner_collateral_test.go @@ -0,0 +1,132 @@ +package itests + +import ( + "context" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + sealing "github.com/filecoin-project/lotus/extern/storage-sealing" + "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" + "github.com/filecoin-project/lotus/itests/kit" + "github.com/filecoin-project/lotus/node" + "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/filecoin-project/lotus/node/repo" +) + +func TestMinerBalanceCollateral(t *testing.T) { + kit.QuietMiningLogs() + + blockTime := 5 * time.Millisecond + + runTest := func(t *testing.T, enabled bool, nSectors int, batching bool) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + opts := kit.ConstructorOpts( + kit.LatestActorsAt(-1), + node.ApplyIf(node.IsType(repo.StorageMiner), node.Override(new(dtypes.GetSealingConfigFunc), func() (dtypes.GetSealingConfigFunc, error) { + return func() (sealiface.Config, error) { + return sealiface.Config{ + MaxWaitDealsSectors: 4, + MaxSealingSectors: 4, + MaxSealingSectorsForDeals: 4, + AlwaysKeepUnsealedCopy: true, + WaitDealsDelay: time.Hour, + + BatchPreCommits: batching, + AggregateCommits: batching, + + PreCommitBatchWait: time.Hour, + CommitBatchWait: time.Hour, + + MinCommitBatch: nSectors, + MaxPreCommitBatch: nSectors, + MaxCommitBatch: nSectors, + + CollateralFromMinerBalance: enabled, + AvailableBalanceBuffer: big.Zero(), + DisableCollateralFallback: false, 
+ AggregateAboveBaseFee: big.Zero(), + }, nil + }, nil + })), + ) + full, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), opts) + ens.InterconnectAll().BeginMining(blockTime) + full.WaitTillChain(ctx, kit.HeightAtLeast(10)) + + toCheck := miner.StartPledge(ctx, nSectors, 0, nil) + + for len(toCheck) > 0 { + states := map[api.SectorState]int{} + for n := range toCheck { + st, err := miner.StorageMiner.SectorsStatus(ctx, n, false) + require.NoError(t, err) + states[st.State]++ + if st.State == api.SectorState(sealing.Proving) { + delete(toCheck, n) + } + if strings.Contains(string(st.State), "Fail") { + t.Fatal("sector in a failed state", st.State) + } + } + + build.Clock.Sleep(100 * time.Millisecond) + } + + // check that sector messages had zero value set + sl, err := miner.SectorsList(ctx) + require.NoError(t, err) + + for _, number := range sl { + si, err := miner.SectorsStatus(ctx, number, false) + require.NoError(t, err) + + require.NotNil(t, si.PreCommitMsg) + pc, err := full.ChainGetMessage(ctx, *si.PreCommitMsg) + require.NoError(t, err) + if enabled { + require.Equal(t, big.Zero(), pc.Value) + } else { + require.NotEqual(t, big.Zero(), pc.Value) + } + + require.NotNil(t, si.CommitMsg) + c, err := full.ChainGetMessage(ctx, *si.CommitMsg) + require.NoError(t, err) + if enabled { + require.Equal(t, big.Zero(), c.Value) + } + // commit value might be zero even with !enabled because in test devnets + // precommit deposit tends to be greater than collateral required at + // commit time. 
+ } + } + + t.Run("nobatch", func(t *testing.T) { + runTest(t, true, 1, false) + }) + t.Run("batch-1", func(t *testing.T) { + runTest(t, true, 1, true) // individual commit instead of aggregate + }) + t.Run("batch-4", func(t *testing.T) { + runTest(t, true, 4, true) + }) + + t.Run("nobatch-frombalance-disabled", func(t *testing.T) { + runTest(t, false, 1, false) + }) + t.Run("batch-1-frombalance-disabled", func(t *testing.T) { + runTest(t, false, 1, true) // individual commit instead of aggregate + }) + t.Run("batch-4-frombalance-disabled", func(t *testing.T) { + runTest(t, false, 4, true) + }) +} diff --git a/itests/wdpost_dispute_test.go b/itests/wdpost_dispute_test.go index 554672ce3c3..f7388203273 100644 --- a/itests/wdpost_dispute_test.go +++ b/itests/wdpost_dispute_test.go @@ -39,11 +39,12 @@ func TestWindowPostDispute(t *testing.T) { // it doesn't submit proofs. // // Then we're going to manually submit bad proofs. - opts := kit.ConstructorOpts(kit.LatestActorsAt(-1)) + opts := []kit.NodeOpt{kit.ConstructorOpts(kit.LatestActorsAt(-1))} + opts = append(opts, kit.WithAllSubsystems()) ens := kit.NewEnsemble(t, kit.MockProofs()). - FullNode(&client, opts). - Miner(&chainMiner, &client, opts). - Miner(&evilMiner, &client, opts, kit.PresealSectors(0)). + FullNode(&client, opts...). + Miner(&chainMiner, &client, opts...). + Miner(&evilMiner, &client, append(opts, kit.PresealSectors(0))...). 
Start() defaultFrom, err := client.WalletDefaultAddress(ctx) diff --git a/itests/wdpost_test.go b/itests/wdpost_test.go index e5a4fcee18d..6764350ccb0 100644 --- a/itests/wdpost_test.go +++ b/itests/wdpost_test.go @@ -213,12 +213,18 @@ func TestWindowPostBaseFeeNoBurn(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() + sched := kit.DefaultTestUpgradeSchedule + lastUpgradeHeight := sched[len(sched)-1].Height + och := build.UpgradeClausHeight - build.UpgradeClausHeight = 10 + build.UpgradeClausHeight = lastUpgradeHeight + 1 client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs()) ens.InterconnectAll().BeginMining(blocktime) + // Wait till all upgrades are done and we've passed the clause epoch. + client.WaitTillChain(ctx, kit.HeightAtLeast(build.UpgradeClausHeight+1)) + maddr, err := miner.ActorAddress(ctx) require.NoError(t, err) @@ -268,6 +274,12 @@ func TestWindowPostBaseFeeBurn(t *testing.T) { client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), opts) ens.InterconnectAll().BeginMining(blocktime) + // Ideally we'd be a bit more precise here, but getting the information we need from the + // test framework is more work than it's worth. + // + // We just need to wait till all upgrades are done. + client.WaitTillChain(ctx, kit.HeightAtLeast(20)) + maddr, err := miner.ActorAddress(ctx) require.NoError(t, err) diff --git a/lib/rpcenc/reader.go b/lib/rpcenc/reader.go index 8bd51270543..23944af6cd7 100644 --- a/lib/rpcenc/reader.go +++ b/lib/rpcenc/reader.go @@ -78,27 +78,38 @@ func ReaderParamEncoder(addr string) jsonrpc.Option { }) } -type waitReadCloser struct { +// watchReadCloser watches the ReadCloser and closes the watch channel when +// either: (1) the ReaderCloser fails on Read (including with a benign error +// like EOF), or (2) when Close is called. +// +// Use it be notified of terminal states, in situations where a Read failure (or +// EOF) is considered a terminal state too (besides Close). 
+type watchReadCloser struct { io.ReadCloser - wait chan struct{} + watch chan struct{} + closeOnce sync.Once } -func (w *waitReadCloser) Read(p []byte) (int, error) { +func (w *watchReadCloser) Read(p []byte) (int, error) { n, err := w.ReadCloser.Read(p) if err != nil { - close(w.wait) + w.closeOnce.Do(func() { + close(w.watch) + }) } return n, err } -func (w *waitReadCloser) Close() error { - close(w.wait) +func (w *watchReadCloser) Close() error { + w.closeOnce.Do(func() { + close(w.watch) + }) return w.ReadCloser.Close() } func ReaderParamDecoder() (http.HandlerFunc, jsonrpc.ServerOption) { var readersLk sync.Mutex - readers := map[uuid.UUID]chan *waitReadCloser{} + readers := map[uuid.UUID]chan *watchReadCloser{} hnd := func(resp http.ResponseWriter, req *http.Request) { strId := path.Base(req.URL.Path) @@ -111,14 +122,14 @@ func ReaderParamDecoder() (http.HandlerFunc, jsonrpc.ServerOption) { readersLk.Lock() ch, found := readers[u] if !found { - ch = make(chan *waitReadCloser) + ch = make(chan *watchReadCloser) readers[u] = ch } readersLk.Unlock() - wr := &waitReadCloser{ + wr := &watchReadCloser{ ReadCloser: req.Body, - wait: make(chan struct{}), + watch: make(chan struct{}), } tctx, cancel := context.WithTimeout(req.Context(), Timeout) @@ -134,7 +145,9 @@ func ReaderParamDecoder() (http.HandlerFunc, jsonrpc.ServerOption) { } select { - case <-wr.wait: + case <-wr.watch: + // TODO should we check if we failed the Read, and if so + // return an HTTP 500? i.e. turn watch into a chan error? 
case <-req.Context().Done(): log.Errorf("context error in reader stream handler (2): %v", req.Context().Err()) resp.WriteHeader(500) @@ -167,7 +180,7 @@ func ReaderParamDecoder() (http.HandlerFunc, jsonrpc.ServerOption) { readersLk.Lock() ch, found := readers[u] if !found { - ch = make(chan *waitReadCloser) + ch = make(chan *watchReadCloser) readers[u] = ch } readersLk.Unlock() diff --git a/markets/loggers/loggers.go b/markets/loggers/loggers.go index e5f669f2f5c..2acf987cb13 100644 --- a/markets/loggers/loggers.go +++ b/markets/loggers/loggers.go @@ -40,7 +40,7 @@ func DataTransferLogger(event datatransfer.Event, state datatransfer.ChannelStat "sent", state.Sent(), "received", state.Received(), "queued", state.Queued(), - "received count", len(state.ReceivedCids()), + "received count", state.ReceivedCidsLen(), "total size", state.TotalSize(), "remote peer", state.OtherPeer(), "event message", event.Message, diff --git a/markets/retrievaladapter/provider.go b/markets/retrievaladapter/provider.go index 95b7e1b3ce0..2f630580569 100644 --- a/markets/retrievaladapter/provider.go +++ b/markets/retrievaladapter/provider.go @@ -4,37 +4,42 @@ import ( "context" "io" + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api/v1api" + "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/filecoin-project/lotus/storage/sectorblocks" + "github.com/hashicorp/go-multierror" "golang.org/x/xerrors" "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log/v2" "github.com/filecoin-project/lotus/chain/actors/builtin/paych" "github.com/filecoin-project/lotus/chain/types" sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" - "github.com/filecoin-project/lotus/storage" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-fil-markets/shared" 
"github.com/filecoin-project/go-state-types/abi" specstorage "github.com/filecoin-project/specs-storage/storage" + + logging "github.com/ipfs/go-log/v2" ) var log = logging.Logger("retrievaladapter") type retrievalProviderNode struct { - miner *storage.Miner + maddr address.Address + secb sectorblocks.SectorBuilder pp sectorstorage.PieceProvider full v1api.FullNode } // NewRetrievalProviderNode returns a new node adapter for a retrieval provider that talks to the // Lotus Node -func NewRetrievalProviderNode(miner *storage.Miner, pp sectorstorage.PieceProvider, full v1api.FullNode) retrievalmarket.RetrievalProviderNode { - return &retrievalProviderNode{miner, pp, full} +func NewRetrievalProviderNode(maddr dtypes.MinerAddress, secb sectorblocks.SectorBuilder, pp sectorstorage.PieceProvider, full v1api.FullNode) retrievalmarket.RetrievalProviderNode { + return &retrievalProviderNode{address.Address(maddr), secb, pp, full} } func (rpn *retrievalProviderNode) GetMinerWorkerAddress(ctx context.Context, miner address.Address, tok shared.TipSetToken) (address.Address, error) { @@ -49,13 +54,12 @@ func (rpn *retrievalProviderNode) GetMinerWorkerAddress(ctx context.Context, min func (rpn *retrievalProviderNode) UnsealSector(ctx context.Context, sectorID abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (io.ReadCloser, error) { log.Debugf("get sector %d, offset %d, length %d", sectorID, offset, length) - - si, err := rpn.miner.GetSectorInfo(sectorID) + si, err := rpn.sectorsStatus(ctx, sectorID, false) if err != nil { return nil, err } - mid, err := address.IDFromAddress(rpn.miner.Address()) + mid, err := address.IDFromAddress(rpn.maddr) if err != nil { return nil, err } @@ -65,7 +69,7 @@ func (rpn *retrievalProviderNode) UnsealSector(ctx context.Context, sectorID abi Miner: abi.ActorID(mid), Number: sectorID, }, - ProofType: si.SectorType, + ProofType: si.SealProof, } var commD cid.Cid @@ -75,7 +79,7 @@ func (rpn *retrievalProviderNode) 
UnsealSector(ctx context.Context, sectorID abi // Get a reader for the piece, unsealing the piece if necessary log.Debugf("read piece in sector %d, offset %d, length %d from miner %d", sectorID, offset, length, mid) - r, unsealed, err := rpn.pp.ReadPiece(ctx, ref, storiface.UnpaddedByteIndex(offset), length, si.TicketValue, commD) + r, unsealed, err := rpn.pp.ReadPiece(ctx, ref, storiface.UnpaddedByteIndex(offset), length, si.Ticket.Value, commD) if err != nil { return nil, xerrors.Errorf("failed to unseal piece from sector %d: %w", sectorID, err) } @@ -101,12 +105,12 @@ func (rpn *retrievalProviderNode) GetChainHead(ctx context.Context) (shared.TipS } func (rpn *retrievalProviderNode) IsUnsealed(ctx context.Context, sectorID abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (bool, error) { - si, err := rpn.miner.GetSectorInfo(sectorID) + si, err := rpn.sectorsStatus(ctx, sectorID, true) if err != nil { - return false, xerrors.Errorf("failed to get sectorinfo, err=%s", err) + return false, xerrors.Errorf("failed to get sector info: %w", err) } - mid, err := address.IDFromAddress(rpn.miner.Address()) + mid, err := address.IDFromAddress(rpn.maddr) if err != nil { return false, err } @@ -116,7 +120,7 @@ func (rpn *retrievalProviderNode) IsUnsealed(ctx context.Context, sectorID abi.S Miner: abi.ActorID(mid), Number: sectorID, }, - ProofType: si.SectorType, + ProofType: si.SealProof, } log.Debugf("will call IsUnsealed now sector=%+v, offset=%d, size=%d", sectorID, offset, length) @@ -135,10 +139,14 @@ func (rpn *retrievalProviderNode) GetRetrievalPricingInput(ctx context.Context, } tsk := head.Key() + var mErr error + for _, dealID := range storageDeals { ds, err := rpn.full.StateMarketStorageDeal(ctx, dealID, tsk) if err != nil { - return resp, xerrors.Errorf("failed to look up deal %d on chain: err=%w", dealID, err) + log.Warnf("failed to look up deal %d on chain: err=%w", dealID, err) + mErr = multierror.Append(mErr, err) + continue } if 
ds.Proposal.VerifiedDeal { resp.VerifiedDeal = true @@ -158,8 +166,46 @@ func (rpn *retrievalProviderNode) GetRetrievalPricingInput(ctx context.Context, // Note: The piece size can never actually be zero. We only use it to here // to assert that we didn't find a matching piece. if resp.PieceSize == 0 { - return resp, xerrors.New("failed to find matching piece") + if mErr == nil { + return resp, xerrors.New("failed to find matching piece") + } + + return resp, xerrors.Errorf("failed to fetch storage deal state: %w", mErr) } return resp, nil } + +func (rpn *retrievalProviderNode) sectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (api.SectorInfo, error) { + sInfo, err := rpn.secb.SectorsStatus(ctx, sid, false) + if err != nil { + return api.SectorInfo{}, err + } + + if !showOnChainInfo { + return sInfo, nil + } + + onChainInfo, err := rpn.full.StateSectorGetInfo(ctx, rpn.maddr, sid, types.EmptyTSK) + if err != nil { + return sInfo, err + } + if onChainInfo == nil { + return sInfo, nil + } + sInfo.SealProof = onChainInfo.SealProof + sInfo.Activation = onChainInfo.Activation + sInfo.Expiration = onChainInfo.Expiration + sInfo.DealWeight = onChainInfo.DealWeight + sInfo.VerifiedDealWeight = onChainInfo.VerifiedDealWeight + sInfo.InitialPledge = onChainInfo.InitialPledge + + ex, err := rpn.full.StateSectorExpiration(ctx, rpn.maddr, sid, types.EmptyTSK) + if err != nil { + return sInfo, nil + } + sInfo.OnTime = ex.OnTime + sInfo.Early = ex.Early + + return sInfo, nil +} diff --git a/markets/retrievaladapter/provider_test.go b/markets/retrievaladapter/provider_test.go index 5cdf5d0600f..eca3b11527e 100644 --- a/markets/retrievaladapter/provider_test.go +++ b/markets/retrievaladapter/provider_test.go @@ -66,6 +66,31 @@ func TestGetPricingInput(t *testing.T) { expectedErrorStr: "failed to find matching piece", }, + "error when fails to fetch deal state": { + fFnc: func(n *mocks.MockFullNode) { + out1 := &api.MarketDeal{ + Proposal: 
market.DealProposal{ + PieceCID: pcid, + PieceSize: paddedSize, + }, + } + out2 := &api.MarketDeal{ + Proposal: market.DealProposal{ + PieceCID: testnet.GenerateCids(1)[0], + VerifiedDeal: true, + }, + } + + n.EXPECT().ChainHead(gomock.Any()).Return(tsk, nil).Times(1) + gomock.InOrder( + n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[0], key).Return(out1, xerrors.New("error 1")), + n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[1], key).Return(out2, xerrors.New("error 2")), + ) + + }, + expectedErrorStr: "failed to fetch storage deal state", + }, + "verified is true even if one deal is verified and we get the correct piecesize": { fFnc: func(n *mocks.MockFullNode) { out1 := &api.MarketDeal{ @@ -92,6 +117,32 @@ func TestGetPricingInput(t *testing.T) { expectedVerified: true, }, + "success even if one deal state fetch errors out but the other deal is verified and has the required piececid": { + fFnc: func(n *mocks.MockFullNode) { + out1 := &api.MarketDeal{ + Proposal: market.DealProposal{ + PieceCID: testnet.GenerateCids(1)[0], + }, + } + out2 := &api.MarketDeal{ + Proposal: market.DealProposal{ + PieceCID: pcid, + PieceSize: paddedSize, + VerifiedDeal: true, + }, + } + + n.EXPECT().ChainHead(gomock.Any()).Return(tsk, nil).Times(1) + gomock.InOrder( + n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[0], key).Return(out1, xerrors.New("some error")), + n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[1], key).Return(out2, nil), + ) + + }, + expectedPieceSize: unpaddedSize, + expectedVerified: true, + }, + "verified is false if both deals are unverified and we get the correct piece size": { fFnc: func(n *mocks.MockFullNode) { out1 := &api.MarketDeal{ diff --git a/markets/storageadapter/client.go b/markets/storageadapter/client.go index 9357cc271d9..80ead2be3b4 100644 --- a/markets/storageadapter/client.go +++ b/markets/storageadapter/client.go @@ -160,8 +160,16 @@ func (c *ClientNodeAdapter) ValidatePublishedDeal(ctx context.Context, deal stor 
return 0, xerrors.Errorf("failed to resolve from msg ID addr: %w", err) } - if fromid != mi.Worker { - return 0, xerrors.Errorf("deal wasn't published by storage provider: from=%s, provider=%s", pubmsg.From, deal.Proposal.Provider) + var pubOk bool + pubAddrs := append([]address.Address{mi.Worker, mi.Owner}, mi.ControlAddresses...) + for _, a := range pubAddrs { + if fromid == a { + pubOk = true + break + } + } + if !pubOk { + return 0, xerrors.Errorf("deal wasn't published by storage provider: from=%s, provider=%s,%+v", pubmsg.From, deal.Proposal.Provider, pubAddrs) } if pubmsg.To != miner2.StorageMarketActorAddr { diff --git a/markets/storageadapter/dealpublisher.go b/markets/storageadapter/dealpublisher.go index 157c85ed76f..9f7ba162953 100644 --- a/markets/storageadapter/dealpublisher.go +++ b/markets/storageadapter/dealpublisher.go @@ -7,27 +7,33 @@ import ( "sync" "time" + "github.com/ipfs/go-cid" "go.uber.org/fx" + "golang.org/x/xerrors" + "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/node/config" + "github.com/filecoin-project/go-state-types/big" + market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" - "github.com/filecoin-project/go-address" "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/builtin/market" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" - market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" - "github.com/ipfs/go-cid" - "golang.org/x/xerrors" + "github.com/filecoin-project/lotus/node/config" + "github.com/filecoin-project/lotus/storage" ) type dealPublisherAPI interface { ChainHead(context.Context) (*types.TipSet, error) MpoolPushMessage(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec) (*types.SignedMessage, error) StateMinerInfo(context.Context, 
address.Address, types.TipSetKey) (miner.MinerInfo, error) + + WalletBalance(context.Context, address.Address) (types.BigInt, error) + WalletHas(context.Context, address.Address) (bool, error) + StateAccountKey(context.Context, address.Address, types.TipSetKey) (address.Address, error) + StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error) } // DealPublisher batches deal publishing so that many deals can be included in @@ -40,6 +46,7 @@ type dealPublisherAPI interface { // publish message with all deals in the queue. type DealPublisher struct { api dealPublisherAPI + as *storage.AddressSelector ctx context.Context Shutdown context.CancelFunc @@ -87,14 +94,14 @@ type PublishMsgConfig struct { func NewDealPublisher( feeConfig *config.MinerFeeConfig, publishMsgCfg PublishMsgConfig, -) func(lc fx.Lifecycle, full api.FullNode) *DealPublisher { - return func(lc fx.Lifecycle, full api.FullNode) *DealPublisher { +) func(lc fx.Lifecycle, full api.FullNode, as *storage.AddressSelector) *DealPublisher { + return func(lc fx.Lifecycle, full api.FullNode, as *storage.AddressSelector) *DealPublisher { maxFee := abi.NewTokenAmount(0) if feeConfig != nil { maxFee = abi.TokenAmount(feeConfig.MaxPublishDealsFee) } publishSpec := &api.MessageSendSpec{MaxFee: maxFee} - dp := newDealPublisher(full, publishMsgCfg, publishSpec) + dp := newDealPublisher(full, as, publishMsgCfg, publishSpec) lc.Append(fx.Hook{ OnStop: func(ctx context.Context) error { dp.Shutdown() @@ -107,12 +114,14 @@ func NewDealPublisher( func newDealPublisher( dpapi dealPublisherAPI, + as *storage.AddressSelector, publishMsgCfg PublishMsgConfig, publishSpec *api.MessageSendSpec, ) *DealPublisher { ctx, cancel := context.WithCancel(context.Background()) return &DealPublisher{ api: dpapi, + as: as, ctx: ctx, Shutdown: cancel, maxDealsPerPublishMsg: publishMsgCfg.MaxDealsPerMsg, @@ -345,9 +354,14 @@ func (p *DealPublisher) publishDealProposals(deals []market2.ClientDealProposal) return 
cid.Undef, xerrors.Errorf("serializing PublishStorageDeals params failed: %w", err) } + addr, _, err := p.as.AddressFor(p.ctx, p.api, mi, api.DealPublishAddr, big.Zero(), big.Zero()) + if err != nil { + return cid.Undef, xerrors.Errorf("selecting address for publishing deals: %w", err) + } + smsg, err := p.api.MpoolPushMessage(p.ctx, &types.Message{ To: market.Address, - From: mi.Worker, + From: addr, Value: types.NewInt(0), Method: market.Methods.PublishStorageDeals, Params: params, diff --git a/markets/storageadapter/dealpublisher_test.go b/markets/storageadapter/dealpublisher_test.go index 746c67d0ef9..b2f107bf4e9 100644 --- a/markets/storageadapter/dealpublisher_test.go +++ b/markets/storageadapter/dealpublisher_test.go @@ -25,6 +25,7 @@ import ( ) func TestDealPublisher(t *testing.T) { + t.Skip("this test randomly fails in various subtests; see issue #6799") testCases := []struct { name string publishPeriod time.Duration @@ -94,7 +95,7 @@ func TestDealPublisher(t *testing.T) { dpapi := newDPAPI(t) // Create a deal publisher - dp := newDealPublisher(dpapi, PublishMsgConfig{ + dp := newDealPublisher(dpapi, nil, PublishMsgConfig{ Period: tc.publishPeriod, MaxDealsPerMsg: tc.maxDealsPerMsg, }, &api.MessageSendSpec{MaxFee: abi.NewTokenAmount(1)}) @@ -134,7 +135,7 @@ func TestForcePublish(t *testing.T) { // Create a deal publisher start := time.Now() publishPeriod := time.Hour - dp := newDealPublisher(dpapi, PublishMsgConfig{ + dp := newDealPublisher(dpapi, nil, PublishMsgConfig{ Period: publishPeriod, MaxDealsPerMsg: 10, }, &api.MessageSendSpec{MaxFee: abi.NewTokenAmount(1)}) @@ -320,6 +321,22 @@ func (d *dpAPI) MpoolPushMessage(ctx context.Context, msg *types.Message, spec * return &types.SignedMessage{Message: *msg}, nil } +func (d *dpAPI) WalletBalance(ctx context.Context, a address.Address) (types.BigInt, error) { + panic("don't call me") +} + +func (d *dpAPI) WalletHas(ctx context.Context, a address.Address) (bool, error) { + panic("don't call me") +} + +func 
(d *dpAPI) StateAccountKey(ctx context.Context, a address.Address, key types.TipSetKey) (address.Address, error) { + panic("don't call me") +} + +func (d *dpAPI) StateLookupID(ctx context.Context, a address.Address, key types.TipSetKey) (address.Address, error) { + panic("don't call me") +} + func getClientActor(t *testing.T) address.Address { return tutils.NewActorAddr(t, "client") } diff --git a/markets/storageadapter/provider.go b/markets/storageadapter/provider.go index fbeaf3b3dca..b899c081074 100644 --- a/markets/storageadapter/provider.go +++ b/markets/storageadapter/provider.go @@ -95,11 +95,11 @@ func (n *ProviderNodeAdapter) OnDealComplete(ctx context.Context, deal storagema return nil, xerrors.Errorf("deal.PublishCid can't be nil") } - sdInfo := sealing.DealInfo{ + sdInfo := api.PieceDealInfo{ DealID: deal.DealID, DealProposal: &deal.Proposal, PublishCid: deal.PublishCid, - DealSchedule: sealing.DealSchedule{ + DealSchedule: api.DealSchedule{ StartEpoch: deal.ClientDealProposal.Proposal.StartEpoch, EndEpoch: deal.ClientDealProposal.Proposal.EndEpoch, }, @@ -240,19 +240,19 @@ func (n *ProviderNodeAdapter) LocatePieceForDealWithinSector(ctx context.Context // TODO: better strategy (e.g. 
look for already unsealed) var best api.SealedRef - var bestSi sealing.SectorInfo + var bestSi api.SectorInfo for _, r := range refs { - si, err := n.secb.Miner.GetSectorInfo(r.SectorID) + si, err := n.secb.SectorBuilder.SectorsStatus(ctx, r.SectorID, false) if err != nil { return 0, 0, 0, xerrors.Errorf("getting sector info: %w", err) } - if si.State == sealing.Proving { + if si.State == api.SectorState(sealing.Proving) { best = r bestSi = si break } } - if bestSi.State == sealing.UndefinedSectorState { + if bestSi.State == api.SectorState(sealing.UndefinedSectorState) { return 0, 0, 0, xerrors.New("no sealed sector found") } return best.SectorID, best.Offset, best.Size.Padded(), nil diff --git a/metrics/proxy.go b/metrics/proxy.go index 7253a76c2e3..94798f5aa10 100644 --- a/metrics/proxy.go +++ b/metrics/proxy.go @@ -11,54 +11,54 @@ import ( func MetricedStorMinerAPI(a api.StorageMiner) api.StorageMiner { var out api.StorageMinerStruct - proxy(a, &out.Internal) - proxy(a, &out.CommonStruct.Internal) + proxy(a, &out) return &out } func MetricedFullAPI(a api.FullNode) api.FullNode { var out api.FullNodeStruct - proxy(a, &out.Internal) - proxy(a, &out.CommonStruct.Internal) + proxy(a, &out) return &out } func MetricedWorkerAPI(a api.Worker) api.Worker { var out api.WorkerStruct - proxy(a, &out.Internal) + proxy(a, &out) return &out } func MetricedWalletAPI(a api.Wallet) api.Wallet { var out api.WalletStruct - proxy(a, &out.Internal) + proxy(a, &out) return &out } func MetricedGatewayAPI(a api.Gateway) api.Gateway { var out api.GatewayStruct - proxy(a, &out.Internal) + proxy(a, &out) return &out } -func proxy(in interface{}, out interface{}) { - rint := reflect.ValueOf(out).Elem() - ra := reflect.ValueOf(in) +func proxy(in interface{}, outstr interface{}) { + outs := api.GetInternalStructs(outstr) + for _, out := range outs { + rint := reflect.ValueOf(out).Elem() + ra := reflect.ValueOf(in) - for f := 0; f < rint.NumField(); f++ { - field := rint.Type().Field(f) - fn 
:= ra.MethodByName(field.Name) - - rint.Field(f).Set(reflect.MakeFunc(field.Type, func(args []reflect.Value) (results []reflect.Value) { - ctx := args[0].Interface().(context.Context) - // upsert function name into context - ctx, _ = tag.New(ctx, tag.Upsert(Endpoint, field.Name)) - stop := Timer(ctx, APIRequestDuration) - defer stop() - // pass tagged ctx back into function call - args[0] = reflect.ValueOf(ctx) - return fn.Call(args) - })) + for f := 0; f < rint.NumField(); f++ { + field := rint.Type().Field(f) + fn := ra.MethodByName(field.Name) + rint.Field(f).Set(reflect.MakeFunc(field.Type, func(args []reflect.Value) (results []reflect.Value) { + ctx := args[0].Interface().(context.Context) + // upsert function name into context + ctx, _ = tag.New(ctx, tag.Upsert(Endpoint, field.Name)) + stop := Timer(ctx, APIRequestDuration) + defer stop() + // pass tagged ctx back into function call + args[0] = reflect.ValueOf(ctx) + return fn.Call(args) + })) + } } } diff --git a/miner/miner.go b/miner/miner.go index ee7d40c4fd3..1727f69420b 100644 --- a/miner/miner.go +++ b/miner/miner.go @@ -6,6 +6,7 @@ import ( "crypto/rand" "encoding/binary" "fmt" + "os" "sync" "time" @@ -325,7 +326,9 @@ minerLoop: if err := m.sf.MinedBlock(b.Header, base.TipSet.Height()+base.NullRounds); err != nil { log.Errorf(" SLASH FILTER ERROR: %s", err) - continue + if os.Getenv("LOTUS_MINER_NO_SLASHFILTER") != "_yes_i_know_i_can_and_probably_will_lose_all_my_fil_and_power_" { + continue + } } blkKey := fmt.Sprintf("%d", b.Header.Height) diff --git a/node/builder.go b/node/builder.go index 884261a89b9..6963cf4a455 100644 --- a/node/builder.go +++ b/node/builder.go @@ -6,16 +6,10 @@ import ( "os" "time" + "github.com/filecoin-project/lotus/node/impl/net" metricsi "github.com/ipfs/go-metrics-interface" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/chain" - "github.com/filecoin-project/lotus/chain/exchange" - rpcstmgr 
"github.com/filecoin-project/lotus/chain/stmgr/rpc" - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/vm" - "github.com/filecoin-project/lotus/chain/wallet" - "github.com/filecoin-project/lotus/node/hello" + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/system" logging "github.com/ipfs/go-log/v2" @@ -33,52 +27,22 @@ import ( "go.uber.org/fx" "golang.org/x/xerrors" - "github.com/filecoin-project/go-fil-markets/discovery" - discoveryimpl "github.com/filecoin-project/go-fil-markets/discovery/impl" - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/go-fil-markets/storagemarket/impl/storedask" - - storage2 "github.com/filecoin-project/specs-storage/storage" - - "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/beacon" - "github.com/filecoin-project/lotus/chain/gen" - "github.com/filecoin-project/lotus/chain/gen/slashfilter" - "github.com/filecoin-project/lotus/chain/market" - "github.com/filecoin-project/lotus/chain/messagepool" - "github.com/filecoin-project/lotus/chain/messagesigner" - "github.com/filecoin-project/lotus/chain/metrics" - "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/types" - ledgerwallet "github.com/filecoin-project/lotus/chain/wallet/ledger" - "github.com/filecoin-project/lotus/chain/wallet/remotewallet" - sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" - "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/extern/sector-storage/stores" - "github.com/filecoin-project/lotus/extern/sector-storage/storiface" - sealing "github.com/filecoin-project/lotus/extern/storage-sealing" "github.com/filecoin-project/lotus/journal" "github.com/filecoin-project/lotus/lib/peermgr" _ "github.com/filecoin-project/lotus/lib/sigs/bls" _ 
"github.com/filecoin-project/lotus/lib/sigs/secp" - "github.com/filecoin-project/lotus/markets/dealfilter" "github.com/filecoin-project/lotus/markets/storageadapter" - "github.com/filecoin-project/lotus/miner" "github.com/filecoin-project/lotus/node/config" - "github.com/filecoin-project/lotus/node/impl" "github.com/filecoin-project/lotus/node/impl/common" - "github.com/filecoin-project/lotus/node/impl/full" "github.com/filecoin-project/lotus/node/modules" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/modules/helpers" "github.com/filecoin-project/lotus/node/modules/lp2p" "github.com/filecoin-project/lotus/node/modules/testing" "github.com/filecoin-project/lotus/node/repo" - "github.com/filecoin-project/lotus/paychmgr" - "github.com/filecoin-project/lotus/paychmgr/settler" - "github.com/filecoin-project/lotus/storage" - "github.com/filecoin-project/lotus/storage/sectorblocks" ) //nolint:deadcode,varcheck @@ -167,9 +131,11 @@ type Settings struct { nodeType repo.RepoType - Online bool // Online option applied + Base bool // Base option applied Config bool // Config option applied Lite bool // Start node in "lite" mode + + enableLibp2pNode bool } // Basic lotus-app services @@ -246,257 +212,22 @@ func isFullOrLiteNode(s *Settings) bool { return s.nodeType == repo.FullNode } func isFullNode(s *Settings) bool { return s.nodeType == repo.FullNode && !s.Lite } func isLiteNode(s *Settings) bool { return s.nodeType == repo.FullNode && s.Lite } -// Chain node provides access to the Filecoin blockchain, by setting up a full -// validator node, or by delegating some actions to other nodes (lite mode) -var ChainNode = Options( - // Full node or lite node - // TODO: Fix offline mode - - // Consensus settings - Override(new(dtypes.DrandSchedule), modules.BuiltinDrandConfig), - Override(new(stmgr.UpgradeSchedule), stmgr.DefaultUpgradeSchedule()), - Override(new(dtypes.NetworkName), modules.NetworkName), - 
Override(new(modules.Genesis), modules.ErrorGenesis), - Override(new(dtypes.AfterGenesisSet), modules.SetGenesis), - Override(SetGenesisKey, modules.DoSetGenesis), - Override(new(beacon.Schedule), modules.RandomSchedule), - - // Network bootstrap - Override(new(dtypes.BootstrapPeers), modules.BuiltinBootstrap), - Override(new(dtypes.DrandBootstrap), modules.DrandBootstrap), - - // Consensus: crypto dependencies - Override(new(ffiwrapper.Verifier), ffiwrapper.ProofVerifier), - - // Consensus: VM - Override(new(vm.SyscallBuilder), vm.Syscalls), - - // Consensus: Chain storage/access - Override(new(*store.ChainStore), modules.ChainStore), - Override(new(*stmgr.StateManager), modules.StateManager), - Override(new(dtypes.ChainBitswap), modules.ChainBitswap), - Override(new(dtypes.ChainBlockService), modules.ChainBlockService), // todo: unused - - // Consensus: Chain sync - - // We don't want the SyncManagerCtor to be used as an fx constructor, but rather as a value. - // It will be called implicitly by the Syncer constructor. 
- Override(new(chain.SyncManagerCtor), func() chain.SyncManagerCtor { return chain.NewSyncManager }), - Override(new(*chain.Syncer), modules.NewSyncer), - Override(new(exchange.Client), exchange.NewClient), - - // Chain networking - Override(new(*hello.Service), hello.NewHelloService), - Override(new(exchange.Server), exchange.NewServer), - Override(new(*peermgr.PeerMgr), peermgr.NewPeerMgr), - - // Chain mining API dependencies - Override(new(*slashfilter.SlashFilter), modules.NewSlashFilter), - - // Service: Message Pool - Override(new(dtypes.DefaultMaxFeeFunc), modules.NewDefaultMaxFeeFunc), - Override(new(*messagepool.MessagePool), modules.MessagePool), - Override(new(*dtypes.MpoolLocker), new(dtypes.MpoolLocker)), - - // Shared graphsync (markets, serving chain) - Override(new(dtypes.Graphsync), modules.Graphsync(config.DefaultSimultaneousTransfers)), - - // Service: Wallet - Override(new(*messagesigner.MessageSigner), messagesigner.NewMessageSigner), - Override(new(*wallet.LocalWallet), wallet.NewWallet), - Override(new(wallet.Default), From(new(*wallet.LocalWallet))), - Override(new(api.Wallet), From(new(wallet.MultiWallet))), - - // Service: Payment channels - Override(new(paychmgr.PaychAPI), From(new(modules.PaychAPI))), - Override(new(*paychmgr.Store), modules.NewPaychStore), - Override(new(*paychmgr.Manager), modules.NewManager), - Override(HandlePaymentChannelManagerKey, modules.HandlePaychManager), - Override(SettlePaymentChannelsKey, settler.SettlePaymentChannels), - - // Markets (common) - Override(new(*discoveryimpl.Local), modules.NewLocalDiscovery), - - // Markets (retrieval) - Override(new(discovery.PeerResolver), modules.RetrievalResolver), - Override(new(retrievalmarket.RetrievalClient), modules.RetrievalClient), - Override(new(dtypes.ClientDataTransfer), modules.NewClientGraphsyncDataTransfer), - - // Markets (storage) - Override(new(*market.FundManager), market.NewFundManager), - Override(new(dtypes.ClientDatastore), 
modules.NewClientDatastore), - Override(new(storagemarket.StorageClient), modules.StorageClient), - Override(new(storagemarket.StorageClientNode), storageadapter.NewClientNodeAdapter), - Override(HandleMigrateClientFundsKey, modules.HandleMigrateClientFunds), - - Override(new(*full.GasPriceCache), full.NewGasPriceCache), - - // Lite node API - ApplyIf(isLiteNode, - Override(new(messagepool.Provider), messagepool.NewProviderLite), - Override(new(messagesigner.MpoolNonceAPI), From(new(modules.MpoolNonceAPI))), - Override(new(full.ChainModuleAPI), From(new(api.Gateway))), - Override(new(full.GasModuleAPI), From(new(api.Gateway))), - Override(new(full.MpoolModuleAPI), From(new(api.Gateway))), - Override(new(full.StateModuleAPI), From(new(api.Gateway))), - Override(new(stmgr.StateManagerAPI), rpcstmgr.NewRPCStateManager), - ), - - // Full node API / service startup - ApplyIf(isFullNode, - Override(new(messagepool.Provider), messagepool.NewProvider), - Override(new(messagesigner.MpoolNonceAPI), From(new(*messagepool.MessagePool))), - Override(new(full.ChainModuleAPI), From(new(full.ChainModule))), - Override(new(full.GasModuleAPI), From(new(full.GasModule))), - Override(new(full.MpoolModuleAPI), From(new(full.MpoolModule))), - Override(new(full.StateModuleAPI), From(new(full.StateModule))), - Override(new(stmgr.StateManagerAPI), From(new(*stmgr.StateManager))), - - Override(RunHelloKey, modules.RunHello), - Override(RunChainExchangeKey, modules.RunChainExchange), - Override(RunPeerMgrKey, modules.RunPeerMgr), - Override(HandleIncomingMessagesKey, modules.HandleIncomingMessages), - Override(HandleIncomingBlocksKey, modules.HandleIncomingBlocks), - ), -) - -var MinerNode = Options( - // API dependencies - Override(new(api.Common), From(new(common.CommonAPI))), - Override(new(sectorstorage.StorageAuth), modules.StorageAuth), - - // Actor config - Override(new(dtypes.MinerAddress), modules.MinerAddress), - Override(new(dtypes.MinerID), modules.MinerID), - 
Override(new(abi.RegisteredSealProof), modules.SealProofType), - Override(new(dtypes.NetworkName), modules.StorageNetworkName), - - // Sector storage - Override(new(*stores.Index), stores.NewIndex), - Override(new(stores.SectorIndex), From(new(*stores.Index))), - Override(new(stores.LocalStorage), From(new(repo.LockedRepo))), - Override(new(*stores.Local), modules.LocalStorage), - Override(new(*stores.Remote), modules.RemoteStorage), - Override(new(*sectorstorage.Manager), modules.SectorStorage), - Override(new(sectorstorage.SectorManager), From(new(*sectorstorage.Manager))), - Override(new(storiface.WorkerReturn), From(new(sectorstorage.SectorManager))), - Override(new(sectorstorage.Unsealer), From(new(*sectorstorage.Manager))), - - // Sector storage: Proofs - Override(new(ffiwrapper.Verifier), ffiwrapper.ProofVerifier), - Override(new(ffiwrapper.Prover), ffiwrapper.ProofProver), - Override(new(storage2.Prover), From(new(sectorstorage.SectorManager))), - - // Sealing - Override(new(sealing.SectorIDCounter), modules.SectorIDCounter), - Override(GetParamsKey, modules.GetParams), - - // Mining / proving - Override(new(*slashfilter.SlashFilter), modules.NewSlashFilter), - Override(new(*storage.Miner), modules.StorageMiner(config.DefaultStorageMiner().Fees)), - Override(new(*miner.Miner), modules.SetupBlockProducer), - Override(new(gen.WinningPoStProver), storage.NewWinningPoStProver), - - Override(new(*storage.AddressSelector), modules.AddressSelector(nil)), - - // Markets - Override(new(dtypes.StagingMultiDstore), modules.StagingMultiDatastore), - Override(new(dtypes.StagingBlockstore), modules.StagingBlockstore), - Override(new(dtypes.StagingDAG), modules.StagingDAG), - Override(new(dtypes.StagingGraphsync), modules.StagingGraphsync(config.DefaultSimultaneousTransfers)), - Override(new(dtypes.ProviderPieceStore), modules.NewProviderPieceStore), - Override(new(*sectorblocks.SectorBlocks), sectorblocks.NewSectorBlocks), - - // Markets (retrieval) - 
Override(new(sectorstorage.PieceProvider), sectorstorage.NewPieceProvider), - Override(new(dtypes.RetrievalPricingFunc), modules.RetrievalPricingFunc(config.DealmakingConfig{ - RetrievalPricing: &config.RetrievalPricing{ - Strategy: config.RetrievalPricingDefaultMode, - Default: &config.RetrievalPricingDefault{}, - }, - })), - Override(new(sectorstorage.PieceProvider), sectorstorage.NewPieceProvider), - Override(new(retrievalmarket.RetrievalProvider), modules.RetrievalProvider), - Override(new(dtypes.RetrievalDealFilter), modules.RetrievalDealFilter(nil)), - - Override(HandleRetrievalKey, modules.HandleRetrieval), - - // Markets (storage) - Override(new(dtypes.ProviderDataTransfer), modules.NewProviderDAGServiceDataTransfer), - Override(new(*storedask.StoredAsk), modules.NewStorageAsk), - Override(new(dtypes.StorageDealFilter), modules.BasicDealFilter(nil)), - Override(new(storagemarket.StorageProvider), modules.StorageProvider), - Override(new(*storageadapter.DealPublisher), storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{})), - Override(new(storagemarket.StorageProviderNode), storageadapter.NewProviderNodeAdapter(nil, nil)), - Override(HandleMigrateProviderFundsKey, modules.HandleMigrateProviderFunds), - Override(HandleDealsKey, modules.HandleDeals), - - // Config (todo: get a real property system) - Override(new(dtypes.ConsiderOnlineStorageDealsConfigFunc), modules.NewConsiderOnlineStorageDealsConfigFunc), - Override(new(dtypes.SetConsiderOnlineStorageDealsConfigFunc), modules.NewSetConsideringOnlineStorageDealsFunc), - Override(new(dtypes.ConsiderOnlineRetrievalDealsConfigFunc), modules.NewConsiderOnlineRetrievalDealsConfigFunc), - Override(new(dtypes.SetConsiderOnlineRetrievalDealsConfigFunc), modules.NewSetConsiderOnlineRetrievalDealsConfigFunc), - Override(new(dtypes.StorageDealPieceCidBlocklistConfigFunc), modules.NewStorageDealPieceCidBlocklistConfigFunc), - Override(new(dtypes.SetStorageDealPieceCidBlocklistConfigFunc), 
modules.NewSetStorageDealPieceCidBlocklistConfigFunc), - Override(new(dtypes.ConsiderOfflineStorageDealsConfigFunc), modules.NewConsiderOfflineStorageDealsConfigFunc), - Override(new(dtypes.SetConsiderOfflineStorageDealsConfigFunc), modules.NewSetConsideringOfflineStorageDealsFunc), - Override(new(dtypes.ConsiderOfflineRetrievalDealsConfigFunc), modules.NewConsiderOfflineRetrievalDealsConfigFunc), - Override(new(dtypes.SetConsiderOfflineRetrievalDealsConfigFunc), modules.NewSetConsiderOfflineRetrievalDealsConfigFunc), - Override(new(dtypes.ConsiderVerifiedStorageDealsConfigFunc), modules.NewConsiderVerifiedStorageDealsConfigFunc), - Override(new(dtypes.SetConsiderVerifiedStorageDealsConfigFunc), modules.NewSetConsideringVerifiedStorageDealsFunc), - Override(new(dtypes.ConsiderUnverifiedStorageDealsConfigFunc), modules.NewConsiderUnverifiedStorageDealsConfigFunc), - Override(new(dtypes.SetConsiderUnverifiedStorageDealsConfigFunc), modules.NewSetConsideringUnverifiedStorageDealsFunc), - Override(new(dtypes.SetSealingConfigFunc), modules.NewSetSealConfigFunc), - Override(new(dtypes.GetSealingConfigFunc), modules.NewGetSealConfigFunc), - Override(new(dtypes.SetExpectedSealDurationFunc), modules.NewSetExpectedSealDurationFunc), - Override(new(dtypes.GetExpectedSealDurationFunc), modules.NewGetExpectedSealDurationFunc), - Override(new(dtypes.SetMaxDealStartDelayFunc), modules.NewSetMaxDealStartDelayFunc), - Override(new(dtypes.GetMaxDealStartDelayFunc), modules.NewGetMaxDealStartDelayFunc), -) - -// Online sets up basic libp2p node -func Online() Option { - +func Base() Option { return Options( - // make sure that online is applied before Config. 
- // This is important because Config overrides some of Online units - func(s *Settings) error { s.Online = true; return nil }, + func(s *Settings) error { s.Base = true; return nil }, // mark Base as applied ApplyIf(func(s *Settings) bool { return s.Config }, - Error(errors.New("the Online option must be set before Config option")), + Error(errors.New("the Base() option must be set before Config option")), + ), + ApplyIf(func(s *Settings) bool { return s.enableLibp2pNode }, + LibP2P, ), - - LibP2P, - ApplyIf(isFullOrLiteNode, ChainNode), ApplyIf(IsType(repo.StorageMiner), MinerNode), ) } -func StorageMiner(out *api.StorageMiner) Option { - return Options( - ApplyIf(func(s *Settings) bool { return s.Config }, - Error(errors.New("the StorageMiner option must be set before Config option")), - ), - ApplyIf(func(s *Settings) bool { return s.Online }, - Error(errors.New("the StorageMiner option must be set before Online option")), - ), - - func(s *Settings) error { - s.nodeType = repo.StorageMiner - return nil - }, - - func(s *Settings) error { - resAPI := &impl.StorageMinerAPI{} - s.invokes[ExtractApiKey] = fx.Populate(resAPI) - *out = resAPI - return nil - }, - ) -} - // Config sets up constructors based on the provided Config -func ConfigCommon(cfg *config.Common) Option { +func ConfigCommon(cfg *config.Common, enableLibp2pNode bool) Option { return Options( func(s *Settings) error { s.Config = true; return nil }, Override(new(dtypes.APIEndpoint), func() (dtypes.APIEndpoint, error) { @@ -505,14 +236,21 @@ func ConfigCommon(cfg *config.Common) Option { Override(SetApiEndpointKey, func(lr repo.LockedRepo, e dtypes.APIEndpoint) error { return lr.SetAPIEndpoint(e) }), - Override(new(sectorstorage.URLs), func(e dtypes.APIEndpoint) (sectorstorage.URLs, error) { + Override(new(stores.URLs), func(e dtypes.APIEndpoint) (stores.URLs, error) { ip := cfg.API.RemoteListenAddress - var urls sectorstorage.URLs + var urls stores.URLs urls = append(urls, "http://"+ip+"/remote") // 
TODO: This makes no assumptions, and probably could... return urls, nil }), - ApplyIf(func(s *Settings) bool { return s.Online }, + ApplyIf(func(s *Settings) bool { return s.Base }), // apply only if Base has already been applied + If(!enableLibp2pNode, + Override(new(api.Net), new(api.NetStub)), + Override(new(api.Common), From(new(common.CommonAPI))), + ), + If(enableLibp2pNode, + Override(new(api.Net), From(new(net.NetAPI))), + Override(new(api.Common), From(new(common.CommonAPI))), Override(StartListeningKey, lp2p.StartListening(cfg.Libp2p.ListenAddresses)), Override(ConnectionManagerKey, lp2p.ConnectionManager( cfg.Libp2p.ConnMgrLow, @@ -525,92 +263,12 @@ func ConfigCommon(cfg *config.Common) Option { ApplyIf(func(s *Settings) bool { return len(cfg.Libp2p.BootstrapPeers) > 0 }, Override(new(dtypes.BootstrapPeers), modules.ConfigBootstrap(cfg.Libp2p.BootstrapPeers)), ), - ), - Override(AddrsFactoryKey, lp2p.AddrsFactory( - cfg.Libp2p.AnnounceAddresses, - cfg.Libp2p.NoAnnounceAddresses)), - Override(new(dtypes.MetadataDS), modules.Datastore(cfg.Backup.DisableMetadataLog)), - ) -} -func ConfigFullNode(c interface{}) Option { - cfg, ok := c.(*config.FullNode) - if !ok { - return Error(xerrors.Errorf("invalid config from repo, got: %T", c)) - } - - ipfsMaddr := cfg.Client.IpfsMAddr - return Options( - ConfigCommon(&cfg.Common), - - If(cfg.Client.UseIpfs, - Override(new(dtypes.ClientBlockstore), modules.IpfsClientBlockstore(ipfsMaddr, cfg.Client.IpfsOnlineMode)), - If(cfg.Client.IpfsUseForRetrieval, - Override(new(dtypes.ClientRetrievalStoreManager), modules.ClientBlockstoreRetrievalStoreManager), - ), - ), - Override(new(dtypes.Graphsync), modules.Graphsync(cfg.Client.SimultaneousTransfers)), - - If(cfg.Metrics.HeadNotifs, - Override(HeadMetricsKey, metrics.SendHeadNotifs(cfg.Metrics.Nickname)), + Override(AddrsFactoryKey, lp2p.AddrsFactory( + cfg.Libp2p.AnnounceAddresses, + cfg.Libp2p.NoAnnounceAddresses)), ), - - If(cfg.Wallet.RemoteBackend != "", - 
Override(new(*remotewallet.RemoteWallet), remotewallet.SetupRemoteWallet(cfg.Wallet.RemoteBackend)), - ), - If(cfg.Wallet.EnableLedger, - Override(new(*ledgerwallet.LedgerWallet), ledgerwallet.NewWallet), - ), - If(cfg.Wallet.DisableLocal, - Unset(new(*wallet.LocalWallet)), - Override(new(wallet.Default), wallet.NilDefault), - ), - ) -} - -func ConfigStorageMiner(c interface{}) Option { - cfg, ok := c.(*config.StorageMiner) - if !ok { - return Error(xerrors.Errorf("invalid config from repo, got: %T", c)) - } - - pricingConfig := cfg.Dealmaking.RetrievalPricing - if pricingConfig.Strategy == config.RetrievalPricingExternalMode { - if pricingConfig.External == nil { - return Error(xerrors.New("retrieval pricing policy has been to set to external but external policy config is nil")) - } - - if pricingConfig.External.Path == "" { - return Error(xerrors.New("retrieval pricing policy has been to set to external but external script path is empty")) - } - } else if pricingConfig.Strategy != config.RetrievalPricingDefaultMode { - return Error(xerrors.New("retrieval pricing policy must be either default or external")) - } - - return Options( - ConfigCommon(&cfg.Common), - - If(cfg.Dealmaking.Filter != "", - Override(new(dtypes.StorageDealFilter), modules.BasicDealFilter(dealfilter.CliStorageDealFilter(cfg.Dealmaking.Filter))), - ), - - If(cfg.Dealmaking.RetrievalFilter != "", - Override(new(dtypes.RetrievalDealFilter), modules.RetrievalDealFilter(dealfilter.CliRetrievalDealFilter(cfg.Dealmaking.RetrievalFilter))), - ), - - Override(new(dtypes.RetrievalPricingFunc), modules.RetrievalPricingFunc(cfg.Dealmaking)), - - Override(new(*storageadapter.DealPublisher), storageadapter.NewDealPublisher(&cfg.Fees, storageadapter.PublishMsgConfig{ - Period: time.Duration(cfg.Dealmaking.PublishMsgPeriod), - MaxDealsPerMsg: cfg.Dealmaking.MaxDealsPerPublishMsg, - })), - Override(new(storagemarket.StorageProviderNode), storageadapter.NewProviderNodeAdapter(&cfg.Fees, &cfg.Dealmaking)), - - 
Override(new(dtypes.StagingGraphsync), modules.StagingGraphsync(cfg.Dealmaking.SimultaneousTransfers)), - - Override(new(sectorstorage.SealerConfig), cfg.Storage), - Override(new(*storage.AddressSelector), modules.AddressSelector(&cfg.Addresses)), - Override(new(*storage.Miner), modules.StorageMiner(cfg.Fees)), + Override(new(dtypes.MetadataDS), modules.Datastore(cfg.Backup.DisableMetadataLog)), ) } @@ -643,19 +301,25 @@ func Repo(r repo.Repo) Option { Override(new(dtypes.UniversalBlockstore), modules.UniversalBlockstore), If(cfg.EnableSplitstore, + If(cfg.Splitstore.ColdStoreType == "universal", + Override(new(dtypes.ColdBlockstore), From(new(dtypes.UniversalBlockstore)))), + If(cfg.Splitstore.ColdStoreType == "discard", + Override(new(dtypes.ColdBlockstore), modules.DiscardColdBlockstore)), If(cfg.Splitstore.HotStoreType == "badger", Override(new(dtypes.HotBlockstore), modules.BadgerHotBlockstore)), Override(new(dtypes.SplitBlockstore), modules.SplitBlockstore(cfg)), Override(new(dtypes.BasicChainBlockstore), modules.ChainSplitBlockstore), Override(new(dtypes.BasicStateBlockstore), modules.StateSplitBlockstore), Override(new(dtypes.BaseBlockstore), From(new(dtypes.SplitBlockstore))), - Override(new(dtypes.ExposedBlockstore), From(new(dtypes.SplitBlockstore))), + Override(new(dtypes.ExposedBlockstore), modules.ExposedSplitBlockstore), + Override(new(dtypes.GCReferenceProtector), modules.SplitBlockstoreGCReferenceProtector), ), If(!cfg.EnableSplitstore, Override(new(dtypes.BasicChainBlockstore), modules.ChainFlatBlockstore), Override(new(dtypes.BasicStateBlockstore), modules.StateFlatBlockstore), Override(new(dtypes.BaseBlockstore), From(new(dtypes.UniversalBlockstore))), Override(new(dtypes.ExposedBlockstore), From(new(dtypes.UniversalBlockstore))), + Override(new(dtypes.GCReferenceProtector), modules.NoopGCReferenceProtector), ), Override(new(dtypes.ChainBlockstore), From(new(dtypes.BasicChainBlockstore))), @@ -686,31 +350,6 @@ func Repo(r repo.Repo) Option { } } 
-type FullOption = Option - -func Lite(enable bool) FullOption { - return func(s *Settings) error { - s.Lite = enable - return nil - } -} - -func FullAPI(out *api.FullNode, fopts ...FullOption) Option { - return Options( - func(s *Settings) error { - s.nodeType = repo.FullNode - return nil - }, - Options(fopts...), - func(s *Settings) error { - resAPI := &impl.FullNodeAPI{} - s.invokes[ExtractApiKey] = fx.Populate(resAPI) - *out = resAPI - return nil - }, - ) -} - type StopFunc func(context.Context) error // New builds and starts new Filecoin node diff --git a/node/builder_chain.go b/node/builder_chain.go new file mode 100644 index 00000000000..1447a4df781 --- /dev/null +++ b/node/builder_chain.go @@ -0,0 +1,218 @@ +package node + +import ( + "go.uber.org/fx" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-fil-markets/discovery" + discoveryimpl "github.com/filecoin-project/go-fil-markets/discovery/impl" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-fil-markets/storagemarket" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain" + "github.com/filecoin-project/lotus/chain/beacon" + "github.com/filecoin-project/lotus/chain/exchange" + "github.com/filecoin-project/lotus/chain/gen/slashfilter" + "github.com/filecoin-project/lotus/chain/market" + "github.com/filecoin-project/lotus/chain/messagepool" + "github.com/filecoin-project/lotus/chain/messagesigner" + "github.com/filecoin-project/lotus/chain/metrics" + "github.com/filecoin-project/lotus/chain/stmgr" + rpcstmgr "github.com/filecoin-project/lotus/chain/stmgr/rpc" + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/chain/vm" + "github.com/filecoin-project/lotus/chain/wallet" + ledgerwallet "github.com/filecoin-project/lotus/chain/wallet/ledger" + "github.com/filecoin-project/lotus/chain/wallet/remotewallet" + "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" + 
"github.com/filecoin-project/lotus/lib/peermgr" + "github.com/filecoin-project/lotus/markets/storageadapter" + "github.com/filecoin-project/lotus/node/config" + "github.com/filecoin-project/lotus/node/hello" + "github.com/filecoin-project/lotus/node/impl" + "github.com/filecoin-project/lotus/node/impl/full" + "github.com/filecoin-project/lotus/node/modules" + "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/filecoin-project/lotus/node/repo" + "github.com/filecoin-project/lotus/paychmgr" + "github.com/filecoin-project/lotus/paychmgr/settler" +) + +// Chain node provides access to the Filecoin blockchain, by setting up a full +// validator node, or by delegating some actions to other nodes (lite mode) +var ChainNode = Options( + // Full node or lite node + // TODO: Fix offline mode + + // Consensus settings + Override(new(dtypes.DrandSchedule), modules.BuiltinDrandConfig), + Override(new(stmgr.UpgradeSchedule), stmgr.DefaultUpgradeSchedule()), + Override(new(dtypes.NetworkName), modules.NetworkName), + Override(new(modules.Genesis), modules.ErrorGenesis), + Override(new(dtypes.AfterGenesisSet), modules.SetGenesis), + Override(SetGenesisKey, modules.DoSetGenesis), + Override(new(beacon.Schedule), modules.RandomSchedule), + + // Network bootstrap + Override(new(dtypes.BootstrapPeers), modules.BuiltinBootstrap), + Override(new(dtypes.DrandBootstrap), modules.DrandBootstrap), + + // Consensus: crypto dependencies + Override(new(ffiwrapper.Verifier), ffiwrapper.ProofVerifier), + Override(new(ffiwrapper.Prover), ffiwrapper.ProofProver), + + // Consensus: VM + Override(new(vm.SyscallBuilder), vm.Syscalls), + + // Consensus: Chain storage/access + Override(new(*store.ChainStore), modules.ChainStore), + Override(new(*stmgr.StateManager), modules.StateManager), + Override(new(dtypes.ChainBitswap), modules.ChainBitswap), + Override(new(dtypes.ChainBlockService), modules.ChainBlockService), // todo: unused + + // Consensus: Chain sync + + // We don't want 
the SyncManagerCtor to be used as an fx constructor, but rather as a value. + // It will be called implicitly by the Syncer constructor. + Override(new(chain.SyncManagerCtor), func() chain.SyncManagerCtor { return chain.NewSyncManager }), + Override(new(*chain.Syncer), modules.NewSyncer), + Override(new(exchange.Client), exchange.NewClient), + + // Chain networking + Override(new(*hello.Service), hello.NewHelloService), + Override(new(exchange.Server), exchange.NewServer), + Override(new(*peermgr.PeerMgr), peermgr.NewPeerMgr), + + // Chain mining API dependencies + Override(new(*slashfilter.SlashFilter), modules.NewSlashFilter), + + // Service: Message Pool + Override(new(dtypes.DefaultMaxFeeFunc), modules.NewDefaultMaxFeeFunc), + Override(new(*messagepool.MessagePool), modules.MessagePool), + Override(new(*dtypes.MpoolLocker), new(dtypes.MpoolLocker)), + + // Shared graphsync (markets, serving chain) + Override(new(dtypes.Graphsync), modules.Graphsync(config.DefaultFullNode().Client.SimultaneousTransfers)), + + // Service: Wallet + Override(new(*messagesigner.MessageSigner), messagesigner.NewMessageSigner), + Override(new(*wallet.LocalWallet), wallet.NewWallet), + Override(new(wallet.Default), From(new(*wallet.LocalWallet))), + Override(new(api.Wallet), From(new(wallet.MultiWallet))), + + // Service: Payment channels + Override(new(paychmgr.PaychAPI), From(new(modules.PaychAPI))), + Override(new(*paychmgr.Store), modules.NewPaychStore), + Override(new(*paychmgr.Manager), modules.NewManager), + Override(HandlePaymentChannelManagerKey, modules.HandlePaychManager), + Override(SettlePaymentChannelsKey, settler.SettlePaymentChannels), + + // Markets (common) + Override(new(*discoveryimpl.Local), modules.NewLocalDiscovery), + + // Markets (retrieval) + Override(new(discovery.PeerResolver), modules.RetrievalResolver), + Override(new(retrievalmarket.RetrievalClient), modules.RetrievalClient), + Override(new(dtypes.ClientDataTransfer), 
modules.NewClientGraphsyncDataTransfer), + + // Markets (storage) + Override(new(*market.FundManager), market.NewFundManager), + Override(new(dtypes.ClientDatastore), modules.NewClientDatastore), + Override(new(storagemarket.StorageClient), modules.StorageClient), + Override(new(storagemarket.StorageClientNode), storageadapter.NewClientNodeAdapter), + Override(HandleMigrateClientFundsKey, modules.HandleMigrateClientFunds), + + Override(new(*full.GasPriceCache), full.NewGasPriceCache), + + // Lite node API + ApplyIf(isLiteNode, + Override(new(messagepool.Provider), messagepool.NewProviderLite), + Override(new(messagesigner.MpoolNonceAPI), From(new(modules.MpoolNonceAPI))), + Override(new(full.ChainModuleAPI), From(new(api.Gateway))), + Override(new(full.GasModuleAPI), From(new(api.Gateway))), + Override(new(full.MpoolModuleAPI), From(new(api.Gateway))), + Override(new(full.StateModuleAPI), From(new(api.Gateway))), + Override(new(stmgr.StateManagerAPI), rpcstmgr.NewRPCStateManager), + ), + + // Full node API / service startup + ApplyIf(isFullNode, + Override(new(messagepool.Provider), messagepool.NewProvider), + Override(new(messagesigner.MpoolNonceAPI), From(new(*messagepool.MessagePool))), + Override(new(full.ChainModuleAPI), From(new(full.ChainModule))), + Override(new(full.GasModuleAPI), From(new(full.GasModule))), + Override(new(full.MpoolModuleAPI), From(new(full.MpoolModule))), + Override(new(full.StateModuleAPI), From(new(full.StateModule))), + Override(new(stmgr.StateManagerAPI), From(new(*stmgr.StateManager))), + + Override(RunHelloKey, modules.RunHello), + Override(RunChainExchangeKey, modules.RunChainExchange), + Override(RunPeerMgrKey, modules.RunPeerMgr), + Override(HandleIncomingMessagesKey, modules.HandleIncomingMessages), + Override(HandleIncomingBlocksKey, modules.HandleIncomingBlocks), + ), +) + +func ConfigFullNode(c interface{}) Option { + cfg, ok := c.(*config.FullNode) + if !ok { + return Error(xerrors.Errorf("invalid config from repo, got: 
%T", c)) + } + + enableLibp2pNode := true // always enable libp2p for full nodes + + ipfsMaddr := cfg.Client.IpfsMAddr + return Options( + ConfigCommon(&cfg.Common, enableLibp2pNode), + + If(cfg.Client.UseIpfs, + Override(new(dtypes.ClientBlockstore), modules.IpfsClientBlockstore(ipfsMaddr, cfg.Client.IpfsOnlineMode)), + If(cfg.Client.IpfsUseForRetrieval, + Override(new(dtypes.ClientRetrievalStoreManager), modules.ClientBlockstoreRetrievalStoreManager), + ), + ), + Override(new(dtypes.Graphsync), modules.Graphsync(cfg.Client.SimultaneousTransfers)), + + If(cfg.Metrics.HeadNotifs, + Override(HeadMetricsKey, metrics.SendHeadNotifs(cfg.Metrics.Nickname)), + ), + + If(cfg.Wallet.RemoteBackend != "", + Override(new(*remotewallet.RemoteWallet), remotewallet.SetupRemoteWallet(cfg.Wallet.RemoteBackend)), + ), + If(cfg.Wallet.EnableLedger, + Override(new(*ledgerwallet.LedgerWallet), ledgerwallet.NewWallet), + ), + If(cfg.Wallet.DisableLocal, + Unset(new(*wallet.LocalWallet)), + Override(new(wallet.Default), wallet.NilDefault), + ), + ) +} + +type FullOption = Option + +func Lite(enable bool) FullOption { + return func(s *Settings) error { + s.Lite = enable + return nil + } +} + +func FullAPI(out *api.FullNode, fopts ...FullOption) Option { + return Options( + func(s *Settings) error { + s.nodeType = repo.FullNode + s.enableLibp2pNode = true + return nil + }, + Options(fopts...), + func(s *Settings) error { + resAPI := &impl.FullNodeAPI{} + s.invokes[ExtractApiKey] = fx.Populate(resAPI) + *out = resAPI + return nil + }, + ) +} diff --git a/node/builder_miner.go b/node/builder_miner.go new file mode 100644 index 00000000000..3be055de79b --- /dev/null +++ b/node/builder_miner.go @@ -0,0 +1,224 @@ +package node + +import ( + "errors" + "time" + + "go.uber.org/fx" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + rmnet "github.com/filecoin-project/go-fil-markets/retrievalmarket/network" + 
"github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-fil-markets/storagemarket/impl/storedask" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/markets/retrievaladapter" + storage2 "github.com/filecoin-project/specs-storage/storage" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/gen" + "github.com/filecoin-project/lotus/chain/gen/slashfilter" + sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" + "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" + "github.com/filecoin-project/lotus/extern/sector-storage/stores" + "github.com/filecoin-project/lotus/extern/sector-storage/storiface" + sealing "github.com/filecoin-project/lotus/extern/storage-sealing" + "github.com/filecoin-project/lotus/markets/dealfilter" + "github.com/filecoin-project/lotus/markets/storageadapter" + "github.com/filecoin-project/lotus/miner" + "github.com/filecoin-project/lotus/node/config" + "github.com/filecoin-project/lotus/node/impl" + "github.com/filecoin-project/lotus/node/modules" + "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/filecoin-project/lotus/node/repo" + "github.com/filecoin-project/lotus/storage" + "github.com/filecoin-project/lotus/storage/sectorblocks" +) + +var MinerNode = Options( + Override(new(sectorstorage.StorageAuth), modules.StorageAuth), + + // Actor config + Override(new(dtypes.MinerAddress), modules.MinerAddress), + Override(new(dtypes.MinerID), modules.MinerID), + Override(new(abi.RegisteredSealProof), modules.SealProofType), + Override(new(dtypes.NetworkName), modules.StorageNetworkName), + + // Mining / proving + Override(new(*storage.AddressSelector), modules.AddressSelector(nil)), +) + +func ConfigStorageMiner(c interface{}) Option { + cfg, ok := c.(*config.StorageMiner) + if !ok { + return Error(xerrors.Errorf("invalid config from repo, got: %T", c)) + } + + pricingConfig := 
cfg.Dealmaking.RetrievalPricing + if pricingConfig.Strategy == config.RetrievalPricingExternalMode { + if pricingConfig.External == nil { + return Error(xerrors.New("retrieval pricing policy has been to set to external but external policy config is nil")) + } + + if pricingConfig.External.Path == "" { + return Error(xerrors.New("retrieval pricing policy has been to set to external but external script path is empty")) + } + } else if pricingConfig.Strategy != config.RetrievalPricingDefaultMode { + return Error(xerrors.New("retrieval pricing policy must be either default or external")) + } + + enableLibp2pNode := cfg.Subsystems.EnableMarkets // we enable libp2p nodes if the storage market subsystem is enabled, otherwise we don't + + return Options( + ConfigCommon(&cfg.Common, enableLibp2pNode), + + Override(new(api.MinerSubsystems), modules.ExtractEnabledMinerSubsystems(cfg.Subsystems)), + Override(new(stores.LocalStorage), From(new(repo.LockedRepo))), + Override(new(*stores.Local), modules.LocalStorage), + Override(new(*stores.Remote), modules.RemoteStorage), + Override(new(dtypes.RetrievalPricingFunc), modules.RetrievalPricingFunc(cfg.Dealmaking)), + + If(!cfg.Subsystems.EnableMining, + If(cfg.Subsystems.EnableSealing, Error(xerrors.Errorf("sealing can only be enabled on a mining node"))), + If(cfg.Subsystems.EnableSectorStorage, Error(xerrors.Errorf("sealing can only be enabled on a mining node"))), + ), + If(cfg.Subsystems.EnableMining, + If(!cfg.Subsystems.EnableSealing, Error(xerrors.Errorf("sealing can't be disabled on a mining node yet"))), + If(!cfg.Subsystems.EnableSectorStorage, Error(xerrors.Errorf("sealing can't be disabled on a mining node yet"))), + + // Sector storage: Proofs + Override(new(ffiwrapper.Verifier), ffiwrapper.ProofVerifier), + Override(new(ffiwrapper.Prover), ffiwrapper.ProofProver), + Override(new(storage2.Prover), From(new(sectorstorage.SectorManager))), + + // Sealing (todo should be under EnableSealing, but storagefsm is currently 
bundled with storage.Miner) + Override(new(sealing.SectorIDCounter), modules.SectorIDCounter), + Override(GetParamsKey, modules.GetParams), + + Override(new(dtypes.SetSealingConfigFunc), modules.NewSetSealConfigFunc), + Override(new(dtypes.GetSealingConfigFunc), modules.NewGetSealConfigFunc), + + // Mining / proving + Override(new(*slashfilter.SlashFilter), modules.NewSlashFilter), + Override(new(*storage.Miner), modules.StorageMiner(config.DefaultStorageMiner().Fees)), + Override(new(*miner.Miner), modules.SetupBlockProducer), + Override(new(gen.WinningPoStProver), storage.NewWinningPoStProver), + Override(new(*storage.Miner), modules.StorageMiner(cfg.Fees)), + Override(new(sectorblocks.SectorBuilder), From(new(*storage.Miner))), + ), + + If(cfg.Subsystems.EnableSectorStorage, + // Sector storage + Override(new(*stores.Index), stores.NewIndex), + Override(new(stores.SectorIndex), From(new(*stores.Index))), + Override(new(*sectorstorage.Manager), modules.SectorStorage), + Override(new(sectorstorage.Unsealer), From(new(*sectorstorage.Manager))), + Override(new(sectorstorage.SectorManager), From(new(*sectorstorage.Manager))), + Override(new(storiface.WorkerReturn), From(new(sectorstorage.SectorManager))), + ), + + If(!cfg.Subsystems.EnableSectorStorage, + Override(new(sectorstorage.StorageAuth), modules.StorageAuthWithURL(cfg.Subsystems.SectorIndexApiInfo)), + Override(new(modules.MinerStorageService), modules.ConnectStorageService(cfg.Subsystems.SectorIndexApiInfo)), + Override(new(sectorstorage.Unsealer), From(new(modules.MinerStorageService))), + Override(new(sectorblocks.SectorBuilder), From(new(modules.MinerStorageService))), + ), + If(!cfg.Subsystems.EnableSealing, + Override(new(modules.MinerSealingService), modules.ConnectSealingService(cfg.Subsystems.SealerApiInfo)), + Override(new(stores.SectorIndex), From(new(modules.MinerSealingService))), + ), + + If(cfg.Subsystems.EnableMarkets, + // Markets + Override(new(dtypes.StagingMultiDstore), 
modules.StagingMultiDatastore), + Override(new(dtypes.StagingBlockstore), modules.StagingBlockstore), + Override(new(dtypes.StagingDAG), modules.StagingDAG), + Override(new(dtypes.StagingGraphsync), modules.StagingGraphsync(cfg.Dealmaking.SimultaneousTransfers)), + Override(new(dtypes.ProviderPieceStore), modules.NewProviderPieceStore), + Override(new(*sectorblocks.SectorBlocks), sectorblocks.NewSectorBlocks), + + // Markets (retrieval deps) + Override(new(sectorstorage.PieceProvider), sectorstorage.NewPieceProvider), + Override(new(dtypes.RetrievalPricingFunc), modules.RetrievalPricingFunc(config.DealmakingConfig{ + RetrievalPricing: &config.RetrievalPricing{ + Strategy: config.RetrievalPricingDefaultMode, + Default: &config.RetrievalPricingDefault{}, + }, + })), + Override(new(dtypes.RetrievalPricingFunc), modules.RetrievalPricingFunc(cfg.Dealmaking)), + + // Markets (retrieval) + Override(new(retrievalmarket.RetrievalProviderNode), retrievaladapter.NewRetrievalProviderNode), + Override(new(rmnet.RetrievalMarketNetwork), modules.RetrievalNetwork), + Override(new(retrievalmarket.RetrievalProvider), modules.RetrievalProvider), + Override(new(dtypes.RetrievalDealFilter), modules.RetrievalDealFilter(nil)), + Override(HandleRetrievalKey, modules.HandleRetrieval), + + // Markets (storage) + Override(new(dtypes.ProviderDataTransfer), modules.NewProviderDAGServiceDataTransfer), + Override(new(*storedask.StoredAsk), modules.NewStorageAsk), + Override(new(dtypes.StorageDealFilter), modules.BasicDealFilter(nil)), + Override(new(storagemarket.StorageProvider), modules.StorageProvider), + Override(new(*storageadapter.DealPublisher), storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{})), + Override(HandleMigrateProviderFundsKey, modules.HandleMigrateProviderFunds), + Override(HandleDealsKey, modules.HandleDeals), + + // Config (todo: get a real property system) + Override(new(dtypes.ConsiderOnlineStorageDealsConfigFunc), 
modules.NewConsiderOnlineStorageDealsConfigFunc), + Override(new(dtypes.SetConsiderOnlineStorageDealsConfigFunc), modules.NewSetConsideringOnlineStorageDealsFunc), + Override(new(dtypes.ConsiderOnlineRetrievalDealsConfigFunc), modules.NewConsiderOnlineRetrievalDealsConfigFunc), + Override(new(dtypes.SetConsiderOnlineRetrievalDealsConfigFunc), modules.NewSetConsiderOnlineRetrievalDealsConfigFunc), + Override(new(dtypes.StorageDealPieceCidBlocklistConfigFunc), modules.NewStorageDealPieceCidBlocklistConfigFunc), + Override(new(dtypes.SetStorageDealPieceCidBlocklistConfigFunc), modules.NewSetStorageDealPieceCidBlocklistConfigFunc), + Override(new(dtypes.ConsiderOfflineStorageDealsConfigFunc), modules.NewConsiderOfflineStorageDealsConfigFunc), + Override(new(dtypes.SetConsiderOfflineStorageDealsConfigFunc), modules.NewSetConsideringOfflineStorageDealsFunc), + Override(new(dtypes.ConsiderOfflineRetrievalDealsConfigFunc), modules.NewConsiderOfflineRetrievalDealsConfigFunc), + Override(new(dtypes.SetConsiderOfflineRetrievalDealsConfigFunc), modules.NewSetConsiderOfflineRetrievalDealsConfigFunc), + Override(new(dtypes.ConsiderVerifiedStorageDealsConfigFunc), modules.NewConsiderVerifiedStorageDealsConfigFunc), + Override(new(dtypes.SetConsiderVerifiedStorageDealsConfigFunc), modules.NewSetConsideringVerifiedStorageDealsFunc), + Override(new(dtypes.ConsiderUnverifiedStorageDealsConfigFunc), modules.NewConsiderUnverifiedStorageDealsConfigFunc), + Override(new(dtypes.SetConsiderUnverifiedStorageDealsConfigFunc), modules.NewSetConsideringUnverifiedStorageDealsFunc), + Override(new(dtypes.SetExpectedSealDurationFunc), modules.NewSetExpectedSealDurationFunc), + Override(new(dtypes.GetExpectedSealDurationFunc), modules.NewGetExpectedSealDurationFunc), + Override(new(dtypes.SetMaxDealStartDelayFunc), modules.NewSetMaxDealStartDelayFunc), + Override(new(dtypes.GetMaxDealStartDelayFunc), modules.NewGetMaxDealStartDelayFunc), + + If(cfg.Dealmaking.Filter != "", + 
Override(new(dtypes.StorageDealFilter), modules.BasicDealFilter(dealfilter.CliStorageDealFilter(cfg.Dealmaking.Filter))), + ), + + If(cfg.Dealmaking.RetrievalFilter != "", + Override(new(dtypes.RetrievalDealFilter), modules.RetrievalDealFilter(dealfilter.CliRetrievalDealFilter(cfg.Dealmaking.RetrievalFilter))), + ), + Override(new(*storageadapter.DealPublisher), storageadapter.NewDealPublisher(&cfg.Fees, storageadapter.PublishMsgConfig{ + Period: time.Duration(cfg.Dealmaking.PublishMsgPeriod), + MaxDealsPerMsg: cfg.Dealmaking.MaxDealsPerPublishMsg, + })), + Override(new(storagemarket.StorageProviderNode), storageadapter.NewProviderNodeAdapter(&cfg.Fees, &cfg.Dealmaking)), + ), + + Override(new(sectorstorage.SealerConfig), cfg.Storage), + Override(new(*storage.AddressSelector), modules.AddressSelector(&cfg.Addresses)), + ) +} + +func StorageMiner(out *api.StorageMiner, subsystemsCfg config.MinerSubsystemConfig) Option { + return Options( + ApplyIf(func(s *Settings) bool { return s.Config }, + Error(errors.New("the StorageMiner option must be set before Config option")), + ), + + func(s *Settings) error { + s.nodeType = repo.StorageMiner + s.enableLibp2pNode = subsystemsCfg.EnableMarkets + return nil + }, + + func(s *Settings) error { + resAPI := &impl.StorageMinerAPI{} + s.invokes[ExtractApiKey] = fx.Populate(resAPI) + *out = resAPI + return nil + }, + ) +} diff --git a/node/config/cfgdocgen/gen.go b/node/config/cfgdocgen/gen.go new file mode 100644 index 00000000000..8d0efb65e6b --- /dev/null +++ b/node/config/cfgdocgen/gen.go @@ -0,0 +1,131 @@ +package main + +import ( + "fmt" + "io/ioutil" + "os" + "sort" + "strings" +) + +func run() error { + tfb, err := ioutil.ReadFile("./node/config/types.go") + if err != nil { + return err + } + + // could use the ast lib, but this is simpler + + type st int + const ( + stGlobal st = iota // looking for typedef + stType st = iota // in typedef + ) + + lines := strings.Split(string(tfb), "\n") + state := stGlobal + + type 
field struct { + Name string + Type string + Comment string + } + + var currentType string + var currentComment []string + + out := map[string][]field{} + + for l := range lines { + line := strings.TrimSpace(lines[l]) + + switch state { + case stGlobal: + if strings.HasPrefix(line, "type ") { + currentType = line + currentType = strings.TrimPrefix(currentType, "type") + currentType = strings.TrimSuffix(currentType, "{") + currentType = strings.TrimSpace(currentType) + currentType = strings.TrimSuffix(currentType, "struct") + currentType = strings.TrimSpace(currentType) + currentComment = nil + state = stType + continue + } + case stType: + if strings.HasPrefix(line, "// ") { + cline := strings.TrimSpace(strings.TrimPrefix(line, "//")) + currentComment = append(currentComment, cline) + continue + } + + comment := currentComment + currentComment = nil + + if strings.HasPrefix(line, "}") { + state = stGlobal + continue + } + + f := strings.Fields(line) + if len(f) < 2 { // empty or embedded struct + continue + } + + name := f[0] + typ := f[1] + + out[currentType] = append(out[currentType], field{ + Name: name, + Type: typ, + Comment: strings.Join(comment, "\n"), + }) + } + } + + var outt []string + for t := range out { + outt = append(outt, t) + } + sort.Strings(outt) + + fmt.Print(`// Code generated by github.com/filecoin-project/lotus/node/config/cfgdocgen. DO NOT EDIT. 
+ +package config + +type DocField struct { + Name string + Type string + Comment string +} + +var Doc = map[string][]DocField{ +`) + + for _, typeName := range outt { + typ := out[typeName] + + fmt.Printf("\t\"%s\": []DocField{\n", typeName) + + for _, f := range typ { + fmt.Println("\t\t{") + fmt.Printf("\t\t\tName: \"%s\",\n", f.Name) + fmt.Printf("\t\t\tType: \"%s\",\n\n", f.Type) + fmt.Printf("\t\t\tComment: `%s`,\n", f.Comment) + fmt.Println("\t\t},") + } + + fmt.Printf("\t},\n") + } + + fmt.Println(`}`) + + return nil +} + +func main() { + if err := run(); err != nil { + fmt.Println(err.Error()) + os.Exit(1) + } +} diff --git a/node/config/def.go b/node/config/def.go index 240fadbd93f..c5c455c6894 100644 --- a/node/config/def.go +++ b/node/config/def.go @@ -22,246 +22,10 @@ const ( RetrievalPricingExternalMode = "external" ) -// Common is common config between full node and miner -type Common struct { - API API - Backup Backup - Libp2p Libp2p - Pubsub Pubsub -} - -// FullNode is a full node config -type FullNode struct { - Common - Client Client - Metrics Metrics - Wallet Wallet - Fees FeeConfig - Chainstore Chainstore -} - -// // Common - -type Backup struct { - DisableMetadataLog bool -} - -// StorageMiner is a miner config -type StorageMiner struct { - Common - - Dealmaking DealmakingConfig - Sealing SealingConfig - Storage sectorstorage.SealerConfig - Fees MinerFeeConfig - Addresses MinerAddressConfig -} - -type DealmakingConfig struct { - ConsiderOnlineStorageDeals bool - ConsiderOfflineStorageDeals bool - ConsiderOnlineRetrievalDeals bool - ConsiderOfflineRetrievalDeals bool - ConsiderVerifiedStorageDeals bool - ConsiderUnverifiedStorageDeals bool - PieceCidBlocklist []cid.Cid - ExpectedSealDuration Duration - // Maximum amount of time proposed deal StartEpoch can be in future - MaxDealStartDelay Duration - // The amount of time to wait for more deals to arrive before - // publishing - PublishMsgPeriod Duration - // The maximum number of deals to 
include in a single PublishStorageDeals - // message - MaxDealsPerPublishMsg uint64 - // The maximum collateral that the provider will put up against a deal, - // as a multiplier of the minimum collateral bound - MaxProviderCollateralMultiplier uint64 - - // The maximum number of parallel online data transfers (storage+retrieval) - SimultaneousTransfers uint64 - - Filter string - RetrievalFilter string - - RetrievalPricing *RetrievalPricing -} - -type RetrievalPricing struct { - Strategy string // possible values: "default", "external" - - Default *RetrievalPricingDefault - External *RetrievalPricingExternal -} - -type RetrievalPricingExternal struct { - // Path of the external script that will be run to price a retrieval deal. - // This parameter is ONLY applicable if the retrieval pricing policy strategy has been configured to "external". - Path string -} - -type RetrievalPricingDefault struct { - // VerifiedDealsFreeTransfer configures zero fees for data transfer for a retrieval deal - // of a payloadCid that belongs to a verified storage deal. - // This parameter is ONLY applicable if the retrieval pricing policy strategy has been configured to "default". 
- // default value is true - VerifiedDealsFreeTransfer bool -} - -type SealingConfig struct { - // 0 = no limit - MaxWaitDealsSectors uint64 - - // includes failed, 0 = no limit - MaxSealingSectors uint64 - - // includes failed, 0 = no limit - MaxSealingSectorsForDeals uint64 - - WaitDealsDelay Duration - - AlwaysKeepUnsealedCopy bool - - // Run sector finalization before submitting sector proof to the chain - FinalizeEarly bool - - // enable / disable precommit batching (takes effect after nv13) - BatchPreCommits bool - // maximum precommit batch size - batches will be sent immediately above this size - MaxPreCommitBatch int - // how long to wait before submitting a batch after crossing the minimum batch size - PreCommitBatchWait Duration - // time buffer for forceful batch submission before sectors/deal in batch would start expiring - PreCommitBatchSlack Duration - - // enable / disable commit aggregation (takes effect after nv13) - AggregateCommits bool - // maximum batched commit size - batches will be sent immediately above this size - MinCommitBatch int - MaxCommitBatch int - // how long to wait before submitting a batch after crossing the minimum batch size - CommitBatchWait Duration - // time buffer for forceful batch submission before sectors/deals in batch would start expiring - CommitBatchSlack Duration - - // network BaseFee below which to stop doing commit aggregation, instead - // submitting proofs to the chain individually - AggregateAboveBaseFee types.FIL - - TerminateBatchMax uint64 - TerminateBatchMin uint64 - TerminateBatchWait Duration - - // Keep this many sectors in sealing pipeline, start CC if needed - // todo TargetSealingSectors uint64 - - // todo TargetSectors - stop auto-pleding new sectors after this many sectors are sealed, default CC upgrade for deals sectors if above -} - -type BatchFeeConfig struct { - Base types.FIL - PerSector types.FIL -} - func (b *BatchFeeConfig) FeeForSectors(nSectors int) abi.TokenAmount { return 
big.Add(big.Int(b.Base), big.Mul(big.NewInt(int64(nSectors)), big.Int(b.PerSector))) } -type MinerFeeConfig struct { - MaxPreCommitGasFee types.FIL - MaxCommitGasFee types.FIL - - // maxBatchFee = maxBase + maxPerSector * nSectors - MaxPreCommitBatchGasFee BatchFeeConfig - MaxCommitBatchGasFee BatchFeeConfig - - MaxTerminateGasFee types.FIL - MaxWindowPoStGasFee types.FIL - MaxPublishDealsFee types.FIL - MaxMarketBalanceAddFee types.FIL -} - -type MinerAddressConfig struct { - PreCommitControl []string - CommitControl []string - TerminateControl []string - - // DisableOwnerFallback disables usage of the owner address for messages - // sent automatically - DisableOwnerFallback bool - // DisableWorkerFallback disables usage of the worker address for messages - // sent automatically, if control addresses are configured. - // A control address that doesn't have enough funds will still be chosen - // over the worker address if this flag is set. - DisableWorkerFallback bool -} - -// API contains configs for API endpoint -type API struct { - ListenAddress string - RemoteListenAddress string - Timeout Duration -} - -// Libp2p contains configs for libp2p -type Libp2p struct { - ListenAddresses []string - AnnounceAddresses []string - NoAnnounceAddresses []string - BootstrapPeers []string - ProtectedPeers []string - - ConnMgrLow uint - ConnMgrHigh uint - ConnMgrGrace Duration -} - -type Pubsub struct { - Bootstrapper bool - DirectPeers []string - IPColocationWhitelist []string - RemoteTracer string -} - -type Chainstore struct { - EnableSplitstore bool - Splitstore Splitstore -} - -type Splitstore struct { - HotStoreType string - TrackingStoreType string - MarkSetType string - EnableFullCompaction bool - EnableGC bool // EXPERIMENTAL - Archival bool -} - -// // Full Node - -type Metrics struct { - Nickname string - HeadNotifs bool -} - -type Client struct { - UseIpfs bool - IpfsOnlineMode bool - IpfsMAddr string - IpfsUseForRetrieval bool - SimultaneousTransfers uint64 -} - 
-type Wallet struct { - RemoteBackend string - EnableLedger bool - DisableLocal bool -} - -type FeeConfig struct { - DefaultMaxFee types.FIL -} - func defCommon() Common { return Common{ API: API{ @@ -305,7 +69,11 @@ func DefaultFullNode() *FullNode { Chainstore: Chainstore{ EnableSplitstore: false, Splitstore: Splitstore{ - HotStoreType: "badger", + ColdStoreType: "universal", + HotStoreType: "badger", + MarkSetType: "map", + + HotStoreFullGCFrequency: 20, }, }, } @@ -323,6 +91,10 @@ func DefaultStorageMiner() *StorageMiner { AlwaysKeepUnsealedCopy: true, FinalizeEarly: false, + CollateralFromMinerBalance: false, + AvailableBalanceBuffer: types.FIL(big.Zero()), + DisableCollateralFallback: false, + BatchPreCommits: true, MaxPreCommitBatch: miner5.PreCommitSectorBatchMaxSize, // up to 256 sectors PreCommitBatchWait: Duration(24 * time.Hour), // this should be less than 31.5 hours, which is the expiration of a precommit ticket @@ -384,6 +156,13 @@ func DefaultStorageMiner() *StorageMiner { }, }, + Subsystems: MinerSubsystemConfig{ + EnableMining: true, + EnableSealing: true, + EnableSectorStorage: true, + EnableMarkets: true, + }, + Fees: MinerFeeConfig{ MaxPreCommitGasFee: types.MustParseFIL("0.025"), MaxCommitGasFee: types.MustParseFIL("0.05"), @@ -404,8 +183,10 @@ func DefaultStorageMiner() *StorageMiner { }, Addresses: MinerAddressConfig{ - PreCommitControl: []string{}, - CommitControl: []string{}, + PreCommitControl: []string{}, + CommitControl: []string{}, + TerminateControl: []string{}, + DealPublishControl: []string{}, }, } cfg.Common.API.ListenAddress = "/ip4/127.0.0.1/tcp/2345/http" diff --git a/node/config/doc_gen.go b/node/config/doc_gen.go new file mode 100644 index 00000000000..5d4a91d5ff8 --- /dev/null +++ b/node/config/doc_gen.go @@ -0,0 +1,779 @@ +// Code generated by github.com/filecoin-project/lotus/node/config/cfgdocgen. DO NOT EDIT. 
+ +package config + +type DocField struct { + Name string + Type string + Comment string +} + +var Doc = map[string][]DocField{ + "API": []DocField{ + { + Name: "ListenAddress", + Type: "string", + + Comment: `Binding address for the Lotus API`, + }, + { + Name: "RemoteListenAddress", + Type: "string", + + Comment: ``, + }, + { + Name: "Timeout", + Type: "Duration", + + Comment: ``, + }, + }, + "Backup": []DocField{ + { + Name: "DisableMetadataLog", + Type: "bool", + + Comment: `Note that in case of metadata corruption it might be much harder to recover +your node if metadata log is disabled`, + }, + }, + "BatchFeeConfig": []DocField{ + { + Name: "Base", + Type: "types.FIL", + + Comment: ``, + }, + { + Name: "PerSector", + Type: "types.FIL", + + Comment: ``, + }, + }, + "Chainstore": []DocField{ + { + Name: "EnableSplitstore", + Type: "bool", + + Comment: ``, + }, + { + Name: "Splitstore", + Type: "Splitstore", + + Comment: ``, + }, + }, + "Client": []DocField{ + { + Name: "UseIpfs", + Type: "bool", + + Comment: ``, + }, + { + Name: "IpfsOnlineMode", + Type: "bool", + + Comment: ``, + }, + { + Name: "IpfsMAddr", + Type: "string", + + Comment: ``, + }, + { + Name: "IpfsUseForRetrieval", + Type: "bool", + + Comment: ``, + }, + { + Name: "SimultaneousTransfers", + Type: "uint64", + + Comment: `The maximum number of simultaneous data transfers between the client +and storage providers`, + }, + }, + "Common": []DocField{ + { + Name: "API", + Type: "API", + + Comment: ``, + }, + { + Name: "Backup", + Type: "Backup", + + Comment: ``, + }, + { + Name: "Libp2p", + Type: "Libp2p", + + Comment: ``, + }, + { + Name: "Pubsub", + Type: "Pubsub", + + Comment: ``, + }, + }, + "DealmakingConfig": []DocField{ + { + Name: "ConsiderOnlineStorageDeals", + Type: "bool", + + Comment: `When enabled, the miner can accept online deals`, + }, + { + Name: "ConsiderOfflineStorageDeals", + Type: "bool", + + Comment: `When enabled, the miner can accept offline deals`, + }, + { + Name: 
"ConsiderOnlineRetrievalDeals", + Type: "bool", + + Comment: `When enabled, the miner can accept retrieval deals`, + }, + { + Name: "ConsiderOfflineRetrievalDeals", + Type: "bool", + + Comment: `When enabled, the miner can accept offline retrieval deals`, + }, + { + Name: "ConsiderVerifiedStorageDeals", + Type: "bool", + + Comment: `When enabled, the miner can accept verified deals`, + }, + { + Name: "ConsiderUnverifiedStorageDeals", + Type: "bool", + + Comment: `When enabled, the miner can accept unverified deals`, + }, + { + Name: "PieceCidBlocklist", + Type: "[]cid.Cid", + + Comment: `A list of Data CIDs to reject when making deals`, + }, + { + Name: "ExpectedSealDuration", + Type: "Duration", + + Comment: `Maximum expected amount of time getting the deal into a sealed sector will take +This includes the time the deal will need to get transferred and published +before being assigned to a sector`, + }, + { + Name: "MaxDealStartDelay", + Type: "Duration", + + Comment: `Maximum amount of time proposed deal StartEpoch can be in future`, + }, + { + Name: "PublishMsgPeriod", + Type: "Duration", + + Comment: `When a deal is ready to publish, the amount of time to wait for more +deals to be ready to publish before publishing them all as a batch`, + }, + { + Name: "MaxDealsPerPublishMsg", + Type: "uint64", + + Comment: `The maximum number of deals to include in a single PublishStorageDeals +message`, + }, + { + Name: "MaxProviderCollateralMultiplier", + Type: "uint64", + + Comment: `The maximum collateral that the provider will put up against a deal, +as a multiplier of the minimum collateral bound`, + }, + { + Name: "SimultaneousTransfers", + Type: "uint64", + + Comment: `The maximum number of parallel online data transfers (storage+retrieval)`, + }, + { + Name: "Filter", + Type: "string", + + Comment: `A command used for fine-grained evaluation of storage deals +see 
https://docs.filecoin.io/mine/lotus/miner-configuration/#using-filters-for-fine-grained-storage-and-retrieval-deal-acceptance for more details`, + }, + { + Name: "RetrievalFilter", + Type: "string", + + Comment: `A command used for fine-grained evaluation of retrieval deals +see https://docs.filecoin.io/mine/lotus/miner-configuration/#using-filters-for-fine-grained-storage-and-retrieval-deal-acceptance for more details`, + }, + { + Name: "RetrievalPricing", + Type: "*RetrievalPricing", + + Comment: ``, + }, + }, + "FeeConfig": []DocField{ + { + Name: "DefaultMaxFee", + Type: "types.FIL", + + Comment: ``, + }, + }, + "FullNode": []DocField{ + { + Name: "Client", + Type: "Client", + + Comment: ``, + }, + { + Name: "Metrics", + Type: "Metrics", + + Comment: ``, + }, + { + Name: "Wallet", + Type: "Wallet", + + Comment: ``, + }, + { + Name: "Fees", + Type: "FeeConfig", + + Comment: ``, + }, + { + Name: "Chainstore", + Type: "Chainstore", + + Comment: ``, + }, + }, + "Libp2p": []DocField{ + { + Name: "ListenAddresses", + Type: "[]string", + + Comment: `Binding address for the libp2p host - 0 means random port. +Format: multiaddress; see https://multiformats.io/multiaddr/`, + }, + { + Name: "AnnounceAddresses", + Type: "[]string", + + Comment: `Addresses to explicitally announce to other peers. 
If not specified, +all interface addresses are announced +Format: multiaddress`, + }, + { + Name: "NoAnnounceAddresses", + Type: "[]string", + + Comment: `Addresses to not announce +Format: multiaddress`, + }, + { + Name: "BootstrapPeers", + Type: "[]string", + + Comment: ``, + }, + { + Name: "ProtectedPeers", + Type: "[]string", + + Comment: ``, + }, + { + Name: "ConnMgrLow", + Type: "uint", + + Comment: ``, + }, + { + Name: "ConnMgrHigh", + Type: "uint", + + Comment: ``, + }, + { + Name: "ConnMgrGrace", + Type: "Duration", + + Comment: ``, + }, + }, + "Metrics": []DocField{ + { + Name: "Nickname", + Type: "string", + + Comment: ``, + }, + { + Name: "HeadNotifs", + Type: "bool", + + Comment: ``, + }, + }, + "MinerAddressConfig": []DocField{ + { + Name: "PreCommitControl", + Type: "[]string", + + Comment: `Addresses to send PreCommit messages from`, + }, + { + Name: "CommitControl", + Type: "[]string", + + Comment: `Addresses to send Commit messages from`, + }, + { + Name: "TerminateControl", + Type: "[]string", + + Comment: ``, + }, + { + Name: "DealPublishControl", + Type: "[]string", + + Comment: ``, + }, + { + Name: "DisableOwnerFallback", + Type: "bool", + + Comment: `DisableOwnerFallback disables usage of the owner address for messages +sent automatically`, + }, + { + Name: "DisableWorkerFallback", + Type: "bool", + + Comment: `DisableWorkerFallback disables usage of the worker address for messages +sent automatically, if control addresses are configured. 
+A control address that doesn't have enough funds will still be chosen +over the worker address if this flag is set.`, + }, + }, + "MinerFeeConfig": []DocField{ + { + Name: "MaxPreCommitGasFee", + Type: "types.FIL", + + Comment: ``, + }, + { + Name: "MaxCommitGasFee", + Type: "types.FIL", + + Comment: ``, + }, + { + Name: "MaxPreCommitBatchGasFee", + Type: "BatchFeeConfig", + + Comment: `maxBatchFee = maxBase + maxPerSector * nSectors`, + }, + { + Name: "MaxCommitBatchGasFee", + Type: "BatchFeeConfig", + + Comment: ``, + }, + { + Name: "MaxTerminateGasFee", + Type: "types.FIL", + + Comment: ``, + }, + { + Name: "MaxWindowPoStGasFee", + Type: "types.FIL", + + Comment: `WindowPoSt is a high-value operation, so the default fee should be high.`, + }, + { + Name: "MaxPublishDealsFee", + Type: "types.FIL", + + Comment: ``, + }, + { + Name: "MaxMarketBalanceAddFee", + Type: "types.FIL", + + Comment: ``, + }, + }, + "MinerSubsystemConfig": []DocField{ + { + Name: "EnableMining", + Type: "bool", + + Comment: ``, + }, + { + Name: "EnableSealing", + Type: "bool", + + Comment: ``, + }, + { + Name: "EnableSectorStorage", + Type: "bool", + + Comment: ``, + }, + { + Name: "EnableMarkets", + Type: "bool", + + Comment: ``, + }, + { + Name: "SealerApiInfo", + Type: "string", + + Comment: ``, + }, + { + Name: "SectorIndexApiInfo", + Type: "string", + + Comment: ``, + }, + }, + "Pubsub": []DocField{ + { + Name: "Bootstrapper", + Type: "bool", + + Comment: `Run the node in bootstrap-node mode`, + }, + { + Name: "DirectPeers", + Type: "[]string", + + Comment: `DirectPeers specifies peers with direct peering agreements. These peers are +connected outside of the mesh, with all (valid) message unconditionally +forwarded to them. The router will maintain open connections to these peers. +Note that the peering agreement should be reciprocal with direct peers +symmetrically configured at both ends. 
+Type: Array of multiaddress peerinfo strings, must include peerid (/p2p/12D3K...`, + }, + { + Name: "IPColocationWhitelist", + Type: "[]string", + + Comment: ``, + }, + { + Name: "RemoteTracer", + Type: "string", + + Comment: ``, + }, + }, + "RetrievalPricing": []DocField{ + { + Name: "Strategy", + Type: "string", + + Comment: ``, + }, + { + Name: "Default", + Type: "*RetrievalPricingDefault", + + Comment: ``, + }, + { + Name: "External", + Type: "*RetrievalPricingExternal", + + Comment: ``, + }, + }, + "RetrievalPricingDefault": []DocField{ + { + Name: "VerifiedDealsFreeTransfer", + Type: "bool", + + Comment: `VerifiedDealsFreeTransfer configures zero fees for data transfer for a retrieval deal +of a payloadCid that belongs to a verified storage deal. +This parameter is ONLY applicable if the retrieval pricing policy strategy has been configured to "default". +default value is true`, + }, + }, + "RetrievalPricingExternal": []DocField{ + { + Name: "Path", + Type: "string", + + Comment: `Path of the external script that will be run to price a retrieval deal. +This parameter is ONLY applicable if the retrieval pricing policy strategy has been configured to "external".`, + }, + }, + "SealingConfig": []DocField{ + { + Name: "MaxWaitDealsSectors", + Type: "uint64", + + Comment: `Upper bound on how many sectors can be waiting for more deals to be packed in it before it begins sealing at any given time. +If the miner is accepting multiple deals in parallel, up to MaxWaitDealsSectors of new sectors will be created. 
+If more than MaxWaitDealsSectors deals are accepted in parallel, only MaxWaitDealsSectors deals will be processed in parallel +Note that setting this number too high in relation to deal ingestion rate may result in poor sector packing efficiency +0 = no limit`, + }, + { + Name: "MaxSealingSectors", + Type: "uint64", + + Comment: `Upper bound on how many sectors can be sealing at the same time when creating new CC sectors (0 = unlimited)`, + }, + { + Name: "MaxSealingSectorsForDeals", + Type: "uint64", + + Comment: `Upper bound on how many sectors can be sealing at the same time when creating new sectors with deals (0 = unlimited)`, + }, + { + Name: "WaitDealsDelay", + Type: "Duration", + + Comment: `Period of time that a newly created sector will wait for more deals to be packed in to before it starts to seal. +Sectors which are fully filled will start sealing immediately`, + }, + { + Name: "AlwaysKeepUnsealedCopy", + Type: "bool", + + Comment: `Whether to keep unsealed copies of deal data regardless of whether the client requested that. 
This lets the miner +avoid the relatively high cost of unsealing the data later, at the cost of more storage space`, + }, + { + Name: "FinalizeEarly", + Type: "bool", + + Comment: `Run sector finalization before submitting sector proof to the chain`, + }, + { + Name: "CollateralFromMinerBalance", + Type: "bool", + + Comment: `Whether to use available miner balance for sector collateral instead of sending it with each message`, + }, + { + Name: "AvailableBalanceBuffer", + Type: "types.FIL", + + Comment: `Minimum available balance to keep in the miner actor before sending it with messages`, + }, + { + Name: "DisableCollateralFallback", + Type: "bool", + + Comment: `Don't send collateral with messages even if there is no available balance in the miner actor`, + }, + { + Name: "BatchPreCommits", + Type: "bool", + + Comment: `enable / disable precommit batching (takes effect after nv13)`, + }, + { + Name: "MaxPreCommitBatch", + Type: "int", + + Comment: `maximum precommit batch size - batches will be sent immediately above this size`, + }, + { + Name: "PreCommitBatchWait", + Type: "Duration", + + Comment: `how long to wait before submitting a batch after crossing the minimum batch size`, + }, + { + Name: "PreCommitBatchSlack", + Type: "Duration", + + Comment: `time buffer for forceful batch submission before sectors/deal in batch would start expiring`, + }, + { + Name: "AggregateCommits", + Type: "bool", + + Comment: `enable / disable commit aggregation (takes effect after nv13)`, + }, + { + Name: "MinCommitBatch", + Type: "int", + + Comment: `maximum batched commit size - batches will be sent immediately above this size`, + }, + { + Name: "MaxCommitBatch", + Type: "int", + + Comment: ``, + }, + { + Name: "CommitBatchWait", + Type: "Duration", + + Comment: `how long to wait before submitting a batch after crossing the minimum batch size`, + }, + { + Name: "CommitBatchSlack", + Type: "Duration", + + Comment: `time buffer for forceful batch submission before sectors/deals 
in batch would start expiring`, + }, + { + Name: "AggregateAboveBaseFee", + Type: "types.FIL", + + Comment: `network BaseFee below which to stop doing commit aggregation, instead +submitting proofs to the chain individually`, + }, + { + Name: "TerminateBatchMax", + Type: "uint64", + + Comment: ``, + }, + { + Name: "TerminateBatchMin", + Type: "uint64", + + Comment: ``, + }, + { + Name: "TerminateBatchWait", + Type: "Duration", + + Comment: ``, + }, + }, + "Splitstore": []DocField{ + { + Name: "ColdStoreType", + Type: "string", + + Comment: `ColdStoreType specifies the type of the coldstore. +It can be "universal" (default) or "discard" for discarding cold blocks.`, + }, + { + Name: "HotStoreType", + Type: "string", + + Comment: `HotStoreType specifies the type of the hotstore. +Only currently supported value is "badger".`, + }, + { + Name: "MarkSetType", + Type: "string", + + Comment: `MarkSetType specifies the type of the markset. +It can be "map" (default) for in memory marking or "badger" for on-disk marking.`, + }, + { + Name: "HotStoreMessageRetention", + Type: "uint64", + + Comment: `HotStoreMessageRetention specifies the retention policy for messages, in finalities beyond +the compaction boundary; default is 0.`, + }, + { + Name: "HotStoreFullGCFrequency", + Type: "uint64", + + Comment: `HotStoreFullGCFrequency specifies how often to perform a full (moving) GC on the hotstore. +A value of 0 disables, while a value 1 will do full GC in every compaction. 
+Default is 20 (about once a week).`, + }, + }, + "StorageMiner": []DocField{ + { + Name: "Subsystems", + Type: "MinerSubsystemConfig", + + Comment: ``, + }, + { + Name: "Dealmaking", + Type: "DealmakingConfig", + + Comment: ``, + }, + { + Name: "Sealing", + Type: "SealingConfig", + + Comment: ``, + }, + { + Name: "Storage", + Type: "sectorstorage.SealerConfig", + + Comment: ``, + }, + { + Name: "Fees", + Type: "MinerFeeConfig", + + Comment: ``, + }, + { + Name: "Addresses", + Type: "MinerAddressConfig", + + Comment: ``, + }, + }, + "Wallet": []DocField{ + { + Name: "RemoteBackend", + Type: "string", + + Comment: ``, + }, + { + Name: "EnableLedger", + Type: "bool", + + Comment: ``, + }, + { + Name: "DisableLocal", + Type: "bool", + + Comment: ``, + }, + }, +} diff --git a/node/config/doc_util.go b/node/config/doc_util.go new file mode 100644 index 00000000000..ee70a9cfd9a --- /dev/null +++ b/node/config/doc_util.go @@ -0,0 +1,44 @@ +package config + +import ( + "fmt" + "strings" +) + +func findDoc(root interface{}, section, name string) *DocField { + rt := fmt.Sprintf("%T", root)[len("*config."):] + + doc := findDocSect(rt, section, name) + if doc != nil { + return doc + } + + return findDocSect("Common", section, name) +} + +func findDocSect(root string, section, name string) *DocField { + path := strings.Split(section, ".") + + docSection := Doc[root] + for _, e := range path { + if docSection == nil { + return nil + } + + for _, field := range docSection { + if field.Name == e { + docSection = Doc[field.Type] + break + } + + } + } + + for _, df := range docSection { + if df.Name == name { + return &df + } + } + + return nil +} diff --git a/node/config/load.go b/node/config/load.go index 61e6e8f9717..08210604455 100644 --- a/node/config/load.go +++ b/node/config/load.go @@ -5,6 +5,10 @@ import ( "fmt" "io" "os" + "reflect" + "regexp" + "strings" + "unicode" "github.com/BurntSushi/toml" "github.com/kelseyhightower/envconfig" @@ -42,15 +46,116 @@ func 
FromReader(reader io.Reader, def interface{}) (interface{}, error) { return cfg, nil } -func ConfigComment(t interface{}) ([]byte, error) { - buf := new(bytes.Buffer) - _, _ = buf.WriteString("# Default config:\n") - e := toml.NewEncoder(buf) - if err := e.Encode(t); err != nil { - return nil, xerrors.Errorf("encoding config: %w", err) +func ConfigUpdate(cfgCur, cfgDef interface{}, comment bool) ([]byte, error) { + var nodeStr, defStr string + if cfgDef != nil { + buf := new(bytes.Buffer) + e := toml.NewEncoder(buf) + if err := e.Encode(cfgDef); err != nil { + return nil, xerrors.Errorf("encoding default config: %w", err) + } + + defStr = buf.String() + } + + { + buf := new(bytes.Buffer) + e := toml.NewEncoder(buf) + if err := e.Encode(cfgCur); err != nil { + return nil, xerrors.Errorf("encoding node config: %w", err) + } + + nodeStr = buf.String() + } + + if comment { + // create a map of default lines so we can comment those out later + defLines := strings.Split(defStr, "\n") + defaults := map[string]struct{}{} + for i := range defLines { + l := strings.TrimSpace(defLines[i]) + if len(l) == 0 { + continue + } + if l[0] == '#' || l[0] == '[' { + continue + } + defaults[l] = struct{}{} + } + + nodeLines := strings.Split(nodeStr, "\n") + var outLines []string + + sectionRx := regexp.MustCompile(`\[(.+)]`) + var section string + + for i, line := range nodeLines { + // if this is a section, track it + trimmed := strings.TrimSpace(line) + if len(trimmed) > 0 { + if trimmed[0] == '[' { + m := sectionRx.FindSubmatch([]byte(trimmed)) + if len(m) != 2 { + return nil, xerrors.Errorf("section didn't match (line %d)", i) + } + section = string(m[1]) + + // never comment sections + outLines = append(outLines, line) + continue + } + } + + pad := strings.Repeat(" ", len(line)-len(strings.TrimLeftFunc(line, unicode.IsSpace))) + + // see if we have docs for this field + { + lf := strings.Fields(line) + if len(lf) > 1 { + doc := findDoc(cfgCur, section, lf[0]) + + if doc != nil { + 
// found docfield, emit doc comment + if len(doc.Comment) > 0 { + for _, docLine := range strings.Split(doc.Comment, "\n") { + outLines = append(outLines, pad+"# "+docLine) + } + outLines = append(outLines, pad+"#") + } + + outLines = append(outLines, pad+"# type: "+doc.Type) + } + } + } + + // if there is the same line in the default config, comment it out it output + if _, found := defaults[strings.TrimSpace(nodeLines[i])]; (cfgDef == nil || found) && len(line) > 0 { + line = pad + "#" + line[len(pad):] + } + outLines = append(outLines, line) + if len(line) > 0 { + outLines = append(outLines, "") + } + } + + nodeStr = strings.Join(outLines, "\n") + } + + // sanity-check that the updated config parses the same way as the current one + if cfgDef != nil { + cfgUpdated, err := FromReader(strings.NewReader(nodeStr), cfgDef) + if err != nil { + return nil, xerrors.Errorf("parsing updated config: %w", err) + } + + if !reflect.DeepEqual(cfgCur, cfgUpdated) { + return nil, xerrors.Errorf("updated config didn't match current config") + } } - b := buf.Bytes() - b = bytes.ReplaceAll(b, []byte("\n"), []byte("\n#")) - b = bytes.ReplaceAll(b, []byte("#["), []byte("[")) - return b, nil + + return []byte(nodeStr), nil +} + +func ConfigComment(t interface{}) ([]byte, error) { + return ConfigUpdate(t, nil, true) } diff --git a/node/config/types.go b/node/config/types.go new file mode 100644 index 00000000000..fe42aa27ee9 --- /dev/null +++ b/node/config/types.go @@ -0,0 +1,325 @@ +package config + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/types" + sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" +) + +// // NOTE: ONLY PUT STRUCT DEFINITIONS IN THIS FILE +// // +// // After making edits here, run 'make cfgdoc-gen' (or 'make gen') + +// Common is common config between full node and miner +type Common struct { + API API + Backup Backup + Libp2p Libp2p + Pubsub Pubsub +} + +// FullNode is a full node config +type FullNode 
struct { + Common + Client Client + Metrics Metrics + Wallet Wallet + Fees FeeConfig + Chainstore Chainstore +} + +// // Common + +type Backup struct { + // When set to true disables metadata log (.lotus/kvlog). This can save disk + // space by reducing metadata redundancy. + // + // Note that in case of metadata corruption it might be much harder to recover + // your node if metadata log is disabled + DisableMetadataLog bool +} + +// StorageMiner is a miner config +type StorageMiner struct { + Common + + Subsystems MinerSubsystemConfig + Dealmaking DealmakingConfig + Sealing SealingConfig + Storage sectorstorage.SealerConfig + Fees MinerFeeConfig + Addresses MinerAddressConfig +} + +type MinerSubsystemConfig struct { + EnableMining bool + EnableSealing bool + EnableSectorStorage bool + EnableMarkets bool + + SealerApiInfo string // if EnableSealing == false + SectorIndexApiInfo string // if EnableSectorStorage == false +} + +type DealmakingConfig struct { + // When enabled, the miner can accept online deals + ConsiderOnlineStorageDeals bool + // When enabled, the miner can accept offline deals + ConsiderOfflineStorageDeals bool + // When enabled, the miner can accept retrieval deals + ConsiderOnlineRetrievalDeals bool + // When enabled, the miner can accept offline retrieval deals + ConsiderOfflineRetrievalDeals bool + // When enabled, the miner can accept verified deals + ConsiderVerifiedStorageDeals bool + // When enabled, the miner can accept unverified deals + ConsiderUnverifiedStorageDeals bool + // A list of Data CIDs to reject when making deals + PieceCidBlocklist []cid.Cid + // Maximum expected amount of time getting the deal into a sealed sector will take + // This includes the time the deal will need to get transferred and published + // before being assigned to a sector + ExpectedSealDuration Duration + // Maximum amount of time proposed deal StartEpoch can be in future + MaxDealStartDelay Duration + // When a deal is ready to publish, the amount of 
time to wait for more + // deals to be ready to publish before publishing them all as a batch + PublishMsgPeriod Duration + // The maximum number of deals to include in a single PublishStorageDeals + // message + MaxDealsPerPublishMsg uint64 + // The maximum collateral that the provider will put up against a deal, + // as a multiplier of the minimum collateral bound + MaxProviderCollateralMultiplier uint64 + + // The maximum number of parallel online data transfers (storage+retrieval) + SimultaneousTransfers uint64 + + // A command used for fine-grained evaluation of storage deals + // see https://docs.filecoin.io/mine/lotus/miner-configuration/#using-filters-for-fine-grained-storage-and-retrieval-deal-acceptance for more details + Filter string + // A command used for fine-grained evaluation of retrieval deals + // see https://docs.filecoin.io/mine/lotus/miner-configuration/#using-filters-for-fine-grained-storage-and-retrieval-deal-acceptance for more details + RetrievalFilter string + + RetrievalPricing *RetrievalPricing +} + +type RetrievalPricing struct { + Strategy string // possible values: "default", "external" + + Default *RetrievalPricingDefault + External *RetrievalPricingExternal +} + +type RetrievalPricingExternal struct { + // Path of the external script that will be run to price a retrieval deal. + // This parameter is ONLY applicable if the retrieval pricing policy strategy has been configured to "external". + Path string +} + +type RetrievalPricingDefault struct { + // VerifiedDealsFreeTransfer configures zero fees for data transfer for a retrieval deal + // of a payloadCid that belongs to a verified storage deal. + // This parameter is ONLY applicable if the retrieval pricing policy strategy has been configured to "default". + // default value is true + VerifiedDealsFreeTransfer bool +} + +type SealingConfig struct { + // Upper bound on how many sectors can be waiting for more deals to be packed in it before it begins sealing at any given time. 
+ // If the miner is accepting multiple deals in parallel, up to MaxWaitDealsSectors of new sectors will be created. + // If more than MaxWaitDealsSectors deals are accepted in parallel, only MaxWaitDealsSectors deals will be processed in parallel + // Note that setting this number too high in relation to deal ingestion rate may result in poor sector packing efficiency + // 0 = no limit + MaxWaitDealsSectors uint64 + + // Upper bound on how many sectors can be sealing at the same time when creating new CC sectors (0 = unlimited) + MaxSealingSectors uint64 + + // Upper bound on how many sectors can be sealing at the same time when creating new sectors with deals (0 = unlimited) + MaxSealingSectorsForDeals uint64 + + // Period of time that a newly created sector will wait for more deals to be packed in to before it starts to seal. + // Sectors which are fully filled will start sealing immediately + WaitDealsDelay Duration + + // Whether to keep unsealed copies of deal data regardless of whether the client requested that. 
This lets the miner + // avoid the relatively high cost of unsealing the data later, at the cost of more storage space + AlwaysKeepUnsealedCopy bool + + // Run sector finalization before submitting sector proof to the chain + FinalizeEarly bool + + // Whether to use available miner balance for sector collateral instead of sending it with each message + CollateralFromMinerBalance bool + // Minimum available balance to keep in the miner actor before sending it with messages + AvailableBalanceBuffer types.FIL + // Don't send collateral with messages even if there is no available balance in the miner actor + DisableCollateralFallback bool + + // enable / disable precommit batching (takes effect after nv13) + BatchPreCommits bool + // maximum precommit batch size - batches will be sent immediately above this size + MaxPreCommitBatch int + // how long to wait before submitting a batch after crossing the minimum batch size + PreCommitBatchWait Duration + // time buffer for forceful batch submission before sectors/deal in batch would start expiring + PreCommitBatchSlack Duration + + // enable / disable commit aggregation (takes effect after nv13) + AggregateCommits bool + // maximum batched commit size - batches will be sent immediately above this size + MinCommitBatch int + MaxCommitBatch int + // how long to wait before submitting a batch after crossing the minimum batch size + CommitBatchWait Duration + // time buffer for forceful batch submission before sectors/deals in batch would start expiring + CommitBatchSlack Duration + + // network BaseFee below which to stop doing commit aggregation, instead + // submitting proofs to the chain individually + AggregateAboveBaseFee types.FIL + + TerminateBatchMax uint64 + TerminateBatchMin uint64 + TerminateBatchWait Duration + + // Keep this many sectors in sealing pipeline, start CC if needed + // todo TargetSealingSectors uint64 + + // todo TargetSectors - stop auto-pleding new sectors after this many sectors are sealed, 
default CC upgrade for deals sectors if above +} + +type BatchFeeConfig struct { + Base types.FIL + PerSector types.FIL +} + +type MinerFeeConfig struct { + MaxPreCommitGasFee types.FIL + MaxCommitGasFee types.FIL + + // maxBatchFee = maxBase + maxPerSector * nSectors + MaxPreCommitBatchGasFee BatchFeeConfig + MaxCommitBatchGasFee BatchFeeConfig + + MaxTerminateGasFee types.FIL + // WindowPoSt is a high-value operation, so the default fee should be high. + MaxWindowPoStGasFee types.FIL + MaxPublishDealsFee types.FIL + MaxMarketBalanceAddFee types.FIL +} + +type MinerAddressConfig struct { + // Addresses to send PreCommit messages from + PreCommitControl []string + // Addresses to send Commit messages from + CommitControl []string + TerminateControl []string + DealPublishControl []string + + // DisableOwnerFallback disables usage of the owner address for messages + // sent automatically + DisableOwnerFallback bool + // DisableWorkerFallback disables usage of the worker address for messages + // sent automatically, if control addresses are configured. + // A control address that doesn't have enough funds will still be chosen + // over the worker address if this flag is set. + DisableWorkerFallback bool +} + +// API contains configs for API endpoint +type API struct { + // Binding address for the Lotus API + ListenAddress string + RemoteListenAddress string + Timeout Duration +} + +// Libp2p contains configs for libp2p +type Libp2p struct { + // Binding address for the libp2p host - 0 means random port. + // Format: multiaddress; see https://multiformats.io/multiaddr/ + ListenAddresses []string + // Addresses to explicitally announce to other peers. 
If not specified, + // all interface addresses are announced + // Format: multiaddress + AnnounceAddresses []string + // Addresses to not announce + // Format: multiaddress + NoAnnounceAddresses []string + BootstrapPeers []string + ProtectedPeers []string + + ConnMgrLow uint + ConnMgrHigh uint + ConnMgrGrace Duration +} + +type Pubsub struct { + // Run the node in bootstrap-node mode + Bootstrapper bool + // DirectPeers specifies peers with direct peering agreements. These peers are + // connected outside of the mesh, with all (valid) message unconditionally + // forwarded to them. The router will maintain open connections to these peers. + // Note that the peering agreement should be reciprocal with direct peers + // symmetrically configured at both ends. + // Type: Array of multiaddress peerinfo strings, must include peerid (/p2p/12D3K... + DirectPeers []string + IPColocationWhitelist []string + RemoteTracer string +} + +type Chainstore struct { + EnableSplitstore bool + Splitstore Splitstore +} + +type Splitstore struct { + // ColdStoreType specifies the type of the coldstore. + // It can be "universal" (default) or "discard" for discarding cold blocks. + ColdStoreType string + // HotStoreType specifies the type of the hotstore. + // Only currently supported value is "badger". + HotStoreType string + // MarkSetType specifies the type of the markset. + // It can be "map" (default) for in memory marking or "badger" for on-disk marking. + MarkSetType string + + // HotStoreMessageRetention specifies the retention policy for messages, in finalities beyond + // the compaction boundary; default is 0. + HotStoreMessageRetention uint64 + // HotStoreFullGCFrequency specifies how often to perform a full (moving) GC on the hotstore. + // A value of 0 disables, while a value 1 will do full GC in every compaction. + // Default is 20 (about once a week). 
+ HotStoreFullGCFrequency uint64 +} + +// // Full Node + +type Metrics struct { + Nickname string + HeadNotifs bool +} + +type Client struct { + UseIpfs bool + IpfsOnlineMode bool + IpfsMAddr string + IpfsUseForRetrieval bool + // The maximum number of simultaneous data transfers between the client + // and storage providers + SimultaneousTransfers uint64 +} + +type Wallet struct { + RemoteBackend string + EnableLedger bool + DisableLocal bool +} + +type FeeConfig struct { + DefaultMaxFee types.FIL +} diff --git a/node/impl/client/client.go b/node/impl/client/client.go index 29eb8550e35..7ba6463e607 100644 --- a/node/impl/client/client.go +++ b/node/impl/client/client.go @@ -436,7 +436,19 @@ func (a *API) ClientFindData(ctx context.Context, root cid.Cid, piece *cid.Cid) if piece != nil && !piece.Equals(*p.PieceCID) { continue } - out = append(out, a.makeRetrievalQuery(ctx, p, root, piece, rm.QueryParams{})) + + // do not rely on local data with respect to peer id + // fetch an up-to-date miner peer id from chain + mi, err := a.StateMinerInfo(ctx, p.Address, types.EmptyTSK) + if err != nil { + return nil, err + } + pp := rm.RetrievalPeer{ + Address: p.Address, + ID: *mi.PeerId, + } + + out = append(out, a.makeRetrievalQuery(ctx, pp, root, piece, rm.QueryParams{})) } return out, nil @@ -680,6 +692,8 @@ func readSubscribeEvents(ctx context.Context, dealID retrievalmarket.DealID, sub return nil case rm.DealStatusRejected: return xerrors.Errorf("Retrieval Proposal Rejected: %s", state.Message) + case rm.DealStatusCancelled: + return xerrors.Errorf("Retrieval was cancelled externally: %s", state.Message) case rm.DealStatusDealNotFound, rm.DealStatusErrored: diff --git a/node/impl/common/common.go b/node/impl/common/common.go index f1c57665cff..a681e4a4a90 100644 --- a/node/impl/common/common.go +++ b/node/impl/common/common.go @@ -2,32 +2,18 @@ package common import ( "context" - "sort" - "strings" "github.com/gbrlsnchs/jwt/v3" "github.com/google/uuid" + logging 
"github.com/ipfs/go-log/v2" "go.uber.org/fx" "golang.org/x/xerrors" - logging "github.com/ipfs/go-log/v2" - "github.com/libp2p/go-libp2p-core/host" - metrics "github.com/libp2p/go-libp2p-core/metrics" - "github.com/libp2p/go-libp2p-core/network" - "github.com/libp2p/go-libp2p-core/peer" - protocol "github.com/libp2p/go-libp2p-core/protocol" - swarm "github.com/libp2p/go-libp2p-swarm" - basichost "github.com/libp2p/go-libp2p/p2p/host/basic" - "github.com/libp2p/go-libp2p/p2p/net/conngater" - ma "github.com/multiformats/go-multiaddr" - "github.com/filecoin-project/go-jsonrpc/auth" - "github.com/filecoin-project/lotus/api" apitypes "github.com/filecoin-project/lotus/api/types" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/node/modules/dtypes" - "github.com/filecoin-project/lotus/node/modules/lp2p" ) var session = uuid.New() @@ -36,12 +22,6 @@ type CommonAPI struct { fx.In APISecret *dtypes.APIAlg - RawHost lp2p.RawHost - Host host.Host - Router lp2p.BaseIpfsRouting - ConnGater *conngater.BasicConnectionGater - Reporter metrics.Reporter - Sk *dtypes.ScoreKeeper ShutdownChan dtypes.ShutdownChan } @@ -66,156 +46,10 @@ func (a *CommonAPI) AuthNew(ctx context.Context, perms []auth.Permission) ([]byt return jwt.Sign(&p, (*jwt.HMACSHA)(a.APISecret)) } -func (a *CommonAPI) NetConnectedness(ctx context.Context, pid peer.ID) (network.Connectedness, error) { - return a.Host.Network().Connectedness(pid), nil -} -func (a *CommonAPI) NetPubsubScores(context.Context) ([]api.PubsubScore, error) { - scores := a.Sk.Get() - out := make([]api.PubsubScore, len(scores)) - i := 0 - for k, v := range scores { - out[i] = api.PubsubScore{ID: k, Score: v} - i++ - } - - sort.Slice(out, func(i, j int) bool { - return strings.Compare(string(out[i].ID), string(out[j].ID)) > 0 - }) - - return out, nil -} - -func (a *CommonAPI) NetPeers(context.Context) ([]peer.AddrInfo, error) { - conns := a.Host.Network().Conns() - out := make([]peer.AddrInfo, len(conns)) - - for i, 
conn := range conns { - out[i] = peer.AddrInfo{ - ID: conn.RemotePeer(), - Addrs: []ma.Multiaddr{ - conn.RemoteMultiaddr(), - }, - } - } - - return out, nil -} - -func (a *CommonAPI) NetPeerInfo(_ context.Context, p peer.ID) (*api.ExtendedPeerInfo, error) { - info := &api.ExtendedPeerInfo{ID: p} - - agent, err := a.Host.Peerstore().Get(p, "AgentVersion") - if err == nil { - info.Agent = agent.(string) - } - - for _, a := range a.Host.Peerstore().Addrs(p) { - info.Addrs = append(info.Addrs, a.String()) - } - sort.Strings(info.Addrs) - - protocols, err := a.Host.Peerstore().GetProtocols(p) - if err == nil { - sort.Strings(protocols) - info.Protocols = protocols - } - - if cm := a.Host.ConnManager().GetTagInfo(p); cm != nil { - info.ConnMgrMeta = &api.ConnMgrInfo{ - FirstSeen: cm.FirstSeen, - Value: cm.Value, - Tags: cm.Tags, - Conns: cm.Conns, - } - } - - return info, nil -} - -func (a *CommonAPI) NetConnect(ctx context.Context, p peer.AddrInfo) error { - if swrm, ok := a.Host.Network().(*swarm.Swarm); ok { - swrm.Backoff().Clear(p.ID) - } - - return a.Host.Connect(ctx, p) -} - -func (a *CommonAPI) NetAddrsListen(context.Context) (peer.AddrInfo, error) { - return peer.AddrInfo{ - ID: a.Host.ID(), - Addrs: a.Host.Addrs(), - }, nil -} - -func (a *CommonAPI) NetDisconnect(ctx context.Context, p peer.ID) error { - return a.Host.Network().ClosePeer(p) -} - -func (a *CommonAPI) NetFindPeer(ctx context.Context, p peer.ID) (peer.AddrInfo, error) { - return a.Router.FindPeer(ctx, p) -} - -func (a *CommonAPI) NetAutoNatStatus(ctx context.Context) (i api.NatInfo, err error) { - autonat := a.RawHost.(*basichost.BasicHost).GetAutoNat() - - if autonat == nil { - return api.NatInfo{ - Reachability: network.ReachabilityUnknown, - }, nil - } - - var maddr string - if autonat.Status() == network.ReachabilityPublic { - pa, err := autonat.PublicAddr() - if err != nil { - return api.NatInfo{}, err - } - maddr = pa.String() - } - - return api.NatInfo{ - Reachability: autonat.Status(), - 
PublicAddr: maddr, - }, nil -} - -func (a *CommonAPI) NetAgentVersion(ctx context.Context, p peer.ID) (string, error) { - ag, err := a.Host.Peerstore().Get(p, "AgentVersion") - if err != nil { - return "", err - } - - if ag == nil { - return "unknown", nil - } - - return ag.(string), nil -} - -func (a *CommonAPI) NetBandwidthStats(ctx context.Context) (metrics.Stats, error) { - return a.Reporter.GetBandwidthTotals(), nil -} - -func (a *CommonAPI) NetBandwidthStatsByPeer(ctx context.Context) (map[string]metrics.Stats, error) { - out := make(map[string]metrics.Stats) - for p, s := range a.Reporter.GetBandwidthByPeer() { - out[p.String()] = s - } - return out, nil -} - -func (a *CommonAPI) NetBandwidthStatsByProtocol(ctx context.Context) (map[protocol.ID]metrics.Stats, error) { - return a.Reporter.GetBandwidthByProtocol(), nil -} - func (a *CommonAPI) Discover(ctx context.Context) (apitypes.OpenRPCDocument, error) { return build.OpenRPCDiscoverJSON_Full(), nil } -func (a *CommonAPI) ID(context.Context) (peer.ID, error) { - return a.Host.ID(), nil -} - func (a *CommonAPI) Version(context.Context) (api.APIVersion, error) { v, err := api.VersionForType(api.RunningNodeType) if err != nil { @@ -250,5 +84,3 @@ func (a *CommonAPI) Session(ctx context.Context) (uuid.UUID, error) { func (a *CommonAPI) Closing(ctx context.Context) (<-chan struct{}, error) { return make(chan struct{}), nil // relies on jsonrpc closing } - -var _ api.Common = &CommonAPI{} diff --git a/node/impl/full.go b/node/impl/full.go index 50fd09cdf65..f9c83ded032 100644 --- a/node/impl/full.go +++ b/node/impl/full.go @@ -14,6 +14,7 @@ import ( "github.com/filecoin-project/lotus/node/impl/common" "github.com/filecoin-project/lotus/node/impl/full" "github.com/filecoin-project/lotus/node/impl/market" + "github.com/filecoin-project/lotus/node/impl/net" "github.com/filecoin-project/lotus/node/impl/paych" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/modules/lp2p" 
@@ -23,6 +24,7 @@ var log = logging.Logger("node") type FullNodeAPI struct { common.CommonAPI + net.NetAPI full.ChainAPI client.API full.MpoolAPI diff --git a/node/impl/full/chain.go b/node/impl/full/chain.go index d26c2d7ea04..c5c2334ad7a 100644 --- a/node/impl/full/chain.go +++ b/node/impl/full/chain.go @@ -83,6 +83,9 @@ type ChainAPI struct { // expose externally. In the future, this will be segregated into two // blockstores. ExposedBlockstore dtypes.ExposedBlockstore + + // BaseBlockstore is the underlying blockstore + BaseBlockstore dtypes.BaseBlockstore } func (m *ChainModule) ChainNotify(ctx context.Context) (<-chan []*api.HeadChange, error) { @@ -228,6 +231,33 @@ func (a *ChainAPI) ChainGetParentReceipts(ctx context.Context, bcid cid.Cid) ([] return out, nil } +func (a *ChainAPI) ChainGetMessagesInTipset(ctx context.Context, tsk types.TipSetKey) ([]api.Message, error) { + ts, err := a.Chain.GetTipSetFromKey(tsk) + if err != nil { + return nil, err + } + + // genesis block has no parent messages... 
+ if ts.Height() == 0 { + return nil, nil + } + + cm, err := a.Chain.MessagesForTipset(ts) + if err != nil { + return nil, err + } + + var out []api.Message + for _, m := range cm { + out = append(out, api.Message{ + Cid: m.Cid(), + Message: m.VMMessage(), + }) + } + + return out, nil +} + func (m *ChainModule) ChainGetTipSetByHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) { ts, err := m.Chain.GetTipSetFromKey(tsk) if err != nil { @@ -617,3 +647,21 @@ func (a *ChainAPI) ChainExport(ctx context.Context, nroots abi.ChainEpoch, skipo return out, nil } + +func (a *ChainAPI) ChainCheckBlockstore(ctx context.Context) error { + checker, ok := a.BaseBlockstore.(interface{ Check() error }) + if !ok { + return xerrors.Errorf("underlying blockstore does not support health checks") + } + + return checker.Check() +} + +func (a *ChainAPI) ChainBlockstoreInfo(ctx context.Context) (map[string]interface{}, error) { + info, ok := a.BaseBlockstore.(interface{ Info() map[string]interface{} }) + if !ok { + return nil, xerrors.Errorf("underlying blockstore does not provide info") + } + + return info.Info(), nil +} diff --git a/node/impl/full/multisig.go b/node/impl/full/multisig.go index e44509d7cbb..0d20c3f03ea 100644 --- a/node/impl/full/multisig.go +++ b/node/impl/full/multisig.go @@ -30,8 +30,12 @@ func (a *MsigAPI) messageBuilder(ctx context.Context, from address.Address) (mul if err != nil { return nil, err } + av, err := actors.VersionForNetwork(nver) + if err != nil { + return nil, err + } - return multisig.Message(actors.VersionForNetwork(nver), from), nil + return multisig.Message(av, from), nil } // TODO: remove gp (gasPrice) from arguments diff --git a/node/impl/full/state.go b/node/impl/full/state.go index b3639c5e04c..df08887873c 100644 --- a/node/impl/full/state.go +++ b/node/impl/full/state.go @@ -705,7 +705,7 @@ func (a *StateAPI) StateChangedActors(ctx context.Context, old cid.Cid, new cid. 
return nil, xerrors.Errorf("failed to load new state tree: %w", err) } - return state.Diff(oldTree, newTree) + return state.Diff(ctx, oldTree, newTree) } func (a *StateAPI) StateMinerSectorCount(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MinerSectors, error) { @@ -1332,13 +1332,16 @@ func (m *StateModule) StateDealProviderCollateralBounds(ctx context.Context, siz return api.DealCollateralBounds{}, xerrors.Errorf("getting reward baseline power: %w", err) } - min, max := policy.DealProviderCollateralBounds(size, + min, max, err := policy.DealProviderCollateralBounds(size, verified, powClaim.RawBytePower, powClaim.QualityAdjPower, rewPow, circ.FilCirculating, m.StateManager.GetNtwkVersion(ctx, ts.Height())) + if err != nil { + return api.DealCollateralBounds{}, xerrors.Errorf("getting deal provider coll bounds: %w", err) + } return api.DealCollateralBounds{ Min: types.BigDiv(types.BigMul(min, dealProviderCollateralNum), dealProviderCollateralDen), Max: max, diff --git a/node/impl/common/conngater.go b/node/impl/net/conngater.go similarity index 91% rename from node/impl/common/conngater.go rename to node/impl/net/conngater.go index ab387631c74..07e9784d977 100644 --- a/node/impl/common/conngater.go +++ b/node/impl/net/conngater.go @@ -1,4 +1,4 @@ -package common +package net import ( "context" @@ -14,7 +14,7 @@ import ( var cLog = logging.Logger("conngater") -func (a *CommonAPI) NetBlockAdd(ctx context.Context, acl api.NetBlockList) error { +func (a *NetAPI) NetBlockAdd(ctx context.Context, acl api.NetBlockList) error { for _, p := range acl.Peers { err := a.ConnGater.BlockPeer(p) if err != nil { @@ -89,7 +89,7 @@ func (a *CommonAPI) NetBlockAdd(ctx context.Context, acl api.NetBlockList) error return nil } -func (a *CommonAPI) NetBlockRemove(ctx context.Context, acl api.NetBlockList) error { +func (a *NetAPI) NetBlockRemove(ctx context.Context, acl api.NetBlockList) error { for _, p := range acl.Peers { err := a.ConnGater.UnblockPeer(p) if 
err != nil { @@ -124,7 +124,7 @@ func (a *CommonAPI) NetBlockRemove(ctx context.Context, acl api.NetBlockList) er return nil } -func (a *CommonAPI) NetBlockList(ctx context.Context) (result api.NetBlockList, err error) { +func (a *NetAPI) NetBlockList(ctx context.Context) (result api.NetBlockList, err error) { result.Peers = a.ConnGater.ListBlockedPeers() for _, ip := range a.ConnGater.ListBlockedAddrs() { result.IPAddrs = append(result.IPAddrs, ip.String()) diff --git a/node/impl/net/net.go b/node/impl/net/net.go new file mode 100644 index 00000000000..a1003ffe5f2 --- /dev/null +++ b/node/impl/net/net.go @@ -0,0 +1,183 @@ +package net + +import ( + "context" + "sort" + "strings" + + "go.uber.org/fx" + + "github.com/libp2p/go-libp2p-core/host" + "github.com/libp2p/go-libp2p-core/metrics" + "github.com/libp2p/go-libp2p-core/network" + "github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p-core/protocol" + swarm "github.com/libp2p/go-libp2p-swarm" + basichost "github.com/libp2p/go-libp2p/p2p/host/basic" + "github.com/libp2p/go-libp2p/p2p/net/conngater" + ma "github.com/multiformats/go-multiaddr" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/filecoin-project/lotus/node/modules/lp2p" +) + +type NetAPI struct { + fx.In + + RawHost lp2p.RawHost + Host host.Host + Router lp2p.BaseIpfsRouting + ConnGater *conngater.BasicConnectionGater + Reporter metrics.Reporter + Sk *dtypes.ScoreKeeper +} + +func (a *NetAPI) ID(context.Context) (peer.ID, error) { + return a.Host.ID(), nil +} + +func (a *NetAPI) NetConnectedness(ctx context.Context, pid peer.ID) (network.Connectedness, error) { + return a.Host.Network().Connectedness(pid), nil +} + +func (a *NetAPI) NetPubsubScores(context.Context) ([]api.PubsubScore, error) { + scores := a.Sk.Get() + out := make([]api.PubsubScore, len(scores)) + i := 0 + for k, v := range scores { + out[i] = api.PubsubScore{ID: k, Score: v} + i++ + } + + sort.Slice(out, 
func(i, j int) bool { + return strings.Compare(string(out[i].ID), string(out[j].ID)) > 0 + }) + + return out, nil +} + +func (a *NetAPI) NetPeers(context.Context) ([]peer.AddrInfo, error) { + conns := a.Host.Network().Conns() + out := make([]peer.AddrInfo, len(conns)) + + for i, conn := range conns { + out[i] = peer.AddrInfo{ + ID: conn.RemotePeer(), + Addrs: []ma.Multiaddr{ + conn.RemoteMultiaddr(), + }, + } + } + + return out, nil +} + +func (a *NetAPI) NetPeerInfo(_ context.Context, p peer.ID) (*api.ExtendedPeerInfo, error) { + info := &api.ExtendedPeerInfo{ID: p} + + agent, err := a.Host.Peerstore().Get(p, "AgentVersion") + if err == nil { + info.Agent = agent.(string) + } + + for _, a := range a.Host.Peerstore().Addrs(p) { + info.Addrs = append(info.Addrs, a.String()) + } + sort.Strings(info.Addrs) + + protocols, err := a.Host.Peerstore().GetProtocols(p) + if err == nil { + sort.Strings(protocols) + info.Protocols = protocols + } + + if cm := a.Host.ConnManager().GetTagInfo(p); cm != nil { + info.ConnMgrMeta = &api.ConnMgrInfo{ + FirstSeen: cm.FirstSeen, + Value: cm.Value, + Tags: cm.Tags, + Conns: cm.Conns, + } + } + + return info, nil +} + +func (a *NetAPI) NetConnect(ctx context.Context, p peer.AddrInfo) error { + if swrm, ok := a.Host.Network().(*swarm.Swarm); ok { + swrm.Backoff().Clear(p.ID) + } + + return a.Host.Connect(ctx, p) +} + +func (a *NetAPI) NetAddrsListen(context.Context) (peer.AddrInfo, error) { + return peer.AddrInfo{ + ID: a.Host.ID(), + Addrs: a.Host.Addrs(), + }, nil +} + +func (a *NetAPI) NetDisconnect(ctx context.Context, p peer.ID) error { + return a.Host.Network().ClosePeer(p) +} + +func (a *NetAPI) NetFindPeer(ctx context.Context, p peer.ID) (peer.AddrInfo, error) { + return a.Router.FindPeer(ctx, p) +} + +func (a *NetAPI) NetAutoNatStatus(ctx context.Context) (i api.NatInfo, err error) { + autonat := a.RawHost.(*basichost.BasicHost).GetAutoNat() + + if autonat == nil { + return api.NatInfo{ + Reachability: 
network.ReachabilityUnknown, + }, nil + } + + var maddr string + if autonat.Status() == network.ReachabilityPublic { + pa, err := autonat.PublicAddr() + if err != nil { + return api.NatInfo{}, err + } + maddr = pa.String() + } + + return api.NatInfo{ + Reachability: autonat.Status(), + PublicAddr: maddr, + }, nil +} + +func (a *NetAPI) NetAgentVersion(ctx context.Context, p peer.ID) (string, error) { + ag, err := a.Host.Peerstore().Get(p, "AgentVersion") + if err != nil { + return "", err + } + + if ag == nil { + return "unknown", nil + } + + return ag.(string), nil +} + +func (a *NetAPI) NetBandwidthStats(ctx context.Context) (metrics.Stats, error) { + return a.Reporter.GetBandwidthTotals(), nil +} + +func (a *NetAPI) NetBandwidthStatsByPeer(ctx context.Context) (map[string]metrics.Stats, error) { + out := make(map[string]metrics.Stats) + for p, s := range a.Reporter.GetBandwidthByPeer() { + out[p.String()] = s + } + return out, nil +} + +func (a *NetAPI) NetBandwidthStatsByProtocol(ctx context.Context) (map[protocol.ID]metrics.Stats, error) { + return a.Reporter.GetBandwidthByProtocol(), nil +} + +var _ api.Net = &NetAPI{} diff --git a/node/impl/storminer.go b/node/impl/storminer.go index 61c69b2ba97..0fbd1211143 100644 --- a/node/impl/storminer.go +++ b/node/impl/storminer.go @@ -8,6 +8,7 @@ import ( "strconv" "time" + "github.com/filecoin-project/go-jsonrpc/auth" "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/gen" @@ -16,16 +17,15 @@ import ( "github.com/ipfs/go-cid" "github.com/libp2p/go-libp2p-core/host" "github.com/libp2p/go-libp2p-core/peer" + "go.uber.org/fx" "golang.org/x/xerrors" "github.com/filecoin-project/go-address" datatransfer "github.com/filecoin-project/go-data-transfer" "github.com/filecoin-project/go-fil-markets/piecestore" - retrievalmarket "github.com/filecoin-project/go-fil-markets/retrievalmarket" - storagemarket "github.com/filecoin-project/go-fil-markets/storagemarket" - 
"github.com/filecoin-project/go-jsonrpc/auth" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" "github.com/filecoin-project/lotus/extern/sector-storage/fsutil" @@ -39,7 +39,6 @@ import ( "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/markets/storageadapter" "github.com/filecoin-project/lotus/miner" - "github.com/filecoin-project/lotus/node/impl/common" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/storage" "github.com/filecoin-project/lotus/storage/sectorblocks" @@ -47,56 +46,70 @@ import ( ) type StorageMinerAPI struct { - common.CommonAPI - - SectorBlocks *sectorblocks.SectorBlocks - - PieceStore dtypes.ProviderPieceStore - StorageProvider storagemarket.StorageProvider - RetrievalProvider retrievalmarket.RetrievalProvider - Miner *storage.Miner - BlockMiner *miner.Miner - Full api.FullNode - StorageMgr *sectorstorage.Manager `optional:"true"` - IStorageMgr sectorstorage.SectorManager - *stores.Index - storiface.WorkerReturn - DataTransfer dtypes.ProviderDataTransfer - Host host.Host - AddrSel *storage.AddressSelector - DealPublisher *storageadapter.DealPublisher - - Epp gen.WinningPoStProver + fx.In + + api.Common + api.Net + + EnabledSubsystems api.MinerSubsystems + + Full api.FullNode + LocalStore *stores.Local + RemoteStore *stores.Remote + + // Markets + PieceStore dtypes.ProviderPieceStore `optional:"true"` + StorageProvider storagemarket.StorageProvider `optional:"true"` + RetrievalProvider retrievalmarket.RetrievalProvider `optional:"true"` + DataTransfer dtypes.ProviderDataTransfer `optional:"true"` + DealPublisher *storageadapter.DealPublisher `optional:"true"` + SectorBlocks *sectorblocks.SectorBlocks `optional:"true"` + Host 
host.Host `optional:"true"` + + // Miner / storage + Miner *storage.Miner `optional:"true"` + BlockMiner *miner.Miner `optional:"true"` + StorageMgr *sectorstorage.Manager `optional:"true"` + IStorageMgr sectorstorage.SectorManager `optional:"true"` + stores.SectorIndex + storiface.WorkerReturn `optional:"true"` + AddrSel *storage.AddressSelector + + Epp gen.WinningPoStProver `optional:"true"` DS dtypes.MetadataDS - ConsiderOnlineStorageDealsConfigFunc dtypes.ConsiderOnlineStorageDealsConfigFunc - SetConsiderOnlineStorageDealsConfigFunc dtypes.SetConsiderOnlineStorageDealsConfigFunc - ConsiderOnlineRetrievalDealsConfigFunc dtypes.ConsiderOnlineRetrievalDealsConfigFunc - SetConsiderOnlineRetrievalDealsConfigFunc dtypes.SetConsiderOnlineRetrievalDealsConfigFunc - StorageDealPieceCidBlocklistConfigFunc dtypes.StorageDealPieceCidBlocklistConfigFunc - SetStorageDealPieceCidBlocklistConfigFunc dtypes.SetStorageDealPieceCidBlocklistConfigFunc - ConsiderOfflineStorageDealsConfigFunc dtypes.ConsiderOfflineStorageDealsConfigFunc - SetConsiderOfflineStorageDealsConfigFunc dtypes.SetConsiderOfflineStorageDealsConfigFunc - ConsiderOfflineRetrievalDealsConfigFunc dtypes.ConsiderOfflineRetrievalDealsConfigFunc - SetConsiderOfflineRetrievalDealsConfigFunc dtypes.SetConsiderOfflineRetrievalDealsConfigFunc - ConsiderVerifiedStorageDealsConfigFunc dtypes.ConsiderVerifiedStorageDealsConfigFunc - SetConsiderVerifiedStorageDealsConfigFunc dtypes.SetConsiderVerifiedStorageDealsConfigFunc - ConsiderUnverifiedStorageDealsConfigFunc dtypes.ConsiderUnverifiedStorageDealsConfigFunc - SetConsiderUnverifiedStorageDealsConfigFunc dtypes.SetConsiderUnverifiedStorageDealsConfigFunc - SetSealingConfigFunc dtypes.SetSealingConfigFunc - GetSealingConfigFunc dtypes.GetSealingConfigFunc - GetExpectedSealDurationFunc dtypes.GetExpectedSealDurationFunc - SetExpectedSealDurationFunc dtypes.SetExpectedSealDurationFunc -} + ConsiderOnlineStorageDealsConfigFunc dtypes.ConsiderOnlineStorageDealsConfigFunc 
`optional:"true"` + SetConsiderOnlineStorageDealsConfigFunc dtypes.SetConsiderOnlineStorageDealsConfigFunc `optional:"true"` + ConsiderOnlineRetrievalDealsConfigFunc dtypes.ConsiderOnlineRetrievalDealsConfigFunc `optional:"true"` + SetConsiderOnlineRetrievalDealsConfigFunc dtypes.SetConsiderOnlineRetrievalDealsConfigFunc `optional:"true"` + StorageDealPieceCidBlocklistConfigFunc dtypes.StorageDealPieceCidBlocklistConfigFunc `optional:"true"` + SetStorageDealPieceCidBlocklistConfigFunc dtypes.SetStorageDealPieceCidBlocklistConfigFunc `optional:"true"` + ConsiderOfflineStorageDealsConfigFunc dtypes.ConsiderOfflineStorageDealsConfigFunc `optional:"true"` + SetConsiderOfflineStorageDealsConfigFunc dtypes.SetConsiderOfflineStorageDealsConfigFunc `optional:"true"` + ConsiderOfflineRetrievalDealsConfigFunc dtypes.ConsiderOfflineRetrievalDealsConfigFunc `optional:"true"` + SetConsiderOfflineRetrievalDealsConfigFunc dtypes.SetConsiderOfflineRetrievalDealsConfigFunc `optional:"true"` + ConsiderVerifiedStorageDealsConfigFunc dtypes.ConsiderVerifiedStorageDealsConfigFunc `optional:"true"` + SetConsiderVerifiedStorageDealsConfigFunc dtypes.SetConsiderVerifiedStorageDealsConfigFunc `optional:"true"` + ConsiderUnverifiedStorageDealsConfigFunc dtypes.ConsiderUnverifiedStorageDealsConfigFunc `optional:"true"` + SetConsiderUnverifiedStorageDealsConfigFunc dtypes.SetConsiderUnverifiedStorageDealsConfigFunc `optional:"true"` + SetSealingConfigFunc dtypes.SetSealingConfigFunc `optional:"true"` + GetSealingConfigFunc dtypes.GetSealingConfigFunc `optional:"true"` + GetExpectedSealDurationFunc dtypes.GetExpectedSealDurationFunc `optional:"true"` + SetExpectedSealDurationFunc dtypes.SetExpectedSealDurationFunc `optional:"true"` +} + +func (sm *StorageMinerAPI) ServeRemote(perm bool) func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + if perm == true { + if !auth.HasPerm(r.Context(), nil, api.PermAdmin) { + w.WriteHeader(401) + _ = 
json.NewEncoder(w).Encode(struct{ Error string }{"unauthorized: missing write permission"}) + return + } + } -func (sm *StorageMinerAPI) ServeRemote(w http.ResponseWriter, r *http.Request) { - if !auth.HasPerm(r.Context(), nil, api.PermAdmin) { - w.WriteHeader(401) - _ = json.NewEncoder(w).Encode(struct{ Error string }{"unauthorized: missing write permission"}) - return + sm.StorageMgr.ServeHTTP(w, r) } - - sm.StorageMgr.ServeHTTP(w, r) } func (sm *StorageMinerAPI) WorkerStats(context.Context) (map[uuid.UUID]storiface.WorkerStats, error) { @@ -136,12 +149,12 @@ func (sm *StorageMinerAPI) PledgeSector(ctx context.Context) (abi.SectorID, erro // wait for the sector to enter the Packing state // TODO: instead of polling implement some pubsub-type thing in storagefsm for { - info, err := sm.Miner.GetSectorInfo(sr.ID.Number) + info, err := sm.Miner.SectorsStatus(ctx, sr.ID.Number, false) if err != nil { return abi.SectorID{}, xerrors.Errorf("getting pledged sector info: %w", err) } - if info.State != sealing.UndefinedSectorState { + if info.State != api.SectorState(sealing.UndefinedSectorState) { return sr.ID, nil } @@ -154,62 +167,11 @@ func (sm *StorageMinerAPI) PledgeSector(ctx context.Context) (abi.SectorID, erro } func (sm *StorageMinerAPI) SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (api.SectorInfo, error) { - info, err := sm.Miner.GetSectorInfo(sid) + sInfo, err := sm.Miner.SectorsStatus(ctx, sid, false) if err != nil { return api.SectorInfo{}, err } - deals := make([]abi.DealID, len(info.Pieces)) - for i, piece := range info.Pieces { - if piece.DealInfo == nil { - continue - } - deals[i] = piece.DealInfo.DealID - } - - log := make([]api.SectorLog, len(info.Log)) - for i, l := range info.Log { - log[i] = api.SectorLog{ - Kind: l.Kind, - Timestamp: l.Timestamp, - Trace: l.Trace, - Message: l.Message, - } - } - - sInfo := api.SectorInfo{ - SectorID: sid, - State: api.SectorState(info.State), - CommD: info.CommD, - CommR: 
info.CommR, - Proof: info.Proof, - Deals: deals, - Ticket: api.SealTicket{ - Value: info.TicketValue, - Epoch: info.TicketEpoch, - }, - Seed: api.SealSeed{ - Value: info.SeedValue, - Epoch: info.SeedEpoch, - }, - PreCommitMsg: info.PreCommitMessage, - CommitMsg: info.CommitMessage, - Retries: info.InvalidProofs, - ToUpgrade: sm.Miner.IsMarkedForUpgrade(sid), - - LastErr: info.LastErr, - Log: log, - // on chain info - SealProof: 0, - Activation: 0, - Expiration: 0, - DealWeight: big.Zero(), - VerifiedDealWeight: big.Zero(), - InitialPledge: big.Zero(), - OnTime: 0, - Early: 0, - } - if !showOnChainInfo { return sInfo, nil } @@ -238,6 +200,14 @@ func (sm *StorageMinerAPI) SectorsStatus(ctx context.Context, sid abi.SectorNumb return sInfo, nil } +func (sm *StorageMinerAPI) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r sto.Data, d api.PieceDealInfo) (api.SectorOffset, error) { + return sm.Miner.SectorAddPieceToAny(ctx, size, r, d) +} + +func (sm *StorageMinerAPI) SectorsUnsealPiece(ctx context.Context, sector sto.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd *cid.Cid) error { + return sm.StorageMgr.SectorsUnsealPiece(ctx, sector, offset, size, randomness, commd) +} + // List all staged sectors func (sm *StorageMinerAPI) SectorsList(context.Context) ([]abi.SectorNumber, error) { sectors, err := sm.Miner.ListSectors() @@ -300,7 +270,17 @@ func (sm *StorageMinerAPI) SectorsSummary(ctx context.Context) (map[api.SectorSt } func (sm *StorageMinerAPI) StorageLocal(ctx context.Context) (map[stores.ID]string, error) { - return sm.StorageMgr.StorageLocal(ctx) + l, err := sm.LocalStore.Local(ctx) + if err != nil { + return nil, err + } + + out := map[stores.ID]string{} + for _, st := range l { + out[st.ID] = st.LocalPath + } + + return out, nil } func (sm *StorageMinerAPI) SectorsRefs(context.Context) (map[string][]api.SealedRef, error) { @@ -320,7 +300,7 @@ func (sm *StorageMinerAPI) 
SectorsRefs(context.Context) (map[string][]api.Sealed } func (sm *StorageMinerAPI) StorageStat(ctx context.Context, id stores.ID) (fsutil.FsStat, error) { - return sm.StorageMgr.FsStat(ctx, id) + return sm.RemoteStore.FsStat(ctx, id) } func (sm *StorageMinerAPI) SectorStartSealing(ctx context.Context, number abi.SectorNumber) error { @@ -454,6 +434,11 @@ func (sm *StorageMinerAPI) MarketListRetrievalDeals(ctx context.Context) ([]retr deals := sm.RetrievalProvider.ListDeals() for _, deal := range deals { + if deal.ChannelID != nil { + if deal.ChannelID.Initiator == "" || deal.ChannelID.Responder == "" { + deal.ChannelID = nil // don't try to push unparsable peer IDs over jsonrpc + } + } out = append(out, deal) } @@ -683,7 +668,7 @@ func (sm *StorageMinerAPI) CheckProvable(ctx context.Context, pp abi.RegisteredP var rg storiface.RGetter if expensive { rg = func(ctx context.Context, id abi.SectorID) (cid.Cid, error) { - si, err := sm.Miner.GetSectorInfo(id.Number) + si, err := sm.Miner.SectorsStatus(ctx, id.Number, false) if err != nil { return cid.Undef, err } @@ -720,4 +705,8 @@ func (sm *StorageMinerAPI) ComputeProof(ctx context.Context, ssi []builtin.Secto return sm.Epp.ComputeProof(ctx, ssi, rand) } +func (sm *StorageMinerAPI) RuntimeSubsystems(context.Context) (res api.MinerSubsystems, err error) { + return sm.EnabledSubsystems, nil +} + var _ api.StorageMiner = &StorageMinerAPI{} diff --git a/node/modules/blockstore.go b/node/modules/blockstore.go index 787d782b7ea..2486b9744d5 100644 --- a/node/modules/blockstore.go +++ b/node/modules/blockstore.go @@ -37,6 +37,10 @@ func UniversalBlockstore(lc fx.Lifecycle, mctx helpers.MetricsCtx, r repo.Locked return bs, err } +func DiscardColdBlockstore(lc fx.Lifecycle, bs dtypes.UniversalBlockstore) (dtypes.ColdBlockstore, error) { + return blockstore.NewDiscardStore(bs), nil +} + func BadgerHotBlockstore(lc fx.Lifecycle, r repo.LockedRepo) (dtypes.HotBlockstore, error) { path, err := r.SplitstorePath() if err != nil { @@ 
-66,19 +70,18 @@ func BadgerHotBlockstore(lc fx.Lifecycle, r repo.LockedRepo) (dtypes.HotBlocksto return bs, nil } -func SplitBlockstore(cfg *config.Chainstore) func(lc fx.Lifecycle, r repo.LockedRepo, ds dtypes.MetadataDS, cold dtypes.UniversalBlockstore, hot dtypes.HotBlockstore) (dtypes.SplitBlockstore, error) { - return func(lc fx.Lifecycle, r repo.LockedRepo, ds dtypes.MetadataDS, cold dtypes.UniversalBlockstore, hot dtypes.HotBlockstore) (dtypes.SplitBlockstore, error) { +func SplitBlockstore(cfg *config.Chainstore) func(lc fx.Lifecycle, r repo.LockedRepo, ds dtypes.MetadataDS, cold dtypes.ColdBlockstore, hot dtypes.HotBlockstore) (dtypes.SplitBlockstore, error) { + return func(lc fx.Lifecycle, r repo.LockedRepo, ds dtypes.MetadataDS, cold dtypes.ColdBlockstore, hot dtypes.HotBlockstore) (dtypes.SplitBlockstore, error) { path, err := r.SplitstorePath() if err != nil { return nil, err } cfg := &splitstore.Config{ - TrackingStoreType: cfg.Splitstore.TrackingStoreType, - MarkSetType: cfg.Splitstore.MarkSetType, - EnableFullCompaction: cfg.Splitstore.EnableFullCompaction, - EnableGC: cfg.Splitstore.EnableGC, - Archival: cfg.Splitstore.Archival, + MarkSetType: cfg.Splitstore.MarkSetType, + DiscardColdBlocks: cfg.Splitstore.ColdStoreType == "discard", + HotStoreMessageRetention: cfg.Splitstore.HotStoreMessageRetention, + HotStoreFullGCFrequency: cfg.Splitstore.HotStoreFullGCFrequency, } ss, err := splitstore.Open(path, ds, hot, cold, cfg) if err != nil { @@ -94,6 +97,18 @@ func SplitBlockstore(cfg *config.Chainstore) func(lc fx.Lifecycle, r repo.Locked } } +func SplitBlockstoreGCReferenceProtector(_ fx.Lifecycle, s dtypes.SplitBlockstore) dtypes.GCReferenceProtector { + return s.(dtypes.GCReferenceProtector) +} + +func NoopGCReferenceProtector(_ fx.Lifecycle) dtypes.GCReferenceProtector { + return dtypes.NoopGCReferenceProtector{} +} + +func ExposedSplitBlockstore(_ fx.Lifecycle, s dtypes.SplitBlockstore) dtypes.ExposedBlockstore { + return 
s.(*splitstore.SplitStore).Expose() +} + func StateFlatBlockstore(_ fx.Lifecycle, _ helpers.MetricsCtx, bs dtypes.UniversalBlockstore) (dtypes.BasicStateBlockstore, error) { return bs, nil } diff --git a/node/modules/chain.go b/node/modules/chain.go index 95432294884..c4017b8c0bf 100644 --- a/node/modules/chain.go +++ b/node/modules/chain.go @@ -58,7 +58,7 @@ func ChainBlockService(bs dtypes.ExposedBlockstore, rem dtypes.ChainBitswap) dty return blockservice.New(bs, rem) } -func MessagePool(lc fx.Lifecycle, mpp messagepool.Provider, ds dtypes.MetadataDS, nn dtypes.NetworkName, j journal.Journal) (*messagepool.MessagePool, error) { +func MessagePool(lc fx.Lifecycle, mpp messagepool.Provider, ds dtypes.MetadataDS, nn dtypes.NetworkName, j journal.Journal, protector dtypes.GCReferenceProtector) (*messagepool.MessagePool, error) { mp, err := messagepool.New(mpp, ds, nn, j) if err != nil { return nil, xerrors.Errorf("constructing mpool: %w", err) @@ -68,6 +68,7 @@ func MessagePool(lc fx.Lifecycle, mpp messagepool.Provider, ds dtypes.MetadataDS return mp.Close() }, }) + protector.AddProtector(mp.ForEachPendingMessage) return mp, nil } diff --git a/node/modules/dtypes/protector.go b/node/modules/dtypes/protector.go new file mode 100644 index 00000000000..0d9625fc1cd --- /dev/null +++ b/node/modules/dtypes/protector.go @@ -0,0 +1,13 @@ +package dtypes + +import ( + cid "github.com/ipfs/go-cid" +) + +type GCReferenceProtector interface { + AddProtector(func(func(cid.Cid) error) error) +} + +type NoopGCReferenceProtector struct{} + +func (p NoopGCReferenceProtector) AddProtector(func(func(cid.Cid) error) error) {} diff --git a/node/modules/dtypes/storage.go b/node/modules/dtypes/storage.go index e35d02811a7..b4420f701f8 100644 --- a/node/modules/dtypes/storage.go +++ b/node/modules/dtypes/storage.go @@ -24,9 +24,12 @@ import ( type MetadataDS datastore.Batching type ( - // UniversalBlockstore is the cold blockstore. 
+ // UniversalBlockstore is the universal blockstore backend. UniversalBlockstore blockstore.Blockstore + // ColdBlockstore is the Cold blockstore abstraction for the splitstore + ColdBlockstore blockstore.Blockstore + // HotBlockstore is the Hot blockstore abstraction for the splitstore HotBlockstore blockstore.Blockstore @@ -83,6 +86,7 @@ type ClientDataTransfer datatransfer.Manager type ProviderDealStore *statestore.StateStore type ProviderPieceStore piecestore.PieceStore + type ProviderRequestValidator *requestvalidation.UnifiedRequestValidator // ProviderDataTransfer is a data transfer manager for the provider diff --git a/node/modules/storageminer.go b/node/modules/storageminer.go index 8508850d3e9..5497eab5813 100644 --- a/node/modules/storageminer.go +++ b/node/modules/storageminer.go @@ -8,6 +8,7 @@ import ( "net/http" "os" "path/filepath" + "strings" "time" "github.com/filecoin-project/lotus/markets/pricing" @@ -44,7 +45,7 @@ import ( smnet "github.com/filecoin-project/go-fil-markets/storagemarket/network" "github.com/filecoin-project/go-jsonrpc/auth" "github.com/filecoin-project/go-multistore" - paramfetch "github.com/filecoin-project/go-paramfetch" + "github.com/filecoin-project/go-paramfetch" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-statestore" "github.com/filecoin-project/go-storedcounter" @@ -67,7 +68,6 @@ import ( "github.com/filecoin-project/lotus/journal" "github.com/filecoin-project/lotus/markets" marketevents "github.com/filecoin-project/lotus/markets/loggers" - "github.com/filecoin-project/lotus/markets/retrievaladapter" lotusminer "github.com/filecoin-project/lotus/miner" "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/modules/dtypes" @@ -188,6 +188,15 @@ func AddressSelector(addrConf *config.MinerAddressConfig) func() (*storage.Addre as.TerminateControl = append(as.TerminateControl, addr) } + for _, s := range addrConf.DealPublishControl { + addr, err := 
address.NewFromString(s) + if err != nil { + return nil, xerrors.Errorf("parsing deal publishing control address: %w", err) + } + + as.DealPublishControl = append(as.DealPublishControl, addr) + } + return as, nil } } @@ -198,7 +207,6 @@ type StorageMinerParams struct { Lifecycle fx.Lifecycle MetricsCtx helpers.MetricsCtx API v1api.FullNode - Host host.Host MetadataDS dtypes.MetadataDS Sealer sectorstorage.SectorManager SectorIDCounter sealing.SectorIDCounter @@ -217,7 +225,6 @@ func StorageMiner(fc config.MinerFeeConfig) func(params StorageMinerParams) (*st lc = params.Lifecycle api = params.API sealer = params.Sealer - h = params.Host sc = params.SectorIDCounter verif = params.Verifier prover = params.Prover @@ -238,7 +245,7 @@ func StorageMiner(fc config.MinerFeeConfig) func(params StorageMinerParams) (*st return nil, err } - sm, err := storage.NewMiner(api, maddr, h, ds, sealer, sc, verif, prover, gsd, fc, j, as) + sm, err := storage.NewMiner(api, maddr, ds, sealer, sc, verif, prover, gsd, fc, j, as) if err != nil { return nil, err } @@ -643,6 +650,10 @@ func RetrievalDealFilter(userFilter dtypes.RetrievalDealFilter) func(onlineOk dt } } +func RetrievalNetwork(h host.Host) rmnet.RetrievalMarketNetwork { + return rmnet.NewFromLibp2pHost(h) +} + // RetrievalPricingFunc configures the pricing function to use for retrieval deals. 
func RetrievalPricingFunc(cfg config.DealmakingConfig) func(_ dtypes.ConsiderOnlineRetrievalDealsConfigFunc, _ dtypes.ConsiderOfflineRetrievalDealsConfigFunc) dtypes.RetrievalPricingFunc { @@ -658,35 +669,26 @@ func RetrievalPricingFunc(cfg config.DealmakingConfig) func(_ dtypes.ConsiderOnl } // RetrievalProvider creates a new retrieval provider attached to the provider blockstore -func RetrievalProvider(h host.Host, - miner *storage.Miner, - full v1api.FullNode, +func RetrievalProvider( + maddr dtypes.MinerAddress, + adapter retrievalmarket.RetrievalProviderNode, + netwk rmnet.RetrievalMarketNetwork, ds dtypes.MetadataDS, pieceStore dtypes.ProviderPieceStore, mds dtypes.StagingMultiDstore, dt dtypes.ProviderDataTransfer, - pieceProvider sectorstorage.PieceProvider, pricingFnc dtypes.RetrievalPricingFunc, userFilter dtypes.RetrievalDealFilter, ) (retrievalmarket.RetrievalProvider, error) { - adapter := retrievaladapter.NewRetrievalProviderNode(miner, pieceProvider, full) - - maddr, err := minerAddrFromDS(ds) - if err != nil { - return nil, err - } - - netwk := rmnet.NewFromLibp2pHost(h) opt := retrievalimpl.DealDeciderOpt(retrievalimpl.DealDecider(userFilter)) - - return retrievalimpl.NewProvider(maddr, adapter, netwk, pieceStore, mds, dt, namespace.Wrap(ds, datastore.NewKey("/retrievals/provider")), + return retrievalimpl.NewProvider(address.Address(maddr), adapter, netwk, pieceStore, mds, dt, namespace.Wrap(ds, datastore.NewKey("/retrievals/provider")), retrievalimpl.RetrievalPricingFunc(pricingFnc), opt) } var WorkerCallsPrefix = datastore.NewKey("/worker/calls") var ManagerWorkPrefix = datastore.NewKey("/stmgr/calls") -func LocalStorage(mctx helpers.MetricsCtx, lc fx.Lifecycle, ls stores.LocalStorage, si stores.SectorIndex, urls sectorstorage.URLs) (*stores.Local, error) { +func LocalStorage(mctx helpers.MetricsCtx, lc fx.Lifecycle, ls stores.LocalStorage, si stores.SectorIndex, urls stores.URLs) (*stores.Local, error) { ctx := helpers.LifecycleCtx(mctx, lc) 
return stores.NewLocal(ctx, ls, si, urls) } @@ -724,6 +726,18 @@ func StorageAuth(ctx helpers.MetricsCtx, ca v0api.Common) (sectorstorage.Storage return sectorstorage.StorageAuth(headers), nil } +func StorageAuthWithURL(apiInfo string) func(ctx helpers.MetricsCtx, ca v0api.Common) (sectorstorage.StorageAuth, error) { + return func(ctx helpers.MetricsCtx, ca v0api.Common) (sectorstorage.StorageAuth, error) { + s := strings.Split(apiInfo, ":") + if len(s) != 2 { + return nil, errors.New("unexpected format of `apiInfo`") + } + headers := http.Header{} + headers.Add("Authorization", "Bearer "+s[0]) + return sectorstorage.StorageAuth(headers), nil + } +} + func NewConsiderOnlineStorageDealsConfigFunc(r repo.LockedRepo) (dtypes.ConsiderOnlineStorageDealsConfigFunc, error) { return func() (out bool, err error) { err = readCfg(r, func(cfg *config.StorageMiner) { @@ -861,6 +875,10 @@ func NewSetSealConfigFunc(r repo.LockedRepo) (dtypes.SetSealingConfigFunc, error AlwaysKeepUnsealedCopy: cfg.AlwaysKeepUnsealedCopy, FinalizeEarly: cfg.FinalizeEarly, + CollateralFromMinerBalance: cfg.CollateralFromMinerBalance, + AvailableBalanceBuffer: types.FIL(cfg.AvailableBalanceBuffer), + DisableCollateralFallback: cfg.DisableCollateralFallback, + BatchPreCommits: cfg.BatchPreCommits, MaxPreCommitBatch: cfg.MaxPreCommitBatch, PreCommitBatchWait: config.Duration(cfg.PreCommitBatchWait), @@ -891,6 +909,10 @@ func ToSealingConfig(cfg *config.StorageMiner) sealiface.Config { AlwaysKeepUnsealedCopy: cfg.Sealing.AlwaysKeepUnsealedCopy, FinalizeEarly: cfg.Sealing.FinalizeEarly, + CollateralFromMinerBalance: cfg.Sealing.CollateralFromMinerBalance, + AvailableBalanceBuffer: types.BigInt(cfg.Sealing.AvailableBalanceBuffer), + DisableCollateralFallback: cfg.Sealing.DisableCollateralFallback, + BatchPreCommits: cfg.Sealing.BatchPreCommits, MaxPreCommitBatch: cfg.Sealing.MaxPreCommitBatch, PreCommitBatchWait: time.Duration(cfg.Sealing.PreCommitBatchWait), @@ -985,3 +1007,19 @@ func mutateCfg(r 
repo.LockedRepo, mutator func(*config.StorageMiner)) error { return multierr.Combine(typeErr, setConfigErr) } + +func ExtractEnabledMinerSubsystems(cfg config.MinerSubsystemConfig) (res api.MinerSubsystems) { + if cfg.EnableMining { + res = append(res, api.SubsystemMining) + } + if cfg.EnableSealing { + res = append(res, api.SubsystemSealing) + } + if cfg.EnableSectorStorage { + res = append(res, api.SubsystemSectorStorage) + } + if cfg.EnableMarkets { + res = append(res, api.SubsystemMarkets) + } + return res +} diff --git a/node/modules/storageminer_svc.go b/node/modules/storageminer_svc.go new file mode 100644 index 00000000000..0a4be219212 --- /dev/null +++ b/node/modules/storageminer_svc.go @@ -0,0 +1,71 @@ +package modules + +import ( + "context" + + "github.com/filecoin-project/lotus/storage/sectorblocks" + + "go.uber.org/fx" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/client" + cliutil "github.com/filecoin-project/lotus/cli/util" + "github.com/filecoin-project/lotus/node/modules/helpers" +) + +type MinerSealingService api.StorageMiner +type MinerStorageService api.StorageMiner + +var _ sectorblocks.SectorBuilder = *new(MinerSealingService) + +func connectMinerService(apiInfo string) func(mctx helpers.MetricsCtx, lc fx.Lifecycle) (api.StorageMiner, error) { + return func(mctx helpers.MetricsCtx, lc fx.Lifecycle) (api.StorageMiner, error) { + ctx := helpers.LifecycleCtx(mctx, lc) + info := cliutil.ParseApiInfo(apiInfo) + addr, err := info.DialArgs("v0") + if err != nil { + return nil, xerrors.Errorf("could not get DialArgs: %w", err) + } + + log.Infof("Checking (svc) api version of %s", addr) + + mapi, closer, err := client.NewStorageMinerRPCV0(ctx, addr, info.AuthHeader()) + if err != nil { + return nil, err + } + lc.Append(fx.Hook{ + OnStart: func(ctx context.Context) error { + v, err := mapi.Version(ctx) + if err != nil { + return xerrors.Errorf("checking version: %w", err) + } + + if 
!v.APIVersion.EqMajorMinor(api.MinerAPIVersion0) { + return xerrors.Errorf("remote service API version didn't match (expected %s, remote %s)", api.MinerAPIVersion0, v.APIVersion) + } + + return nil + }, + OnStop: func(context.Context) error { + closer() + return nil + }}) + + return mapi, nil + } +} + +func ConnectSealingService(apiInfo string) func(mctx helpers.MetricsCtx, lc fx.Lifecycle) (MinerSealingService, error) { + return func(mctx helpers.MetricsCtx, lc fx.Lifecycle) (MinerSealingService, error) { + log.Info("Connecting sealing service to miner") + return connectMinerService(apiInfo)(mctx, lc) + } +} + +func ConnectStorageService(apiInfo string) func(mctx helpers.MetricsCtx, lc fx.Lifecycle) (MinerStorageService, error) { + return func(mctx helpers.MetricsCtx, lc fx.Lifecycle) (MinerStorageService, error) { + log.Info("Connecting storage service to miner") + return connectMinerService(apiInfo)(mctx, lc) + } +} diff --git a/node/repo/fsrepo.go b/node/repo/fsrepo.go index a40ae62d069..5c1c91bc559 100644 --- a/node/repo/fsrepo.go +++ b/node/repo/fsrepo.go @@ -49,13 +49,31 @@ const ( StorageMiner Worker Wallet + Markets ) +func (t RepoType) String() string { + s := [...]string{ + "__invalid__", + "FullNode", + "StorageMiner", + "Worker", + "Wallet", + "Markets", + } + if t < 0 || int(t) > len(s) { + return "__invalid__" + } + return s[t] +} + func defConfForType(t RepoType) interface{} { switch t { case FullNode: return config.DefaultFullNode() - case StorageMiner: + case StorageMiner, Markets: + // markets is a specialised miner service + // this taxonomy needs to be cleaned up return config.DefaultStorageMiner() case Worker: return &struct{}{} @@ -327,6 +345,21 @@ func (fsr *fsLockedRepo) Blockstore(ctx context.Context, domain BlockstoreDomain return } + // + // Tri-state environment variable LOTUS_CHAIN_BADGERSTORE_DISABLE_FSYNC + // - unset == the default (currently fsync enabled) + // - set with a false-y value == fsync enabled no matter what a future 
default is + // - set with any other value == fsync is disabled ignored defaults (recommended for day-to-day use) + // + if nosyncBs, nosyncBsSet := os.LookupEnv("LOTUS_CHAIN_BADGERSTORE_DISABLE_FSYNC"); nosyncBsSet { + nosyncBs = strings.ToLower(nosyncBs) + if nosyncBs == "" || nosyncBs == "0" || nosyncBs == "false" || nosyncBs == "no" { + opts.SyncWrites = true + } else { + opts.SyncWrites = false + } + } + bs, err := badgerbs.Open(opts) if err != nil { fsr.bsErr = err diff --git a/node/rpc.go b/node/rpc.go index 9b84792bb55..b283f6ac10a 100644 --- a/node/rpc.go +++ b/node/rpc.go @@ -23,6 +23,7 @@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api/v0api" "github.com/filecoin-project/lotus/api/v1api" + "github.com/filecoin-project/lotus/lib/rpcenc" "github.com/filecoin-project/lotus/metrics" "github.com/filecoin-project/lotus/node/impl" ) @@ -117,18 +118,20 @@ func MinerHandler(a api.StorageMiner, permissioned bool) (http.Handler, error) { mapi = api.PermissionedStorMinerAPI(mapi) } - rpcServer := jsonrpc.NewServer() + readerHandler, readerServerOpt := rpcenc.ReaderParamDecoder() + rpcServer := jsonrpc.NewServer(readerServerOpt) rpcServer.Register("Filecoin", mapi) m.Handle("/rpc/v0", rpcServer) - m.PathPrefix("/remote").HandlerFunc(a.(*impl.StorageMinerAPI).ServeRemote) + m.Handle("/rpc/streams/v0/push/{uuid}", readerHandler) + m.PathPrefix("/remote").HandlerFunc(a.(*impl.StorageMinerAPI).ServeRemote(permissioned)) // debugging m.Handle("/debug/metrics", metrics.Exporter()) m.PathPrefix("/").Handler(http.DefaultServeMux) // pprof if !permissioned { - return rpcServer, nil + return m, nil } ah := &auth.Handler{ diff --git a/node/testopts.go b/node/testopts.go index f348fc55510..ca1e8112759 100644 --- a/node/testopts.go +++ b/node/testopts.go @@ -10,8 +10,8 @@ import ( func MockHost(mn mocknet.Mocknet) Option { return Options( - ApplyIf(func(s *Settings) bool { return !s.Online }, - Error(errors.New("MockHost must be specified 
after Online")), + ApplyIf(func(s *Settings) bool { return !s.Base }, + Error(errors.New("MockHost must be specified after Base")), ), Override(new(lp2p.RawHost), lp2p.MockHost), diff --git a/paychmgr/paych.go b/paychmgr/paych.go index c4ef3deb01a..ed72c35e0b0 100644 --- a/paychmgr/paych.go +++ b/paychmgr/paych.go @@ -88,7 +88,11 @@ func (ca *channelAccessor) messageBuilder(ctx context.Context, from address.Addr return nil, err } - return paych.Message(actors.VersionForNetwork(nwVersion), from), nil + av, err := actors.VersionForNetwork(nwVersion) + if err != nil { + return nil, err + } + return paych.Message(av, from), nil } func (ca *channelAccessor) getChannelInfo(addr address.Address) (*ChannelInfo, error) { diff --git a/scripts/dev/sminer-init b/scripts/dev/sminer-init index 2f4a3f7afa3..767921511c8 100755 --- a/scripts/dev/sminer-init +++ b/scripts/dev/sminer-init @@ -7,4 +7,4 @@ export TRUST_PARAMS=1 tag=${TAG:-debug} go run -tags=$tag ./cmd/lotus wallet import ~/.genesis-sectors/pre-seal-t01000.key -go run -tags=$tag ./cmd/lotus-storage-miner init --actor=t01000 --genesis-miner --pre-sealed-sectors=~/.genesis-sectors --pre-sealed-metadata=~/.genesis-sectors/pre-seal-t01000.json +go run -tags=$tag ./cmd/lotus-miner init --actor=t01000 --genesis-miner --pre-sealed-sectors=~/.genesis-sectors --pre-sealed-metadata=~/.genesis-sectors/pre-seal-t01000.json diff --git a/scripts/docker-lotus-entrypoint.sh b/scripts/docker-lotus-entrypoint.sh new file mode 100755 index 00000000000..308a4b6eb55 --- /dev/null +++ b/scripts/docker-lotus-entrypoint.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +if [ ! -z DOCKER_LOTUS_IMPORT_SNAPSHOT ]; then + GATE="$LOTUS_PATH"/date_initialized + # Don't init if already initialized. + if [ ! -f "$GATE" ]; then + echo importing minimal snapshot + /usr/local/bin/lotus daemon --import-snapshot "$DOCKER_LOTUS_IMPORT_SNAPSHOT" --halt-after-import + # Block future inits + date > "$GATE" + fi +fi + +# import wallet, if provided +if [ ! 
-z DOCKER_LOTUS_IMPORT_WALLET ]; then + /usr/local/bin/lotus-shed keyinfo import "$DOCKER_LOTUS_IMPORT_WALLET" +fi + +exec /usr/local/bin/lotus $@ diff --git a/scripts/docker-lotus-miner-entrypoint.sh b/scripts/docker-lotus-miner-entrypoint.sh new file mode 100755 index 00000000000..1cb153176f5 --- /dev/null +++ b/scripts/docker-lotus-miner-entrypoint.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +if [ ! -z DOCKER_LOTUS_MINER_INIT ]; then + GATE="$LOTUS_PATH"/date_initialized + + # Don't init if already initialized. + if [ -f "GATE" ]; then + echo lotus-miner already initialized. + exit 0 + fi + + echo starting init + /usr/local/bin/lotus-miner init + + # Block future inits + date > "$GATE" +fi + +exec /usr/local/bin/lotus-miner $@ diff --git a/storage/adapter_storage_miner.go b/storage/adapter_storage_miner.go index 895e7846db3..531fe2d03a4 100644 --- a/storage/adapter_storage_miner.go +++ b/storage/adapter_storage_miner.go @@ -76,6 +76,15 @@ func (s SealingAPIAdapter) StateMinerInfo(ctx context.Context, maddr address.Add return s.delegate.StateMinerInfo(ctx, maddr, tsk) } +func (s SealingAPIAdapter) StateMinerAvailableBalance(ctx context.Context, maddr address.Address, tok sealing.TipSetToken) (big.Int, error) { + tsk, err := types.TipSetKeyFromBytes(tok) + if err != nil { + return big.Zero(), xerrors.Errorf("failed to unmarshal TipSetToken to TipSetKey: %w", err) + } + + return s.delegate.StateMinerAvailableBalance(ctx, maddr, tsk) +} + func (s SealingAPIAdapter) StateMinerWorkerAddress(ctx context.Context, maddr address.Address, tok sealing.TipSetToken) (address.Address, error) { // TODO: update storage-fsm to just StateMinerInfo mi, err := s.StateMinerInfo(ctx, maddr, tok) diff --git a/storage/addresses.go b/storage/addresses.go index a8e5e7101e2..f8f06ed9813 100644 --- a/storage/addresses.go +++ b/storage/addresses.go @@ -5,6 +5,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" + 
"github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" @@ -24,6 +25,12 @@ type AddressSelector struct { } func (as *AddressSelector) AddressFor(ctx context.Context, a addrSelectApi, mi miner.MinerInfo, use api.AddrUse, goodFunds, minFunds abi.TokenAmount) (address.Address, abi.TokenAmount, error) { + if as == nil { + // should only happen in some tests + log.Warnw("smart address selection disabled, using worker address") + return mi.Worker, big.Zero(), nil + } + var addrs []address.Address switch use { case api.PreCommitAddr: @@ -32,6 +39,8 @@ func (as *AddressSelector) AddressFor(ctx context.Context, a addrSelectApi, mi m addrs = append(addrs, as.CommitControl...) case api.TerminateSectorsAddr: addrs = append(addrs, as.TerminateControl...) + case api.DealPublishAddr: + addrs = append(addrs, as.DealPublishControl...) default: defaultCtl := map[address.Address]struct{}{} for _, a := range mi.ControlAddresses { @@ -43,6 +52,7 @@ func (as *AddressSelector) AddressFor(ctx context.Context, a addrSelectApi, mi m configCtl := append([]address.Address{}, as.PreCommitControl...) configCtl = append(configCtl, as.CommitControl...) configCtl = append(configCtl, as.TerminateControl...) + configCtl = append(configCtl, as.DealPublishControl...) 
for _, addr := range configCtl { if addr.Protocol() != address.ID { diff --git a/storage/miner.go b/storage/miner.go index 3d29e0ef11e..cdacc273492 100644 --- a/storage/miner.go +++ b/storage/miner.go @@ -6,11 +6,9 @@ import ( "time" "github.com/filecoin-project/go-bitfield" - "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" logging "github.com/ipfs/go-log/v2" - "github.com/libp2p/go-libp2p-core/host" "golang.org/x/xerrors" "github.com/filecoin-project/go-address" @@ -52,7 +50,6 @@ var log = logging.Logger("storageminer") type Miner struct { api fullNodeFilteredAPI feeCfg config.MinerFeeConfig - h host.Host sealer sectorstorage.SectorManager ds datastore.Batching sc sealing.SectorIDCounter @@ -89,6 +86,7 @@ type fullNodeFilteredAPI interface { StateSectorGetInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorOnChainInfo, error) StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*miner.SectorLocation, error) StateMinerInfo(context.Context, address.Address, types.TipSetKey) (miner.MinerInfo, error) + StateMinerAvailableBalance(ctx context.Context, maddr address.Address, tok types.TipSetKey) (types.BigInt, error) StateMinerDeadlines(context.Context, address.Address, types.TipSetKey) ([]api.Deadline, error) StateMinerPartitions(context.Context, address.Address, uint64, types.TipSetKey) ([]api.Partition, error) StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*dline.Info, error) @@ -130,7 +128,6 @@ type fullNodeFilteredAPI interface { // NewMiner creates a new Miner object. 
func NewMiner(api fullNodeFilteredAPI, maddr address.Address, - h host.Host, ds datastore.Batching, sealer sectorstorage.SectorManager, sc sealing.SectorIDCounter, @@ -143,7 +140,6 @@ func NewMiner(api fullNodeFilteredAPI, m := &Miner{ api: api, feeCfg: feeCfg, - h: h, sealer: sealer, ds: ds, sc: sc, diff --git a/storage/miner_sealing.go b/storage/miner_sealing.go index 6a1195826e4..38b24e8c13c 100644 --- a/storage/miner_sealing.go +++ b/storage/miner_sealing.go @@ -2,16 +2,19 @@ package storage import ( "context" - "io" "github.com/ipfs/go-cid" + "golang.org/x/xerrors" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/specs-storage/storage" + "github.com/filecoin-project/lotus/api" sealing "github.com/filecoin-project/lotus/extern/storage-sealing" "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" + "github.com/filecoin-project/lotus/storage/sectorblocks" ) // TODO: refactor this to be direct somehow @@ -20,10 +23,6 @@ func (m *Miner) Address() address.Address { return m.sealing.Address() } -func (m *Miner) AddPieceToAnySector(ctx context.Context, size abi.UnpaddedPieceSize, r io.Reader, d sealing.DealInfo) (abi.SectorNumber, abi.PaddedPieceSize, error) { - return m.sealing.AddPieceToAnySector(ctx, size, r, d) -} - func (m *Miner) StartPackingSector(sectorNum abi.SectorNumber) error { return m.sealing.StartPacking(sectorNum) } @@ -32,10 +31,6 @@ func (m *Miner) ListSectors() ([]sealing.SectorInfo, error) { return m.sealing.ListSectors() } -func (m *Miner) GetSectorInfo(sid abi.SectorNumber) (sealing.SectorInfo, error) { - return m.sealing.GetSectorInfo(sid) -} - func (m *Miner) PledgeSector(ctx context.Context) (storage.SectorRef, error) { return m.sealing.PledgeSector(ctx) } @@ -83,3 +78,73 @@ func (m *Miner) MarkForUpgrade(id abi.SectorNumber) error { func (m *Miner) IsMarkedForUpgrade(id abi.SectorNumber) bool { return 
m.sealing.IsMarkedForUpgrade(id) } + +func (m *Miner) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storage.Data, d api.PieceDealInfo) (api.SectorOffset, error) { + return m.sealing.SectorAddPieceToAny(ctx, size, r, d) +} + +func (m *Miner) SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (api.SectorInfo, error) { + if showOnChainInfo { + return api.SectorInfo{}, xerrors.Errorf("on-chain info not supported") + } + + info, err := m.sealing.GetSectorInfo(sid) + if err != nil { + return api.SectorInfo{}, err + } + + deals := make([]abi.DealID, len(info.Pieces)) + for i, piece := range info.Pieces { + if piece.DealInfo == nil { + continue + } + deals[i] = piece.DealInfo.DealID + } + + log := make([]api.SectorLog, len(info.Log)) + for i, l := range info.Log { + log[i] = api.SectorLog{ + Kind: l.Kind, + Timestamp: l.Timestamp, + Trace: l.Trace, + Message: l.Message, + } + } + + sInfo := api.SectorInfo{ + SectorID: sid, + State: api.SectorState(info.State), + CommD: info.CommD, + CommR: info.CommR, + Proof: info.Proof, + Deals: deals, + Ticket: api.SealTicket{ + Value: info.TicketValue, + Epoch: info.TicketEpoch, + }, + Seed: api.SealSeed{ + Value: info.SeedValue, + Epoch: info.SeedEpoch, + }, + PreCommitMsg: info.PreCommitMessage, + CommitMsg: info.CommitMessage, + Retries: info.InvalidProofs, + ToUpgrade: m.IsMarkedForUpgrade(sid), + + LastErr: info.LastErr, + Log: log, + // on chain info + SealProof: info.SectorType, + Activation: 0, + Expiration: 0, + DealWeight: big.Zero(), + VerifiedDealWeight: big.Zero(), + InitialPledge: big.Zero(), + OnTime: 0, + Early: 0, + } + + return sInfo, nil +} + +var _ sectorblocks.SectorBuilder = &Miner{} diff --git a/storage/sectorblocks/blocks.go b/storage/sectorblocks/blocks.go index bc8456a1f28..ad4ffc0db8a 100644 --- a/storage/sectorblocks/blocks.go +++ b/storage/sectorblocks/blocks.go @@ -16,11 +16,10 @@ import ( cborutil "github.com/filecoin-project/go-cbor-util" 
"github.com/filecoin-project/go-state-types/abi" - sealing "github.com/filecoin-project/lotus/extern/storage-sealing" + "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/node/modules/dtypes" - "github.com/filecoin-project/lotus/storage" ) type SealSerialization uint8 @@ -48,17 +47,22 @@ func DsKeyToDealID(key datastore.Key) (uint64, error) { return dealID, nil } +type SectorBuilder interface { + SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storage.Data, d api.PieceDealInfo) (api.SectorOffset, error) + SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (api.SectorInfo, error) +} + type SectorBlocks struct { - *storage.Miner + SectorBuilder keys datastore.Batching keyLk sync.Mutex } -func NewSectorBlocks(miner *storage.Miner, ds dtypes.MetadataDS) *SectorBlocks { +func NewSectorBlocks(sb SectorBuilder, ds dtypes.MetadataDS) *SectorBlocks { sbc := &SectorBlocks{ - Miner: miner, - keys: namespace.Wrap(ds, dsPrefix), + SectorBuilder: sb, + keys: namespace.Wrap(ds, dsPrefix), } return sbc @@ -96,19 +100,19 @@ func (st *SectorBlocks) writeRef(dealID abi.DealID, sectorID abi.SectorNumber, o return st.keys.Put(DealIDToDsKey(dealID), newRef) // TODO: batch somehow } -func (st *SectorBlocks) AddPiece(ctx context.Context, size abi.UnpaddedPieceSize, r io.Reader, d sealing.DealInfo) (abi.SectorNumber, abi.PaddedPieceSize, error) { - sn, offset, err := st.Miner.AddPieceToAnySector(ctx, size, r, d) +func (st *SectorBlocks) AddPiece(ctx context.Context, size abi.UnpaddedPieceSize, r io.Reader, d api.PieceDealInfo) (abi.SectorNumber, abi.PaddedPieceSize, error) { + so, err := st.SectorBuilder.SectorAddPieceToAny(ctx, size, r, d) if err != nil { return 0, 0, err } // TODO: DealID has very low finality here - err = st.writeRef(d.DealID, sn, offset, size) + err = st.writeRef(d.DealID, so.Sector, so.Offset, size) if err != nil { return 0, 0, 
xerrors.Errorf("writeRef: %w", err) } - return sn, offset, nil + return so.Sector, so.Offset, nil } func (st *SectorBlocks) List() (map[uint64][]api.SealedRef, error) { diff --git a/storage/wdpost_run.go b/storage/wdpost_run.go index 0fe7ea85179..20d1eeadc3a 100644 --- a/storage/wdpost_run.go +++ b/storage/wdpost_run.go @@ -659,6 +659,7 @@ func (s *WindowPoStScheduler) runPoStCycle(ctx context.Context, di dline.Info, t if !bytes.Equal(checkRand, rand) { log.Warnw("windowpost randomness changed", "old", rand, "new", checkRand, "ts-height", ts.Height(), "challenge-height", di.Challenge, "tsk", ts.Key()) + rand = checkRand continue } @@ -738,8 +739,12 @@ func (s *WindowPoStScheduler) batchPartitions(partitions []api.Partition, nv net } // Also respect the AddressedPartitionsMax (which is the same as DeclarationsMax (which is all really just MaxPartitionsPerDeadline)) - if partitionsPerMsg > policy.GetDeclarationsMax(nv) { - partitionsPerMsg = policy.GetDeclarationsMax(nv) + declMax, err := policy.GetDeclarationsMax(nv) + if err != nil { + return nil, xerrors.Errorf("getting max declarations: %w", err) + } + if partitionsPerMsg > declMax { + partitionsPerMsg = declMax } // The number of messages will be: diff --git a/testplans/lotus-soup/go.mod b/testplans/lotus-soup/go.mod index 4253a49fb6e..55da298db37 100644 --- a/testplans/lotus-soup/go.mod +++ b/testplans/lotus-soup/go.mod @@ -9,11 +9,11 @@ require ( github.com/drand/drand v1.2.1 github.com/filecoin-project/go-address v0.0.5 github.com/filecoin-project/go-data-transfer v1.6.0 - github.com/filecoin-project/go-fil-markets v1.4.0 + github.com/filecoin-project/go-fil-markets v1.5.0 github.com/filecoin-project/go-jsonrpc v0.1.4-0.20210217175800-45ea43ac2bec github.com/filecoin-project/go-state-types v0.1.1-0.20210506134452-99b279731c48 github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b - github.com/filecoin-project/lotus v1.10.0-rc3.0.20210616215353-9c7db6d305e3 + 
github.com/filecoin-project/lotus v1.10.1-0.20210707122128-1fe08f5973f4 github.com/filecoin-project/specs-actors v0.9.14 github.com/google/uuid v1.1.2 github.com/gorilla/mux v1.7.4 diff --git a/testplans/lotus-soup/go.sum b/testplans/lotus-soup/go.sum index e1690a213b2..9969c51824d 100644 --- a/testplans/lotus-soup/go.sum +++ b/testplans/lotus-soup/go.sum @@ -286,18 +286,14 @@ github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod github.com/filecoin-project/go-data-transfer v1.0.1/go.mod h1:UxvfUAY9v3ub0a21BSK9u3pB2aq30Y0KMsG+w9/ysyo= github.com/filecoin-project/go-data-transfer v1.6.0 h1:DHIzEc23ydRCCBwtFet3MfgO8gMpZEnw60Y+s71oX6o= github.com/filecoin-project/go-data-transfer v1.6.0/go.mod h1:E3WW4mCEYwU2y65swPEajSZoFWFmfXt7uwGduoACZQc= -github.com/filecoin-project/go-data-transfer v1.4.3 h1:ECEw69NOfmEZ7XN1NSBvj3KTbbH2mIczQs+Z2w4bD7c= -github.com/filecoin-project/go-data-transfer v1.4.3/go.mod h1:n8kbDQXWrY1c4UgfMa9KERxNCWbOTDwdNhf2MpN9dpo= github.com/filecoin-project/go-ds-versioning v0.1.0 h1:y/X6UksYTsK8TLCI7rttCKEvl8btmWxyFMEeeWGUxIQ= github.com/filecoin-project/go-ds-versioning v0.1.0/go.mod h1:mp16rb4i2QPmxBnmanUx8i/XANp+PFCCJWiAb+VW4/s= github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a h1:hyJ+pUm/4U4RdEZBlg6k8Ma4rDiuvqyGpoICXAxwsTg= github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= github.com/filecoin-project/go-fil-markets v1.0.5-0.20201113164554-c5eba40d5335/go.mod h1:AJySOJC00JRWEZzRG2KsfUnqEf5ITXxeX09BE9N4f9c= -github.com/filecoin-project/go-fil-markets v1.2.5 h1:bQgtXbwxKyPxSEQoUI5EaTHJ0qfzyd5NosspuADCm6Y= -github.com/filecoin-project/go-fil-markets v1.2.5/go.mod h1:7JIqNBmFvOyBzk/EiPYnweVdQnWhshixb5B9b1653Ag= -github.com/filecoin-project/go-fil-markets v1.4.0 
h1:J4L6o+FVOmS7ZWV6wxLPiuoDzGC7iS3S5NRFL1enEr0= -github.com/filecoin-project/go-fil-markets v1.4.0/go.mod h1:7be6zzFwaN8kxVeYZf/UUj/JilHC0ogPvWqE1TW8Ptk= +github.com/filecoin-project/go-fil-markets v1.5.0 h1:3KEs01L8XFCEgujZ6ggFjr1XWjpjTQcmSSeo3I99I0k= +github.com/filecoin-project/go-fil-markets v1.5.0/go.mod h1:7be6zzFwaN8kxVeYZf/UUj/JilHC0ogPvWqE1TW8Ptk= github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM= github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24= github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 h1:b3UDemBYN2HNfk3KOXNuxgTTxlWi3xVvbQP0IT38fvM= @@ -327,10 +323,8 @@ github.com/filecoin-project/go-statestore v0.1.1 h1:ufMFq00VqnT2CAuDpcGnwLnCX1I/ github.com/filecoin-project/go-statestore v0.1.1/go.mod h1:LFc9hD+fRxPqiHiaqUEZOinUJB4WARkRfNl10O7kTnI= github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b h1:fkRZSPrYpk42PV3/lIXiL0LHetxde7vyYYvSsttQtfg= github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b/go.mod h1:Q0GQOBtKf1oE10eSXSlhN45kDBdGvEcVOqMiffqX+N8= -github.com/filecoin-project/lotus v1.10.0-rc3.0.20210616215353-9c7db6d305e3 h1:oeVa5wjoNx888oIs83L+LqAG75yqa5DCj94I2dRK+Ms= -github.com/filecoin-project/lotus v1.10.0-rc3.0.20210616215353-9c7db6d305e3/go.mod h1:a4kSO7IY58nxXhc29lpZwgZksbdTQFQ4nhBscFYPAjw= -github.com/filecoin-project/lotus v1.9.0 h1:TDKDLbmgYTL8M0mlfd9HmJVEYRlSSOQnakg4+9rfyWM= -github.com/filecoin-project/lotus v1.9.0/go.mod h1:4YC/8rizrrp2wKOYvHQEjCxZbziXi68BhrzvI+FCye0= +github.com/filecoin-project/lotus v1.10.1-0.20210707122128-1fe08f5973f4 h1:u5/uky+PdeaGuEGsExtVP8UUB8No/e873xjqcb7h3CM= +github.com/filecoin-project/lotus v1.10.1-0.20210707122128-1fe08f5973f4/go.mod h1:8ooe5Rzw80rJL0br81A8NNiwZ4BUVzPRwAnDxUG4E7g= github.com/filecoin-project/specs-actors v0.9.4/go.mod h1:BStZQzx5x7TmCkLv0Bpa07U6cPKol6fd3w9KjMPZ6Z4= github.com/filecoin-project/specs-actors v0.9.12/go.mod 
h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= github.com/filecoin-project/specs-actors v0.9.13/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= @@ -338,21 +332,18 @@ github.com/filecoin-project/specs-actors v0.9.14 h1:68PVstg2UB3ZsMLF+DKFTAs/YKsq github.com/filecoin-project/specs-actors v0.9.14/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= github.com/filecoin-project/specs-actors/v2 v2.0.1/go.mod h1:v2NZVYinNIKA9acEMBm5wWXxqv5+frFEbekBFemYghY= github.com/filecoin-project/specs-actors/v2 v2.3.2/go.mod h1:UuJQLoTx/HPvvWeqlIFmC/ywlOLHNe8SNQ3OunFbu2Y= -github.com/filecoin-project/specs-actors/v2 v2.3.5-0.20210114162132-5b58b773f4fb h1:orr/sMzrDZUPAveRE+paBdu1kScIUO5zm+HYeh+VlhA= github.com/filecoin-project/specs-actors/v2 v2.3.5-0.20210114162132-5b58b773f4fb/go.mod h1:LljnY2Mn2homxZsmokJZCpRuhOPxfXhvcek5gWkmqAc= github.com/filecoin-project/specs-actors/v2 v2.3.5 h1:PbT4tPlSXZ8sRgajhb4D8AOEmiaaZ+jg6tc6BBv8VQc= github.com/filecoin-project/specs-actors/v2 v2.3.5/go.mod h1:LljnY2Mn2homxZsmokJZCpRuhOPxfXhvcek5gWkmqAc= -github.com/filecoin-project/specs-actors/v3 v3.1.0 h1:s4qiPw8pgypqBGAy853u/zdZJ7K9cTZdM1rTiSonHrg= github.com/filecoin-project/specs-actors/v3 v3.1.0/go.mod h1:mpynccOLlIRy0QnR008BwYBwT9fen+sPR13MA1VmMww= github.com/filecoin-project/specs-actors/v3 v3.1.1 h1:BE8fsns1GnEOxt1DTE5LxBK2FThXtWmCChgcJoHTg0E= github.com/filecoin-project/specs-actors/v3 v3.1.1/go.mod h1:mpynccOLlIRy0QnR008BwYBwT9fen+sPR13MA1VmMww= -github.com/filecoin-project/specs-actors/v4 v4.0.0 h1:vMALksY5G3J5rj3q9rbcyB+f4Tk1xrLqSgdB3jOok4s= github.com/filecoin-project/specs-actors/v4 v4.0.0/go.mod h1:TkHXf/l7Wyw4ZejyXIPS2rK8bBO0rdwhTZyQQgaglng= github.com/filecoin-project/specs-actors/v4 v4.0.1 h1:AiWrtvJZ63MHGe6rn7tPu4nSUY8bA1KDNszqJaD5+Fg= github.com/filecoin-project/specs-actors/v4 v4.0.1/go.mod h1:TkHXf/l7Wyw4ZejyXIPS2rK8bBO0rdwhTZyQQgaglng= github.com/filecoin-project/specs-actors/v5 v5.0.0-20210512015452-4fe3889fff57/go.mod h1:283yBMMUSDB2abcjP/hhrwTkhb9h3sfM6KGrep/ZlBI= 
-github.com/filecoin-project/specs-actors/v5 v5.0.0-20210609212542-73e0409ac77c h1:GnDJ6q3QEm2ytTKjPFQSvczAltgCSb3j9F1FeynwvPA= -github.com/filecoin-project/specs-actors/v5 v5.0.0-20210609212542-73e0409ac77c/go.mod h1:b/btpRl84Q9SeDKlyIoORBQwe2OTmq14POrYrVvBWCM= +github.com/filecoin-project/specs-actors/v5 v5.0.1 h1:PrYm5AKdMlJ/55eRW5laWcnaX66gyyDYBWvH38kNAMo= +github.com/filecoin-project/specs-actors/v5 v5.0.1/go.mod h1:74euMDIXorusOBs/QL/LNkYsXZdDpLJwojWw6T03pdE= github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506 h1:Ur/l2+6qN+lQiqjozWWc5p9UDaAMDZKTlDS98oRnlIw= github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506/go.mod h1:nJRRM7Aa9XVvygr3W9k6xGF46RWzr2zxF/iGoAIfA/g= github.com/filecoin-project/test-vectors/schema v0.0.5/go.mod h1:iQ9QXLpYWL3m7warwvK1JC/pTri8mnfEmKygNDqqY6E= @@ -659,8 +650,6 @@ github.com/ipfs/go-fs-lock v0.0.6/go.mod h1:OTR+Rj9sHiRubJh3dRhD15Juhd/+w6VPOY28 github.com/ipfs/go-graphsync v0.1.0/go.mod h1:jMXfqIEDFukLPZHqDPp8tJMbHO9Rmeb9CEGevngQbmE= github.com/ipfs/go-graphsync v0.4.2/go.mod h1:/VmbZTUdUMTbNkgzAiCEucIIAU3BkLE2cZrDCVUhyi0= github.com/ipfs/go-graphsync v0.4.3/go.mod h1:mPOwDYv128gf8gxPFgXnz4fNrSYPsWyqisJ7ych+XDY= -github.com/ipfs/go-graphsync v0.6.0 h1:x6UvDUGA7wjaKNqx5Vbo7FGT8aJ5ryYA0dMQ5jN3dF0= -github.com/ipfs/go-graphsync v0.6.0/go.mod h1:e2ZxnClqBBYAtd901g9vXMJzS47labjAtOzsWtOzKNk= github.com/ipfs/go-graphsync v0.6.1/go.mod h1:e2ZxnClqBBYAtd901g9vXMJzS47labjAtOzsWtOzKNk= github.com/ipfs/go-graphsync v0.6.2-0.20210428121800-88edb5462e17 h1:rOoF88dVuDGbIx7idSdimN7JvXriyOIT96WD3eX9sHA= github.com/ipfs/go-graphsync v0.6.2-0.20210428121800-88edb5462e17/go.mod h1:5WyaeigpNdpiYQuW2vwpuecOoEfB4h747ZGEOKmAGTg= @@ -744,6 +733,7 @@ github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Ax github.com/ipfs/go-merkledag v0.0.3/go.mod h1:Oc5kIXLHokkE1hWGMBHw+oxehkAaTOqtEb7Zbh6BhLA= github.com/ipfs/go-merkledag v0.0.6/go.mod 
h1:QYPdnlvkOg7GnQRofu9XZimC5ZW5Wi3bKys/4GQQfto= github.com/ipfs/go-merkledag v0.2.3/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= +github.com/ipfs/go-merkledag v0.2.4/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= github.com/ipfs/go-merkledag v0.3.1/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M= github.com/ipfs/go-merkledag v0.3.2 h1:MRqj40QkrWkvPswXs4EfSslhZ4RVPRbxwX11js0t1xY= github.com/ipfs/go-merkledag v0.3.2/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M= @@ -761,6 +751,7 @@ github.com/ipfs/go-peertaskqueue v0.2.0/go.mod h1:5/eNrBEbtSKWCG+kQK8K8fGNixoYUn github.com/ipfs/go-todocounter v0.0.1/go.mod h1:l5aErvQc8qKE2r7NDMjmq5UNAvuZy0rC8BHOplkWvZ4= github.com/ipfs/go-unixfs v0.0.4/go.mod h1:eIo/p9ADu/MFOuyxzwU+Th8D6xoxU//r590vUpWyfz8= github.com/ipfs/go-unixfs v0.2.1/go.mod h1:IwAAgul1UQIcNZzKPYZWOCijryFBeCV79cNubPzol+k= +github.com/ipfs/go-unixfs v0.2.2-0.20190827150610-868af2e9e5cb/go.mod h1:IwAAgul1UQIcNZzKPYZWOCijryFBeCV79cNubPzol+k= github.com/ipfs/go-unixfs v0.2.4 h1:6NwppOXefWIyysZ4LR/qUBPvXd5//8J3jiMdvpbw6Lo= github.com/ipfs/go-unixfs v0.2.4/go.mod h1:SUdisfUjNoSDzzhGVxvCL9QO/nKdwXdr+gbMUdqcbYw= github.com/ipfs/go-verifcid v0.0.1 h1:m2HI7zIuR5TFyQ1b79Da5N9dnnCP1vcu2QqawmWlK2E= @@ -771,13 +762,16 @@ github.com/ipfs/iptb v1.4.0 h1:YFYTrCkLMRwk/35IMyC6+yjoQSHTEcNcefBStLJzgvo= github.com/ipfs/iptb v1.4.0/go.mod h1:1rzHpCYtNp87/+hTxG5TfCVn/yMY3dKnLn8tBiMfdmg= github.com/ipfs/iptb-plugins v0.2.1 h1:au4HWn9/pRPbkxA08pDx2oRAs4cnbgQWgV0teYXuuGA= github.com/ipfs/iptb-plugins v0.2.1/go.mod h1:QXMbtIWZ+jRsW8a4h13qAKU7jcM7qaittO8wOsTP0Rs= +github.com/ipld/go-car v0.1.0/go.mod h1:RCWzaUh2i4mOEkB3W45Vc+9jnS/M6Qay5ooytiBHl3g= github.com/ipld/go-car v0.1.1-0.20200923150018-8cdef32e2da4/go.mod h1:xrMEcuSq+D1vEwl+YAXsg/JfA98XGpXDwnkIL4Aimqw= github.com/ipld/go-car v0.1.1-0.20201119040415-11b6074b6d4d h1:iphSzTuPqyDgH7WUVZsdqUnQNzYgIblsVr1zhVNA33U= github.com/ipld/go-car v0.1.1-0.20201119040415-11b6074b6d4d/go.mod 
h1:2Gys8L8MJ6zkh1gktTSXreY63t4UbyvNp5JaudTyxHQ= +github.com/ipld/go-ipld-prime v0.0.2-0.20191108012745-28a82f04c785/go.mod h1:bDDSvVz7vaK12FNvMeRYnpRFkSUPNQOiCYQezMD/P3w= github.com/ipld/go-ipld-prime v0.0.2-0.20200428162820-8b59dc292b8e/go.mod h1:uVIwe/u0H4VdKv3kaN1ck7uCb6yD9cFLS9/ELyXbsw8= github.com/ipld/go-ipld-prime v0.5.1-0.20200828233916-988837377a7f/go.mod h1:0xEgdD6MKbZ1vF0GC+YcR/C4SQCAlRuOjIJ2i0HxqzM= github.com/ipld/go-ipld-prime v0.5.1-0.20201021195245-109253e8a018 h1:RbRHv8epkmvBYA5cGfz68GUSbOgx5j/7ObLIl4Rsif0= github.com/ipld/go-ipld-prime v0.5.1-0.20201021195245-109253e8a018/go.mod h1:0xEgdD6MKbZ1vF0GC+YcR/C4SQCAlRuOjIJ2i0HxqzM= +github.com/ipld/go-ipld-prime-proto v0.0.0-20191113031812-e32bd156a1e5/go.mod h1:gcvzoEDBjwycpXt3LBE061wT9f46szXGHAmj9uoP6fU= github.com/ipld/go-ipld-prime-proto v0.0.0-20200428191222-c1ffdadc01e1/go.mod h1:OAV6xBmuTLsPZ+epzKkPB1e25FHk/vCtyatkdHcArLs= github.com/ipld/go-ipld-prime-proto v0.0.0-20200922192210-9a2bfd4440a6/go.mod h1:3pHYooM9Ea65jewRwrb2u5uHZCNkNTe9ABsVB+SrkH0= github.com/ipld/go-ipld-prime-proto v0.1.0 h1:j7gjqrfwbT4+gXpHwEx5iMssma3mnctC7YaCimsFP70= @@ -1575,6 +1569,7 @@ github.com/src-d/envconfig v1.0.0/go.mod h1:Q9YQZ7BKITldTBnoxsE5gOeB5y66RyPXeue/ github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/streadway/quantile v0.0.0-20150917103942-b0c588724d25/go.mod h1:lbP8tGiBjZ5YWIc2fzuRpTaz0b/53vT6PEs3QuAWzuU= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= diff --git 
a/testplans/lotus-soup/testkit/role_bootstrapper.go b/testplans/lotus-soup/testkit/role_bootstrapper.go index 14f74c5edd0..4a6ac56c9c0 100644 --- a/testplans/lotus-soup/testkit/role_bootstrapper.go +++ b/testplans/lotus-soup/testkit/role_bootstrapper.go @@ -120,10 +120,11 @@ func PrepareBootstrapper(t *TestEnvironment) (*Bootstrapper, error) { bootstrapperIP := t.NetClient.MustGetDataNetworkIP().String() n := &LotusNode{} + r := repo.NewMemory(nil) stop, err := node.New(context.Background(), node.FullAPI(&n.FullApi), - node.Online(), - node.Repo(repo.NewMemory(nil)), + node.Base(), + node.Repo(r), node.Override(new(modules.Genesis), modtest.MakeGenesisMem(&genesisBuffer, genesisTemplate)), withApiEndpoint(fmt.Sprintf("/ip4/0.0.0.0/tcp/%s", t.PortNumber("node_rpc", "0"))), withListenAddress(bootstrapperIP), diff --git a/testplans/lotus-soup/testkit/role_client.go b/testplans/lotus-soup/testkit/role_client.go index 9fcd429020f..d18a835d2c4 100644 --- a/testplans/lotus-soup/testkit/role_client.go +++ b/testplans/lotus-soup/testkit/role_client.go @@ -66,7 +66,7 @@ func PrepareClient(t *TestEnvironment) (*LotusClient, error) { n := &LotusNode{} stop, err := node.New(context.Background(), node.FullAPI(&n.FullApi), - node.Online(), + node.Base(), node.Repo(nodeRepo), withApiEndpoint(fmt.Sprintf("/ip4/0.0.0.0/tcp/%s", t.PortNumber("node_rpc", "0"))), withGenesis(genesisMsg.Genesis), diff --git a/testplans/lotus-soup/testkit/role_miner.go b/testplans/lotus-soup/testkit/role_miner.go index a0248cfddb4..52bcfc98b74 100644 --- a/testplans/lotus-soup/testkit/role_miner.go +++ b/testplans/lotus-soup/testkit/role_miner.go @@ -27,6 +27,7 @@ import ( "github.com/filecoin-project/lotus/markets/storageadapter" "github.com/filecoin-project/lotus/miner" "github.com/filecoin-project/lotus/node" + "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/impl" "github.com/filecoin-project/lotus/node/modules" "github.com/filecoin-project/lotus/node/repo" @@ 
-52,6 +53,7 @@ type LotusMiner struct { NodeRepo repo.Repo FullNetAddrs []peer.AddrInfo GenesisMsg *GenesisMsg + Subsystems config.MinerSubsystemConfig t *TestEnvironment } @@ -141,12 +143,22 @@ func PrepareMiner(t *TestEnvironment) (*LotusMiner, error) { return nil, err } + var subsystems config.MinerSubsystemConfig + { lr, err := minerRepo.Lock(repo.StorageMiner) if err != nil { return nil, err } + c, err := lr.Config() + if err != nil { + return nil, err + } + + cfg := c.(*config.StorageMiner) + subsystems = cfg.Subsystems + ks, err := lr.KeyStore() if err != nil { return nil, err @@ -239,7 +251,7 @@ func PrepareMiner(t *TestEnvironment) (*LotusMiner, error) { stop1, err := node.New(context.Background(), node.FullAPI(&n.FullApi), - node.Online(), + node.Base(), node.Repo(nodeRepo), withGenesis(genesisMsg.Genesis), withApiEndpoint(fmt.Sprintf("/ip4/0.0.0.0/tcp/%s", t.PortNumber("node_rpc", "0"))), @@ -260,8 +272,8 @@ func PrepareMiner(t *TestEnvironment) (*LotusMiner, error) { } minerOpts := []node.Option{ - node.StorageMiner(&n.MinerApi), - node.Online(), + node.StorageMiner(&n.MinerApi, subsystems), + node.Base(), node.Repo(minerRepo), node.Override(new(api.FullNode), n.FullApi), node.Override(new(*storageadapter.DealPublisher), storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{ @@ -416,7 +428,7 @@ func PrepareMiner(t *TestEnvironment) (*LotusMiner, error) { return err.ErrorOrNil() } - m := &LotusMiner{n, minerRepo, nodeRepo, fullNetAddrs, genesisMsg, t} + m := &LotusMiner{n, minerRepo, nodeRepo, fullNetAddrs, genesisMsg, subsystems, t} return m, nil } @@ -443,7 +455,7 @@ func RestoreMiner(t *TestEnvironment, m *LotusMiner) (*LotusMiner, error) { stop1, err := node.New(context.Background(), node.FullAPI(&n.FullApi), - node.Online(), + node.Base(), node.Repo(nodeRepo), //withGenesis(genesisMsg.Genesis), withApiEndpoint(fmt.Sprintf("/ip4/0.0.0.0/tcp/%s", t.PortNumber("node_rpc", "0"))), @@ -457,8 +469,8 @@ func RestoreMiner(t *TestEnvironment, 
m *LotusMiner) (*LotusMiner, error) { } minerOpts := []node.Option{ - node.StorageMiner(&n.MinerApi), - node.Online(), + node.StorageMiner(&n.MinerApi, m.Subsystems), + node.Base(), node.Repo(minerRepo), node.Override(new(api.FullNode), n.FullApi), withApiEndpoint(fmt.Sprintf("/ip4/0.0.0.0/tcp/%s", t.PortNumber("miner_rpc", "0"))), @@ -501,7 +513,7 @@ func RestoreMiner(t *TestEnvironment, m *LotusMiner) (*LotusMiner, error) { t.RecordMessage("connected to full node of miner %d on %v", i, fullNetAddrs[i]) } - pm := &LotusMiner{n, minerRepo, nodeRepo, fullNetAddrs, genesisMsg, t} + pm := &LotusMiner{n, minerRepo, nodeRepo, fullNetAddrs, genesisMsg, m.Subsystems, t} return pm, err } @@ -600,7 +612,7 @@ func startStorageMinerAPIServer(t *TestEnvironment, repo repo.Repo, minerApi api rpcServer.Register("Filecoin", minerApi) mux.Handle("/rpc/v0", rpcServer) - mux.PathPrefix("/remote").HandlerFunc(minerApi.(*impl.StorageMinerAPI).ServeRemote) + mux.PathPrefix("/remote").HandlerFunc(minerApi.(*impl.StorageMinerAPI).ServeRemote(true)) mux.PathPrefix("/").Handler(http.DefaultServeMux) // pprof exporter, err := prometheus.NewExporter(prometheus.Options{ diff --git a/tools/packer/scripts/butterflynet/lotus-init.sh b/tools/packer/scripts/butterflynet/lotus-init.sh index f7afd4dfa57..cfbf93f786a 100755 --- a/tools/packer/scripts/butterflynet/lotus-init.sh +++ b/tools/packer/scripts/butterflynet/lotus-init.sh @@ -6,7 +6,7 @@ GATE="$LOTUS_PATH"/date_initialized # Don't init if already initialized. -if [ -f "GATE" ]; then +if [ -f "$GATE" ]; then echo lotus already initialized. exit 0 fi diff --git a/tools/packer/scripts/calibrationnet/lotus-init.sh b/tools/packer/scripts/calibrationnet/lotus-init.sh index d68b3357cc8..77260fa29e4 100755 --- a/tools/packer/scripts/calibrationnet/lotus-init.sh +++ b/tools/packer/scripts/calibrationnet/lotus-init.sh @@ -6,7 +6,7 @@ GATE="$LOTUS_PATH"/date_initialized # Don't init if already initialized. 
-if [ -f "GATE" ]; then +if [ -f "$GATE" ]; then echo lotus already initialized. exit 0 fi diff --git a/tools/packer/scripts/mainnet/lotus-init.sh b/tools/packer/scripts/mainnet/lotus-init.sh index a014f617e23..b2285336522 100755 --- a/tools/packer/scripts/mainnet/lotus-init.sh +++ b/tools/packer/scripts/mainnet/lotus-init.sh @@ -6,7 +6,7 @@ GATE="$LOTUS_PATH"/date_initialized # Don't init if already initialized. -if [ -f "GATE" ]; then +if [ -f "$GATE" ]; then echo lotus already initialized. exit 0 fi diff --git a/tools/packer/scripts/nerpanet/lotus-init.sh b/tools/packer/scripts/nerpanet/lotus-init.sh index 968ae395ca0..a0f19ae925b 100755 --- a/tools/packer/scripts/nerpanet/lotus-init.sh +++ b/tools/packer/scripts/nerpanet/lotus-init.sh @@ -6,7 +6,7 @@ GATE="$LOTUS_PATH"/date_initialized # Don't init if already initialized. -if [ -f "GATE" ]; then +if [ -f "$GATE" ]; then echo lotus already initialized. exit 0 fi