diff --git a/.github/workflows/packager.yml b/.github/workflows/packager.yml new file mode 100644 index 0000000000..7485aca976 --- /dev/null +++ b/.github/workflows/packager.yml @@ -0,0 +1,736 @@ +name: packager + +on: + push: + branches: + - 'main' + paths: + - '**' + tags: + - 'v*.*.*' + - 'v*.*.*-*' + +jobs: + build: + runs-on: ubuntu-18.04 + steps: + - name: Checkout + uses: actions/checkout@v2 + with: + fetch-depth: 0 + - name: Set up Go + uses: actions/setup-go@master + with: + go-version: 1.19 + - name: Adding TAG to ENV + run: echo "GIT_TAG=`echo $(git describe --tags --abbrev=0)`" >> $GITHUB_ENV + + - name: Cleaning repo + run: make clean + - name: Building for amd64 + run: make bor + + - name: Making directory structure + run: mkdir -p packaging/deb/bor/usr/bin + - name: Making directory structure for toml + run: mkdir -p packaging/deb/bor/var/lib/bor + - name: Copying necessary files + run: cp -rp build/bin/bor packaging/deb/bor/usr/bin/ + - name: copying control file + run: cp -rp packaging/templates/package_scripts/control packaging/deb/bor/DEBIAN/control + - name: removing systemd file for binary + run: rm -rf lib/systemd/system/bor.service + + - name: Creating package for binary for bor ${{ env.ARCH }} + run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: amd64 + + - name: Running package build + run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: amd64 + + - name: Removing the bor binary + run: rm -rf packaging/deb/bor/usr/bin/bor + + - name: making directory structure for systemd + run: mkdir -p packaging/deb/bor/lib/systemd/system + - name: Copying systemd file + run: cp -rp packaging/templates/systemd/bor.service packaging/deb/bor/lib/systemd/system/bor.service + + - name: Prepping ${{ env.NETWORK }} ${{ env.NODE }} node for ${{ env.ARCH }} + run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: amd64 + NODE: sentry + NETWORK: mumbai + - name: Putting toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/testnet-v4/sentry/sentry/bor/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/ + env: + ARCH: amd64 + NODE: sentry + NETWORK: mumbai + - name: Copying the preinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/preinst + env: + ARCH: amd64 + NODE: sentry + NETWORK: mumbai + - name: Copying the postinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst + env: + ARCH: amd64 + NODE: sentry + NETWORK: mumbai + - name: Copying the prerm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm + env: + ARCH: amd64 + NODE: sentry + NETWORK: mumbai + - name: Copying the postrm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ 
env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm + env: + ARCH: amd64 + NODE: sentry + NETWORK: mumbai + - name: Copying systemd file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/systemd/bor.service packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/lib/systemd/system/ + env: + ARCH: amd64 + NODE: sentry + NETWORK: mumbai + - name: Copying profile control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.profile.amd64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: amd64 + NODE: sentry + NETWORK: mumbai + - name: Running package build for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: amd64 + NODE: sentry + NETWORK: mumbai + + - name: Setting up ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: amd64 + NODE: sentry + NETWORK: mainnet + - name: Copying control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: amd64 + NODE: sentry + NETWORK: mainnet + - name: Putting toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/ + env: + ARCH: amd64 + NODE: sentry + NETWORK: mainnet + - name: Copying the preinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/preinst + env: + ARCH: amd64 + NODE: sentry + NETWORK: mainnet + - name: Copying the postinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst + env: + ARCH: amd64 + NODE: sentry + NETWORK: mainnet + - name: Copying the prerm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm + env: + ARCH: amd64 + NODE: sentry + NETWORK: mainnet + - name: Copying the postrm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm + env: + ARCH: amd64 + NODE: sentry + NETWORK: mainnet + - name: Copying profile control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.profile.amd64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: amd64 + NODE: sentry + NETWORK: mainnet + - name: Building ${{ 
env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} profile + run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: amd64 + NODE: sentry + NETWORK: mainnet + + - name: Prepping Bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: amd64 + NODE: validator + NETWORK: mumbai + - name: Copying control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.validator packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: amd64 + NODE: validator + NETWORK: mumbai + - name: Copying Postinstall script for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst + env: + ARCH: amd64 + NODE: validator + NETWORK: mumbai + - name: Copying Prerm script for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm + env: + ARCH: amd64 + NODE: validator + NETWORK: mumbai + - name: Copying Postrm script for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm + env: + ARCH: amd64 + NODE: validator + NETWORK: mumbai + - name: Copying config.toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/testnet-v4/sentry/validator/bor/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/ + env: + ARCH: amd64 + NODE: validator + NETWORK: mumbai + - name: Copying profile control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.profile.amd64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: amd64 + NODE: validator + NETWORK: mumbai + - name: Building bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} profile + run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: amd64 + NODE: validator + NETWORK: mumbai + + - name: Prepping Bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: amd64 + NODE: validator + NETWORK: mainnet + - name: Copying control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.validator packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: amd64 + NODE: validator + NETWORK: mainnet + - name: Copying the preinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-${{ env.NETWORK }}-${{ 
env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/preinst + env: + ARCH: amd64 + NODE: validator + NETWORK: mainnet + - name: Copying Postinstall script for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst + env: + ARCH: amd64 + NODE: validator + NETWORK: mainnet + - name: Copying the prerm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm + env: + ARCH: amd64 + NODE: validator + NETWORK: mainnet + - name: Copying the postrm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm + env: + ARCH: amd64 + NODE: validator + NETWORK: mainnet + - name: Copying config.toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/mainnet-v1/sentry/validator/bor/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/ + env: + ARCH: amd64 + NODE: validator + NETWORK: mainnet + - name: Copying profile control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.profile.amd64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: amd64 + NODE: validator + NETWORK: mainnet + - name: Building bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} profile + run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: amd64 + NODE: validator + NETWORK: mainnet + + - name: Creating bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: amd64 + NODE: archive + NETWORK: mumbai + - name: Copying profile control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.profile.amd64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: amd64 + NODE: archive + NETWORK: mumbai + - name: Copying profile preinst file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/preinst + env: + ARCH: amd64 + NODE: archive + NETWORK: mumbai + - name: Copying the profile postinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst + env: + ARCH: amd64 + NODE: archive + NETWORK: mumbai + - name: Copying profile prerm file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH 
}}/DEBIAN/prerm + env: + ARCH: amd64 + NODE: archive + NETWORK: mumbai + - name: Copying profile postrm file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm + env: + ARCH: amd64 + NODE: archive + NETWORK: mumbai + - name: Copying the toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/testnet-v4/archive/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/ + env: + ARCH: amd64 + NODE: archive + NETWORK: mumbai + - name: Building bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} profile + run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: amd64 + NODE: archive + NETWORK: mumbai + + - name: Creating bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: amd64 + NODE: archive + NETWORK: mainnet + - name: Copying control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.profile.amd64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: amd64 + NODE: archive + NETWORK: mainnet + - name: Copying profile preinst file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/preinst + env: + ARCH: amd64 + NODE: archive + NETWORK: mainnet + - name: Copying the profile postinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst + env: + ARCH: amd64 + NODE: archive + NETWORK: mainnet + - name: Copying profile prerm file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm + env: + ARCH: amd64 + NODE: archive + NETWORK: mainnet + - name: Copying profile postrm file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm + env: + ARCH: amd64 + NODE: archive + NETWORK: mainnet + - name: Copying the toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/mainnet-v1/archive/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/ + env: + ARCH: amd64 + NODE: archive + NETWORK: mainnet + - name: Building bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} profile + run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: amd64 + NODE: archive + NETWORK: mainnet + + - name: Cleaning build directory for arm64 build + run: make clean + + 
- name: Removing systemd file + run: rm -rf packaging/deb/bor/lib/systemd/system/bor.service + + - name: Updating the apt-get + run: sudo apt-get update -y + + - name: Adding requirements for cross compile + run: sudo apt-get install g++-aarch64-linux-gnu gcc-aarch64-linux-gnu + + - name: removing systemd file for binary + run: rm -rf lib/systemd/system/bor.service + + - name: Building bor for arm64 + run: GOARCH=arm64 GOOS=linux CC=aarch64-linux-gnu-gcc CXX=aarch64-linux-gnu-g++ CGO_ENABLED=1 go build -o build/bin/bor ./cmd/cli/main.go + + - name: Copying bor arm64 for use with packaging + run: cp -rp build/bin/bor packaging/deb/bor/usr/bin/ + + - name: Creating package for binary only bor + run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: arm64 + - name: Copying control file + run: cp -rp packaging/templates/package_scripts/control.arm64 packaging/deb/bor-${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: arm64 + - name: Running package build + run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: arm64 + + - name: Removing the bor binary + run: rm -rf packaging/deb/bor/usr/bin/bor + + - name: Copying systemd file + run: cp -rp packaging/templates/systemd/bor.service packaging/deb/bor/lib/systemd/system/bor.service + + - name: Updating the control file to use with the arm64 profile + run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor/DEBIAN/control + + - name: Setting up bor for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: arm64 + NODE: sentry + NETWORK: mumbai + - name: Copying control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.arm64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: arm64 + NODE: sentry + NETWORK: mumbai + - name: Putting toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/testnet-v4/sentry/sentry/bor/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/ + env: + ARCH: arm64 + NODE: sentry + NETWORK: mumbai + - name: Copying the preinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/preinst + env: + ARCH: arm64 + NODE: sentry + NETWORK: mumbai + - name: Copying the prerm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm + env: + ARCH: arm64 + NODE: sentry + NETWORK: mumbai + - name: Copying the postrm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm + env: + ARCH: arm64 + NODE: sentry + NETWORK: mumbai + - name: Copying the postinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ 
env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst + env: + ARCH: arm64 + NODE: sentry + NETWORK: mumbai + - name: Copying profile control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: arm64 + NODE: sentry + NETWORK: mumbai + - name: Building bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} profile + run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: arm64 + NODE: sentry + NETWORK: mumbai + + - name: Setting up bor for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: arm64 + NODE: sentry + NETWORK: mainnet + - name: Copying control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.arm64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: arm64 + NODE: sentry + NETWORK: mainnet + - name: Putting toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/ + env: + ARCH: arm64 + NODE: sentry + NETWORK: mainnet + - name: Copying the preinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/preinst + env: + ARCH: arm64 + NODE: sentry + NETWORK: mainnet + - name: Copying the prerm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm + env: + ARCH: arm64 + NODE: sentry + NETWORK: mainnet + - name: Copying the postrm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm + env: + ARCH: arm64 + NODE: sentry + NETWORK: mainnet + - name: Copying the postinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst + env: + ARCH: arm64 + NODE: sentry + NETWORK: mainnet + - name: Copying profile control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: arm64 + NODE: sentry + NETWORK: mainnet + - name: Building bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} profile + run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: arm64 + NODE: sentry + NETWORK: mainnet + + - 
name: Prepping Bor for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: arm64 + NODE: validator + NETWORK: mumbai + - name: Copying control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.validator.arm64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: arm64 + NODE: validator + NETWORK: mumbai + - name: Copying the preinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/preinst + env: + ARCH: arm64 + NODE: validator + NETWORK: mumbai + - name: Copying Postinstall script for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst + env: + ARCH: arm64 + NODE: validator + NETWORK: mumbai + - name: Copying the prerm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm + env: + ARCH: arm64 + NODE: validator + NETWORK: mumbai + - name: Copying the postrm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm + env: + ARCH: arm64 + NODE: validator + NETWORK: mumbai + - name: Copying config.toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/testnet-v4/sentry/validator/bor/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/ + env: + ARCH: arm64 + NODE: validator + NETWORK: mumbai + - name: Copying profile control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: arm64 + NODE: validator + NETWORK: mumbai + - name: Building bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} profile + run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: arm64 + NODE: validator + NETWORK: mumbai + + - name: Prepping Bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: arm64 + NODE: validator + NETWORK: mainnet + - name: Copying control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.validator.arm64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: arm64 + NODE: validator + NETWORK: mainnet + - name: Copying the preinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp 
packaging/templates/package_scripts/preinst packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/preinst + env: + ARCH: arm64 + NODE: validator + NETWORK: mainnet + - name: Copying Postinstall script for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst + env: + ARCH: arm64 + NODE: validator + NETWORK: mainnet + - name: Copying the prerm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm + env: + ARCH: arm64 + NODE: validator + NETWORK: mainnet + - name: Copying the postrm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm + env: + ARCH: arm64 + NODE: validator + NETWORK: mainnet + - name: Copying config.toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/mainnet-v1/sentry/validator/bor/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/ + env: + ARCH: arm64 + NODE: validator + NETWORK: mainnet + - name: Copying profile control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: arm64 + NODE: validator + NETWORK: mainnet + - name: Building bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} profile + run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: arm64 + NODE: validator + NETWORK: mainnet + + - name: Updating the control file to use with the arm64 profile + run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor/DEBIAN/control + + - name: Creating bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: arm64 + NODE: archive + NETWORK: mumbai + - name: Copying profile control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: arm64 + NODE: archive + NETWORK: mumbai + - name: Copying over profile postinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst + env: + ARCH: arm64 + NODE: archive + NETWORK: mumbai + - name: Copying prerm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm + env: + ARCH: arm64 + NODE: archive + NETWORK: mumbai + - name: 
Copying postrm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm + env: + ARCH: arm64 + NODE: archive + NETWORK: mumbai + - name: Copying the toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/testnet-v4/archive/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/ + env: + ARCH: arm64 + NODE: archive + NETWORK: mumbai + - name: Building bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} profile + run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: arm64 + NODE: archive + NETWORK: mumbai + + - name: Creating bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: arm64 + NODE: archive + NETWORK: mainnet + - name: Copying profile control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: arm64 + NODE: archive + NETWORK: mainnet + - name: Copying over profile postinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst + env: + ARCH: arm64 + NODE: archive + NETWORK: mainnet + - name: Copying prerm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm + env: + ARCH: arm64 + NODE: archive + NETWORK: mainnet + - name: Copying postrm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm + env: + ARCH: arm64 + NODE: archive + NETWORK: mainnet + - name: Copying the toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/mainnet-v1/archive/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/ + env: + ARCH: arm64 + NODE: archive + NETWORK: mainnet + - name: Building bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} profile + run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: arm64 + NODE: archive + NETWORK: mainnet + + - name: Confirming package built + run: ls -ltr packaging/deb/ | grep bor + + - name: Release bor Packages + uses: softprops/action-gh-release@v1 + with: + tag_name: ${{ env.GIT_TAG }} + prerelease: true + files: | + packaging/deb/bor**.deb + binary/bo** diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index b615cf639e..2ceda3d2ee 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -21,7 +21,7 @@ jobs: - name: Set up Go uses: 
actions/setup-go@master with: - go-version: 1.17.x + go-version: 1.19.x - name: Prepare id: prepare @@ -29,6 +29,12 @@ jobs: TAG=${GITHUB_REF#refs/tags/} echo ::set-output name=tag_name::${TAG} + - name: Login to Docker Hub + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKERHUB }} + password: ${{ secrets.DOCKERHUB_KEY }} + - name: Set up QEMU uses: docker/setup-qemu-action@v1 @@ -39,5 +45,3 @@ jobs: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} VERSION: ${{ steps.prepare.outputs.tag_name }} SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} - DOCKER_USERNAME: ${{ secrets.DOCKERHUB }} - DOCKER_PASSWORD: ${{ secrets.DOCKERHUB_KEY }} diff --git a/.goreleaser.yml b/.goreleaser.yml index acafc4abc0..6f770ba739 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -1,7 +1,7 @@ project_name: bor release: - disable: false + disable: true draft: true prerelease: auto diff --git a/Dockerfile b/Dockerfile index 7a2770ce9a..6c65faf12d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,17 +1,17 @@ FROM golang:latest -ARG BOR_DIR=/bor +ARG BOR_DIR=/var/lib/bor ENV BOR_DIR=$BOR_DIR RUN apt-get update -y && apt-get upgrade -y \ && apt install build-essential git -y \ - && mkdir -p /bor + && mkdir -p ${BOR_DIR} WORKDIR ${BOR_DIR} COPY . . RUN make bor -RUN cp build/bin/bor /usr/local/bin/ +RUN cp build/bin/bor /usr/bin/ ENV SHELL /bin/bash EXPOSE 8545 8546 8547 30303 30303/udp diff --git a/Dockerfile.alltools b/Dockerfile.alltools index a3f36d4a04..1c4437e251 100644 --- a/Dockerfile.alltools +++ b/Dockerfile.alltools @@ -13,6 +13,6 @@ RUN set -x \ && apk add --update --no-cache \ ca-certificates \ && rm -rf /var/cache/apk/* -COPY --from=builder /bor/build/bin/* /usr/local/bin/ +COPY --from=builder /bor/build/bin/* /usr/bin/ EXPOSE 8545 8546 30303 30303/udp diff --git a/Dockerfile.release b/Dockerfile.release index 66dd589e82..2a026566d7 100644 --- a/Dockerfile.release +++ b/Dockerfile.release @@ -1,10 +1,15 @@ FROM alpine:3.14 +ARG BOR_DIR=/var/lib/bor +ENV BOR_DIR=$BOR_DIR + RUN apk add --no-cache ca-certificates && \ - mkdir -p /etc/bor -COPY bor /usr/local/bin/ -COPY builder/files/genesis-mainnet-v1.json /etc/bor/ -COPY builder/files/genesis-testnet-v4.json /etc/bor/ + mkdir -p ${BOR_DIR} + +WORKDIR ${BOR_DIR} +COPY bor /usr/bin/ +COPY builder/files/genesis-mainnet-v1.json ${BOR_DIR} +COPY builder/files/genesis-testnet-v4.json ${BOR_DIR} EXPOSE 8545 8546 8547 30303 30303/udp ENTRYPOINT ["bor"] diff --git a/Makefile b/Makefile index f0f9385e7b..a8a4b66e8d 100644 --- a/Makefile +++ b/Makefile @@ -37,7 +37,7 @@ protoc: generate-mocks: go generate mockgen -destination=./tests/bor/mocks/IHeimdallClient.go -package=mocks ./consensus/bor IHeimdallClient go generate mockgen -destination=./eth/filters/IBackend.go -package=filters ./eth/filters Backend - + geth: $(GORUN) build/ci.go install ./cmd/geth @echo "Done building." 
@@ -199,7 +199,7 @@ geth-windows-amd64: @ls -ld $(GOBIN)/geth-windows-* | grep amd64 PACKAGE_NAME := github.com/maticnetwork/bor -GOLANG_CROSS_VERSION ?= v1.18.1 +GOLANG_CROSS_VERSION ?= v1.19.1 .PHONY: release-dry-run release-dry-run: @@ -227,6 +227,7 @@ release: -e DOCKER_PASSWORD \ -e SLACK_WEBHOOK \ -v /var/run/docker.sock:/var/run/docker.sock \ + -v $(HOME)/.docker/config.json:/root/.docker/config.json \ -v `pwd`:/go/src/$(PACKAGE_NAME) \ -w /go/src/$(PACKAGE_NAME) \ goreleaser/goreleaser-cross:${GOLANG_CROSS_VERSION} \ diff --git a/builder/files/bor.service b/builder/files/bor.service index 2deff3dbc9..758553299e 100644 --- a/builder/files/bor.service +++ b/builder/files/bor.service @@ -6,7 +6,7 @@ [Service] Restart=on-failure RestartSec=5s - ExecStart=/usr/local/bin/bor server -config="/var/lib/bor/config.toml" + ExecStart=/usr/local/bin/bor server -config "/var/lib/bor/config.toml" Type=simple User=bor KillSignal=SIGINT diff --git a/builder/files/config.toml b/builder/files/config.toml index 870c164a8d..0f2919807f 100644 --- a/builder/files/config.toml +++ b/builder/files/config.toml @@ -6,6 +6,7 @@ chain = "mainnet" # identity = "Pratiks-MacBook-Pro.local" # log-level = "INFO" datadir = "/var/lib/bor/data" +# ancient = "" # keystore = "/var/lib/bor/keystore" syncmode = "full" # gcmode = "full" @@ -52,7 +53,7 @@ syncmode = "full" # pricebump = 10 [miner] - gaslimit = 20000000 + gaslimit = 30000000 gasprice = "30000000000" # mine = true # etherbase = "VALIDATOR ADDRESS" @@ -60,38 +61,42 @@ syncmode = "full" # [jsonrpc] - # ipcdisable = false - # ipcpath = "" - # gascap = 50000000 - # txfeecap = 5.0 - # [jsonrpc.http] - # enabled = false - # port = 8545 - # prefix = "" - # host = "localhost" - # api = ["eth", "net", "web3", "txpool", "bor"] - # vhosts = ["*"] - # corsdomain = ["*"] - # [jsonrpc.ws] - # enabled = false - # port = 8546 - # prefix = "" - # host = "localhost" - # api = ["web3", "net"] - # origins = ["*"] - # [jsonrpc.graphql] - # enabled = false - # port = 0 - # prefix = "" - # host = "" - # vhosts = ["*"] - # corsdomain = ["*"] +# ipcdisable = false +# ipcpath = "" +# gascap = 50000000 +# txfeecap = 5.0 +# [jsonrpc.http] +# enabled = false +# port = 8545 +# prefix = "" +# host = "localhost" +# api = ["eth", "net", "web3", "txpool", "bor"] +# vhosts = ["*"] +# corsdomain = ["*"] +# [jsonrpc.ws] +# enabled = false +# port = 8546 +# prefix = "" +# host = "localhost" +# api = ["web3", "net"] +# origins = ["*"] +# [jsonrpc.graphql] +# enabled = false +# port = 0 +# prefix = "" +# host = "" +# vhosts = ["*"] +# corsdomain = ["*"] +# [jsonrpc.timeouts] +# read = "30s" +# write = "30s" +# idle = "2m0s" -# [gpo] +[gpo] # blocks = 20 # percentile = 60 # maxprice = "5000000000000" - # ignoreprice = "2" + ignoreprice = "30000000000" [telemetry] metrics = true @@ -122,6 +127,7 @@ syncmode = "full" # preimages = false # txlookuplimit = 2350000 # triesinmemory = 128 + # timeout = "1h0m0s" [accounts] # allow-insecure-unlock = true diff --git a/builder/files/genesis-mainnet-v1.json b/builder/files/genesis-mainnet-v1.json index d3f0d02206..b01313bd57 100644 --- a/builder/files/genesis-mainnet-v1.json +++ b/builder/files/genesis-mainnet-v1.json @@ -15,14 +15,17 @@ "londonBlock": 23850000, "bor": { "jaipurBlock": 23850000, + "delhiBlock": 38189056, "period": { "0": 2 }, "producerDelay": { - "0": 6 + "0": 6, + "38189056": 4 }, "sprint": { - "0": 64 + "0": 64, + "38189056": 16 }, "backupMultiplier": { "0": 2 diff --git a/core/blockchain.go b/core/blockchain.go index 8103e4a05e..74fd4bfeda 
100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -68,6 +68,7 @@ var ( snapshotStorageReadTimer = metrics.NewRegisteredTimer("chain/snapshot/storage/reads", nil) snapshotCommitTimer = metrics.NewRegisteredTimer("chain/snapshot/commits", nil) + blockImportTimer = metrics.NewRegisteredMeter("chain/imports", nil) blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil) blockValidationTimer = metrics.NewRegisteredTimer("chain/validation", nil) blockExecutionTimer = metrics.NewRegisteredTimer("chain/execution", nil) @@ -1518,6 +1519,11 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool) it := newInsertIterator(chain, results, bc.validator) block, err := it.next() + // Update the block import meter; it will just record chains we've received + // from other peers. (Note that the actual chain which gets imported would be + // quite low). + blockImportTimer.Mark(int64(len(headers))) + // Check the validity of incoming chain isValid, err1 := bc.forker.ValidateReorg(bc.CurrentBlock().Header(), headers) if err1 != nil { diff --git a/core/forkchoice.go b/core/forkchoice.go index 018afdfac9..7dd1a86307 100644 --- a/core/forkchoice.go +++ b/core/forkchoice.go @@ -114,9 +114,7 @@ func (f *ForkChoice) ReorgNeeded(current *types.Header, header *types.Header) (b func (f *ForkChoice) ValidateReorg(current *types.Header, chain []*types.Header) (bool, error) { // Call the bor chain validator service if f.validator != nil { - if isValid := f.validator.IsValidChain(current, chain); !isValid { - return false, nil - } + return f.validator.IsValidChain(current, chain) } return true, nil diff --git a/core/forkchoice_test.go b/core/forkchoice_test.go index 2e7b40d8ff..2493d4701f 100644 --- a/core/forkchoice_test.go +++ b/core/forkchoice_test.go @@ -13,7 +13,7 @@ import ( // chainValidatorFake is a mock for the chain validator service type chainValidatorFake struct { - validate func(currentHeader *types.Header, chain []*types.Header) bool + validate func(currentHeader *types.Header, chain []*types.Header) (bool, error) } // chainReaderFake is a mock for the chain reader service @@ -21,7 +21,7 @@ type chainReaderFake struct { getTd func(hash common.Hash, number uint64) *big.Int } -func newChainValidatorFake(validate func(currentHeader *types.Header, chain []*types.Header) bool) *chainValidatorFake { +func newChainValidatorFake(validate func(currentHeader *types.Header, chain []*types.Header) (bool, error)) *chainValidatorFake { return &chainValidatorFake{validate: validate} } @@ -46,18 +46,18 @@ func TestPastChainInsert(t *testing.T) { getTd := func(hash common.Hash, number uint64) *big.Int { return big.NewInt(int64(number)) } - validate := func(currentHeader *types.Header, chain []*types.Header) bool { + validate := func(currentHeader *types.Header, chain []*types.Header) (bool, error) { // Put all explicit conditions here // If canonical chain is empty and we're importing a chain of 64 blocks if currentHeader.Number.Uint64() == uint64(0) && len(chain) == 64 { - return true + return true, nil } // If canonical chain is of len 64 and we're importing a past chain from 54-64, then accept it if currentHeader.Number.Uint64() == uint64(64) && chain[0].Number.Uint64() == 55 && len(chain) == 10 { - return true + return true, nil } - return false + return false, nil } mockChainReader := newChainReaderFake(getTd) mockChainValidator := newChainValidatorFake(validate) @@ -116,18 +116,18 @@ func TestFutureChainInsert(t *testing.T) { getTd := func(hash common.Hash, number uint64) 
*big.Int { return big.NewInt(int64(number)) } - validate := func(currentHeader *types.Header, chain []*types.Header) bool { + validate := func(currentHeader *types.Header, chain []*types.Header) (bool, error) { // Put all explicit conditions here // If canonical chain is empty and we're importing a chain of 64 blocks if currentHeader.Number.Uint64() == uint64(0) && len(chain) == 64 { - return true + return true, nil } // If length of future chains > some value, they should not be accepted if currentHeader.Number.Uint64() == uint64(64) && len(chain) <= 10 { - return true + return true, nil } - return false + return false, nil } mockChainReader := newChainReaderFake(getTd) mockChainValidator := newChainValidatorFake(validate) @@ -174,18 +174,18 @@ func TestOverlappingChainInsert(t *testing.T) { getTd := func(hash common.Hash, number uint64) *big.Int { return big.NewInt(int64(number)) } - validate := func(currentHeader *types.Header, chain []*types.Header) bool { + validate := func(currentHeader *types.Header, chain []*types.Header) (bool, error) { // Put all explicit conditions here // If canonical chain is empty and we're importing a chain of 64 blocks if currentHeader.Number.Uint64() == uint64(0) && len(chain) == 64 { - return true + return true, nil } // If length of chain is > some fixed value then don't accept it if currentHeader.Number.Uint64() == uint64(64) && len(chain) <= 20 { - return true + return true, nil } - return false + return false, nil } mockChainReader := newChainReaderFake(getTd) mockChainValidator := newChainValidatorFake(validate) @@ -227,7 +227,7 @@ func (c *chainReaderFake) GetTd(hash common.Hash, number uint64) *big.Int { func (w *chainValidatorFake) IsValidPeer(remoteHeader *types.Header, fetchHeadersByNumber func(number uint64, amount int, skip int, reverse bool) ([]*types.Header, []common.Hash, error)) (bool, error) { return true, nil } -func (w *chainValidatorFake) IsValidChain(current *types.Header, headers []*types.Header) bool { +func (w *chainValidatorFake) IsValidChain(current *types.Header, headers []*types.Header) (bool, error) { return w.validate(current, headers) } func (w *chainValidatorFake) ProcessCheckpoint(endBlockNum uint64, endBlockHash common.Hash) {} diff --git a/docs/README.md b/docs/README.md index 5ebdbd7e26..2f75b218e4 100644 --- a/docs/README.md +++ b/docs/README.md @@ -1,9 +1,7 @@ # Documentation -- [Command-line-interface](./cli) - -- [Configuration file](./config.md) +[The new command line interface (CLI)](./cli) in this version of Bor aims to give users more control over the codebase when interacting with and starting a node. We have made every effort to keep most of the flags similar to the old CLI, except for a few notable changes. One major change is the use of the --config flag, which previously represented fields without available flags. It now represents all flags available to the user, and will overwrite any other flags if provided. As a node operator, you still have the flexibility to modify flags as needed. Please note that this change does not affect the internal functionality of the node, and it remains compatible with Geth and the Ethereum Virtual Machine (EVM). ## Additional notes @@ -13,8 +11,16 @@ $ bor server ``` -- Toml files used earlier just to configure static/trusted nodes are being deprecated. Instead, a toml file now can be used instead of flags and can contain all configuration for the node to run. The link to a sample config file is given above. 
To simply run bor with a configuration file, the following command can be used. + See [here](./cli/server.md) for more flag details. + +- The `bor dumpconfig` sub-command prints the default configurations, in the TOML format, on the terminal. One can `pipe (>)` this to a file (say `config.toml`) and use it to start bor. + +- A toml file now can be used instead of flags and can contain all configuration for the node to run. To simply run bor with a configuration file, the following command can be used. ``` $ bor server --config ``` + +- You can find an example config file [here](./cli/example_config.toml) to know more about what each flag is used for, what are the defaults and recommended values for different networks. + +- Toml files used earlier (with `--config` flag) to configure additional fields (like static and trusted nodes) are being deprecated and have been converted to flags. diff --git a/docs/cli/README.md b/docs/cli/README.md index bf37d6ef56..d52a4fd836 100644 --- a/docs/cli/README.md +++ b/docs/cli/README.md @@ -44,6 +44,10 @@ - [```server```](./server.md) +- [```snapshot```](./snapshot.md) + +- [```snapshot prune-state```](./snapshot_prune-state.md) + - [```status```](./status.md) - [```version```](./version.md) \ No newline at end of file diff --git a/docs/cli/account_import.md b/docs/cli/account_import.md index d7b02195bc..697d951fd3 100644 --- a/docs/cli/account_import.md +++ b/docs/cli/account_import.md @@ -6,4 +6,4 @@ The ```account import``` command imports an account in Json format to the Bor da - ```datadir```: Path of the data directory to store information -- ```keystore```: Path of the data directory to store information \ No newline at end of file +- ```keystore```: Path of the data directory to store keys \ No newline at end of file diff --git a/docs/cli/account_list.md b/docs/cli/account_list.md index 61ebf9e776..a11b4a05e7 100644 --- a/docs/cli/account_list.md +++ b/docs/cli/account_list.md @@ -6,4 +6,4 @@ The `account list` command lists all the accounts in the Bor data directory. 
- ```datadir```: Path of the data directory to store information -- ```keystore```: Path of the data directory to store information \ No newline at end of file +- ```keystore```: Path of the data directory to store keys \ No newline at end of file diff --git a/docs/cli/account_new.md b/docs/cli/account_new.md index dd62061ba0..bd47ecb371 100644 --- a/docs/cli/account_new.md +++ b/docs/cli/account_new.md @@ -6,4 +6,4 @@ The `account new` command creates a new local account file on the Bor data direc - ```datadir```: Path of the data directory to store information -- ```keystore```: Path of the data directory to store information \ No newline at end of file +- ```keystore```: Path of the data directory to store keys \ No newline at end of file diff --git a/docs/cli/bootnode.md b/docs/cli/bootnode.md index 48e933a934..064de39014 100644 --- a/docs/cli/bootnode.md +++ b/docs/cli/bootnode.md @@ -2,16 +2,16 @@ ## Options -- ```listen-addr```: listening address of bootnode (:) +- ```listen-addr```: listening address of bootnode (:) (default: 0.0.0.0:30303) -- ```v5```: Enable UDP v5 +- ```v5```: Enable UDP v5 (default: false) -- ```log-level```: Log level (trace|debug|info|warn|error|crit) +- ```log-level```: Log level (trace|debug|info|warn|error|crit) (default: info) -- ```nat```: port mapping mechanism (any|none|upnp|pmp|extip:) +- ```nat```: port mapping mechanism (any|none|upnp|pmp|extip:) (default: none) - ```node-key```: file or hex node key - ```save-key```: path to save the ecdsa private key -- ```dry-run```: validates parameters and prints bootnode configurations, but does not start bootnode \ No newline at end of file +- ```dry-run```: validates parameters and prints bootnode configurations, but does not start bootnode (default: false) \ No newline at end of file diff --git a/docs/cli/chain_sethead.md b/docs/cli/chain_sethead.md index bf97990e62..09cd37baa1 100644 --- a/docs/cli/chain_sethead.md +++ b/docs/cli/chain_sethead.md @@ -8,6 +8,6 @@ The ```chain sethead ``` command sets the current chain to a certain blo ## Options -- ```address```: Address of the grpc endpoint +- ```address```: Address of the grpc endpoint (default: 127.0.0.1:3131) -- ```yes```: Force set head \ No newline at end of file +- ```yes```: Force set head (default: false) \ No newline at end of file diff --git a/docs/cli/debug_block.md b/docs/cli/debug_block.md index ced7e482ee..efcead2626 100644 --- a/docs/cli/debug_block.md +++ b/docs/cli/debug_block.md @@ -4,6 +4,6 @@ The ```bor debug block ``` command will create an archive containing tra ## Options -- ```address```: Address of the grpc endpoint +- ```address```: Address of the grpc endpoint (default: 127.0.0.1:3131) - ```output```: Output directory \ No newline at end of file diff --git a/docs/cli/debug_pprof.md b/docs/cli/debug_pprof.md index 86a84b6065..2e7e40b677 100644 --- a/docs/cli/debug_pprof.md +++ b/docs/cli/debug_pprof.md @@ -4,8 +4,8 @@ The ```debug pprof ``` command will create an archive containing bor ppro ## Options -- ```address```: Address of the grpc endpoint +- ```address```: Address of the grpc endpoint (default: 127.0.0.1:3131) -- ```seconds```: seconds to trace +- ```seconds```: seconds to trace (default: 2) - ```output```: Output directory \ No newline at end of file diff --git a/docs/cli/example_config.toml b/docs/cli/example_config.toml new file mode 100644 index 0000000000..64ef60ae12 --- /dev/null +++ b/docs/cli/example_config.toml @@ -0,0 +1,147 @@ +# This configuration file is for reference and learning purpose only. 
+# The default value of the flags is provided below (except a few flags which has custom defaults which are explicitly mentioned). +# Recommended values for mainnet and/or mumbai are also provided. + +chain = "mainnet" # Name of the chain to sync ("mumbai", "mainnet") or path to a genesis file +identity = "Annon-Identity" # Name/Identity of the node (default = OS hostname) +log-level = "INFO" # Set log level for the server +datadir = "var/lib/bor" # Path of the data directory to store information +ancient = "" # Data directory for ancient chain segments (default = inside chaindata) +keystore = "" # Path of the directory where keystores are located +syncmode = "full" # Blockchain sync mode (only "full" sync supported) +gcmode = "full" # Blockchain garbage collection mode ("full", "archive") +snapshot = true # Enables the snapshot-database mode +"bor.logs" = false # Enables bor log retrieval +ethstats = "" # Reporting URL of a ethstats service (nodename:secret@host:port) + +["eth.requiredblocks"] # Comma separated block number-to-hash mappings to require for peering (=) (default = empty map) + "31000000" = "0x2087b9e2b353209c2c21e370c82daa12278efd0fe5f0febe6c29035352cf050e" + "32000000" = "0x875500011e5eecc0c554f95d07b31cf59df4ca2505f4dbbfffa7d4e4da917c68" + +[p2p] + maxpeers = 50 # Maximum number of network peers (network disabled if set to 0) + maxpendpeers = 50 # Maximum number of pending connection attempts + bind = "0.0.0.0" # Network binding address + port = 30303 # Network listening port + nodiscover = false # Disables the peer discovery mechanism (manual peer addition) + nat = "any" # NAT port mapping mechanism (any|none|upnp|pmp|extip:) + [p2p.discovery] + v5disc = false # Enables the experimental RLPx V5 (Topic Discovery) mechanism + bootnodes = [] # Comma separated enode URLs for P2P discovery bootstrap + bootnodesv4 = [] # List of initial v4 bootnodes + bootnodesv5 = [] # List of initial v5 bootnodes + static-nodes = [] # List of static nodes + trusted-nodes = [] # List of trusted nodes + dns = [] # List of enrtree:// URLs which will be queried for nodes to connect to + +[heimdall] + url = "http://localhost:1317" # URL of Heimdall service + "bor.without" = false # Run without Heimdall service (for testing purpose) + grpc-address = "" # Address of Heimdall gRPC service + +[txpool] + locals = [] # Comma separated accounts to treat as locals (no flush, priority inclusion) + nolocals = false # Disables price exemptions for locally submitted transactions + journal = "transactions.rlp" # Disk journal for local transaction to survive node restarts + rejournal = "1h0m0s" # Time interval to regenerate the local transaction journal + pricelimit = 1 # Minimum gas price limit to enforce for acceptance into the pool (mainnet = 30000000000) + pricebump = 10 # Price bump percentage to replace an already existing transaction + accountslots = 16 # Minimum number of executable transaction slots guaranteed per account + globalslots = 32768 # Maximum number of executable transaction slots for all accounts + accountqueue = 16 # Maximum number of non-executable transaction slots permitted per account + globalqueue = 32768 # Maximum number of non-executable transaction slots for all accounts + lifetime = "3h0m0s" # Maximum amount of time non-executable transaction are queued + +[miner] + mine = false # Enable mining + etherbase = "" # Public address for block mining rewards + extradata = "" # Block extra data set by the miner (default = client version) + gaslimit = 30000000 # Target gas ceiling for 
mined blocks + gasprice = "1000000000" # Minimum gas price for mining a transaction (recommended for mainnet = 30000000000, default suitable for mumbai/devnet) + +[jsonrpc] + ipcdisable = false # Disable the IPC-RPC server + ipcpath = "" # Filename for IPC socket/pipe within the datadir (explicit paths escape it) + gascap = 50000000 # Sets a cap on gas that can be used in eth_call/estimateGas (0=infinite) + txfeecap = 5.0 # Sets a cap on transaction fee (in ether) that can be sent via the RPC APIs (0 = no cap) + [jsonrpc.http] + enabled = false # Enable the HTTP-RPC server + port = 8545 # http.port + prefix = "" # http.rpcprefix + host = "localhost" # HTTP-RPC server listening interface + api = ["eth", "net", "web3", "txpool", "bor"] # API's offered over the HTTP-RPC interface + vhosts = ["localhost"] # Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard. + corsdomain = ["localhost"] # Comma separated list of domains from which to accept cross origin requests (browser enforced) + [jsonrpc.ws] + enabled = false # Enable the WS-RPC server + port = 8546 # WS-RPC server listening port + prefix = "" # HTTP path prefix on which JSON-RPC is served. Use '/' to serve on all paths. + host = "localhost" # ws.addr + api = ["net", "web3"] # API's offered over the WS-RPC interface + origins = ["localhost"] # Origins from which to accept websockets requests + [jsonrpc.graphql] + enabled = false # Enable GraphQL on the HTTP-RPC server. Note that GraphQL can only be started if an HTTP server is started as well. + port = 0 # + prefix = "" # + host = "" # + vhosts = ["localhost"] # Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard. + corsdomain = ["localhost"] # Comma separated list of domains from which to accept cross origin requests (browser enforced) + [jsonrpc.timeouts] + read = "30s" + write = "30s" + idle = "2m0s" + +[gpo] + blocks = 20 # Number of recent blocks to check for gas prices + percentile = 60 # Suggested gas price is the given percentile of a set of recent transaction gas prices + maxprice = "5000000000000" # Maximum gas price will be recommended by gpo + ignoreprice = "2" # Gas price below which gpo will ignore transactions (recommended for mainnet = 30000000000, default suitable for mumbai/devnet) + +[telemetry] + metrics = false # Enable metrics collection and reporting + expensive = false # Enable expensive metrics collection and reporting + prometheus-addr = "127.0.0.1:7071" # Address for Prometheus Server + opencollector-endpoint = "127.0.0.1:4317" # OpenCollector Endpoint (host:port) + [telemetry.influx] + influxdb = false # Enable metrics export/push to an external InfluxDB database (v1) + endpoint = "" # InfluxDB API endpoint to report metrics to + database = "" # InfluxDB database name to push reported metrics to + username = "" # Username to authorize access to the database + password = "" # Password to authorize access to the database + influxdbv2 = false # Enable metrics export/push to an external InfluxDB v2 database + token = "" # Token to authorize access to the database (v2 only) + bucket = "" # InfluxDB bucket name to push reported metrics to (v2 only) + organization = "" # InfluxDB organization name (v2 only) + [telemetry.influx.tags] # Comma-separated InfluxDB tags (key/values) attached to all measurements + cloud = "aws" + host = "annon-host" + ip = "99.911.221.66" + region = "us-north-1" + +[cache] + cache = 1024 # Megabytes of memory allocated to 
internal caching (recommended for mainnet = 4096, default suitable for mumbai/devnet) + gc = 25 # Percentage of cache memory allowance to use for trie pruning (default = 25% full mode, 0% archive mode) + snapshot = 10 # Percentage of cache memory allowance to use for snapshot caching (default = 10% full mode, 20% archive mode) + database = 50 # Percentage of cache memory allowance to use for database io + trie = 15 # Percentage of cache memory allowance to use for trie caching (default = 15% full mode, 30% archive mode) + journal = "triecache" # Disk journal directory for trie cache to survive node restarts + rejournal = "1h0m0s" # Time interval to regenerate the trie cache journal + noprefetch = false # Disable heuristic state prefetch during block import (less CPU and disk IO, more time waiting for data) + preimages = false # Enable recording the SHA3/keccak preimages of trie keys + txlookuplimit = 2350000 # Number of recent blocks to maintain transactions index for (default = about 56 days, 0 = entire chain) + triesinmemory = 128 # Number of block states (tries) to keep in memory + timeout = "1h0m0s" # Time after which the Merkle Patricia Trie is stored to disc from memory + +[accounts] + unlock = [] # Comma separated list of accounts to unlock + password = "" # Password file to use for non-interactive password input + allow-insecure-unlock = false # Allow insecure account unlocking when account-related RPCs are exposed by http + lightkdf = false # Reduce key-derivation RAM & CPU usage at some expense of KDF strength + disable-bor-wallet = true # Disable the personal wallet endpoints + +[grpc] + addr = ":3131" # Address and port to bind the GRPC server + +[developer] + dev = false # Enable developer mode with ephemeral proof-of-authority network and a pre-funded developer account, mining enabled + period = 0 # Block period to use in developer mode (0 = mine only if transaction pending) diff --git a/docs/cli/peers_add.md b/docs/cli/peers_add.md index 5bc4ed1448..7b879cdf0d 100644 --- a/docs/cli/peers_add.md +++ b/docs/cli/peers_add.md @@ -4,6 +4,6 @@ The ```peers add ``` command joins the local client to another remote pee ## Options -- ```address```: Address of the grpc endpoint +- ```address```: Address of the grpc endpoint (default: 127.0.0.1:3131) -- ```trusted```: Add the peer as a trusted \ No newline at end of file +- ```trusted```: Add the peer as a trusted (default: false) \ No newline at end of file diff --git a/docs/cli/peers_list.md b/docs/cli/peers_list.md index 41f398b764..5d30d1d32e 100644 --- a/docs/cli/peers_list.md +++ b/docs/cli/peers_list.md @@ -4,4 +4,4 @@ The ```peers list``` command lists the connected peers. 
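As a companion to the `example_config.toml` added above: the file can be read with any standard TOML decoder. The sketch below is illustrative only and is not bor's actual config loader; it assumes the third-party `github.com/BurntSushi/toml` package and a hypothetical trimmed-down struct that picks out a few of the keys.

```go
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

// exampleConfig mirrors only a handful of keys from example_config.toml;
// it is an illustrative struct, not bor's internal Config type.
type exampleConfig struct {
	Chain    string `toml:"chain"`
	LogLevel string `toml:"log-level"`
	DataDir  string `toml:"datadir"`
	P2P      struct {
		MaxPeers int `toml:"maxpeers"`
		Port     int `toml:"port"`
	} `toml:"p2p"`
}

func main() {
	var cfg exampleConfig
	// DecodeFile reads and parses the TOML file into cfg; keys not present
	// in the struct are simply ignored, so a partial struct is enough here.
	if _, err := toml.DecodeFile("docs/cli/example_config.toml", &cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("chain=%s log-level=%s maxpeers=%d\n", cfg.Chain, cfg.LogLevel, cfg.P2P.MaxPeers)
}
```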
## Options -- ```address```: Address of the grpc endpoint \ No newline at end of file +- ```address```: Address of the grpc endpoint (default: 127.0.0.1:3131) \ No newline at end of file diff --git a/docs/cli/peers_remove.md b/docs/cli/peers_remove.md index 2cac1e7656..f731f12f6f 100644 --- a/docs/cli/peers_remove.md +++ b/docs/cli/peers_remove.md @@ -4,6 +4,6 @@ The ```peers remove ``` command disconnects the local client from a conne ## Options -- ```address```: Address of the grpc endpoint +- ```address```: Address of the grpc endpoint (default: 127.0.0.1:3131) -- ```trusted```: Add the peer as a trusted \ No newline at end of file +- ```trusted```: Add the peer as a trusted (default: false) \ No newline at end of file diff --git a/docs/cli/peers_status.md b/docs/cli/peers_status.md index 65a0fe9d8f..9806bfb638 100644 --- a/docs/cli/peers_status.md +++ b/docs/cli/peers_status.md @@ -4,4 +4,4 @@ The ```peers status ``` command displays the status of a peer by its id ## Options -- ```address```: Address of the grpc endpoint \ No newline at end of file +- ```address```: Address of the grpc endpoint (default: 127.0.0.1:3131) \ No newline at end of file diff --git a/docs/cli/removedb.md b/docs/cli/removedb.md index 473d47ecef..7ee09568b9 100644 --- a/docs/cli/removedb.md +++ b/docs/cli/removedb.md @@ -4,6 +4,6 @@ The ```bor removedb``` command will remove the blockchain and state databases at ## Options -- ```address```: Address of the grpc endpoint +- ```address```: Address of the grpc endpoint (default: 127.0.0.1:3131) - ```datadir```: Path of the data directory to store information \ No newline at end of file diff --git a/docs/cli/server.md b/docs/cli/server.md index d52b135fa3..5bc0ff1024 100644 --- a/docs/cli/server.md +++ b/docs/cli/server.md @@ -4,51 +4,53 @@ The ```bor server``` command runs the Bor client. 
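A note on the `(default: ...)` suffixes that now appear throughout the option lists below: they are generated, not hand-written, by the `flagset` changes further down in this diff, where each flag carries a `Default` value that `Help()` and `MarkDown()` append when it is non-nil. A minimal standalone sketch of that rendering rule, using hypothetical local types and example flags:

```go
package main

import "fmt"

// flagVar is a trimmed-down stand-in for the patched flagset.FlagVar:
// a nil Default prints no suffix, anything else is appended.
type flagVar struct {
	Name    string
	Usage   string
	Default any
}

// markdownLine mirrors the formatting used by the patched MarkDown().
func markdownLine(f flagVar) string {
	if f.Default != nil {
		return fmt.Sprintf("- ```%s```: %s (default: %v)", f.Name, f.Usage, f.Default)
	}
	return fmt.Sprintf("- ```%s```: %s", f.Name, f.Usage)
}

func main() {
	fmt.Println(markdownLine(flagVar{Name: "grpc.addr", Usage: "Address and port to bind the GRPC server", Default: ":3131"}))
	fmt.Println(markdownLine(flagVar{Name: "identity", Usage: "Name/Identity of the node"})) // no default printed
}
```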
## Options -- ```chain```: Name of the chain to sync +- ```chain```: Name of the chain to sync ('mumbai', 'mainnet') or path to a genesis file (default: mainnet) - ```identity```: Name/Identity of the node -- ```log-level```: Set log level for the server +- ```log-level```: Set log level for the server (default: INFO) - ```datadir```: Path of the data directory to store information -- ```keystore```: Path of the directory to store keystores +- ```datadir.ancient```: Data directory for ancient chain segments (default = inside chaindata) + +- ```keystore```: Path of the directory where keystores are located - ```config```: File for the config file -- ```syncmode```: Blockchain sync mode (only "full" sync supported) +- ```syncmode```: Blockchain sync mode (only "full" sync supported) (default: full) -- ```gcmode```: Blockchain garbage collection mode ("full", "archive") +- ```gcmode```: Blockchain garbage collection mode ("full", "archive") (default: full) - ```eth.requiredblocks```: Comma separated block number-to-hash mappings to require for peering (=) -- ```snapshot```: Enables the snapshot-database mode (default = true) +- ```snapshot```: Enables the snapshot-database mode (default: true) -- ```bor.logs```: Enables bor log retrieval (default = false) +- ```bor.logs```: Enables bor log retrieval (default: false) -- ```bor.heimdall```: URL of Heimdall service +- ```bor.heimdall```: URL of Heimdall service (default: http://localhost:1317) -- ```bor.withoutheimdall```: Run without Heimdall service (for testing purpose) +- ```bor.withoutheimdall```: Run without Heimdall service (for testing purpose) (default: false) - ```bor.heimdallgRPC```: Address of Heimdall gRPC service - ```ethstats```: Reporting URL of a ethstats service (nodename:secret@host:port) -- ```gpo.blocks```: Number of recent blocks to check for gas prices +- ```gpo.blocks```: Number of recent blocks to check for gas prices (default: 20) -- ```gpo.percentile```: Suggested gas price is the given percentile of a set of recent transaction gas prices +- ```gpo.percentile```: Suggested gas price is the given percentile of a set of recent transaction gas prices (default: 60) -- ```gpo.maxprice```: Maximum gas price will be recommended by gpo +- ```gpo.maxprice```: Maximum gas price will be recommended by gpo (default: 5000000000000) -- ```gpo.ignoreprice```: Gas price below which gpo will ignore transactions +- ```gpo.ignoreprice```: Gas price below which gpo will ignore transactions (default: 2) -- ```disable-bor-wallet```: Disable the personal wallet endpoints +- ```disable-bor-wallet```: Disable the personal wallet endpoints (default: true) -- ```grpc.addr```: Address and port to bind the GRPC server +- ```grpc.addr```: Address and port to bind the GRPC server (default: :3131) -- ```dev```: Enable developer mode with ephemeral proof-of-authority network and a pre-funded developer account, mining enabled +- ```dev```: Enable developer mode with ephemeral proof-of-authority network and a pre-funded developer account, mining enabled (default: false) -- ```dev.period```: Block period to use in developer mode (0 = mine only if transaction pending) +- ```dev.period```: Block period to use in developer mode (0 = mine only if transaction pending) (default: 0) ### Account Management Options @@ -56,113 +58,113 @@ The ```bor server``` command runs the Bor client. 
- ```password```: Password file to use for non-interactive password input -- ```allow-insecure-unlock```: Allow insecure account unlocking when account-related RPCs are exposed by http +- ```allow-insecure-unlock```: Allow insecure account unlocking when account-related RPCs are exposed by http (default: false) -- ```lightkdf```: Reduce key-derivation RAM & CPU usage at some expense of KDF strength +- ```lightkdf```: Reduce key-derivation RAM & CPU usage at some expense of KDF strength (default: false) ### Cache Options -- ```cache```: Megabytes of memory allocated to internal caching (default = 4096 mainnet full node) +- ```cache```: Megabytes of memory allocated to internal caching (default: 1024) -- ```cache.database```: Percentage of cache memory allowance to use for database io +- ```cache.database```: Percentage of cache memory allowance to use for database io (default: 50) -- ```cache.trie```: Percentage of cache memory allowance to use for trie caching (default = 15% full mode, 30% archive mode) +- ```cache.trie```: Percentage of cache memory allowance to use for trie caching (default: 15) -- ```cache.trie.journal```: Disk journal directory for trie cache to survive node restarts +- ```cache.trie.journal```: Disk journal directory for trie cache to survive node restarts (default: triecache) -- ```cache.trie.rejournal```: Time interval to regenerate the trie cache journal +- ```cache.trie.rejournal```: Time interval to regenerate the trie cache journal (default: 1h0m0s) -- ```cache.gc```: Percentage of cache memory allowance to use for trie pruning (default = 25% full mode, 0% archive mode) +- ```cache.gc```: Percentage of cache memory allowance to use for trie pruning (default: 25) -- ```cache.snapshot```: Percentage of cache memory allowance to use for snapshot caching (default = 10% full mode, 20% archive mode) +- ```cache.snapshot```: Percentage of cache memory allowance to use for snapshot caching (default: 10) -- ```cache.noprefetch```: Disable heuristic state prefetch during block import (less CPU and disk IO, more time waiting for data) +- ```cache.noprefetch```: Disable heuristic state prefetch during block import (less CPU and disk IO, more time waiting for data) (default: false) -- ```cache.preimages```: Enable recording the SHA3/keccak preimages of trie keys +- ```cache.preimages```: Enable recording the SHA3/keccak preimages of trie keys (default: false) -- ```cache.triesinmemory```: Number of block states (tries) to keep in memory (default = 128) +- ```cache.triesinmemory```: Number of block states (tries) to keep in memory (default = 128) (default: 128) -- ```txlookuplimit```: Number of recent blocks to maintain transactions index for (default = about 56 days, 0 = entire chain) +- ```txlookuplimit```: Number of recent blocks to maintain transactions index for (default: 2350000) ### JsonRPC Options -- ```rpc.gascap```: Sets a cap on gas that can be used in eth_call/estimateGas (0=infinite) +- ```rpc.gascap```: Sets a cap on gas that can be used in eth_call/estimateGas (0=infinite) (default: 50000000) -- ```rpc.txfeecap```: Sets a cap on transaction fee (in ether) that can be sent via the RPC APIs (0 = no cap) +- ```rpc.txfeecap```: Sets a cap on transaction fee (in ether) that can be sent via the RPC APIs (0 = no cap) (default: 5) -- ```ipcdisable```: Disable the IPC-RPC server +- ```ipcdisable```: Disable the IPC-RPC server (default: false) - ```ipcpath```: Filename for IPC socket/pipe within the datadir (explicit paths escape it) -- ```http.corsdomain```: Comma 
separated list of domains from which to accept cross origin requests (browser enforced) +- ```http.corsdomain```: Comma separated list of domains from which to accept cross origin requests (browser enforced) (default: localhost) -- ```http.vhosts```: Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard. +- ```http.vhosts```: Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard. (default: localhost) -- ```ws.origins```: Origins from which to accept websockets requests +- ```ws.origins```: Origins from which to accept websockets requests (default: localhost) -- ```graphql.corsdomain```: Comma separated list of domains from which to accept cross origin requests (browser enforced) +- ```graphql.corsdomain```: Comma separated list of domains from which to accept cross origin requests (browser enforced) (default: localhost) -- ```graphql.vhosts```: Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard. +- ```graphql.vhosts```: Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard. (default: localhost) -- ```http```: Enable the HTTP-RPC server +- ```http```: Enable the HTTP-RPC server (default: false) -- ```http.addr```: HTTP-RPC server listening interface +- ```http.addr```: HTTP-RPC server listening interface (default: localhost) -- ```http.port```: HTTP-RPC server listening port +- ```http.port```: HTTP-RPC server listening port (default: 8545) - ```http.rpcprefix```: HTTP path path prefix on which JSON-RPC is served. Use '/' to serve on all paths. -- ```http.api```: API's offered over the HTTP-RPC interface +- ```http.api```: API's offered over the HTTP-RPC interface (default: eth,net,web3,txpool,bor) -- ```ws```: Enable the WS-RPC server +- ```ws```: Enable the WS-RPC server (default: false) -- ```ws.addr```: WS-RPC server listening interface +- ```ws.addr```: WS-RPC server listening interface (default: localhost) -- ```ws.port```: WS-RPC server listening port +- ```ws.port```: WS-RPC server listening port (default: 8546) - ```ws.rpcprefix```: HTTP path prefix on which JSON-RPC is served. Use '/' to serve on all paths. -- ```ws.api```: API's offered over the WS-RPC interface +- ```ws.api```: API's offered over the WS-RPC interface (default: net,web3) -- ```graphql```: Enable GraphQL on the HTTP-RPC server. Note that GraphQL can only be started if an HTTP server is started as well. +- ```graphql```: Enable GraphQL on the HTTP-RPC server. Note that GraphQL can only be started if an HTTP server is started as well. 
(default: false) ### P2P Options -- ```bind```: Network binding address +- ```bind```: Network binding address (default: 0.0.0.0) -- ```port```: Network listening port +- ```port```: Network listening port (default: 30303) - ```bootnodes```: Comma separated enode URLs for P2P discovery bootstrap -- ```maxpeers```: Maximum number of network peers (network disabled if set to 0) +- ```maxpeers```: Maximum number of network peers (network disabled if set to 0) (default: 50) -- ```maxpendpeers```: Maximum number of pending connection attempts (defaults used if set to 0) +- ```maxpendpeers```: Maximum number of pending connection attempts (default: 50) -- ```nat```: NAT port mapping mechanism (any|none|upnp|pmp|extip:) +- ```nat```: NAT port mapping mechanism (any|none|upnp|pmp|extip:) (default: any) -- ```nodiscover```: Disables the peer discovery mechanism (manual peer addition) +- ```nodiscover```: Disables the peer discovery mechanism (manual peer addition) (default: false) -- ```v5disc```: Enables the experimental RLPx V5 (Topic Discovery) mechanism +- ```v5disc```: Enables the experimental RLPx V5 (Topic Discovery) mechanism (default: false) ### Sealer Options -- ```mine```: Enable mining +- ```mine```: Enable mining (default: false) -- ```miner.etherbase```: Public address for block mining rewards (default = first account) +- ```miner.etherbase```: Public address for block mining rewards - ```miner.extradata```: Block extra data set by the miner (default = client version) -- ```miner.gaslimit```: Target gas ceiling for mined blocks +- ```miner.gaslimit```: Target gas ceiling (gas limit) for mined blocks (default: 30000000) -- ```miner.gasprice```: Minimum gas price for mining a transaction +- ```miner.gasprice```: Minimum gas price for mining a transaction (default: 1000000000) ### Telemetry Options -- ```metrics```: Enable metrics collection and reporting +- ```metrics```: Enable metrics collection and reporting (default: false) -- ```metrics.expensive```: Enable expensive metrics collection and reporting +- ```metrics.expensive```: Enable expensive metrics collection and reporting (default: false) -- ```metrics.influxdb```: Enable metrics export/push to an external InfluxDB database (v1) +- ```metrics.influxdb```: Enable metrics export/push to an external InfluxDB database (v1) (default: false) - ```metrics.influxdb.endpoint```: InfluxDB API endpoint to report metrics to @@ -174,11 +176,11 @@ The ```bor server``` command runs the Bor client. - ```metrics.influxdb.tags```: Comma-separated InfluxDB tags (key/values) attached to all measurements -- ```metrics.prometheus-addr```: Address for Prometheus Server +- ```metrics.prometheus-addr```: Address for Prometheus Server (default: 127.0.0.1:7071) -- ```metrics.opencollector-endpoint```: OpenCollector Endpoint (host:port) +- ```metrics.opencollector-endpoint```: OpenCollector Endpoint (host:port) (default: 127.0.0.1:4317) -- ```metrics.influxdbv2```: Enable metrics export/push to an external InfluxDB v2 database +- ```metrics.influxdbv2```: Enable metrics export/push to an external InfluxDB v2 database (default: false) - ```metrics.influxdb.token```: Token to authorize access to the database (v2 only) @@ -190,22 +192,22 @@ The ```bor server``` command runs the Bor client. 
- ```txpool.locals```: Comma separated accounts to treat as locals (no flush, priority inclusion) -- ```txpool.nolocals```: Disables price exemptions for locally submitted transactions +- ```txpool.nolocals```: Disables price exemptions for locally submitted transactions (default: false) -- ```txpool.journal```: Disk journal for local transaction to survive node restarts +- ```txpool.journal```: Disk journal for local transaction to survive node restarts (default: transactions.rlp) -- ```txpool.rejournal```: Time interval to regenerate the local transaction journal +- ```txpool.rejournal```: Time interval to regenerate the local transaction journal (default: 1h0m0s) -- ```txpool.pricelimit```: Minimum gas price limit to enforce for acceptance into the pool +- ```txpool.pricelimit```: Minimum gas price limit to enforce for acceptance into the pool (default: 1) -- ```txpool.pricebump```: Price bump percentage to replace an already existing transaction +- ```txpool.pricebump```: Price bump percentage to replace an already existing transaction (default: 10) -- ```txpool.accountslots```: Minimum number of executable transaction slots guaranteed per account +- ```txpool.accountslots```: Minimum number of executable transaction slots guaranteed per account (default: 16) -- ```txpool.globalslots```: Maximum number of executable transaction slots for all accounts +- ```txpool.globalslots```: Maximum number of executable transaction slots for all accounts (default: 32768) -- ```txpool.accountqueue```: Maximum number of non-executable transaction slots permitted per account +- ```txpool.accountqueue```: Maximum number of non-executable transaction slots permitted per account (default: 16) -- ```txpool.globalqueue```: Maximum number of non-executable transaction slots for all accounts +- ```txpool.globalqueue```: Maximum number of non-executable transaction slots for all accounts (default: 32768) -- ```txpool.lifetime```: Maximum amount of time non-executable transaction are queued \ No newline at end of file +- ```txpool.lifetime```: Maximum amount of time non-executable transaction are queued (default: 3h0m0s) \ No newline at end of file diff --git a/docs/cli/snapshot.md b/docs/cli/snapshot.md new file mode 100644 index 0000000000..376220749b --- /dev/null +++ b/docs/cli/snapshot.md @@ -0,0 +1,5 @@ +# snapshot + +The ```snapshot``` command groups snapshot related actions: + +- [```snapshot prune-state```](./snapshot_prune-state.md): Prune state databases at the given datadir location. \ No newline at end of file diff --git a/docs/cli/snapshot_prune-state.md b/docs/cli/snapshot_prune-state.md new file mode 100644 index 0000000000..73742faeac --- /dev/null +++ b/docs/cli/snapshot_prune-state.md @@ -0,0 +1,21 @@ +# Prune state + +The ```bor snapshot prune-state``` command will prune historical state data with the help of the state snapshot. All trie nodes and contract codes that do not belong to the specified version state will be deleted from the database. After pruning, only two version states are available: genesis and the specific one. 
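For context, the sketch below mirrors roughly what this command does internally, based on the `internal/cli/snapshot.go` code added later in this diff: open the chain database together with its freezer, build a `pruner.Pruner`, and prune against the snapshot. Paths, cache sizes and handle counts are placeholder values, and error handling is simplified.

```go
// Simplified sketch of the prune-state flow; follows the calls made by
// internal/cli/snapshot.go in this diff, with hard-coded illustrative values.
package main

import (
	"log"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state/pruner"
	"github.com/ethereum/go-ethereum/node"
)

func main() {
	// Open a node rooted at the datadir (placeholder path).
	stack, err := node.New(&node.Config{DataDir: "/var/lib/bor/data"})
	if err != nil {
		log.Fatal(err)
	}

	// Open the chain database together with its ancient/freezer store.
	chaindb, err := stack.OpenDatabaseWithFreezer("chaindata", 1024, 512, "", "", false)
	if err != nil {
		log.Fatal(err)
	}

	// Build the pruner: database, datadir, trie cache journal dir, bloom filter size.
	p, err := pruner.NewPruner(chaindb, stack.ResolvePath(""), stack.ResolvePath("triecache"), 2048)
	if err != nil {
		log.Fatal(err)
	}

	// The command passes an empty root, letting the pruner work from the
	// persisted snapshot state, so the sketch does the same.
	if err := p.Prune(common.Hash{}); err != nil {
		log.Fatal(err)
	}
}
```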
+ +## Options + +- ```datadir```: Path of the data directory to store information + +- ```keystore```: Path of the data directory to store keys + +- ```datadir.ancient```: Path of the ancient data directory to store information + +- ```bloomfilter.size```: Size of the bloom filter (default: 2048) + +### Cache Options + +- ```cache```: Megabytes of memory allocated to internal caching (default: 1024) + +- ```cache.trie```: Percentage of cache memory allowance to use for trie caching (default: 25) + +- ```cache.trie.journal```: Path of the trie journal directory to store information (default: triecache) \ No newline at end of file diff --git a/docs/config.md b/docs/config.md deleted file mode 100644 index 57f4c25fef..0000000000 --- a/docs/config.md +++ /dev/null @@ -1,146 +0,0 @@ - -# Config - -- The `bor dumpconfig` command prints the default configurations, in the TOML format, on the terminal. - - One can `pipe (>)` this to a file (say `config.toml`) and use it to start bor. - - Command to provide a config file: `bor server -config config.toml` -- Bor uses TOML, HCL, and JSON format config files. -- This is the format of the config file in TOML: - - **NOTE: The values of these following flags are just for reference** - - `config.toml` file: -``` -chain = "mainnet" -identity = "myIdentity" -log-level = "INFO" -datadir = "/var/lib/bor/data" -keystore = "path/to/keystore" -syncmode = "full" -gcmode = "full" -snapshot = true -ethstats = "" - -["eth.requiredblocks"] - -[p2p] -maxpeers = 50 -maxpendpeers = 50 -bind = "0.0.0.0" -port = 30303 -nodiscover = false -nat = "any" - -[p2p.discovery] -v5disc = false -bootnodes = ["enode://d860a01f9722d78051619d1e2351aba3f43f943f6f00718d1b9baa4101932a1f5011f16bb2b1bb35db20d6fe28fa0bf09636d26a87d31de9ec6203eeedb1f666@18.138.108.67:30303", "enode://22a8232c3abc76a16ae9d6c3b164f98775fe226f0917b0ca871128a74a8e9630b458460865bab457221f1d448dd9791d24c4e5d88786180ac185df813a68d4de@3.209.45.79:30303"] -bootnodesv4 = [] -bootnodesv5 = ["enr:-KG4QOtcP9X1FbIMOe17QNMKqDxCpm14jcX5tiOE4_TyMrFqbmhPZHK_ZPG2Gxb1GE2xdtodOfx9-cgvNtxnRyHEmC0ghGV0aDKQ9aX9QgAAAAD__________4JpZIJ2NIJpcIQDE8KdiXNlY3AyNTZrMaEDhpehBDbZjM_L9ek699Y7vhUJ-eAdMyQW_Fil522Y0fODdGNwgiMog3VkcIIjKA", "enr:-KG4QDyytgmE4f7AnvW-ZaUOIi9i79qX4JwjRAiXBZCU65wOfBu-3Nb5I7b_Rmg3KCOcZM_C3y5pg7EBU5XGrcLTduQEhGV0aDKQ9aX9QgAAAAD__________4JpZIJ2NIJpcIQ2_DUbiXNlY3AyNTZrMaEDKnz_-ps3UUOfHWVYaskI5kWYO_vtYMGYCQRAR3gHDouDdGNwgiMog3VkcIIjKA"] -static-nodes = ["enode://8499da03c47d637b20eee24eec3c356c9a2e6148d6fe25ca195c7949ab8ec2c03e3556126b0d7ed644675e78c4318b08691b7b57de10e5f0d40d05b09238fa0a@52.187.207.27:30303"] -trusted-nodes = ["enode://2b252ab6a1d0f971d9722cb839a42cb81db019ba44c08754628ab4a823487071b5695317c8ccd085219c3a03af063495b2f1da8d18218da2d6a82981b45e6ffc@65.108.70.101:30303"] -dns = [] - -[heimdall] -url = "http://localhost:1317" -"bor.without" = false - -[txpool] -locals = ["$ADDRESS1", "$ADDRESS2"] -nolocals = false -journal = "" -rejournal = "1h0m0s" -pricelimit = 30000000000 -pricebump = 10 -accountslots = 16 -globalslots = 32768 -accountqueue = 16 -globalqueue = 32768 -lifetime = "3h0m0s" - -[miner] -mine = false -etherbase = "" -extradata = "" -gaslimit = 20000000 -gasprice = "30000000000" - -[jsonrpc] -ipcdisable = false -ipcpath = "/var/lib/bor/bor.ipc" -gascap = 50000000 -txfeecap = 5e+00 - -[jsonrpc.http] -enabled = false -port = 8545 -prefix = "" -host = "localhost" -api = ["eth", "net", "web3", "txpool", "bor"] -vhosts = ["*"] -corsdomain = ["*"] - -[jsonrpc.ws] -enabled = false -port = 8546 -prefix = 
"" -host = "localhost" -api = ["web3", "net"] -vhosts = ["*"] -corsdomain = ["*"] - -[jsonrpc.graphql] -enabled = false -port = 0 -prefix = "" -host = "" -api = [] -vhosts = ["*"] -corsdomain = ["*"] - -[gpo] -blocks = 20 -percentile = 60 -maxprice = "5000000000000" -ignoreprice = "2" - -[telemetry] -metrics = false -expensive = false -prometheus-addr = "" -opencollector-endpoint = "" - -[telemetry.influx] -influxdb = false -endpoint = "" -database = "" -username = "" -password = "" -influxdbv2 = false -token = "" -bucket = "" -organization = "" - -[cache] -cache = 1024 -gc = 25 -snapshot = 10 -database = 50 -trie = 15 -journal = "triecache" -rejournal = "1h0m0s" -noprefetch = false -preimages = false -txlookuplimit = 2350000 - -[accounts] -unlock = ["$ADDRESS1", "$ADDRESS2"] -password = "path/to/password.txt" -allow-insecure-unlock = false -lightkdf = false -disable-bor-wallet = false - -[grpc] -addr = ":3131" - -[developer] -dev = false -period = 0 -``` diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index f92bc652a6..135defc0b9 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -357,10 +357,6 @@ func (d *Downloader) LegacySync(id string, head common.Hash, td, ttd *big.Int, m return err // This is an expected fault, don't keep printing it in a spin-loop } - if errors.Is(err, whitelist.ErrNoRemoteCheckoint) { - log.Warn("Doesn't have remote checkpoint yet", "peer", id, "err", err) - } - log.Warn("Synchronisation failed, retrying", "peer", id, "err", err) return err @@ -1581,6 +1577,13 @@ func (d *Downloader) importBlockResults(results []*fetchResult) error { // of the blocks delivered from the downloader, and the indexing will be off. log.Debug("Downloaded item processing failed on sidechain import", "index", index, "err", err) } + + // If we've received too long future chain error (from whitelisting service), + // return that as the root error and `errInvalidChain` as context. 
+ if errors.Is(err, whitelist.ErrLongFutureChain) { + return fmt.Errorf("%v: %w", errInvalidChain, err) + } + return fmt.Errorf("%w: %v", errInvalidChain, err) } return nil diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index d8765ef077..a9242fba5b 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -1426,8 +1426,8 @@ func (w *whitelistFake) IsValidPeer(_ *types.Header, _ func(number uint64, amoun return w.validate(w.count) } -func (w *whitelistFake) IsValidChain(current *types.Header, headers []*types.Header) bool { - return true +func (w *whitelistFake) IsValidChain(current *types.Header, headers []*types.Header) (bool, error) { + return true, nil } func (w *whitelistFake) ProcessCheckpoint(_ uint64, _ common.Hash) {} diff --git a/eth/downloader/whitelist/service.go b/eth/downloader/whitelist/service.go index 0e905cce28..3cb402c442 100644 --- a/eth/downloader/whitelist/service.go +++ b/eth/downloader/whitelist/service.go @@ -30,6 +30,7 @@ func NewService(maxCapacity uint) *Service { var ( ErrCheckpointMismatch = errors.New("checkpoint mismatch") + ErrLongFutureChain = errors.New("received future chain of unacceptable length") ErrNoRemoteCheckoint = errors.New("remote peer doesn't have a checkoint") ) @@ -74,16 +75,16 @@ func (w *Service) IsValidPeer(remoteHeader *types.Header, fetchHeadersByNumber f // IsValidChain checks the validity of chain by comparing it // against the local checkpoint entries -func (w *Service) IsValidChain(currentHeader *types.Header, chain []*types.Header) bool { +func (w *Service) IsValidChain(currentHeader *types.Header, chain []*types.Header) (bool, error) { // Check if we have checkpoints to validate incoming chain in memory if len(w.checkpointWhitelist) == 0 { // We don't have any entries, no additional validation will be possible - return true + return true, nil } // Return if we've received empty chain if len(chain) == 0 { - return false + return false, nil } var ( @@ -95,7 +96,7 @@ func (w *Service) IsValidChain(currentHeader *types.Header, chain []*types.Heade if chain[len(chain)-1].Number.Uint64() < oldestCheckpointNumber { // We have future whitelisted entries, so no additional validation will be possible // This case will occur when bor is in middle of sync, but heimdall is ahead/fully synced. 
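A short aside on the wrapping direction used in the downloader change above: `fmt.Errorf("%v: %w", errInvalidChain, err)` keeps the whitelist error (here `ErrLongFutureChain`) as the unwrappable cause, while the pre-existing `fmt.Errorf("%w: %v", errInvalidChain, err)` exposes `errInvalidChain` instead. The toy program below, with stand-in sentinel errors, shows how `errors.Is` treats the two forms:

```go
package main

import (
	"errors"
	"fmt"
)

// Stand-in sentinels for errInvalidChain and whitelist.ErrLongFutureChain.
var (
	errInvalidChain    = errors.New("invalid chain")
	errLongFutureChain = errors.New("received future chain of unacceptable length")
)

func main() {
	// Root cause is the whitelist error; invalid-chain is only context (%v).
	rootIsWhitelist := fmt.Errorf("%v: %w", errInvalidChain, errLongFutureChain)
	// Root cause is errInvalidChain; the whitelist error is only context (%v).
	rootIsInvalidChain := fmt.Errorf("%w: %v", errInvalidChain, errLongFutureChain)

	fmt.Println(errors.Is(rootIsWhitelist, errLongFutureChain))    // true
	fmt.Println(errors.Is(rootIsWhitelist, errInvalidChain))       // false: formatted in, not wrapped
	fmt.Println(errors.Is(rootIsInvalidChain, errInvalidChain))    // true
	fmt.Println(errors.Is(rootIsInvalidChain, errLongFutureChain)) // false
}
```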
- return true + return true, nil } // Split the chain into past and future chain @@ -109,18 +110,18 @@ func (w *Service) IsValidChain(currentHeader *types.Header, chain []*types.Heade // Don't accept future chain of unacceptable length (from current block) if len(futureChain)+offset > int(w.checkpointInterval) { - return false + return false, ErrLongFutureChain } // Iterate over the chain and validate against the last checkpoint // It will handle all cases where the incoming chain has atleast one checkpoint for i := len(pastChain) - 1; i >= 0; i-- { if _, ok := w.checkpointWhitelist[pastChain[i].Number.Uint64()]; ok { - return pastChain[i].Hash() == w.checkpointWhitelist[pastChain[i].Number.Uint64()] + return pastChain[i].Hash() == w.checkpointWhitelist[pastChain[i].Number.Uint64()], nil } } - return true + return true, nil } func splitChain(current uint64, chain []*types.Header) ([]*types.Header, []*types.Header) { diff --git a/eth/downloader/whitelist/service_test.go b/eth/downloader/whitelist/service_test.go index c21490d125..df23df2fc9 100644 --- a/eth/downloader/whitelist/service_test.go +++ b/eth/downloader/whitelist/service_test.go @@ -119,8 +119,9 @@ func TestIsValidChain(t *testing.T) { s := NewMockService(10, 10) chainA := createMockChain(1, 20) // A1->A2...A19->A20 // case1: no checkpoint whitelist, should consider the chain as valid - res := s.IsValidChain(nil, chainA) + res, err := s.IsValidChain(nil, chainA) require.Equal(t, res, true, "expected chain to be valid") + require.Equal(t, err, nil, "expected error to be nil") tempChain := createMockChain(21, 22) // A21->A22 @@ -132,8 +133,9 @@ func TestIsValidChain(t *testing.T) { // case2: We're behind the oldest whitelisted block entry, should consider // the chain as valid as we're still far behind the latest blocks - res = s.IsValidChain(chainA[len(chainA)-1], chainA) + res, err = s.IsValidChain(chainA[len(chainA)-1], chainA) require.Equal(t, res, true, "expected chain to be valid") + require.Equal(t, err, nil, "expected error to be nil") // Clear checkpoint whitelist and add blocks A5 and A15 in whitelist s.PurgeCheckpointWhitelist() @@ -144,8 +146,9 @@ func TestIsValidChain(t *testing.T) { // case3: Try importing a past chain having valid checkpoint, should // consider the chain as valid - res = s.IsValidChain(chainA[len(chainA)-1], chainA) + res, err = s.IsValidChain(chainA[len(chainA)-1], chainA) require.Equal(t, res, true, "expected chain to be valid") + require.Equal(t, err, nil, "expected error to be nil") // Clear checkpoint whitelist and mock blocks in whitelist tempChain = createMockChain(20, 20) // A20 @@ -156,22 +159,25 @@ func TestIsValidChain(t *testing.T) { require.Equal(t, s.length(), 1, "expected 1 items in whitelist") // case4: Try importing a past chain having invalid checkpoint - res = s.IsValidChain(chainA[len(chainA)-1], chainA) + res, _ = s.IsValidChain(chainA[len(chainA)-1], chainA) require.Equal(t, res, false, "expected chain to be invalid") + // Not checking error here because we return nil in case of checkpoint mismatch // create a future chain to be imported of length <= `checkpointInterval` chainB := createMockChain(21, 30) // B21->B22...B29->B30 // case5: Try importing a future chain of acceptable length - res = s.IsValidChain(chainA[len(chainA)-1], chainB) + res, err = s.IsValidChain(chainA[len(chainA)-1], chainB) require.Equal(t, res, true, "expected chain to be valid") + require.Equal(t, err, nil, "expected error to be nil") // create a future chain to be imported of length > 
`checkpointInterval` chainB = createMockChain(21, 40) // C21->C22...C39->C40 // case5: Try importing a future chain of unacceptable length - res = s.IsValidChain(chainA[len(chainA)-1], chainB) + res, err = s.IsValidChain(chainA[len(chainA)-1], chainB) require.Equal(t, res, false, "expected chain to be invalid") + require.Equal(t, err, ErrLongFutureChain, "expected error") } func TestSplitChain(t *testing.T) { diff --git a/interfaces.go b/interfaces.go index ff6d80b1ec..88a173adea 100644 --- a/interfaces.go +++ b/interfaces.go @@ -242,7 +242,7 @@ type StateSyncFilter struct { // interface for whitelist service type ChainValidator interface { IsValidPeer(remoteHeader *types.Header, fetchHeadersByNumber func(number uint64, amount int, skip int, reverse bool) ([]*types.Header, []common.Hash, error)) (bool, error) - IsValidChain(currentHeader *types.Header, chain []*types.Header) bool + IsValidChain(currentHeader *types.Header, chain []*types.Header) (bool, error) ProcessCheckpoint(endBlockNum uint64, endBlockHash common.Hash) GetCheckpointWhitelist() map[uint64]common.Hash PurgeCheckpointWhitelist() diff --git a/internal/cli/command.go b/internal/cli/command.go index 93dca4cb3e..95f7776df6 100644 --- a/internal/cli/command.go +++ b/internal/cli/command.go @@ -189,6 +189,16 @@ func Commands() map[string]MarkDownCommandFactory { Meta2: meta2, }, nil }, + "snapshot": func() (MarkDownCommand, error) { + return &SnapshotCommand{ + UI: ui, + }, nil + }, + "snapshot prune-state": func() (MarkDownCommand, error) { + return &PruneStateCommand{ + Meta: meta, + }, nil + }, } } @@ -248,7 +258,7 @@ func (m *Meta) NewFlagSet(n string) *flagset.Flagset { f.StringFlag(&flagset.StringFlag{ Name: "keystore", Value: &m.keyStoreDir, - Usage: "Path of the data directory to store information", + Usage: "Path of the data directory to store keys", }) return f diff --git a/internal/cli/dumpconfig.go b/internal/cli/dumpconfig.go index dad0be923d..a748af3357 100644 --- a/internal/cli/dumpconfig.go +++ b/internal/cli/dumpconfig.go @@ -52,12 +52,16 @@ func (c *DumpconfigCommand) Run(args []string) int { userConfig := command.GetConfig() // convert the big.Int and time.Duration fields to their corresponding Raw fields + userConfig.JsonRPC.HttpTimeout.ReadTimeoutRaw = userConfig.JsonRPC.HttpTimeout.ReadTimeout.String() + userConfig.JsonRPC.HttpTimeout.WriteTimeoutRaw = userConfig.JsonRPC.HttpTimeout.WriteTimeout.String() + userConfig.JsonRPC.HttpTimeout.IdleTimeoutRaw = userConfig.JsonRPC.HttpTimeout.IdleTimeout.String() userConfig.TxPool.RejournalRaw = userConfig.TxPool.Rejournal.String() userConfig.TxPool.LifeTimeRaw = userConfig.TxPool.LifeTime.String() userConfig.Sealer.GasPriceRaw = userConfig.Sealer.GasPrice.String() userConfig.Gpo.MaxPriceRaw = userConfig.Gpo.MaxPrice.String() userConfig.Gpo.IgnorePriceRaw = userConfig.Gpo.IgnorePrice.String() userConfig.Cache.RejournalRaw = userConfig.Cache.Rejournal.String() + userConfig.Cache.TrieTimeoutRaw = userConfig.Cache.TrieTimeout.String() if err := toml.NewEncoder(os.Stdout).Encode(userConfig); err != nil { c.UI.Error(err.Error()) diff --git a/internal/cli/flagset/flagset.go b/internal/cli/flagset/flagset.go index 933fe59060..74249df395 100644 --- a/internal/cli/flagset/flagset.go +++ b/internal/cli/flagset/flagset.go @@ -24,9 +24,10 @@ func NewFlagSet(name string) *Flagset { } type FlagVar struct { - Name string - Usage string - Group string + Name string + Usage string + Group string + Default any } func (f *Flagset) addFlag(fl *FlagVar) { @@ -38,7 +39,11 @@ func (f 
*Flagset) Help() string { items := []string{} for _, item := range f.flags { - items = append(items, fmt.Sprintf(" -%s\n %s", item.Name, item.Usage)) + if item.Default != nil { + items = append(items, fmt.Sprintf(" -%s\n %s (default: %v)", item.Name, item.Usage, item.Default)) + } else { + items = append(items, fmt.Sprintf(" -%s\n %s", item.Name, item.Usage)) + } } return str + strings.Join(items, "\n\n") @@ -85,7 +90,11 @@ func (f *Flagset) MarkDown() string { } for _, item := range groups[k] { - items = append(items, fmt.Sprintf("- ```%s```: %s", item.Name, item.Usage)) + if item.Default != nil { + items = append(items, fmt.Sprintf("- ```%s```: %s (default: %v)", item.Name, item.Usage, item.Default)) + } else { + items = append(items, fmt.Sprintf("- ```%s```: %s", item.Name, item.Usage)) + } } } @@ -110,27 +119,39 @@ type BoolFlag struct { func (f *Flagset) BoolFlag(b *BoolFlag) { f.addFlag(&FlagVar{ - Name: b.Name, - Usage: b.Usage, - Group: b.Group, + Name: b.Name, + Usage: b.Usage, + Group: b.Group, + Default: b.Default, }) f.set.BoolVar(b.Value, b.Name, b.Default, b.Usage) } type StringFlag struct { - Name string - Usage string - Default string - Value *string - Group string + Name string + Usage string + Default string + Value *string + Group string + HideDefaultFromDoc bool } func (f *Flagset) StringFlag(b *StringFlag) { - f.addFlag(&FlagVar{ - Name: b.Name, - Usage: b.Usage, - Group: b.Group, - }) + if b.Default == "" || b.HideDefaultFromDoc { + f.addFlag(&FlagVar{ + Name: b.Name, + Usage: b.Usage, + Group: b.Group, + Default: nil, + }) + } else { + f.addFlag(&FlagVar{ + Name: b.Name, + Usage: b.Usage, + Group: b.Group, + Default: b.Default, + }) + } f.set.StringVar(b.Value, b.Name, b.Default, b.Usage) } @@ -144,9 +165,10 @@ type IntFlag struct { func (f *Flagset) IntFlag(i *IntFlag) { f.addFlag(&FlagVar{ - Name: i.Name, - Usage: i.Usage, - Group: i.Group, + Name: i.Name, + Usage: i.Usage, + Group: i.Group, + Default: i.Default, }) f.set.IntVar(i.Value, i.Name, i.Default, i.Usage) } @@ -161,18 +183,20 @@ type Uint64Flag struct { func (f *Flagset) Uint64Flag(i *Uint64Flag) { f.addFlag(&FlagVar{ - Name: i.Name, - Usage: i.Usage, - Group: i.Group, + Name: i.Name, + Usage: i.Usage, + Group: i.Group, + Default: fmt.Sprintf("%d", i.Default), }) f.set.Uint64Var(i.Value, i.Name, i.Default, i.Usage) } type BigIntFlag struct { - Name string - Usage string - Value *big.Int - Group string + Name string + Usage string + Value *big.Int + Group string + Default *big.Int } func (b *BigIntFlag) String() string { @@ -204,9 +228,10 @@ func (b *BigIntFlag) Set(value string) error { func (f *Flagset) BigIntFlag(b *BigIntFlag) { f.addFlag(&FlagVar{ - Name: b.Name, - Usage: b.Usage, - Group: b.Group, + Name: b.Name, + Usage: b.Usage, + Group: b.Group, + Default: b.Default, }) f.set.Var(b, b.Name, b.Usage) } @@ -247,11 +272,21 @@ func (i *SliceStringFlag) Set(value string) error { } func (f *Flagset) SliceStringFlag(s *SliceStringFlag) { - f.addFlag(&FlagVar{ - Name: s.Name, - Usage: s.Usage, - Group: s.Group, - }) + if s.Default == nil || len(s.Default) == 0 { + f.addFlag(&FlagVar{ + Name: s.Name, + Usage: s.Usage, + Group: s.Group, + Default: nil, + }) + } else { + f.addFlag(&FlagVar{ + Name: s.Name, + Usage: s.Usage, + Group: s.Group, + Default: strings.Join(s.Default, ","), + }) + } f.set.Var(s, s.Name, s.Usage) } @@ -265,33 +300,39 @@ type DurationFlag struct { func (f *Flagset) DurationFlag(d *DurationFlag) { f.addFlag(&FlagVar{ - Name: d.Name, - Usage: d.Usage, - Group: d.Group, + Name: d.Name, + 
Usage: d.Usage, + Group: d.Group, + Default: d.Default, }) f.set.DurationVar(d.Value, d.Name, d.Default, "") } type MapStringFlag struct { - Name string - Usage string - Value *map[string]string - Group string + Name string + Usage string + Value *map[string]string + Group string + Default map[string]string } -func (m *MapStringFlag) String() string { - if m.Value == nil { +func formatMapString(m map[string]string) string { + if len(m) == 0 { return "" } ls := []string{} - for k, v := range *m.Value { + for k, v := range m { ls = append(ls, k+"="+v) } return strings.Join(ls, ",") } +func (m *MapStringFlag) String() string { + return formatMapString(*m.Value) +} + func (m *MapStringFlag) Set(value string) error { if m.Value == nil { m.Value = &map[string]string{} @@ -311,11 +352,21 @@ func (m *MapStringFlag) Set(value string) error { } func (f *Flagset) MapStringFlag(m *MapStringFlag) { - f.addFlag(&FlagVar{ - Name: m.Name, - Usage: m.Usage, - Group: m.Group, - }) + if m.Default == nil || len(m.Default) == 0 { + f.addFlag(&FlagVar{ + Name: m.Name, + Usage: m.Usage, + Group: m.Group, + Default: nil, + }) + } else { + f.addFlag(&FlagVar{ + Name: m.Name, + Usage: m.Usage, + Group: m.Group, + Default: formatMapString(m.Default), + }) + } f.set.Var(m, m.Name, m.Usage) } @@ -329,9 +380,10 @@ type Float64Flag struct { func (f *Flagset) Float64Flag(i *Float64Flag) { f.addFlag(&FlagVar{ - Name: i.Name, - Usage: i.Usage, - Group: i.Group, + Name: i.Name, + Usage: i.Usage, + Group: i.Group, + Default: i.Default, }) f.set.Float64Var(i.Value, i.Name, i.Default, "") } diff --git a/internal/cli/removedb.go b/internal/cli/removedb.go index 4a604086ed..224dae95d5 100644 --- a/internal/cli/removedb.go +++ b/internal/cli/removedb.go @@ -24,9 +24,10 @@ type RemoveDBCommand struct { } const ( - chaindataPath string = "chaindata" - ancientPath string = "ancient" - lightchaindataPath string = "lightchaindata" + chaindataPath string = "chaindata" + ancientPath string = "ancient" + trieCacheJournalPath string = "triecache" + lightchaindataPath string = "lightchaindata" ) // MarkDown implements cli.MarkDown interface diff --git a/internal/cli/server/chains/mainnet.go b/internal/cli/server/chains/mainnet.go index 7aee9cd606..b2570e9b2f 100644 --- a/internal/cli/server/chains/mainnet.go +++ b/internal/cli/server/chains/mainnet.go @@ -30,14 +30,17 @@ var mainnetBor = &Chain{ LondonBlock: big.NewInt(23850000), Bor: ¶ms.BorConfig{ JaipurBlock: big.NewInt(23850000), + DelhiBlock: big.NewInt(38189056), Period: map[string]uint64{ "0": 2, }, ProducerDelay: map[string]uint64{ - "0": 6, + "0": 6, + "38189056": 4, }, Sprint: map[string]uint64{ - "0": 64, + "0": 64, + "38189056": 16, }, BackupMultiplier: map[string]uint64{ "0": 2, diff --git a/internal/cli/server/config.go b/internal/cli/server/config.go index 1a526d39ce..c0543dcb88 100644 --- a/internal/cli/server/config.go +++ b/internal/cli/server/config.go @@ -34,6 +34,7 @@ import ( "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/nat" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rpc" ) type Config struct { @@ -54,6 +55,9 @@ type Config struct { // DataDir is the directory to store the state in DataDir string `hcl:"datadir,optional" toml:"datadir,optional"` + // Ancient is the directory to store the state in + Ancient string `hcl:"ancient,optional" toml:"ancient,optional"` + // KeyStoreDir is the directory to store keystores KeyStoreDir string `hcl:"keystore,optional" toml:"keystore,optional"` @@ -245,6 +249,8 @@ 
type JsonRPCConfig struct { // Graphql has the json-rpc graphql related settings Graphql *APIConfig `hcl:"graphql,block" toml:"graphql,block"` + + HttpTimeout *HttpTimeouts `hcl:"timeouts,block" toml:"timeouts,block"` } type GRPCConfig struct { @@ -278,6 +284,33 @@ type APIConfig struct { Origins []string `hcl:"origins,optional" toml:"origins,optional"` } +// Used from rpc.HTTPTimeouts +type HttpTimeouts struct { + // ReadTimeout is the maximum duration for reading the entire + // request, including the body. + // + // Because ReadTimeout does not let Handlers make per-request + // decisions on each request body's acceptable deadline or + // upload rate, most users will prefer to use + // ReadHeaderTimeout. It is valid to use them both. + ReadTimeout time.Duration `hcl:"-,optional" toml:"-"` + ReadTimeoutRaw string `hcl:"read,optional" toml:"read,optional"` + + // WriteTimeout is the maximum duration before timing out + // writes of the response. It is reset whenever a new + // request's header is read. Like ReadTimeout, it does not + // let Handlers make decisions on a per-request basis. + WriteTimeout time.Duration `hcl:"-,optional" toml:"-"` + WriteTimeoutRaw string `hcl:"write,optional" toml:"write,optional"` + + // IdleTimeout is the maximum amount of time to wait for the + // next request when keep-alives are enabled. If IdleTimeout + // is zero, the value of ReadTimeout is used. If both are + // zero, ReadHeaderTimeout is used. + IdleTimeout time.Duration `hcl:"-,optional" toml:"-"` + IdleTimeoutRaw string `hcl:"idle,optional" toml:"idle,optional"` +} + type GpoConfig struct { // Blocks is the number of blocks to track to compute the price oracle Blocks uint64 `hcl:"blocks,optional" toml:"blocks,optional"` @@ -377,6 +410,9 @@ type CacheConfig struct { // Number of block states to keep in memory (default = 128) TriesInMemory uint64 `hcl:"triesinmemory,optional" toml:"triesinmemory,optional"` + // Time after which the Merkle Patricia Trie is stored to disc from memory + TrieTimeout time.Duration `hcl:"-,optional" toml:"-"` + TrieTimeoutRaw string `hcl:"timeout,optional" toml:"timeout,optional"` } type AccountsConfig struct { @@ -411,6 +447,7 @@ func DefaultConfig() *Config { RequiredBlocks: map[string]string{}, LogLevel: "INFO", DataDir: DefaultDataDir(), + Ancient: "", P2P: &P2PConfig{ MaxPeers: 50, MaxPendPeers: 50, @@ -442,7 +479,7 @@ func DefaultConfig() *Config { NoLocals: false, Journal: "transactions.rlp", Rejournal: 1 * time.Hour, - PriceLimit: 1, + PriceLimit: 1, // geth's default PriceBump: 10, AccountSlots: 16, GlobalSlots: 32768, @@ -453,8 +490,8 @@ func DefaultConfig() *Config { Sealer: &SealerConfig{ Enabled: false, Etherbase: "", - GasCeil: 30_000_000, - GasPrice: big.NewInt(1 * params.GWei), + GasCeil: 30_000_000, // geth's default + GasPrice: big.NewInt(1 * params.GWei), // geth's default ExtraData: "", }, Gpo: &GpoConfig{ @@ -490,6 +527,11 @@ func DefaultConfig() *Config { Cors: []string{"localhost"}, VHost: []string{"localhost"}, }, + HttpTimeout: &HttpTimeouts{ + ReadTimeout: 30 * time.Second, + WriteTimeout: 30 * time.Second, + IdleTimeout: 120 * time.Second, + }, }, Ethstats: "", Telemetry: &TelemetryConfig{ @@ -511,7 +553,7 @@ func DefaultConfig() *Config { }, }, Cache: &CacheConfig{ - Cache: 1024, + Cache: 1024, // geth's default (suitable for mumbai) PercDatabase: 50, PercTrie: 15, PercGc: 25, @@ -522,6 +564,7 @@ func DefaultConfig() *Config { Preimages: false, TxLookupLimit: 2350000, TriesInMemory: 128, + TrieTimeout: 60 * time.Minute, }, Accounts: 
&AccountsConfig{ Unlock: []string{}, @@ -581,9 +624,13 @@ func (c *Config) fillTimeDurations() error { td *time.Duration str *string }{ + {"jsonrpc.timeouts.read", &c.JsonRPC.HttpTimeout.ReadTimeout, &c.JsonRPC.HttpTimeout.ReadTimeoutRaw}, + {"jsonrpc.timeouts.write", &c.JsonRPC.HttpTimeout.WriteTimeout, &c.JsonRPC.HttpTimeout.WriteTimeoutRaw}, + {"jsonrpc.timeouts.idle", &c.JsonRPC.HttpTimeout.IdleTimeout, &c.JsonRPC.HttpTimeout.IdleTimeoutRaw}, {"txpool.lifetime", &c.TxPool.LifeTime, &c.TxPool.LifeTimeRaw}, {"txpool.rejournal", &c.TxPool.Rejournal, &c.TxPool.RejournalRaw}, {"cache.rejournal", &c.Cache.Rejournal, &c.Cache.RejournalRaw}, + {"cache.timeout", &c.Cache.TrieTimeout, &c.Cache.TrieTimeoutRaw}, } for _, x := range tds { @@ -641,19 +688,12 @@ func (c *Config) loadChain() error { c.P2P.Discovery.DNS = c.chain.DNS } - // depending on the chain we have different cache values - if c.Chain == "mainnet" { - c.Cache.Cache = 4096 - } else { - c.Cache.Cache = 1024 - } - return nil } //nolint:gocognit func (c *Config) buildEth(stack *node.Node, accountManager *accounts.Manager) (*ethconfig.Config, error) { - dbHandles, err := makeDatabaseHandles() + dbHandles, err := MakeDatabaseHandles() if err != nil { return nil, err } @@ -848,6 +888,7 @@ func (c *Config) buildEth(stack *node.Node, accountManager *accounts.Manager) (* n.NoPrefetch = c.Cache.NoPrefetch n.Preimages = c.Cache.Preimages n.TxLookupLimit = c.Cache.TxLookupLimit + n.TrieTimeout = c.Cache.TrieTimeout } n.RPCGasCap = c.JsonRPC.GasCap @@ -901,6 +942,10 @@ func (c *Config) buildEth(stack *node.Node, accountManager *accounts.Manager) (* n.BorLogs = c.BorLogs n.DatabaseHandles = dbHandles + if c.Ancient != "" { + n.DatabaseFreezer = c.Ancient + } + return &n, nil } @@ -1011,6 +1056,11 @@ func (c *Config) buildNode() (*node.Config, error) { WSPathPrefix: c.JsonRPC.Ws.Prefix, GraphQLCors: c.JsonRPC.Graphql.Cors, GraphQLVirtualHosts: c.JsonRPC.Graphql.VHost, + HTTPTimeouts: rpc.HTTPTimeouts{ + ReadTimeout: c.JsonRPC.HttpTimeout.ReadTimeout, + WriteTimeout: c.JsonRPC.HttpTimeout.WriteTimeout, + IdleTimeout: c.JsonRPC.HttpTimeout.IdleTimeout, + }, } // dev mode @@ -1100,7 +1150,7 @@ func (c *Config) Merge(cc ...*Config) error { return nil } -func makeDatabaseHandles() (int, error) { +func MakeDatabaseHandles() (int, error) { limit, err := fdlimit.Maximum() if err != nil { return -1, err diff --git a/internal/cli/server/config_test.go b/internal/cli/server/config_test.go index 752afc495b..3e6bb76b59 100644 --- a/internal/cli/server/config_test.go +++ b/internal/cli/server/config_test.go @@ -1,7 +1,6 @@ package server import ( - "math/big" "testing" "time" @@ -101,42 +100,6 @@ func TestDefaultDatatypeOverride(t *testing.T) { assert.Equal(t, c0, expected) } -func TestConfigLoadFile(t *testing.T) { - t.Parallel() - - readFile := func(path string) { - config, err := readConfigFile(path) - assert.NoError(t, err) - - assert.Equal(t, config, &Config{ - DataDir: "./data", - P2P: &P2PConfig{ - MaxPeers: 30, - }, - TxPool: &TxPoolConfig{ - LifeTime: 1 * time.Second, - }, - Gpo: &GpoConfig{ - MaxPrice: big.NewInt(100), - }, - Sealer: &SealerConfig{}, - Cache: &CacheConfig{}, - }) - } - - // read file in hcl format - t.Run("hcl", func(t *testing.T) { - t.Parallel() - readFile("./testdata/test.hcl") - }) - - // read file in json format - t.Run("json", func(t *testing.T) { - t.Parallel() - readFile("./testdata/test.json") - }) -} - var dummyEnodeAddr = 
"enode://0cb82b395094ee4a2915e9714894627de9ed8498fb881cec6db7c65e8b9a5bd7f2f25cc84e71e89d0947e51c76e85d0847de848c7782b13c0255247a6758178c@44.232.55.71:30303" func TestConfigBootnodesDefault(t *testing.T) { diff --git a/internal/cli/server/flags.go b/internal/cli/server/flags.go index 9fb8492ff7..e52077da97 100644 --- a/internal/cli/server/flags.go +++ b/internal/cli/server/flags.go @@ -11,15 +11,16 @@ func (c *Command) Flags() *flagset.Flagset { f.StringFlag(&flagset.StringFlag{ Name: "chain", - Usage: "Name of the chain to sync", + Usage: "Name of the chain to sync ('mumbai', 'mainnet') or path to a genesis file", Value: &c.cliConfig.Chain, Default: c.cliConfig.Chain, }) f.StringFlag(&flagset.StringFlag{ - Name: "identity", - Usage: "Name/Identity of the node", - Value: &c.cliConfig.Identity, - Default: c.cliConfig.Identity, + Name: "identity", + Usage: "Name/Identity of the node", + Value: &c.cliConfig.Identity, + Default: c.cliConfig.Identity, + HideDefaultFromDoc: true, }) f.StringFlag(&flagset.StringFlag{ Name: "log-level", @@ -28,14 +29,21 @@ func (c *Command) Flags() *flagset.Flagset { Default: c.cliConfig.LogLevel, }) f.StringFlag(&flagset.StringFlag{ - Name: "datadir", - Usage: "Path of the data directory to store information", - Value: &c.cliConfig.DataDir, - Default: c.cliConfig.DataDir, + Name: "datadir", + Usage: "Path of the data directory to store information", + Value: &c.cliConfig.DataDir, + Default: c.cliConfig.DataDir, + HideDefaultFromDoc: true, + }) + f.StringFlag(&flagset.StringFlag{ + Name: "datadir.ancient", + Usage: "Data directory for ancient chain segments (default = inside chaindata)", + Value: &c.cliConfig.Ancient, + Default: c.cliConfig.Ancient, }) f.StringFlag(&flagset.StringFlag{ Name: "keystore", - Usage: "Path of the directory to store keystores", + Usage: "Path of the directory where keystores are located", Value: &c.cliConfig.KeyStoreDir, }) f.StringFlag(&flagset.StringFlag{ @@ -56,19 +64,20 @@ func (c *Command) Flags() *flagset.Flagset { Default: c.cliConfig.GcMode, }) f.MapStringFlag(&flagset.MapStringFlag{ - Name: "eth.requiredblocks", - Usage: "Comma separated block number-to-hash mappings to require for peering (=)", - Value: &c.cliConfig.RequiredBlocks, + Name: "eth.requiredblocks", + Usage: "Comma separated block number-to-hash mappings to require for peering (=)", + Value: &c.cliConfig.RequiredBlocks, + Default: c.cliConfig.RequiredBlocks, }) f.BoolFlag(&flagset.BoolFlag{ Name: "snapshot", - Usage: `Enables the snapshot-database mode (default = true)`, + Usage: `Enables the snapshot-database mode`, Value: &c.cliConfig.Snapshot, Default: c.cliConfig.Snapshot, }) f.BoolFlag(&flagset.BoolFlag{ Name: "bor.logs", - Usage: `Enables bor log retrieval (default = false)`, + Usage: `Enables bor log retrieval`, Value: &c.cliConfig.BorLogs, Default: c.cliConfig.BorLogs, }) @@ -194,7 +203,7 @@ func (c *Command) Flags() *flagset.Flagset { }) f.StringFlag(&flagset.StringFlag{ Name: "miner.etherbase", - Usage: "Public address for block mining rewards (default = first account)", + Usage: "Public address for block mining rewards", Value: &c.cliConfig.Sealer.Etherbase, Default: c.cliConfig.Sealer.Etherbase, Group: "Sealer", @@ -208,16 +217,17 @@ func (c *Command) Flags() *flagset.Flagset { }) f.Uint64Flag(&flagset.Uint64Flag{ Name: "miner.gaslimit", - Usage: "Target gas ceiling for mined blocks", + Usage: "Target gas ceiling (gas limit) for mined blocks", Value: &c.cliConfig.Sealer.GasCeil, Default: c.cliConfig.Sealer.GasCeil, Group: "Sealer", }) 
f.BigIntFlag(&flagset.BigIntFlag{ - Name: "miner.gasprice", - Usage: "Minimum gas price for mining a transaction", - Value: c.cliConfig.Sealer.GasPrice, - Group: "Sealer", + Name: "miner.gasprice", + Usage: "Minimum gas price for mining a transaction", + Value: c.cliConfig.Sealer.GasPrice, + Group: "Sealer", + Default: c.cliConfig.Sealer.GasPrice, }) // ethstats @@ -242,20 +252,22 @@ func (c *Command) Flags() *flagset.Flagset { Default: c.cliConfig.Gpo.Percentile, }) f.BigIntFlag(&flagset.BigIntFlag{ - Name: "gpo.maxprice", - Usage: "Maximum gas price will be recommended by gpo", - Value: c.cliConfig.Gpo.MaxPrice, + Name: "gpo.maxprice", + Usage: "Maximum gas price will be recommended by gpo", + Value: c.cliConfig.Gpo.MaxPrice, + Default: c.cliConfig.Gpo.MaxPrice, }) f.BigIntFlag(&flagset.BigIntFlag{ - Name: "gpo.ignoreprice", - Usage: "Gas price below which gpo will ignore transactions", - Value: c.cliConfig.Gpo.IgnorePrice, + Name: "gpo.ignoreprice", + Usage: "Gas price below which gpo will ignore transactions", + Value: c.cliConfig.Gpo.IgnorePrice, + Default: c.cliConfig.Gpo.IgnorePrice, }) // cache options f.Uint64Flag(&flagset.Uint64Flag{ Name: "cache", - Usage: "Megabytes of memory allocated to internal caching (default = 4096 mainnet full node)", + Usage: "Megabytes of memory allocated to internal caching", Value: &c.cliConfig.Cache.Cache, Default: c.cliConfig.Cache.Cache, Group: "Cache", @@ -269,7 +281,7 @@ func (c *Command) Flags() *flagset.Flagset { }) f.Uint64Flag(&flagset.Uint64Flag{ Name: "cache.trie", - Usage: "Percentage of cache memory allowance to use for trie caching (default = 15% full mode, 30% archive mode)", + Usage: "Percentage of cache memory allowance to use for trie caching", Value: &c.cliConfig.Cache.PercTrie, Default: c.cliConfig.Cache.PercTrie, Group: "Cache", @@ -290,14 +302,14 @@ func (c *Command) Flags() *flagset.Flagset { }) f.Uint64Flag(&flagset.Uint64Flag{ Name: "cache.gc", - Usage: "Percentage of cache memory allowance to use for trie pruning (default = 25% full mode, 0% archive mode)", + Usage: "Percentage of cache memory allowance to use for trie pruning", Value: &c.cliConfig.Cache.PercGc, Default: c.cliConfig.Cache.PercGc, Group: "Cache", }) f.Uint64Flag(&flagset.Uint64Flag{ Name: "cache.snapshot", - Usage: "Percentage of cache memory allowance to use for snapshot caching (default = 10% full mode, 20% archive mode)", + Usage: "Percentage of cache memory allowance to use for snapshot caching", Value: &c.cliConfig.Cache.PercSnapshot, Default: c.cliConfig.Cache.PercSnapshot, Group: "Cache", @@ -325,7 +337,7 @@ func (c *Command) Flags() *flagset.Flagset { }) f.Uint64Flag(&flagset.Uint64Flag{ Name: "txlookuplimit", - Usage: "Number of recent blocks to maintain transactions index for (default = about 56 days, 0 = entire chain)", + Usage: "Number of recent blocks to maintain transactions index for", Value: &c.cliConfig.Cache.TxLookupLimit, Default: c.cliConfig.Cache.TxLookupLimit, Group: "Cache", @@ -510,7 +522,7 @@ func (c *Command) Flags() *flagset.Flagset { }) f.Uint64Flag(&flagset.Uint64Flag{ Name: "maxpendpeers", - Usage: "Maximum number of pending connection attempts (defaults used if set to 0)", + Usage: "Maximum number of pending connection attempts", Value: &c.cliConfig.P2P.MaxPendPeers, Default: c.cliConfig.P2P.MaxPendPeers, Group: "P2P", @@ -588,10 +600,11 @@ func (c *Command) Flags() *flagset.Flagset { Group: "Telemetry", }) f.MapStringFlag(&flagset.MapStringFlag{ - Name: "metrics.influxdb.tags", - Usage: "Comma-separated InfluxDB tags 
(key/values) attached to all measurements", - Value: &c.cliConfig.Telemetry.InfluxDB.Tags, - Group: "Telemetry", + Name: "metrics.influxdb.tags", + Usage: "Comma-separated InfluxDB tags (key/values) attached to all measurements", + Value: &c.cliConfig.Telemetry.InfluxDB.Tags, + Group: "Telemetry", + Default: c.cliConfig.Telemetry.InfluxDB.Tags, }) f.StringFlag(&flagset.StringFlag{ Name: "metrics.prometheus-addr", diff --git a/internal/cli/server/server.go b/internal/cli/server/server.go index 8d68fd69f0..f0cea4de06 100644 --- a/internal/cli/server/server.go +++ b/internal/cli/server/server.go @@ -259,7 +259,13 @@ func (s *Server) Stop() { func (s *Server) setupMetrics(config *TelemetryConfig, serviceName string) error { // Check the global metrics if they're matching with the provided config if metrics.Enabled != config.Enabled || metrics.EnabledExpensive != config.Expensive { - log.Warn("Metric misconfiguration, some of them might not be visible") + log.Warn( + "Metric misconfiguration, some of them might not be visible", + "metrics", metrics.Enabled, + "config.metrics", config.Enabled, + "expensive", metrics.EnabledExpensive, + "config.expensive", config.Expensive, + ) } // Update the values anyways (for services which don't need immediate attention) diff --git a/internal/cli/server/testdata/test.hcl b/internal/cli/server/testdata/test.hcl deleted file mode 100644 index 44138970fc..0000000000 --- a/internal/cli/server/testdata/test.hcl +++ /dev/null @@ -1,13 +0,0 @@ -datadir = "./data" - -p2p { - maxpeers = 30 -} - -txpool { - lifetime = "1s" -} - -gpo { - maxprice = "100" -} \ No newline at end of file diff --git a/internal/cli/server/testdata/test.json b/internal/cli/server/testdata/test.json deleted file mode 100644 index a08e5aceb1..0000000000 --- a/internal/cli/server/testdata/test.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "datadir": "./data", - "p2p": { - "maxpeers": 30 - }, - "txpool": { - "lifetime": "1s" - }, - "gpo": { - "maxprice": "100" - } -} \ No newline at end of file diff --git a/internal/cli/snapshot.go b/internal/cli/snapshot.go new file mode 100644 index 0000000000..3c8e4ec97d --- /dev/null +++ b/internal/cli/snapshot.go @@ -0,0 +1,183 @@ +// Snapshot related commands + +package cli + +import ( + "strings" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/state/pruner" + "github.com/ethereum/go-ethereum/internal/cli/flagset" + "github.com/ethereum/go-ethereum/internal/cli/server" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/node" + + "github.com/mitchellh/cli" +) + +// SnapshotCommand is the command to group the snapshot commands +type SnapshotCommand struct { + UI cli.Ui +} + +// MarkDown implements cli.MarkDown interface +func (a *SnapshotCommand) MarkDown() string { + items := []string{ + "# snapshot", + "The ```snapshot``` command groups snapshot related actions:", + "- [```snapshot prune-state```](./snapshot_prune-state.md): Prune state databases at the given datadir location.", + } + + return strings.Join(items, "\n\n") +} + +// Help implements the cli.Command interface +func (c *SnapshotCommand) Help() string { + return `Usage: bor snapshot + + This command groups snapshot related actions. 
+ + Prune the state trie: + + $ bor snapshot prune-state` +} + +// Synopsis implements the cli.Command interface +func (c *SnapshotCommand) Synopsis() string { + return "Snapshot related commands" +} + +// Run implements the cli.Command interface +func (c *SnapshotCommand) Run(args []string) int { + return cli.RunResultHelp +} + +type PruneStateCommand struct { + *Meta + + datadirAncient string + cache uint64 + cacheTrie uint64 + cacheTrieJournal string + bloomfilterSize uint64 +} + +// MarkDown implements cli.MarkDown interface +func (c *PruneStateCommand) MarkDown() string { + items := []string{ + "# Prune state", + "The ```bor snapshot prune-state``` command will prune historical state data with the help of the state snapshot. All trie nodes and contract codes that do not belong to the specified version state will be deleted from the database. After pruning, only two version states are available: genesis and the specific one.", + c.Flags().MarkDown(), + } + + return strings.Join(items, "\n\n") +} + +// Help implements the cli.Command interface +func (c *PruneStateCommand) Help() string { + return `Usage: bor snapshot prune-state + + This command will prune state databases at the given datadir location` + c.Flags().Help() +} + +// Synopsis implements the cli.Command interface +func (c *PruneStateCommand) Synopsis() string { + return "Prune state databases" +} + +// Flags: datadir, datadir.ancient, cache.trie.journal, bloomfilter.size +func (c *PruneStateCommand) Flags() *flagset.Flagset { + flags := c.NewFlagSet("prune-state") + + flags.StringFlag(&flagset.StringFlag{ + Name: "datadir.ancient", + Value: &c.datadirAncient, + Usage: "Path of the ancient data directory to store information", + Default: "", + }) + + flags.Uint64Flag(&flagset.Uint64Flag{ + Name: "cache", + Usage: "Megabytes of memory allocated to internal caching", + Value: &c.cache, + Default: 1024.0, + Group: "Cache", + }) + + flags.Uint64Flag(&flagset.Uint64Flag{ + Name: "cache.trie", + Usage: "Percentage of cache memory allowance to use for trie caching", + Value: &c.cacheTrie, + Default: 25, + Group: "Cache", + }) + + flags.StringFlag(&flagset.StringFlag{ + Name: "cache.trie.journal", + Value: &c.cacheTrieJournal, + Usage: "Path of the trie journal directory to store information", + Default: trieCacheJournalPath, + Group: "Cache", + }) + + flags.Uint64Flag(&flagset.Uint64Flag{ + Name: "bloomfilter.size", + Value: &c.bloomfilterSize, + Usage: "Size of the bloom filter", + Default: 2048, + }) + + return flags +} + +// Run implements the cli.Command interface +func (c *PruneStateCommand) Run(args []string) int { + flags := c.Flags() + + if err := flags.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + datadir := c.dataDir + if datadir == "" { + c.UI.Error("datadir is required") + return 1 + } + + // Create the node + node, err := node.New(&node.Config{ + DataDir: datadir, + }) + + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + + dbHandles, err := server.MakeDatabaseHandles() + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + + chaindb, err := node.OpenDatabaseWithFreezer(chaindataPath, int(c.cache), dbHandles, c.datadirAncient, "", false) + + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + + pruner, err := pruner.NewPruner(chaindb, node.ResolvePath(""), node.ResolvePath(c.cacheTrieJournal), c.bloomfilterSize) + if err != nil { + log.Error("Failed to open snapshot tree", "err", err) + return 1 + } + + if err = pruner.Prune(common.Hash{}); err != nil { + log.Error("Failed 
to prune state", "err", err) + return 1 + } + + return 0 +} diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index c1584e5867..7df46b1f33 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -820,20 +820,6 @@ func (s *PublicBlockChainAPI) GetHeaderByHash(ctx context.Context, hash common.H return nil } -// getAuthor: returns the author of the Block -func (s *PublicBlockChainAPI) getAuthor(head *types.Header) *common.Address { - // get author using Author() function from: /consensus/clique/clique.go - // In Production: get author using Author() function from: /consensus/bor/bor.go - author, err := s.b.Engine().Author(head) - // make sure we don't send error to the user, return 0x0 instead - if err != nil { - add := common.HexToAddress("0x0000000000000000000000000000000000000000") - return &add - } - // change the coinbase (0x0) with the miner address - return &author -} - // GetBlockByNumber returns the requested canonical block. // - When blockNr is -1 the chain head is returned. // - When blockNr is -2 the pending chain head is returned. @@ -842,7 +828,6 @@ func (s *PublicBlockChainAPI) getAuthor(head *types.Header) *common.Address { func (s *PublicBlockChainAPI) GetBlockByNumber(ctx context.Context, number rpc.BlockNumber, fullTx bool) (map[string]interface{}, error) { block, err := s.b.BlockByNumber(ctx, number) if block != nil && err == nil { - response, err := s.rpcMarshalBlock(ctx, block, true, fullTx) if err == nil && number == rpc.PendingBlockNumber { // Pending blocks need to nil out a few fields @@ -851,12 +836,6 @@ func (s *PublicBlockChainAPI) GetBlockByNumber(ctx context.Context, number rpc.B } } - if err == nil && number != rpc.PendingBlockNumber { - author := s.getAuthor(block.Header()) - - response["miner"] = author - } - // append marshalled bor transaction if err == nil && response != nil { response = s.appendRPCMarshalBorTransaction(ctx, block, response, fullTx) @@ -875,10 +854,6 @@ func (s *PublicBlockChainAPI) GetBlockByHash(ctx context.Context, hash common.Ha response, err := s.rpcMarshalBlock(ctx, block, true, fullTx) // append marshalled bor transaction if err == nil && response != nil { - author := s.getAuthor(block.Header()) - - response["miner"] = author - return s.appendRPCMarshalBorTransaction(ctx, block, response, fullTx), err } return response, err diff --git a/metrics/metrics.go b/metrics/metrics.go index e54bb3e0d2..aabcf8c628 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -52,6 +52,9 @@ func init() { // check for existence of `config` flag if flag == configFlag && i < len(os.Args)-1 { configFile = strings.TrimLeft(os.Args[i+1], "-") // find the value of flag + } else if len(flag) > 6 && flag[:6] == configFlag { + // Checks for `=` separated flag (e.g. 
config=path) + configFile = strings.TrimLeft(flag[6:], "=") } for _, enabler := range enablerFlags { @@ -99,7 +102,8 @@ func updateMetricsFromConfig(path string) { conf := &CliConfig{} - if _, err := toml.Decode(tomlData, &conf); err != nil || conf == nil { + _, err = toml.Decode(tomlData, &conf) + if err != nil || conf == nil || conf.Telemetry == nil { return } diff --git a/packaging/deb/README.md b/packaging/deb/README.md new file mode 100644 index 0000000000..7e84275f38 --- /dev/null +++ b/packaging/deb/README.md @@ -0,0 +1,23 @@ +# Debian + + + +For Debian packages you will need to add the following layout during the build: + + + +bor/ + DEBIAN/control + DEBIAN/postinst + usr/local/bin/bor + lib/systemd/system/bor.service + +This layout is wrapped up during the package build process. + + +Note: this is still a work in progress. + +TODO: handle removal/purge via dpkg + clean up the control files to list what we want + copyright inclusion + diff --git a/packaging/deb/bor/DEBIAN/changelog b/packaging/deb/bor/DEBIAN/changelog new file mode 100644 index 0000000000..e69de29bb2 diff --git a/packaging/deb/bor/DEBIAN/control b/packaging/deb/bor/DEBIAN/control new file mode 100644 index 0000000000..e69de29bb2 diff --git a/packaging/deb/bor/DEBIAN/postinst b/packaging/deb/bor/DEBIAN/postinst new file mode 100755 index 0000000000..e23f4d6897 --- /dev/null +++ b/packaging/deb/bor/DEBIAN/postinst @@ -0,0 +1,4 @@ +#!/bin/bash +# This is a postinstallation script so the service can be configured and started when requested +# +sudo adduser --disabled-password --disabled-login --shell /usr/sbin/nologin --quiet --system --no-create-home --home /nonexistent bor diff --git a/packaging/deb/bor/DEBIAN/postrm b/packaging/deb/bor/DEBIAN/postrm new file mode 100755 index 0000000000..7602789a01 --- /dev/null +++ b/packaging/deb/bor/DEBIAN/postrm @@ -0,0 +1,6 @@ +#!/bin/bash +# +############### +# Remove bor installs +############## +sudo rm -rf /usr/bin/bor diff --git a/packaging/deb/bor/DEBIAN/prerm b/packaging/deb/bor/DEBIAN/prerm new file mode 100755 index 0000000000..e40aed2c80 --- /dev/null +++ b/packaging/deb/bor/DEBIAN/prerm @@ -0,0 +1,9 @@ +#!/bin/bash +# +# +############## +# Stop bor before removal +############## +#sudo systemctl stop bor.service +############# + diff --git a/packaging/requirements/README.md b/packaging/requirements/README.md new file mode 100644 index 0000000000..48cdce8528 --- /dev/null +++ b/packaging/requirements/README.md @@ -0,0 +1 @@ +placeholder diff --git a/packaging/rpm/TODO b/packaging/rpm/TODO new file mode 100644 index 0000000000..e69de29bb2 diff --git a/packaging/templates/mainnet-v1/archive/config.toml b/packaging/templates/mainnet-v1/archive/config.toml new file mode 100644 index 0000000000..9eaafd3bee --- /dev/null +++ b/packaging/templates/mainnet-v1/archive/config.toml @@ -0,0 +1,135 @@ +chain = "mainnet" +# identity = "node_name" +# log-level = "INFO" +datadir = "/var/lib/bor/data" +# ancient = "" +# keystore = "" +syncmode = "full" +gcmode = "archive" +# snapshot = true +# ethstats = "" + +# ["eth.requiredblocks"] + +[p2p] + maxpeers = 50 + port = 30303 + # maxpendpeers = 50 + # bind = "0.0.0.0" + # nodiscover = false + # nat = "any" + # [p2p.discovery] + # v5disc = false + # bootnodes = [] + # bootnodesv4 = [] + # bootnodesv5 = [] + # static-nodes = [] + # trusted-nodes = [] + # dns = [] + +# [heimdall] + # url = "http://localhost:1317" + # "bor.without" = false + # grpc-address = "" + +[txpool] + nolocals = true + pricelimit = 30000000000 + accountslots = 
16 + globalslots = 32768 + accountqueue = 16 + globalqueue = 32768 + lifetime = "1h30m0s" + # locals = [] + # journal = "" + # rejournal = "1h0m0s" + # pricebump = 10 + +[miner] + gaslimit = 30000000 + gasprice = "30000000000" + # mine = false + # etherbase = "" + # extradata = "" + +[jsonrpc] + ipcpath = "/var/lib/bor/bor.ipc" + # ipcdisable = false + # gascap = 50000000 + # txfeecap = 5.0 + [jsonrpc.http] + enabled = true + port = 8545 + host = "127.0.0.1" + api = ["eth", "net", "web3", "txpool", "bor"] + vhosts = ["*"] + corsdomain = ["*"] + # prefix = "" + [jsonrpc.ws] + enabled = true + port = 8546 + # prefix = "" + # host = "localhost" + # api = ["web3", "net"] + origins = ["*"] + # [jsonrpc.graphql] + # enabled = false + # port = 0 + # prefix = "" + # host = "" + # vhosts = ["*"] + # corsdomain = ["*"] + # [jsonrpc.timeouts] + # read = "30s" + # write = "30s" + # idle = "2m0s" + +[gpo] + # blocks = 20 + # percentile = 60 + # maxprice = "5000000000000" + ignoreprice = "30000000000" + +[telemetry] + metrics = true + # expensive = false + # prometheus-addr = "" + # opencollector-endpoint = "" + # [telemetry.influx] + # influxdb = false + # endpoint = "" + # database = "" + # username = "" + # password = "" + # influxdbv2 = false + # token = "" + # bucket = "" + # organization = "" + # [telemetry.influx.tags] + +[cache] + cache = 4096 + gc = 0 + snapshot = 20 + # database = 50 + trie = 30 + # journal = "triecache" + # rejournal = "1h0m0s" + # noprefetch = false + # preimages = false + # txlookuplimit = 2350000 + # timeout = "1h0m0s" + +# [accounts] + # unlock = [] + # password = "" + # allow-insecure-unlock = false + # lightkdf = false + # disable-bor-wallet = false + +# [grpc] + # addr = ":3131" + +# [developer] + # dev = false + # period = 0 diff --git a/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml b/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml new file mode 100644 index 0000000000..94dd6634f0 --- /dev/null +++ b/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml @@ -0,0 +1,135 @@ +chain = "mainnet" +# identity = "node_name" +# log-level = "INFO" +datadir = "/var/lib/bor/data" +# ancient = "" +# keystore = "" +syncmode = "full" +# gcmode = "full" +# snapshot = true +# ethstats = "" + +# ["eth.requiredblocks"] + +[p2p] + maxpeers = 50 + port = 30303 + # maxpendpeers = 50 + # bind = "0.0.0.0" + # nodiscover = false + # nat = "any" + # [p2p.discovery] + # v5disc = false + # bootnodes = [] + # bootnodesv4 = [] + # bootnodesv5 = [] + # static-nodes = [] + # trusted-nodes = [] + # dns = [] + +# [heimdall] + # url = "http://localhost:1317" + # "bor.without" = false + # grpc-address = "" + +[txpool] + nolocals = true + pricelimit = 30000000000 + accountslots = 16 + globalslots = 32768 + accountqueue = 16 + globalqueue = 32768 + lifetime = "1h30m0s" + # locals = [] + # journal = "" + # rejournal = "1h0m0s" + # pricebump = 10 + +[miner] + gaslimit = 30000000 + gasprice = "30000000000" + # mine = false + # etherbase = "" + # extradata = "" + +[jsonrpc] + ipcpath = "/var/lib/bor/bor.ipc" + # ipcdisable = false + # gascap = 50000000 + # txfeecap = 5.0 + [jsonrpc.http] + enabled = true + port = 8545 + host = "127.0.0.1" + api = ["eth", "net", "web3", "txpool", "bor"] + vhosts = ["*"] + corsdomain = ["*"] + # prefix = "" + # [jsonrpc.ws] + # enabled = false + # port = 8546 + # prefix = "" + # host = "localhost" + # api = ["web3", "net"] + # origins = ["*"] + # [jsonrpc.graphql] + # enabled = false + # port = 0 + # prefix = "" + # host = "" + # vhosts = ["*"] 
+ # corsdomain = ["*"] + # [jsonrpc.timeouts] + # read = "30s" + # write = "30s" + # idle = "2m0s" + +[gpo] + # blocks = 20 + # percentile = 60 + # maxprice = "5000000000000" + ignoreprice = "30000000000" + +[telemetry] + metrics = true + # expensive = false + # prometheus-addr = "" + # opencollector-endpoint = "" + # [telemetry.influx] + # influxdb = false + # endpoint = "" + # database = "" + # username = "" + # password = "" + # influxdbv2 = false + # token = "" + # bucket = "" + # organization = "" + # [telemetry.influx.tags] + +[cache] + cache = 4096 + # gc = 25 + # snapshot = 10 + # database = 50 + # trie = 15 + # journal = "triecache" + # rejournal = "1h0m0s" + # noprefetch = false + # preimages = false + # txlookuplimit = 2350000 + # timeout = "1h0m0s" + +# [accounts] + # unlock = [] + # password = "" + # allow-insecure-unlock = false + # lightkdf = false + # disable-bor-wallet = false + +# [grpc] + # addr = ":3131" + +# [developer] + # dev = false + # period = 0 diff --git a/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml b/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml new file mode 100644 index 0000000000..9c55683c96 --- /dev/null +++ b/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml @@ -0,0 +1,137 @@ +# NOTE: Update and uncomment: `keystore`, `password`, and `unlock` fields. + +chain = "mainnet" +# identity = "node_name" +# log-level = "INFO" +datadir = "/var/lib/bor/data" +# ancient = "" +# keystore = "$BOR_DIR/keystore" +syncmode = "full" +# gcmode = "full" +# snapshot = true +# ethstats = "" + +# ["eth.requiredblocks"] + +[p2p] + maxpeers = 1 + port = 30303 + nodiscover = true + # maxpendpeers = 50 + # bind = "0.0.0.0" + # nat = "any" + # [p2p.discovery] + # v5disc = false + # bootnodes = [] + # bootnodesv4 = [] + # bootnodesv5 = [] + # static-nodes = [] + # trusted-nodes = [] + # dns = [] + +# [heimdall] + # url = "http://localhost:1317" + # "bor.without" = false + # grpc-address = "" + +[txpool] + nolocals = true + pricelimit = 30000000000 + accountslots = 16 + globalslots = 32768 + accountqueue = 16 + globalqueue = 32768 + lifetime = "1h30m0s" + # locals = [] + # journal = "" + # rejournal = "1h0m0s" + # pricebump = 10 + +[miner] + mine = true + gaslimit = 30000000 + gasprice = "30000000000" + # etherbase = "" + # extradata = "" + +[jsonrpc] + ipcpath = "/var/lib/bor/bor.ipc" + # ipcdisable = false + # gascap = 50000000 + # txfeecap = 5.0 + [jsonrpc.http] + enabled = true + port = 8545 + host = "127.0.0.1" + api = ["eth", "net", "web3", "txpool", "bor"] + vhosts = ["*"] + corsdomain = ["*"] + # prefix = "" + # [jsonrpc.ws] + # enabled = false + # port = 8546 + # prefix = "" + # host = "localhost" + # api = ["web3", "net"] + # origins = ["*"] + # [jsonrpc.graphql] + # enabled = false + # port = 0 + # prefix = "" + # host = "" + # vhosts = ["*"] + # corsdomain = ["*"] + # [jsonrpc.timeouts] + # read = "30s" + # write = "30s" + # idle = "2m0s" + +[gpo] + # blocks = 20 + # percentile = 60 + # maxprice = "5000000000000" + ignoreprice = "30000000000" + +[telemetry] + metrics = true + # expensive = false + # prometheus-addr = "" + # opencollector-endpoint = "" + # [telemetry.influx] + # influxdb = false + # endpoint = "" + # database = "" + # username = "" + # password = "" + # influxdbv2 = false + # token = "" + # bucket = "" + # organization = "" + # [telemetry.influx.tags] + +[cache] + cache = 4096 + # gc = 25 + # snapshot = 10 + # database = 50 + # trie = 15 + # journal = "triecache" + # rejournal = "1h0m0s" + # noprefetch = 
false + # preimages = false + # txlookuplimit = 2350000 + # timeout = "1h0m0s" + +[accounts] + allow-insecure-unlock = true + # password = "$BOR_DIR/password.txt" + # unlock = ["$ADDRESS"] + # lightkdf = false + # disable-bor-wallet = false + +# [grpc] + # addr = ":3131" + +# [developer] + # dev = false + # period = 0 diff --git a/packaging/templates/mainnet-v1/without-sentry/bor/config.toml b/packaging/templates/mainnet-v1/without-sentry/bor/config.toml new file mode 100644 index 0000000000..573f1f3be8 --- /dev/null +++ b/packaging/templates/mainnet-v1/without-sentry/bor/config.toml @@ -0,0 +1,137 @@ +# NOTE: Update and uncomment: `keystore`, `password`, and `unlock` fields. + +chain = "mainnet" +# identity = "node_name" +# log-level = "INFO" +datadir = "/var/lib/bor/data" +# ancient = "" +# keystore = "$BOR_DIR/keystore" +syncmode = "full" +# gcmode = "full" +# snapshot = true +# ethstats = "" + +# ["eth.requiredblocks"] + +[p2p] + maxpeers = 50 + port = 30303 + # maxpendpeers = 50 + # bind = "0.0.0.0" + # nodiscover = false + # nat = "any" + # [p2p.discovery] + # v5disc = false + # bootnodes = [] + # bootnodesv4 = [] + # bootnodesv5 = [] + # static-nodes = [] + # trusted-nodes = [] + # dns = [] + +# [heimdall] + # url = "http://localhost:1317" + # "bor.without" = false + # grpc-address = "" + +[txpool] + nolocals = true + pricelimit = 30000000000 + accountslots = 16 + globalslots = 32768 + accountqueue = 16 + globalqueue = 32768 + lifetime = "1h30m0s" + # locals = [] + # journal = "" + # rejournal = "1h0m0s" + # pricebump = 10 + +[miner] + mine = true + gaslimit = 30000000 + gasprice = "30000000000" + # etherbase = "" + # extradata = "" + +[jsonrpc] + ipcpath = "/var/lib/bor/bor.ipc" + # ipcdisable = false + # gascap = 50000000 + # txfeecap = 5.0 + [jsonrpc.http] + enabled = true + port = 8545 + host = "127.0.0.1" + api = ["eth", "net", "web3", "txpool", "bor"] + vhosts = ["*"] + corsdomain = ["*"] + # prefix = "" + # [jsonrpc.ws] + # enabled = false + # port = 8546 + # prefix = "" + # host = "localhost" + # api = ["web3", "net"] + # origins = ["*"] + # [jsonrpc.graphql] + # enabled = false + # port = 0 + # prefix = "" + # host = "" + # vhosts = ["*"] + # corsdomain = ["*"] + # [jsonrpc.timeouts] + # read = "30s" + # write = "30s" + # idle = "2m0s" + +[gpo] +# blocks = 20 +# percentile = 60 +# maxprice = "5000000000000" + ignoreprice = "30000000000" + +[telemetry] + metrics = true + # expensive = false + # prometheus-addr = "" + # opencollector-endpoint = "" + # [telemetry.influx] + # influxdb = false + # endpoint = "" + # database = "" + # username = "" + # password = "" + # influxdbv2 = false + # token = "" + # bucket = "" + # organization = "" + # [telemetry.influx.tags] + +[cache] + cache = 4096 +# gc = 25 +# snapshot = 10 +# database = 50 +# trie = 15 +# journal = "triecache" +# rejournal = "1h0m0s" +# noprefetch = false +# preimages = false +# txlookuplimit = 2350000 +# timeout = "1h0m0s" + +[accounts] + allow-insecure-unlock = true + # password = "$BOR_DIR/password.txt" + # unlock = ["$ADDRESS"] + # lightkdf = false + # disable-bor-wallet = false + +# [grpc] + # addr = ":3131" + +# [developer] + # dev = false + # period = 0 diff --git a/packaging/templates/package_scripts/changelog b/packaging/templates/package_scripts/changelog new file mode 100644 index 0000000000..2395bcaef1 --- /dev/null +++ b/packaging/templates/package_scripts/changelog @@ -0,0 +1,3 @@ +bor (2.10.11) unstable; urgency=low + +-- Polygon Team Mon, 10 Nov 2022 00:37:31 +0100 \ No newline at end of file diff 
--git a/packaging/templates/package_scripts/changelog.profile b/packaging/templates/package_scripts/changelog.profile new file mode 100644 index 0000000000..b84fa22646 --- /dev/null +++ b/packaging/templates/package_scripts/changelog.profile @@ -0,0 +1,3 @@ +bor-profile (2.10.11) unstable; urgency=low + +-- Polygon Team Mon, 10 Nov 2022 00:37:31 +0100 \ No newline at end of file diff --git a/packaging/templates/package_scripts/control b/packaging/templates/package_scripts/control new file mode 100644 index 0000000000..cb62165a5e --- /dev/null +++ b/packaging/templates/package_scripts/control @@ -0,0 +1,12 @@ +Source: bor +Version: 0.3.3 +Section: develop +Priority: standard +Maintainer: Polygon +Build-Depends: debhelper-compat (= 13) +Package: bor +Rules-Requires-Root: yes +Architecture: amd64 +Multi-Arch: foreign +Depends: +Description: This is the bor package from Polygon Technology. diff --git a/packaging/templates/package_scripts/control.arm64 b/packaging/templates/package_scripts/control.arm64 new file mode 100644 index 0000000000..56276cb43a --- /dev/null +++ b/packaging/templates/package_scripts/control.arm64 @@ -0,0 +1,13 @@ +Source: bor +Version: 0.3.3 +Section: develop +Priority: standard +Maintainer: Polygon +Build-Depends: debhelper-compat (= 13) +Rules-Requires-Root: yes +Package: bor +Architecture: arm64 +Multi-Arch: foreign +Depends: +Description: This is the bor package from Polygon Technology. + diff --git a/packaging/templates/package_scripts/control.profile.amd64 b/packaging/templates/package_scripts/control.profile.amd64 new file mode 100644 index 0000000000..4ddd8424ff --- /dev/null +++ b/packaging/templates/package_scripts/control.profile.amd64 @@ -0,0 +1,14 @@ +Source: bor-profile +Version: 0.3.3 +Section: develop +Priority: standard +Maintainer: Polygon +Build-Depends: debhelper-compat (= 13) +Rules-Requires-Root: yes +Package: bor-profile +Architecture: amd64 +Multi-Arch: foreign +Depends: +Description: This is the bor package from Polygon Technology. + + diff --git a/packaging/templates/package_scripts/control.profile.arm64 b/packaging/templates/package_scripts/control.profile.arm64 new file mode 100644 index 0000000000..9f9301c925 --- /dev/null +++ b/packaging/templates/package_scripts/control.profile.arm64 @@ -0,0 +1,12 @@ +Source: bor-profile +Version: 0.3.3 +Section: develop +Priority: standard +Maintainer: Polygon +Build-Depends: debhelper-compat (= 13) +Rules-Requires-Root: yes +Package: bor-profile +Architecture: arm64 +Multi-Arch: foreign +Depends: +Description: This is the bor package from Polygon Technology. diff --git a/packaging/templates/package_scripts/control.validator b/packaging/templates/package_scripts/control.validator new file mode 100644 index 0000000000..d43250c891 --- /dev/null +++ b/packaging/templates/package_scripts/control.validator @@ -0,0 +1,12 @@ +Source: bor-profile +Version: 0.3.3 +Section: develop +Priority: standard +Maintainer: Polygon +Build-Depends: debhelper-compat (= 13) +Package: bor-profile +Rules-Requires-Root: yes +Architecture: amd64 +Multi-Arch: foreign +Depends: +Description: This is the bor package from Polygon Technology. 
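For orientation, a minimal, hypothetical sketch of how the layout from packaging/deb/README.md plus these control and script templates could be assembled into a local .deb with dpkg-deb. The bor/ staging directory and the path to the built binary are illustrative and not part of this patch; note that the README stages the binary under usr/local/bin while the bundled bor.service ExecStart points at /usr/bin/bor, so pick one location consistently:

  $ mkdir -p bor/DEBIAN bor/usr/local/bin bor/lib/systemd/system
  $ cp <path-to-built-bor-binary> bor/usr/local/bin/bor
  $ cp packaging/templates/package_scripts/control bor/DEBIAN/control
  $ cp packaging/templates/package_scripts/postinst bor/DEBIAN/postinst
  $ cp packaging/templates/systemd/bor.service bor/lib/systemd/system/bor.service
  $ dpkg-deb --build bor

dpkg-deb then emits bor.deb containing the binary, the systemd unit, and the maintainer scripts staged above.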
diff --git a/packaging/templates/package_scripts/control.validator.arm64 b/packaging/templates/package_scripts/control.validator.arm64 new file mode 100644 index 0000000000..5a50f8cb39 --- /dev/null +++ b/packaging/templates/package_scripts/control.validator.arm64 @@ -0,0 +1,13 @@ +Source: bor-profile +Version: 0.3.3 +Section: develop +Priority: standard +Maintainer: Polygon +Build-Depends: debhelper-compat (= 13) +Rules-Requires-Root: yes +Package: bor-profile +Architecture: arm64 +Multi-Arch: foreign +Depends: +Description: This is the bor package from Polygon Technology. + diff --git a/packaging/templates/package_scripts/postinst b/packaging/templates/package_scripts/postinst new file mode 100755 index 0000000000..7272b4b1aa --- /dev/null +++ b/packaging/templates/package_scripts/postinst @@ -0,0 +1,12 @@ +#!/bin/bash +# This is a postinstallation script so the service can be configured and started when requested +# +sudo adduser --disabled-password --disabled-login --shell /usr/sbin/nologin --quiet --system --no-create-home --home /nonexistent bor +if [ -d "/var/lib/bor" ] +then + echo "Directory /var/lib/bor exists." +else + mkdir -p /var/lib/bor + sudo chown -R bor /var/lib/bor +fi +sudo systemctl daemon-reload diff --git a/packaging/templates/package_scripts/postinst.profile b/packaging/templates/package_scripts/postinst.profile new file mode 100755 index 0000000000..e9a497906d --- /dev/null +++ b/packaging/templates/package_scripts/postinst.profile @@ -0,0 +1,11 @@ +#!/bin/bash +# This is a postinstallation script so the service can be configured and started when requested +# +if [ -d "/var/lib/bor" ] +then + echo "Directory /var/lib/bor exists." +else + mkdir -p /var/lib/bor + sudo chown -R bor /var/lib/bor +fi +sudo systemctl daemon-reload diff --git a/packaging/templates/package_scripts/postrm b/packaging/templates/package_scripts/postrm new file mode 100755 index 0000000000..55bbb87a4f --- /dev/null +++ b/packaging/templates/package_scripts/postrm @@ -0,0 +1,8 @@ +#!/bin/bash +# +############### +# Remove bor installs +############## +sudo rm -rf /var/lib/bor/config.toml +sudo rm -rf /lib/systemd/system/bor.service +sudo systemctl daemon-reload diff --git a/packaging/templates/package_scripts/preinst b/packaging/templates/package_scripts/preinst new file mode 100755 index 0000000000..b9efb0091d --- /dev/null +++ b/packaging/templates/package_scripts/preinst @@ -0,0 +1,7 @@ +#!/bin/bash +# +################# +# Stop existing bor in case of upgrade +################ +#sudo systemctl stop bor.service +###################### diff --git a/packaging/templates/package_scripts/prerm b/packaging/templates/package_scripts/prerm new file mode 100755 index 0000000000..b2b2b4fce9 --- /dev/null +++ b/packaging/templates/package_scripts/prerm @@ -0,0 +1,8 @@ +#!/bin/bash +# +# +############## +# Stop bor before removal +############## +#sudo systemctl stop bor.service +############# \ No newline at end of file diff --git a/packaging/templates/systemd/bor.service b/packaging/templates/systemd/bor.service new file mode 100644 index 0000000000..b92bdd3cc5 --- /dev/null +++ b/packaging/templates/systemd/bor.service @@ -0,0 +1,16 @@ +[Unit] + Description=bor + StartLimitIntervalSec=500 + StartLimitBurst=5 + +[Service] + Restart=on-failure + RestartSec=5s + ExecStart=/usr/bin/bor server -config "/var/lib/bor/config.toml" + Type=simple + KillSignal=SIGINT + User=bor + TimeoutStopSec=120 + +[Install] + WantedBy=multi-user.target diff --git a/packaging/templates/testnet-v4/archive/config.toml 
b/packaging/templates/testnet-v4/archive/config.toml new file mode 100644 index 0000000000..1762fdf117 --- /dev/null +++ b/packaging/templates/testnet-v4/archive/config.toml @@ -0,0 +1,135 @@ +chain = "mumbai" +# identity = "node_name" +# log-level = "INFO" +datadir = "/var/lib/bor/data" +# ancient = "" +# keystore = "" +syncmode = "full" +gcmode = "archive" +# snapshot = true +# ethstats = "" + +# ["eth.requiredblocks"] + +[p2p] + maxpeers = 50 + port = 30303 + # maxpendpeers = 50 + # bind = "0.0.0.0" + # nodiscover = false + # nat = "any" + # [p2p.discovery] + # v5disc = false + # bootnodes = [] + # bootnodesv4 = [] + # bootnodesv5 = [] + # static-nodes = [] + # trusted-nodes = [] + # dns = [] + +# [heimdall] + # url = "http://localhost:1317" + # "bor.without" = false + # grpc-address = "" + +[txpool] + nolocals = true + accountslots = 16 + globalslots = 131072 + accountqueue = 64 + globalqueue = 131072 + lifetime = "1h30m0s" + # locals = [] + # journal = "" + # rejournal = "1h0m0s" + # pricelimit = 1 + # pricebump = 10 + +[miner] + gaslimit = 30000000 + # gasprice = "1000000000" + # mine = false + # etherbase = "" + # extradata = "" + +[jsonrpc] + ipcpath = "/var/lib/bor/bor.ipc" + # ipcdisable = false + # gascap = 50000000 + # txfeecap = 5.0 + [jsonrpc.http] + enabled = true + port = 8545 + host = "0.0.0.0" + api = ["eth", "net", "web3", "txpool", "bor"] + vhosts = ["*"] + corsdomain = ["*"] + # prefix = "" + [jsonrpc.ws] + enabled = true + port = 8546 + # prefix = "" + # host = "localhost" + # api = ["web3", "net"] + origins = ["*"] + # [jsonrpc.graphql] + # enabled = false + # port = 0 + # prefix = "" + # host = "" + # vhosts = ["*"] + # corsdomain = ["*"] + # [jsonrpc.timeouts] + # read = "30s" + # write = "30s" + # idle = "2m0s" + +# [gpo] + # blocks = 20 + # percentile = 60 + # maxprice = "5000000000000" + # ignoreprice = "2" + +[telemetry] + metrics = true + # expensive = false + # prometheus-addr = "" + # opencollector-endpoint = "" + # [telemetry.influx] + # influxdb = false + # endpoint = "" + # database = "" + # username = "" + # password = "" + # influxdbv2 = false + # token = "" + # bucket = "" + # organization = "" + # [telemetry.influx.tags] + +[cache] + # cache = 1024 + gc = 0 + snapshot = 20 + # database = 50 + trie = 30 + # journal = "triecache" + # rejournal = "1h0m0s" + # noprefetch = false + # preimages = false + # txlookuplimit = 2350000 + # timeout = "1h0m0s" + +# [accounts] + # unlock = [] + # password = "" + # allow-insecure-unlock = false + # lightkdf = false + # disable-bor-wallet = false + +# [grpc] + # addr = ":3131" + +# [developer] + # dev = false + # period = 0 diff --git a/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml b/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml new file mode 100644 index 0000000000..ae191cec2c --- /dev/null +++ b/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml @@ -0,0 +1,135 @@ +chain = "mumbai" +# identity = "node_name" +# log-level = "INFO" +datadir = "/var/lib/bor/data" +# ancient = "" +# keystore = "" +syncmode = "full" +# gcmode = "full" +# snapshot = true +# ethstats = "" + +# ["eth.requiredblocks"] + +[p2p] + maxpeers = 50 + port = 30303 + # maxpendpeers = 50 + # bind = "0.0.0.0" + # nodiscover = false + # nat = "any" + # [p2p.discovery] + # v5disc = false + # bootnodes = [] + # bootnodesv4 = [] + # bootnodesv5 = [] + # static-nodes = [] + # trusted-nodes = [] + # dns = [] + +# [heimdall] + # url = "http://localhost:1317" + # "bor.without" = false + # grpc-address = "" + +[txpool] + 
nolocals = true + accountslots = 16 + globalslots = 131072 + accountqueue = 64 + globalqueue = 131072 + lifetime = "1h30m0s" + # locals = [] + # journal = "" + # rejournal = "1h0m0s" + # pricelimit = 1 + # pricebump = 10 + +[miner] + gaslimit = 30000000 + # gasprice = "1000000000" + # mine = false + # etherbase = "" + # extradata = "" + +[jsonrpc] + ipcpath = "/var/lib/bor/bor.ipc" + # ipcdisable = false + # gascap = 50000000 + # txfeecap = 5.0 + [jsonrpc.http] + enabled = true + port = 8545 + host = "0.0.0.0" + api = ["eth", "net", "web3", "txpool", "bor"] + vhosts = ["*"] + corsdomain = ["*"] + # prefix = "" + # [jsonrpc.ws] + # enabled = false + # port = 8546 + # prefix = "" + # host = "localhost" + # api = ["web3", "net"] + # origins = ["*"] + # [jsonrpc.graphql] + # enabled = false + # port = 0 + # prefix = "" + # host = "" + # vhosts = ["*"] + # corsdomain = ["*"] + # [jsonrpc.timeouts] + # read = "30s" + # write = "30s" + # idle = "2m0s" + +# [gpo] + # blocks = 20 + # percentile = 60 + # maxprice = "5000000000000" + # ignoreprice = "2" + +[telemetry] + metrics = true + # expensive = false + # prometheus-addr = "" + # opencollector-endpoint = "" + # [telemetry.influx] + # influxdb = false + # endpoint = "" + # database = "" + # username = "" + # password = "" + # influxdbv2 = false + # token = "" + # bucket = "" + # organization = "" + # [telemetry.influx.tags] + +# [cache] + # cache = 1024 + # gc = 25 + # snapshot = 10 + # database = 50 + # trie = 15 + # journal = "triecache" + # rejournal = "1h0m0s" + # noprefetch = false + # preimages = false + # txlookuplimit = 2350000 + # timeout = "1h0m0s" + +# [accounts] + # unlock = [] + # password = "" + # allow-insecure-unlock = false + # lightkdf = false + # disable-bor-wallet = false + +# [grpc] + # addr = ":3131" + +# [developer] + # dev = false + # period = 0 diff --git a/packaging/templates/testnet-v4/sentry/validator/bor/config.toml b/packaging/templates/testnet-v4/sentry/validator/bor/config.toml new file mode 100644 index 0000000000..b441cc137d --- /dev/null +++ b/packaging/templates/testnet-v4/sentry/validator/bor/config.toml @@ -0,0 +1,137 @@ +# NOTE: Update and uncomment: `keystore`, `password`, and `unlock` fields. 
+ +chain = "mumbai" +# identity = "node_name" +# log-level = "INFO" +datadir = "/var/lib/bor/data" +# ancient = "" +# keystore = "$BOR_DIR/keystore" +syncmode = "full" +# gcmode = "full" +# snapshot = true +# ethstats = "" + +# ["eth.requiredblocks"] + +[p2p] + maxpeers = 1 + port = 30303 + nodiscover = true + # maxpendpeers = 50 + # bind = "0.0.0.0" + # nat = "any" + # [p2p.discovery] + # v5disc = false + # bootnodes = [] + # bootnodesv4 = [] + # bootnodesv5 = [] + # static-nodes = [] + # trusted-nodes = [] + # dns = [] + +# [heimdall] + # url = "http://localhost:1317" + # "bor.without" = false + # grpc-address = "" + +[txpool] + nolocals = true + accountslots = 16 + globalslots = 131072 + accountqueue = 64 + globalqueue = 131072 + lifetime = "1h30m0s" + # locals = [] + # journal = "" + # rejournal = "1h0m0s" + # pricelimit = 1 + # pricebump = 10 + +[miner] + mine = true + gaslimit = 30000000 + # gasprice = "1000000000" + # etherbase = "" + # extradata = "" + +[jsonrpc] + ipcpath = "/var/lib/bor/bor.ipc" + # ipcdisable = false + # gascap = 50000000 + # txfeecap = 5.0 + [jsonrpc.http] + enabled = true + port = 8545 + host = "0.0.0.0" + api = ["eth", "net", "web3", "txpool", "bor"] + vhosts = ["*"] + corsdomain = ["*"] + # prefix = "" + # [jsonrpc.ws] + # enabled = false + # port = 8546 + # prefix = "" + # host = "localhost" + # api = ["web3", "net"] + # origins = ["*"] + # [jsonrpc.graphql] + # enabled = false + # port = 0 + # prefix = "" + # host = "" + # vhosts = ["*"] + # corsdomain = ["*"] + # [jsonrpc.timeouts] + # read = "30s" + # write = "30s" + # idle = "2m0s" + +# [gpo] + # blocks = 20 + # percentile = 60 + # maxprice = "5000000000000" + # ignoreprice = "2" + +[telemetry] + metrics = true + # expensive = false + # prometheus-addr = "" + # opencollector-endpoint = "" + # [telemetry.influx] + # influxdb = false + # endpoint = "" + # database = "" + # username = "" + # password = "" + # influxdbv2 = false + # token = "" + # bucket = "" + # organization = "" + # [telemetry.influx.tags] + +# [cache] + # cache = 1024 + # gc = 25 + # snapshot = 10 + # database = 50 + # trie = 15 + # journal = "triecache" + # rejournal = "1h0m0s" + # noprefetch = false + # preimages = false + # txlookuplimit = 2350000 + # timeout = "1h0m0s" + +[accounts] + allow-insecure-unlock = true + # password = "$BOR_DIR/password.txt" + # unlock = ["$ADDRESS"] + # lightkdf = false + # disable-bor-wallet = false + +# [grpc] + # addr = ":3131" + +# [developer] + # dev = false + # period = 0 diff --git a/packaging/templates/testnet-v4/without-sentry/bor/config.toml b/packaging/templates/testnet-v4/without-sentry/bor/config.toml new file mode 100644 index 0000000000..05a254e184 --- /dev/null +++ b/packaging/templates/testnet-v4/without-sentry/bor/config.toml @@ -0,0 +1,137 @@ +# NOTE: Update and uncomment: `keystore`, `password`, and `unlock` fields. 
+ +chain = "mumbai" +# identity = "node_name" +# log-level = "INFO" +datadir = "/var/lib/bor/data" +# ancient = "" +# keystore = "$BOR_DIR/keystore" +syncmode = "full" +# gcmode = "full" +# snapshot = true +# ethstats = "" + +# ["eth.requiredblocks"] + +[p2p] + maxpeers = 50 + port = 30303 + # maxpendpeers = 50 + # bind = "0.0.0.0" + # nodiscover = false + # nat = "any" + # [p2p.discovery] + # v5disc = false + # bootnodes = [] + # bootnodesv4 = [] + # bootnodesv5 = [] + # static-nodes = [] + # trusted-nodes = [] + # dns = [] + +# [heimdall] + # url = "http://localhost:1317" + # "bor.without" = false + # grpc-address = "" + +[txpool] + nolocals = true + accountslots = 16 + globalslots = 131072 + accountqueue = 64 + globalqueue = 131072 + lifetime = "1h30m0s" + # locals = [] + # journal = "" + # rejournal = "1h0m0s" + # pricelimit = 1 + # pricebump = 10 + +[miner] + mine = true + gaslimit = 30000000 + # gasprice = "1000000000" + # etherbase = "" + # extradata = "" + +[jsonrpc] + ipcpath = "/var/lib/bor/bor.ipc" + # ipcdisable = false + # gascap = 50000000 + # txfeecap = 5.0 + [jsonrpc.http] + enabled = true + port = 8545 + host = "0.0.0.0" + api = ["eth", "net", "web3", "txpool", "bor"] + vhosts = ["*"] + corsdomain = ["*"] + # prefix = "" + # [jsonrpc.ws] + # enabled = false + # port = 8546 + # prefix = "" + # host = "localhost" + # api = ["web3", "net"] + # origins = ["*"] + # [jsonrpc.graphql] + # enabled = false + # port = 0 + # prefix = "" + # host = "" + # vhosts = ["*"] + # corsdomain = ["*"] + # [jsonrpc.timeouts] + # read = "30s" + # write = "30s" + # idle = "2m0s" + +# [gpo] +# blocks = 20 +# percentile = 60 +# maxprice = "5000000000000" +# ignoreprice = "2" + +[telemetry] + metrics = true + # expensive = false + # prometheus-addr = "" + # opencollector-endpoint = "" + # [telemetry.influx] + # influxdb = false + # endpoint = "" + # database = "" + # username = "" + # password = "" + # influxdbv2 = false + # token = "" + # bucket = "" + # organization = "" + # [telemetry.influx.tags] + +# [cache] +# cache = 1024 +# gc = 25 +# snapshot = 10 +# database = 50 +# trie = 15 +# journal = "triecache" +# rejournal = "1h0m0s" +# noprefetch = false +# preimages = false +# txlookuplimit = 2350000 +# timeout = "1h0m0s" + +[accounts] + allow-insecure-unlock = true + # password = "$BOR_DIR/password.txt" + # unlock = ["$ADDRESS"] + # lightkdf = false + # disable-bor-wallet = false + +# [grpc] + # addr = ":3131" + +# [developer] + # dev = false + # period = 0 diff --git a/params/config.go b/params/config.go index d97d6957fa..94729224bb 100644 --- a/params/config.go +++ b/params/config.go @@ -404,14 +404,17 @@ var ( LondonBlock: big.NewInt(23850000), Bor: &BorConfig{ JaipurBlock: big.NewInt(23850000), + DelhiBlock: big.NewInt(38189056), Period: map[string]uint64{ "0": 2, }, ProducerDelay: map[string]uint64{ - "0": 6, + "0": 6, + "38189056": 4, }, Sprint: map[string]uint64{ - "0": 64, + "0": 64, + "38189056": 16, }, BackupMultiplier: map[string]uint64{ "0": 2, diff --git a/params/version.go b/params/version.go index 64b58283bb..199e49095f 100644 --- a/params/version.go +++ b/params/version.go @@ -23,8 +23,8 @@ import ( const ( VersionMajor = 0 // Major version component of the current release VersionMinor = 3 // Minor version component of the current release - VersionPatch = 1 // Patch version component of the current release - VersionMeta = "mumbai" // Version metadata to append to the version string + VersionPatch = 3 // Patch version component of the current release + VersionMeta = "stable" // 
Version metadata to append to the version string ) // Version holds the textual version string. diff --git a/scripts/getconfig.go index 817125b1e0..09026a2479 100644 --- a/scripts/getconfig.go +++ b/scripts/getconfig.go @@ -361,6 +361,34 @@ func writeTempStaticTrustedTOML(path string) { log.Fatal(err) } } + + if data.Has("Node.HTTPTimeouts.ReadTimeout") { + err = os.WriteFile("./tempHTTPTimeoutsReadTimeout.toml", []byte(data.Get("Node.HTTPTimeouts.ReadTimeout").(string)), 0600) + if err != nil { + log.Fatal(err) + } + } + + if data.Has("Node.HTTPTimeouts.WriteTimeout") { + err = os.WriteFile("./tempHTTPTimeoutsWriteTimeout.toml", []byte(data.Get("Node.HTTPTimeouts.WriteTimeout").(string)), 0600) + if err != nil { + log.Fatal(err) + } + } + + if data.Has("Node.HTTPTimeouts.IdleTimeout") { + err = os.WriteFile("./tempHTTPTimeoutsIdleTimeout.toml", []byte(data.Get("Node.HTTPTimeouts.IdleTimeout").(string)), 0600) + if err != nil { + log.Fatal(err) + } + } + + if data.Has("Eth.TrieTimeout") { + err = os.WriteFile("./tempHTTPTimeoutsTrieTimeout.toml", []byte(data.Get("Eth.TrieTimeout").(string)), 0600) + if err != nil { + log.Fatal(err) + } + } } func getStaticTrustedNodes(args []string) { @@ -379,7 +407,7 @@ fmt.Println("only TOML config file is supported through CLI") } } else { - path := "~/.bor/data/bor/static-nodes.json" + path := "./static-nodes.json" if !checkFileExists(path) { return } @@ -584,7 +612,7 @@ func commentFlags(path string, updatedArgs []string) { flag = strconv.Itoa(passwordFlag) + "-" + flag } - if flag != "static-nodes" && flag != "trusted-nodes" { + if flag != "static-nodes" && flag != "trusted-nodes" && flag != "read" && flag != "write" && flag != "idle" && flag != "timeout" { flag = nameTagMap[flag] tempFlag := false diff --git a/scripts/getconfig.sh b/scripts/getconfig.sh index a2971c4f12..d00bf35ec8 100755 --- a/scripts/getconfig.sh +++ b/scripts/getconfig.sh @@ -24,6 +24,14 @@ then fi read -p "* Your validator address (e.g. 0xca67a8D767e45056DC92384b488E9Af654d78DE2), or press Enter to skip if running a sentry node: " ADD +if [[ -f $HOME/.bor/data/bor/static-nodes.json ]] +then +cp $HOME/.bor/data/bor/static-nodes.json ./static-nodes.json +else +read -p "* You don't have a '~/.bor/data/bor/static-nodes.json' file. 
If you want to use static nodes, enter the path to 'static-nodes.json' here (press Enter to skip): " STAT +if [[ -f $STAT ]]; then cp $STAT ./static-nodes.json; fi +fi + printf "\nThank you, your inputs are:\n" echo "Path to start.sh: "$startPath echo "Address: "$ADD @@ -104,6 +112,54 @@ else echo "neither JSON nor TOML TrustedNodes found" fi +if [[ -f ./tempHTTPTimeoutsReadTimeout.toml ]] +then + echo "HTTPTimeouts.ReadTimeout found" + read=$(head -1 ./tempHTTPTimeoutsReadTimeout.toml) + shopt -s nocasematch; if [[ "$OS" == "darwin"* ]]; then + sed -i '' "s%read = \"30s\"%read = \"${read}\"%" $confPath + else + sed -i "s%read = \"30s\"%read = \"${read}\"%" $confPath + fi + rm ./tempHTTPTimeoutsReadTimeout.toml +fi + +if [[ -f ./tempHTTPTimeoutsWriteTimeout.toml ]] +then + echo "HTTPTimeouts.WriteTimeout found" + write=$(head -1 ./tempHTTPTimeoutsWriteTimeout.toml) + shopt -s nocasematch; if [[ "$OS" == "darwin"* ]]; then + sed -i '' "s%write = \"30s\"%write = \"${write}\"%" $confPath + else + sed -i "s%write = \"30s\"%write = \"${write}\"%" $confPath + fi + rm ./tempHTTPTimeoutsWriteTimeout.toml +fi + +if [[ -f ./tempHTTPTimeoutsIdleTimeout.toml ]] +then + echo "HTTPTimeouts.IdleTimeout found" + idle=$(head -1 ./tempHTTPTimeoutsIdleTimeout.toml) + shopt -s nocasematch; if [[ "$OS" == "darwin"* ]]; then + sed -i '' "s%idle = \"2m0s\"%idle = \"${idle}\"%" $confPath + else + sed -i "s%idle = \"2m0s\"%idle = \"${idle}\"%" $confPath + fi + rm ./tempHTTPTimeoutsIdleTimeout.toml +fi + +if [[ -f ./tempHTTPTimeoutsTrieTimeout.toml ]] +then + echo "Eth.TrieTimeout found" + timeout=$(head -1 ./tempHTTPTimeoutsTrieTimeout.toml) + shopt -s nocasematch; if [[ "$OS" == "darwin"* ]]; then + sed -i '' "s%timeout = \"1h0m0s\"%timeout = \"${timeout}\"%" $confPath + else + sed -i "s%timeout = \"1h0m0s\"%timeout = \"${timeout}\"%" $confPath + fi + rm ./tempHTTPTimeoutsTrieTimeout.toml +fi + printf "\n" # comment flags in $configPath that were not passed through $startPath @@ -113,4 +169,9 @@ chmod +x $tmpDir/3305fe263dd4a999d58f96deb064e21bb70123d9.sh $tmpDir/3305fe263dd4a999d58f96deb064e21bb70123d9.sh $ADD rm $tmpDir/3305fe263dd4a999d58f96deb064e21bb70123d9.sh +if [[ -f $HOME/.bor/data/bor/static-nodes.json ]] +then +rm ./static-nodes.json +fi + exit 0
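For reference, a hedged usage sketch of the updated migration script (the invocation path is illustrative; the script is interactive):

  $ ./scripts/getconfig.sh

With these changes the script copies ~/.bor/data/bor/static-nodes.json into the working directory when it exists (otherwise it prompts for an alternative static-nodes.json path), migrates Node.HTTPTimeouts.ReadTimeout/WriteTimeout/IdleTimeout and Eth.TrieTimeout from the legacy TOML into the generated config via the temp*.toml files, and removes those temp files afterwards. Because the read, write, idle, and timeout keys are now excluded from automatic commenting in commentFlags, the migrated values are preserved in the new config.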