diff --git a/.github/workflows/docker-build-scan.yaml b/.github/workflows/docker-build-scan.yaml index 30ad5196c7fec..f6a84fc57d38a 100644 --- a/.github/workflows/docker-build-scan.yaml +++ b/.github/workflows/docker-build-scan.yaml @@ -1,92 +1,89 @@ name: Docker Build Scan on: + pull_request: + branches: + - 'master' + - 'celo*' workflow_dispatch: jobs: - Build-Scan-Container-op-ufm: - uses: celo-org/reusable-workflows/.github/workflows/container-cicd-local.yaml@v1.11.2 - with: - dockerfile: op-ufm/Dockerfile - - Build-Scan-Container-ops-bedrock-l1: - uses: celo-org/reusable-workflows/.github/workflows/container-cicd-local.yaml@v1.11.2 - with: - dockerfile: ops-bedrock/Dockerfile.l1 - context: ops-bedrock - - Build-Scan-Container-ops-bedrock-l2: - uses: celo-org/reusable-workflows/.github/workflows/container-cicd-local.yaml@v1.11.2 - with: - dockerfile: ops-bedrock/Dockerfile.l2 - context: ops-bedrock - - Build-Scan-Container-indexer: - uses: celo-org/reusable-workflows/.github/workflows/container-cicd-local.yaml@v1.11.2 - with: - dockerfile: indexer/Dockerfile - - Build-Scan-Container-op-heartbeat: - uses: celo-org/reusable-workflows/.github/workflows/container-cicd-local.yaml@v1.11.2 - with: - dockerfile: op-heartbeat/Dockerfile - - Build-Scan-Container-op-exporter: - uses: celo-org/reusable-workflows/.github/workflows/container-cicd-local.yaml@v1.11.2 - with: - dockerfile: op-exporter/Dockerfile - - Build-Scan-Container-op-program: - uses: celo-org/reusable-workflows/.github/workflows/container-cicd-local.yaml@v1.11.2 - with: - dockerfile: op-program/Dockerfile - - Build-Scan-Container-ops-bedrock: - uses: celo-org/reusable-workflows/.github/workflows/container-cicd-local.yaml@v1.11.2 - with: - dockerfile: ops-bedrock/Dockerfile.stateviz - - Build-Scan-Container-ci-builder: - uses: celo-org/reusable-workflows/.github/workflows/container-cicd-local.yaml@v1.11.2 - with: - dockerfile: ops/docker/ci-builder/Dockerfile - - Build-Scan-Container-proxyd: - uses: celo-org/reusable-workflows/.github/workflows/container-cicd-local.yaml@v1.11.2 - with: - dockerfile: proxyd/Dockerfile - - Build-Scan-Container-op-node: - uses: celo-org/reusable-workflows/.github/workflows/container-cicd-local.yaml@v1.11.2 - with: - dockerfile: op-node/Dockerfile - - Build-Scan-Container-op-batcher: - uses: celo-org/reusable-workflows/.github/workflows/container-cicd-local.yaml@v1.11.2 - with: - dockerfile: op-batcher/Dockerfile - - Build-Scan-Container-indexer-ui: - uses: celo-org/reusable-workflows/.github/workflows/container-cicd-local.yaml@v1.11.2 - with: - dockerfile: indexer/ui/Dockerfile - - Build-Scan-Container-op-proposer: - uses: celo-org/reusable-workflows/.github/workflows/container-cicd-local.yaml@v1.11.2 - with: - dockerfile: op-proposer/Dockerfile - - Build-Scan-Container-op-challenger: - uses: celo-org/reusable-workflows/.github/workflows/container-cicd-local.yaml@v1.11.2 - with: - dockerfile: op-challenger/Dockerfile - - Build-Scan-Container-endpoint-monitor: - uses: celo-org/reusable-workflows/.github/workflows/container-cicd-local.yaml@v1.11.2 - with: - dockerfile: endpoint-monitor/Dockerfile - - Build-Scan-Container-opwheel: - uses: celo-org/reusable-workflows/.github/workflows/container-cicd-local.yaml@v1.11.2 - with: - dockerfile: op-wheel/Dockerfile - + detect-files-changed: + runs-on: ubuntu-latest + outputs: + files-changed: ${{ steps.detect-files-changed.outputs.all_changed_files }} + steps: + - uses: actions/checkout@v4 + - name: Detect files changed + id: detect-files-changed + uses: 
tj-actions/changed-files@v44 + with: + separator: ',' + + build-cel2-migration-tool: + runs-on: ubuntu-latest + needs: detect-files-changed + if: | + contains(needs.detect-files-changed.outputs.files-changed, 'op-chain-ops/cmd/celo-migrate') || + contains(needs.detect-files-changed.outputs.files-changed, 'op-chain-ops/Dockerfile') + permissions: + contents: read + id-token: write + security-events: write + steps: + - uses: actions/checkout@v4 + - name: Log in to GCP Artifact Registry + uses: celo-org/reusable-workflows/.github/actions/auth-gcp-artifact-registry@v2.0 + with: + workload-id-provider: 'projects/1094498259535/locations/global/workloadIdentityPools/gh-optimism/providers/github-by-repos' + service-account: 'celo-optimism-gh@devopsre.iam.gserviceaccount.com' + docker-gcp-registries: us-west1-docker.pkg.dev + - name: Build and push container + uses: celo-org/reusable-workflows/.github/actions/build-container@v2.0 + with: + platforms: linux/amd64 + registry: us-west1-docker.pkg.dev/devopsre/dev-images/cel2-migration-tool + tags: ${{ github.sha }} + context: ./ + dockerfile: ./op-chain-ops/Dockerfile + push: true + trivy: false + + # Build op-node, op-batcher and op-proposer using docker-bake + build-op-stack: + runs-on: ubuntu-latest + needs: detect-files-changed + if: | + contains(needs.detect-files-changed.outputs.files-changed, 'go.sum') || + contains(needs.detect-files-changed.outputs.files-changed, 'ops/docker') || + contains(needs.detect-files-changed.outputs.files-changed, 'op-node/') || + contains(needs.detect-files-changed.outputs.files-changed, 'op-batcher/') || + contains(needs.detect-files-changed.outputs.files-changed, 'op-proposer/') || + contains(needs.detect-files-changed.outputs.files-changed, 'op-service/') + permissions: + contents: read + id-token: write + security-events: write + env: + GIT_COMMIT: ${{ github.sha }} + GIT_DATE: ${{ github.event.head_commit.timestamp }} + IMAGE_TAGS: ${{ github.sha }},latest + REGISTRY: us-west1-docker.pkg.dev + REPOSITORY: blockchaintestsglobaltestnet/dev-images + steps: + - uses: actions/checkout@v4 + - name: Log in to GCP Artifact Registry + uses: celo-org/reusable-workflows/.github/actions/auth-gcp-artifact-registry@v2.0 + with: + workload-id-provider: 'projects/1094498259535/locations/global/workloadIdentityPools/gh-optimism/providers/github-by-repos' + service-account: 'celo-optimism-gh@devopsre.iam.gserviceaccount.com' + docker-gcp-registries: us-west1-docker.pkg.dev + # We need custom steps here because this job builds with docker bake + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + - name: Build and push + uses: docker/bake-action@v5 + with: + push: true + source: . 
+ files: docker-bake.hcl + targets: op-node,op-batcher,op-proposer diff --git a/op-chain-ops/Dockerfile b/op-chain-ops/Dockerfile new file mode 100644 index 0000000000000..532a73bb5acbb --- /dev/null +++ b/op-chain-ops/Dockerfile @@ -0,0 +1,29 @@ +FROM golang:1.21.1-alpine3.18 as builder + +RUN apk --no-cache add make + +COPY ./go.mod /app/go.mod +COPY ./go.sum /app/go.sum + +WORKDIR /app + +RUN go mod download + +COPY ./op-service /app/op-service +COPY ./op-node /app/op-node +COPY ./op-plasma /app/op-plasma +COPY ./op-chain-ops /app/op-chain-ops +WORKDIR /app/op-chain-ops +RUN make celo-migrate + +FROM alpine:3.18 +RUN apk --no-cache add ca-certificates bash rsync + +# RUN addgroup -S app && adduser -S app -G app +# USER app +WORKDIR /app + +COPY --from=builder /app/op-chain-ops/bin/celo-migrate /app +ENV PATH="/app:${PATH}" + +ENTRYPOINT ["/app/celo-migrate"] diff --git a/op-chain-ops/cmd/celo-migrate/README.md b/op-chain-ops/cmd/celo-migrate/README.md new file mode 100644 index 0000000000000..9cf2493263a5b --- /dev/null +++ b/op-chain-ops/cmd/celo-migrate/README.md @@ -0,0 +1,117 @@ +# Celo L2 Migration Script + +## Overview + +This script has two main sections. The first migrates Celo blocks to a format compatible with `op-geth`, and the second performs the necessary state changes, such as deploying the L2 smart contracts. + +### Block migration + +The block migration itself has two parts: it first migrates the ancient / frozen blocks, i.e. all blocks older than the most recent 90000. Because the ancients db is append-only, the script copies these blocks into a new database after making the necessary transformations. The script then copies the rest of the chaindata directory (excluding `/ancients`) using the system-level `rsync` command. All non-ancient blocks are then transformed in-place in the new db, leaving the old db unchanged. + +### State migration + +After all blocks have been migrated, the script performs a series of modifications to the state db. This is also done in-place in the `--new-db` directory. First, the state migration deploys the L2 smart contracts by iterating through the genesis allocs passed to the script and setting the nonce, balance, code and storage for each address accordingly, overwriting existing data if necessary. Finally, the state migration commits the state changes to produce a new state root and creates the first Cel2 block. + +### Notes + +Once the state changes are complete, the migration is finished. The longest-running section of the script is the ancients migration, and it can be resumed / skipped if interrupted partway through. The rest of the script cannot be resumed and will restart from the last migrated ancient block if interrupted or re-run. + +The script outputs a `rollup-config.json` file that is passed to the sequencer in order to start the L2 network. + +See `--help` for how to run each portion of the script individually, along with other configuration options. + +### Running the script + +First, build the script by running + +```bash +make celo-migrate +``` + +from the `op-chain-ops` directory. + +You can then run the script as follows. + +```bash +go run ./cmd/celo-migrate --help +``` + +NOTE: You will need `rsync` to run this script if it's not already installed. + +#### Running with local test setup (Alfajores / Holesky) + +To test the script locally, we can migrate an Alfajores database and use Holesky as our L1. The input files needed for this can be found in `./testdata`. The necessary smart contracts have already been deployed on Holesky.
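+ +The steps below assume a local `./data` directory for the old and new chaindata (this directory is not created by the script itself); if it does not already exist, create it before pulling the snapshot: + +```bash +mkdir -p ./data +```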
+ +##### Pull down the latest Alfajores database snapshot + +```bash +gcloud alpha storage cp gs://celo-chain-backup/alfajores/chaindata-latest.tar.zst alfajores.tar.zst +``` + +Unzip and rename it: + +```bash +tar --use-compress-program=unzstd -xvf alfajores.tar.zst +mv chaindata ./data/alfajores_old +``` + +##### Generate test allocs file + +The state migration takes in an allocs file that specifies the L2 state changes to be made during the migration. This file can be generated from the deploy config and L1 contract addresses by running the following from the `contracts-bedrock` directory. + +```bash +CONTRACT_ADDRESSES_PATH=../../op-chain-ops/cmd/celo-migrate/testdata/deployment-l1-holesky.json \ +DEPLOY_CONFIG_PATH=../../op-chain-ops/cmd/celo-migrate/testdata/deploy-config-holesky-alfajores.json \ +STATE_DUMP_PATH=../../op-chain-ops/cmd/celo-migrate/testdata/l2-allocs-alfajores.json \ +forge script ./scripts/L2Genesis.s.sol:L2Genesis \ +--sig 'runWithStateDump()' +``` + +This should output the allocs file to `./testdata/l2-allocs-alfajores.json`. If you encounter difficulties with this and just want to continue testing the script, you can alternatively find the allocs file [here](https://gist.github.com/jcortejoso/7f90ba9b67c669791014661ccb6de81a). + +##### Run script with test configuration + +```bash +go run ./cmd/celo-migrate full \ +--deploy-config ./cmd/celo-migrate/testdata/deploy-config-holesky-alfajores.json \ +--l1-deployments ./cmd/celo-migrate/testdata/deployment-l1-holesky.json \ +--l1-rpc https://ethereum-holesky-rpc.publicnode.com \ +--l2-allocs ./cmd/celo-migrate/testdata/l2-allocs-alfajores.json \ +--outfile.rollup-config ./cmd/celo-migrate/testdata/rollup-config.json \ +--old-db ./data/alfajores_old \ +--new-db ./data/alfajores_new +``` + +The first time you run the script, it should take ~5 minutes. The first part of the script will migrate ancient blocks, and will take the majority of the time. + +During the ancients migration you can play around with stopping and re-running the script, which should always resume where it left off. On subsequent runs, once the ancient migration has completed, the script should skip it and proceed quickly to migrating non-ancient blocks. + +Note that partial migration progress beyond the ancient blocks (i.e. non-frozen blocks and state changes) will not be preserved between runs by default. + +#### Running for Cel2 migration + +##### Generate allocs file + +You can generate the allocs file needed to run the migration with the following script in `contracts-bedrock`. + +```bash +CONTRACT_ADDRESSES_PATH= \ +DEPLOY_CONFIG_PATH= \ +STATE_DUMP_PATH= \ +forge script scripts/L2Genesis.s.sol:L2Genesis \ +--sig 'runWithStateDump()' +``` + +##### Dress rehearsal / pre-migration + +To minimize downtime caused by the migration, node operators can prepare their Cel2 databases by running this script a day ahead of the actual migration. This will pre-populate the new database with most of the ancient blocks needed for the final migration, and will also serve as a dress rehearsal for the rest of the migration. + +NOTE: The pre-migration should be run using a chaindata snapshot, rather than a db that is being used by a node. To avoid network downtime, we recommend that node operators do not stop any nodes in order to perform the pre-migration.
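+ +As a rough sketch (the paths below are placeholders; see `--help` for the full flag list), the ancient-block portion of the pre-migration can be run on its own with the `blocks` subcommand and the `--only-ancients` flag: + +```bash +go run ./cmd/celo-migrate blocks \ +--only-ancients \ +--old-db /path/to/celo-mainnet-chaindata-snapshot \ +--new-db /path/to/cel2-chaindata +```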
+ +Node operators should inspect their migration logs after the dress rehearsal to ensure the migration completed successfully, and direct any questions to the Celo developer community on Discord before the actual migration. + +##### Final migration + +On the day of the actual Cel2 migration, this script can be re-run using the same parameters as for the dress rehearsal but with the latest Celo Mainnet database snapshot as `--old-db`. The script will only need to migrate any ancient blocks frozen after the dress rehearsal, all non-frozen blocks, and state. + +Unlike the pre-migration, the final migration can be run directly on the db used by the Celo node rather than a snapshot. diff --git a/op-chain-ops/cmd/celo-migrate/ancients.go b/op-chain-ops/cmd/celo-migrate/ancients.go new file mode 100644 index 0000000000000..b9e1fb975974f --- /dev/null +++ b/op-chain-ops/cmd/celo-migrate/ancients.go @@ -0,0 +1,195 @@ +package main + +import ( + "context" + "fmt" + "path/filepath" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" + "golang.org/x/sync/errgroup" +) + +// RLPBlockRange is a range of blocks in RLP format +type RLPBlockRange struct { + start uint64 + hashes [][]byte + headers [][]byte + bodies [][]byte + receipts [][]byte + tds [][]byte +} + +func migrateAncientsDb(oldDBPath, newDBPath string, batchSize, bufferSize uint64) (uint64, uint64, error) { + oldFreezer, err := rawdb.NewChainFreezer(filepath.Join(oldDBPath, "ancient"), "", false) // Can't be readonly because we need the .meta files to be created + if err != nil { + return 0, 0, fmt.Errorf("failed to open old freezer: %w", err) + } + defer oldFreezer.Close() + + newFreezer, err := rawdb.NewChainFreezer(filepath.Join(newDBPath, "ancient"), "", false) + if err != nil { + return 0, 0, fmt.Errorf("failed to open new freezer: %w", err) + } + defer newFreezer.Close() + + numAncientsOld, err := oldFreezer.Ancients() + if err != nil { + return 0, 0, fmt.Errorf("failed to get number of ancients in old freezer: %w", err) + } + + numAncientsNewBefore, err := newFreezer.Ancients() + if err != nil { + return 0, 0, fmt.Errorf("failed to get number of ancients in new freezer: %w", err) + } + + if numAncientsNewBefore >= numAncientsOld { + log.Info("Ancient Block Migration Skipped", "process", "ancients", "ancientsInOldDB", numAncientsOld, "ancientsInNewDB", numAncientsNewBefore) + return numAncientsNewBefore, numAncientsNewBefore, nil + } + + log.Info("Ancient Block Migration Started", "process", "ancients", "startBlock", numAncientsNewBefore, "endBlock", numAncientsOld, "count", numAncientsOld-numAncientsNewBefore, "step", batchSize) + + g, ctx := errgroup.WithContext(context.Background()) + readChan := make(chan RLPBlockRange, bufferSize) + transformChan := make(chan RLPBlockRange, bufferSize) + + g.Go(func() error { + return readAncientBlocks(ctx, oldFreezer, numAncientsNewBefore, numAncientsOld, batchSize, readChan) + }) + g.Go(func() error { return transformBlocks(ctx, readChan, transformChan) }) + g.Go(func() error { return writeAncientBlocks(ctx, newFreezer, transformChan) }) + + if err = g.Wait(); err != nil { + return 0, 0, fmt.Errorf("failed to migrate ancients: %w", err) + } + + numAncientsNewAfter, err := newFreezer.Ancients() + if err != nil { + return 0, 0, fmt.Errorf("failed to get number of ancients in new freezer: %w", err) + } + + log.Info("Ancient Block Migration Ended", "process", "ancients",
"ancientsInOldDB", numAncientsOld, "ancientsInNewDB", numAncientsNewAfter, "migrated", numAncientsNewAfter-numAncientsNewBefore) + return numAncientsNewBefore, numAncientsNewAfter, nil +} + +func readAncientBlocks(ctx context.Context, freezer *rawdb.Freezer, startBlock, endBlock, batchSize uint64, out chan<- RLPBlockRange) error { + defer close(out) + + for i := startBlock; i < endBlock; i += batchSize { + select { + case <-ctx.Done(): + return ctx.Err() + default: + count := min(batchSize, endBlock-i+1) + start := i + + blockRange := RLPBlockRange{ + start: start, + hashes: make([][]byte, count), + headers: make([][]byte, count), + bodies: make([][]byte, count), + receipts: make([][]byte, count), + tds: make([][]byte, count), + } + var err error + + blockRange.hashes, err = freezer.AncientRange(rawdb.ChainFreezerHashTable, start, count, 0) + if err != nil { + return fmt.Errorf("failed to read hashes from old freezer: %w", err) + } + blockRange.headers, err = freezer.AncientRange(rawdb.ChainFreezerHeaderTable, start, count, 0) + if err != nil { + return fmt.Errorf("failed to read headers from old freezer: %w", err) + } + blockRange.bodies, err = freezer.AncientRange(rawdb.ChainFreezerBodiesTable, start, count, 0) + if err != nil { + return fmt.Errorf("failed to read bodies from old freezer: %w", err) + } + blockRange.receipts, err = freezer.AncientRange(rawdb.ChainFreezerReceiptTable, start, count, 0) + if err != nil { + return fmt.Errorf("failed to read receipts from old freezer: %w", err) + } + blockRange.tds, err = freezer.AncientRange(rawdb.ChainFreezerDifficultyTable, start, count, 0) + if err != nil { + return fmt.Errorf("failed to read tds from old freezer: %w", err) + } + + out <- blockRange + } + } + return nil +} + +func transformBlocks(ctx context.Context, in <-chan RLPBlockRange, out chan<- RLPBlockRange) error { + // Transform blocks from the in channel and send them to the out channel + defer close(out) + for blockRange := range in { + select { + case <-ctx.Done(): + return ctx.Err() + default: + for i := range blockRange.hashes { + blockNumber := blockRange.start + uint64(i) + + newHeader, err := transformHeader(blockRange.headers[i]) + if err != nil { + return fmt.Errorf("can't transform header: %w", err) + } + newBody, err := transformBlockBody(blockRange.bodies[i]) + if err != nil { + return fmt.Errorf("can't transform body: %w", err) + } + + if yes, newHash := hasSameHash(newHeader, blockRange.hashes[i]); !yes { + log.Error("Hash mismatch", "block", blockNumber, "oldHash", common.BytesToHash(blockRange.hashes[i]), "newHash", newHash) + return fmt.Errorf("hash mismatch at block %d", blockNumber) + } + + blockRange.headers[i] = newHeader + blockRange.bodies[i] = newBody + } + out <- blockRange + } + } + return nil +} + +func writeAncientBlocks(ctx context.Context, freezer *rawdb.Freezer, in <-chan RLPBlockRange) error { + // Write blocks from the in channel to the newDb + for blockRange := range in { + select { + case <-ctx.Done(): + return ctx.Err() + default: + _, err := freezer.ModifyAncients(func(aWriter ethdb.AncientWriteOp) error { + for i := range blockRange.hashes { + blockNumber := blockRange.start + uint64(i) + if err := aWriter.AppendRaw(rawdb.ChainFreezerHashTable, blockNumber, blockRange.hashes[i]); err != nil { + return fmt.Errorf("can't write hash to Freezer: %w", err) + } + if err := aWriter.AppendRaw(rawdb.ChainFreezerHeaderTable, blockNumber, blockRange.headers[i]); err != nil { + return fmt.Errorf("can't write header to Freezer: %w", err) + } + if err := 
aWriter.AppendRaw(rawdb.ChainFreezerBodiesTable, blockNumber, blockRange.bodies[i]); err != nil { + return fmt.Errorf("can't write body to Freezer: %w", err) + } + if err := aWriter.AppendRaw(rawdb.ChainFreezerReceiptTable, blockNumber, blockRange.receipts[i]); err != nil { + return fmt.Errorf("can't write receipts to Freezer: %w", err) + } + if err := aWriter.AppendRaw(rawdb.ChainFreezerDifficultyTable, blockNumber, blockRange.tds[i]); err != nil { + return fmt.Errorf("can't write td to Freezer: %w", err) + } + } + return nil + }) + if err != nil { + return fmt.Errorf("failed to write block range: %w", err) + } + log.Info("Wrote ancient blocks", "start", blockRange.start, "end", blockRange.start+uint64(len(blockRange.hashes)-1), "count", len(blockRange.hashes)) + } + } + return nil +} diff --git a/op-chain-ops/cmd/celo-migrate/db.go b/op-chain-ops/cmd/celo-migrate/db.go new file mode 100644 index 0000000000000..e7f685909d536 --- /dev/null +++ b/op-chain-ops/cmd/celo-migrate/db.go @@ -0,0 +1,102 @@ +package main + +import ( + "encoding/binary" + "errors" + "fmt" + "os" + "path/filepath" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/ethdb" +) + +// Constants for the database +const ( + DBCache = 1024 // size of the cache in MB + DBHandles = 60 // number of handles + LastMigratedNonAncientBlockKey = "celoLastMigratedNonAncientBlock" +) + +var ( + headerPrefix = []byte("h") // headerPrefix + num (uint64 big endian) + hash -> header +) + +// encodeBlockNumber encodes a block number as big endian uint64 +func encodeBlockNumber(number uint64) []byte { + enc := make([]byte, 8) + binary.BigEndian.PutUint64(enc, number) + return enc +} + +// headerKey = headerPrefix + num (uint64 big endian) + hash +func headerKey(number uint64, hash common.Hash) []byte { + return append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...) +} + +// readLastMigratedNonAncientBlock returns the last migration number. If it doesn't exist, it returns 0. +func readLastMigratedNonAncientBlock(db ethdb.KeyValueReader) uint64 { + data, err := db.Get([]byte(LastMigratedNonAncientBlockKey)) + if err != nil { + return 0 + } + number := binary.BigEndian.Uint64(data) + return number +} + +// writeLastMigratedNonAncientBlock stores the last migration number. +func writeLastMigratedNonAncientBlock(db ethdb.KeyValueWriter, number uint64) error { + enc := make([]byte, 8) + binary.BigEndian.PutUint64(enc, number) + return db.Put([]byte(LastMigratedNonAncientBlockKey), enc) +} + +// deleteLastMigratedNonAncientBlock removes the last migration number. +func deleteLastMigratedNonAncientBlock(db ethdb.KeyValueWriter) error { + return db.Delete([]byte(LastMigratedNonAncientBlockKey)) +} + +// openDB opens the chaindata database at the given path. 
Note this path is below the datadir +func openDB(chaindataPath string) (ethdb.Database, error) { + if _, err := os.Stat(chaindataPath); errors.Is(err, os.ErrNotExist) { + return nil, err + } + + ldb, err := rawdb.Open(rawdb.OpenOptions{ + Type: "leveldb", + Directory: chaindataPath, + AncientsDirectory: filepath.Join(chaindataPath, "ancient"), + Namespace: "", + Cache: DBCache, + Handles: DBHandles, + ReadOnly: false, + }) + if err != nil { + return nil, err + } + return ldb, nil +} + +func createNewDbIfNotExists(newDBPath string) error { + if err := os.MkdirAll(newDBPath, 0755); err != nil { + return fmt.Errorf("failed to create new database directory: %w", err) + } + return nil +} + +func cleanupNonAncientDb(dir string) error { + files, err := os.ReadDir(dir) + if err != nil { + return fmt.Errorf("failed to read directory: %w", err) + } + for _, file := range files { + if file.Name() != "ancient" { + err := os.RemoveAll(filepath.Join(dir, file.Name())) + if err != nil { + return fmt.Errorf("failed to remove file: %w", err) + } + } + } + return nil +} diff --git a/op-chain-ops/cmd/celo-migrate/main.go b/op-chain-ops/cmd/celo-migrate/main.go index aa810bd4c0bec..f64953d341613 100644 --- a/op-chain-ops/cmd/celo-migrate/main.go +++ b/op-chain-ops/cmd/celo-migrate/main.go @@ -1,42 +1,31 @@ package main import ( - "bytes" "context" "errors" "fmt" "math/big" "os" - "path/filepath" - "time" + "os/exec" + "runtime/debug" "github.com/ethereum-optimism/optimism/op-chain-ops/genesis" "github.com/ethereum-optimism/optimism/op-service/jsonutil" oplog "github.com/ethereum-optimism/optimism/op-service/log" - "github.com/ethereum-optimism/optimism/op-service/predeploys" - "github.com/holiman/uint256" - "github.com/mattn/go-isatty" - - "github.com/urfave/cli/v2" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethclient" - "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rpc" - "github.com/ethereum/go-ethereum/trie" - "github.com/ethereum/go-ethereum/triedb" + + "github.com/mattn/go-isatty" + "github.com/urfave/cli/v2" + "golang.org/x/exp/slog" ) var ( deployConfigFlag = &cli.PathFlag{ Name: "deploy-config", - Usage: "Path to the JSON file that was used for the bedrock contracts deployment. A test example can be found here 'op-chain-ops/genesis/testdata/test-deploy-config-full.json' and documentation for the fields is at https://docs.optimism.io/builders/chain-operators/management/configuration", + Usage: "Path to the JSON file that was used for the l1 contracts deployment. A test example can be found here 'op-chain-ops/genesis/testdata/test-deploy-config-full.json' and documentation for the fields is at https://docs.optimism.io/builders/chain-operators/management/configuration", Required: true, } l1DeploymentsFlag = &cli.PathFlag{ @@ -51,7 +40,7 @@ var ( } l2AllocsFlag = &cli.PathFlag{ Name: "l2-allocs", - Usage: "Path to L2 genesis allocs file", + Usage: "Path to L2 genesis allocs file. 
You can find instructions on how to generate this file in the README", Required: true, } outfileRollupConfigFlag = &cli.PathFlag{ @@ -59,361 +48,314 @@ var ( Usage: "Path to write the rollup config JSON file, to be provided to op-node with the 'rollup.config' flag", Required: true, } - dbPathFlag = &cli.StringFlag{ - Name: "db-path", - Usage: "Path to the Celo database, not including the `celo/chaindata` part", + oldDBPathFlag = &cli.PathFlag{ + Name: "old-db", + Usage: "Path to the old Celo chaindata dir, can be found at '/celo/chaindata'", + Required: true, + } + newDBPathFlag = &cli.PathFlag{ + Name: "new-db", + Usage: "Path to write migrated Celo chaindata, note the new node implementation expects to find this chaindata at the following path '/geth/chaindata", Required: true, } - dryRunFlag = &cli.BoolFlag{ - Name: "dry-run", - Usage: "Dry run the upgrade by not committing the database", + batchSizeFlag = &cli.Uint64Flag{ + Name: "batch-size", + Usage: "Batch size to use for block migration, larger batch sizes can speed up migration but require more memory. If increasing the batch size consider also increasing the memory-limit", + Value: 50000, // TODO(Alec) optimize default parameters + } + bufferSizeFlag = &cli.Uint64Flag{ + Name: "buffer-size", + Usage: "Buffer size to use for ancient block migration channels. Defaults to 0. Included to facilitate testing for performance improvements.", + Value: 0, + } + memoryLimitFlag = &cli.Int64Flag{ + Name: "memory-limit", + Usage: "Memory limit in MiB, should be set lower than the available amount of memory in your system to prevent out of memory errors", + Value: 7500, + } + clearAllFlag = &cli.BoolFlag{ + Name: "clear-all", + Usage: "Use this to start with a fresh new db, deleting all data including ancients. CAUTION: Re-migrating ancients takes time.", + } + keepNonAncientsFlag = &cli.BoolFlag{ + Name: "keep-non-ancients", + Usage: "CAUTION: Not recommended for production. Use to keep all data in the new db as is, including any partially migrated non-ancient blocks and state data. If non-ancient blocks are partially migrated, the script will attempt to resume the migration.", + } + onlyAncientsFlag = &cli.BoolFlag{ + Name: "only-ancients", + Usage: "Use to only migrate ancient blocks. Ignored when running full migration", } - flags = []cli.Flag{ + blockMigrationFlags = []cli.Flag{ + onlyAncientsFlag, + oldDBPathFlag, + newDBPathFlag, + batchSizeFlag, + bufferSizeFlag, + memoryLimitFlag, + clearAllFlag, + keepNonAncientsFlag, + } + stateMigrationFlags = []cli.Flag{ + newDBPathFlag, deployConfigFlag, l1DeploymentsFlag, l1RPCFlag, l2AllocsFlag, outfileRollupConfigFlag, - dbPathFlag, - dryRunFlag, } + // Ignore onlyAncients flag and duplicate newDBPathFlag for full migration + fullMigrationFlags = append(blockMigrationFlags[1:], stateMigrationFlags[1:]...) 
+) - // TODO: read those form the deploy config - // TODO(pl): select values - EIP1559Denominator = uint64(50) - EIP1559DenominatorCanyon = uint64(250) - EIP1559Elasticity = uint64(10) +type blockMigrationOptions struct { + oldDBPath string + newDBPath string + batchSize uint64 + bufferSize uint64 + memoryLimit int64 + clearAll bool + keepNonAncients bool + onlyAncients bool +} - OutFilePerm = os.FileMode(0o440) -) +type stateMigrationOptions struct { + deployConfig string + l1Deployments string + l1RPC string + l2AllocsPath string + outfileRollupConfig string + newDBPath string +} + +func parseBlockMigrationOptions(ctx *cli.Context) blockMigrationOptions { + return blockMigrationOptions{ + oldDBPath: ctx.String(oldDBPathFlag.Name), + newDBPath: ctx.String(newDBPathFlag.Name), + batchSize: ctx.Uint64(batchSizeFlag.Name), + bufferSize: ctx.Uint64(bufferSizeFlag.Name), + memoryLimit: ctx.Int64(memoryLimitFlag.Name), + clearAll: ctx.Bool(clearAllFlag.Name), + keepNonAncients: ctx.Bool(keepNonAncientsFlag.Name), + onlyAncients: ctx.Bool(onlyAncientsFlag.Name), + } +} + +func parseStateMigrationOptions(ctx *cli.Context) stateMigrationOptions { + return stateMigrationOptions{ + newDBPath: ctx.String(newDBPathFlag.Name), + deployConfig: ctx.Path(deployConfigFlag.Name), + l1Deployments: ctx.Path(l1DeploymentsFlag.Name), + l1RPC: ctx.String(l1RPCFlag.Name), + l2AllocsPath: ctx.Path(l2AllocsFlag.Name), + outfileRollupConfig: ctx.Path(outfileRollupConfigFlag.Name), + } +} func main() { + color := isatty.IsTerminal(os.Stderr.Fd()) - handler := log.NewTerminalHandler(os.Stderr, color) + handler := log.NewTerminalHandlerWithLevel(os.Stderr, slog.LevelDebug, color) oplog.SetGlobalLogHandler(handler) + log.Info("Beginning Cel2 Migration") + app := &cli.App{ - Name: "migrate", - Usage: "Migrate Celo state to a CeL2 DB", - Flags: flags, - Action: func(ctx *cli.Context) error { - deployConfig := ctx.Path("deploy-config") - l1Deployments := ctx.Path("l1-deployments") - l1RPC := ctx.String("l1-rpc") - l2AllocsPath := ctx.Path("l2-allocs") - outfileRollupConfig := ctx.Path("outfile.rollup-config") - dbPath := ctx.String("db-path") - dryRun := ctx.Bool("dry-run") - - // Read deployment configuration - log.Info("Deploy config", "path", deployConfig) - config, err := genesis.NewDeployConfig(deployConfig) - if err != nil { + Name: "celo-migrate", + Usage: "Migrate Celo block and state data to a CeL2 DB", + Commands: []*cli.Command{ + { + Name: "blocks", + Aliases: []string{"b"}, + Usage: "Migrate Celo block data to a CeL2 DB", + Flags: blockMigrationFlags, + Action: func(ctx *cli.Context) error { + return runBlockMigration(parseBlockMigrationOptions(ctx)) + }, + }, + { + Name: "state", + Aliases: []string{"s"}, + Usage: "Migrate Celo state data to a CeL2 DB. 
Makes necessary state changes and generates a rollup config file.", + Flags: stateMigrationFlags, + Action: func(ctx *cli.Context) error { + return runStateMigration(parseStateMigrationOptions(ctx)) + }, + }, + { + Name: "full", + Aliases: []string{"f", "all", "a"}, + Usage: "Perform a full migration of both block and state data to a CeL2 DB", + Flags: fullMigrationFlags, + Action: func(ctx *cli.Context) error { + if err := runBlockMigration(parseBlockMigrationOptions(ctx)); err != nil { + return fmt.Errorf("failed to run block migration: %w", err) + } + + if err := runStateMigration(parseStateMigrationOptions(ctx)); err != nil { + return fmt.Errorf("failed to run state migration: %w", err) + } + + return nil + }, + }, + }, + OnUsageError: func(ctx *cli.Context, err error, isSubcommand bool) error { + if isSubcommand { return err } + _ = cli.ShowAppHelp(ctx) + return fmt.Errorf("please provide a valid command") + }, + } - if config.DeployCeloContracts { - return errors.New("DeployCeloContracts is not supported in migration") - } - if config.FundDevAccounts { - return errors.New("FundDevAccounts is not supported in migration") - } + if err := app.Run(os.Args); err != nil { + log.Crit("error in migration", "err", err) + } + log.Info("Finished migration successfully!") +} - // Try reading the L1 deployment information - deployments, err := genesis.NewL1Deployments(l1Deployments) - if err != nil { - return fmt.Errorf("cannot read L1 deployments at %s: %w", l1Deployments, err) - } - config.SetDeployments(deployments) +func runBlockMigration(opts blockMigrationOptions) error { - // Get latest block information from L1 - var l1StartBlock *types.Block - client, err := ethclient.Dial(l1RPC) - if err != nil { - return fmt.Errorf("cannot dial %s: %w", l1RPC, err) - } + // Check that `rsync` command is available. We use this to copy the db excluding ancients, which we will copy separately + if _, err := exec.LookPath("rsync"); err != nil { + return fmt.Errorf("please install `rsync` to run block migration") + } - if config.L1StartingBlockTag == nil { - l1StartBlock, err = client.BlockByNumber(context.Background(), nil) - if err != nil { - return fmt.Errorf("cannot fetch latest block: %w", err) - } - tag := rpc.BlockNumberOrHashWithHash(l1StartBlock.Hash(), true) - config.L1StartingBlockTag = (*genesis.MarshalableRPCBlockNumberOrHash)(&tag) - } else if config.L1StartingBlockTag.BlockHash != nil { - l1StartBlock, err = client.BlockByHash(context.Background(), *config.L1StartingBlockTag.BlockHash) - if err != nil { - return fmt.Errorf("cannot fetch block by hash: %w", err) - } - } else if config.L1StartingBlockTag.BlockNumber != nil { - l1StartBlock, err = client.BlockByNumber(context.Background(), big.NewInt(config.L1StartingBlockTag.BlockNumber.Int64())) - if err != nil { - return fmt.Errorf("cannot fetch block by number: %w", err) - } - } + debug.SetMemoryLimit(opts.memoryLimit * 1 << 20) // Set memory limit, converting from MiB to bytes - // Ensure that there is a starting L1 block - if l1StartBlock == nil { - return fmt.Errorf("no starting L1 block") - } + log.Info("Block Migration Started", "oldDBPath", opts.oldDBPath, "newDBPath", opts.newDBPath, "batchSize", opts.batchSize, "memoryLimit", opts.memoryLimit, "clearAll", opts.clearAll, "keepNonAncients", opts.keepNonAncients, "onlyAncients", opts.onlyAncients) - // Sanity check the config. Do this after filling in the L1StartingBlockTag - // if it is not defined. 
- if err := config.Check(); err != nil { - return err - } + var err error + + if err = createNewDbIfNotExists(opts.newDBPath); err != nil { + return fmt.Errorf("failed to create new database: %w", err) + } - log.Info("Using L1 Start Block", "number", l1StartBlock.Number(), "hash", l1StartBlock.Hash().Hex()) + if opts.clearAll { + if err = os.RemoveAll(opts.newDBPath); err != nil { + return fmt.Errorf("failed to remove new database: %w", err) + } + } else if !opts.keepNonAncients { + if err = cleanupNonAncientDb(opts.newDBPath); err != nil { + return fmt.Errorf("failed to reset non-ancient database: %w", err) + } + } - // Build the L2 genesis block - l2Allocs, err := genesis.LoadForgeAllocs(l2AllocsPath) - if err != nil { - return err - } + var numAncientsNewBefore uint64 + var numAncientsNewAfter uint64 + if numAncientsNewBefore, numAncientsNewAfter, err = migrateAncientsDb(opts.oldDBPath, opts.newDBPath, opts.batchSize, opts.bufferSize); err != nil { + return fmt.Errorf("failed to migrate ancients database: %w", err) + } - l2Genesis, err := genesis.BuildL2Genesis(config, l2Allocs, l1StartBlock) - if err != nil { - return fmt.Errorf("error creating l2 genesis: %w", err) - } + var numNonAncients uint64 + if !opts.onlyAncients { + if numNonAncients, err = migrateNonAncientsDb(opts.oldDBPath, opts.newDBPath, numAncientsNewAfter-1, opts.batchSize); err != nil { + return fmt.Errorf("failed to migrate non-ancients database: %w", err) + } + } else { + log.Info("Skipping non-ancients migration") + } - // Write changes to state to actual state database - cel2Header, err := ApplyMigrationChangesToDB(l2Genesis, dbPath, !dryRun) - if err != nil { - return err - } - log.Info("Updated Cel2 state") + log.Info("Block Migration Completed", "migratedAncients", numAncientsNewAfter-numAncientsNewBefore, "migratedNonAncients", numNonAncients) - rollupConfig, err := config.RollupConfig(l1StartBlock, cel2Header.Hash(), cel2Header.Number.Uint64()) - if err != nil { - return err - } - if err := rollupConfig.Check(); err != nil { - return fmt.Errorf("generated rollup config does not pass validation: %w", err) - } + return nil +} - log.Info("Writing rollup config", "file", outfileRollupConfig) - if err := jsonutil.WriteJSON(outfileRollupConfig, rollupConfig, OutFilePerm); err != nil { - return err - } +func runStateMigration(opts stateMigrationOptions) error { + log.Info("State Migration Started", "newDBPath", opts.newDBPath, "deployConfig", opts.deployConfig, "l1Deployments", opts.l1Deployments, "l1RPC", opts.l1RPC, "l2AllocsPath", opts.l2AllocsPath, "outfileRollupConfig", opts.outfileRollupConfig) - return nil - }, + // Read deployment configuration + config, err := genesis.NewDeployConfig(opts.deployConfig) + if err != nil { + return err } - if err := app.Run(os.Args); err != nil { - log.Crit("error in migration", "err", err) + if config.DeployCeloContracts { + return errors.New("DeployCeloContracts is not supported in migration") + } + if config.FundDevAccounts { + return errors.New("FundDevAccounts is not supported in migration") } - log.Info("Finished migration successfully!") -} -func ApplyMigrationChangesToDB(genesis *core.Genesis, dbPath string, commit bool) (*types.Header, error) { - log.Info("Opening Celo database", "dbPath", dbPath) - ldb, err := openCeloDb(dbPath) + // Try reading the L1 deployment information + deployments, err := genesis.NewL1Deployments(opts.l1Deployments) if err != nil { - return nil, fmt.Errorf("cannot open DB: %w", err) + return fmt.Errorf("cannot read L1 deployments at %s: %w", 
opts.l1Deployments, err) } - log.Info("Loaded Celo L1 DB", "db", ldb) + config.SetDeployments(deployments) - // Grab the hash of the tip of the legacy chain. - hash := rawdb.ReadHeadHeaderHash(ldb) - log.Info("Reading chain tip from database", "hash", hash) + // Get latest block information from L1 + var l1StartBlock *types.Block + client, err := ethclient.Dial(opts.l1RPC) + if err != nil { + return fmt.Errorf("cannot dial %s: %w", opts.l1RPC, err) + } - // Grab the header number. - num := rawdb.ReadHeaderNumber(ldb, hash) - if num == nil { - return nil, fmt.Errorf("cannot find header number for %s", hash) + if config.L1StartingBlockTag == nil { + l1StartBlock, err = client.BlockByNumber(context.Background(), nil) + if err != nil { + return fmt.Errorf("cannot fetch latest block: %w", err) + } + tag := rpc.BlockNumberOrHashWithHash(l1StartBlock.Hash(), true) + config.L1StartingBlockTag = (*genesis.MarshalableRPCBlockNumberOrHash)(&tag) + } else if config.L1StartingBlockTag.BlockHash != nil { + l1StartBlock, err = client.BlockByHash(context.Background(), *config.L1StartingBlockTag.BlockHash) + if err != nil { + return fmt.Errorf("cannot fetch block by hash: %w", err) + } + } else if config.L1StartingBlockTag.BlockNumber != nil { + l1StartBlock, err = client.BlockByNumber(context.Background(), big.NewInt(config.L1StartingBlockTag.BlockNumber.Int64())) + if err != nil { + return fmt.Errorf("cannot fetch block by number: %w", err) + } } - log.Info("Reading chain tip num from database", "number", num) - // Grab the full header. - header := rawdb.ReadHeader(ldb, hash, *num) - log.Info("Read header from database", "header", header) + // Ensure that there is a starting L1 block + if l1StartBlock == nil { + return fmt.Errorf("no starting L1 block") + } - // We need to update the chain config to set the correct hardforks. - genesisHash := rawdb.ReadCanonicalHash(ldb, 0) - cfg := rawdb.ReadChainConfig(ldb, genesisHash) - if cfg == nil { - log.Crit("chain config not found") + // Sanity check the config. Do this after filling in the L1StartingBlockTag + // if it is not defined. + if err := config.Check(); err != nil { + return err } - log.Info("Read chain config from database", "config", cfg) - // Set up the backing store. - // TODO(pl): Do we need the preimages setting here? - underlyingDB := state.NewDatabaseWithConfig(ldb, &triedb.Config{Preimages: true}) + log.Info("Using L1 Start Block", "number", l1StartBlock.Number(), "hash", l1StartBlock.Hash().Hex()) - // Open up the state database. - db, err := state.New(header.Root, underlyingDB, nil) + // Build the L2 genesis block + l2Allocs, err := genesis.LoadForgeAllocs(opts.l2AllocsPath) if err != nil { - return nil, fmt.Errorf("cannot open StateDB: %w", err) - } - - // So far we applied changes in the memory VM and collected changes in the genesis struct - // Now we iterate through all accounts that have been written there and set them inside the statedb. - // This will change the state root - // Another property is that the total balance changes must be 0 - accountCounter := 0 - overwriteCounter := 0 - for k, v := range genesis.Alloc { - accountCounter++ - if db.Exist(k) { - equal := bytes.Equal(db.GetCode(k), v.Code) - - log.Warn("Operating on existing state", "account", k, "equalCode", equal) - overwriteCounter++ - } - // TODO(pl): decide what to do with existing accounts. 
- db.CreateAccount(k) - - // CreateAccount above copied the balance, check if we change it - if db.GetBalance(k).Cmp(uint256.MustFromBig(v.Balance)) != 0 { - // TODO(pl): make this a hard error once the migration has been tested more - log.Warn("Moving account changed native balance", "address", k, "oldBalance", db.GetBalance(k), "newBalance", v.Balance) - } - - db.SetNonce(k, v.Nonce) - db.SetBalance(k, uint256.MustFromBig(v.Balance)) - db.SetCode(k, v.Code) - db.SetStorage(k, v.Storage) - - log.Info("Moved account", "address", k) + return err } - log.Info("Migrated OP contracts into state DB", "copiedAccounts", accountCounter, "overwrittenAccounts", overwriteCounter) - migrationBlock := new(big.Int).Add(header.Number, common.Big1) + l2Genesis, err := genesis.BuildL2Genesis(config, l2Allocs, l1StartBlock) + if err != nil { + return fmt.Errorf("error creating l2 genesis: %w", err) + } - // We're done messing around with the database, so we can now commit the changes to the DB. - // Note that this doesn't actually write the changes to disk. - log.Info("Committing state DB") - newRoot, err := db.Commit(migrationBlock.Uint64(), true) + // Write changes to state to actual state database + cel2Header, err := applyStateMigrationChanges(config, l2Genesis, opts.newDBPath) if err != nil { - return nil, err - } - - // Create the header for the Bedrock transition block. - cel2Header := &types.Header{ - ParentHash: header.Hash(), - UncleHash: types.EmptyUncleHash, - Coinbase: predeploys.SequencerFeeVaultAddr, - Root: newRoot, - TxHash: types.EmptyTxsHash, - ReceiptHash: types.EmptyReceiptsHash, - Bloom: types.Bloom{}, - Difficulty: new(big.Int).Set(common.Big0), - Number: migrationBlock, - GasLimit: header.GasLimit, - GasUsed: 0, - Time: uint64(time.Now().Unix()), // TODO(pl): Needed to avoid L1-L2 time mismatches - Extra: []byte("CeL2 migration"), - MixDigest: common.Hash{}, - Nonce: types.BlockNonce{}, - BaseFee: new(big.Int).Set(header.BaseFee), - } - log.Info("Build Cel2 migration header", "header", cel2Header) - - // Create the Bedrock transition block from the header. Note that there are no transactions, - // uncle blocks, or receipts in the Bedrock transition block. - cel2Block := types.NewBlock(cel2Header, nil, nil, nil, trie.NewStackTrie(nil)) - - // We did it! - log.Info( - "Built Cel2 migration block", - "hash", cel2Block.Hash(), - "root", cel2Block.Root(), - "number", cel2Block.NumberU64(), - "gas-used", cel2Block.GasUsed(), - "gas-limit", cel2Block.GasLimit(), - ) - - // If we're not actually writing this to disk, then we're done. - if !commit { - log.Info("Dry run complete") - return nil, nil - } - - // Otherwise we need to write the changes to disk. First we commit the state changes. - log.Info("Committing trie DB") - if err := db.Database().TrieDB().Commit(newRoot, true); err != nil { - return nil, err - } - - // Next we write the Cel2 genesis block to the database. - rawdb.WriteTd(ldb, cel2Block.Hash(), cel2Block.NumberU64(), cel2Block.Difficulty()) - rawdb.WriteBlock(ldb, cel2Block) - rawdb.WriteReceipts(ldb, cel2Block.Hash(), cel2Block.NumberU64(), nil) - rawdb.WriteCanonicalHash(ldb, cel2Block.Hash(), cel2Block.NumberU64()) - rawdb.WriteHeadBlockHash(ldb, cel2Block.Hash()) - rawdb.WriteHeadFastBlockHash(ldb, cel2Block.Hash()) - rawdb.WriteHeadHeaderHash(ldb, cel2Block.Hash()) - - // Mark the first CeL2 block as finalized - rawdb.WriteFinalizedBlockHash(ldb, cel2Block.Hash()) - - // Set the standard options. 
- cfg.LondonBlock = cel2Block.Number() - cfg.BerlinBlock = cel2Block.Number() - cfg.ArrowGlacierBlock = cel2Block.Number() - cfg.GrayGlacierBlock = cel2Block.Number() - cfg.MergeNetsplitBlock = cel2Block.Number() - cfg.TerminalTotalDifficulty = big.NewInt(0) - cfg.TerminalTotalDifficultyPassed = true - cfg.ShanghaiTime = &cel2Header.Time - cfg.CancunTime = &cel2Header.Time - - // Set the Optimism options. - cfg.BedrockBlock = cel2Block.Number() - // Enable Regolith from the start of Bedrock - cfg.RegolithTime = new(uint64) // what are those? do we need those? - cfg.Optimism = ¶ms.OptimismConfig{ - EIP1559Denominator: EIP1559Denominator, - EIP1559DenominatorCanyon: EIP1559DenominatorCanyon, - EIP1559Elasticity: EIP1559Elasticity, - } - cfg.CanyonTime = &cel2Header.Time - cfg.EcotoneTime = &cel2Header.Time - cfg.Cel2Time = &cel2Header.Time - - // Write the chain config to disk. - // TODO(pl): Why do we need to write this with the genesis hash, not `cel2Block.Hash()`?` - rawdb.WriteChainConfig(ldb, genesisHash, cfg) - log.Info("Wrote updated chain config", "config", cfg) - - // We're done! - log.Info( - "Wrote CeL2 migration block", - "height", cel2Header.Number, - "root", cel2Header.Root.String(), - "hash", cel2Header.Hash().String(), - "timestamp", cel2Header.Time, - ) - - // Close the database handle - if err := ldb.Close(); err != nil { - return nil, err - } - - return cel2Header, nil -} + return err + } + log.Info("Updated Cel2 state") -// Opens a Celo database, stored in the `celo` subfolder -func openCeloDb(path string) (ethdb.Database, error) { - if _, err := os.Stat(path); errors.Is(err, os.ErrNotExist) { - return nil, err - } - - chaindataPath := filepath.Join(path, "celo", "chaindata") - ancientPath := filepath.Join(chaindataPath, "ancient") - ldb, err := rawdb.Open(rawdb.OpenOptions{ - Type: "leveldb", - Directory: chaindataPath, - AncientsDirectory: ancientPath, - Namespace: "", - Cache: 1024, - Handles: 60, - ReadOnly: false, - }) + rollupConfig, err := config.RollupConfig(l1StartBlock, cel2Header.Hash(), cel2Header.Number.Uint64()) if err != nil { - return nil, err + return err } - return ldb, nil + if err := rollupConfig.Check(); err != nil { + return fmt.Errorf("generated rollup config does not pass validation: %w", err) + } + + log.Info("Writing rollup config", "file", opts.outfileRollupConfig) + if err := jsonutil.WriteJSON(opts.outfileRollupConfig, rollupConfig, OutFilePerm); err != nil { + return err + } + + log.Info("State Migration Completed") + + return nil } diff --git a/op-chain-ops/cmd/celo-migrate/non-ancients.go b/op-chain-ops/cmd/celo-migrate/non-ancients.go new file mode 100644 index 0000000000000..bab92aacd3f19 --- /dev/null +++ b/op-chain-ops/cmd/celo-migrate/non-ancients.go @@ -0,0 +1,123 @@ +package main + +import ( + "fmt" + "os" + "os/exec" + "strings" + + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/log" +) + +func migrateNonAncientsDb(oldDbPath, newDbPath string, lastAncientBlock, batchSize uint64) (uint64, error) { + // First copy files from old database to new database + log.Info("Copy files from old database (excluding ancients)", "process", "non-ancients") + + // Get rsync help output + cmdHelp := exec.Command("rsync", "--help") + output, _ := cmdHelp.CombinedOutput() + + // Convert output to string + outputStr := string(output) + + // TODO(Alec) have rsync run as part of pre-migration (but not the transformation or state) + // can use --update and --delete to keep things synced between dbs + + // Check for 
supported options + var cmd *exec.Cmd + // Prefer --info=progress2 over --progress + if strings.Contains(outputStr, "--info") { + cmd = exec.Command("rsync", "-v", "-a", "--info=progress2", "--exclude=ancient", oldDbPath+"/", newDbPath) + } else if strings.Contains(outputStr, "--progress") { + cmd = exec.Command("rsync", "-v", "-a", "--progress", "--exclude=ancient", oldDbPath+"/", newDbPath) + } else { + cmd = exec.Command("rsync", "-v", "-a", "--exclude=ancient", oldDbPath+"/", newDbPath) + } + log.Info("Running rsync command", "command", cmd.String()) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return 0, fmt.Errorf("failed to copy old database to new database: %w", err) + } + + // Open the new database without access to AncientsDb + newDB, err := rawdb.NewLevelDBDatabase(newDbPath, DBCache, DBHandles, "", false) + if err != nil { + return 0, fmt.Errorf("failed to open new database: %w", err) + } + defer newDB.Close() + + // get the last block number + hash := rawdb.ReadHeadHeaderHash(newDB) + lastBlock := *rawdb.ReadHeaderNumber(newDB, hash) + lastMigratedNonAncientBlock := readLastMigratedNonAncientBlock(newDB) // returns 0 if not found + + // if migration was interrupted, start from the last migrated block + fromBlock := max(lastAncientBlock, lastMigratedNonAncientBlock) + 1 + + if fromBlock >= lastBlock { + log.Info("Non-Ancient Block Migration Skipped", "process", "non-ancients", "lastAncientBlock", lastAncientBlock, "endBlock", lastBlock, "lastMigratedNonAncientBlock", lastMigratedNonAncientBlock) + if lastMigratedNonAncientBlock != lastBlock { + return 0, fmt.Errorf("migration range empty but last migrated block is not the last block in the database") + } + return 0, nil + } + + log.Info("Non-Ancient Block Migration Started", "process", "non-ancients", "startBlock", fromBlock, "endBlock", lastBlock, "count", lastBlock-fromBlock, "lastAncientBlock", lastAncientBlock, "lastMigratedNonAncientBlock", lastMigratedNonAncientBlock) + + for i := fromBlock; i <= lastBlock; i += batchSize { + numbersHash := rawdb.ReadAllHashesInRange(newDB, i, i+batchSize-1) + + log.Info("Processing Block Range", "process", "non-ancients", "from", i, "to(inclusve)", i+batchSize-1, "count", len(numbersHash)) + for _, numberHash := range numbersHash { + // read header and body + header := rawdb.ReadHeaderRLP(newDB, numberHash.Hash, numberHash.Number) + body := rawdb.ReadBodyRLP(newDB, numberHash.Hash, numberHash.Number) + + // transform header and body + newHeader, err := transformHeader(header) + if err != nil { + return 0, fmt.Errorf("failed to transform header: block %d - %x: %w", numberHash.Number, numberHash.Hash, err) + } + newBody, err := transformBlockBody(body) + if err != nil { + return 0, fmt.Errorf("failed to transform body: block %d - %x: %w", numberHash.Number, numberHash.Hash, err) + } + + if yes, newHash := hasSameHash(newHeader, numberHash.Hash[:]); !yes { + log.Error("Hash mismatch", "block", numberHash.Number, "oldHash", numberHash.Hash, "newHash", newHash) + return 0, fmt.Errorf("hash mismatch at block %d - %x", numberHash.Number, numberHash.Hash) + } + + // write header and body + batch := newDB.NewBatch() + rawdb.WriteBodyRLP(batch, numberHash.Hash, numberHash.Number, newBody) + _ = batch.Put(headerKey(numberHash.Number, numberHash.Hash), newHeader) + _ = writeLastMigratedNonAncientBlock(batch, numberHash.Number) + if err := batch.Write(); err != nil { + return 0, fmt.Errorf("failed to write header and body: block %d - %x: %w", 
numberHash.Number, numberHash.Hash, err) + } + } + } + + toBeRemoved := rawdb.ReadAllHashesInRange(newDB, 1, lastAncientBlock) + log.Info("Removing frozen blocks", "process", "non-ancients", "count", len(toBeRemoved)) + batch := newDB.NewBatch() + for _, numberHash := range toBeRemoved { + rawdb.DeleteBlockWithoutNumber(batch, numberHash.Hash, numberHash.Number) + rawdb.DeleteCanonicalHash(batch, numberHash.Number) + } + if err := batch.Write(); err != nil { + return 0, fmt.Errorf("failed to delete frozen blocks: %w", err) + } + + // if migration finished, remove the last migration number + if err := deleteLastMigratedNonAncientBlock(newDB); err != nil { + return 0, fmt.Errorf("failed to delete last migration number: %w", err) + } + + log.Info("Non-Ancient Block Migration Ended", "process", "non-ancients", "migratedBlocks", lastBlock-fromBlock+1, "removedBlocks", len(toBeRemoved)) + + return lastBlock - fromBlock + 1, nil +} diff --git a/op-chain-ops/cmd/celo-migrate/state.go b/op-chain-ops/cmd/celo-migrate/state.go new file mode 100644 index 0000000000000..adf355c000d31 --- /dev/null +++ b/op-chain-ops/cmd/celo-migrate/state.go @@ -0,0 +1,252 @@ +package main + +import ( + "bytes" + "encoding/json" + "fmt" + "math/big" + "os" + + "github.com/ethereum-optimism/optimism/op-chain-ops/genesis" + "github.com/ethereum-optimism/optimism/op-service/predeploys" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/triedb" + + "github.com/holiman/uint256" +) + +var ( + OutFilePerm = os.FileMode(0o440) + + alfajoresChainId uint64 = 44787 + accountOverwriteWhitelist = map[uint64]map[common.Address]struct{}{ + // Add any addresses that should be allowed to overwrite existing accounts here. + alfajoresChainId: { + // Create2Deployer + common.HexToAddress("0x13b0D85CcB8bf860b6b79AF3029fCA081AE9beF2"): {}, + }, + } +) + +func applyStateMigrationChanges(config *genesis.DeployConfig, genesis *core.Genesis, dbPath string) (*types.Header, error) { + log.Info("Opening Celo database", "dbPath", dbPath) + + ldb, err := openDB(dbPath) + if err != nil { + return nil, fmt.Errorf("cannot open DB: %w", err) + } + log.Info("Loaded Celo L1 DB", "db", ldb) + + // Grab the hash of the tip of the legacy chain. + hash := rawdb.ReadHeadHeaderHash(ldb) + log.Info("Reading chain tip from database", "hash", hash) + + // Grab the header number. + num := rawdb.ReadHeaderNumber(ldb, hash) + if num == nil { + return nil, fmt.Errorf("cannot find header number for %s", hash) + } + log.Info("Reading chain tip num from database", "number", num) + + // Grab the full header. + header := rawdb.ReadHeader(ldb, hash, *num) + log.Info("Read header from database", "header", header) + + // We need to update the chain config to set the correct hardforks. + genesisHash := rawdb.ReadCanonicalHash(ldb, 0) + cfg := rawdb.ReadChainConfig(ldb, genesisHash) + if cfg == nil { + log.Crit("chain config not found") + } + log.Info("Read chain config from database", "config", cfg) + + // Set up the backing store. + // TODO(pl): Do we need the preimages setting here? + underlyingDB := state.NewDatabaseWithConfig(ldb, &triedb.Config{Preimages: true}) + + // Open up the state database. 
+ db, err := state.New(header.Root, underlyingDB, nil) + if err != nil { + return nil, fmt.Errorf("cannot open StateDB: %w", err) + } + + // Apply the changes to the state DB. + err = applyAllocsToState(db, genesis, cfg) + if err != nil { + return nil, err + } + + migrationBlock := new(big.Int).Add(header.Number, common.Big1) + + // We're done messing around with the database, so we can now commit the changes to the DB. + // Note that this doesn't actually write the changes to disk. + log.Info("Committing state DB") + newRoot, err := db.Commit(migrationBlock.Uint64(), true) + if err != nil { + return nil, err + } + + baseFee := new(big.Int).SetUint64(params.InitialBaseFee) + if header.BaseFee != nil { + baseFee = header.BaseFee + } + // Create the header for the Cel2 transition block. + cel2Header := &types.Header{ + ParentHash: header.Hash(), + UncleHash: types.EmptyUncleHash, + Coinbase: predeploys.SequencerFeeVaultAddr, + Root: newRoot, + TxHash: types.EmptyTxsHash, + ReceiptHash: types.EmptyReceiptsHash, + Bloom: types.Bloom{}, + Difficulty: new(big.Int).Set(common.Big0), + Number: migrationBlock, + GasLimit: header.GasLimit, + GasUsed: 0, + Time: header.Time + 5, + Extra: []byte("CeL2 migration"), + MixDigest: common.Hash{}, + Nonce: types.BlockNonce{}, + BaseFee: baseFee, + WithdrawalsHash: &types.EmptyWithdrawalsHash, + BlobGasUsed: new(uint64), + ExcessBlobGas: new(uint64), + } + log.Info("Build Cel2 migration header", "header", cel2Header) + + // Create the Cel2 transition block from the header. Note that there are no transactions, + // uncle blocks, or receipts in the Cel2 transition block. + cel2Block := types.NewBlock(cel2Header, nil, nil, nil, trie.NewStackTrie(nil)) + + // We did it! + log.Info( + "Built Cel2 migration block", + "hash", cel2Block.Hash(), + "root", cel2Block.Root(), + "number", cel2Block.NumberU64(), + ) + + log.Info("Committing trie DB") + if err := db.Database().TrieDB().Commit(newRoot, true); err != nil { + return nil, err + } + + // Next we write the Cel2 genesis block to the database. + rawdb.WriteTd(ldb, cel2Block.Hash(), cel2Block.NumberU64(), cel2Block.Difficulty()) + rawdb.WriteBlock(ldb, cel2Block) + rawdb.WriteReceipts(ldb, cel2Block.Hash(), cel2Block.NumberU64(), nil) + rawdb.WriteCanonicalHash(ldb, cel2Block.Hash(), cel2Block.NumberU64()) + rawdb.WriteHeadBlockHash(ldb, cel2Block.Hash()) + rawdb.WriteHeadFastBlockHash(ldb, cel2Block.Hash()) + rawdb.WriteHeadHeaderHash(ldb, cel2Block.Hash()) + + // Mark the first CeL2 block as finalized + rawdb.WriteFinalizedBlockHash(ldb, cel2Block.Hash()) + + // Set the standard options. + cfg.LondonBlock = cel2Block.Number() + cfg.BerlinBlock = cel2Block.Number() + cfg.ArrowGlacierBlock = cel2Block.Number() + cfg.GrayGlacierBlock = cel2Block.Number() + cfg.MergeNetsplitBlock = cel2Block.Number() + cfg.TerminalTotalDifficulty = big.NewInt(0) + cfg.TerminalTotalDifficultyPassed = true + cfg.ShanghaiTime = &cel2Header.Time + cfg.CancunTime = &cel2Header.Time + + // Set the Optimism options. + cfg.BedrockBlock = cel2Block.Number() + // Enable Regolith from the start of Bedrock + cfg.RegolithTime = new(uint64) // what are those? do we need those? 
+ cfg.Optimism = &params.OptimismConfig{
+ EIP1559Denominator: config.EIP1559Denominator,
+ EIP1559DenominatorCanyon: config.EIP1559DenominatorCanyon,
+ EIP1559Elasticity: config.EIP1559Elasticity,
+ }
+ cfg.CanyonTime = &cel2Header.Time
+ cfg.EcotoneTime = &cel2Header.Time
+ cfg.FjordTime = &cel2Header.Time
+ cfg.Cel2Time = &cel2Header.Time
+
+ // Write the chain config to disk.
+ // TODO(pl): Why do we need to write this with the genesis hash, not `cel2Block.Hash()`?
+ rawdb.WriteChainConfig(ldb, genesisHash, cfg)
+ marshalledConfig, err := json.Marshal(cfg)
+ if err != nil {
+ return nil, fmt.Errorf("failed to marshal chain config to JSON: %w", err)
+ }
+ log.Info("Wrote updated chain config", "config", string(marshalledConfig))
+
+ // We're done!
+ log.Info(
+ "Wrote CeL2 migration block",
+ "height", cel2Header.Number,
+ "root", cel2Header.Root.String(),
+ "hash", cel2Header.Hash().String(),
+ "timestamp", cel2Header.Time,
+ )
+
+ // Close the database handle
+ if err := ldb.Close(); err != nil {
+ return nil, err
+ }
+
+ return cel2Header, nil
+}
+
+// applyAllocsToState applies the account allocations from the allocation file to the state database.
+// It creates new accounts, sets their nonce, balance, code, and storage values.
+// If an account already exists, it adds the balance of the new account to the existing balance.
+// If the code of an existing account is different from the code in the genesis block, it logs a warning.
+// This changes the state root, so `Commit` needs to be called after this function.
+func applyAllocsToState(db *state.StateDB, genesis *core.Genesis, config *params.ChainConfig) error {
+ log.Info("Starting to migrate OP contracts into state DB")
+
+ accountCounter := 0
+ overwriteCounter := 0
+ for k, v := range genesis.Alloc {
+ accountCounter++
+
+ balance := uint256.MustFromBig(v.Balance)
+
+ if db.Exist(k) {
+ // If the account already has balance, add it to the balance of the new account
+ balance = balance.Add(balance, db.GetBalance(k))
+
+ currentCode := db.GetCode(k)
+ equalCode := bytes.Equal(currentCode, v.Code)
+ if currentCode != nil && !equalCode {
+ if whitelist, exists := accountOverwriteWhitelist[config.ChainID.Uint64()]; exists {
+ if _, ok := whitelist[k]; ok {
+ log.Info("Account already exists with different code and is whitelisted, overwriting...", "address", k)
+ } else {
+ log.Warn("Account already exists with different code and is not whitelisted, overwriting...", "address", k, "oldCode", db.GetCode(k), "newCode", v.Code)
+ }
+ } else {
+ log.Warn("Account already exists with different code and no whitelist exists", "address", k, "oldCode", db.GetCode(k), "newCode", v.Code)
+ }
+
+ overwriteCounter++
+ }
+ }
+ db.CreateAccount(k)
+
+ db.SetNonce(k, v.Nonce)
+ db.SetBalance(k, balance)
+ db.SetCode(k, v.Code)
+ for key, value := range v.Storage {
+ db.SetState(k, key, value)
+ }
+
+ log.Info("Moved account", "address", k)
+ }
+ log.Info("Migrated OP contracts into state DB", "copiedAccounts", accountCounter, "overwrittenAccounts", overwriteCounter)
+ return nil
+}
diff --git a/op-chain-ops/cmd/celo-migrate/testdata/deploy-config-holesky-alfajores.json b/op-chain-ops/cmd/celo-migrate/testdata/deploy-config-holesky-alfajores.json
new file mode 100644
index 0000000000000..6b9dbe97e0682
--- /dev/null
+++ b/op-chain-ops/cmd/celo-migrate/testdata/deploy-config-holesky-alfajores.json
@@ -0,0 +1,89 @@
+{
+ "l1StartingBlockTag": "0xbbed3612407993e67f8ca7a423b181837ae164a531941e78f5ee48e766d39cad",
+
+ "l1ChainID": 17000,
+ "l2ChainID": 44787,
+
"l2BlockTime": 2, + "l1BlockTime": 12, + + "maxSequencerDrift": 600, + "sequencerWindowSize": 3600, + "channelTimeout": 300, + + "p2pSequencerAddress": "0x644C82d76A43Fe9c76eda0EEd0f0DC17235c3005", + "batchInboxAddress": "0xff00000000000000000000000000000000044787", + "batchSenderAddress": "0x1660B1F70De0f32490b50f976e8983213dCF7FD5", + + "l2OutputOracleSubmissionInterval": 120, + "l2OutputOracleStartingBlockNumber": 0, + "l2OutputOracleStartingTimestamp": 1718312256, + + "l2OutputOracleProposer": "0x1BA11Ec6581FC8C3e35D6E345aEC977796Ffe89b", + "l2OutputOracleChallenger": "0xc07C5A1fBF6c7BC6b4f321E7dd031c0E1E98d32d", + + "finalizationPeriodSeconds": 12, + + "proxyAdminOwner": "0xc07C5A1fBF6c7BC6b4f321E7dd031c0E1E98d32d", + "baseFeeVaultRecipient": "0xc07C5A1fBF6c7BC6b4f321E7dd031c0E1E98d32d", + "l1FeeVaultRecipient": "0xc07C5A1fBF6c7BC6b4f321E7dd031c0E1E98d32d", + "sequencerFeeVaultRecipient": "0xc07C5A1fBF6c7BC6b4f321E7dd031c0E1E98d32d", + "finalSystemOwner": "0xc07C5A1fBF6c7BC6b4f321E7dd031c0E1E98d32d", + "superchainConfigGuardian": "0xc07C5A1fBF6c7BC6b4f321E7dd031c0E1E98d32d", + + "baseFeeVaultMinimumWithdrawalAmount": "0x8ac7230489e80000", + "l1FeeVaultMinimumWithdrawalAmount": "0x8ac7230489e80000", + "sequencerFeeVaultMinimumWithdrawalAmount": "0x8ac7230489e80000", + "baseFeeVaultWithdrawalNetwork": 0, + "l1FeeVaultWithdrawalNetwork": 0, + "sequencerFeeVaultWithdrawalNetwork": 0, + + "gasPriceOracleOverhead": 0, + "gasPriceOracleScalar": 1000000, + + "enableGovernance": false, + "governanceTokenSymbol": "OP", + "governanceTokenName": "Optimism", + "governanceTokenOwner": "0xc07C5A1fBF6c7BC6b4f321E7dd031c0E1E98d32d", + + "l2GenesisBlockGasLimit": "0x1c9c380", + "l2GenesisBlockBaseFeePerGas": "0x3b9aca00", + "l2GenesisRegolithTimeOffset": "0x0", + + "eip1559Denominator": 50, + "eip1559DenominatorCanyon": 250, + "eip1559Elasticity": 6, + + "l2GenesisEcotoneTimeOffset": "0x0", + "l2GenesisDeltaTimeOffset": "0x0", + "l2GenesisCanyonTimeOffset": "0x0", + + "systemConfigStartBlock": 0, + + "requiredProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000000", + "recommendedProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000000", + + "faultGameAbsolutePrestate": "0x03c7ae758795765c6664a5d39bf63841c71ff191e9189522bad8ebff5d4eca98", + "faultGameMaxDepth": 44, + "faultGameClockExtension": 0, + "faultGameMaxClockDuration": 600, + "faultGameGenesisBlock": 0, + "faultGameGenesisOutputRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", + "faultGameSplitDepth": 14, + "faultGameWithdrawalDelay": 604800, + + "preimageOracleMinProposalSize": 1800000, + "preimageOracleChallengePeriod": 86400, + + "fundDevAccounts": false, + "useFaultProofs": false, + "proofMaturityDelaySeconds": 604800, + "disputeGameFinalityDelaySeconds": 302400, + "respectedGameType": 0, + + "usePlasma": false, + "daCommitmentType": "KeccakCommitment", + "daChallengeWindow": 160, + "daResolveWindow": 160, + "daBondSize": 1000000, + "daResolverRefundPercentage": 0 +} diff --git a/op-chain-ops/cmd/celo-migrate/testdata/deployment-l1-holesky.json b/op-chain-ops/cmd/celo-migrate/testdata/deployment-l1-holesky.json new file mode 100644 index 0000000000000..b37b79f4d4c8f --- /dev/null +++ b/op-chain-ops/cmd/celo-migrate/testdata/deployment-l1-holesky.json @@ -0,0 +1,34 @@ +{ + "AddressManager": "0x2d256f3b82f673Ee377C393fBF2Cf3DcA5D1D901", + "AnchorStateRegistry": "0x036fDE501893043825356Ce49dfd554809F07597", + "AnchorStateRegistryProxy": 
"0xe5077701c64782954d27384da76D95ABf320460f", + "DelayedWETH": "0x408Ad04Dd953958B080226025E17d6Ba12987EB7", + "DelayedWETHProxy": "0x27f7Ade64F031A39553Be8104bF8B0b410735845", + "DisputeGameFactory": "0xd7771F9687804Bba1D360B08AD9e4d8CB4523738", + "DisputeGameFactoryProxy": "0x193FdDF22D31c227f1Af1286cf2B051d701FF86E", + "L1CrossDomainMessenger": "0x1e3513a619AA4f2550CDD95709B92C1FE0397184", + "L1CrossDomainMessengerProxy": "0x35841aC1f5FdC5b812562adB17F6A0B9A178F643", + "L1ERC721Bridge": "0x695b01393f0539ec64AC316d4998E4130309efB0", + "L1ERC721BridgeProxy": "0x2b9C1e5b9a0D01256388cc4A0F8F290E839F2d82", + "L1StandardBridge": "0x2d1A818544b657Bc5d1E8c8B80F953bd0CA1C9B2", + "L1StandardBridgeProxy": "0xD10A531CB9b80BD507501F34D87Ad4083E9b7F98", + "L2OutputOracle": "0x04CD14625ff0Da62d6E0820a816b4dD3eCd0FF27", + "L2OutputOracleProxy": "0x5636f9D582DB69EAf1Eb9f05B0738225C91fBC1E", + "Mips": "0x60E1b8b535626Fc9fFCdf6147B45879634645771", + "OptimismMintableERC20Factory": "0x3fcd69a03857aA6e79AE9408fc7c887EE70FC145", + "OptimismMintableERC20FactoryProxy": "0x23c80F2503b93a58EC620D20b6b9B6AB8cCa2a12", + "OptimismPortal": "0xdF803FAC1d84a31Ff5aee841f11659f9a3787CE5", + "OptimismPortal2": "0x60bc423dDf0B24fa5104EcacAC5000674Ac3EBfB", + "OptimismPortalProxy": "0xa292B051eA58e2558243f4A9f74262B1796c9648", + "PreimageOracle": "0xEC19353B7364Fb85b9b0A57EaEEC6aCeBbFb6a53", + "ProtocolVersions": "0x077d61D4fb3378025950Bb60AD69179B38921107", + "ProtocolVersionsProxy": "0x791D5101840A547F1EE91148d34E061412A57ECD", + "ProxyAdmin": "0x4ddC758DA1697Ad58D86D03150872c042390dCa2", + "SafeProxyFactory": "0xa6B71E26C5e0845f74c812102Ca7114b6a896AB2", + "SafeSingleton": "0xd9Db270c1B5E3Bd161E8c8503c55cEABeE709552", + "SuperchainConfig": "0xA4f7dB67A6e098613B107be3F8441475Ec30FCC2", + "SuperchainConfigProxy": "0xB21214DA32a85A0d43372310D62095cf91d67765", + "SystemConfig": "0xeFA98Ba3ada6c6AC4bB84074820685E1F01C835d", + "SystemConfigProxy": "0x733043Aa78d25F6759d9e6Ce2B2897bE6d630E08", + "SystemOwnerSafe": "0xD2a6B91aB77691D6F8688eAFA7a5f188bc5baA3a" +} diff --git a/op-chain-ops/cmd/celo-migrate/testdata/rollup-config.json b/op-chain-ops/cmd/celo-migrate/testdata/rollup-config.json new file mode 100644 index 0000000000000..8dfd1f25e28d8 --- /dev/null +++ b/op-chain-ops/cmd/celo-migrate/testdata/rollup-config.json @@ -0,0 +1,36 @@ +{ + "genesis": { + "l1": { + "hash": "0xbbed3612407993e67f8ca7a423b181837ae164a531941e78f5ee48e766d39cad", + "number": 1729797 + }, + "l2": { + "hash": "0x2664d0a1f45dc9a010e553e815a25f33c6d949cbb0d38e179c6209fc0486aa41", + "number": 23912613 + }, + "l2_time": 1718312256, + "system_config": { + "batcherAddr": "0x1660b1f70de0f32490b50f976e8983213dcf7fd5", + "overhead": "0x0000000000000000000000000000000000000000000000000000000000000000", + "scalar": "0x00000000000000000000000000000000000000000000000000000000000f4240", + "gasLimit": 30000000 + } + }, + "block_time": 2, + "max_sequencer_drift": 600, + "seq_window_size": 3600, + "channel_timeout": 300, + "l1_chain_id": 17000, + "l2_chain_id": 44787, + "regolith_time": 0, + "cel2_time": 0, + "canyon_time": 0, + "delta_time": 0, + "ecotone_time": 0, + "batch_inbox_address": "0xff00000000000000000000000000000000044787", + "deposit_contract_address": "0xa292b051ea58e2558243f4a9f74262b1796c9648", + "l1_system_config_address": "0x733043aa78d25f6759d9e6ce2b2897be6d630e08", + "protocol_versions_address": "0x0000000000000000000000000000000000000000", + "da_challenge_contract_address": "0x0000000000000000000000000000000000000000" +} + diff --git 
a/op-chain-ops/cmd/celo-migrate/transform.go b/op-chain-ops/cmd/celo-migrate/transform.go
new file mode 100644
index 0000000000000..5a80e8a51566f
--- /dev/null
+++ b/op-chain-ops/cmd/celo-migrate/transform.go
@@ -0,0 +1,109 @@
+package main
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/rlp"
+)
+
+var (
+ IstanbulExtraVanity = 32 // Fixed number of extra-data bytes reserved for validator vanity
+)
+
+// IstanbulAggregatedSeal is the aggregated seal for Istanbul blocks
+type IstanbulAggregatedSeal struct {
+ // Bitmap is a bitmap having an active bit for each validator that signed this block
+ Bitmap *big.Int
+ // Signature is an aggregated BLS signature resulting from signatures by each validator that signed this block
+ Signature []byte
+ // Round is the round in which the signature was created.
+ Round *big.Int
+}
+
+// IstanbulExtra is the extra-data for Istanbul blocks
+type IstanbulExtra struct {
+ // AddedValidators are the validators that have been added in the block
+ AddedValidators []common.Address
+ // AddedValidatorsPublicKeys are the BLS public keys for the validators added in the block
+ AddedValidatorsPublicKeys [][96]byte
+ // RemovedValidators is a bitmap having an active bit for each removed validator in the block
+ RemovedValidators *big.Int
+ // Seal is an ECDSA signature by the proposer
+ Seal []byte
+ // AggregatedSeal contains the aggregated BLS signature created via IBFT consensus.
+ AggregatedSeal IstanbulAggregatedSeal
+ // ParentAggregatedSeal contains an aggregated BLS signature for the previous block.
+ ParentAggregatedSeal IstanbulAggregatedSeal
+}
+
+// transformHeader removes the aggregated seal from the header
+func transformHeader(header []byte) ([]byte, error) {
+ newHeader := new(types.Header)
+ err := rlp.DecodeBytes(header, &newHeader)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(newHeader.Extra) < IstanbulExtraVanity {
+ return nil, errors.New("invalid istanbul header extra-data")
+ }
+
+ istanbulExtra := IstanbulExtra{}
+ err = rlp.DecodeBytes(newHeader.Extra[IstanbulExtraVanity:], &istanbulExtra)
+ if err != nil {
+ return nil, err
+ }
+
+ istanbulExtra.AggregatedSeal = IstanbulAggregatedSeal{}
+
+ payload, err := rlp.EncodeToBytes(&istanbulExtra)
+ if err != nil {
+ return nil, err
+ }
+
+ newHeader.Extra = append(newHeader.Extra[:IstanbulExtraVanity], payload...)
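+ // Extra now holds the original vanity bytes followed by the re-encoded IstanbulExtra with its aggregated seal cleared.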
+
+ return rlp.EncodeToBytes(newHeader)
+}
+
+func hasSameHash(newHeader, oldHash []byte) (bool, common.Hash) {
+ newHash := crypto.Keccak256Hash(newHeader)
+ return bytes.Equal(oldHash, newHash.Bytes()), newHash
+}
+
+// transformBlockBody migrates the block body from the old format to the new format (works with []byte input and output)
+func transformBlockBody(oldBodyData []byte) ([]byte, error) {
+ // decode body into celo-blockchain Body structure
+ // remove epochSnarkData and randomness data
+ var celoBody struct {
+ Transactions types.Transactions
+ Randomness rlp.RawValue
+ EpochSnarkData rlp.RawValue
+ }
+ if err := rlp.DecodeBytes(oldBodyData, &celoBody); err != nil {
+ // body may have already been transformed in a previous migration
+ body := types.Body{}
+ if err := rlp.DecodeBytes(oldBodyData, &body); err == nil {
+ return oldBodyData, nil
+ }
+ return nil, fmt.Errorf("failed to RLP decode body: %w", err)
+ }
+
+ // transform into op-geth types.Body structure
+ newBody := types.Body{
+ Transactions: celoBody.Transactions,
+ Uncles: []*types.Header{},
+ }
+ newBodyData, err := rlp.EncodeToBytes(newBody)
+ if err != nil {
+ return nil, fmt.Errorf("failed to RLP encode body: %w", err)
+ }
+
+ return newBodyData, nil
+}
diff --git a/packages/contracts-bedrock/foundry.toml b/packages/contracts-bedrock/foundry.toml
index 9edf752f983d6..da9bfc37dff96 100644
--- a/packages/contracts-bedrock/foundry.toml
+++ b/packages/contracts-bedrock/foundry.toml
@@ -49,7 +49,8 @@ fs_permissions = [
 { access='read', path = './forge-artifacts/' },
 { access='write', path='./semver-lock.json' },
 { access='read-write', path='./.testdata/' },
-{ access='read', path='./kout-deployment' }
+{ access='read', path='./kout-deployment' },
+{ access='read-write', path='../../op-chain-ops/cmd/celo-migrate/testdata/' },
 ]
 libs = ["node_modules", "lib"]