diff --git a/contracts-review-prep.md b/contracts-review-prep.md deleted file mode 100644 index 883362f7c..000000000 --- a/contracts-review-prep.md +++ /dev/null @@ -1,65 +0,0 @@ -# Contracts Review Preparation - -## High-level Overview - -### Reason for changes - -The goal was to build a foundation that supports token bridging with custom logic on the receiving chain (not wrapped), as well as custom bridging logic (assets that accrue value over time, like LRTs). -For clarity: we only developed a framework; the exact logic for custom tokens and custom bridging will follow. - -### Major changes - -To achieve this, we separated the liquidity-managing logic out of the Shared Bridge into `Asset Handlers`. The basic cases are handled by `Native Token Vaults`, which handle all standard `ERC20` tokens as well as `ETH`. - -### New concepts - -- assetHandler => contract that manages liquidity (burns/mints, locks/unlocks) for a specific token (or a set of them) -- assetId => identifier used to track bridged assets across chains, linked to a specific asset handler - -## Known Issues - -### storage layout - -L2SharedBridge will be a system contract; L2NativeTokenVault will replace it (the storage layout is not yet backwards compatible) - -### bridgehubDeposit API change - -> /// @notice Allows bridgehub to acquire mintValue for L1->L2 transactions. - - /// @dev If the corresponding L2 transaction fails, refunds are issued to a refund recipient on L2. - function bridgehubDepositBaseToken( - uint256 _chainId, - bytes32 _assetId, - -Note that the new SB is not compatible with: - -- old Mailbox on Era -- old Bridgehub -- and vice versa. - -We need to either: - -- ensure that all 3 get upgraded at the same time. Upgrading BH and SB at the same time is feasible IMHO, but upgrading DP (Mailbox) in the same transaction may not be. -- have a concrete plan for such a case, e.g. explicitly state that legacy deposits will stop working. In this case we need to check (at the very least visually) that the worst that can happen is deposits not working, not funds being lost. -- add the corresponding legacy functions - -### not allowing legacy withdrawals - -> require(!\_isEraLegacyEthWithdrawal(\_chainId, \_l2BatchNumber), "ShB: legacy eth withdrawal"); - -No method to finalize an old withdrawal. -We will manually finalize all legacy withdrawals before the upgrade, i.e. withdrawals that happened before the previous Bridgehub upgrade. - -### Custom Errors not implemented - -> require(expectedDepositAmount == \_depositAmount, "3T"); // The token has non-standard transfer logic - -Custom errors will be introduced for all contracts. 
- -## Migration plan - -- Bulkheads will need to be migrated (methods added) -- Tokens will have to be transferred (methods added) diff --git a/da-contracts/contracts/CalldataDA.sol b/da-contracts/contracts/CalldataDA.sol index 3841f0c5e..f49a63d99 100644 --- a/da-contracts/contracts/CalldataDA.sol +++ b/da-contracts/contracts/CalldataDA.sol @@ -2,7 +2,7 @@ pragma solidity 0.8.24; -import {OperatorDAInputLengthTooSmall, InvalidNumberOfBlobs, InvalidBlobsHashes, InvalidL2DAOutputHash, OneBlobWithCalldata, PubdataInputTooSmall, PubdataLengthTooBig, InvalidPubdataHash} from "./DAContractsErrors.sol"; +import {OperatorDAInputTooSmall, InvalidNumberOfBlobs, InvalidL2DAOutputHash, OnlyOneBlobWithCalldataAllowed, PubdataInputTooSmall, PubdataLengthTooBig, InvalidPubdataHash} from "./DAContractsErrors.sol"; /// @dev Total number of bytes in a blob. Blob = 4096 field elements * 31 bytes per field element /// @dev EIP-4844 defines it as 131_072 but we use 4096 * 31 within our circuits to always fit within a field element @@ -45,7 +45,7 @@ abstract contract CalldataDA { // Check that it accommodates enough pubdata for the state diff hash, hash of pubdata + the number of blobs. if (_operatorDAInput.length < BLOB_DATA_OFFSET) { - revert OperatorDAInputLengthTooSmall(_operatorDAInput.length, BLOB_DATA_OFFSET); + revert OperatorDAInputTooSmall(_operatorDAInput.length, BLOB_DATA_OFFSET); } stateDiffHash = bytes32(_operatorDAInput[:32]); @@ -61,7 +61,7 @@ abstract contract CalldataDA { blobsLinearHashes = new bytes32[](_maxBlobsSupported); if (_operatorDAInput.length < BLOB_DATA_OFFSET + 32 * blobsProvided) { - revert InvalidBlobsHashes(_operatorDAInput.length, BLOB_DATA_OFFSET + 32 * blobsProvided); + revert OperatorDAInputTooSmall(_operatorDAInput.length, BLOB_DATA_OFFSET + 32 * blobsProvided); } _cloneCalldata(blobsLinearHashes, _operatorDAInput[BLOB_DATA_OFFSET:], blobsProvided); @@ -90,7 +90,7 @@ abstract contract CalldataDA { bytes calldata _pubdataInput ) internal pure virtual returns (bytes32[] memory blobCommitments, bytes calldata _pubdata) { if (_blobsProvided != 1) { - revert OneBlobWithCalldata(); + revert OnlyOneBlobWithCalldataAllowed(); } if (_pubdataInput.length < BLOB_COMMITMENT_SIZE) { revert PubdataInputTooSmall(_pubdataInput.length, BLOB_COMMITMENT_SIZE); diff --git a/da-contracts/contracts/DAContractsErrors.sol b/da-contracts/contracts/DAContractsErrors.sol index 039748629..73ee16dca 100644 --- a/da-contracts/contracts/DAContractsErrors.sol +++ b/da-contracts/contracts/DAContractsErrors.sol @@ -16,29 +16,26 @@ error PointEvalCallFailed(bytes); // 0x4daa985d error PointEvalFailed(bytes); -// 0xf4a3e629 -error OperatorDAInputLengthTooSmall(uint256 operatorDAInputLength, uint256 blobDataOffset); +// 0x885ae069 +error OperatorDAInputTooSmall(uint256 operatorDAInputLength, uint256 minAllowedLength); // 0xbeb96791 error InvalidNumberOfBlobs(uint256 blobsProvided, uint256 maxBlobsSupported); -// 0xcd384e46 -error InvalidBlobsHashes(uint256 operatorDAInputLength, uint256 blobsProvided); - // 0xd2531c15 error InvalidL2DAOutputHash(bytes32 l2DAValidatorOutputHash); -// 0x3db6e664 -error OneBlobWithCalldata(); +// 0x04e05fd1 +error OnlyOneBlobWithCalldataAllowed(); // 0x2dc9747d -error PubdataInputTooSmall(uint256 pubdataInputLength, uint256 blobCommitmentSize); +error PubdataInputTooSmall(uint256 pubdataInputLength, uint256 totalBlobsCommitmentSize); // 0x9044dff9 -error PubdataLengthTooBig(uint256 pubdataLength, uint256 blobSizeBytes); +error PubdataLengthTooBig(uint256 pubdataLength, uint256 
totalBlobSizeBytes); // 0x5513177c -error InvalidPubdataHash(bytes32 fullPubdataHash, bytes32 pubdata); +error InvalidPubdataHash(bytes32 fullPubdataHash, bytes32 providedPubdataHash); // 0xc771423e error BlobCommitmentNotPublished(); diff --git a/da-contracts/contracts/RollupL1DAValidator.sol b/da-contracts/contracts/RollupL1DAValidator.sol index d03b8127e..bf2e3dba9 100644 --- a/da-contracts/contracts/RollupL1DAValidator.sol +++ b/da-contracts/contracts/RollupL1DAValidator.sol @@ -13,7 +13,7 @@ import {InvalidPubdataSource, PubdataCommitmentsEmpty, InvalidPubdataCommitments uint256 constant BLOBS_SUPPORTED = 6; /// @dev The number of blocks within each we allow blob to be used for DA. -/// On Ethereum blobs expire within 4096 slots, i.e. 4096 * 32 blocks. We reserve +/// On Ethereum blobs expire within 4096 epochs, i.e. 4096 * 32 blocks. We reserve /// half of the time in order to ensure reader's ability to read the blob's content. uint256 constant BLOB_EXPIRATION_BLOCKS = (4096 * 32) / 2; diff --git a/gas-bound-caller/hardhat.config.ts b/gas-bound-caller/hardhat.config.ts index 56c38a783..d46561e37 100644 --- a/gas-bound-caller/hardhat.config.ts +++ b/gas-bound-caller/hardhat.config.ts @@ -5,28 +5,11 @@ import "@matterlabs/hardhat-zksync-verify"; import "@nomiclabs/hardhat-ethers"; import "hardhat-typechain"; -// This version of system contracts requires a pre release of the compiler -const COMPILER_VERSION = "1.5.0"; -const PRE_RELEASE_VERSION = "prerelease-a167aa3-code4rena"; -function getZksolcUrl(): string { - // @ts-ignore - const platform = { darwin: "macosx", linux: "linux", win32: "windows" }[process.platform]; - // @ts-ignore - const toolchain = { linux: "-musl", win32: "-gnu", darwin: "" }[process.platform]; - const arch = process.arch === "x64" ? "amd64" : process.arch; - const ext = process.platform === "win32" ? 
".exe" : ""; - - return `https://github.com/matter-labs/era-compiler-solidity/releases/download/${PRE_RELEASE_VERSION}/zksolc-${platform}-${arch}${toolchain}-v${COMPILER_VERSION}${ext}`; -} - -console.log(`Using zksolc from ${getZksolcUrl()}`); - export default { zksolc: { version: "1.5.0", compilerSource: "binary", settings: { - compilerPath: getZksolcUrl(), isSystem: true, }, }, diff --git a/l1-contracts/contracts/bridge/L1Nullifier.sol b/l1-contracts/contracts/bridge/L1Nullifier.sol index 89ee00277..3eb7d9c5e 100644 --- a/l1-contracts/contracts/bridge/L1Nullifier.sol +++ b/l1-contracts/contracts/bridge/L1Nullifier.sol @@ -28,7 +28,7 @@ import {DataEncoding} from "../common/libraries/DataEncoding.sol"; import {IBridgehub} from "../bridgehub/IBridgehub.sol"; import {L2_BASE_TOKEN_SYSTEM_CONTRACT_ADDR, L2_ASSET_ROUTER_ADDR} from "../common/L2ContractAddresses.sol"; import {DataEncoding} from "../common/libraries/DataEncoding.sol"; -import {LegacyBridgeNotSet, Unauthorized, SharedBridgeKey, DepositExists, AddressAlreadySet, InvalidProof, DepositDoesNotExist, SharedBridgeValueNotSet, WithdrawalAlreadyFinalized, L2WithdrawalMessageWrongLength, InvalidSelector, SharedBridgeValueNotSet, ZeroAddress} from "../common/L1ContractErrors.sol"; +import {LegacyMethodForNonL1Token, LegacyBridgeNotSet, Unauthorized, SharedBridgeKey, DepositExists, AddressAlreadySet, InvalidProof, DepositDoesNotExist, SharedBridgeValueNotSet, WithdrawalAlreadyFinalized, L2WithdrawalMessageWrongLength, InvalidSelector, SharedBridgeValueNotSet, ZeroAddress} from "../common/L1ContractErrors.sol"; import {WrongL2Sender, NativeTokenVaultAlreadySet, EthTransferFailed, WrongMsgLength} from "./L1BridgeContractErrors.sol"; /// @author Matter Labs @@ -570,11 +570,14 @@ contract L1Nullifier is IL1Nullifier, ReentrancyGuard, Ownable2StepUpgradeable, // slither-disable-next-line unused-return (amount, ) = UnsafeBytes.readUint256(_l2ToL1message, offset); assetId = BRIDGE_HUB.baseTokenAssetId(_chainId); - address baseToken = BRIDGE_HUB.baseToken(_chainId); transferData = DataEncoding.encodeBridgeMintData({ _originalCaller: address(0), _remoteReceiver: l1Receiver, - _originToken: baseToken, + // Note, that `assetId` could belong to a token native to an L2, and so + // the logic for determining the correct origin token address will be complex. + // It is expected that this value won't be used in the NativeTokenVault and so providing + // any value is acceptable here. + _originToken: address(0), _amount: amount, _erc20Metadata: new bytes(0) }); @@ -642,9 +645,13 @@ contract L1Nullifier is IL1Nullifier, ReentrancyGuard, Ownable2StepUpgradeable, bytes32[] calldata _merkleProof ) external { bytes32 assetId = l1NativeTokenVault.assetId(_l1Token); + bytes32 ntvAssetId = DataEncoding.encodeNTVAssetId(block.chainid, _l1Token); if (assetId == bytes32(0)) { - assetId = DataEncoding.encodeNTVAssetId(block.chainid, _l1Token); + assetId = ntvAssetId; + } else if (assetId != ntvAssetId) { + revert LegacyMethodForNonL1Token(); } + // For legacy deposits, the l2 receiver is not required to check tx data hash // The token address does not have to be provided for this functionality either. 
bytes memory assetData = DataEncoding.encodeBridgeBurnData(_amount, address(0), address(0)); diff --git a/l1-contracts/contracts/bridge/asset-router/AssetRouterBase.sol b/l1-contracts/contracts/bridge/asset-router/AssetRouterBase.sol index a82deec82..ba30261f1 100644 --- a/l1-contracts/contracts/bridge/asset-router/AssetRouterBase.sol +++ b/l1-contracts/contracts/bridge/asset-router/AssetRouterBase.sol @@ -88,7 +88,7 @@ abstract contract AssetRouterBase is IAssetRouterBase, Ownable2StepUpgradeable, } _setAssetHandler(assetId, _assetHandlerAddress); assetDeploymentTracker[assetId] = msg.sender; - emit AssetDeploymentTrackerRegistered(assetId, _assetRegistrationData, sender); + emit AssetDeploymentTrackerRegistered(assetId, _assetRegistrationData, msg.sender); } /*////////////////////////////////////////////////////////////// diff --git a/l1-contracts/contracts/bridge/asset-router/L1AssetRouter.sol b/l1-contracts/contracts/bridge/asset-router/L1AssetRouter.sol index db9838704..3ac8349a7 100644 --- a/l1-contracts/contracts/bridge/asset-router/L1AssetRouter.sol +++ b/l1-contracts/contracts/bridge/asset-router/L1AssetRouter.sol @@ -22,7 +22,7 @@ import {DataEncoding} from "../../common/libraries/DataEncoding.sol"; import {AddressAliasHelper} from "../../vendor/AddressAliasHelper.sol"; import {TWO_BRIDGES_MAGIC_VALUE, ETH_TOKEN_ADDRESS} from "../../common/Config.sol"; import {NativeTokenVaultAlreadySet} from "../L1BridgeContractErrors.sol"; -import {LegacyBridgeUsesNonNativeToken, NonEmptyMsgValue, UnsupportedEncodingVersion, AssetIdNotSupported, AssetHandlerDoesNotExist, Unauthorized, ZeroAddress, TokenNotSupported, AddressAlreadyUsed, TokensWithFeesNotSupported} from "../../common/L1ContractErrors.sol"; +import {LegacyEncodingUsedForNonL1Token, LegacyBridgeUsesNonNativeToken, NonEmptyMsgValue, UnsupportedEncodingVersion, AssetIdNotSupported, AssetHandlerDoesNotExist, Unauthorized, ZeroAddress, TokenNotSupported, AddressAlreadyUsed, TokensWithFeesNotSupported} from "../../common/L1ContractErrors.sol"; import {L2_ASSET_ROUTER_ADDR} from "../../common/L2ContractAddresses.sol"; import {IBridgehub, L2TransactionRequestTwoBridgesInner, L2TransactionRequestDirect} from "../../bridgehub/IBridgehub.sol"; @@ -386,6 +386,12 @@ contract L1AssetRouter is AssetRouterBase, IL1AssetRouter, ReentrancyGuard { ); bytes32 assetId = _ensureTokenRegisteredWithNTV(_l1Token); + // We ensure that the legacy data format can not be used for tokens that did not originate from L1. + bytes32 expectedAssetId = DataEncoding.encodeNTVAssetId(block.chainid, _l1Token); + if (assetId != expectedAssetId) { + revert LegacyEncodingUsedForNonL1Token(); + } + if (assetId == ETH_TOKEN_ASSET_ID) { // In the old SDK/contracts the user had to always provide `0` as the deposit amount for ETH token, while // ultimately the provided `msg.value` was used as the deposit amount. This check is needed for backwards compatibility. 
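The two new checks above (`LegacyMethodForNonL1Token` in `L1Nullifier` and `LegacyEncodingUsedForNonL1Token` in `L1AssetRouter`) rely on the same idea: recompute the NTV asset id with L1's own `block.chainid` and compare it with the registered one. The following is a minimal sketch of that idea only, not part of this diff; it assumes the asset id is a hash over (origin chain id, native token vault address, token address), and the constant and helper names below are hypothetical rather than the exact `DataEncoding` code.

```solidity
// SPDX-License-Identifier: MIT
pragma solidity 0.8.24;

/// Illustrative sketch of the legacy-encoding guard; the encoding and names are
/// assumptions for explanation purposes, not the production DataEncoding library.
library NTVAssetIdSketch {
    // Hypothetical placeholder for the native token vault address used in the encoding.
    address internal constant NTV_PLACEHOLDER = address(0x10004);

    function encodeAssetId(uint256 _originChainId, address _token) internal pure returns (bytes32) {
        return keccak256(abi.encode(_originChainId, NTV_PLACEHOLDER, _token));
    }

    /// @dev Legacy (token-address based) entry points may only be used when the
    /// registered asset id was derived from this chain's id, i.e. the token is L1-native.
    function legacyEncodingAllowed(bytes32 _registeredAssetId, address _token) internal view returns (bool) {
        return _registeredAssetId == bytes32(0) || _registeredAssetId == encodeAssetId(block.chainid, _token);
    }
}
```

An asset id registered for a token that originated on another chain was hashed with that chain's id, so it can never equal the value recomputed here with `block.chainid`, and the legacy path reverts.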
diff --git a/l1-contracts/contracts/bridge/ntv/NativeTokenVault.sol b/l1-contracts/contracts/bridge/ntv/NativeTokenVault.sol index 16d0bff01..021cab58a 100644 --- a/l1-contracts/contracts/bridge/ntv/NativeTokenVault.sol +++ b/l1-contracts/contracts/bridge/ntv/NativeTokenVault.sol @@ -223,9 +223,9 @@ abstract contract NativeTokenVault is } } - function tryRegisterTokenFromBurnData(bytes calldata _data, bytes32 _expectedAssetId) external { + function tryRegisterTokenFromBurnData(bytes calldata _burnData, bytes32 _expectedAssetId) external { // slither-disable-next-line unused-return - (, , address tokenAddress) = DataEncoding.decodeBridgeBurnData(_data); + (, , address tokenAddress) = DataEncoding.decodeBridgeBurnData(_burnData); if (tokenAddress == address(0)) { revert ZeroAddress(); @@ -338,8 +338,7 @@ abstract contract NativeTokenVault is address _receiver, address _nativeToken ) internal virtual returns (bytes memory _bridgeMintData) { - address nativeToken = tokenAddress[_assetId]; - if (nativeToken == WETH_TOKEN) { + if (_nativeToken == WETH_TOKEN) { // This ensures that WETH_TOKEN can never be bridged from chains it is native to. // It can only be withdrawn from the chain where it has already gotten. revert BurningNativeWETHNotSupported(); diff --git a/l1-contracts/contracts/bridgehub/CTMDeploymentTracker.sol b/l1-contracts/contracts/bridgehub/CTMDeploymentTracker.sol index d8ef1ba3a..4409af221 100644 --- a/l1-contracts/contracts/bridgehub/CTMDeploymentTracker.sol +++ b/l1-contracts/contracts/bridgehub/CTMDeploymentTracker.sol @@ -104,7 +104,11 @@ contract CTMDeploymentTracker is ICTMDeploymentTracker, Ownable2StepUpgradeable /// @notice The function called by the Bridgehub after the L2 transaction has been initiated. /// @dev Not used in this contract. In case the transaction fails, we can just re-try it. - function bridgehubConfirmL2Transaction(uint256 _chainId, bytes32 _txDataHash, bytes32 _txHash) external {} + function bridgehubConfirmL2Transaction( + uint256 _chainId, + bytes32 _txDataHash, + bytes32 _txHash + ) external onlyBridgehub {} /// @notice Used to register the ctm asset in L2 AssetRouter. 
/// @param _originalCaller the address that called the Router diff --git a/l1-contracts/contracts/common/L1ContractErrors.sol b/l1-contracts/contracts/common/L1ContractErrors.sol index 7303618b0..e8754aa35 100644 --- a/l1-contracts/contracts/common/L1ContractErrors.sol +++ b/l1-contracts/contracts/common/L1ContractErrors.sol @@ -362,6 +362,8 @@ error IncorrectTokenAddressFromNTV(bytes32 assetId, address tokenAddress); error InvalidProofLengthForFinalNode(); // 0x7acd7817 error TokenIsNotLegacy(); +// 0xfade089a +error LegacyEncodingUsedForNonL1Token(); // 0xa51fa558 error TokenIsLegacy(); // 0x29963361 @@ -387,6 +389,8 @@ error InvalidNTVBurnData(); error InvalidSystemLogsLength(); // 0x8efef97a error LegacyBridgeNotSet(); +// 0x767eed08 +error LegacyMethodForNonL1Token(); enum SharedBridgeKey { PostUpgradeFirstBatch, diff --git a/l1-contracts/contracts/governance/ChainAdmin.sol b/l1-contracts/contracts/governance/ChainAdmin.sol index d61c01d17..6423f5a91 100644 --- a/l1-contracts/contracts/governance/ChainAdmin.sol +++ b/l1-contracts/contracts/governance/ChainAdmin.sol @@ -19,6 +19,8 @@ import {ReentrancyGuard} from "../common/ReentrancyGuard.sol"; /// @dev Note, that it does not implement any form of access control by default, but instead utilizes /// so called "restrictions": contracts that implement the `IRestriction` interface and ensure that /// particular restrictions are ensured for the contract, including access control, security invariants, etc. +/// @dev This is a new EXPERIMENTAL version of the `ChainAdmin` implementation. While chains may opt into using it, +/// using the old `ChainAdminSingleOwner` is recommended. contract ChainAdmin is IChainAdmin, ReentrancyGuard { using EnumerableSet for EnumerableSet.AddressSet; diff --git a/l1-contracts/contracts/governance/ChainAdminSingleOwner.sol b/l1-contracts/contracts/governance/ChainAdminSingleOwner.sol new file mode 100644 index 000000000..7f4074f22 --- /dev/null +++ b/l1-contracts/contracts/governance/ChainAdminSingleOwner.sol @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: MIT + +pragma solidity 0.8.24; + +import {Ownable2Step} from "@openzeppelin/contracts-v4/access/Ownable2Step.sol"; +import {IChainAdminSingleOwner} from "./IChainAdminSingleOwner.sol"; +import {IAdmin} from "../state-transition/chain-interfaces/IAdmin.sol"; +import {NoCallsProvided, Unauthorized, ZeroAddress} from "../common/L1ContractErrors.sol"; + +/// @author Matter Labs +/// @custom:security-contact security@matterlabs.dev +/// @notice The contract is designed to hold the `admin` role in ZKSync Chain (State Transition) contracts. +/// The owner of the contract can perform any external calls and also save the information needed for +/// the blockchain node to accept the protocol upgrade. Another role - `tokenMultiplierSetter` can be used in the contract +/// to change the base token gas price in the Chain contract. +contract ChainAdminSingleOwner is IChainAdminSingleOwner, Ownable2Step { + /// @notice Mapping of protocol versions to their expected upgrade timestamps. + /// @dev Needed for the offchain node administration to know when to start building batches with the new protocol version. + mapping(uint256 protocolVersion => uint256 upgradeTimestamp) public protocolVersionToUpgradeTimestamp; + + /// @notice The address which can call `setTokenMultiplier` function to change the base token gas price in the Chain contract. 
+ /// @dev The token base price can be changed quite often, so the private key for this role is supposed to be stored in the node + /// and used by the automated service in a way similar to the sequencer workflow. + address public tokenMultiplierSetter; + + constructor(address _initialOwner, address _initialTokenMultiplierSetter) { + if (_initialOwner == address(0)) { + revert ZeroAddress(); + } + _transferOwnership(_initialOwner); + // Can be zero if no one has this permission. + tokenMultiplierSetter = _initialTokenMultiplierSetter; + emit NewTokenMultiplierSetter(address(0), _initialTokenMultiplierSetter); + } + + /// @notice Updates the address responsible for setting token multipliers on the Chain contract . + /// @param _tokenMultiplierSetter The new address to be set as the token multiplier setter. + function setTokenMultiplierSetter(address _tokenMultiplierSetter) external onlyOwner { + emit NewTokenMultiplierSetter(tokenMultiplierSetter, _tokenMultiplierSetter); + tokenMultiplierSetter = _tokenMultiplierSetter; + } + + /// @notice Set the expected upgrade timestamp for a specific protocol version. + /// @param _protocolVersion The ZKsync chain protocol version. + /// @param _upgradeTimestamp The timestamp at which the chain node should expect the upgrade to happen. + function setUpgradeTimestamp(uint256 _protocolVersion, uint256 _upgradeTimestamp) external onlyOwner { + protocolVersionToUpgradeTimestamp[_protocolVersion] = _upgradeTimestamp; + emit UpdateUpgradeTimestamp(_protocolVersion, _upgradeTimestamp); + } + + /// @notice Execute multiple calls as part of contract administration. + /// @param _calls Array of Call structures defining target, value, and data for each call. + /// @param _requireSuccess If true, reverts transaction on any call failure. + /// @dev Intended for batch processing of contract interactions, managing gas efficiency and atomicity of operations. + function multicall(Call[] calldata _calls, bool _requireSuccess) external payable onlyOwner { + if (_calls.length == 0) { + revert NoCallsProvided(); + } + // solhint-disable-next-line gas-length-in-loops + for (uint256 i = 0; i < _calls.length; ++i) { + // slither-disable-next-line arbitrary-send-eth + (bool success, bytes memory returnData) = _calls[i].target.call{value: _calls[i].value}(_calls[i].data); + if (_requireSuccess && !success) { + // Propagate an error if the call fails. + assembly { + revert(add(returnData, 0x20), mload(returnData)) + } + } + emit CallExecuted(_calls[i], success, returnData); + } + } + + /// @notice Sets the token multiplier in the specified Chain contract. + /// @param _chainContract The chain contract address where the token multiplier will be set. + /// @param _nominator The numerator part of the token multiplier. + /// @param _denominator The denominator part of the token multiplier. + function setTokenMultiplier(IAdmin _chainContract, uint128 _nominator, uint128 _denominator) external { + if (msg.sender != tokenMultiplierSetter) { + revert Unauthorized(msg.sender); + } + _chainContract.setTokenMultiplier(_nominator, _denominator); + } + + /// @dev Contract might receive/hold ETH as part of the maintenance process. 
+ receive() external payable {} +} diff --git a/l1-contracts/contracts/governance/IChainAdminSingleOwner.sol b/l1-contracts/contracts/governance/IChainAdminSingleOwner.sol new file mode 100644 index 000000000..9de89a9db --- /dev/null +++ b/l1-contracts/contracts/governance/IChainAdminSingleOwner.sol @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: MIT + +pragma solidity 0.8.24; + +import {IAdmin} from "../state-transition/chain-interfaces/IAdmin.sol"; + +/// @title ChainAdmin contract interface +/// @author Matter Labs +/// @custom:security-contact security@matterlabs.dev +interface IChainAdminSingleOwner { + /// @dev Represents a call to be made during multicall. + /// @param target The address to which the call will be made. + /// @param value The amount of Ether (in wei) to be sent along with the call. + /// @param data The calldata to be executed on the `target` address. + struct Call { + address target; + uint256 value; + bytes data; + } + + /// @notice Emitted when the expected upgrade timestamp for a specific protocol version is set. + event UpdateUpgradeTimestamp(uint256 indexed _protocolVersion, uint256 _upgradeTimestamp); + + /// @notice Emitted when the call is executed from the contract. + event CallExecuted(Call _call, bool _success, bytes _returnData); + + /// @notice Emitted when the new token multiplier address is set. + event NewTokenMultiplierSetter(address _oldTokenMultiplierSetter, address _newTokenMultiplierSetter); + + function setTokenMultiplierSetter(address _tokenMultiplierSetter) external; + + function setUpgradeTimestamp(uint256 _protocolVersion, uint256 _upgradeTimestamp) external; + + function multicall(Call[] calldata _calls, bool _requireSuccess) external payable; + + function setTokenMultiplier(IAdmin _chainContract, uint128 _nominator, uint128 _denominator) external; +} diff --git a/l1-contracts/contracts/state-transition/L1StateTransitionErrors.sol b/l1-contracts/contracts/state-transition/L1StateTransitionErrors.sol index a7fb2e589..7ba2540d4 100644 --- a/l1-contracts/contracts/state-transition/L1StateTransitionErrors.sol +++ b/l1-contracts/contracts/state-transition/L1StateTransitionErrors.sol @@ -63,28 +63,25 @@ error PriorityOpsDataRightPathLengthIsNotZero(); error PriorityOpsDataItemHashesLengthIsNotZero(); // 0x885ae069 -error OperatorDAInputTooSmall(uint256 operatorDAInputLength, uint256 BlobDataOffset); +error OperatorDAInputTooSmall(uint256 operatorDAInputLength, uint256 minAllowedLength); // 0xbeb96791 error InvalidNumberOfBlobs(uint256 blobsProvided, uint256 maxBlobsSupported); -// 0xcd384e46 -error InvalidBlobsHashes(uint256 operatorDAInputLength, uint256 minNumberOfBlobHashes); - // 0xd2531c15 error InvalidL2DAOutputHash(bytes32 l2DAValidatorOutputHash); -// 0x77a3c423 -error OnlyOneBlobWithCalldata(); +// 0x04e05fd1 +error OnlyOneBlobWithCalldataAllowed(); -// 0x086bb220 -error PubdataTooSmall(uint256 pubdataInputLength, uint256 blobCommitmentSize); +// 0x2dc9747d +error PubdataInputTooSmall(uint256 pubdataInputLength, uint256 totalBlobsCommitmentSize); -// 0xcba35a08 -error PubdataTooLong(uint256 pubdataLength, uint256 blobSizeBytes); +// 0x9044dff9 +error PubdataLengthTooBig(uint256 pubdataLength, uint256 totalBlobSizeBytes); // 0x5513177c -error InvalidPubdataHash(bytes32 fullPubdataHash, bytes32 pubdata); +error InvalidPubdataHash(bytes32 fullPubdataHash, bytes32 providedPubdataHash); // 0x5717f940 error InvalidPubdataSource(uint8 pubdataSource); @@ -95,9 +92,6 @@ error BlobHashBlobCommitmentMismatchValue(); // 0x7fbff2dd error 
L1DAValidatorInvalidSender(address msgSender); -// 0x5ade0455 -error RootMismatch(); - // 0xc06789fa error InvalidCommitment(); diff --git a/l1-contracts/contracts/state-transition/data-availability/CalldataDA.sol b/l1-contracts/contracts/state-transition/data-availability/CalldataDA.sol index d319d92df..fd3d91bd0 100644 --- a/l1-contracts/contracts/state-transition/data-availability/CalldataDA.sol +++ b/l1-contracts/contracts/state-transition/data-availability/CalldataDA.sol @@ -2,7 +2,7 @@ pragma solidity 0.8.24; -import {OperatorDAInputTooSmall, InvalidBlobsHashes, InvalidNumberOfBlobs, InvalidL2DAOutputHash, OnlyOneBlobWithCalldata, PubdataTooSmall, PubdataTooLong, InvalidPubdataHash} from "../L1StateTransitionErrors.sol"; +import {OperatorDAInputTooSmall, InvalidNumberOfBlobs, InvalidL2DAOutputHash, OnlyOneBlobWithCalldataAllowed, PubdataInputTooSmall, PubdataLengthTooBig, InvalidPubdataHash} from "../L1StateTransitionErrors.sol"; /// @dev Total number of bytes in a blob. Blob = 4096 field elements * 31 bytes per field element /// @dev EIP-4844 defines it as 131_072 but we use 4096 * 31 within our circuits to always fit within a field element @@ -61,7 +61,7 @@ abstract contract CalldataDA { blobsLinearHashes = new bytes32[](_maxBlobsSupported); if (_operatorDAInput.length < BLOB_DATA_OFFSET + 32 * blobsProvided) { - revert InvalidBlobsHashes(_operatorDAInput.length, BLOB_DATA_OFFSET + 32 * blobsProvided); + revert OperatorDAInputTooSmall(_operatorDAInput.length, BLOB_DATA_OFFSET + 32 * blobsProvided); } _cloneCalldata(blobsLinearHashes, _operatorDAInput[BLOB_DATA_OFFSET:], blobsProvided); @@ -90,10 +90,10 @@ abstract contract CalldataDA { bytes calldata _pubdataInput ) internal pure virtual returns (bytes32[] memory blobCommitments, bytes calldata _pubdata) { if (_blobsProvided != 1) { - revert OnlyOneBlobWithCalldata(); + revert OnlyOneBlobWithCalldataAllowed(); } if (_pubdataInput.length < BLOB_COMMITMENT_SIZE) { - revert PubdataTooSmall(_pubdataInput.length, BLOB_COMMITMENT_SIZE); + revert PubdataInputTooSmall(_pubdataInput.length, BLOB_COMMITMENT_SIZE); } // We typically do not know whether we'll use calldata or blobs at the time when @@ -104,7 +104,7 @@ abstract contract CalldataDA { _pubdata = _pubdataInput[:_pubdataInput.length - BLOB_COMMITMENT_SIZE]; if (_pubdata.length > BLOB_SIZE_BYTES) { - revert PubdataTooLong(_pubdata.length, BLOB_SIZE_BYTES); + revert PubdataLengthTooBig(_pubdata.length, BLOB_SIZE_BYTES); } if (_fullPubdataHash != keccak256(_pubdata)) { revert InvalidPubdataHash(_fullPubdataHash, keccak256(_pubdata)); diff --git a/l1-contracts/contracts/state-transition/data-availability/CalldataDAGateway.sol b/l1-contracts/contracts/state-transition/data-availability/CalldataDAGateway.sol index 90ec4a3ba..ac9bb34e9 100644 --- a/l1-contracts/contracts/state-transition/data-availability/CalldataDAGateway.sol +++ b/l1-contracts/contracts/state-transition/data-availability/CalldataDAGateway.sol @@ -3,7 +3,7 @@ pragma solidity 0.8.24; import {CalldataDA, BLOB_COMMITMENT_SIZE, BLOB_SIZE_BYTES} from "./CalldataDA.sol"; -import {PubdataTooSmall, PubdataTooLong, InvalidPubdataHash} from "../L1StateTransitionErrors.sol"; +import {PubdataInputTooSmall, PubdataLengthTooBig, InvalidPubdataHash} from "../L1StateTransitionErrors.sol"; /// @notice Contract that contains the functionality for processing the calldata DA. /// @dev The expected l2DAValidator that should be used with it `RollupL2DAValidator`. 
@@ -16,7 +16,7 @@ abstract contract CalldataDAGateway is CalldataDA { bytes calldata _pubdataInput ) internal pure override returns (bytes32[] memory blobCommitments, bytes calldata _pubdata) { if (_pubdataInput.length < _blobsProvided * BLOB_COMMITMENT_SIZE) { - revert PubdataTooSmall(_pubdataInput.length, _blobsProvided * BLOB_COMMITMENT_SIZE); + revert PubdataInputTooSmall(_pubdataInput.length, _blobsProvided * BLOB_COMMITMENT_SIZE); } // We typically do not know whether we'll use calldata or blobs at the time when @@ -26,7 +26,7 @@ abstract contract CalldataDAGateway is CalldataDA { _pubdata = _pubdataInput[:_pubdataInput.length - _blobsProvided * BLOB_COMMITMENT_SIZE]; if (_pubdata.length > _blobsProvided * BLOB_SIZE_BYTES) { - revert PubdataTooLong(_pubdata.length, _blobsProvided * BLOB_SIZE_BYTES); + revert PubdataLengthTooBig(_pubdata.length, _blobsProvided * BLOB_SIZE_BYTES); } if (_fullPubdataHash != keccak256(_pubdata)) { revert InvalidPubdataHash(_fullPubdataHash, keccak256(_pubdata)); diff --git a/l1-contracts/contracts/state-transition/libraries/PriorityTree.sol b/l1-contracts/contracts/state-transition/libraries/PriorityTree.sol index b39eed029..0032fef9a 100644 --- a/l1-contracts/contracts/state-transition/libraries/PriorityTree.sol +++ b/l1-contracts/contracts/state-transition/libraries/PriorityTree.sol @@ -5,7 +5,7 @@ pragma solidity ^0.8.21; import {DynamicIncrementalMerkle} from "../../common/libraries/DynamicIncrementalMerkle.sol"; import {Merkle} from "../../common/libraries/Merkle.sol"; import {PriorityTreeCommitment} from "../../common/Config.sol"; -import {RootMismatch, InvalidCommitment, InvalidStartIndex, InvalidUnprocessedIndex, InvalidNextLeafIndex} from "../L1StateTransitionErrors.sol"; +import {NotHistoricalRoot, InvalidCommitment, InvalidStartIndex, InvalidUnprocessedIndex, InvalidNextLeafIndex} from "../L1StateTransitionErrors.sol"; struct PriorityOpsBatchInfo { bytes32[] leftPath; @@ -82,7 +82,7 @@ library PriorityTree { _priorityOpsData.itemHashes ); if (!_tree.historicalRoots[expectedRoot]) { - revert RootMismatch(); + revert NotHistoricalRoot(); } _tree.unprocessedIndex += _priorityOpsData.itemHashes.length; } diff --git a/l1-contracts/deploy-scripts/AcceptAdmin.s.sol b/l1-contracts/deploy-scripts/AcceptAdmin.s.sol index bb648262c..043b293cd 100644 --- a/l1-contracts/deploy-scripts/AcceptAdmin.s.sol +++ b/l1-contracts/deploy-scripts/AcceptAdmin.s.sol @@ -9,6 +9,7 @@ import {IAdmin} from "contracts/state-transition/chain-interfaces/IAdmin.sol"; import {ChainAdmin} from "contracts/governance/ChainAdmin.sol"; import {AccessControlRestriction} from "contracts/governance/AccessControlRestriction.sol"; import {IChainAdmin} from "contracts/governance/IChainAdmin.sol"; +import {IChainAdminSingleOwner} from "contracts/governance/IChainAdminSingleOwner.sol"; import {Call} from "contracts/governance/Common.sol"; import {Utils} from "./Utils.sol"; import {IGovernance} from "contracts/governance/IGovernance.sol"; @@ -76,10 +77,31 @@ contract AcceptAdmin is Script { // This function should be called by the owner to update token multiplier setter role function chainSetTokenMultiplierSetter( + address chainAdmin, address accessControlRestriction, address diamondProxyAddress, address setter ) public { + if (accessControlRestriction == address(0)) { + _chainSetTokenMultiplierSetterSingleOwner(chainAdmin, setter); + } else { + _chainSetTokenMultiplierSetterLatestChainAdmin(accessControlRestriction, diamondProxyAddress, setter); + } + } + + function 
_chainSetTokenMultiplierSetterSingleOwner(address chainAdmin, address setter) internal { + IChainAdminSingleOwner admin = IChainAdminSingleOwner(chainAdmin); + + vm.startBroadcast(); + admin.setTokenMultiplierSetter(setter); + vm.stopBroadcast(); + } + + function _chainSetTokenMultiplierSetterLatestChainAdmin( + address accessControlRestriction, + address diamondProxyAddress, + address setter + ) internal { AccessControlRestriction restriction = AccessControlRestriction(accessControlRestriction); if ( diff --git a/l1-contracts/deploy-scripts/DeployL1.s.sol b/l1-contracts/deploy-scripts/DeployL1.s.sol index 5dd225453..b430791fb 100644 --- a/l1-contracts/deploy-scripts/DeployL1.s.sol +++ b/l1-contracts/deploy-scripts/DeployL1.s.sol @@ -281,7 +281,6 @@ contract DeployL1Script is Script, DeployUtils { bridgehub.addChainTypeManager(addresses.stateTransition.chainTypeManagerProxy); console.log("ChainTypeManager registered"); CTMDeploymentTracker ctmDT = CTMDeploymentTracker(addresses.bridgehub.ctmDeploymentTrackerProxy); - // vm.startBroadcast(msg.sender); L1AssetRouter sharedBridge = L1AssetRouter(addresses.bridges.sharedBridgeProxy); sharedBridge.setAssetDeploymentTracker( bytes32(uint256(uint160(addresses.stateTransition.chainTypeManagerProxy))), @@ -294,8 +293,6 @@ contract DeployL1Script is Script, DeployUtils { console.log("CTM registered in CTMDeploymentTracker"); bytes32 assetId = bridgehub.ctmAssetIdFromAddress(addresses.stateTransition.chainTypeManagerProxy); - // console.log(address(bridgehub.ctmDeployer()), addresses.bridgehub.ctmDeploymentTrackerProxy); - // console.log(address(bridgehub.ctmDeployer().BRIDGE_HUB()), addresses.bridgehub.bridgehubProxy); console.log( "CTM in router 1", sharedBridge.assetHandlerAddress(assetId), @@ -398,7 +395,6 @@ contract DeployL1Script is Script, DeployUtils { Bridgehub bridgehub = Bridgehub(addresses.bridgehub.bridgehubProxy); vm.startBroadcast(msg.sender); bridgehub.addTokenAssetId(bridgehub.baseTokenAssetId(config.eraChainId)); - // bridgehub.setSharedBridge(addresses.bridges.sharedBridgeProxy); bridgehub.setAddresses( addresses.bridges.sharedBridgeProxy, ICTMDeploymentTracker(addresses.bridgehub.ctmDeploymentTrackerProxy), diff --git a/l1-contracts/deploy-scripts/DeployL2Contracts.sol b/l1-contracts/deploy-scripts/DeployL2Contracts.sol index 1923672b4..a6bc5cb5a 100644 --- a/l1-contracts/deploy-scripts/DeployL2Contracts.sol +++ b/l1-contracts/deploy-scripts/DeployL2Contracts.sol @@ -90,21 +90,6 @@ contract DeployL2Script is Script { saveOutput(); } - function runDeployLegacySharedBridge() public { - deploySharedBridge(true); - } - - function runDeploySharedBridge() public { - deploySharedBridge(false); - } - - // TODO(EVM-745): port legacy contract tests to new contracts - function deploySharedBridge(bool legacyBridge) internal { - initializeConfig(); - - saveOutput(); - } - function runDefaultUpgrader() public { initializeConfig(); diff --git a/l1-contracts/deploy-scripts/DeployUtils.s.sol b/l1-contracts/deploy-scripts/DeployUtils.s.sol index 44c56ce42..a3d2ff825 100644 --- a/l1-contracts/deploy-scripts/DeployUtils.s.sol +++ b/l1-contracts/deploy-scripts/DeployUtils.s.sol @@ -55,6 +55,7 @@ import {IMessageRoot} from "contracts/bridgehub/IMessageRoot.sol"; import {IAssetRouterBase} from "contracts/bridge/asset-router/IAssetRouterBase.sol"; import {L2ContractsBytecodesLib} from "./L2ContractsBytecodesLib.sol"; import {BytecodesSupplier} from "contracts/upgrades/BytecodesSupplier.sol"; +import {ChainAdminSingleOwner} from 
"contracts/governance/ChainAdminSingleOwner.sol"; struct FixedForceDeploymentsData { uint256 l1ChainId; @@ -332,7 +333,31 @@ contract DeployUtils is Script { } function deployChainAdmin() internal { - address accessControlRestriction = deployViaCreate2( + // TODO(EVM-924): provide an option to deploy a non-single owner ChainAdmin. + (address chainAdmin, address accessControlRestriction) = deployChainAdminSingleOwner(); + + addresses.accessControlRestrictionAddress = accessControlRestriction; + addresses.chainAdmin = chainAdmin; + } + + function deployChainAdminSingleOwner() internal returns (address chainAdmin, address accessControlRestriction) { + chainAdmin = deployViaCreate2( + type(ChainAdminSingleOwner).creationCode, + abi.encode(config.ownerAddress, address(0)) + ); + // The single owner chainAdmin does not have a separate control restriction contract. + // We set to it to zero explicitly so that it is clear to the reader. + accessControlRestriction = address(0); + + console.log("ChainAdminSingleOwner deployed at:", accessControlRestriction); + } + + // TODO(EVM-924): this function is unused + function deployChainAdminWithRestrictions() + internal + returns (address chainAdmin, address accessControlRestriction) + { + accessControlRestriction = deployViaCreate2( type(AccessControlRestriction).creationCode, abi.encode(uint256(0), config.ownerAddress) ); @@ -342,9 +367,8 @@ contract DeployUtils is Script { restrictions[0] = accessControlRestriction; addresses.accessControlRestrictionAddress = accessControlRestriction; - address contractAddress = deployViaCreate2(type(ChainAdmin).creationCode, abi.encode(restrictions)); - console.log("ChainAdmin deployed at:", contractAddress); - addresses.chainAdmin = contractAddress; + chainAdmin = deployViaCreate2(type(ChainAdmin).creationCode, abi.encode(restrictions)); + console.log("ChainAdmin deployed at:", chainAdmin); } function deployTransparentProxyAdmin() internal { diff --git a/l1-contracts/deploy-scripts/GatewayCTMFromL1.s.sol b/l1-contracts/deploy-scripts/GatewayCTMFromL1.s.sol index 8bfeab174..023ccc0c9 100644 --- a/l1-contracts/deploy-scripts/GatewayCTMFromL1.s.sol +++ b/l1-contracts/deploy-scripts/GatewayCTMFromL1.s.sol @@ -48,6 +48,8 @@ import {DeployedContracts, GatewayCTMDeployerConfig} from "contracts/state-trans import {GatewayCTMDeployerHelper} from "./GatewayCTMDeployerHelper.sol"; /// @notice Scripts that is responsible for preparing the chain to become a gateway +/// @dev IMPORTANT: this script is not intended to be used in production. +/// TODO(EVM-925): support secure gateway deployment. contract GatewayCTMFromL1 is Script { using stdToml for string; diff --git a/l1-contracts/deploy-scripts/GatewayPreparation.s.sol b/l1-contracts/deploy-scripts/GatewayPreparation.s.sol index 1f2c518d8..2701e30c6 100644 --- a/l1-contracts/deploy-scripts/GatewayPreparation.s.sol +++ b/l1-contracts/deploy-scripts/GatewayPreparation.s.sol @@ -64,6 +64,8 @@ struct Config { } /// @notice Scripts that is responsible for preparing the chain to become a gateway +/// @dev IMPORTANT: this script is not intended to be used in production. +/// TODO(EVM-925): support secure gateway deployment. contract GatewayPreparation is Script { using stdToml for string; @@ -311,7 +313,7 @@ contract GatewayPreparation is Script { function deployL2ChainAdmin() public { initializeConfig(); - // FIXME: it is deployed without any restrictions. + // TODO(EVM-925): it is deployed without any restrictions. 
address l2ChainAdminAddress = Utils.deployThroughL1({ bytecode: L2ContractsBytecodesLib.readChainAdminBytecode(), constructorargs: abi.encode(new address[](0)), @@ -436,7 +438,7 @@ contract GatewayPreparation is Script { l2Calldata = abi.encodeCall(ChainAdmin.multicall, (calls, true)); } - // FIXME: this should migrate to use L2 transactions directly + // TODO(EVM-925): this should migrate to use L2 transactions directly bytes32 l2TxHash = Utils.runAdminL1L2DirectTransaction( _getL1GasPrice(), chainAdmin, diff --git a/l1-contracts/deploy-scripts/RegisterZKChain.s.sol b/l1-contracts/deploy-scripts/RegisterZKChain.s.sol index 5dbf2b2b1..65675b806 100644 --- a/l1-contracts/deploy-scripts/RegisterZKChain.s.sol +++ b/l1-contracts/deploy-scripts/RegisterZKChain.s.sol @@ -32,6 +32,7 @@ import {Call} from "contracts/governance/Common.sol"; import {ETH_TOKEN_ADDRESS} from "contracts/common/Config.sol"; import {CreateAndTransfer} from "./CreateAndTransfer.sol"; +import {ChainAdminSingleOwner} from "contracts/governance/ChainAdminSingleOwner.sol"; // solhint-disable-next-line gas-struct-packing struct Config { @@ -333,24 +334,47 @@ contract RegisterZKChainScript is Script { } function deployChainAdmin() internal { + // TODO(EVM-924): provide an option to deploy a non-single owner ChainAdmin. + (address chainAdmin, address accessControlRestriction) = deployChainAdminSingleOwner(); + + output.accessControlRestrictionAddress = accessControlRestriction; + output.chainAdmin = chainAdmin; + } + + function deployChainAdminSingleOwner() internal returns (address chainAdmin, address accessControlRestriction) { + chainAdmin = Utils.deployViaCreate2( + abi.encodePacked(type(ChainAdminSingleOwner).creationCode, abi.encode(config.ownerAddress, address(0))), + config.create2Salt, + config.create2FactoryAddress + ); + // The single owner chainAdmin does not have a separate control restriction contract. + // We set it to zero explicitly so that it is clear to the reader. 
+ accessControlRestriction = address(0); + + console.log("ChainAdminSingleOwner deployed at:", chainAdmin); + } + + // TODO(EVM-924): this function is unused + function deployChainAdminWithRestrictions() + internal + returns (address chainAdmin, address accessControlRestriction) + { bytes memory input = abi.encode(0, config.ownerAddress); - address restriction = Utils.deployViaCreate2( + accessControlRestriction = Utils.deployViaCreate2( abi.encodePacked(type(AccessControlRestriction).creationCode, input), config.create2Salt, config.create2FactoryAddress ); - output.accessControlRestrictionAddress = restriction; address[] memory restrictions = new address[](1); - restrictions[0] = restriction; + restrictions[0] = accessControlRestriction; input = abi.encode(restrictions); - address chainAdmin = Utils.deployViaCreate2( + chainAdmin = Utils.deployViaCreate2( abi.encodePacked(type(ChainAdmin).creationCode, input), config.create2Salt, config.create2FactoryAddress ); - output.chainAdmin = chainAdmin; } function registerZKChain() internal { diff --git a/l1-contracts/deploy-scripts/Utils.sol b/l1-contracts/deploy-scripts/Utils.sol index a67341785..d19bafb08 100644 --- a/l1-contracts/deploy-scripts/Utils.sol +++ b/l1-contracts/deploy-scripts/Utils.sol @@ -1009,12 +1009,15 @@ library Utils { bytes memory _data, uint256 _value ) internal { - address defaultAdmin = AccessControlRestriction(_accessControlRestriction).defaultAdmin(); + // If `_accessControlRestriction` is not provided, we expect that this ChainAdmin is Ownable + address adminOwner = _accessControlRestriction == address(0) + ? Ownable(_admin).owner() + : AccessControlRestriction(_accessControlRestriction).defaultAdmin(); Call[] memory calls = new Call[](1); calls[0] = Call({target: _target, value: _value, data: _data}); - vm.startBroadcast(defaultAdmin); + vm.startBroadcast(adminOwner); IChainAdmin(_admin).multicall{value: _value}(calls, true); vm.stopBroadcast(); } diff --git a/l1-contracts/deploy-scripts/upgrade/ChainUpgrade.s.sol b/l1-contracts/deploy-scripts/upgrade/ChainUpgrade.s.sol index 4d99ae0db..103258d88 100644 --- a/l1-contracts/deploy-scripts/upgrade/ChainUpgrade.s.sol +++ b/l1-contracts/deploy-scripts/upgrade/ChainUpgrade.s.sol @@ -28,7 +28,6 @@ contract ChainUpgrade is Script { uint256 chainChainId; address chainDiamondProxyAddress; bool permanentRollup; - // FIXME: From ecosystem, maybe move to a different struct address bridgehubProxyAddress; address oldSharedBridgeProxyAddress; } @@ -58,9 +57,6 @@ contract ChainUpgrade is Script { checkCorrectOwnerAddress(); - // Deploying of the new chain admin is not strictly needed - // but our existing tooling relies on the new impl of chain admin - deployNewChainAdmin(); governanceMoveToNewChainAdmin(); // This script does nothing, it only checks that the provided inputs are correct. @@ -123,6 +119,7 @@ contract ChainUpgrade is Script { require(currentAdminOwner == config.ownerAddress, "Only the owner of the chain admin can call this function"); } + // TODO(EVM-924): this function is not used. 
function deployNewChainAdmin() internal { vm.broadcast(config.ownerAddress); AccessControlRestriction accessControlRestriction = new AccessControlRestriction(0, config.ownerAddress); diff --git a/l1-contracts/deploy-scripts/upgrade/EcosystemUpgrade.s.sol b/l1-contracts/deploy-scripts/upgrade/EcosystemUpgrade.s.sol index 5959fa8c8..211a17212 100644 --- a/l1-contracts/deploy-scripts/upgrade/EcosystemUpgrade.s.sol +++ b/l1-contracts/deploy-scripts/upgrade/EcosystemUpgrade.s.sol @@ -182,6 +182,7 @@ contract EcosystemUpgrade is Script { // for facilitating partially trusted, but not critical tasks. address ecosystemAdminAddress; bool testnetVerifier; + uint256 governanceUpgradeTimerInitialDelay; ContractsConfig contracts; TokensConfig tokens; } @@ -373,7 +374,7 @@ contract EcosystemUpgrade is Script { } function getNewProtocolVersion() public returns (uint256) { - return 0x1b00000000; + return 0x1a00000000; } function getProtocolUpgradeNonce() public returns (uint256) { @@ -724,8 +725,8 @@ contract EcosystemUpgrade is Script { config.contracts.oldValidatorTimelock = toml.readAddress("$.contracts.old_validator_timelock"); config.tokens.tokenWethAddress = toml.readAddress("$.tokens.token_weth_address"); + config.governanceUpgradeTimerInitialDelay = toml.readUint("$.governance_upgrade_timer_initial_delay"); - // TODO: maybe receive the address from the config + cross check config.ecosystemAdminAddress = Bridgehub(config.contracts.bridgehubProxyAddress).admin(); } @@ -838,12 +839,11 @@ contract EcosystemUpgrade is Script { function publishBytecodes() internal { bytes[] memory allDeps = getFullListOfFactoryDependencies(); - BytecodePublisher.publishBytecodesInBatches(BytecodesSupplier(addresses.bytecodesSupplier), allDeps); - uint256[] memory factoryDeps = new uint256[](allDeps.length); - require(factoryDeps.length <= 64, "Too many deps"); + BytecodePublisher.publishBytecodesInBatches(BytecodesSupplier(addresses.bytecodesSupplier), allDeps); + for (uint256 i = 0; i < allDeps.length; i++) { factoryDeps[i] = uint256(L2ContractHelper.hashL2Bytecode(allDeps[i])); } @@ -919,24 +919,6 @@ contract EcosystemUpgrade is Script { addresses.validatorTimelock = contractAddress; } - function deployChainAdmin() internal { - bytes memory accessControlRestrictionBytecode = abi.encodePacked( - type(AccessControlRestriction).creationCode, - abi.encode(uint256(0), config.ownerAddress) - ); - - address accessControlRestriction = deployViaCreate2(accessControlRestrictionBytecode); - console.log("Access control restriction deployed at:", accessControlRestriction); - address[] memory restrictions = new address[](1); - restrictions[0] = accessControlRestriction; - addresses.accessControlRestrictionAddress = accessControlRestriction; - - bytes memory bytecode = abi.encodePacked(type(ChainAdmin).creationCode, abi.encode(restrictions)); - address contractAddress = deployViaCreate2(bytecode); - console.log("ChainAdmin deployed at:", contractAddress); - addresses.chainAdmin = contractAddress; - } - function deployBridgehubImplementation() internal { bytes memory bridgeHubBytecode = abi.encodePacked( type(Bridgehub).creationCode, @@ -1055,9 +1037,8 @@ contract EcosystemUpgrade is Script { } function deployL1NullifierImplementation() internal { - // TODO(EVM-743): allow non-dev nullifier in the local deployment bytes memory bytecode = abi.encodePacked( - type(L1NullifierDev).creationCode, + type(L1Nullifier).creationCode, // solhint-disable-next-line func-named-parameters abi.encode(config.contracts.bridgehubProxyAddress, 
config.eraChainId, config.contracts.eraDiamondProxy) ); @@ -1133,12 +1114,6 @@ contract EcosystemUpgrade is Script { } function deployBridgedTokenBeacon() internal { - // bytes memory bytecode = abi.encodePacked( - // type(UpgradeableBeacon).creationCode, - // // solhint-disable-next-line func-named-parameters - // abi.encode(addresses.bridges.bridgedStandardERC20Implementation) - // ); - // Note, that the `msg.sender` will be set as the owner. // This means that we can not use a naive create2factory. It may be replaced // with a more advanced one, but CREATE from a hot wallet is fine too. @@ -1185,7 +1160,6 @@ contract EcosystemUpgrade is Script { IL1AssetRouter sharedBridge = IL1AssetRouter(addresses.bridges.sharedBridgeProxy); IL1Nullifier l1Nullifier = IL1Nullifier(config.contracts.oldSharedBridgeProxyAddress); - // Ownable ownable = Ownable(addresses.bridges.sharedBridgeProxy); vm.broadcast(msg.sender); sharedBridge.setNativeTokenVault(INativeTokenVault(addresses.vaults.l1NativeTokenVaultProxy)); @@ -1203,8 +1177,7 @@ contract EcosystemUpgrade is Script { } function deployGovernanceUpgradeTimer() internal { - // Needed for easy server testing, in reality it will be different - uint256 INITIAL_DELAY = 0; + uint256 INITIAL_DELAY = config.governanceUpgradeTimerInitialDelay; uint256 MAX_ADDITIONAL_DELAY = 2 weeks; @@ -1248,7 +1221,6 @@ contract EcosystemUpgrade is Script { _moveGovernanceToOwner(addresses.validatorTimelock); _moveGovernanceToOwner(addresses.bridges.sharedBridgeProxy); _moveGovernanceToOwner(addresses.bridgehub.ctmDeploymentTrackerProxy); - console.log("hi"); _moveGovernanceToOwner(addresses.daAddresses.rollupDAManager); vm.stopBroadcast(); diff --git a/l1-contracts/test/foundry/l1/da-contracts-imports/DAContractsErrors.sol b/l1-contracts/test/foundry/l1/da-contracts-imports/DAContractsErrors.sol index 4328a9dce..c84e3206e 100644 --- a/l1-contracts/test/foundry/l1/da-contracts-imports/DAContractsErrors.sol +++ b/l1-contracts/test/foundry/l1/da-contracts-imports/DAContractsErrors.sol @@ -20,28 +20,25 @@ error PointEvalCallFailed(bytes); error PointEvalFailed(bytes); // 0xf4a3e629 -error OperatorDAInputLengthTooSmall(uint256 operatorDAInputLength, uint256 blobDataOffset); +error OperatorDAInputTooSmall(uint256 operatorDAInputLength, uint256 minAllowedLength); // 0xbeb96791 error InvalidNumberOfBlobs(uint256 blobsProvided, uint256 maxBlobsSupported); -// 0xcd384e46 -error InvalidBlobsHashes(uint256 operatorDAInputLength, uint256 blobsProvided); - // 0xe9e79528 error InvalidL2DAOutputHash(); -// 0x3db6e664 -error OneBlobWithCalldata(); +// 0x04e05fd1 +error OnlyOneBlobWithCalldataAllowed(); // 0x2dc9747d -error PubdataInputTooSmall(uint256 pubdataInputLength, uint256 blobCommitmentSize); +error PubdataInputTooSmall(uint256 pubdataInputLength, uint256 totalBlobsCommitmentSize); // 0x9044dff9 -error PubdataLengthTooBig(uint256 pubdataLength, uint256 blobSizeBytes); +error PubdataLengthTooBig(uint256 pubdataLength, uint256 totalBlobSizeBytes); // 0x5513177c -error InvalidPubdataHash(bytes32 fullPubdataHash, bytes32 pubdata); +error InvalidPubdataHash(bytes32 fullPubdataHash, bytes32 providedPubdataHash); // 0xc771423e error BlobCommitmentNotPublished(); diff --git a/l1-contracts/test/foundry/l1/integration/L1GatewayTests.t.sol b/l1-contracts/test/foundry/l1/integration/L1GatewayTests.t.sol index a015f9aa1..561a7827a 100644 --- a/l1-contracts/test/foundry/l1/integration/L1GatewayTests.t.sol +++ b/l1-contracts/test/foundry/l1/integration/L1GatewayTests.t.sol @@ -105,6 +105,7 @@ 
contract L1GatewayTests is L1ContractDeployer, ZKChainDeployer, TokenDeployer, L // This is a method to simplify porting the tests for now. // Here we rely that the first restriction is the AccessControlRestriction + // TODO(EVM-924): this function is not used. function _extractAccessControlRestriction(address admin) internal returns (address) { return ChainAdmin(payable(admin)).getRestrictions()[0]; } @@ -126,35 +127,20 @@ contract L1GatewayTests is L1ContractDeployer, ZKChainDeployer, TokenDeployer, L // function test_moveChainToGateway() public { _setUpGatewayWithFilterer(); - gatewayScript.migrateChainToGateway( - migratingChain.getAdmin(), - address(1), - _extractAccessControlRestriction(migratingChain.getAdmin()), - migratingChainId - ); + gatewayScript.migrateChainToGateway(migratingChain.getAdmin(), address(1), address(0), migratingChainId); require(bridgehub.settlementLayer(migratingChainId) == gatewayChainId, "Migration failed"); } function test_l2Registration() public { _setUpGatewayWithFilterer(); - gatewayScript.migrateChainToGateway( - migratingChain.getAdmin(), - address(1), - _extractAccessControlRestriction(migratingChain.getAdmin()), - migratingChainId - ); + gatewayScript.migrateChainToGateway(migratingChain.getAdmin(), address(1), address(0), migratingChainId); gatewayScript.governanceSetCTMAssetHandler(bytes32(0)); gatewayScript.registerAssetIdInBridgehub(address(0x01), bytes32(0)); } function test_startMessageToL3() public { _setUpGatewayWithFilterer(); - gatewayScript.migrateChainToGateway( - migratingChain.getAdmin(), - address(1), - _extractAccessControlRestriction(migratingChain.getAdmin()), - migratingChainId - ); + gatewayScript.migrateChainToGateway(migratingChain.getAdmin(), address(1), address(0), migratingChainId); IBridgehub bridgehub = IBridgehub(bridgehub); uint256 expectedValue = 1000000000000000000000; @@ -171,12 +157,7 @@ contract L1GatewayTests is L1ContractDeployer, ZKChainDeployer, TokenDeployer, L function test_recoverFromFailedChainMigration() public { _setUpGatewayWithFilterer(); - gatewayScript.migrateChainToGateway( - migratingChain.getAdmin(), - address(1), - _extractAccessControlRestriction(migratingChain.getAdmin()), - migratingChainId - ); + gatewayScript.migrateChainToGateway(migratingChain.getAdmin(), address(1), address(0), migratingChainId); // Setup IBridgehub bridgehub = IBridgehub(bridgehub); @@ -251,12 +232,7 @@ contract L1GatewayTests is L1ContractDeployer, ZKChainDeployer, TokenDeployer, L function test_finishMigrateBackChain() public { _setUpGatewayWithFilterer(); - gatewayScript.migrateChainToGateway( - migratingChain.getAdmin(), - address(1), - _extractAccessControlRestriction(migratingChain.getAdmin()), - migratingChainId - ); + gatewayScript.migrateChainToGateway(migratingChain.getAdmin(), address(1), address(0), migratingChainId); migrateBackChain(); } diff --git a/l1-contracts/test/foundry/l1/unit/concrete/Executor/Committing.t.sol b/l1-contracts/test/foundry/l1/unit/concrete/Executor/Committing.t.sol index d115fd0cf..416bcfd17 100644 --- a/l1-contracts/test/foundry/l1/unit/concrete/Executor/Committing.t.sol +++ b/l1-contracts/test/foundry/l1/unit/concrete/Executor/Committing.t.sol @@ -12,7 +12,7 @@ import {POINT_EVALUATION_PRECOMPILE_ADDR} from "contracts/common/Config.sol"; import {L2_PUBDATA_CHUNK_PUBLISHER_ADDR} from "contracts/common/L2ContractAddresses.sol"; import {BLS_MODULUS} from "../../../da-contracts-imports/DAUtils.sol"; import {BLOB_DATA_OFFSET} from "../../../da-contracts-imports/CalldataDA.sol"; -import 
{PubdataCommitmentsEmpty, BlobHashCommitmentError, OperatorDAInputLengthTooSmall, EmptyBlobVersionHash, InvalidPubdataCommitmentsSize, NonEmptyBlobVersionHash} from "../../../da-contracts-imports/DAContractsErrors.sol"; +import {PubdataCommitmentsEmpty, BlobHashCommitmentError, OperatorDAInputTooSmall, EmptyBlobVersionHash, InvalidPubdataCommitmentsSize, NonEmptyBlobVersionHash} from "../../../da-contracts-imports/DAContractsErrors.sol"; import {TimeNotReached, BatchNumberMismatch, L2TimestampTooBig, CanOnlyProcessOneBatch, TimestampError, LogAlreadyProcessed, InvalidLogSender, UnexpectedSystemLog, HashMismatch, BatchHashMismatch, ValueMismatch, MissingSystemLogs} from "contracts/common/L1ContractErrors.sol"; contract CommittingTest is ExecutorTest { @@ -661,7 +661,7 @@ contract CommittingTest is ExecutorTest { vm.prank(validator); vm.expectRevert( - abi.encodeWithSelector(OperatorDAInputLengthTooSmall.selector, operatorDAInput.length, BLOB_DATA_OFFSET) + abi.encodeWithSelector(OperatorDAInputTooSmall.selector, operatorDAInput.length, BLOB_DATA_OFFSET) ); (uint256 commitBatchFrom, uint256 commitBatchTo, bytes memory commitData) = Utils.encodeCommitBatchesData( genesisStoredBatchInfo, diff --git a/l1-contracts/test/foundry/l1/unit/concrete/state-transition/data-availability/CalldataDA.t.sol b/l1-contracts/test/foundry/l1/unit/concrete/state-transition/data-availability/CalldataDA.t.sol index fe510176a..767c0b84f 100644 --- a/l1-contracts/test/foundry/l1/unit/concrete/state-transition/data-availability/CalldataDA.t.sol +++ b/l1-contracts/test/foundry/l1/unit/concrete/state-transition/data-availability/CalldataDA.t.sol @@ -6,7 +6,7 @@ import {Test} from "forge-std/Test.sol"; import {Utils} from "../../Utils/Utils.sol"; import {TestCalldataDA} from "contracts/dev-contracts/test/TestCalldataDA.sol"; import {BLOB_SIZE_BYTES, BLOB_DATA_OFFSET, BLOB_COMMITMENT_SIZE} from "contracts/state-transition/data-availability/CalldataDA.sol"; -import {OperatorDAInputTooSmall, InvalidNumberOfBlobs, InvalidBlobsHashes, InvalidL2DAOutputHash, OnlyOneBlobWithCalldata, PubdataTooSmall, PubdataTooLong, InvalidPubdataHash} from "contracts/state-transition/L1StateTransitionErrors.sol"; +import {OperatorDAInputTooSmall, InvalidNumberOfBlobs, InvalidL2DAOutputHash, OnlyOneBlobWithCalldataAllowed, PubdataInputTooSmall, PubdataLengthTooBig, InvalidPubdataHash} from "contracts/state-transition/L1StateTransitionErrors.sol"; contract CalldataDATest is Test { TestCalldataDA calldataDA; @@ -62,7 +62,7 @@ contract CalldataDATest is Test { vm.expectRevert( abi.encodeWithSelector( - InvalidBlobsHashes.selector, + OperatorDAInputTooSmall.selector, operatorDAInput.length, BLOB_DATA_OFFSET + 32 * uint256(uint8(operatorDAInput[64])) ) @@ -117,13 +117,13 @@ contract CalldataDATest is Test { CalldataDA::_processCalldataDA //////////////////////////////////////////////////////////////////////////*/ - function test_RevertWhen_OnlyOneBlobWithCalldata(uint256 blobsProvided) public { + function test_RevertWhen_OnlyOneBlobWithCalldataAllowed(uint256 blobsProvided) public { vm.assume(blobsProvided != 1); bytes32 fullPubdataHash = Utils.randomBytes32("fullPubdataHash"); uint256 maxBlobsSupported = 6; bytes memory pubdataInput = ""; - vm.expectRevert(OnlyOneBlobWithCalldata.selector); + vm.expectRevert(OnlyOneBlobWithCalldataAllowed.selector); calldataDA.processCalldataDA(blobsProvided, fullPubdataHash, maxBlobsSupported, pubdataInput); } @@ -133,17 +133,19 @@ contract CalldataDATest is Test { bytes calldata pubdataInput = 
makeBytesArrayOfLength(BLOB_SIZE_BYTES + 33); bytes32 fullPubdataHash = keccak256(pubdataInput); - vm.expectRevert(abi.encodeWithSelector(PubdataTooLong.selector, 126977, blobsProvided * BLOB_SIZE_BYTES)); + vm.expectRevert(abi.encodeWithSelector(PubdataLengthTooBig.selector, 126977, blobsProvided * BLOB_SIZE_BYTES)); calldataDA.processCalldataDA(blobsProvided, fullPubdataHash, maxBlobsSupported, pubdataInput); } - function test_RevertWhen_PubdataTooSmall() public { + function test_RevertWhen_PubdataInputTooSmall() public { uint256 blobsProvided = 1; uint256 maxBlobsSupported = 6; bytes calldata pubdataInput = makeBytesArrayOfLength(31); bytes32 fullPubdataHash = keccak256(pubdataInput); - vm.expectRevert(abi.encodeWithSelector(PubdataTooSmall.selector, pubdataInput.length, BLOB_COMMITMENT_SIZE)); + vm.expectRevert( + abi.encodeWithSelector(PubdataInputTooSmall.selector, pubdataInput.length, BLOB_COMMITMENT_SIZE) + ); calldataDA.processCalldataDA(blobsProvided, fullPubdataHash, maxBlobsSupported, pubdataInput); } diff --git a/l1-contracts/test/foundry/l1/unit/concrete/state-transition/data-availability/RelayedSLDAValidator.t.sol b/l1-contracts/test/foundry/l1/unit/concrete/state-transition/data-availability/RelayedSLDAValidator.t.sol index de566b0df..49c621fcf 100644 --- a/l1-contracts/test/foundry/l1/unit/concrete/state-transition/data-availability/RelayedSLDAValidator.t.sol +++ b/l1-contracts/test/foundry/l1/unit/concrete/state-transition/data-availability/RelayedSLDAValidator.t.sol @@ -10,7 +10,7 @@ import {L2_TO_L1_MESSENGER_SYSTEM_CONTRACT_ADDR} from "contracts/common/L2Contra import {IL1Messenger} from "contracts/common/interfaces/IL1Messenger.sol"; import {L2_BRIDGEHUB_ADDR} from "contracts/common/L2ContractAddresses.sol"; import {IBridgehub} from "contracts/bridgehub/IBridgehub.sol"; -import {PubdataTooSmall, L1DAValidatorInvalidSender} from "contracts/state-transition/L1StateTransitionErrors.sol"; +import {PubdataInputTooSmall, L1DAValidatorInvalidSender} from "contracts/state-transition/L1StateTransitionErrors.sol"; import {InvalidPubdataSource} from "contracts/state-transition/L1StateTransitionErrors.sol"; contract RelayedSLDAValidatorTest is Test { @@ -76,7 +76,7 @@ contract RelayedSLDAValidatorTest is Test { bytes memory operatorDAInput = abi.encodePacked(daInput, pubdataSource, l1DaInput); vm.prank(CHAIN_ADDRESS); - vm.expectRevert(abi.encodeWithSelector(PubdataTooSmall.selector, 15, 32)); + vm.expectRevert(abi.encodeWithSelector(PubdataInputTooSmall.selector, 15, 32)); daValidator.checkDA(CHAIN_ID, 0, l2DAValidatorOutputHash, operatorDAInput, maxBlobsSupported); } diff --git a/l1-contracts/test/foundry/l1/unit/concrete/state-transition/libraries/PriorityTree/PriorityTree.t.sol b/l1-contracts/test/foundry/l1/unit/concrete/state-transition/libraries/PriorityTree/PriorityTree.t.sol index fa5fdfcc4..6acd8062e 100644 --- a/l1-contracts/test/foundry/l1/unit/concrete/state-transition/libraries/PriorityTree/PriorityTree.t.sol +++ b/l1-contracts/test/foundry/l1/unit/concrete/state-transition/libraries/PriorityTree/PriorityTree.t.sol @@ -4,7 +4,7 @@ pragma solidity 0.8.24; import {PriorityTreeSharedTest, PriorityOpsBatchInfo} from "./_PriorityTree_Shared.t.sol"; import {PriorityTreeCommitment} from "contracts/common/Config.sol"; -import {RootMismatch} from "contracts/state-transition/L1StateTransitionErrors.sol"; +import {NotHistoricalRoot} from "contracts/state-transition/L1StateTransitionErrors.sol"; bytes32 constant ZERO_LEAF_HASH = keccak256(""); @@ -84,7 +84,7 @@ contract 
PriorityTreeTest is PriorityTreeSharedTest { function test_processBatch_shouldRevert() public { bytes32[] memory itemHashes = pushMockEntries(3); - vm.expectRevert(RootMismatch.selector); + vm.expectRevert(NotHistoricalRoot.selector); priorityTree.processBatch( PriorityOpsBatchInfo({leftPath: new bytes32[](2), rightPath: new bytes32[](2), itemHashes: itemHashes}) );