Commit

Merge branch 'gateway-release-candidate-2' into sb-cleanup-scripts
StanislavBreadless committed Dec 20, 2024
2 parents 7bfe12d + a575417 commit 97c8b05
Showing 18 changed files with 82 additions and 72 deletions.
8 changes: 4 additions & 4 deletions da-contracts/contracts/CalldataDA.sol
@@ -2,7 +2,7 @@

pragma solidity 0.8.24;

-import {OperatorDAInputLengthTooSmall, InvalidNumberOfBlobs, InvalidBlobsHashes, InvalidL2DAOutputHash, OneBlobWithCalldata, PubdataInputTooSmall, PubdataLengthTooBig, InvalidPubdataHash} from "./DAContractsErrors.sol";
+import {OperatorDAInputTooSmall, InvalidNumberOfBlobs, InvalidL2DAOutputHash, OnlyOneBlobWithCalldataAllowed, PubdataInputTooSmall, PubdataLengthTooBig, InvalidPubdataHash} from "./DAContractsErrors.sol";

/// @dev Total number of bytes in a blob. Blob = 4096 field elements * 31 bytes per field element
/// @dev EIP-4844 defines it as 131_072 but we use 4096 * 31 within our circuits to always fit within a field element
@@ -45,7 +45,7 @@ abstract contract CalldataDA {

// Check that it accommodates enough pubdata for the state diff hash, hash of pubdata + the number of blobs.
if (_operatorDAInput.length < BLOB_DATA_OFFSET) {
-revert OperatorDAInputLengthTooSmall(_operatorDAInput.length, BLOB_DATA_OFFSET);
+revert OperatorDAInputTooSmall(_operatorDAInput.length, BLOB_DATA_OFFSET);
}

stateDiffHash = bytes32(_operatorDAInput[:32]);
@@ -61,7 +61,7 @@ abstract contract CalldataDA {
blobsLinearHashes = new bytes32[](_maxBlobsSupported);

if (_operatorDAInput.length < BLOB_DATA_OFFSET + 32 * blobsProvided) {
-revert InvalidBlobsHashes(_operatorDAInput.length, BLOB_DATA_OFFSET + 32 * blobsProvided);
+revert OperatorDAInputTooSmall(_operatorDAInput.length, BLOB_DATA_OFFSET + 32 * blobsProvided);
}

_cloneCalldata(blobsLinearHashes, _operatorDAInput[BLOB_DATA_OFFSET:], blobsProvided);
@@ -90,7 +90,7 @@ abstract contract CalldataDA {
bytes calldata _pubdataInput
) internal pure virtual returns (bytes32[] memory blobCommitments, bytes calldata _pubdata) {
if (_blobsProvided != 1) {
-revert OneBlobWithCalldata();
+revert OnlyOneBlobWithCalldataAllowed();
}
if (_pubdataInput.length < BLOB_COMMITMENT_SIZE) {
revert PubdataInputTooSmall(_pubdataInput.length, BLOB_COMMITMENT_SIZE);
17 changes: 7 additions & 10 deletions da-contracts/contracts/DAContractsErrors.sol
@@ -16,29 +16,26 @@ error PointEvalCallFailed(bytes);
// 0x4daa985d
error PointEvalFailed(bytes);

-// 0xf4a3e629
-error OperatorDAInputLengthTooSmall(uint256 operatorDAInputLength, uint256 blobDataOffset);
+// 0x885ae069
+error OperatorDAInputTooSmall(uint256 operatorDAInputLength, uint256 minAllowedLength);

// 0xbeb96791
error InvalidNumberOfBlobs(uint256 blobsProvided, uint256 maxBlobsSupported);

-// 0xcd384e46
-error InvalidBlobsHashes(uint256 operatorDAInputLength, uint256 blobsProvided);

// 0xd2531c15
error InvalidL2DAOutputHash(bytes32 l2DAValidatorOutputHash);

-// 0x3db6e664
-error OneBlobWithCalldata();
+// 0x04e05fd1
+error OnlyOneBlobWithCalldataAllowed();

// 0x2dc9747d
-error PubdataInputTooSmall(uint256 pubdataInputLength, uint256 blobCommitmentSize);
+error PubdataInputTooSmall(uint256 pubdataInputLength, uint256 totalBlobsCommitmentSize);

// 0x9044dff9
-error PubdataLengthTooBig(uint256 pubdataLength, uint256 blobSizeBytes);
+error PubdataLengthTooBig(uint256 pubdataLength, uint256 totalBlobSizeBytes);

// 0x5513177c
-error InvalidPubdataHash(bytes32 fullPubdataHash, bytes32 pubdata);
+error InvalidPubdataHash(bytes32 fullPubdataHash, bytes32 providedPubdataHash);

// 0xc771423e
error BlobCommitmentNotPublished();
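The hexadecimal comment above each custom error is its 4-byte selector, which is why renaming an error in this commit also updates the comment, while renaming only a parameter (as for InvalidPubdataHash) leaves the selector unchanged. A minimal sketch of how those selectors can be recomputed with standard Solidity; the contract and function names below are illustrative and not part of the repository:

// SPDX-License-Identifier: MIT
pragma solidity 0.8.24;

/// Illustrative helper: a custom error's selector is the first 4 bytes of the
/// keccak256 hash of its signature (name plus parameter *types*), so only the
/// name or the types affect it, never the parameter names.
contract ErrorSelectorSketch {
    function selectorOf(string memory signature) public pure returns (bytes4) {
        return bytes4(keccak256(bytes(signature)));
    }

    function examples() external pure returns (bytes4 oneBlob, bytes4 daInput) {
        // Expected to reproduce the comments above, e.g. 0x04e05fd1 and 0x885ae069.
        oneBlob = selectorOf("OnlyOneBlobWithCalldataAllowed()");
        daInput = selectorOf("OperatorDAInputTooSmall(uint256,uint256)");
    }
}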
2 changes: 1 addition & 1 deletion da-contracts/contracts/RollupL1DAValidator.sol
@@ -13,7 +13,7 @@ import {InvalidPubdataSource, PubdataCommitmentsEmpty, InvalidPubdataCommitments
uint256 constant BLOBS_SUPPORTED = 6;

/// @dev The number of blocks within each we allow blob to be used for DA.
-/// On Ethereum blobs expire within 4096 slots, i.e. 4096 * 32 blocks. We reserve
+/// On Ethereum blobs expire within 4096 epochs, i.e. 4096 * 32 blocks. We reserve
/// half of the time in order to ensure reader's ability to read the blob's content.
uint256 constant BLOB_EXPIRATION_BLOCKS = (4096 * 32) / 2;

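As a quick check of the constant above: 4096 epochs at 32 slots per epoch is 131,072 slots of blob retention, and reserving half of that window leaves 65,536 blocks, roughly nine days at 12-second slots. A small illustrative sketch of the same arithmetic (names below are not from the repository):

// SPDX-License-Identifier: MIT
pragma solidity 0.8.24;

/// Sanity-check sketch for BLOB_EXPIRATION_BLOCKS; assumes 12-second slots.
contract BlobExpirationSketch {
    uint256 internal constant RETENTION_BLOCKS = 4096 * 32; // 131_072 slots of blob retention
    uint256 internal constant HALF_WINDOW_BLOCKS = RETENTION_BLOCKS / 2; // 65_536, matching the constant above

    function approxUsableWindowSeconds() external pure returns (uint256) {
        // 65_536 blocks * 12 seconds = 786_432 s, roughly 9.1 days left for readers to fetch the blob.
        return HALF_WINDOW_BLOCKS * 12;
    }
}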
15 changes: 11 additions & 4 deletions l1-contracts/contracts/bridge/L1Nullifier.sol
@@ -28,7 +28,7 @@ import {DataEncoding} from "../common/libraries/DataEncoding.sol";
import {IBridgehub} from "../bridgehub/IBridgehub.sol";
import {L2_BASE_TOKEN_SYSTEM_CONTRACT_ADDR, L2_ASSET_ROUTER_ADDR} from "../common/L2ContractAddresses.sol";
import {DataEncoding} from "../common/libraries/DataEncoding.sol";
-import {LegacyBridgeNotSet, Unauthorized, SharedBridgeKey, DepositExists, AddressAlreadySet, InvalidProof, DepositDoesNotExist, SharedBridgeValueNotSet, WithdrawalAlreadyFinalized, L2WithdrawalMessageWrongLength, InvalidSelector, SharedBridgeValueNotSet, ZeroAddress} from "../common/L1ContractErrors.sol";
+import {LegacyMethodForNonL1Token, LegacyBridgeNotSet, Unauthorized, SharedBridgeKey, DepositExists, AddressAlreadySet, InvalidProof, DepositDoesNotExist, SharedBridgeValueNotSet, WithdrawalAlreadyFinalized, L2WithdrawalMessageWrongLength, InvalidSelector, SharedBridgeValueNotSet, ZeroAddress} from "../common/L1ContractErrors.sol";
import {WrongL2Sender, NativeTokenVaultAlreadySet, EthTransferFailed, WrongMsgLength} from "./L1BridgeContractErrors.sol";

/// @author Matter Labs
@@ -570,11 +570,14 @@ contract L1Nullifier is IL1Nullifier, ReentrancyGuard, Ownable2StepUpgradeable,
// slither-disable-next-line unused-return
(amount, ) = UnsafeBytes.readUint256(_l2ToL1message, offset);
assetId = BRIDGE_HUB.baseTokenAssetId(_chainId);
-address baseToken = BRIDGE_HUB.baseToken(_chainId);
transferData = DataEncoding.encodeBridgeMintData({
_originalCaller: address(0),
_remoteReceiver: l1Receiver,
-_originToken: baseToken,
+// Note, that `assetId` could belong to a token native to an L2, and so
+// the logic for determining the correct origin token address will be complex.
+// It is expected that this value won't be used in the NativeTokenVault and so providing
+// any value is acceptable here.
+_originToken: address(0),
_amount: amount,
_erc20Metadata: new bytes(0)
});
@@ -642,9 +645,13 @@ contract L1Nullifier is IL1Nullifier, ReentrancyGuard, Ownable2StepUpgradeable,
bytes32[] calldata _merkleProof
) external {
bytes32 assetId = l1NativeTokenVault.assetId(_l1Token);
+bytes32 ntvAssetId = DataEncoding.encodeNTVAssetId(block.chainid, _l1Token);
if (assetId == bytes32(0)) {
-assetId = DataEncoding.encodeNTVAssetId(block.chainid, _l1Token);
+assetId = ntvAssetId;
+} else if (assetId != ntvAssetId) {
+revert LegacyMethodForNonL1Token();
}

// For legacy deposits, the l2 receiver is not required to check tx data hash
// The token address does not have to be provided for this functionality either.
bytes memory assetData = DataEncoding.encodeBridgeBurnData(_amount, address(0), address(0));
@@ -88,7 +88,7 @@ abstract contract AssetRouterBase is IAssetRouterBase, Ownable2StepUpgradeable,
}
_setAssetHandler(assetId, _assetHandlerAddress);
assetDeploymentTracker[assetId] = msg.sender;
-emit AssetDeploymentTrackerRegistered(assetId, _assetRegistrationData, sender);
+emit AssetDeploymentTrackerRegistered(assetId, _assetRegistrationData, msg.sender);
}

/*//////////////////////////////////////////////////////////////
8 changes: 7 additions & 1 deletion l1-contracts/contracts/bridge/asset-router/L1AssetRouter.sol
@@ -22,7 +22,7 @@ import {DataEncoding} from "../../common/libraries/DataEncoding.sol";
import {AddressAliasHelper} from "../../vendor/AddressAliasHelper.sol";
import {TWO_BRIDGES_MAGIC_VALUE, ETH_TOKEN_ADDRESS} from "../../common/Config.sol";
import {NativeTokenVaultAlreadySet} from "../L1BridgeContractErrors.sol";
-import {LegacyBridgeUsesNonNativeToken, NonEmptyMsgValue, UnsupportedEncodingVersion, AssetIdNotSupported, AssetHandlerDoesNotExist, Unauthorized, ZeroAddress, TokenNotSupported, AddressAlreadyUsed, TokensWithFeesNotSupported} from "../../common/L1ContractErrors.sol";
+import {LegacyEncodingUsedForNonL1Token, LegacyBridgeUsesNonNativeToken, NonEmptyMsgValue, UnsupportedEncodingVersion, AssetIdNotSupported, AssetHandlerDoesNotExist, Unauthorized, ZeroAddress, TokenNotSupported, AddressAlreadyUsed, TokensWithFeesNotSupported} from "../../common/L1ContractErrors.sol";
import {L2_ASSET_ROUTER_ADDR} from "../../common/L2ContractAddresses.sol";

import {IBridgehub, L2TransactionRequestTwoBridgesInner, L2TransactionRequestDirect} from "../../bridgehub/IBridgehub.sol";
@@ -386,6 +386,12 @@ contract L1AssetRouter is AssetRouterBase, IL1AssetRouter, ReentrancyGuard {
);
bytes32 assetId = _ensureTokenRegisteredWithNTV(_l1Token);

+// We ensure that the legacy data format can not be used for tokens that did not originate from L1.
+bytes32 expectedAssetId = DataEncoding.encodeNTVAssetId(block.chainid, _l1Token);
+if (assetId != expectedAssetId) {
+    revert LegacyEncodingUsedForNonL1Token();
+}

if (assetId == ETH_TOKEN_ASSET_ID) {
// In the old SDK/contracts the user had to always provide `0` as the deposit amount for ETH token, while
// ultimately the provided `msg.value` was used as the deposit amount. This check is needed for backwards compatibility.
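The guard added above restricts the legacy, address-based deposit encoding to tokens whose asset id was derived on L1. The sketch below illustrates the idea under the assumption that an NTV asset id is a hash over the origin chain id, a vault identifier and the token address; the authoritative derivation is DataEncoding.encodeNTVAssetId, and the marker address used here is a placeholder rather than the real constant:

// SPDX-License-Identifier: MIT
pragma solidity 0.8.24;

error LegacyEncodingUsedForNonL1Token();

/// Illustrative-only sketch of the check added in L1AssetRouter. A token that
/// originated on another chain was registered under that chain's id, so its
/// recomputed "L1-origin" asset id can never match and the call reverts.
contract LegacyEncodingGuardSketch {
    address internal constant ASSUMED_VAULT_MARKER = address(0x10004); // placeholder, not the repository's constant

    function encodeAssetId(uint256 originChainId, address token) public pure returns (bytes32) {
        return keccak256(abi.encode(originChainId, ASSUMED_VAULT_MARKER, token));
    }

    function requireL1Origin(bytes32 registeredAssetId, address l1Token) external view {
        // Legacy (address-based) encodings only make sense for tokens native to this (L1) chain.
        if (registeredAssetId != encodeAssetId(block.chainid, l1Token)) {
            revert LegacyEncodingUsedForNonL1Token();
        }
    }
}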
7 changes: 3 additions & 4 deletions l1-contracts/contracts/bridge/ntv/NativeTokenVault.sol
@@ -223,9 +223,9 @@ abstract contract NativeTokenVault is
}
}

-function tryRegisterTokenFromBurnData(bytes calldata _data, bytes32 _expectedAssetId) external {
+function tryRegisterTokenFromBurnData(bytes calldata _burnData, bytes32 _expectedAssetId) external {
// slither-disable-next-line unused-return
-(, , address tokenAddress) = DataEncoding.decodeBridgeBurnData(_data);
+(, , address tokenAddress) = DataEncoding.decodeBridgeBurnData(_burnData);

if (tokenAddress == address(0)) {
revert ZeroAddress();
@@ -338,8 +338,7 @@ abstract contract NativeTokenVault is
address _receiver,
address _nativeToken
) internal virtual returns (bytes memory _bridgeMintData) {
-address nativeToken = tokenAddress[_assetId];
-if (nativeToken == WETH_TOKEN) {
+if (_nativeToken == WETH_TOKEN) {
// This ensures that WETH_TOKEN can never be bridged from chains it is native to.
// It can only be withdrawn from the chain where it has already gotten.
revert BurningNativeWETHNotSupported();
6 changes: 5 additions & 1 deletion l1-contracts/contracts/bridgehub/CTMDeploymentTracker.sol
@@ -104,7 +104,11 @@ contract CTMDeploymentTracker is ICTMDeploymentTracker, Ownable2StepUpgradeable

/// @notice The function called by the Bridgehub after the L2 transaction has been initiated.
/// @dev Not used in this contract. In case the transaction fails, we can just re-try it.
-function bridgehubConfirmL2Transaction(uint256 _chainId, bytes32 _txDataHash, bytes32 _txHash) external {}
+function bridgehubConfirmL2Transaction(
+    uint256 _chainId,
+    bytes32 _txDataHash,
+    bytes32 _txHash
+) external onlyBridgehub {}

/// @notice Used to register the ctm asset in L2 AssetRouter.
/// @param _originalCaller the address that called the Router
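The only change to bridgehubConfirmL2Transaction is the onlyBridgehub modifier, so the callback, even while empty, can no longer be invoked by arbitrary callers. The sketch below shows the usual shape of such a modifier; the actual definition lives elsewhere in CTMDeploymentTracker and may differ:

// SPDX-License-Identifier: MIT
pragma solidity 0.8.24;

error Unauthorized(address caller);

/// Illustrative sketch of a Bridgehub-gated callback; not the repository's implementation.
contract OnlyBridgehubSketch {
    address public immutable BRIDGE_HUB;

    constructor(address _bridgehub) {
        BRIDGE_HUB = _bridgehub;
    }

    modifier onlyBridgehub() {
        if (msg.sender != BRIDGE_HUB) {
            revert Unauthorized(msg.sender);
        }
        _;
    }

    // Restricting even an empty hook keeps the interface safe if a body is added later.
    function bridgehubConfirmL2Transaction(uint256, bytes32, bytes32) external onlyBridgehub {}
}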
4 changes: 4 additions & 0 deletions l1-contracts/contracts/common/L1ContractErrors.sol
@@ -362,6 +362,8 @@ error IncorrectTokenAddressFromNTV(bytes32 assetId, address tokenAddress);
error InvalidProofLengthForFinalNode();
// 0x7acd7817
error TokenIsNotLegacy();
+// 0xfade089a
+error LegacyEncodingUsedForNonL1Token();
// 0xa51fa558
error TokenIsLegacy();
// 0x29963361
@@ -387,6 +389,8 @@ error InvalidNTVBurnData();
error InvalidSystemLogsLength();
// 0x8efef97a
error LegacyBridgeNotSet();
+// 0x767eed08
+error LegacyMethodForNonL1Token();

enum SharedBridgeKey {
PostUpgradeFirstBatch,
@@ -63,28 +63,25 @@ error PriorityOpsDataRightPathLengthIsNotZero();
error PriorityOpsDataItemHashesLengthIsNotZero();

// 0x885ae069
-error OperatorDAInputTooSmall(uint256 operatorDAInputLength, uint256 BlobDataOffset);
+error OperatorDAInputTooSmall(uint256 operatorDAInputLength, uint256 minAllowedLength);

// 0xbeb96791
error InvalidNumberOfBlobs(uint256 blobsProvided, uint256 maxBlobsSupported);

-// 0xcd384e46
-error InvalidBlobsHashes(uint256 operatorDAInputLength, uint256 minNumberOfBlobHashes);

// 0xd2531c15
error InvalidL2DAOutputHash(bytes32 l2DAValidatorOutputHash);

-// 0x77a3c423
-error OnlyOneBlobWithCalldata();
+// 0x04e05fd1
+error OnlyOneBlobWithCalldataAllowed();

-// 0x086bb220
-error PubdataTooSmall(uint256 pubdataInputLength, uint256 blobCommitmentSize);
+// 0x2dc9747d
+error PubdataInputTooSmall(uint256 pubdataInputLength, uint256 totalBlobsCommitmentSize);

-// 0xcba35a08
-error PubdataTooLong(uint256 pubdataLength, uint256 blobSizeBytes);
+// 0x9044dff9
+error PubdataLengthTooBig(uint256 pubdataLength, uint256 totalBlobSizeBytes);

// 0x5513177c
-error InvalidPubdataHash(bytes32 fullPubdataHash, bytes32 pubdata);
+error InvalidPubdataHash(bytes32 fullPubdataHash, bytes32 providedPubdataHash);

// 0x5717f940
error InvalidPubdataSource(uint8 pubdataSource);
@@ -95,9 +92,6 @@ error BlobHashBlobCommitmentMismatchValue();
// 0x7fbff2dd
error L1DAValidatorInvalidSender(address msgSender);

-// 0x5ade0455
-error RootMismatch();

// 0xc06789fa
error InvalidCommitment();

@@ -2,7 +2,7 @@

pragma solidity 0.8.24;

-import {OperatorDAInputTooSmall, InvalidBlobsHashes, InvalidNumberOfBlobs, InvalidL2DAOutputHash, OnlyOneBlobWithCalldata, PubdataTooSmall, PubdataTooLong, InvalidPubdataHash} from "../L1StateTransitionErrors.sol";
+import {OperatorDAInputTooSmall, InvalidNumberOfBlobs, InvalidL2DAOutputHash, OnlyOneBlobWithCalldataAllowed, PubdataInputTooSmall, PubdataLengthTooBig, InvalidPubdataHash} from "../L1StateTransitionErrors.sol";

/// @dev Total number of bytes in a blob. Blob = 4096 field elements * 31 bytes per field element
/// @dev EIP-4844 defines it as 131_072 but we use 4096 * 31 within our circuits to always fit within a field element
@@ -61,7 +61,7 @@ abstract contract CalldataDA {
blobsLinearHashes = new bytes32[](_maxBlobsSupported);

if (_operatorDAInput.length < BLOB_DATA_OFFSET + 32 * blobsProvided) {
-revert InvalidBlobsHashes(_operatorDAInput.length, BLOB_DATA_OFFSET + 32 * blobsProvided);
+revert OperatorDAInputTooSmall(_operatorDAInput.length, BLOB_DATA_OFFSET + 32 * blobsProvided);
}

_cloneCalldata(blobsLinearHashes, _operatorDAInput[BLOB_DATA_OFFSET:], blobsProvided);
@@ -90,10 +90,10 @@ abstract contract CalldataDA {
bytes calldata _pubdataInput
) internal pure virtual returns (bytes32[] memory blobCommitments, bytes calldata _pubdata) {
if (_blobsProvided != 1) {
-revert OnlyOneBlobWithCalldata();
+revert OnlyOneBlobWithCalldataAllowed();
}
if (_pubdataInput.length < BLOB_COMMITMENT_SIZE) {
-revert PubdataTooSmall(_pubdataInput.length, BLOB_COMMITMENT_SIZE);
+revert PubdataInputTooSmall(_pubdataInput.length, BLOB_COMMITMENT_SIZE);
}

// We typically do not know whether we'll use calldata or blobs at the time when
@@ -104,7 +104,7 @@ abstract contract CalldataDA {
_pubdata = _pubdataInput[:_pubdataInput.length - BLOB_COMMITMENT_SIZE];

if (_pubdata.length > BLOB_SIZE_BYTES) {
-revert PubdataTooLong(_pubdata.length, BLOB_SIZE_BYTES);
+revert PubdataLengthTooBig(_pubdata.length, BLOB_SIZE_BYTES);
}
if (_fullPubdataHash != keccak256(_pubdata)) {
revert InvalidPubdataHash(_fullPubdataHash, keccak256(_pubdata));
@@ -3,7 +3,7 @@
pragma solidity 0.8.24;

import {CalldataDA, BLOB_COMMITMENT_SIZE, BLOB_SIZE_BYTES} from "./CalldataDA.sol";
-import {PubdataTooSmall, PubdataTooLong, InvalidPubdataHash} from "../L1StateTransitionErrors.sol";
+import {PubdataInputTooSmall, PubdataLengthTooBig, InvalidPubdataHash} from "../L1StateTransitionErrors.sol";

/// @notice Contract that contains the functionality for processing the calldata DA.
/// @dev The expected l2DAValidator that should be used with it `RollupL2DAValidator`.
@@ -16,7 +16,7 @@ abstract contract CalldataDAGateway is CalldataDA {
bytes calldata _pubdataInput
) internal pure override returns (bytes32[] memory blobCommitments, bytes calldata _pubdata) {
if (_pubdataInput.length < _blobsProvided * BLOB_COMMITMENT_SIZE) {
-revert PubdataTooSmall(_pubdataInput.length, _blobsProvided * BLOB_COMMITMENT_SIZE);
+revert PubdataInputTooSmall(_pubdataInput.length, _blobsProvided * BLOB_COMMITMENT_SIZE);
}

// We typically do not know whether we'll use calldata or blobs at the time when
Expand All @@ -26,7 +26,7 @@ abstract contract CalldataDAGateway is CalldataDA {
_pubdata = _pubdataInput[:_pubdataInput.length - _blobsProvided * BLOB_COMMITMENT_SIZE];

if (_pubdata.length > _blobsProvided * BLOB_SIZE_BYTES) {
revert PubdataTooLong(_pubdata.length, _blobsProvided * BLOB_SIZE_BYTES);
revert PubdataLengthTooBig(_pubdata.length, _blobsProvided * BLOB_SIZE_BYTES);
}
if (_fullPubdataHash != keccak256(_pubdata)) {
revert InvalidPubdataHash(_fullPubdataHash, keccak256(_pubdata));
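Both CalldataDA variants treat the operator input as pubdata followed by one blob commitment per blob, and trim the trailing commitments before hashing the pubdata. A worked sketch of that split, assuming a 32-byte commitment (the real value is the BLOB_COMMITMENT_SIZE constant from CalldataDA.sol); with two blobs, the final 64 bytes are commitments and everything before them is pubdata:

// SPDX-License-Identifier: MIT
pragma solidity 0.8.24;

/// Illustrative split of the operator-provided bytes into pubdata and trailing commitments.
library PubdataSplitSketch {
    uint256 internal constant ASSUMED_BLOB_COMMITMENT_SIZE = 32; // assumption for this sketch

    function split(
        bytes calldata pubdataInput,
        uint256 blobsProvided
    ) internal pure returns (bytes calldata pubdata, bytes calldata commitments) {
        uint256 tail = blobsProvided * ASSUMED_BLOB_COMMITMENT_SIZE;
        require(pubdataInput.length >= tail, "pubdata input too small");
        // e.g. blobsProvided == 2: the final 64 bytes are commitments, the rest is pubdata.
        pubdata = pubdataInput[:pubdataInput.length - tail];
        commitments = pubdataInput[pubdataInput.length - tail:];
    }
}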
@@ -5,7 +5,7 @@ pragma solidity ^0.8.21;
import {DynamicIncrementalMerkle} from "../../common/libraries/DynamicIncrementalMerkle.sol";
import {Merkle} from "../../common/libraries/Merkle.sol";
import {PriorityTreeCommitment} from "../../common/Config.sol";
-import {RootMismatch, InvalidCommitment, InvalidStartIndex, InvalidUnprocessedIndex, InvalidNextLeafIndex} from "../L1StateTransitionErrors.sol";
+import {NotHistoricalRoot, InvalidCommitment, InvalidStartIndex, InvalidUnprocessedIndex, InvalidNextLeafIndex} from "../L1StateTransitionErrors.sol";

struct PriorityOpsBatchInfo {
bytes32[] leftPath;
@@ -82,7 +82,7 @@ library PriorityTree {
_priorityOpsData.itemHashes
);
if (!_tree.historicalRoots[expectedRoot]) {
-revert RootMismatch();
+revert NotHistoricalRoot();
}
_tree.unprocessedIndex += _priorityOpsData.itemHashes.length;
}
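Renaming RootMismatch to NotHistoricalRoot makes the check's intent explicit: the root recomputed from the supplied paths must be one the tree has recorded before. The sketch below shows that bookkeeping pattern in isolation; it is an assumption about the shape of the mechanism, since the actual recording happens inside PriorityTree and DynamicIncrementalMerkle:

// SPDX-License-Identifier: MIT
pragma solidity 0.8.24;

error NotHistoricalRoot();

/// Illustrative "historical roots" pattern; not the repository's implementation.
contract HistoricalRootsSketch {
    mapping(bytes32 => bool) public historicalRoots;

    function recordRoot(bytes32 newRoot) external {
        // In the real tree this happens whenever the priority tree advances.
        historicalRoots[newRoot] = true;
    }

    function requireHistorical(bytes32 expectedRoot) external view {
        if (!historicalRoots[expectedRoot]) {
            revert NotHistoricalRoot();
        }
    }
}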
@@ -20,28 +20,25 @@ error PointEvalCallFailed(bytes);
error PointEvalFailed(bytes);

// 0xf4a3e629
-error OperatorDAInputLengthTooSmall(uint256 operatorDAInputLength, uint256 blobDataOffset);
+error OperatorDAInputTooSmall(uint256 operatorDAInputLength, uint256 minAllowedLength);

// 0xbeb96791
error InvalidNumberOfBlobs(uint256 blobsProvided, uint256 maxBlobsSupported);

-// 0xcd384e46
-error InvalidBlobsHashes(uint256 operatorDAInputLength, uint256 blobsProvided);

// 0xe9e79528
error InvalidL2DAOutputHash();

-// 0x3db6e664
-error OneBlobWithCalldata();
+// 0x04e05fd1
+error OnlyOneBlobWithCalldataAllowed();

// 0x2dc9747d
-error PubdataInputTooSmall(uint256 pubdataInputLength, uint256 blobCommitmentSize);
+error PubdataInputTooSmall(uint256 pubdataInputLength, uint256 totalBlobsCommitmentSize);

// 0x9044dff9
-error PubdataLengthTooBig(uint256 pubdataLength, uint256 blobSizeBytes);
+error PubdataLengthTooBig(uint256 pubdataLength, uint256 totalBlobSizeBytes);

// 0x5513177c
-error InvalidPubdataHash(bytes32 fullPubdataHash, bytes32 pubdata);
+error InvalidPubdataHash(bytes32 fullPubdataHash, bytes32 providedPubdataHash);

// 0xc771423e
error BlobCommitmentNotPublished();
